{
"source": "jdlar1/lidar-pbl",
"score": 3
}
#### File: lidar_pbl/cli/app.py
```python
import typer
from lidar_pbl import LidarDataset
app = typer.Typer()
@app.command()
def quicklook(
data_dir: str = typer.Argument(..., help="Path to the data directory"),
dark_current_dir: str = typer.Argument(
..., help="Path to the dark current directory"
),
max_height: float = typer.Option(2000, help="Maximum height in the Quicklook"),
methods: bool = typer.Option(False, help="Plot the methods"),
):
"""
Command line interface for the lidar_pbl package.
"""
lidar_dataset = LidarDataset(
data_dir=data_dir,
dark_current_dir=dark_current_dir,
)
lidar_dataset.quicklook(max_height=max_height)
if methods:
lidar_dataset.gradient_pbl(min_height=400, max_height=1250, min_grad=-0.05)
lidar_dataset.wavelet_pbl(min_height=400, max_height=1250, a_meters=90)
lidar_dataset.variance_pbl(min_height=400, max_height=1250)
lidar_dataset.show()
@app.command()
def convert(
input_file: str = typer.Argument(..., help="Path to the input file"),
output_file: str = typer.Argument(..., help="Path to the output file"),
):
"""
Convert an input file to an output file.
"""
typer.echo(f"Input file: {input_file}")
typer.echo(f"Output file: {output_file}")
def run():
app()
```
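A minimal sketch of driving these commands without a shell, using Typer's built-in test runner (`typer.testing.CliRunner` ships with Typer; the data paths below are placeholders, not paths from the repository):
```python
from typer.testing import CliRunner

from lidar_pbl.cli.app import app

runner = CliRunner()
# "data/" and "dark/" are hypothetical directories for the two required arguments.
result = runner.invoke(app, ["quicklook", "data/", "dark/", "--max-height", "1500"])
print(result.exit_code)
print(result.output)
```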
{
"source": "jdlar1/ray_tracing",
"score": 3
}
#### File: ray_tracing/core/optic_path.py
```python
import os
import time
import matplotlib.pyplot as plt
import matplotlib.image as img
from matplotlib_scalebar.scalebar import ScaleBar, SI_LENGTH
import numpy as np
class OpticalSystem:
def __init__(self):
self.A = [0, 0, 0]
self.A[0] = np.eye(2)
self.A[1] = np.eye(2)
self.A[2] = np.eye(2)
self.d0 = None
def load(self, image_name, image_height = 6779000):
self.image = img.imread(os.path.join('images', image_name)) # Load the image
self.image = self.image.astype(np.uint8)
self.ishape = self.image.shape # Image size
self.image_name = image_name
self.x_abs = self.ishape[1] # Size in x
self.y_abs = self.ishape[0] # Size in y
self.x_mid = self.x_abs/2
self.y_mid = self.y_abs/2
self.pixel_height = image_height/self.y_abs
print()
print(f'Image {image_name} loaded')
self.output_name = f'{image_name[:image_name.find(".")]}_output.jpg' # Output file name
def add_space(self, d, n = 1):
# Transfer matrix
if type(n) in [int, float]:
n0 = np.array([n,n,n])
else:
n0 = np.array(n)
self.A[0] = np.array([[1, 0],[d/n0[0], 1]]).dot(self.A[0])
self.A[1] = np.array([[1, 0],[d/n0[1], 1]]).dot(self.A[1])
self.A[2] = np.array([[1, 0],[d/n0[2], 1]]).dot(self.A[2])
if self.d0 is None:
self.d0 = d
self.n0 = n0
def add_plane_mirror(self):
# Plane mirror matrix
self.A[0] = np.array([[-1, 0], [0, 1]]).dot(self.A[0])
self.A[1] = np.array([[-1, 0], [0, 1]]).dot(self.A[1])
self.A[2] = np.array([[-1, 0], [0, 1]]).dot(self.A[2])
def add_single_lens(self, R1, R2, nl, dl):
if type(nl) in [int, float]:
n0 = np.array([nl,nl,nl])
else:
n0 = np.array(nl)
# Power of the surfaces
D1 = (n0 - 1)/R1
D2 = (n0 - 1)/(-R2)
# Lens matrix terms
a1 = (1 - (D2*dl)/n0)
a2 = -D1-D2+(D1*D2*dl/n0)
a3 = dl/n0
a4 = (1 - (D1*dl)/n0)
# Update the global matrix A
self.A[0] = np.array([[a1[0],a2[0]],[a3[0],a4[0]]]).dot(self.A[0])
self.A[1] = np.array([[a1[1],a2[1]],[a3[1],a4[1]]]).dot(self.A[1])
self.A[2] = np.array([[a1[2],a2[2]],[a3[2],a4[2]]]).dot(self.A[2])
def add_curved_mirror(self, R, n = 1):
if type(n) in [int, float]:
n0 = [n,n,n]
else:
n0 = n.copy()
self.A[0] = np.array([[-1, (-2*n0[0])/R], [0, 1]]).dot(self.A[0])
self.A[1] = np.array([[-1, (-2*n0[1])/R], [0, 1]]).dot(self.A[1])
self.A[2] = np.array([[-1, (-2*n0[2])/R], [0, 1]]).dot(self.A[2])
def trace(self, ray_count = 2, output_size = None, save_rays = False, magnification = 1):
if output_size is None:
self.transformed = np.zeros((self.y_abs, self.y_abs, 3), dtype=np.uint8) # Create the output matrix
else:
self.transformed = np.zeros((output_size[1], output_size[0], 3), dtype=np.uint8) # Create the output matrix
# output_size must be (width, height)
self.output_size = self.transformed.shape
self.magnification = magnification
print(self.output_size)
print(f'Matrix A (R): \n{self.A[0]}')
print()
print('Starting ray tracing')
print()
start = time.time() # Start time
temporal_matrix = np.zeros((*self.output_size, ray_count), dtype=np.uint8)
progress, total_progress = 0, self.image.size
for index, pixel in np.ndenumerate(self.image):
progress_bar(progress, total_progress, prefix = 'Progress:', suffix = 'Complete', length = 70)
progress += 1
x = index[1] - self.x_mid # Convert to centered coordinates
y = index[0] - self.y_mid
r = np.sqrt(x**2+y**2) # Distance from the origin to the point
y_obj = (r*self.pixel_height) # Multiply by the physical size of each pixel in meters
if y_obj == 0:
continue
alpha_principal = -np.arctan(y_obj/self.d0)
for ray_num, alpha in np.ndenumerate(np.linspace(alpha_principal, 0, ray_count)):
v_in = np.array([self.n0[index[2]]*alpha, y_obj])
v_out = self.A[index[2]].dot(v_in)
y_image = v_out[1]
mg = (y_image/y_obj)*magnification
x_ = mg*x
y_ = mg*y
pos_x_prime = int(x_ + self.output_size[1]/2)
pos_y_prime = int(y_ + self.output_size[0]/2)
if (pos_x_prime < 0) or (pos_x_prime >= self.output_size[1]):
continue
if (pos_y_prime < 0) or (pos_y_prime >= self.output_size[0]):
continue
temporal_matrix[pos_y_prime, pos_x_prime, index[2], ray_num] = pixel
self.transformed = temporal_matrix/255
center_color1 = self.image[int(self.y_mid+1), int(self.x_mid+1), :] # Central pixel correction
center_color2 = self.image[int(self.y_mid-1), int(self.x_mid-1), :]
stop = time.time() # End time
print()
print(f'Ray tracing finished in {(stop-start):.2f} seconds')
print()
if save_rays == True:
np.save(f'{self.image_name[:self.image_name.find(".")]}_matrix_output.npy', self.transformed)
def plot(self, save = False):
fig, ax = plt.subplots(1, 2, figsize = (14,6))
ax[0].imshow(self.image)
ax[0].set_title('Original image', fontsize = 14)
ax[0].add_artist(ScaleBar(self.pixel_height, 'm')) # Scale bar
ax[1].imshow(self.transformed.mean(3))
ax[1].set_title('Final image', fontsize = 14)
ax[1].add_artist(ScaleBar(self.pixel_height/self.magnification, 'm')) # Scale bar
if save:
fig.savefig(os.path.join('outputs', self.output_name))
plt.show(block = True)
def progress_bar(iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)
# Print New Line on Complete
if iteration == total:
print()
```
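The `trace` routine propagates the state vector `[n*alpha, y]` through the accumulated transfer matrix `A`. As a self-contained sanity check of that convention (a sketch, not part of the repository), the snippet below composes free-space and thin-lens matrices with numpy and recovers the thin-lens magnification `-di/do`; the focal length and distances are arbitrary illustration values:
```python
import numpy as np

def space(d, n=1.0):
    # Free-space transfer matrix acting on [n*alpha, y], as in OpticalSystem.add_space
    return np.array([[1.0, 0.0], [d / n, 1.0]])

def thin_lens(f):
    # dl -> 0 limit of OpticalSystem.add_single_lens: total power D1 + D2 = 1/f
    return np.array([[1.0, -1.0 / f], [0.0, 1.0]])

f, d_obj = 0.1, 0.3                    # focal length and object distance (m)
d_img = 1.0 / (1.0 / f - 1.0 / d_obj)  # image distance from 1/f = 1/do + 1/di
A = space(d_img) @ thin_lens(f) @ space(d_obj)

alpha, y_in = 0.01, 1e-3               # ray angle (rad) and object height (m)
v_out = A @ np.array([alpha, y_in])    # n = 1, so the first component is alpha
print(v_out[1] / y_in)                 # -0.5
print(-d_img / d_obj)                  # -0.5, the thin-lens magnification
```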
{
"source": "jdlarsen-UA/flopy",
"score": 3
}
#### File: flopy/autotest/t010_test.py
```python
import os
import flopy
from flopy.modflow.mfsfr2 import check
tpth = os.path.abspath(os.path.join("temp", "t010"))
# make the directory if it does not exist
if not os.path.isdir(tpth):
os.makedirs(tpth)
if os.path.split(os.getcwd())[-1] == "flopy3":
path = os.path.join("examples", "data", "mf2005_test")
cpth = os.path.join("py.test/temp")
else:
path = os.path.join("..", "examples", "data", "mf2005_test")
cpth = os.path.join(tpth)
sfr_items = {
0: {"mfnam": "test1ss.nam", "sfrfile": "test1ss.sfr"},
1: {"mfnam": "test1tr.nam", "sfrfile": "test1tr.sfr"},
2: {"mfnam": "testsfr2_tab.nam", "sfrfile": "testsfr2_tab_ICALC1.sfr"},
3: {"mfnam": "testsfr2_tab.nam", "sfrfile": "testsfr2_tab_ICALC2.sfr"},
4: {"mfnam": "testsfr2.nam", "sfrfile": "testsfr2.sfr"},
5: {"mfnam": "UZFtest2.nam", "sfrfile": "UZFtest2.sfr"},
}
def load_check_sfr(i, mfnam, model_ws, checker_output_path):
# print('Testing {}\n'.format(mfnam) + '='*100)
m = flopy.modflow.Modflow.load(mfnam, model_ws=model_ws)
m.model_ws = checker_output_path
checker_outfile = os.path.join(tpth, f"SFRcheck_{m.name}.txt")
chk = m.sfr.check(checker_outfile, level=1)
if i == 1:
assert "overlapping conductance" in chk.warnings
if i == 2:
assert "segment elevations vs. model grid" in chk.warnings
return
def test_sfrcheck():
m = flopy.modflow.Modflow.load("test1tr.nam", model_ws=path, verbose=False)
# run level=0 check
m.model_ws = cpth
fpth = "SFRchecker_results.txt"
m.sfr.check(fpth, level=0)
# test checks without modifications
chk = check(m.sfr)
chk.numbering()
assert "continuity in segment and reach numbering" in chk.passed
chk.routing()
assert "circular routing" in chk.passed
chk.overlapping_conductance()
assert (
"overlapping conductance" in chk.warnings
) # this example model has overlapping conductance
chk.elevations()
for test in [
"segment elevations",
"reach elevations",
"reach elevations vs. grid elevations",
]:
assert test in chk.passed
chk.slope()
assert "minimum slope" in chk.passed
# create gaps in segment numbering
m.sfr.segment_data[0]["nseg"][-1] += 1
m.sfr.reach_data["ireach"][3] += 1
# create circular routing instance
m.sfr.segment_data[0]["outseg"][0] = 1
m.sfr._graph = None # weak, but the above shouldn't happen
chk = check(m.sfr)
chk.numbering()
assert "continuity in segment and reach numbering" in chk.errors
chk.routing()
assert "circular routing" in chk.errors
m.sfr.segment_data[0]["nseg"][-1] -= 1
m.sfr.isfropt = 1.0
chk = check(m.sfr)
chk.elevations()
# throw warning if isfropt=1 and strtop at default
assert "maximum streambed top" in chk.warnings
assert "minimum streambed top" in chk.warnings
m.sfr.reach_data["strtop"] = m.sfr._interpolate_to_reaches(
"elevup", "elevdn"
)
m.sfr.get_slopes()
m.sfr.reach_data["strhc1"] = 1.0
m.sfr.reach_data["strthick"] = 1.0
chk = check(m.sfr)
chk.elevations()
assert "maximum streambed top" in chk.passed
assert "minimum streambed top" in chk.passed
m.sfr.reach_data["strtop"][2] = -99.0
chk = check(m.sfr)
chk.elevations()
assert "minimum streambed top" in chk.warnings
m.sfr.reach_data["strtop"][2] = 99999.0
chk = check(m.sfr)
chk.elevations()
assert "maximum streambed top" in chk.warnings
assert True
def test_sfrloadcheck():
for i, case in sfr_items.items():
yield load_check_sfr, i, case["mfnam"], path, cpth
def load_sfr_isfropt_icalc(isfropt, icalc):
pth = os.path.join("..", "examples", "data", "sfr_test")
nam = f"sfrtest{isfropt}{icalc}.nam"
ml = flopy.modflow.Modflow.load(
nam, check=False, model_ws=pth, exe_name="mfnwt"
)
sfr = ml.get_package("SFR")
if sfr is None:
raise AssertionError()
ml.change_model_ws(tpth)
ml.write_input()
success = ml.run_model()[0]
if not success:
raise AssertionError(
f"sfrtest{isfropt}{icalc}.nam "
"is broken, please fix SFR 6a, 6bc logic!"
)
def test_isfropt_icalc():
# test all valid combinations of isfropt and icalc
for isfropt in range(6):
for icalc in range(5):
yield load_sfr_isfropt_icalc, isfropt, icalc
if __name__ == "__main__":
test_sfrcheck()
for i, case in sfr_items.items():
load_check_sfr(i, case["mfnam"], path, cpth)
for isfropt in range(6):
for icalc in range(5):
load_sfr_isfropt_icalc(isfropt, icalc)
```
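`test_sfrloadcheck` and `test_isfropt_icalc` follow the legacy nose generator-test pattern, yielding a callable plus its arguments for each case; under modern pytest the same matrix of cases would be expressed with `pytest.mark.parametrize`. A sketch (not part of the original test file):
```python
import pytest

@pytest.mark.parametrize("i,case", list(sfr_items.items()))
def test_sfrloadcheck_parametrized(i, case):
    load_check_sfr(i, case["mfnam"], path, cpth)

@pytest.mark.parametrize("isfropt", range(6))
@pytest.mark.parametrize("icalc", range(5))
def test_isfropt_icalc_parametrized(isfropt, icalc):
    load_sfr_isfropt_icalc(isfropt, icalc)
```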
#### File: flopy/autotest/t035_test.py
```python
import os
import sys
import shutil
import numpy as np
import flopy
try:
import pymake
except ImportError:
print("could not import pymake")
cpth = os.path.join("temp", "t035")
# delete the directory if it exists
if os.path.isdir(cpth):
shutil.rmtree(cpth)
exe_name = "mflgr"
v = flopy.which(exe_name)
run = True
if v is None:
run = False
# skip on Windows and macOS due to intermittent CI failures
else:
if sys.platform.lower() in ("win32", "darwin"):
run = False
def test_simplelgr_load_and_write(silent=True):
# Test load and write of distributed MODFLOW-LGR example problem
pth = os.path.join("..", "examples", "data", "mflgr_v2", "ex3")
opth = os.path.join(cpth, "ex3", "orig")
# delete the directory if it exists
if os.path.isdir(opth):
shutil.rmtree(opth)
os.makedirs(opth)
# copy the original files
files = os.listdir(pth)
for file in files:
src = os.path.join(pth, file)
dst = os.path.join(opth, file)
shutil.copyfile(src, dst)
# load the lgr model
lgr = flopy.modflowlgr.ModflowLgr.load(
"ex3.lgr", verbose=True, model_ws=opth, exe_name=exe_name
)
# get the namefiles of the parent and child
namefiles = lgr.get_namefiles()
msg = f"get_namefiles returned {len(namefiles)} items instead of 2"
assert len(namefiles) == 2, msg
tpth = os.path.dirname(namefiles[0])
msg = f"dir path is {tpth} not {opth}"
assert tpth == opth, msg
# run the lgr model
if run:
success, buff = lgr.run_model(silent=silent)
assert success, "could not run original modflow-lgr model"
# check that a parent and child were read
msg = "modflow-lgr ex3 does not have 2 grids"
assert lgr.ngrids == 2, msg
npth = os.path.join(cpth, "ex3", "new")
lgr.change_model_ws(new_pth=npth, reset_external=True)
# get the namefiles of the parent and child
namefiles = lgr.get_namefiles()
msg = f"get_namefiles returned {len(namefiles)} items instead of 2"
assert len(namefiles) == 2, msg
tpth = os.path.dirname(namefiles[0])
msg = f"dir path is {tpth} not {npth}"
assert tpth == npth, msg
# write the lgr model in to the new path
lgr.write_input()
# run the lgr model
if run:
success, buff = lgr.run_model(silent=silent)
assert success, "could not run new modflow-lgr model"
# compare parent results
print("compare parent results")
pth0 = os.path.join(opth, "ex3_parent.nam")
pth1 = os.path.join(npth, "ex3_parent.nam")
msg = "parent heads do not match"
success = pymake.compare_heads(pth0, pth1)
assert success, msg
# compare child results
print("compare child results")
pth0 = os.path.join(opth, "ex3_child.nam")
pth1 = os.path.join(npth, "ex3_child.nam")
msg = "child heads do not match"
success = pymake.compare_heads(pth0, pth1)
assert success, msg
# clean up
shutil.rmtree(cpth)
def singleModel(
iChild,
modelname,
Lx,
Ly,
nlay,
nrow,
ncol,
delr,
delc,
botm,
hkPerLayer,
vkaPerLayer,
laytyp,
ssPerLayer,
nper,
perlen,
tsmult,
nstp,
steady,
xul,
yul,
proj4_str,
mfExe,
rundir=".",
welInfo=[],
startingHead=0.0,
lRunSingle=False,
):
if iChild > 0:
print(f"child model {modelname}")
iLUoffset = 100 * int(iChild)
print(f"increase Unit Numbers by {iLUoffset}")
else:
print(f"parent model {modelname}")
iLUoffset = 0
if steady:
nper = 1
perlen = 1
nstp = [1]
# Assign name and create modflow model object
mf = flopy.modflow.Modflow(
modelname, exe_name=mfExe, listunit=2 + iLUoffset, model_ws=rundir
)
# Create the discretization object
dis = flopy.modflow.ModflowDis(
mf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=botm[0],
botm=botm[1:],
nper=nper,
perlen=perlen,
tsmult=1.07,
nstp=nstp,
steady=steady,
itmuni=4,
lenuni=2,
unitnumber=11 + iLUoffset,
xul=xul,
yul=yul,
proj4_str=proj4_str,
start_datetime="28/2/2019",
)
# Variables for the BAS package
ibound = np.ones((nlay, nrow, ncol), dtype=np.int32)
if iChild > 0:
iBndBnd = 59 # code for child cell to be linked to parent; value assigned to ibflg in the LGR-data
else:
iBndBnd = -1
ibound[:, 0, :] = iBndBnd
ibound[:, -1, :] = iBndBnd
ibound[:, :, 0] = iBndBnd
ibound[:, :, -1] = iBndBnd
strt = np.ones((nlay, nrow, ncol), dtype=np.float32) * startingHead
bas = flopy.modflow.ModflowBas(
mf, ibound=ibound, strt=strt, unitnumber=13 + iLUoffset
)
# Add LPF package to the MODFLOW model
lpf = flopy.modflow.ModflowLpf(
mf,
hk=hkPerLayer,
vka=vkaPerLayer,
ss=ssPerLayer,
ipakcb=53 + iLUoffset,
unitnumber=15 + iLUoffset,
)
# add WEL package to the MODFLOW model
if len(welInfo) > 0:
wel_sp = []
for welData in welInfo:
# get data for current well
welLay = welData[0]
welX = welData[1]
welY = welData[2]
welQ = welData[3]
# calculate row and column for current well in grid
welRow = int((yul - welY) / delc) # check this calculation !!!
welCol = int((welX - xul) / delr) # check this calculation !!!
if welRow < nrow and welRow >= 0 and welCol < ncol and welCol >= 0:
# add well package data for well
wel_sp.append([welLay, welRow, welCol, welQ])
if len(wel_sp) > 0:
stress_period_data = {0: wel_sp}
wel = flopy.modflow.ModflowWel(
mf,
stress_period_data=stress_period_data,
unitnumber=20 + iLUoffset,
)
# Add OC package to the MODFLOW model
spd = {}
for kper in range(nper):
for kstp in range(nstp[kper]):
spd[(kper, kstp)] = ["save head", "save budget"]
oc = flopy.modflow.ModflowOc(
mf,
stress_period_data=spd,
compact=True,
extension=["oc", "hds", "cbc"],
unitnumber=[14 + iLUoffset, 51 + iLUoffset, 53 + iLUoffset],
)
# Add PCG package to the MODFLOW model
pcg = flopy.modflow.ModflowPcg(mf, unitnumber=27 + iLUoffset)
if lRunSingle:
# Write the MODFLOW model input files
mf.write_input()
# Run the MODFLOW model
if run:
success, buff = mf.run_model()
if success:
print(modelname, " ran successfully")
else:
print("problem running ", modelname)
return mf
def test_simple_lgrmodel_from_scratch(silent=True):
# coordinates and extend Mother
Lx_m = 1500.0
Ly_m = 2500.0
nrow_m = 25
ncol_m = 15
delr_m = Lx_m / ncol_m
delc_m = Ly_m / nrow_m
xul_m = 50550
yul_m = 418266
# Child Model domain and grid definition
modelname = "child0" # steady state version of 'T_PW_50cm'
Lx = 300.0
Ly = 300.0
ncpp = 10 # number of child cells per parent cell
nrow = int(Ly * float(ncpp) / float(delc_m))
ncol = int(Lx * float(ncpp) / float(delr_m))
delr = Lx / ncol
delc = Ly / nrow
botm = [0.0, -15.0, -20.0, -40.0]
hkPerLayer = [1.0, 0.0015, 15.0]
ssPerLayer = [0.1, 0.001, 0.001]
nlay = len(hkPerLayer)
ilayW = 2
laytyp = 0
xul_c = 50985.00
yul_c = 416791.06
proj4_str = "EPSG:28992"
nper = 1
at = 42
perlen = [at]
ats = 100
nstp = [ats]
tsmult = 1.07
steady = True
rundir = f"{cpth}b"
lgrExe = exe_name
# wel data
pumping_rate = -720
infiltration_rate = 360
welInfo = [
[ilayW, 51135.0, 416641.0, pumping_rate],
[ilayW, 51059.0, 416750.0, infiltration_rate],
[ilayW, 51170.0, 416560.0, 0.0],
[ilayW, 51012.0, 416693.0, infiltration_rate],
[ilayW, 51220.0, 416628.0, 0.0],
]
child = singleModel(
1,
modelname,
Lx,
Ly,
nlay,
nrow,
ncol,
delr,
delc,
botm,
hkPerLayer,
hkPerLayer,
laytyp,
ssPerLayer,
nper,
perlen,
tsmult,
nstp,
steady,
xul_c,
yul_c,
proj4_str,
exe_name,
rundir=rundir,
welInfo=welInfo,
startingHead=-2.0,
)
modelname = "mother0"
mother = singleModel(
0,
modelname,
Lx_m,
Ly_m,
nlay,
nrow_m,
ncol_m,
delr_m,
delc_m,
botm,
hkPerLayer,
hkPerLayer,
laytyp,
ssPerLayer,
nper,
perlen,
tsmult,
nstp,
steady,
xul_m,
yul_m,
proj4_str,
exe_name,
rundir=rundir,
welInfo=welInfo,
startingHead=-2.0,
)
# setup LGR
nprbeg = int((yul_m - yul_c) / delc_m)
npcbeg = int((xul_c - xul_m) / delr_m)
nprend = int(nrow / ncpp + nprbeg - 1)
npcend = int(ncol / ncpp + npcbeg - 1)
childData = [
flopy.modflowlgr.mflgr.LgrChild(
ishflg=1,
ibflg=59,
iucbhsv=80,
iucbfsv=81,
mxlgriter=20,
ioutlgr=1,
relaxh=0.4,
relaxf=0.4,
hcloselgr=5e-3,
fcloselgr=5e-2,
nplbeg=0,
nprbeg=nprbeg,
npcbeg=npcbeg,
nplend=nlay - 1,
nprend=nprend,
npcend=npcend,
ncpp=ncpp,
ncppl=1,
)
]
lgrModel = flopy.modflowlgr.mflgr.ModflowLgr(
modelname="PS1",
exe_name=lgrExe,
iupbhsv=82,
iupbfsv=83,
parent=mother,
children=[child],
children_data=childData,
model_ws=rundir,
external_path=None,
verbose=False,
)
# write LGR-files
lgrModel.write_input()
# run LGR
if run:
success, buff = lgrModel.run_model(silent=silent)
assert success
# clean up
shutil.rmtree(rundir)
return
if __name__ == "__main__":
test_simplelgr_load_and_write(silent=False)
test_simple_lgrmodel_from_scratch(silent=False)
```
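The parent/child alignment in `test_simple_lgrmodel_from_scratch` reduces to simple offset arithmetic on the parent grid; a worked check using the values from the test itself:
```python
# Worked check of the LGR index arithmetic (values from the test above).
delr_m = delc_m = 100.0                       # 1500/15 and 2500/25
nprbeg = int((418266 - 416791.06) / delc_m)   # 14: child starts 14 parent rows down
npcbeg = int((50985.0 - 50550) / delr_m)      # 4: and 4 parent columns across
nrow, ncol, ncpp = 30, 30, 10                 # child grid: 300 m at 10 cells per parent cell
nprend = nrow // ncpp + nprbeg - 1            # 16
npcend = ncol // ncpp + npcbeg - 1            # 6
print(nprbeg, nprend, npcbeg, npcend)         # child spans parent rows 14-16, cols 4-6
```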
#### File: flopy/autotest/t038_test.py
```python
import os
import flopy
# make the working directory
tpth = os.path.join("temp", "t038")
if not os.path.isdir(tpth):
os.makedirs(tpth)
# build list of name files to try and load
usgpth = os.path.join("..", "examples", "data", "mfusg_test")
usg_files = []
for path, subdirs, files in os.walk(usgpth):
for name in files:
if name.endswith(".nam"):
usg_files.append(os.path.join(path, name))
#
def test_load_usg():
for fusg in usg_files:
d, f = os.path.split(fusg)
yield load_model, f, d
# function to load a MODFLOW-USG model and then write it back out
def load_model(namfile, model_ws):
m = flopy.modflow.Modflow.load(
namfile, model_ws=model_ws, version="mfusg", verbose=True, check=False
)
assert m, f"Could not load namefile {namfile}"
assert m.load_fail is False
m.change_model_ws(tpth)
m.write_input()
return
if __name__ == "__main__":
for fusg in usg_files:
d, f = os.path.split(fusg)
load_model(f, d)
```
#### File: flopy/autotest/t078_test_lake_connections.py
```python
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
import flopy
pth = os.path.join("..", "examples", "data", "mf6-freyberg")
name = "freyberg"
tpth = os.path.join("temp", "t078")
# delete the directory if it exists
if os.path.isdir(tpth):
shutil.rmtree(tpth)
# make the directory
os.makedirs(tpth)
def __export_ascii_grid(modelgrid, file_path, v, nodata=0.0):
shape = v.shape
xcenters = modelgrid.xcellcenters[0, :]
cellsize = xcenters[1] - xcenters[0]
with open(file_path, "w") as f:
f.write(f"NCOLS {shape[1]}\n")
f.write(f"NROWS {shape[0]}\n")
f.write(f"XLLCENTER {modelgrid.xoffset + 0.5 * cellsize}\n")
f.write(f"YLLCENTER {modelgrid.yoffset + 0.5 * cellsize}\n")
f.write(f"CELLSIZE {cellsize}\n")
f.write(f"NODATA_VALUE {nodata}\n")
np.savetxt(f, v, fmt="%.4f")
return
# derived from original modflow6-examples function in ex-gwt-prudic2004t2
def __get_lake_connection_data(
nrow, ncol, delr, delc, lakibd, idomain, lakebed_leakance
):
lakeconnectiondata = []
nlakecon = [0, 0]
lak_leakance = lakebed_leakance
for i in range(nrow):
for j in range(ncol):
if lakibd[i, j] == 0:
continue
else:
ilak = lakibd[i, j] - 1
# back
if i > 0:
ci2d, ci = (i - 1, j), (0, i - 1, j)
if lakibd[ci2d] == 0 and idomain[ci] > 0:
h = [
ilak,
nlakecon[ilak],
ci,
"horizontal",
lak_leakance,
0.0,
0.0,
0.5 * delc,
delr,
]
nlakecon[ilak] += 1
lakeconnectiondata.append(h)
# left
if j > 0:
ci2d, ci = (i, j - 1), (0, i, j - 1)
if lakibd[ci2d] == 0 and idomain[ci] > 0:
h = [
ilak,
nlakecon[ilak],
ci,
"horizontal",
lak_leakance,
0.0,
0.0,
0.5 * delr,
delc,
]
nlakecon[ilak] += 1
lakeconnectiondata.append(h)
# right
if j < ncol - 1:
ci2d, ci = (i, j + 1), (0, i, j + 1)
if lakibd[ci2d] == 0 and idomain[ci] > 0:
h = [
ilak,
nlakecon[ilak],
ci,
"horizontal",
lak_leakance,
0.0,
0.0,
0.5 * delr,
delc,
]
nlakecon[ilak] += 1
lakeconnectiondata.append(h)
# front
if i < nrow - 1:
ci2d, ci = (i + 1, j), (0, i + 1, j)
if lakibd[ci2d] == 0 and idomain[ci] > 0:
h = [
ilak,
nlakecon[ilak],
ci,
"horizontal",
lak_leakance,
0.0,
0.0,
0.5 * delc,
delr,
]
nlakecon[ilak] += 1
lakeconnectiondata.append(h)
# vertical
v = [
ilak,
nlakecon[ilak],
(1, i, j),
"vertical",
lak_leakance,
0.0,
0.0,
0.0,
0.0,
]
nlakecon[ilak] += 1
lakeconnectiondata.append(v)
return lakeconnectiondata, nlakecon
def test_base_run():
sim = flopy.mf6.MFSimulation().load(
sim_name=name,
sim_ws=pth,
exe_name="mf6",
verbosity_level=0,
)
ws = os.path.join(tpth, "freyberg")
sim.set_sim_path(ws)
# remove the well package
gwf = sim.get_model("freyberg")
gwf.remove_package("wel_0")
# write the simulation files and run the model
sim.write_simulation()
sim.run_simulation()
# export bottom, water levels, and k11 as ascii raster files
# for interpolation in test_lake()
bot = gwf.dis.botm.array.squeeze()
__export_ascii_grid(
gwf.modelgrid,
os.path.join(ws, "bot.asc"),
bot,
)
top = gwf.output.head().get_data().squeeze() + 2.0
top = np.where(gwf.dis.idomain.array.squeeze() < 1.0, 0.0, top)
__export_ascii_grid(
gwf.modelgrid,
os.path.join(ws, "top.asc"),
top,
)
k11 = gwf.npf.k.array.squeeze()
__export_ascii_grid(
gwf.modelgrid,
os.path.join(ws, "k11.asc"),
k11,
)
return
def test_lake():
ws = os.path.join(tpth, "freyberg")
top = flopy.utils.Raster.load(os.path.join(ws, "top.asc"))
bot = flopy.utils.Raster.load(os.path.join(ws, "bot.asc"))
k11 = flopy.utils.Raster.load(os.path.join(ws, "k11.asc"))
sim = flopy.mf6.MFSimulation().load(
sim_name=name,
sim_ws=ws,
exe_name="mf6",
verbosity_level=0,
)
# get groundwater flow model
gwf = sim.get_model("freyberg")
# define extent of lake
lakes = gwf.dis.idomain.array.squeeze() * -1
lakes[32:, :] = -1
# fill bottom
bot_tm = bot.resample_to_grid(
gwf.modelgrid,
band=bot.bands[0],
method="linear",
extrapolate_edges=True,
)
# mm = flopy.plot.PlotMapView(modelgrid=gwf.modelgrid)
# mm.plot_array(bot_tm)
# determine a reasonable lake bottom
idx = np.where(lakes > -1)
lak_bot = bot_tm[idx].max() + 2.0
# interpolate top elevations
top_tm = top.resample_to_grid(
gwf.modelgrid,
band=top.bands[0],
method="linear",
extrapolate_edges=True,
)
# set the elevation to the lake bottom in the area of the lake
top_tm[idx] = lak_bot
# mm = flopy.plot.PlotMapView(modelgrid=gwf.modelgrid)
# v = mm.plot_array(top_tm)
# cs = mm.contour_array(
# top_tm, colors="white", linewidths=0.5, levels=np.arange(0, 25, 2)
# )
# plt.clabel(cs, fmt="%.1f", colors="white", fontsize=7)
# plt.colorbar(v, shrink=0.5)
gwf.dis.top = top_tm
gwf.dis.botm = bot_tm.reshape(gwf.modelgrid.shape)
# v = gwf.dis.top.array
# v = gwf.dis.botm.array
k11_tm = k11.resample_to_grid(
gwf.modelgrid,
band=k11.bands[0],
method="linear",
extrapolate_edges=True,
)
gwf.npf.k = k11_tm
# mm = flopy.plot.PlotMapView(modelgrid=gwf.modelgrid)
# mm.plot_array(k11_tm)
(
idomain,
pakdata_dict,
connectiondata,
) = flopy.mf6.utils.get_lak_connections(
gwf.modelgrid,
lakes,
bedleak=5e-9,
)
assert (
pakdata_dict[0] == 54
), f"number of lake connections ({pakdata_dict[0]}) not equal to 54."
assert len(connectiondata) == 54, (
"number of lake connectiondata entries ({}) not equal "
"to 54.".format(len(connectiondata))
)
lak_pak_data = []
for key, value in pakdata_dict.items():
lak_pak_data.append([key, 35.0, value])
lak_spd = {0: [[0, "rainfall", 3.2e-9]]}
lak = flopy.mf6.ModflowGwflak(
gwf,
print_stage=True,
nlakes=1,
packagedata=lak_pak_data,
connectiondata=connectiondata,
perioddata=lak_spd,
pname="LAK-1",
filename="freyberg.lak",
)
idomain = gwf.dis.idomain.array
lakes.shape = idomain.shape
gwf.dis.idomain = np.where(lakes > -1, 1, idomain)
# convert to the Newton-Raphson formulation and update the linear acceleration option
gwf.name_file.newtonoptions = "NEWTON UNDER_RELAXATION"
sim.ims.linear_acceleration = "BICGSTAB"
# write the revised simulation files and run the model
sim.write_simulation()
success = sim.run_simulation(silent=False)
assert success, f"could not run {sim.name} with lake"
return
def test_embedded_lak_ex01():
nper = 1
nlay, nrow, ncol = 5, 17, 17
shape3d = (nlay, nrow, ncol)
delr = (
250.0,
1000.0,
1000.0,
1000.0,
1000.0,
1000.0,
500.0,
500.0,
500.0,
500.0,
500.0,
1000.0,
1000.0,
1000.0,
1000.0,
1000.0,
250.0,
)
delc = delr
top = 500.0
botm = (
107.0,
97.0,
87.0,
77.0,
67.0,
)
lake_map = np.ones(shape3d, dtype=np.int32) * -1
lake_map[0, 6:11, 6:11] = 0
lake_map[1, 7:10, 7:10] = 0
lake_map = np.ma.masked_where(lake_map < 0, lake_map)
strt = 115.0
k11 = 30
k33 = (
1179.0,
30.0,
30.0,
30.0,
30.0,
)
load_pth = os.path.join("..", "examples", "data", "mf2005_test")
ml = flopy.modflow.Modflow.load(
"l1a2k.nam",
model_ws=load_pth,
load_only=["EVT"],
check=False,
)
rch_rate = 0.116e-1
evt_rate = 0.141e-1
evt_depth = 15.0
evt_surf = ml.evt.surf[0].array
chd_top_bottom = (
160.0,
158.85,
157.31,
155.77,
154.23,
152.69,
151.54,
150.77,
150.0,
149.23,
148.46,
147.31,
145.77,
144.23,
142.69,
141.15,
140.0,
)
chd_spd = []
for k in range(nlay):
for i in range(nrow):
if 0 < i < nrow - 1:
chd_spd.append([k, i, 0, chd_top_bottom[0]])
chd_spd.append([k, i, ncol - 1, chd_top_bottom[-1]])
else:
for jdx, v in enumerate(chd_top_bottom):
chd_spd.append([k, i, jdx, v])
chd_spd = {0: chd_spd}
name = "lak_ex01"
ws = os.path.join(tpth, "lak_ex01")
sim = flopy.mf6.MFSimulation(
sim_name=name,
exe_name="mf6",
sim_ws=ws,
)
tdis = flopy.mf6.ModflowTdis(
sim,
nper=nper,
)
ims = flopy.mf6.ModflowIms(
sim,
print_option="summary",
linear_acceleration="BICGSTAB",
outer_maximum=1000,
inner_maximum=100,
outer_dvclose=1e-8,
inner_dvclose=1e-9,
)
gwf = flopy.mf6.ModflowGwf(
sim,
modelname=name,
newtonoptions="newton under_relaxation",
print_input=True,
)
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
)
ic = flopy.mf6.ModflowGwfic(
gwf,
strt=strt,
)
npf = flopy.mf6.ModflowGwfnpf(
gwf,
icelltype=1,
k=k11,
k33=k33,
)
chd = flopy.mf6.ModflowGwfchd(
gwf,
stress_period_data=chd_spd,
)
rch = flopy.mf6.ModflowGwfrcha(
gwf,
recharge=rch_rate,
)
evt = flopy.mf6.ModflowGwfevta(
gwf,
surface=evt_surf,
depth=evt_depth,
rate=evt_rate,
)
oc = flopy.mf6.ModflowGwfoc(
gwf,
printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
)
(
idomain,
pakdata_dict,
connectiondata,
) = flopy.mf6.utils.get_lak_connections(
gwf.modelgrid,
lake_map,
bedleak=0.1,
)
assert (
pakdata_dict[0] == 57
), f"number of lake connections ({pakdata_dict[0]}) not equal to 57."
assert len(connectiondata) == 57, (
"number of lake connectiondata entries ({}) not equal "
"to 57.".format(len(connectiondata))
)
lak_pak_data = []
for key, value in pakdata_dict.items():
lak_pak_data.append([key, 110.0, value])
lak_spd = {
0: [
[0, "rainfall", rch_rate],
[0, "evaporation", 0.0103],
]
}
lak = flopy.mf6.ModflowGwflak(
gwf,
print_stage=True,
print_flows=True,
nlakes=1,
packagedata=lak_pak_data,
connectiondata=connectiondata,
perioddata=lak_spd,
pname="LAK-1",
)
# reset idomain
gwf.dis.idomain = idomain
# write the simulation files and run the model
sim.write_simulation()
success = sim.run_simulation(silent=False)
assert success, f"could not run {sim.name}"
def test_embedded_lak_prudic():
lakebed_leakance = 1.0 # Lakebed leakance ($ft^{-1}$)
nlay = 8 # Number of layers
nrow = 36 # Number of rows
ncol = 23 # Number of columns
delr = float(405.665) # Column width ($ft$)
delc = float(403.717) # Row width ($ft$)
delv = 15.0 # Layer thickness ($ft$)
top = 100.0 # Top of the model ($ft$)
shape2d = (nrow, ncol)
shape3d = (nlay, nrow, ncol)
# load data from text files
data_ws = os.path.join("..", "examples", "data", "mf6_test")
fname = os.path.join(data_ws, "prudic2004t2_bot1.dat")
bot0 = np.loadtxt(fname)
botm = np.array(
[bot0]
+ [
np.ones(shape2d, dtype=float) * (bot0 - (delv * k))
for k in range(1, nlay)
]
)
fname = os.path.join(data_ws, "prudic2004t2_idomain1.dat")
idomain0 = np.loadtxt(fname, dtype=np.int32)
idomain = np.array(nlay * [idomain0], dtype=np.int32)
fname = os.path.join(data_ws, "prudic2004t2_lakibd.dat")
lakibd = np.loadtxt(fname, dtype=int)
lake_map = np.ones(shape3d, dtype=np.int32) * -1
lake_map[0, :, :] = lakibd[:, :] - 1
# build StructuredGrid
model_grid = flopy.discretization.StructuredGrid(
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=np.ones(ncol, dtype=float) * delr,
delc=np.ones(nrow, dtype=float) * delc,
top=np.ones(shape2d, dtype=float) * top,
botm=botm,
idomain=idomain,
)
# base case
cdata, lakconn = __get_lake_connection_data(
nrow, ncol, delr, delc, lakibd, idomain, lakebed_leakance
)
# flopy test
(
idomain_rev,
pakdata_dict,
connectiondata,
) = flopy.mf6.utils.get_lak_connections(
model_grid,
lake_map,
idomain=idomain,
bedleak=lakebed_leakance,
)
# evaluate the number of connections
for idx, nconn in enumerate(lakconn):
assert pakdata_dict[idx] == nconn, (
"number of connections calculated by get_lak_connections ({}) "
"not equal to {} for lake {}.".format(
pakdata_dict[idx], nconn, idx + 1
)
)
# compare connectiondata
for idx, (cd, cdbase) in enumerate(zip(connectiondata, cdata)):
for jdx in (
0,
1,
2,
3,
7,
8,
):
match = True
if jdx not in (
7,
8,
):
if cd[jdx] != cdbase[jdx]:
match = False
else:
match = np.allclose(cd[jdx], cdbase[jdx])
if not match:
print(
f"connection data do not match for connection {idx} for lake {cd[0]}"
)
break
assert match, f"connection data do not match for connection {idx} (field {jdx})"
# evaluate the revised idomain, only layer 1 has been adjusted
idomain0_test = idomain[0, :, :].copy()
idomain0_test[lakibd > 0] = 0
idomain_test = idomain.copy()
idomain[0, :, :] = idomain0_test
assert np.array_equal(
idomain_rev, idomain_test
), "idomain not updated correctly with lakibd"
return
def test_embedded_lak_prudic_mixed():
lakebed_leakance = 1.0 # Lakebed leakance ($ft^{-1}$)
nlay = 8 # Number of layers
nrow = 36 # Number of rows
ncol = 23 # Number of columns
delr = float(405.665) # Column width ($ft$)
delc = float(403.717) # Row width ($ft$)
delv = 15.0 # Layer thickness ($ft$)
top = 100.0 # Top of the model ($ft$)
shape2d = (nrow, ncol)
shape3d = (nlay, nrow, ncol)
# load data from text files
data_ws = os.path.join("..", "examples", "data", "mf6_test")
fname = os.path.join(data_ws, "prudic2004t2_bot1.dat")
bot0 = np.loadtxt(fname)
botm = np.array(
[bot0]
+ [
np.ones(shape2d, dtype=float) * (bot0 - (delv * k))
for k in range(1, nlay)
]
)
fname = os.path.join(data_ws, "prudic2004t2_idomain1.dat")
idomain0 = np.loadtxt(fname, dtype=np.int32)
idomain = np.array(nlay * [idomain0], dtype=np.int32)
fname = os.path.join(data_ws, "prudic2004t2_lakibd.dat")
lakibd = np.loadtxt(fname, dtype=int)
lake_map = np.ones(shape3d, dtype=np.int32) * -1
lake_map[0, :, :] = lakibd[:, :] - 1
lakebed_leakance = np.zeros(shape2d, dtype=object)
idx = np.where(lake_map[0, :, :] == 0)
lakebed_leakance[idx] = "none"
idx = np.where(lake_map[0, :, :] == 1)
lakebed_leakance[idx] = 1.0
lakebed_leakance = lakebed_leakance.tolist()
# build StructuredGrid
model_grid = flopy.discretization.StructuredGrid(
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=np.ones(ncol, dtype=float) * delr,
delc=np.ones(nrow, dtype=float) * delc,
top=np.ones(shape2d, dtype=float) * top,
botm=botm,
idomain=idomain,
)
# test mixed lakebed leakance list
(_, _, connectiondata,) = flopy.mf6.utils.get_lak_connections(
model_grid,
lake_map,
idomain=idomain,
bedleak=lakebed_leakance,
)
# test the connections
for data in connectiondata:
lakeno, bedleak = data[0], data[4]
if lakeno == 0:
assert (
bedleak == "none"
), f"bedleak for lake 0 is not 'none' ({bedleak})"
else:
assert bedleak == 1.0, f"bedleak for lake 1 is not 1.0 ({bedleak})"
return
if __name__ == "__main__":
test_embedded_lak_prudic_mixed()
test_base_run()
test_lake()
test_embedded_lak_ex01()
test_embedded_lak_prudic()
```
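Each item assembled by `__get_lake_connection_data` (and returned by `flopy.mf6.utils.get_lak_connections`) is a nine-field MODFLOW 6 LAK `connectiondata` record. A sketch of one horizontal record, with illustrative values taken from the prudic test above:
```python
# [lakeno, iconn, cellid, claktype, bedleak, belev, telev, connlen, connwidth]
record = [
    0,              # lakeno: zero-based lake number
    12,             # iconn: connection number within the lake (illustrative)
    (0, 17, 9),     # cellid: (layer, row, column) of the connected GWF cell
    "horizontal",   # claktype: "horizontal" or "vertical"
    1.0,            # bedleak: lakebed leakance
    0.0, 0.0,       # belev, telev: connection elevations, set to 0.0 in these tests
    0.5 * 403.717,  # connlen: half the cell dimension perpendicular to the face
    405.665,        # connwidth: cell width along the face
]
```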
{
"source": "jdlarsen-UA/LB-colloids",
"score": 3
}
#### File: lb_colloids/Colloids/Colloid_Math.py
```python
from .LB_Colloid import Singleton
# import ColUtils
import numpy as np
import sys
import copy
class ForceToVelocity:
"""
Class that calculates a "velocity-like" value from force arrays
Parameters:
----------
:param np.ndarray forces: Array of forces felt by a colloid
:keyword float ts: Physical time step value
:keyword float rho_colloid: Colloid particle density, default :math:`2650 kg/m^3`
:keyword float ac: colloid radius, default 1e-6 m
Returns:
-------
:return: velocity (np.array, np.float) Array of "velocities" calculated from forces
"""
def __init__(self, forces, **kwargs):
params = {'rho_colloid': 2650., 'ac': 1e-6, 'ts': 1.}
for kwarg in kwargs:
params[kwarg] = kwargs[kwarg]
rho_colloid = params['rho_colloid']
ac = params['ac']
ts = params['ts']
self.mass_colloid = (4. / 3.) * np.pi * (ac * ac * ac) * rho_colloid
self.velocity = 0.5 * (forces * ts) / self.mass_colloid
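# Illustrative magnitude check (added note, not in the original source): a 1e-6 m
# colloid at rho_colloid = 2650 kg/m^3 has mass (4/3)*pi*(1e-6)**3*2650 ~ 1.11e-14 kg,
# so a 1e-15 N force integrated over ts = 1 s yields a "velocity" of
# 0.5 * 1e-15 / 1.11e-14 ~ 0.045 m/s under this relation.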
class Velocity:
"""
Class that dimensionalizes LB velocity from non-dimensional lattice Boltzmann units
Parameters:
----------
:param np.ndarray LBx: Array of Lattice Boltzmann velocities in the x-direction
:param np.ndarray LBy: Array of Lattice Boltzmann velocities in the y-direction
:keyword float ts: Time step value, default is 1.
:keyword float scale_lb: Scale the dimensionalized velocity from lattice Boltzmann. Use with caution. Default is 1
:param float velocity_factor: LB to physical velocity conversion factor. Default is 1
Returns:
-------
:return: xvelocity (np.array, np.float) array of dimensionalized velocities in the x-direction
:return: yvelocity (np.array, np.float) array of dimensionalized velocities in the y-direction
"""
def __init__(self, LBx, LBy, velocity_factor, **kwargs):
params = {'lb_timestep': 1e-5, 'ts': 1, 'scale_lb': 1.}
for kwarg in kwargs:
params[kwarg] = kwargs[kwarg]
ts = params['ts']
self.xvelocity = LBx * velocity_factor * params['scale_lb']
self.yvelocity = LBy * velocity_factor * params['scale_lb']
class Gravity:
"""
Class to generate the estimated gravitational force experienced by a colloid
.. math::
F^{G} = \\frac{-4 \pi a_{c}^{3} \\rho_{c} g}{3}
Parameters:
----------
:keyword float rho_colloid: Particle density of a colloid in :math:`kg/m^3`. Default is 2650.
:keyword float ac: colloid radius in m. Default is 1e-6
Returns:
-------
:return: gravity (float) Gravitational force that a colloid experiences
"""
def __init__(self, **kwargs):
params = {'rho_colloid': 2650., 'ac': 1e-6}
for kwarg in kwargs:
params[kwarg] = kwargs[kwarg]
ac = params['ac']
rho_colloid = params['rho_colloid']
self.colloid_mass = (4./3.)*np.pi*(ac*ac*ac)*rho_colloid
self.gravity = (self.colloid_mass*-9.81)
class Bouyancy:
"""
Class to estimate the buoyant force experienced by a colloid. Buoyancy
is applied as a positive value to maintain vector direction.
.. math::
F^{b} = \\frac{4 \pi a_{c}^{3} \\rho_{w} g}{3}
Parameters:
----------
:keyword float rho_water: density of water :math:`kg/m^3`. Default is 997.
:keyword float rho_colloid: particle density of a colloid in :math:`kg/m^3`. Default is 2650.
:keyword float ac: colloid radius in m. Default is 1e-6.
Returns:
-------
:return: bouyancy (float) Buoyant force that a colloid experiences
"""
def __init__(self, **kwargs):
params = {'rho_water': 997., 'rho_colloid': 2650., 'ac': 1e-6}
for kwarg in kwargs:
params[kwarg] = kwargs[kwarg]
rho_water = params['rho_water']
rho_colloid = params['rho_colloid']
ac = params['ac']
self.water_mass = (4./3.)*np.pi*(ac*ac*ac)*rho_water
self.bouyancy = self.water_mass * 9.81
class Brownian:
"""
Class to estimate Brownian forces on colloids. Uses the relationships outlined in Qiu et al. 2010
where
.. math::
F_{x}^{B} = \\xi \sqrt{\\frac{2D_{0}}{f_{1}dt}}G(0,1)
F_{y}^{B} = \\xi \sqrt{\\frac{2D_{0}}{f_{4}dt}}G(0,1)
Parameters:
----------
:param np.ndarray f1: Drag force correction term [Gao et. al. 2010. Computers and Math with App]
:param np.ndarray f4: Drag force correction term [Gao et. al. 2010]
:keyword float ac: Colloid radius. Default 1e-6
:keyword float viscosity: Dynamic viscosity of water. Default 8.9e-4 Pa S.
:keyword float T: Absolute temperature in K. Default is 298.15
Returns:
-------
:return: brownian_x: (np.ndarray) array of Brownian (random)
forces in the x direction [Qiu et al. 2011]
:return: brownian_y: (np.ndarray) array of Brownian (random)
forces in the y direction [Qiu et al. 2011]
"""
def __init__(self, f1, f4, **kwargs):
params = {'viscosity': 8.9e-4, 'ac': 1e-6, 'T': 298.15, 'ts': 1.}  # 'ts' default added so self.ts below cannot raise a KeyError
for kwarg in kwargs:
params[kwarg] = kwargs[kwarg]
self.mu = 0
self.sigma = 1
self.f1 = f1
self.f4 = f4
self.ac = params['ac']
self.ts = params['ts']
self.viscosity = params['viscosity']
self.boltzmann = 1.38e-23
self.epsilon = 6. * np.pi * self.viscosity * self.ac
self.T = params['T']
self.diffusive = (self.boltzmann * self.T) / self.epsilon
# self.brownian_x = self.Brown_xforce(self.epsilon, self.diffusive, f4)
# self.brownian_y = self.Brown_yforce(self.epsilon, self.diffusive, f1)
@property
def brownian_x(self):
return self.epsilon * np.sqrt(((2 * self.diffusive)/(self.f4 * self.ts))) * \
np.random.normal(self.mu, self.sigma, self.f4.shape)
@property
def brownian_y(self):
return self.epsilon * np.sqrt(((2 * self.diffusive)/(self.f1 * self.ts))) * \
np.random.normal(self.mu, self.sigma, self.f1.shape)
class Drag:
"""
Class to calculate colloidal drag forces from fluid velocity arrays. Based on calculations
outlined in Gao et al. 2010 and Qiu et al. 2011.
.. math::
F_{x}^{D} = \\frac{\\xi}{f_{4}} (f_{3}u_{x} - V_{x})
F_{y}^{D} = \\xi (f_{2} u_{y} - \\frac{V_{y}}{f_{1}})
Parameters:
----------
:param np.ndarray ux: fluid velocity in the x-direction
:param np.ndarray uy: fluid velocity in the y-direction
:param np.ndarray Vx: colloid velocity in the x-direction
:param np.ndarray Vy: colloid velocity in the y-direction
:param np.ndarray f1: Hydrodynamic force correction term [Gao et. al. 2010.]
:param np.ndarray f2: Hydrodynamic force correction term [Gao et. al. 2010.]
:param np.ndarray f3: Hydrodynamic force correction term [Gao et. al. 2010.]
:param np.ndarray f4: Hydrodynamic force correction term [Gao et. al. 2010.]
:keyword float ac: Colloid radius. Default is 1e-6 m
:keyword float viscosity: Dynamic fluid viscosity of water. Default 8.9e-4 Pa S
:keyword float rho_colloid: Colloid particle density. Default :math:`2650 kg/m^3`
:keyword float rho_water: Water density. Default :math:`997 kg/m^3`
Returns:
-------
:return: drag_x (np.ndarray) non-vectorized drag forces in the x-direction
:return: drag_y: (np.ndarray) non-vectorized drag forces in the y-direction
"""
def __init__(self, ux, uy, f1, f2, f3, f4, **kwargs):
params = {'ac': 1e-6, 'viscosity': 8.9e-4, 'rho_colloid': 2650., 'rho_water': 997.,
'T': 298.15, 'ts': 1.}
for kwarg in kwargs:
params[kwarg] = kwargs[kwarg]
self.ac = params['ac']
self.viscosity = params['viscosity']
self.rho_water = params['rho_water']
self.rho_colloid = params['rho_colloid']
self.ux = ux
self.uy = uy
self.f1 = f1
self.f2 = f2
self.f3 = f3
self.f4 = f4
self.epsilon = 6. * np.pi * self.viscosity * self.ac
self.vx = -((self.rho_colloid - self.rho_water)*((2*self.ac)**2)*9.81)/(18*self.viscosity)
self.vy = -((self.rho_colloid - self.rho_water)*((2*self.ac)**2)*9.81)/(18*self.viscosity)
# self.drag_x = self.drag_xforce(ux, self.Vcol, self.epsilon, f3, f4)
# self.drag_y = self.drag_yforce(uy, self.Vcol, self.epsilon, f1, f2)
self.all_physical_params = copy.copy(params)
@property
def drag_x(self):
"""
:return: drag force array in the x direction
"""
return (self.epsilon / self.f4) * ((self.f3 * self.ux) - self.vx)
@property
def drag_y(self):
return self.epsilon * ((self.f2 * self.uy) - (self.vy / self.f1))
def update(self, vx, vy):
"""
Updates the colloid velocity array for producing drag forces
:param vx:
:param vy:
"""
self.vx = vx
self.vy = vy
class Gap:
"""
Class that calculates the non-dimensional gap distance between colloid and surface.
This class also calculates hydrodynamic force correction terms outlined in Gao et. al. 2010.
Note: Passing np.nan values in here can produce overflow warnings!
.. math::
f_{1}(\\bar{h}) = 1.0 - 0.443 exp(-1.299\\bar{h}) - 0.5568 exp(-0.32\\bar{h}^{0.75})
.. math::
f_{2}(\\bar{h}) = 1.0 + 1.455 exp(-1.2596\\bar{h}) - 0.7951 exp(-0.56\\bar{h}^{0.50})
.. math::
f_{3}(\\bar{h}) = 1.0 - 0.487 exp(-5.423\\bar{h}) - 0.5905 exp(-37.83\\bar{h}^{0.50})
.. math::
f_{4}(\\bar{h}) = 1.0 - 0.35 exp(-0.25\\bar{h}) - 0.40 exp(-10\\bar{h})
Parameters:
----------
:param np.ndarray xarr: Array of x-distances to nearest solid surface
:param np.ndarray yarr: Array of y-distances to nearest solid surface
:keyword float ac: Radius of a colloid. Default is 1e-6
Returns:
-------
:return: f1 (np.ndarray) Drag force correction term [Gao et al 2010]
:return: f2 (np.ndarray) Drag force correction term [Gao et al 2010]
:return: f3 (np.ndarray) Drag force correction term [Gao et al 2010]
:return: f4 (np.ndarray) Drag force correction term [Gao et al 2010]
"""
def __init__(self, xarr, yarr, **kwargs):
params = {'ac': 1e-6}
for kwarg in kwargs:
params[kwarg] = kwargs[kwarg]
self.ac = params['ac']
self.yhbar = np.abs(yarr/self.ac)
self.xhbar = np.abs(xarr/self.ac)
self.f1 = self.set_f1(self.yhbar)
self.f2 = self.set_f2(self.yhbar)
self.f3 = self.set_f3(self.xhbar)
self.f4 = self.set_f4(self.xhbar)
def set_f1(self, yhbar):
f1 = 1.0 - 0.443 * np.exp(yhbar * -1.299) - 0.5568 * np.exp((yhbar ** 0.75) * -0.32)
return f1
def set_f2(self, yhbar):
f2 = 1.0 + 1.455 * np.exp(yhbar * -1.2596) - 0.7951 * np.exp((yhbar ** 0.50) * -0.56)  # coefficient sign and exponent corrected to match the f2 expression in the class docstring
return f2
def set_f3(self, xhbar):
f3 = 1.0 - 0.487 * np.exp(xhbar * -5.423) - 0.5905 * np.exp((xhbar ** 0.50) * -37.83)
return f3
def set_f4(self, xhbar):
f4 = 1.0 - 0.35 * np.exp(xhbar * -0.25) - 0.40 * np.exp(xhbar * -10.)
return f4
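# Illustrative check (added note, not in the original source): for a colloid whose
# gap equals one radius (hbar = h/ac = 1), the fitted corrections evaluate to
# roughly f1 ~ 0.475, f3 ~ 0.998, and f4 ~ 0.727; all terms approach 1 as hbar
# grows large, recovering unbounded Stokes drag far from the surface.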
class DLVO:
"""
Class method to calculate vectorized DLVO force arrays for colloid surface interaction using
methods outlined in Qiu et al. 2011 and Liang et al. 2008? *Check this later*
Parameterization of this class is handled primarily through the ChemistryDict via **kwargs
Mathematics used in the calculation of DLVO interaction energies are:
.. math::
\\frac{1}{\kappa} = (\\frac{\epsilon_{r} \epsilon_{0} k T}{e^{2} N_{A} I^{*}})^{\\frac{1}{2}}
.. math::
\Phi^{EDL} = \pi \epsilon_{0} \epsilon_{r} a_{c}
(2 \psi_{s} \psi_{c}
ln(\\frac{1 + exp(-\kappa h)}{1 - exp(-\kappa h)})
+ (\psi_{s}^{2} + \psi_{c}^{2})
ln(1 - exp(-2 \kappa h)))
Parameters:
-------
:param np.ndarray xarr: Physical distance from solid boundaries in the x direction
:param np.ndarray yarr: Physical distance from solid boundaries in the y direction
:keyword dict valence: Valences of all species in solution. (Optional)
:keyword dict concentration: Concentration of all species in solution (Optional)
:keyword float zeta_colloid: Measured zeta potential of colloid (Recommended).
Default -40.5e-3 Na-Kaolinite Colloid [Chorom 1995. Eur. Jour. of Soil Science]
:keyword float zeta_solid: Bulk zeta potential of porous media (Recommended).
Default -60.9e-3 Glass bead media [Ducker 1992, Langmuir V8]
:keyword float I: Ionic strength of simulated solution (Recommended). Default 1e-3 M
:keyword float ac: Colloid radius in meters. Default 1e-6 m.
:keyword float epsilon_r: Relative dielectric permittivity of water. (Optional)
Default 78.304 @ 298 K [Malmberg and Maryott 1956. Jour. Res. Nat. Bur. Std. V56(1)]
:keyword float sheer_plane: Equivalent to the thickness of one layer of water molecules. (Optional)
Default 3e-10 m [Interface Science and Technology, 2008. Volume 16 Chapter 3]
:keyword float T: Temperature of simulation fluid. Default 298.15 K
:keyword float lvdwst_colloid: Lifshitz-van der Waals surface tension component from colloid. (Recommended)
Default is 39.9e-3 J/m**2 [Giese et al. 1996, Jour. Disp. Sci. & Tech. 17(5)]
:keyword float lvdwst_solid: Lifshitz-van der Waals surface tension component from solid. (Recommended)
Default is 33.7e-3 J/m**2 [Giese et al. 1996]
:keyword float lvdwst_water: Lifshitz-van der Waals surface tension component from water. (Recommended)
Default is 21.8e-3 J/m**2 [Interface Science and Technology, 2008. V16(2)]
:keyword float psi+_colloid: Lewis acid-base electron acceptor parameter. (Recommended)
Default is 0.4e-3 J/m**2 [Giese et al. 1996]
:keyword float psi+_solid: Lewis acid-base electron acceptor parameter. (Recommended)
Default is 1.3e-3 J/m**2 [Giese et al. 1996]
:keyword float psi+_water: Lewis acid-base electron acceptor parameter. (Recommended)
Default is 25.5e-3 J/m**2 [Interface Science and Technology, 2008. V16(2)]
:keyword float psi-_colloid: Lewis acid-base electron donor parameter. (Recommended)
Default is 34.3e-3 J/m**2 [Giese et al. 1996]
:keyword float psi-_solid: Lewis acid-base electron donor parameter. (Recommended)
Default is 62.2e-3 J/m**2 [Giese et al. 1996]
:keyword float psi-_water: Lewis acid-base electron donor parameter. (Recommended)
Default is 25.5e-3 J/m**2 [Interface Science and Technology, 2008. V16(2)]
:keyword np.ndarray xvArr: Array of vector directions.This array is applied to properly represent attractive
and repulsive forces
:keyword np.ndarray yvArr: Array of vector directions.This array is applied to properly represent attractive
and repulsive forces
Return:
------
:return: EDLx (np.ndarray) vectorized np.array of electric-double-layer force values in the x-direction
:return: EDLy (np.ndarray) vectorized np.array of electric-double-layer force values in the y-direction
:return: LVDWx (np.ndarray) vectorized np.array of Lifshitz-van der Waals force values in the x-direction
:return: LVDWy (np.ndarray) vectorized np.array of Lifshitz-van der Waals force values in the y-direction
:return: LewisABx (np.ndarray) vectorized np.array of Lewis acid-base force values in the x-direction
:return: LewisABy (np.ndarray) vectorized np.array of Lewis acid-base force values in the y-direction
"""
def __init__(self, xarr, yarr, **kwargs):
params = {'concentration': {'Na': 10e-4}, 'adjust_zeta': False, 'I_initial': False, 'I': 10e-4, 'ac': 1e-6,
'epsilon_r': 78.304, 'valence': {'Na': 1.}, 'sheer_plane': 3e-10, 'T': 298.15,
'lvdwst_water': 21.8e-3, 'lvdwst_colloid': 39.9e-3, 'lvdwst_solid': 33.7e-3, 'zeta_colloid': -40.5e-3,
'zeta_solid': -60.9e-3, 'psi+_colloid': 0.4e-3, 'psi-_colloid': 34.3e-3, 'psi+_water': 25.5e-3,
'psi-_water': 25.5e-3, 'psi+_solid': 1.3e-3, 'psi-_solid': 62.2e-3, 'rho_colloid': 2650.}
for kwarg in kwargs:
params[kwarg] = kwargs[kwarg]
calc_oh = False
self.__xarr = xarr
self.__yarr = yarr
self.rho_colloid = params['rho_colloid']
self.epsilon_0 = 8.85e-12
self.epsilon_r = params['epsilon_r']
self.ac = params['ac']
self.e = 1.6e-19
self.valence = params['valence']
self.concentration = params['concentration']
self.boltzmann = 1.38e-23
self.stern_z = params['sheer_plane']
self.T = params['T']
self.zeta_colloid = params['zeta_colloid']
self.zeta_solid = params['zeta_solid']
self.lvdwst_water = params['lvdwst_water']
self.lvdwst_colloid = params['lvdwst_colloid']
self.lvdwst_solid = params['lvdwst_solid']
self.eplus_water = params['psi+_water']
self.eplus_colloid = params['psi+_colloid']
self.eplus_solid = params['psi+_solid']
self.eneg_water = params['psi-_water']
self.eneg_colloid = params['psi-_colloid']
self.eneg_solid = params['psi-_solid']
self.xvArr = params['xvArr']*-1
self.yvArr = params['yvArr']*-1
self.hamaker = None
self.all_chemical_params = copy.copy(params)
self.__resolution = params['lbres']/params['gridref']
if params['I']:
self.ionic_strength = 2 * params['I'] # 2I is what is used in the debye equation
else:
self.ionic_strength = self.ionic(params['valence'], params['concentration'])
#self.k_debye = self.debye(self.epsilon_0, self.epsilon_r, self.boltzmann, self.T, self.e,
# self.ionic_strength)
self.colloid_potential = self._colloid_potential(self.zeta_colloid, self.ac, self.k_debye, self.stern_z)
self.surface_potential = self._surface_potential(self.zeta_solid, self.k_debye, self.stern_z)
# Calculate the chemical potential
# todo: change these to property methods
self.EDLx = self._EDL_energy(self.epsilon_0, self.epsilon_r, self.ac, self.colloid_potential,
self.surface_potential, self.k_debye, xarr)/xarr*self.xvArr
self.EDLy = self._EDL_energy(self.epsilon_0, self.epsilon_r, self.ac, self.colloid_potential,
self.surface_potential, self.k_debye, yarr)/yarr*self.yvArr
if calc_oh is True:
# todo: change these over to property methods
self.LVDWx = self._Lifshitz_van_der_Walls(xarr, self.ac, self.lvdwst_water, self.lvdwst_colloid,
self.lvdwst_solid)/xarr*self.xvArr
self.LVDWy = self._Lifshitz_van_der_Walls(xarr, self.ac, self.lvdwst_water, self.lvdwst_colloid,
self.lvdwst_solid)/yarr*self.yvArr
self.LewisABx = self._lewis_acid_base(xarr, self.ac, self.eplus_colloid, self.eplus_solid, self.eplus_water,
self.eneg_colloid, self.eneg_solid, self.eneg_water)/xarr*self.xvArr
self.LewisABy = self._lewis_acid_base(yarr, self.ac, self.eplus_colloid, self.eplus_solid, self.eplus_water,
self.eneg_colloid, self.eneg_solid, self.eneg_water)/yarr*self.yvArr
else:
self.LVDWx = np.zeros((1,1))
self.LVDWy = np.zeros((1,1))
self.LewisABx = np.zeros((1,1))
self.LewisABy = np.zeros((1,1))
self._combined_hamaker_constant()
# self.attractive_x = self._combined_lvdw_lewis_ab(xarr)/xarr * self.xvArr
# self.attractive_y = self._combined_lvdw_lewis_ab(yarr)/yarr * self.yvArr
@property
def attractive_x(self):
"""
Calculates the combined attractive force between colloid surface
based upon Liang et. al. 2008
Returns:
-------
:return: np.ndarray
"""
return self._combined_lvdw_lewis_ab(self.__xarr)/self.__xarr * self.xvArr
@property
def attractive_y(self):
"""
Calculates the combined attractive force between colloid surface
based upon Liang et. al. 2008
Returns:
-------
:return: np.ndarray
"""
return self._combined_lvdw_lewis_ab(self.__yarr) / self.__yarr * self.yvArr
def ionic(self, valence, concentration):
"""
Calculates 2*I from the user-supplied valences and concentrations
.. math::
I^{*} = \sum_{i} Z_{i}^{2} M_{i}
Parameters:
----------
:param dict valence: Dictionary of chemical species, valence
:param dict concentration: Dictionary of chemical species, concentration
Returns:
-------
:return: I (float) 2*ionic strength
"""
I = 0
for key in valence:
I += (float(concentration[key])*(float(valence[key])**2))
return I
@property
def k_debye(self):
"""
Method to calculate the Debye parameter (the inverse of the Debye length)
Returns:
-------
:return: Debye parameter, 1/m (float)
"""
NA = 6.02e23
k_inverse = np.sqrt((self.epsilon_0 * self.epsilon_r
* self.boltzmann * self.T)/(self.e * self.e * NA * self.ionic_strength))
return 1./k_inverse
def _colloid_potential(self, zeta, ac, kd, z):
"""
Calculates the surface potential on a colloid
Parameters:
----------
:param float zeta: Zeta potential of colloid
:param float ac: Colloid radius
:param float kd: Debye parameter (inverse Debye length)
:param float z: Thickness of the sheer plane (stern layer)
Returns:
-------
:return: (float) colloid surface potential
"""
potential = zeta*(1.+(z/ac))*np.exp(kd*z)
return potential
def _surface_potential(self, zeta, kd, z):
"""
Calculates the surface potential of the solid phase
Parameters:
----------
:param float zeta: Zeta potential of Solid phase
:param float kd: Debye parameter (inverse Debye length)
:param float z: Thickness of the sheer plane (stern layer)
Returns:
-------
:return: (float) Solid phase surface potential
"""
potential = zeta*np.exp(kd*z)
return potential
def _EDL_energy(self, E0, Er, ac, cp, sp, kd, arr):
"""
Parameters:
----------
E0: (float) dielectric permittivity of a vacuum
Er: (float) relative permittivity of the fluid
ac: (float) colloid radius
cp: (float) colloid potential
sp: (float) surface potential
kd: (float) Debye parameter (inverse Debye length)
arr: (np.array: np.float) array of distances from surfaces
Output:
-------
EDL: (np.array: np.float) array of EDL energies in relation to porous surfaces
Note:
-----
The mathematical calculation is broken into several terms for ease of programming
"""
edl0 = np.pi*E0*Er*ac
edl1 = 2.*sp*cp
edl2 = np.log((1. + np.exp(-kd*np.abs(arr)))/(1. - np.exp(-kd*np.abs(arr))))
edl3 = sp*sp + cp*cp
edl4 = np.log(1. - np.exp(-2.*kd*np.abs(arr)))
edl = edl0*(edl1*edl2 + edl3*edl4)
return edl
def _adjust_zeta_colloid(self, potential, ac, kd, z):
zeta = potential/((1. + (z/ac))*np.exp(kd*z))
return zeta
def _adjust_zeta_surface(self, potential, kd, z):
zeta = potential/(np.exp(kd*z))
return zeta
def _combined_hamaker_constant(self):
"""
Method to calculate the hamaker constant for surface-colloid interactions
based on Israelachvili 1991
"""
s_ah = self.surface_potential * (24 * np.pi * 0.165e-9 ** 2)
c_ah = self.colloid_potential * (24 * np.pi * 0.165e-9 ** 2)
self.hamaker = np.sqrt(s_ah * c_ah)
def _combined_lvdw_lewis_ab(self, arr):
"""
Method to calculate the combined attractive force profile based on Liang et al.
instead of using the vdW and Lewis acid-base profiles separately
Parameters:
----------
:param np.ndarray arr: distance array
:return: (np.ndarray) attractive force profile for porous media
"""
lvdw_lab0 = -self.hamaker / 6.
lvdw_lab1 = (self.ac / arr) + (self.ac / (arr + (2.* self.ac)))
lvdw_lab2 = np.log(arr / (arr + self.ac))
return lvdw_lab0 * (lvdw_lab1 + lvdw_lab2)
def _Lifshitz_van_der_Walls(self, arr, ac, vdw_st_water, vdw_st_colloid, vdw_st_solid):
"""
Parameters:
----------
arr: (np.array, np.float) array of distances from solid surfaces
ac: (float) colloid radius
vdw_st_water: (float) vdW surface tension of water
vdw_st_colloid: (float) vdW surface tension of colloid
vdw_st_solid: (float) vdW surface tension (bulk) of solid phase
constant:
--------
h0: contact plane between colloid and surface {Interface Science and Technology, 2008. Volume 16. Chapter 3}
Returns:
-------
lvdw: (np.array, np.float) array of lifshitz_vdW interaction energies
"""
h0 = 1.57e-10
lvdw0 = -4.*np.pi*(h0*h0/arr)*ac
lvdw1 = np.sqrt(vdw_st_water) - np.sqrt(vdw_st_solid)
lvdw2 = np.sqrt(vdw_st_water) - np.sqrt(vdw_st_colloid)
lvdw = lvdw0*lvdw1*lvdw2
return lvdw
def _lewis_acid_base(self, arr, ac, eplus_colloid, eplus_solid, eplus_water, eneg_colloid,
eneg_solid, eneg_water):
"""
Parameters:
----------
arr: (np.array, np.float) array of distances from solid surfaces
e_plus_*: (float) electron acceptor parameter for each specific phase
e_minus_*: (float) electron donor parameter for each specific phase
Constants:
----------
h0: contact plane between colloid and surface {Interface Science and Technology, 2008. Volume 16. Chapter 3}
chi: water decay length {van Oss 2008}
Returns:
-------
lab: (np.array, np.float) array of lewis acid base interaction energies
"""
h0 = 1.57e-10
chi = 0.6e-10
lab0 = -4.*np.pi*h0*ac
lab1 = np.exp((h0-arr)/chi)
lab2 = np.sqrt(eplus_water)*(np.sqrt(eneg_colloid) + np.sqrt(eneg_solid) - np.sqrt(eneg_water))
lab3 = np.sqrt(eneg_water)*(np.sqrt(eplus_colloid) + np.sqrt(eplus_solid) - np.sqrt(eplus_water))
lab4 = np.sqrt(eplus_colloid*eneg_solid)
lab5 = np.sqrt(eneg_colloid*eplus_solid)
lab = lab0*lab1*(lab2+lab3-lab4-lab5)
return lab
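    # Note on scales (ours, not from the original source): with chi = 0.6 Angstrom
    # the exp((h0 - arr)/chi) factor suppresses the acid-base term by roughly e^-14
    # already at arr = 1 nm, so this interaction is effectively a contact force.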
class ColloidColloid(object):
"""
    The ColloidColloid class is used to calculate colloid-colloid interaction forces
    using the formulations presented in Liang 2008, Qui 2012, and Israelachvili 1996.
    Attractive forces are based on the Liang & Israelachvili formulation. Electric
    double layer forces are calculated using Qui et al. 2012.
    The ColloidColloid object also provides methods to update ColloidColloid force
    array fields during model streaming.
Colloid colloid interaction energies are calculated via:
.. math::
\Phi^{EDL} = 32 \pi \epsilon_{0} \epsilon_{r} a_{c}
(\\frac{kT}{Ze})^{2} * [tanh(\\frac{Ze\psi_c}{4kT})]^{2}
* exp(-\kappa h)
.. math::
A_{H} = 384 \pi \\frac{\psi_{c}^{2} h k T I^{*}}{\kappa^{2}} exp(- \kappa h)
.. math::
\Phi^{A} = - \\frac{A_{H}}{6}[\\frac{2a_{c}^{2}}{h^{2} + 4a_{c}h} +
\\frac{2a_{c}^{2}}{(h + 2a_{c})^{2}} + ln(1 - \\frac{4a_{c}^{2}}{(h + 2a_{c})^{2}})]
Parameters:
----------
:param np.ndarray arr: A np.ndarray that represents the shape of the colloid
domain
:param float resolution: Colloid model resolution
:keyword dict valence: Valences of all species in solution. (Optional)
:keyword dict concentration: Concentration of all species in solution (Optional)
    :keyword float zeta_colloid: Measured zeta potential of colloid (Recommended).
    Default -40.5e-3 Na-Kaolinite Colloid [Chorom 1995. Eur. Jour. of Soil Science]
    :keyword float zeta_surface: Bulk zeta potential of porous media (Recommended).
    Default -60.9e-3 Glass bead media [Ducker 1992, Langmuir V8]
    :keyword float I: Ionic strength of simulated solution (Recommended). Default 1e-3 M
    :keyword float ac: Colloid radius in meters. Default 1e-6 m.
    :keyword float epsilon_r: Relative dielectric permittivity of water. (Optional)
    Default 78.304 @ 298 K [Malmberg and Maryott 1956. Jour. Res. Nat. Beau. Std. V56(1)]
    :keyword float sheer_plane: Equivalent to the thickness of one layer of water molecules. (Optional)
    Default 3e-10 m [Interface Science and Technology, 2008. Volume 16 Chapter 3]
    :keyword float T: Temperature of simulation fluid. Default 298.15 K
"""
def __init__(self, arr, **kwargs):
self.__params = {'concentration': False, 'adjust_zeta': False, 'I_initial': False,
'I': 10e-4, 'ac': 1e-6, 'epsilon_0': 8.85e-12 , 'epsilon_r': 78.304, 'valence': {'Na': 1.},
'sheer_plane': 3e-10, 'T': 298.15, 'lvdwst_water': 21.8e-3, 'lvdwst_colloid': 39.9e-3,
'lvdwst_solid': 33.7e-3, 'zeta_colloid': -40.5e-3, 'zeta_solid': -60.9e-3,
'psi+_colloid': 0.4e-3, 'psi-_colloid': 34.3e-3, 'psi+_water': 25.5e-3,
'psi-_water': 25.5e-3, 'psi+_solid': 1.3e-3, 'psi-_solid': 62.2e-3, 'kb': 1.38e-23,
'e': 1.6e-19, 'rho_colloid': 2650.}
for kwarg, value in kwargs.items():
self.__params[kwarg] = value
self.__arr = arr
self.__xarr = np.zeros(arr.shape)
self.__yarr = np.zeros(arr.shape)
self.__xlen = arr.shape[1]
self.__ylen = arr.shape[0]
self.__debye = False
self.__colloid_potential = False
self.__ionic_strength = False
        # 'lbres' and 'gridref' must be supplied through **kwargs; they have no defaults
        self.__resolution = copy.copy(self.__params['lbres'])/self.__params['gridref']
self.__pos = []
self.__x_distance = False
self.__y_distance = False
self.__x = False
self.__y = False
self.__center = False
self.__dlvo_xarray = False
self.__dlvo_yarray = False
def __reset(self):
"""
Resets the calculation arrays
"""
# self.__xarr = np.zeros(self.__arr.shape)
# self.__yarr = np.zeros(self.__arr.shape)
self.__pos = []
self.__x = False
self.__y = False
self.__dlvo_xarray = False
self.__dlvo_yarray = False
def __get_colloid_positions(self):
"""
        Get the specific x, y positions of each colloid in the system,
        read from Singleton.positions (the method takes no arguments)
        Returns:
        --------
        pos: (list) list of colloid positions within the model space
"""
self.__pos = Singleton.positions
return self.__pos
def update(self, colloids):
"""
Updates the colloidal positions and force arrays for the system
Parameters:
----------
        :param list colloids: (list, <class: Colloids.LB_Colloid.Colloid>)
"""
self.__reset()
@property
def x_array(self):
"""
Property method to generate the full x force array for colloid-colloid interaction
"""
if isinstance(self.__dlvo_xarray, bool):
self.__get_full_dlvo_array("x")
return self.__dlvo_xarray
@property
def y_array(self):
"""
Property method to generate the full y force array for colloid-colloid interaction
"""
if isinstance(self.__dlvo_yarray, bool):
self.__get_full_dlvo_array("y")
return self.__dlvo_yarray
@property
def x(self):
"""
Property method to generate the x force array for colloid-colloid interaction
"""
if isinstance(self.__x, bool):
self.__x = self.__dlvo_interaction_energy("x")
return self.__x
@property
def y(self):
"""
Property method to generate or return the y force array for colloid-colloid interaction
"""
if isinstance(self.__y, bool):
self.__y = self.__dlvo_interaction_energy("y")
return self.__y
@property
def x_distance_array(self):
"""
Generates an angular distance array in the x direction.
"""
if isinstance(self.__x_distance, bool):
self.__x_distance = self.__angular_array("x")
return self.__x_distance
@property
def y_distance_array(self):
"""
Generates an angular distance array in the y direction
"""
if isinstance(self.__y_distance, bool):
self.__y_distance = self.__angular_array("y")
return self.__y_distance
@property
def positions(self):
"""
Property method to generate colloid positions if they are not stored yet
"""
if not self.__pos:
self.__get_colloid_positions()
return self.__pos
@property
def ionic_strength(self):
"""
Property method to calculate ionic_strength on the fly
"""
        if not self.__params['concentration']:
            # factor of 2 accounts for both ion species of a symmetric electrolyte
            return self.__params['I']*2
else:
I = 0
for key in self.__params['concentration']:
I += (float(self.__params['concentration'][key])
* (float(self.__params['valence'][key]) ** 2))
return I
@property
def debye(self):
"""
        Property method to calculate the Debye screening parameter (the inverse Debye length) on the fly
"""
if isinstance(self.__debye, bool):
na = 6.02e23
k_inverse = np.sqrt((self.__params['epsilon_0']*self.__params['epsilon_r']
*self.__params['kb']*self.__params['T'])/
(self.__params['e']*self.__params['e']*na*self.ionic_strength))
self.__debye = 1./k_inverse
return self.__debye
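    # Dimensional note (our observation, not from the original source): the value
    # returned above is kappa in 1/m, not the length itself, and the expression is
    # dimensionally consistent only if the ionic strength is in mol/m^3. For a
    # 1 mM 1:1 electrolyte (ionic_strength = 2 mol/m^3 after the factor of two)
    # this reproduces the textbook Debye length of ~9.6 nm.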
@property
def colloid_potential(self):
"""
Property method that generates colloid potential
"""
if isinstance(self.__colloid_potential, bool):
self.__colloid_potential = self.__params['zeta_colloid']*(1. +
(self.__params['sheer_plane']/self.__params['ac']))\
*np.exp(self.debye*self.__params['sheer_plane'])
return self.__colloid_potential
def __get_full_dlvo_array(self, arr_type):
"""
        Handler method that calls subroutines to generate the dlvo force array
        Parameters:
        arr_type: (str) x direction or y direction, "x", "y"
        Returns:
        dlvo: (np.ndarray) full array of dlvo interaction forces from colloids
"""
dlvo_x = self.x
dlvo_y = self.y
# if arr_type.lower() == "x":
# arr = self.__xarr
# dlvo_colloid = self.x
# elif arr_type.lower() == "y":
# arr = self.__yarr
# dlvo_colloid = self.y
# else:
# raise TypeError("arr_type {} is not valid".format(arr_type))
        # __create_colloid_colloid_array populates self.__dlvo_xarray and
        # self.__dlvo_yarray as a side effect; its return value is unused here
        dlvo = self.__create_colloid_colloid_array(dlvo_x, dlvo_y)
        return dlvo
def __dlvo_interaction_energy(self, arr_type):
"""
        Uses the formulation of Israelachvili 1992, Intermolecular and Surface Forces,
        to calculate the Hamaker constant, followed by Liang et al. 2007 to calculate
        attractive and repulsive forces
Parameters:
        arr_type: (str) x direction or y direction, "x", "y"
        Returns:
        dlvo: (np.ndarray) dlvo interaction force from colloids
"""
        kb = 1.38e-23  # Boltzmann constant [J/K]
if arr_type.lower() == "x":
c_arr = self.x_distance_array
elif arr_type.lower() == "y":
c_arr = self.y_distance_array
else:
raise TypeError("arr_type {} is not valid".format(arr_type))
"""
A = 384. * np.pi * c_arr * kb * self.__params['T']\
* self.ionic_strength * self.colloid_potential * self.colloid_potential \
* np.exp(-self.debye * np.abs(c_arr))/ (self.debye * self.debye)
"""
# use Israelachvili 1991 for hamaker constant
A = self.colloid_potential * 24 * np.pi * 0.165e-9 ** 2
lwdv0 = -A / 6.
lvdw1 = (2. * self.__params['ac'] ** 2.) / (self.__params['ac'] ** 2. + 4. * self.__params['ac'] * c_arr)
lvdw2 = (2. * self.__params['ac'] ** 2.) / (c_arr + 2. * self.__params['ac']) ** 2.
lvdw3 = np.log(1. - ((4. * self.__params['ac'] ** 2.) / (c_arr + 2. * self.__params['ac']) ** 2.))
lewis_vdw = lwdv0 * (lvdw1 + lvdw2 + lvdw3)
"""
edl0 = 128. * np.pi * self.__params['ac'] * self.__params['ac'] *\
0.5 * self.ionic_strength * 1.38e-23 * self.__params['T']
edl1 = (2. * self.__params['ac']) * self.debye ** 2.
z = 0.
nz = 0.
for key, value in self.__params['valence'].items():
z += float(value)
nz += 1
z /= nz # todo: this term may be more correct!
# z /= 58.44 # todo: look up this term (might be stern length insted!)
# todo: look into Liang for attractive energy of col-col interaction. Replace for simplification.
edl2 = np.tanh((z * 1.6e-19 * self.colloid_potential)/(4. * 1.38e-23 * self.__params['T']))
edl3 = np.exp(-self.debye * c_arr)
edl = (edl0 / edl1) * (edl2 ** 2.) * edl3
"""
# original formulation by Derjaguin 1939
edl0 = (self.__params['epsilon_0'] * self.__params['epsilon_r'] * self.__params['ac'] * self.colloid_potential * self.colloid_potential) / 2.
edl1 = np.log(1. + np.exp(-self.debye * c_arr))
edl = edl0 * edl1
# todo: look more into the dlvo col-col interactions
dlvo = (edl - lewis_vdw)/c_arr # lewis_vdw + edl)/c_arr
if arr_type.lower() == "x":
dlvo[:, :self.__center] *= -1
elif arr_type.lower() == "y":
dlvo[self.__center + 1:, :] *= -1
else:
raise TypeError("arr_type {} is not valid".format(arr_type))
dlvo[self.__center, self.__center] = 0.
return dlvo
def __angular_array(self, arr_type):
"""
Calculates the angular proportion of the force a colloid particle
exerts in grid space, with regard to distance from the colloid.
Parameters:
arr_type: (str) delimiter to determine if the array is in the
x-direction or y-direction
Return:
arr (np.ndarray) Array of angular distances adjusted for the proportion
of force the colloid would be exposed to.
"""
if 1.01e-6 >= self.__resolution >= 1e-7:
self.__center = 2
arr = np.ones((5, 5))
center = 2
elif 1e-7 > self.__resolution >= 1e-8:
self.__center = 25
arr = np.ones((51, 51))
center = 25
elif 1e-8 > self.__resolution >= 1e-9:
self.__center = 250
arr = np.ones((501, 501))
center = 250
else:
raise AssertionError("model resolution: {} is out of bounds".format(self.__resolution))
for i, n in enumerate(arr):
for j, m in enumerate(n):
y = float(i - center)
x = float(j - center)
if x == 0 and y == 0:
arr[i, j] = 0.1
elif x == 0:
arr[i, j] = 1 * np.abs(y)
elif y == 0:
arr[i, j] = 1 * np.abs(x)
else:
arr[i, j] = np.sqrt(x**2 + y**2) + np.abs((m * (np.arctan(y / x) / (np.pi / 2.))))
if arr_type.lower() == 'x':
arr = arr.T
elif arr_type.lower() == 'y':
pass
else:
raise TypeError("arr_type {} is not valid".format(arr_type))
return arr * self.__resolution # /1e-6
def __create_colloid_colloid_array(self, c_arr, cy_arr, kernal="python"):
"""
Method to set colloidal forces to a model array.
Parameters:
-----------
c_arr: (np.ndarray) calculated colloid force array in x direction
cy_arr: (np.ndarray) calculated colloid force array in y direction
Return:
f_arr: (np.ndarray) an array of colloidal forces in a single primary
dimension
"""
center = (c_arr.shape[0] - 1) // 2
colloids = np.array(self.positions)
if kernal == 'fortran':
# this is actually slower than the numpy function! Who would've figured!
f_arr = np.zeros((self.__ylen, self.__xlen))
"""
collen = len(colloids)
fxlen = int(self.__xlen)
fylen = int(self.__ylen)
cxlen = int(c_arr.shape[1])
cylen = int(c_arr.shape[0])
# we send colcol setting utility to fortran for efficiency sake
f_arr = ColUtils.colcolarray(c_arr, colloids, fxlen,
fylen, cxlen, cylen,
center, collen)
"""
return f_arr
else:
f_arr = np.zeros((self.__ylen, self.__xlen))
fy_arr = np.zeros((self.__ylen, self.__xlen))
for colloid in colloids:
x, y = colloid
if np.isnan(x) or np.isnan(y):
pass
else:
x -= center
y -= center
if x < 0:
c_left_x = -x
c_right_x = c_arr.shape[1]
f_right_x = c_arr.shape[1] + x
f_left_x = 0
elif x + c_arr.shape[1] > f_arr.shape[1]:
f_left_x = x
f_right_x = f_arr.shape[1]
c_left_x = 0
c_right_x = -(x - f_arr.shape[1])
else:
c_left_x = 0
c_right_x = c_arr.shape[1]
f_left_x = x
f_right_x = x + c_arr.shape[1]
if y < 0:
c_top_y = -y
c_bottom_y = c_arr.shape[0]
f_top_y = 0
f_bottom_y = c_arr.shape[0] + y
elif y + c_arr.shape[0] > f_arr.shape[0]:
c_top_y = 0
c_bottom_y = -(y - f_arr.shape[0])
f_top_y = y
f_bottom_y = f_arr.shape[0]
else:
c_top_y = 0
c_bottom_y = c_arr.shape[0]
f_top_y = y
f_bottom_y = y + c_arr.shape[0]
try:
f_arr[f_top_y:f_bottom_y, f_left_x:f_right_x] += c_arr[c_top_y:c_bottom_y, c_left_x:c_right_x]
fy_arr[f_top_y:f_bottom_y, f_left_x:f_right_x] += cy_arr[c_top_y:c_bottom_y, c_left_x:c_right_x]
except ValueError:
pass
self.__dlvo_xarray = f_arr
self.__dlvo_yarray = fy_arr
# todo: write conversion of force to chemical potential
def force_to_kT(arr, T):
    k = 1.38e-23  # Boltzmann constant [J/K]
    # assumed completion of the unfinished original body (see todo above):
    # express the energy/force profile in units of kT
    return arr / (k * T)
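# Hedged usage sketch (values assumed for illustration): an energy barrier of
# ~4.1e-21 J at room temperature corresponds to roughly 1 kT:
#   force_to_kT(np.array([4.1e-21]), 298.15)  ->  approx array([1.])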
```
|
{
"source": "jdlaubrie/florence",
"score": 2
}
|
#### File: examples/curved_mesh_generation/high_order_curved_mesh_generation.py
```python
import os, sys
from Florence import *
from Florence.VariationalPrinciple import *
def high_order_curved_mesh_generation(p=2, analysis_nature="linear",
optimise=True, parallelise=False, recompute_sparsity_pattern=True, squeeze_sparsity_pattern=False):
"""An example of high order curved mesh generation on a hollow cylinder
with unstructured tetrahedral elements
"""
ProblemPath = PWD(__file__)
mesh_file = ProblemPath + '/Hollow_Cylinder.dat'
cad_file = ProblemPath + '/Hollow_Cylinder.igs'
mesh = Mesh()
mesh.Read(filename=mesh_file, reader_type="salome", element_type="tet")
mesh.GetHighOrderMesh(p=p, Decimals=7)
ndim = mesh.InferSpatialDimension()
material = NeoHookean(ndim, youngs_modulus=1e5, poissons_ratio=0.48)
scale = 1000.
condition = 1.e020
boundary_condition = BoundaryCondition()
boundary_condition.SetCADProjectionParameters(cad_file,
scale=scale,condition=condition, project_on_curves=True, solve_for_planar_faces=True)
boundary_condition.GetProjectionCriteria(mesh)
solver = LinearSolver(linear_solver="amg", linear_solver_type="cg", iterative_solver_tolerance=5.0e-07)
formulation = DisplacementFormulation(mesh)
fem_solver = FEMSolver(number_of_load_increments=2,
analysis_nature=analysis_nature,
optimise=optimise,
recompute_sparsity_pattern=recompute_sparsity_pattern,
squeeze_sparsity_pattern=squeeze_sparsity_pattern,
parallelise=parallelise)
solution = fem_solver.Solve(formulation=formulation, mesh=mesh,
material=material, boundary_condition=boundary_condition)
# check mesh quality
assert solution.ScaledJacobian.min() > 0.2
assert solution.ScaledJacobian.min() < 0.3
assert solution.ScaledHH.min() > 0.35
assert solution.ScaledHH.min() < 0.55
assert solution.ScaledFF.min() > 0.45
assert solution.ScaledFF.min() < 0.65
# In-built fancy curvilinear mesh plotter
# solution.CurvilinearPlot(plot_points=True, point_radius=0.2, color="#E3A933")
# Write the results to VTK
# mesh.points += solution.sol[:,:,-1]
# mesh.WriteVTK("cylinder_mesh")
if __name__ == "__main__":
# With optimisation ON
high_order_curved_mesh_generation(p=2, analysis_nature="linear", optimise=True)
high_order_curved_mesh_generation(p=2, analysis_nature="nonlinear", optimise=True)
high_order_curved_mesh_generation(p=2, analysis_nature="linear", optimise=True,
recompute_sparsity_pattern=False)
high_order_curved_mesh_generation(p=2, analysis_nature="nonlinear", optimise=True,
recompute_sparsity_pattern=False)
high_order_curved_mesh_generation(p=2, analysis_nature="linear", optimise=True,
recompute_sparsity_pattern=False, squeeze_sparsity_pattern=True)
high_order_curved_mesh_generation(p=2, analysis_nature="nonlinear", optimise=True,
recompute_sparsity_pattern=False, squeeze_sparsity_pattern=True)
# With optimisation OFF
high_order_curved_mesh_generation(p=2, analysis_nature="linear", optimise=False)
high_order_curved_mesh_generation(p=2, analysis_nature="nonlinear", optimise=False)
high_order_curved_mesh_generation(p=2, analysis_nature="linear", optimise=False,
recompute_sparsity_pattern=False)
high_order_curved_mesh_generation(p=2, analysis_nature="nonlinear", optimise=False,
recompute_sparsity_pattern=False)
high_order_curved_mesh_generation(p=2, analysis_nature="linear", optimise=False,
recompute_sparsity_pattern=False, squeeze_sparsity_pattern=True)
high_order_curved_mesh_generation(p=2, analysis_nature="nonlinear", optimise=False,
recompute_sparsity_pattern=False, squeeze_sparsity_pattern=True)
# With parallelisation ON
high_order_curved_mesh_generation(p=2, analysis_nature="nonlinear", optimise=False, parallelise=True)
high_order_curved_mesh_generation(p=2, analysis_nature="nonlinear", optimise=True, parallelise=True)
```
#### File: examples/hyperelastic_explicit_dynamics/hyperelastic_explicit_dynamics.py
```python
import numpy as np
from Florence import *
def explicit_dynamics_mechanics():
"""A hyperelastic explicit dynamics example using Mooney Rivlin model
of a column under compression with cubic (p=3) hexahedral elements
"""
mesh = Mesh()
mesh.Parallelepiped(upper_right_front_point=(1,1,6),nx=3,ny=3,nz=18,element_type="hex")
mesh.GetHighOrderMesh(p=3)
ndim = mesh.InferSpatialDimension()
material = NearlyIncompressibleMooneyRivlin(ndim, mu=4e5, lamb=2e6, rho=1100)
def DirichletFuncDyn(mesh, time_step):
boundary_data = np.zeros((mesh.points.shape[0],3, time_step))+np.NAN
X_0 = np.isclose(mesh.points[:,2],0)
boundary_data[X_0,:,:] = 0.
return boundary_data
def NeumannFuncDyn(mesh, time_step):
boundary_flags = np.zeros((mesh.faces.shape[0], time_step),dtype=np.uint8)
boundary_data = np.zeros((mesh.faces.shape[0],3, time_step))
mag = -1e4
for i in range(mesh.faces.shape[0]):
coord = mesh.points[mesh.faces[i,:],:]
avg = np.sum(coord,axis=0)/mesh.faces.shape[1]
if np.isclose(avg[2],mesh.points[:,2].max()):
boundary_data[i,2,:] = np.linspace(0,mag,time_step)
boundary_flags[i,:] = True
return boundary_flags, boundary_data
time_step = 1000
boundary_condition = BoundaryCondition()
boundary_condition.SetDirichletCriteria(DirichletFuncDyn, mesh, time_step)
boundary_condition.SetNeumannCriteria(NeumannFuncDyn, mesh, time_step)
formulation = DisplacementFormulation(mesh)
fem_solver = FEMSolver( total_time=1.,
number_of_load_increments=time_step,
analysis_type="dynamic",
analysis_subtype="explicit",
mass_type="lumped",
optimise=True,
print_incremental_log=True,
memory_store_frequency=10)
solution = fem_solver.Solve(formulation=formulation, mesh=mesh,
material=material, boundary_condition=boundary_condition)
# Write to paraview
# solution.WriteVTK("explicit_dynamics_mechanics",quantity=2)
# Write to HDF5/MATLAB(.mat)
# solution.WriteHDF5("explicit_dynamics_mechanics",compute_recovered_fields=False)
# In-built plotter - requires mayavi
# solution.Plot(quantity=2,configuration='deformed')
if __name__ == "__main__":
explicit_dynamics_mechanics()
```
#### File: examples/mixed_fem_multiphysics_strain_gradient_solvers/mixed_fem_multiphysics_strain_gradient_solvers.py
```python
import numpy as np
from Florence import *
def strain_gradient_elastodynamics():
"""An example of strain gradient elasticity under explicit dynamics with penalty
    contact. The strain gradient model is based on the couple stress (constrained Cosserat) theory
for solids. The couple stress strain gradient model in florence is implemented using
standard C0 continuous elements with penalty, Lagrange multiplier and augmented Lagrangian
techniques. These variational forms are also available for coupled electromechanical problems
"""
mesh = Mesh()
mesh.HollowCircle(inner_radius=30, outer_radius=50,nrad=6,ncirc=120, element_type="quad")
mesh.GetHighOrderMesh(p=2)
mu = 1.0e5
v = 0.4
material = CoupleStressModel(2, mu=mu, lamb=2.*mu*v/(1-2.*v), eta=1000., kappa=1e-6, rho=1100.)
def DirichletFuncDyn(mesh, time_step):
boundary_data = np.zeros((mesh.points.shape[0],2, time_step))+np.NAN
return boundary_data
def NeumannFuncDyn(mesh, time_step):
boundary_data = np.zeros((mesh.points.shape[0],2, time_step))+np.NAN
mag=3.5e4
d1 = np.ones(150)*mag
d2 = np.zeros(time_step-150)
d = np.concatenate((d1,d2))
boundary_data[:,0,:] = d
return boundary_data
time_step = 2000
boundary_condition = BoundaryCondition()
boundary_condition.SetDirichletCriteria(DirichletFuncDyn, mesh, time_step)
boundary_condition.SetNeumannCriteria(NeumannFuncDyn, mesh, time_step)
# Contact formulation
contact_formulation = ExplicitPenaltyContactFormulation(mesh, np.array([-1.,0.]), 80, 5e6)
# Lagrange multiplier strain gradient formulation
lagrange_multiplier_strain_gradient = CoupleStressFormulation(mesh,
save_condensed_matrices=False, subtype="lagrange_multiplier")
# Penalty strain gradient formulation
penalty_strain_gradient = CoupleStressFormulation(mesh,
save_condensed_matrices=False, subtype="penalty")
fem_solver = FEMSolver(total_time=60.,
number_of_load_increments=time_step,
analysis_type="dynamic",
analysis_nature="linear",
print_incremental_log=True,
include_physical_damping=True,
damping_factor=2.,
break_at_increment=400,
do_not_reset=False)
penalty_results = fem_solver.Solve(formulation=penalty_strain_gradient, mesh=mesh,
material=material, boundary_condition=boundary_condition,
contact_formulation=contact_formulation)
lagrange_multiplier_results = fem_solver.Solve(formulation=lagrange_multiplier_strain_gradient, mesh=mesh,
material=material, boundary_condition=boundary_condition,
contact_formulation=contact_formulation)
# Uncomment to plot both results superimposed on top of each other
# import matplotlib.pyplot as plt
# figure = plt.figure()
# penalty_results.Plot(configuration="deformed", quantity=0,
# plot_edges=False, figure=figure, show_plot=False)
# lagrange_multiplier_results.Plot(configuration="deformed",
# quantity=0, plot_edges=True, colorbar=False, figure=figure, show_plot=False)
# plt.show()
def strain_gradient_electroelastodynamics():
"""An example of strain gradient electro-elasticity under explicit dynamics with penalty
    contact. The strain gradient model is based on the couple stress (constrained Cosserat) theory
for solids. The couple stress strain gradient model in florence is implemented using
standard C0 continuous elements with penalty, Lagrange multiplier and augmented Lagrangian
    techniques. This example serves as a test rather than a fully functional/valid example
"""
mesh = Mesh()
mesh.HollowCircle(inner_radius=30, outer_radius=50,nrad=6,ncirc=120, element_type="quad")
mesh.GetHighOrderMesh(p=2)
mu = 1.0e5
v = 0.4
material = IsotropicLinearFlexoelectricModel(2, mu=mu, lamb=2.*mu*v/(1-2.*v),
eta=1000., kappa=1e-6, rho=1100., eps=1e-9, P=np.zeros((3,2)), f=1e-30*np.eye(2,2))
def DirichletFuncDyn(mesh, time_step):
boundary_data = np.zeros((mesh.points.shape[0],3, time_step))+np.NAN
return boundary_data
def NeumannFuncDyn(mesh, time_step):
boundary_data = np.zeros((mesh.points.shape[0],3, time_step))+np.NAN
mag=3.5e4
d1 = np.ones(150)*mag
d2 = np.zeros(time_step-150)
d = np.concatenate((d1,d2))
boundary_data[:,0,:] = d
return boundary_data
time_step = 2000
boundary_condition = BoundaryCondition()
boundary_condition.SetDirichletCriteria(DirichletFuncDyn, mesh, time_step)
boundary_condition.SetNeumannCriteria(NeumannFuncDyn, mesh, time_step)
# Contact formulation
contact_formulation = ExplicitPenaltyContactFormulation(mesh, np.array([-1.,0.]), 80, 5e6)
# Lagrange multiplier strain gradient formulation
lagrange_multiplier_strain_gradient = FlexoelectricFormulation(mesh,
save_condensed_matrices=False, subtype="lagrange_multiplier")
# Penalty strain gradient formulation
penalty_strain_gradient = FlexoelectricFormulation(mesh,
save_condensed_matrices=False, subtype="penalty")
    # Augmented Lagrangian strain gradient formulation
augmented_lagrange_strain_gradient = FlexoelectricFormulation(mesh,
save_condensed_matrices=False, subtype="augmented_lagrangian")
fem_solver = FEMSolver(total_time=60.,
number_of_load_increments=time_step,
analysis_type="dynamic",
analysis_nature="linear",
print_incremental_log=True,
include_physical_damping=True,
damping_factor=2.,
break_at_increment=100,
do_not_reset=False)
penalty_results = fem_solver.Solve(formulation=penalty_strain_gradient, mesh=mesh,
material=material, boundary_condition=boundary_condition,
contact_formulation=contact_formulation)
lagrange_multiplier_results = fem_solver.Solve(formulation=lagrange_multiplier_strain_gradient, mesh=mesh,
material=material, boundary_condition=boundary_condition,
contact_formulation=contact_formulation)
    augmented_lagrange_results = fem_solver.Solve(formulation=augmented_lagrange_strain_gradient, mesh=mesh,
        material=material, boundary_condition=boundary_condition,
        contact_formulation=contact_formulation)
# Static problems
def DirichletFuncStat(mesh):
boundary_data = np.zeros((mesh.points.shape[0],3))+np.NAN
r = np.linalg.norm(mesh.points,axis=1)
boundary_data[np.isclose(r,30),:2] = 0.
return boundary_data
def NeumannFuncStat(mesh):
boundary_flags = np.zeros((mesh.edges.shape[0]),dtype=np.uint8)
boundary_data = np.zeros((mesh.edges.shape[0],3))
normals = mesh.Normals()
boundary_data[:,:2] = -1e5*normals
boundary_flags[:] = True
return boundary_flags, boundary_data
time_step = 1
boundary_condition.__reset_state__()
boundary_condition.SetDirichletCriteria(DirichletFuncStat, mesh)
boundary_condition.SetNeumannCriteria(NeumannFuncStat, mesh)
fem_solver = FEMSolver(analysis_nature="linear", print_incremental_log=True)
penalty_results = fem_solver.Solve(formulation=penalty_strain_gradient, mesh=mesh,
material=material, boundary_condition=boundary_condition)
lagrange_multiplier_results = fem_solver.Solve(formulation=lagrange_multiplier_strain_gradient, mesh=mesh,
material=material, boundary_condition=boundary_condition)
    augmented_lagrange_results = fem_solver.Solve(formulation=augmented_lagrange_strain_gradient, mesh=mesh,
        material=material, boundary_condition=boundary_condition)
if __name__ == "__main__":
strain_gradient_elastodynamics()
strain_gradient_electroelastodynamics()
```
#### File: examples/wrinkling_of_soft_dielectric_film/wrinkling_of_soft_dielectric_film.py
```python
import numpy as np
from Florence import *
def dielectric_wrinkling(recompute_sparsity_pattern=True, squeeze_sparsity_pattern=False):
""" Implicit quasi-static analysis of large deformation in a soft dielectric elastomer
    undergoing potential wrinkling using the coupled electromechanics formulation
"""
# Create a cylindrical disc
radius = 20
mesh = Mesh()
# mesh.Cylinder(radius=radius,length=0.1,nlong=1, nrad=15, ncirc=30)
mesh.Cylinder(radius=radius,length=0.1,nlong=1, nrad=20, ncirc=45)
# Material constants
e0 = 8.8541e-12
mu = 1.0e5
mu1 = mu
mu2 = 0.
eps_2 = 4.0*e0
v = 0.4
lamb = 2.*mu*v/(1-2.*v)
# Use one of the ideal dielectric models
material = IsotropicElectroMechanics_108(3, mu1=mu1, mu2=mu2, lamb=lamb, eps_2=eps_2, rho=1000.)
def DirichletFunc(mesh):
boundary_data = np.zeros((mesh.points.shape[0],4))+np.NAN
# Constrain (mechanically) the perimeter of disc at the base
r = np.sqrt(mesh.points[:,0]**2 + mesh.points[:,1]**2)
Z_0 = np.logical_and(np.isclose(r,radius),np.isclose(mesh.points[:,2],0.))
boundary_data[Z_0,:3] = 0.
# Closed circuit condition [electric potential dofs]
Z_0 = np.isclose(mesh.points[:,2],0.)
boundary_data[Z_0,3] = 0.
Z_0 = np.isclose(mesh.points[:,2],mesh.points[:,2].max())
boundary_data[Z_0,3] = 5e6
return boundary_data
boundary_condition = BoundaryCondition()
boundary_condition.SetDirichletCriteria(DirichletFunc, mesh)
formulation = DisplacementPotentialFormulation(mesh)
fem_solver = FEMSolver(number_of_load_increments=50,
analysis_nature="nonlinear",
analysis_type="static",
newton_raphson_tolerance=1e-5,
maximum_iteration_for_newton_raphson=200,
optimise=True,
recompute_sparsity_pattern=recompute_sparsity_pattern,
squeeze_sparsity_pattern=squeeze_sparsity_pattern,
print_incremental_log=True
)
results = fem_solver.Solve(formulation=formulation, mesh=mesh,
material=material, boundary_condition=boundary_condition)
# Check results
norm = lambda s: np.linalg.norm(s[:,:3,:])
assert norm(results.GetSolutionVectors()) > 900.
assert norm(results.GetSolutionVectors()) < 910.
# Plot the deformation process - requires mayavi
# results.Plot(quantity=0, configuration='deformed')
if __name__ == "__main__":
dielectric_wrinkling()
dielectric_wrinkling(recompute_sparsity_pattern=False, squeeze_sparsity_pattern=False)
dielectric_wrinkling(recompute_sparsity_pattern=False, squeeze_sparsity_pattern=True)
```
#### File: Florence/Base/FlorenceExceptions.py
```python
class JacobianError(ArithmeticError):
def __init__(self,value=None):
self.value = value
def __str__(self):
if self.value is None:
self.value = 'Jacobian of mapping is close to zero'
return repr(self.value)
class IllConditionedError(ArithmeticError):
def __init__(self,value=None):
self.value = value
def __str__(self):
if self.value is None:
self.value = 'Matrix is ill conditioned'
return repr(self.value)
```
#### File: Florence/BoundaryElements/GetBases.py
```python
import numpy as np
def GetBases(C,z):
from Florence.FunctionSpace.OneDimensional.Line import LagrangeGaussLobatto, Lagrange
# Get basis at all integration points - every column corresponds to a Gauss point
Basis = np.zeros((C+2,z.shape[0])); dBasis = np.copy(Basis)
for i in range(0,z.shape[0]):
# Basis[0:,i], dBasis[0:,i], _ = Lagrange(C,z[i])
Basis[0:,i], dBasis[0:,i], _ = LagrangeGaussLobatto(C,z[i])
return Basis, dBasis
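# Hedged usage sketch (quadrature choice assumed; not part of the original file):
#   z, w = np.polynomial.legendre.leggauss(C + 2)   # Gauss points/weights on [-1, 1]
#   Basis, dBasis = GetBases(C, z)                  # Basis[i, g]: function i at point g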
```
#### File: Florence/BoundaryElements/PostProcessBEM2D.py
```python
import numpy as np
def InteriorPostProcess(total_sol,internal_points,global_coord,element_connectivity,w,z,boundary_elements,C,dN,Basis,Jacobian, nx, ny, XCO, YCO):
# Computing potential and flux - Interiors
POT = np.zeros((internal_points.shape[0],1))
FLUX1 = np.zeros((internal_points.shape[0],1))
FLUX2 = np.zeros((internal_points.shape[0],1))
# Loop over collocation points
for j in range(0,internal_points.shape[0]):
XP = internal_points[j,0]; YP = internal_points[j,1]
# Loop over elements
# for elem in range(0,len(boundary_elements)):
for elem in range(0,boundary_elements.shape[0]):
# Loop over nodes of the element
for i in range(0,C+2):
# Carry out usual Gaussian integration
A=0; B=0
DU1 = 0; DU2=0; DQ1=0; DQ2=0
for g in range(0,w.shape[0]):
# Compute the radial distance
RA = np.sqrt((XCO[g,elem]-XP)**2+(YCO[g,elem]-YP)**2)
# Compute Kernels - Assuming both sides are multiplied by 2pi
K1 = (-1.0/(RA**2))*((XP-XCO[g,elem])*nx[g,elem]+(YP-YCO[g,elem])*ny[g,elem])
K2 = np.log(1.0/RA)
RD1 = (XCO[g,elem]-XP)/RA
RD2 = (YCO[g,elem]-YP)/RA
# For potential
A+= K1*Basis[i,g]*Jacobian[g,elem]*w[g]
B+= K2*Basis[i,g]*Jacobian[g,elem]*w[g]
# Derivatives of potential along x and y
DU1 +=(1.0/RA**2)*(XCO[g,elem]-XP)*Basis[i,g]*Jacobian[g,elem]*w[g]
DU2 +=(1.0/RA**2)*(YCO[g,elem]-YP)*Basis[i,g]*Jacobian[g,elem]*w[g]
# Derivatives of flux along x and y
DQ1 += -((2.0*(RD1**2)-1.0)*nx[g,elem]+2.0*RD1*RD2*ny[g,elem])*Basis[i,g]*w[g]*Jacobian[g,elem]/(RA**2)
DQ2 += -((2.0*(RD2**2)-1.0)*ny[g,elem]+2.0*RD1*RD2*nx[g,elem])*Basis[i,g]*w[g]*Jacobian[g,elem]/(RA**2)
POT[j] += total_sol[element_connectivity[elem,i],0]*A-total_sol[element_connectivity[elem,i],1]*B
FLUX1[j] += total_sol[element_connectivity[elem,i],1]*DU1-total_sol[element_connectivity[elem,i],0]*DQ1
FLUX2[j] += total_sol[element_connectivity[elem,i],1]*DU2-total_sol[element_connectivity[elem,i],0]*DQ2
# Divide by 2pi
POT[j] = POT[j]/2.0/np.pi
FLUX1[j] = FLUX1[j]/2.0/np.pi
FLUX2[j] = FLUX2[j]/2.0/np.pi
return POT, FLUX1, FLUX2
def GetTotalSolution(sol,boundary_data,LHS2LHS,RHS2LHS):
total_sol = np.copy(boundary_data)
total_sol[np.array(LHS2LHS,dtype=int),0] = sol[np.array(LHS2LHS,dtype=int),0]
total_sol[np.array(RHS2LHS,dtype=int),1] = sol[np.array(RHS2LHS,dtype=int),0]
return total_sol
```
#### File: FunctionSpace/JacobiPolynomials/JacobiPolynomials_PurePython.py
```python
def JacobiPolynomials(n,xi,a=0,b=0):
# Input arguments:
# n - polynomial degree
    # xi - evaluation point
    # a,b - alpha and beta parameters for Jacobi Polynomials
    # a=b=0 for Legendre polynomials
    # a=b=-0.5 for Chebyshev polynomials
    # A fully written-out (hard-coded) Jacobi implementation is not a good idea, at least for Python (Numpy/Scipy)
# P = []
# # if n < 17:
# if n < 50:
# # P = WrittenJacobiPolynomials(n,xi,a,b)
# P = JacobiPolynomials_Cy.JacobiPolynomials(n,xi,a,b)
# else:
# The first two polynomials
# P = np.zeros((n+1,1))
P=[0]*(n+1) # List seems much faster than np.array here
P[0] = 1.0
if n>0:
P[1] = 0.5*((a-b)+(a+b+2)*xi)
if n>1:
for p in range(1,n):
# Evaluate coefficients
a1n = 2*(p+1)*(p+a+b+1)*(2*p+a+b)
a2n = (2*p+a+b+1)*(a**2-b**2)
a3n = (2*p+a+b)*(2*p+a+b+1)*(2*p+a+b+2)
a4n = 2*(p+a)*(p+b)*(2*p+a+b+2)
# print p
P[p+1] = ((a2n+a3n*xi)*P[p]-a4n*P[p-1])/a1n
return P
# @jit
def DiffJacobiPolynomials(n,xi,a=0,b=0,opt=0):
# opt is for Gauss-Lobatto integration purpose only
# Compute derivatives
# dP = np.zeros((n+1,1))
dP=[0]*(n+1) # List seems much faster than np.array here
if opt==1:
P = JacobiPolynomials(n,xi,a+1,b+1)
else:
P = JacobiPolynomials(n,xi,a,b)
for p in range(1,n+1):
dP[p] = 0.5*(a+b+p+1)*P[p-1]
return dP
```
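With a = b = 0 the recurrence above reduces to the Legendre recurrence, which gives a quick sanity check. A minimal standalone sketch (the helper `jacobi` below re-implements the same recurrence so the check runs without Florence; it is not part of the repository):
```python
import numpy as np
from numpy.polynomial import legendre

def jacobi(n, xi, a=0., b=0.):
    # same three-term recurrence as JacobiPolynomials above
    P = [0.]*(n + 1)
    P[0] = 1.0
    if n > 0:
        P[1] = 0.5*((a - b) + (a + b + 2)*xi)
    for p in range(1, n):
        a1n = 2*(p + 1)*(p + a + b + 1)*(2*p + a + b)
        a2n = (2*p + a + b + 1)*(a**2 - b**2)
        a3n = (2*p + a + b)*(2*p + a + b + 1)*(2*p + a + b + 2)
        a4n = 2*(p + a)*(p + b)*(2*p + a + b + 2)
        P[p + 1] = ((a2n + a3n*xi)*P[p] - a4n*P[p - 1])/a1n
    return P

xi = 0.37
vals = jacobi(5, xi)  # alpha = beta = 0 reduces to Legendre polynomials P_0..P_5
ref = [legendre.legval(xi, [0.]*k + [1.]) for k in range(6)]
assert np.allclose(vals, ref)
```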
#### File: ThreeDimensional/Tet/hpModal.py
```python
import imp, os
import numpy as np
from Florence.FunctionSpace.JacobiPolynomials import *
def hpBases(C,r0,s,t):
# The input argument r is changed to r0, because r is used as the polynomial degree in the 3rd (z) direction
# Coordinate transformation for tetrahedrals
a = 2.0*(1.+r0)/(-s-t) -1.
b = 2.0*(1.+s)/(1.-t) - 1.
c = t
order = -1
P1=C+1
P2=C+1
P3=C+1
# Size of bases is (for equal order interpolation)
nsize = int((P1+1.)*(P1+2.)*(P1+3.)/6.)
# Vertex based bases size
vsize = 4
# Edge based bases size
esize = 6*C
# Face based bases size
fsize = 2*C*(C-1)
    # Interior bases size
isize = int(C*(C-1)*(C-2)/6.)
# Allocate
Bases = np.zeros(nsize)
# Vertices
va = ((1.-a)/2.)*((1.-b)/2.)*((1.-c)/2.)
vb = ((1.+a)/2.)*((1.-b)/2.)*((1.-c)/2.)
vc = ((1.-a)/2.)*((1.+b)/2.)*((1.-c)/2.) # vc = ((1.+b)/2.)*((1.-c)/2.)
vd = (1.+c)/2.
Bases[:4] = np.array([va,vb,vc,vd])
if C > 0:
p = P1-1; q = P2-1; r = P3-1
# Edges
e1 = ((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[:,0]*((1.-b)/2.)**(p+1)*((1.-c)/2.)**(p+1)
e2 = ((1.-a)/2.)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[:,0]*((1.-c)/2.)**(q+1)
e3 = ((1.+a)/2.)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[:,0]*((1.-c)/2.)**(q+1)
e4 = ((1.-a)/2.)*((1.-b)/2.)*((1.-c)/2.)*((1.+c)/2.)*JacobiPolynomials(r-1,c,1.,1.)[:,0]
e5 = ((1.+a)/2.)*((1.-b)/2.)*((1.-c)/2.)*((1.+c)/2.)*JacobiPolynomials(r-1,c,1.,1.)[:,0]
e6 = ((1.+b)/2.)*((1.-c)/2.)*((1.+c)/2.)*JacobiPolynomials(r-1,c,1.,1.)[:,0]
Bases[4:4+C] = e1; Bases[4+C:4+2*C] = e2; Bases[4+2*C:4+3*C] = e3; Bases[4+3*C:4+4*C] = e4; Bases[4+4*C:4+5*C] = e5; Bases[4+5*C:4+6*C] = e6
# Faces
f1 = []; f2 = []; f3 = []; f4 = []
for p in range(1,P1):
for q in range(1,P2):
if p+q < P2:
f1 = np.append(f1,((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[-1]*((1.-c)/2.)**(p+q+1))
for p in range(1,P1):
for r in range(1,P3):
if p+r < P3:
f2 = np.append(f2,((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.-c)/2.)**(p+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*p+1.,1.)[-1])
for q in range(1,P2):
for r in range(1,P3):
if q+r < P3:
f3 = np.append(f3,((1.-a)/2.)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[-1]*((1.-c)/2.)**(q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*q+1.,1.)[-1])
f4 = np.append(f4,((1.+a)/2.)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[-1]*((1.-c)/2.)**(q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*q+1.,1.)[-1])
Bases[4+6*C:4+6*C+2*C*(C-1)] = np.append(np.append(np.append(f1,f2),f3),f4) # 2*C*(C-1) is the total number of bases on the faces (fsize)
# Interior
interior = []
for p in range(1,P1):
for q in range(1,P2):
for r in range(1,P3):
if p+q+r < P3:
interior = np.append(interior,((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[-1]*((1.-c)/2.)**(p+q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*p+2.*q+1.,1.)[-1])
Bases[4+6*C+2*C*(C-1):4+6*C+2*C*(C-1)+isize] = interior
return Bases, np.array([nsize,vsize,esize,fsize,isize])
def GradhpBases(C,r0,s,t):
# The input argument r is changed to r0, because r is used as the polynomial degree in the 3rd (z) direction
# Coordinate transformation for tetrahedrals
a = 2.0*(1.+r0)/(-s-t) -1.
b = 2.0*(1.+s)/(1.-t) - 1.
c = t
order = -1
P1=C+1
P2=C+1
P3=C+1
# Size of bases is (for equal order interpolation)
nsize = int((P1+1.)*(P1+2.)*(P1+3.)/6.);
vsize = 4; esize = 6*C; fsize = 2*C*(C-1); isize = int(C*(C-1)*(C-2)/6.)
# Allocate
GradBases = np.zeros((nsize,3))
# Vertices
# dN/dx = dN/da (a being the tetrahedral coordinate)
dvadx = (-0.5)*((1.-b)/2.)*((1.-c)/2.)
dvbdx = (0.5)*((1.-b)/2.)*((1.-c)/2.)
dvcdx = (-0.5)*((1.+b)/2.)*((1.-c)/2.) # dvcdx = 0. # The commented one is if we follow Sherwin's 95 paper
dvddx = 0.
# dN/dy = dN/db (b being the tetrahedral coordinate)
dvady = ((1.-a)/2.)*(-0.5)*((1.-c)/2.)
dvbdy = ((1.+a)/2.)*(-0.5)*((1.-c)/2.)
    dvcdy = ((1.-a)/2.)*(0.5)*((1.-c)/2.) # dvcdy = (0.5)*((1.-c)/2.)
dvddy = 0.
# dN/dz = dN/dc (c being the tetrahedral coordinate)
dvadz = ((1.-a)/2.)*((1.-b)/2.)*(-0.5)
dvbdz = ((1.+a)/2.)*((1.-b)/2.)*(-0.5)
    dvcdz = ((1.-a)/2.)*((1.+b)/2.)*(-0.5) # dvcdz = ((1.+b)/2.)*(-0.5)
dvddz = 0.5
GradBases[:4,:] = np.array([
[dvadx,dvbdx,dvcdx,dvddx],
[dvady,dvbdy,dvcdy,dvddy],
[dvadz,dvbdz,dvcdz,dvddz]
]).T
if C > 0:
p = P1-1; q = P2-1; r = P3-1
# Edges
# dN/dx = dN/da (a being the tetrahedral coordinate)
de1dx = (-0.5)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[:,0]*((1.-b)/2.)**(p+1)*((1.-c)/2.)**(p+1) +\
((1.-a)/2.)*(0.5)*JacobiPolynomials(p-1,a,1.,1.)[:,0]*((1.-b)/2.)**(p+1)*((1.-c)/2.)**(p+1) +\
((1.-a)/2.)*((1.+a)/2.)*DiffJacobiPolynomials(p-1,a,1.,1.,1)[:,0]*((1.-b)/2.)**(p+1)*((1.-c)/2.)**(p+1)
de2dx = (-0.5)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[:,0]*((1.-c)/2.)**(q+1)
de3dx = (0.5)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[:,0]*((1.-c)/2.)**(q+1)
de4dx = (-0.5)*((1.-b)/2.)*((1.-c)/2.)*((1.+c)/2.)*JacobiPolynomials(r-1,c,1.,1.)[:,0]
de5dx = (0.5)*((1.-b)/2.)*((1.-c)/2.)*((1.+c)/2.)*JacobiPolynomials(r-1,c,1.,1.)[:,0]
de6dx = 0.
# dN/dy = dN/db (b being the tetrahedral coordinate)
de1dy = ((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[:,0]*(p+1)*((1.-b)/2.)**(p)*(-0.5)*((1.-c)/2.)**(p+1)
de2dy = ((1.-a)/2.)*(-0.5)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[:,0]*((1.-c)/2.)**(q+1) +\
((1.-a)/2.)*((1.-b)/2.)*(0.5)*JacobiPolynomials(q-1,b,1.,1.)[:,0]*((1.-c)/2.)**(q+1) +\
((1.-a)/2.)*((1.-b)/2.)*((1.+b)/2.)*DiffJacobiPolynomials(q-1,b,1.,1.,1)[:,0]*((1.-c)/2.)**(q+1)
de3dy = ((1.+a)/2.)*(-0.5)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[:,0]*((1.-c)/2.)**(q+1) +\
((1.+a)/2.)*((1.-b)/2.)*(0.5)*JacobiPolynomials(q-1,b,1.,1.)[:,0]*((1.-c)/2.)**(q+1) +\
((1.+a)/2.)*((1.-b)/2.)*((1.+b)/2.)*DiffJacobiPolynomials(q-1,b,1.,1.,1)[:,0]*((1.-c)/2.)**(q+1)
de4dy = ((1.-a)/2.)*(-0.5)*((1.-c)/2.)*((1.+c)/2.)*JacobiPolynomials(r-1,c,1.,1.)[:,0]
de5dy = ((1.+a)/2.)*(-0.5)*((1.-c)/2.)*((1.+c)/2.)*JacobiPolynomials(r-1,c,1.,1.)[:,0]
de6dy = (0.5)*((1.-c)/2.)*((1.+c)/2.)*JacobiPolynomials(r-1,c,1.,1.)[:,0]
# dN/dz = dN/dc (c being the tetrahedral coordinate)
de1dz = ((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[:,0]*((1.-b)/2.)**(p+1)*(p+1)*((1.-c)/2.)**(p)*(-0.5)
de2dz = ((1.-a)/2.)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[:,0]*(q+1)*((1.-c)/2.)**(q)*(-0.5)
de3dz = ((1.+a)/2.)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[:,0]*(q+1)*((1.-c)/2.)**(q)*(-0.5)
de4dz = ((1.-a)/2.)*((1.-b)/2.)*(-0.5)*((1.+c)/2.)*JacobiPolynomials(r-1,c,1.,1.)[:,0] +\
((1.-a)/2.)*((1.-b)/2.)*((1.-c)/2.)*(0.5)*JacobiPolynomials(r-1,c,1.,1.)[:,0] +\
((1.-a)/2.)*((1.-b)/2.)*((1.-c)/2.)*((1.+c)/2.)*DiffJacobiPolynomials(r-1,c,1.,1.,1)[:,0]
de5dz = ((1.+a)/2.)*((1.-b)/2.)*(-0.5)*((1.+c)/2.)*JacobiPolynomials(r-1,c,1.,1.)[:,0] +\
((1.+a)/2.)*((1.-b)/2.)*((1.-c)/2.)*(0.5)*JacobiPolynomials(r-1,c,1.,1.)[:,0] +\
((1.+a)/2.)*((1.-b)/2.)*((1.-c)/2.)*((1.+c)/2.)*DiffJacobiPolynomials(r-1,c,1.,1.,1)[:,0]
de6dz = ((1.+b)/2.)*(-0.5)*((1.+c)/2.)*JacobiPolynomials(r-1,c,1.,1.)[:,0] +\
((1.+b)/2.)*((1.-c)/2.)*(0.5)*JacobiPolynomials(r-1,c,1.,1.)[:,0] +\
((1.+b)/2.)*((1.-c)/2.)*((1.+c)/2.)*DiffJacobiPolynomials(r-1,c,1.,1.,1)[:,0]
GradBases[4:4+C,0] = de1dx; GradBases[4+C:4+2*C,0] = de2dx; GradBases[4+2*C:4+3*C,0] = de3dx; GradBases[4+3*C:4+4*C,0] = de4dx; GradBases[4+4*C:4+5*C,0] = de5dx; GradBases[4+5*C:4+6*C,0] = de6dx
GradBases[4:4+C,1] = de1dy; GradBases[4+C:4+2*C,1] = de2dy; GradBases[4+2*C:4+3*C,1] = de3dy; GradBases[4+3*C:4+4*C,1] = de4dy; GradBases[4+4*C:4+5*C,1] = de5dy; GradBases[4+5*C:4+6*C,1] = de6dy
        GradBases[4:4+C,2] = de1dz; GradBases[4+C:4+2*C,2] = de2dz; GradBases[4+2*C:4+3*C,2] = de3dz; GradBases[4+3*C:4+4*C,2] = de4dz; GradBases[4+4*C:4+5*C,2] = de5dz; GradBases[4+5*C:4+6*C,2] = de6dz
# Faces
dface1dx = []; dface1dy = []; dface1dz = []
for p in range(1,P1):
for q in range(1,P2):
if p+q < P2:
df1dx = (-0.5)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[-1]*((1.-c)/2.)**(p+q+1) +\
((1.-a)/2.)*(0.5)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[-1]*((1.-c)/2.)**(p+q+1) +\
((1.-a)/2.)*((1.+a)/2.)*DiffJacobiPolynomials(p-1,a,1.,1.,1)[-1]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[-1]*((1.-c)/2.)**(p+q+1)
dface1dx = np.append(dface1dx,df1dx)
df1dy = ((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*(p+1)*((1.-b)/2.)**(p)*(0.5)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[-1]*((1.-c)/2.)**(p+q+1) +\
((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*(0.5)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[-1]*((1.-c)/2.)**(p+q+1) +\
((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*DiffJacobiPolynomials(q-1,b,2.*p+1.,1.,1)[-1]*((1.-c)/2.)**(p+q+1)
dface1dy = np.append(dface1dy,df1dy)
df1dz = ((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[-1]*(p+q+1)*((1.-c)/2.)**(p+q)*(-0.5)
dface1dz = np.append(dface1dz,df1dz)
dface2dx = []; dface2dy = []; dface2dz = []
for p in range(1,P1):
for r in range(1,P3):
if p+r < P3:
df2dx = (-0.5)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.-c)/2.)**(p+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*p+1.,1.)[-1] +\
((1.-a)/2.)*(0.5)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.-c)/2.)**(p+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*p+1.,1.)[-1] +\
((1.-a)/2.)*((1.+a)/2.)*DiffJacobiPolynomials(p-1,a,1.,1.,1)[-1]*((1.-b)/2.)**(p+1)*((1.-c)/2.)**(p+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*p+1.,1.)[-1]
dface2dx = np.append(dface2dx,df2dx)
df2dy = ((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*(p+1)*((1.-b)/2.)**(p)*(-0.5)*((1.-c)/2.)**(p+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*p+1.,1.)[-1]
dface2dy = np.append(dface2dy,df2dy)
df2dz = ((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*(p+1)*((1.-c)/2.)**(p)*(-0.5)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*p+1.,1.)[-1] +\
((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.-c)/2.)**(p+1)*(0.5)*JacobiPolynomials(r-1,c,2.*p+1.,1.)[-1] +\
((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.-c)/2.)**(p+1)*((1.+c)/2.)*DiffJacobiPolynomials(r-1,c,2.*p+1.,1.,1)[-1]
dface2dz = np.append(dface2dz,df2dz)
dface3dx = []; dface3dy = []; dface3dz = []
dface4dx = []; dface4dy = []; dface4dz = []
for q in range(1,P2):
for r in range(1,P3):
if q+r < P3:
df3dx = (-0.5)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[-1]*((1.-c)/2.)**(q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*q+1.,1.)[-1]
dface3dx = np.append(dface3dx,df3dx)
df3dy = ((1.-a)/2.)*(-0.5)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[-1]*((1.-c)/2.)**(q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*q+1.,1.)[-1] +\
((1.-a)/2.)*((1.-b)/2.)*(0.5)*JacobiPolynomials(q-1,b,1.,1.)[-1]*((1.-c)/2.)**(q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*q+1.,1.)[-1] +\
((1.-a)/2.)*((1.-b)/2.)*((1.+b)/2.)*DiffJacobiPolynomials(q-1,b,1.,1.,1)[-1]*((1.-c)/2.)**(q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*q+1.,1.)[-1]
dface3dy = np.append(dface3dy,df3dy)
df3dz = ((1.-a)/2.)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[-1]*(q+1)*((1.-c)/2.)**(q)*(-0.5)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*q+1.,1.)[-1] +\
((1.-a)/2.)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[-1]*((1.-c)/2.)**(q+1)*(0.5)*JacobiPolynomials(r-1,c,2.*q+1.,1.)[-1] +\
((1.-a)/2.)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[-1]*((1.-c)/2.)**(q+1)*((1.+c)/2.)*DiffJacobiPolynomials(r-1,c,2.*q+1.,1.,1)[-1]
dface3dz = np.append(dface3dz,df3dz)
df4dx = (0.5)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[-1]*((1.-c)/2.)**(q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*q+1.,1.)[-1]
dface4dx = np.append(dface4dx,df4dx)
df4dy = ((1.+a)/2.)*(-0.5)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[-1]*((1.-c)/2.)**(q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*q+1.,1.)[-1] +\
((1.+a)/2.)*((1.-b)/2.)*(0.5)*JacobiPolynomials(q-1,b,1.,1.)[-1]*((1.-c)/2.)**(q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*q+1.,1.)[-1] +\
((1.+a)/2.)*((1.-b)/2.)*((1.+b)/2.)*DiffJacobiPolynomials(q-1,b,1.,1.,1)[-1]*((1.-c)/2.)**(q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*q+1.,1.)[-1]
dface4dy = np.append(dface4dy,df4dy)
df4dz = ((1.+a)/2.)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[-1]*(q+1)*((1.-c)/2.)**(q)*(-0.5)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*q+1.,1.)[-1] +\
((1.+a)/2.)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[-1]*((1.-c)/2.)**(q+1)*(0.5)*JacobiPolynomials(r-1,c,2.*q+1.,1.)[-1] +\
((1.+a)/2.)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[-1]*((1.-c)/2.)**(q+1)*((1.+c)/2.)*DiffJacobiPolynomials(r-1,c,2.*q+1.,1.,1)[-1]
dface4dz = np.append(dface4dz,df4dz)
GradBases[4+6*C:4+6*C+2*C*(C-1),0] = np.append(np.append(np.append(dface1dx,dface2dx),dface3dx),dface4dx)
GradBases[4+6*C:4+6*C+2*C*(C-1),1] = np.append(np.append(np.append(dface1dy,dface2dy),dface3dy),dface4dy)
GradBases[4+6*C:4+6*C+2*C*(C-1),2] = np.append(np.append(np.append(dface1dz,dface2dz),dface3dz),dface4dz)
# Interior
dinteriordx = []; dinteriordy = []; dinteriordz = []
for p in range(1,P1):
for q in range(1,P2):
for r in range(1,P3):
if p+q+r < P3:
didx = (-0.5)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[-1]*((1.-c)/2.)**(p+q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*p+2.*q+1.,1.)[-1] +\
((1.-a)/2.)*(0.5)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[-1]*((1.-c)/2.)**(p+q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*p+2.*q+1.,1.)[-1] +\
((1.-a)/2.)*((1.+a)/2.)*DiffJacobiPolynomials(p-1,a,1.,1.,1)[-1]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[-1]*((1.-c)/2.)**(p+q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*p+2.*q+1.,1.)[-1]
dinteriordx = np.append(dinteriordx,didx)
didy = ((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*(p+1)*((1.-b)/2.)**(p)*(-0.5)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[-1]*((1.-c)/2.)**(p+q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*p+2.*q+1.,1.)[-1] +\
((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*(0.5)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[-1]*((1.-c)/2.)**(p+q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*p+2.*q+1.,1.)[-1] +\
((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*DiffJacobiPolynomials(q-1,b,2.*p+1.,1.,1)[-1]*((1.-c)/2.)**(p+q+1)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*p+2.*q+1.,1.)[-1]
dinteriordy = np.append(dinteriordy,didy)
didz = ((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[-1]*(p+q+1)*((1.-c)/2.)**(p+q)*(-0.5)*((1.+c)/2.)*JacobiPolynomials(r-1,c,2.*p+2.*q+1.,1.)[-1] +\
((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[-1]*((1.-c)/2.)**(p+q+1)*(0.5)*JacobiPolynomials(r-1,c,2.*p+2.*q+1.,1.)[-1] +\
((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[-1]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[-1]*((1.-c)/2.)**(p+q+1)*((1.+c)/2.)*DiffJacobiPolynomials(r-1,c,2.*p+2.*q+1.,1.,1)[-1]
dinteriordz = np.append(dinteriordz,didz)
GradBases[4+6*C+2*C*(C-1):4+6*C+2*C*(C-1)+isize,0] = dinteriordx
GradBases[4+6*C+2*C*(C-1):4+6*C+2*C*(C-1)+isize,1] = dinteriordy
GradBases[4+6*C+2*C*(C-1):4+6*C+2*C*(C-1)+isize,2] = dinteriordz
    # Build the Jacobian to map from (a,b,c) to (r,s,t) (the argument r was recently renamed to r0)
Jacobian = np.array([
[-2./(s+t), 2.*(1.+r0)/(s+t)**2, 2.*(1.+r0)/(s+t)**2],
[0., 2.0/(1.-t), 2.*(1.+s)/(1.-t)**2],
[0., 0., 1.]
])
return GradBases, Jacobian
```
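As a quick consistency check on the mode counts used above, the vertex, edge, face and interior block sizes must sum to the dimension of the complete polynomial space on a tetrahedron. A small standalone verification (ours, not part of the repository):
```python
# verify nsize == vsize + esize + fsize + isize for the tet modal basis
for C in range(0, 10):
    P1 = C + 1
    nsize = (P1 + 1)*(P1 + 2)*(P1 + 3)//6  # dim of complete polynomial space
    vsize = 4                     # vertex modes
    esize = 6*C                   # edge modes
    fsize = 2*C*(C - 1)           # face modes (4 faces x C(C-1)/2 each)
    isize = C*(C - 1)*(C - 2)//6  # interior (bubble) modes
    assert nsize == vsize + esize + fsize + isize
```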
#### File: TwoDimensional/Tri/hpModal.py
```python
import os, imp
import numpy as np
from Florence.FunctionSpace.JacobiPolynomials import *
def hpBases(C,r,s):
order = -1
P1=C+1
P2=C+1
# Size of bases is (for equal order interpolation)
nsize = int((P1+1.)*(P1+2.)/2.)
p = P1-1
q = P2-1
Bases = np.zeros(nsize)
a = 2.*(1.+r)/(1.-s) - 1.
b = s
# Vertices
va = ((1.-a)/2.)*((1.-b)/2.)
vb = ((1.+a)/2.)*((1.-b)/2.)
vc = ((1.+b)/2.)
Bases[:3] = np.array([va,vb,vc])
if C>0:
# Edges
e1 = ((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[:,0]*((1.-b)/2.)**(p+1)
e2 = ((1.-a)/2.)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[:,0]
e3 = ((1.+a)/2.)*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)[:,0]
Bases[3:3+C] = e1; Bases[3+C:3+2*C] = e2; Bases[3+2*C:3+3*C] = e3
# print Bases
# Interior
interior = []
for p in range(1,P1):
for q in range(1,P2):
if p+q < P2:
interior = np.append(interior,((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[order]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[order])
# print p-1,q-1
# print interior
Bases[3+3*C:] = interior
# Bases = np.array([e1,e2,e3,i])
elif C<0 or isinstance(C,float):
        raise ValueError('Order of interpolation degree should be a non-negative integer')
return Bases
def GradhpBases(C,r,s):
order = -1
P1=C+1
P2=C+1
# Size of bases is (for equal order interpolation)
nsize = int((P1+1.)*(P1+2.)/2.)
p = P1-1
q = P2-1
GradBases = np.zeros((nsize,2))
a = 2.*(1.+r)/(1.-s) - 1.
b = s
# Vertices
dvadx = -0.5*((1.-b)/2.)
dvbdx = 0.5*((1.-b)/2.)
dvcdx = 0.
dvady = -0.5*((1.-a)/2.)
dvbdy = -0.5*((1.+a)/2.)
dvcdy = 0.5
GradBases[:3,:] = np.array([
[dvadx,dvbdx,dvcdx],
[dvady,dvbdy,dvcdy]
]).T
if C>0:
# Edges
# dN/dx = dN/da (a being the triangular coordinate)
de1dx = -0.5*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)*((1.-b)/2.)**(p+1) +\
((1.-a)/2.)*0.5*JacobiPolynomials(p-1,a,1.,1.)*((1.-b)/2.)**(p+1) +\
((1.-a)/2.)*((1.+a)/2.)*DiffJacobiPolynomials(p-1,a,1.,1.,1)*((1.-b)/2.)**(p+1)
de2dx = -0.5*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)
de3dx = 0.5*((1.-b)/2.)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.)
# dN/dy = dN/db (b being the triangular coordinate)
de1dy = ((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)*(p+1)*((1.-b)/2.)**p*(-0.5)
de2dy = ((1.-a)/2.)*(-0.5)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.) +\
((1.-a)/2.)*((1.-b)/2.)*0.5*JacobiPolynomials(q-1,b,1.,1.) +\
((1.-a)/2.)*((1.-b)/2.)*((1.+b)/2.)*DiffJacobiPolynomials(q-1,b,1.,1.,1)
de3dy = ((1.+a)/2.)*(-0.5)*((1.+b)/2.)*JacobiPolynomials(q-1,b,1.,1.) +\
((1.+a)/2.)*((1.-b)/2.)*0.5*JacobiPolynomials(q-1,b,1.,1.) +\
((1.+a)/2.)*((1.-b)/2.)*((1.+b)/2.)*DiffJacobiPolynomials(q-1,b,1.,1.,1)
GradBases[3:3+C,0] = de1dx[:,0]; GradBases[3+C:3+2*C,0] = de2dx[:,0]; GradBases[3+2*C:3+3*C,0] = de3dx[:,0]
GradBases[3:3+C,1] = de1dy[:,0]; GradBases[3+C:3+2*C,1] = de2dy[:,0]; GradBases[3+2*C:3+3*C,1] = de3dy[:,0]
# Interior
dinteriordx = []; dinteriordy = []
for p in range(1,P1):
for q in range(1,P2):
if p+q < P2:
# dN/dx = dN/da (a being the triangular coordinate)
didx = -0.5*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[order]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[order] +\
((1.-a)/2.)*0.5*JacobiPolynomials(p-1,a,1.,1.)[order]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[order] +\
((1.-a)/2.)*((1.+a)/2.)*DiffJacobiPolynomials(p-1,a,1.,1.,1)[order]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[order]
dinteriordx = np.append(dinteriordx,didx)
# dN/dy = dN/db (b being the triangular coordinate)
didy = ((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[order]*(p+1)*((1.-b)/2.)**p*(-0.5)*((1.+b)/2.)*JacobiPolynomials(q-1,b,2.*p+1.,1.)[order] +\
((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[order]*((1.-b)/2.)**(p+1)*0.5*JacobiPolynomials(q-1,b,2.*p+1.,1.)[order] +\
((1.-a)/2.)*((1.+a)/2.)*JacobiPolynomials(p-1,a,1.,1.)[order]*((1.-b)/2.)**(p+1)*((1.+b)/2.)*DiffJacobiPolynomials(q-1,b,2.*p+1.,1.,1)[order]
dinteriordy = np.append(dinteriordy,didy)
GradBases[3+3*C:,0] = dinteriordx
GradBases[3+3*C:,1] = dinteriordy
elif C<0 or isinstance(C,float):
        raise ValueError('Order of interpolation degree should be a non-negative integer')
return GradBases
```
#### File: TwoDimensional/Tri/hpNodalLagrange.py
```python
from __future__ import division
import numpy as np
from Core.QuadratureRules.FeketePointsTri import *
def hpBasesLagrange(C,xi,eta):
""" Consructs nodal bases lagrange shape functions with Fekete
nodes based on Pascal's triangles
1
x y
x**2 xy y**2
x**3 x**2*y x*y**2 y**3
x**4 x**3*y x**2*y**2 x*y**3 y**4
x**5 x**4*y x**3*y**2 x**2*y**3 x*y**4 y**5
        For instance, to construct quadratic basis functions we have
one polynomial of the type:
N(x,y) = a1+a2*x+a3*y+a4*x**2+a5*x*y+a6*y**2 # 6 coefficients
where x and y are the parent coordinates (not the physical ones).
This polynomial is then evaluated at all triangular Fekete points.
There would be 6 Fekete points for this case:
[(-1,-1),(1,-1),(-1,1),(0,-1),(-1,0),(0,0)]
        Evaluating the polynomial at these 6 points gives 6 equations in the
        coefficients a_i, which form a Vandermonde matrix. Solving the
        Vandermonde system with a right-hand side that is zero everywhere
        apart from one node yields one of the shape functions. Repeating
        this for every node gives all 6 basis functions.
(-1,1)
|\
| \
| \
| \
| \
| \
(-1,-1) -------- (1,-1)
        Returns:
            Bases [np.ndarray of doubles ] basis functions
            gBasesx [np.ndarray of doubles ] gradient of basis functions wrt x
            gBasesy [np.ndarray of doubles ] gradient of basis functions wrt y
            ggBasesxx [np.ndarray of doubles ] hessian of basis functions wrt x and x
            ggBasesxy [np.ndarray of doubles ] hessian of basis functions wrt x and y
            ggBasesyx [np.ndarray of doubles ] hessian of basis functions wrt y and x
            ggBasesyy [np.ndarray of doubles ] hessian of basis functions wrt y and y
"""
x = xi
y = eta
nsize = int((C+2)*(C+3)/2)
gBases = np.zeros((nsize,2),dtype=np.float64)
ggBases = np.zeros((nsize,2,2),dtype=np.float64)
if C==1:
Bases = np.array([
x**2/2. + x*y + x/2. + y**2/2. + y/2.,
(x*(x + 1))/2,
(y*(y + 1))/2,
-(x + y)*(x + 1),
-(x + y)*(y + 1),
(x + 1)*(y + 1),
])
gBasesx = np.array([
x + y + 1/2.,
x + 1/2.,
0.,
- 2*x - y - 1.,
- y - 1.,
y + 1.,
])
gBasesy = np.array([
x + y + 1/2.,
0,
y + 1/2.,
- x - 1,
- x - 2.*y - 1,
x + 1,
])
ggBasesxx = np.array([ 1., 1, 0, -2, 0, 0])
ggBasesxy = np.array([ 1., 0, 0, -1, -1, 1])
ggBasesyx = np.array([ 1., 0, 0, -1, -1, 1])
ggBasesyy = np.array([ 1., 0, 1, 0, -2, 0])
elif C==2:
Bases = np.array([
- (5*x**3)/8 - 2*x**2*y - (11*x**2)/8 - 2*x*y**2 - (11*x*y)/4 - (5*x)/8 - (5*y**3)/8 - (11*y**2)/8 - (5*y)/8 - 1/2251799813685248,
-((x + 1)*(- 5*x**2 + x*y + x + y**2 + y + 1))/8,
- (4056890586614813*x**3)/40564819207303340847894502572032 - (x**2*y)/8 - x**2/8 - (x*y**2)/8 - (x*y)/4 - x/8 + (5*y**3)/8 + y**2/2 - y/4 - 1/8,
(5*5**(1/2)*x**3)/8 + (6831541189506395*x**2*y)/2251799813685248 + (2712083152976557*x**2)/1125899906842624 + (7369110560110821*x*y**2)/4503599627370496 + (2277180396502131*x*y)/562949953421312 + (2277180396502129*x)/2251799813685248 + (7369110560110821*y**2)/4503599627370496 + (2277180396502129*y)/2251799813685248,
(8601109929670857*x*y**2)/36028797018963968 - (869805512948851*y)/2251799813685248 - (3479222051795405*x*y)/2251799813685248 - (5*5**(1/2)*x**3)/8 - (1739611025897699*x)/4503599627370496 - (1304708269423277*x**2*y)/1125899906842624 - (2008395711199917*x**2)/1125899906842624 + (8601109929670857*y**2)/36028797018963968 - 1/4503599627370496,
- (5409187448819751*x**3)/10141204801825835211973625643008 + (7369110560110821*x**2*y)/4503599627370496 + (3684555280055409*x**2)/2251799813685248 + (6831541189506395*x*y**2)/2251799813685248 + (2277180396502131*x*y)/562949953421312 + (1138590198251065*x)/1125899906842624 + (5*5**(1/2)*y**3)/8 + (1356041576488279*y**2)/562949953421312 + (142323774781383*y)/140737488355328 - 3/2251799813685248,
(7212249931759667*x**3)/20282409603651670423947251286016 - (27*y)/8 - (27*x*y)/4 - (27*x*y**2)/8 - (27*x**2*y)/8 - (27*x**2)/8 - (27*x)/8 - (27*y**2)/8 + 5/2251799813685248,
- (7212249931759667*x**3)/40564819207303340847894502572032 + (7369110560110815*x**2*y)/4503599627370496 + (7369110560110815*x**2)/4503599627370496 + (2150277482417697*x*y**2)/9007199254740992 + (5*x*y)/2 + (2545965081804343*x)/1125899906842624 + (2150277482417697*y**2)/9007199254740992 + (3889888508315405*y)/4503599627370496 + 5/8,
(537569370604429*x**2*y)/2251799813685248 - (1739611025897699*y)/4503599627370496 - (3479222051795405*x*y)/2251799813685248 - (5*5**(1/2)*y**3)/8 - (1304708269423277*x*y**2)/1125899906842624 - (217451378237213*x)/562949953421312 + (2150277482417715*x**2)/9007199254740992 + (4056890586614813*x**3)/10141204801825835211973625643008 - (8033582844799669*y**2)/4503599627370496 + 5/9007199254740992,
- (5409187448819751*x**3)/20282409603651670423947251286016 + (8601109929670797*x**2*y)/36028797018963968 + (2150277482417697*x**2)/9007199254740992 + (7369110560110817*x*y**2)/4503599627370496 + (5*x*y)/2 + (243118031769713*x)/281474976710656 + (3684555280055409*y**2)/2251799813685248 + (5091930163608687*y)/2251799813685248 + 5/8
])
gBasesx = np.array([ - (11*x)/4 - (11*y)/4 - 4*x*y - (15*x**2)/8 - 2*y**2 - 5/8,
(5*x**2)/8 - y/8 - (x*y)/8 - x/8 - y**2/8 - ((x + 1)*(y - 10*x + 1))/8 - 1/8,
- x/4 - y/4 - (x*y)/4 - (12170671759844439*x**2)/40564819207303340847894502572032 - y**2/8 - 1/8,
(15*5**(1/2)*x**2)/8 + (6831541189506395*x*y)/1125899906842624 + (2712083152976557*x)/562949953421312 + (7369110560110821*y**2)/4503599627370496 + (2277180396502131*y)/562949953421312 + 2277180396502129/2251799813685248,
(8601109929670857*y**2)/36028797018963968 - (3479222051795405*y)/2251799813685248 - (1304708269423277*x*y)/562949953421312 - (15*5**(1/2)*x**2)/8 - (2008395711199917*x)/562949953421312 - 1739611025897699/4503599627370496,
(3684555280055409*x)/1125899906842624 + (2277180396502131*y)/562949953421312 + (7369110560110821*x*y)/2251799813685248 - (16227562346459253*x**2)/10141204801825835211973625643008 + (6831541189506395*y**2)/2251799813685248 + 1138590198251065/1125899906842624,
(21636749795279001*x**2)/20282409603651670423947251286016 - (27*y)/4 - (27*x*y)/4 - (27*x)/4 - (27*y**2)/8 - 27/8,
(7369110560110815*x)/2251799813685248 + (5*y)/2 + (7369110560110815*x*y)/2251799813685248 - (21636749795279001*x**2)/40564819207303340847894502572032 + (2150277482417697*y**2)/9007199254740992 + 2545965081804343/1125899906842624,
(2150277482417715*x)/4503599627370496 - (3479222051795405*y)/2251799813685248 + (537569370604429*x*y)/1125899906842624 + (12170671759844439*x**2)/10141204801825835211973625643008 - (1304708269423277*y**2)/1125899906842624 - 217451378237213/562949953421312,
(2150277482417697*x)/4503599627370496 + (5*y)/2 + (8601109929670797*x*y)/18014398509481984 - (16227562346459253*x**2)/20282409603651670423947251286016 + (7369110560110817*y**2)/4503599627370496 + 243118031769713/281474976710656
])
gBasesy = np.array([
- (11*x)/4 - (11*y)/4 - 4*x*y - 2*x**2 - (15*y**2)/8 - 5/8,
-((x + 1)*(x + 2*y + 1))/8,
- x**2/8 - (x*y)/4 - x/4 + (15*y**2)/8 + y - 1/4,
(2277180396502131*x)/562949953421312 + (7369110560110821*y)/2251799813685248 + (7369110560110821*x*y)/2251799813685248 + (6831541189506395*x**2)/2251799813685248 + 2277180396502129/2251799813685248,
(8601109929670857*y)/18014398509481984 - (3479222051795405*x)/2251799813685248 + (8601109929670857*x*y)/18014398509481984 - (1304708269423277*x**2)/1125899906842624 - 869805512948851/2251799813685248,
(7369110560110821*x**2)/4503599627370496 + (6831541189506395*x*y)/1125899906842624 + (2277180396502131*x)/562949953421312 + (15*5**(1/2)*y**2)/8 + (1356041576488279*y)/281474976710656 + 142323774781383/140737488355328,
- (27*x)/4 - (27*y)/4 - (27*x*y)/4 - (27*x**2)/8 - 27/8,
(5*x)/2 + (2150277482417697*y)/4503599627370496 + (2150277482417697*x*y)/4503599627370496 + (7369110560110815*x**2)/4503599627370496 + 3889888508315405/4503599627370496,
(537569370604429*x**2)/2251799813685248 - (8033582844799669*y)/2251799813685248 - (1304708269423277*x*y)/562949953421312 - (15*5**(1/2)*y**2)/8 - (3479222051795405*x)/2251799813685248 - 1739611025897699/4503599627370496,
(5*x)/2 + (3684555280055409*y)/1125899906842624 + (7369110560110817*x*y)/2251799813685248 + (8601109929670797*x**2)/36028797018963968 + 5091930163608687/2251799813685248
])
ggBasesxx = np.array([ - (15*x)/4 - 4*y - 11/4,
(15*x)/4 - y/4 + 1,
- (12170671759844439*x)/20282409603651670423947251286016 - y/4 - 1/4,
(6831541189506395*y)/1125899906842624 + (15*5**(1/2)*x)/4 + 2712083152976557/562949953421312,
- (1304708269423277*y)/562949953421312 - (15*5**(1/2)*x)/4 - 2008395711199917/562949953421312,
(7369110560110821*y)/2251799813685248 - (16227562346459253*x)/5070602400912917605986812821504 + 3684555280055409/1125899906842624,
(21636749795279001*x)/10141204801825835211973625643008 - (27*y)/4 - 27/4,
(7369110560110815*y)/2251799813685248 - (21636749795279001*x)/20282409603651670423947251286016 + 7369110560110815/2251799813685248,
(12170671759844439*x)/5070602400912917605986812821504 + (537569370604429*y)/1125899906842624 + 2150277482417715/4503599627370496,
(8601109929670797*y)/18014398509481984 - (16227562346459253*x)/10141204801825835211973625643008 + 2150277482417697/4503599627370496])
ggBasesxy = np.array([ - 4*x - 4*y - 11/4,
- x/4 - y/4 - 1/4,
- x/4 - y/4 - 1/4,
(6831541189506395*x)/1125899906842624 + (7369110560110821*y)/2251799813685248 + 2277180396502131/562949953421312,
(8601109929670857*y)/18014398509481984 - (1304708269423277*x)/562949953421312 - 3479222051795405/2251799813685248,
(7369110560110821*x)/2251799813685248 + (6831541189506395*y)/1125899906842624 + 2277180396502131/562949953421312,
- (27*x)/4 - (27*y)/4 - 27/4,
(7369110560110815*x)/2251799813685248 + (2150277482417697*y)/4503599627370496 + 5/2,
(537569370604429*x)/1125899906842624 - (1304708269423277*y)/562949953421312 - 3479222051795405/2251799813685248,
(8601109929670797*x)/18014398509481984 + (7369110560110817*y)/2251799813685248 + 5/2])
ggBasesyx = np.array([ - 4*x - 4*y - 11/4,
- x/4 - y/4 - 1/4,
- x/4 - y/4 - 1/4,
(6831541189506395*x)/1125899906842624 + (7369110560110821*y)/2251799813685248 + 2277180396502131/562949953421312,
(8601109929670857*y)/18014398509481984 - (1304708269423277*x)/562949953421312 - 3479222051795405/2251799813685248,
(7369110560110821*x)/2251799813685248 + (6831541189506395*y)/1125899906842624 + 2277180396502131/562949953421312,
- (27*x)/4 - (27*y)/4 - 27/4,
(7369110560110815*x)/2251799813685248 + (2150277482417697*y)/4503599627370496 + 5/2,
(537569370604429*x)/1125899906842624 - (1304708269423277*y)/562949953421312 - 3479222051795405/2251799813685248,
(8601109929670797*x)/18014398509481984 + (7369110560110817*y)/2251799813685248 + 5/2])
ggBasesyy = np.array([ - 4*x - (15*y)/4 - 11/4,
- x/4 - 1/4,
(15*y)/4 - x/4 + 1,
(7369110560110821*x)/2251799813685248 + 7369110560110821/2251799813685248,
(8601109929670857*x)/18014398509481984 + 8601109929670857/18014398509481984,
(6831541189506395*x)/1125899906842624 + (15*5**(1/2)*y)/4 + 1356041576488279/281474976710656,
- (27*x)/4 - 27/4,
(2150277482417697*x)/4503599627370496 + 2150277482417697/4503599627370496,
- (1304708269423277*x)/562949953421312 - (15*5**(1/2)*y)/4 - 8033582844799669/2251799813685248,
(7369110560110817*x)/2251799813685248 + 3684555280055409/1125899906842624])
gBases[:,0] = gBasesx
gBases[:,1] = gBasesy
ggBases[:,0,0] = ggBasesxx
ggBases[:,0,1] = ggBasesxy
    ggBases[:,1,0] = ggBasesyx  # equal to ggBasesxy, since the Hessian of a polynomial is symmetric
ggBases[:,1,1] = ggBasesyy
return Bases, gBases, ggBases
```
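A quick way to sanity-check the Vandermonde construction described in the docstring above is to reproduce it with plain numpy for the quadratic (C=1) case. This is a minimal sketch assuming the six node locations listed in the docstring; `monomials` and `lagrange_bases` are hypothetical helper names, not part of Florence.
```python
import numpy as np

# Quadratic nodes on the reference triangle, as listed in the docstring
nodes = np.array([(-1., -1.), (1., -1.), (-1., 1.), (0., -1.), (-1., 0.), (0., 0.)])

def monomials(x, y):
    # Pascal's triangle row up to degree 2: 1, x, y, x**2, x*y, y**2
    return np.array([1., x, y, x**2, x*y, y**2])

# Vandermonde matrix: each row evaluates the monomials at one node
V = np.array([monomials(x, y) for x, y in nodes])

# Column j of V^{-1} holds the coefficients of the j-th shape function,
# since V @ coeffs_j = e_j (one at node j, zero at all other nodes)
coeffs = np.linalg.inv(V)

def lagrange_bases(x, y):
    return monomials(x, y) @ coeffs

# Kronecker-delta property: N_j(node_i) = delta_ij
assert np.allclose([lagrange_bases(x, y) for x, y in nodes], np.eye(6))
```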
#### File: Florence/MaterialLibrary/IsotropicElectroMechanics_1.py
```python
import numpy as np
from numpy import einsum
from .MaterialBase import Material
from Florence.Tensor import trace, Voigt
class IsotropicElectroMechanics_1(Material):
"""docstring for IsotropicElectroMechanics"""
def __init__(self, ndim, **kwargs):
mtype = type(self).__name__
super(IsotropicElectroMechanics_1, self).__init__(mtype, ndim, **kwargs)
self.nvar = self.ndim+1
self.energy_type = "enthalpy"
self.nature = "nonlinear"
self.fields = "electro_mechanics"
if self.ndim == 2:
self.H_VoigtSize = 5
elif self.ndim == 3:
self.H_VoigtSize = 9
# LOW LEVEL DISPATCHER
self.has_low_level_dispatcher = False
def Hessian(self,StrainTensors, ElectricFieldx=0, elem=0, gcounter=0):
mu = self.mu
lamb = self.lamb
varepsilon_1 = self.eps_1
detF = StrainTensors['J'][gcounter]
mu2 = mu - lamb*(detF-1.0)
lamb2 = lamb*(2.0*detF-1.0) - mu
delta = StrainTensors['I']
E = 1.0*ElectricFieldx
Ex = E.reshape(E.shape[0])
EE = np.outer(E,E)
innerEE = np.dot(E,E.T)
I = delta
# C = lamb2*AijBkl(I,I) +mu2*(AikBjl(I,I)+AilBjk(I,I)) + varepsilon_1*(AijBkl(I,EE) + AijBkl(EE,I) - \
# 2.*AikBjl(EE,I)-2.0*AilBjk(I,EE) ) + varepsilon_1*(np.dot(E.T,E)[0,0])*(AikBjl(I,I)-0.5*AijBkl(I,I))
# ORIGINAL
# C = lamb2*AijBkl(I,I) +mu2*(AikBjl(I,I)+AilBjk(I,I)) +\
# varepsilon_1*(AijBkl(I,EE) + AijBkl(EE,I) -AikBjl(EE,I)-AilBjk(EE,I)-AilBjk(I,EE)-AikBjl(I,EE) ) +\
# varepsilon_1*(np.dot(E.T,E)[0,0])*(0.5*(AikBjl(I,I) + AilBjk(I,I))-0.5*AijBkl(I,I))
# C=0.5*(C+C.T)
# C_Voigt = C
C = lamb2*einsum("ij,kl",I,I) +mu2*(einsum("ik,jl",I,I)+einsum("il,jk",I,I)) +\
varepsilon_1*(einsum("ij,kl",I,EE) + einsum("ij,kl",EE,I) - einsum("ik,jl",EE,I)- einsum("il,jk",I,EE) -\
einsum("il,jl",I,EE)- einsum("ik,jl",I,EE) ) +\
varepsilon_1*(innerEE)*(0.5*( einsum("ik,jl",I,I)+einsum("il,jk",I,I) )-0.5* einsum("ij,kl",I,I) )
C_Voigt = Voigt(C,1)
# Computing the hessian
# Elasticity tensor (C - 4th order tensor)
# C[i,j,k,l] += lamb2*delta[i,j]*delta[k,l]+2.0*mu2*(delta[i,k]*delta[j,l]) #
b = StrainTensors['b'][gcounter]
be = np.dot(b,ElectricFieldx).reshape(self.ndim,1)
# Coupled Tensor (e - 3rd order)
# e[k,i,j] += (-2.0*varepsilon_1/detF)*(be[j]*b[i,k] + be[i]*b[j,k]) #
# e[i,j,k] += 1.0*varepsilon_1*( E[i]*delta[j,k] + E[j]*delta[i,k] - delta[i,j]*E[k]) ##
# e[k,i,j] += 1.0*varepsilon_1*(E[i]*delta[j,k] + E[j]*delta[i,k] - delta[i,j]*E[k]) ##
# Note that the actual piezoelectric tensor is symmetric wrt to the last two indices
# Actual tensor is: e[k,i,j] += 1.0*varepsilon_1*(E[i]*delta[j,k] + E[j]*delta[i,k] - delta[i,j]*E[k])
# We need to make its Voigt_form symmetric with respect to (j,k) instead of (i,j)
# ORIGINAL
# e_voigt = 1.0*varepsilon_1*(AijUk(I,Ex)+AikUj(I,Ex)-UiAjk(Ex,I)).T
e_voigt = 1.0*varepsilon_1*( einsum('ij,k',I,Ex) + einsum('ik,j',I,Ex) - einsum('i,jk',Ex,I) ).T
e_voigt = Voigt(np.ascontiguousarray(e_voigt),1)
# Dielectric Tensor (Permittivity - 2nd order)
Permittivity = -varepsilon_1*delta ##
# bb = np.dot(StrainTensors.b,StrainTensors.b) #
# Permittivity = -(2.0*varepsilon_1/detF)*bb #
factor = -1.
H1 = np.concatenate((C_Voigt,factor*e_voigt),axis=1)
H2 = np.concatenate((factor*e_voigt.T,Permittivity),axis=1)
H_Voigt = np.concatenate((H1,H2),axis=0)
self.H_VoigtSize = H_Voigt.shape[0]
# return H_Voigt, C, e, Permittivity
return H_Voigt
def CauchyStress(self, StrainTensors, ElectricFieldx, elem=0,gcounter=0):
I = StrainTensors['I']
J = StrainTensors['J'][gcounter]
b = StrainTensors['b'][gcounter]
E = ElectricFieldx
mu = self.mu
lamb = self.lamb
varepsilon_1 = self.eps_1
be = np.dot(b,ElectricFieldx)
return 1.0*mu/J*b+(lamb*(J-1.0)-mu)*I + varepsilon_1*(np.dot(E,E.T)-0.5*np.dot(E.T,E)*I) ##
# return 1.0*mu/J*b+(lamb*(J-1.0)-mu)*I - (2.0*varepsilon_1/J)*np.dot(be,be.T)
def ElectricDisplacementx(self, StrainTensors, ElectricFieldx, elem=0, gcounter=0):
varepsilon_1 = self.eps_1
return varepsilon_1*ElectricFieldx[:,None] ##
# J = StrainTensors['J'][gcounter]
# b = StrainTensors['b'][gcounter]
# bb = np.dot(b,b)
# return (2.0*varepsilon_1/StrainTensors.J)*np.dot(bb,ElectricFieldx).reshape(StrainTensors.b.shape[0],1)
```
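The concatenation at the end of `Hessian` above assembles the monolithic electromechanical tangent from its three Voigt blocks. Below is a minimal numpy sketch of that block layout, using placeholder blocks of the 3D sizes (6x6 elasticity, 6x3 coupling, 3x3 permittivity) assumed purely for illustration.
```python
import numpy as np

C_Voigt      = np.eye(6)         # placeholder elasticity block
e_voigt      = np.zeros((6, 3))  # placeholder coupling block
Permittivity = -np.eye(3)        # placeholder dielectric block

factor = -1.
H1 = np.concatenate((C_Voigt, factor*e_voigt), axis=1)         # top row:    [C,    -e  ]
H2 = np.concatenate((factor*e_voigt.T, Permittivity), axis=1)  # bottom row: [-e^T, eps ]
H_Voigt = np.concatenate((H1, H2), axis=0)

assert H_Voigt.shape == (9, 9)   # the 3D H_VoigtSize used by the class
```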
#### File: Florence/MaterialLibrary/IsotropicElectroMechanics_3.py
```python
import numpy as np
from numpy import einsum
from Florence.Tensor import trace, Voigt
from .MaterialBase import Material
class IsotropicElectroMechanics_3(Material):
"""Isotropic electromechanical model in terms of Helmoltz energy
with one nonlinear electrostatic invariant
W(C,E) = W_n(C) - eps_1/2*J*C**(-1):(E 0 E) + eps_2/2*(E*E)**2
W_n(C) = mu/2*(C:I-3) - mu*lnJ + lamb/2*(lnJ)**2
where 0 stands for dyadic/outer product
"""
def __init__(self, ndim, **kwargs):
mtype = type(self).__name__
super(IsotropicElectroMechanics_3, self).__init__(mtype, ndim, **kwargs)
# REQUIRES SEPARATELY
self.nvar = self.ndim+1
self.energy_type = "enthalpy"
self.nature = "nonlinear"
self.fields = "electro_mechanics"
if self.ndim == 2:
self.H_VoigtSize = 5
elif self.ndim == 3:
self.H_VoigtSize = 9
# LOW LEVEL DISPATCHER
self.has_low_level_dispatcher = True
# self.has_low_level_dispatcher = False
def KineticMeasures(self,F,ElectricFieldx, elem=0):
from Florence.MaterialLibrary.LLDispatch._IsotropicElectroMechanics_3_ import KineticMeasures
return KineticMeasures(self, np.ascontiguousarray(F), ElectricFieldx)
def Hessian(self,StrainTensors,ElectricFieldx=0,elem=0,gcounter=0):
mu = self.mu
lamb = self.lamb
eps_1 = self.eps_1
eps_2 = self.eps_2
I = StrainTensors['I']
J = StrainTensors['J'][gcounter]
b = StrainTensors['b'][gcounter]
E = 1.0*ElectricFieldx.reshape(self.ndim,1)
Ex = E.reshape(E.shape[0])
EE = np.dot(E,E.T)
be = np.dot(b,ElectricFieldx).reshape(self.ndim)
C_Voigt = lamb/J*einsum('ij,kl',I,I) - (lamb*np.log(J) - mu)/J*( einsum('ik,jl',I,I) + einsum('il,jk',I,I) ) + \
eps_1*( einsum('ij,kl',I,EE) + einsum('ij,kl',EE,I) - einsum('ik,jl',EE,I) - einsum('il,jk',EE,I) - \
einsum('ik,jl',I,EE) - einsum('il,jk',I,EE) ) + \
eps_1*(np.dot(E.T,E)[0,0])*0.5*( einsum('ik,jl',I,I) + einsum('il,jk',I,I) - einsum('ij,kl',I,I) )
C_Voigt = Voigt(C_Voigt,1)
P_Voigt = eps_1*( einsum('ik,j',I,Ex) + einsum('jk,i',I,Ex) - einsum('ij,k',I,Ex))
P_Voigt = Voigt(P_Voigt,1)
        E_Voigt = -eps_1*I + 2.*eps_2/J*(2.*np.outer(be, be) + np.dot(be, be)*I)  # be is 1D, so the dyadic term needs np.outer
# Build the Hessian
factor = -1.
H1 = np.concatenate((C_Voigt,factor*P_Voigt),axis=1)
H2 = np.concatenate((factor*P_Voigt.T,E_Voigt),axis=1)
H_Voigt = np.concatenate((H1,H2),axis=0)
return H_Voigt
def CauchyStress(self,StrainTensors,ElectricFieldx,elem=0,gcounter=0):
mu = self.mu
lamb = self.lamb
eps_1 = self.eps_1
I = StrainTensors['I']
J = StrainTensors['J'][gcounter]
b = StrainTensors['b'][gcounter]
E = ElectricFieldx.reshape(self.ndim,1)
stress = 1.0*mu/J*(b-I) + lamb/J*np.log(J)*I + \
eps_1*(np.dot(E,E.T) - 0.5*np.dot(E.T,E)[0,0]*I)
return stress
def ElectricDisplacementx(self,StrainTensors,ElectricFieldx,elem=0,gcounter=0):
J = StrainTensors['J'][gcounter]
b = StrainTensors['b'][gcounter]
E = ElectricFieldx.reshape(self.ndim,1)
varepsilon_1 = self.eps_1
eps_2 = self.eps_2
be = np.dot(b,E)
ebe = np.dot(E.T,be)[0,0]
D = varepsilon_1*E - 2.*eps_2/J*(ebe)*be
return D
```
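The stress expression in `CauchyStress` above can be checked standalone by evaluating the same formula with plain numpy for a given deformation gradient. A minimal sketch with made-up material constants (mu, lamb, eps_1 are assumed values, not calibrated data):
```python
import numpy as np

mu, lamb, eps_1 = 1.0, 2.0, 0.1        # assumed material constants
F = np.array([[1.1, 0.05, 0.0],
              [0.0, 0.95, 0.0],
              [0.0, 0.0,  1.0]])       # sample deformation gradient
E = np.array([[0.0], [0.0], [0.2]])    # sample electric field (column vector)

I = np.eye(3)
J = np.linalg.det(F)
b = F @ F.T                            # left Cauchy-Green tensor

# Same expression as IsotropicElectroMechanics_3.CauchyStress
stress = mu/J*(b - I) + lamb/J*np.log(J)*I + \
         eps_1*(E @ E.T - 0.5*(E.T @ E)[0, 0]*I)
print(stress)
```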
#### File: Florence/MaterialLibrary/MaterialBase.py
```python
from __future__ import print_function
import numpy as np
from Florence.Utils import insensitive
from warnings import warn
# BASE CLASS FOR ALL MATERIAL MODELS - SHOULD NOT BE USED DIRECTLY
class Material(object):
"""Base class for all material models"""
def __init__(self, mtype, ndim, energy_type="internal_energy",
lame_parameter_1=None, lame_parameter_2=None, poissons_ratio=None, youngs_modulus=None,
shear_modulus=None, transverse_iso_youngs_modulus=None, transverse_iso_shear_modulus=None,
bulk_modulus=None, density=None, permittivity=None, permeability=None,
is_compressible=True, is_incompressible=False, is_nearly_incompressible=False,
is_nonisotropic=True,is_anisotropic=False,is_transversely_isotropic=False, anisotropic_orientations=None,
**kwargs):
# SAFETY CHECKS
if not isinstance(mtype, str):
raise TypeError("Type of material model should be given as a string")
if not isinstance(energy_type, str):
raise TypeError("Material energy can either be 'internal_energy' or 'enthalpy'")
self.energy_type = energy_type
# MATERIAL CONSTANTS
self.mu = lame_parameter_1
self.lamb = lame_parameter_2
self.nu = poissons_ratio
self.E = youngs_modulus
self.E_A = transverse_iso_youngs_modulus
self.G_A = transverse_iso_shear_modulus
self.K = bulk_modulus
self.rho = density
# if self.rho is None:
# self.rho = 0.0
self.e = permittivity
self.u = permeability
# SET ALL THE OPTIONAL KEYWORD ARGUMENTS
for i in kwargs.keys():
if "__" not in i:
setattr(self,i,kwargs[i])
self.mtype = mtype
self.ndim = ndim
if 'elec' not in insensitive(self.mtype):
if 'magnet' not in insensitive(self.mtype):
self.nvar = self.ndim
elif 'elec' in insensitive(self.mtype) and 'magnet' not in insensitive(self.mtype):
self.nvar = self.ndim + 1
elif 'elec' not in insensitive(self.mtype) and 'magnet' in insensitive(self.mtype):
self.nvar = self.ndim + 1
elif 'elec' in insensitive(self.mtype) and 'magnet' in insensitive(self.mtype):
self.nvar = self.ndim + 2
else:
self.nvar = self.ndim
self.H_Voigt = None
if self.mu is None or self.lamb is None:
if self.E is not None and self.nu is not None:
self.GetLameParametersFromYoungsPoisson()
# else:
# warn("You must set the material constants for problem")
try:
if self.mtype == 'LinearElastic' or \
self.mtype == 'IncrementalLinearElastic':
if self.ndim == 2:
self.H_Voigt = self.lamb*np.array([[1.,1.,0.],[1.,1.,0],[0.,0.,0.]]) +\
self.mu*np.array([[2.,0.,0.],[0.,2.,0],[0.,0.,1.]])
else:
block_1 = np.zeros((6,6),dtype=np.float64); block_1[:3,:3] = np.ones((3,3))
block_2 = np.eye(6,6); block_2[0,0],block_2[1,1],block_2[2,2]=2.,2.,2.
self.H_Voigt = self.lamb*block_1 + self.mu*block_2
else:
if self.ndim == 2:
self.vIijIkl = np.array([[1.,1.,0.],[1.,1.,0],[0.,0.,0.]])
self.vIikIjl = np.array([[2.,0.,0.],[0.,2.,0],[0.,0.,1.]])
else:
block_1 = np.zeros((6,6),dtype=np.float64); block_1[:3,:3] = np.ones((3,3))
block_2 = np.eye(6,6); block_2[0,0],block_2[1,1],block_2[2,2]=2.,2.,2.
self.vIijIkl = block_1
self.vIikIjl = block_2
I = np.eye(self.ndim,self.ndim)
self.Iijkl = np.einsum('ij,kl',I,I)
self.Iikjl = np.einsum('ik,jl',I,I) + np.einsum('il,jk',I,I)
except TypeError:
# CATCH ONLY TypeError. OTHER MATERIAL CONSTANT RELATED ERRORS ARE SELF EXPLANATORY
raise ValueError("Material constants for {} does not seem correct".format(self.mtype))
if self.H_Voigt is not None:
self.H_VoigtSize = self.H_Voigt.shape[0]
self.is_compressible = is_compressible
self.is_nearly_incompressible = is_nearly_incompressible
self.is_incompressible = is_incompressible
self.is_anisotropic = is_anisotropic
self.is_transversely_isotropic = is_transversely_isotropic
self.is_nonisotropic = is_nonisotropic
self.anisotropic_orientations = anisotropic_orientations
self.pressure = 0.0
self.has_low_level_dispatcher = False
def SetFibresOrientation(self,anisotropic_orientations):
self.anisotropic_orientations = anisotropic_orientations
def GetFibresOrientation(self, mesh, interior_orientation=None, plot=False):
"""Convenience function for computing anisotropic orientations of fibres
in a transversely isotropic material.
        The orientation is computed based on the popular concept of reinforced composites,
        where, for elements at the boundary, the fibres are perpendicular to the boundary
        edge/face.
input:
mesh: [Mesh]
interior_orientation: [1D numpy.array or list] orientation of all interior
                fibres. Default is the negative X-axis, i.e. [-1.,0.] for 2D
and [-1.,0.,0.] for 3D
"""
ndim = mesh.InferSpatialDimension()
if self.ndim != ndim:
raise ValueError('Mesh object and material model do not have the same spatial dimension')
if self.ndim == 2:
edge_elements = mesh.GetElementsWithBoundaryEdges()
self.anisotropic_orientations = np.zeros((mesh.nelem,self.ndim),dtype=np.float64)
for iedge in range(edge_elements.shape[0]):
coords = mesh.points[mesh.edges[iedge,:],:]
min_x = min(coords[0,0],coords[1,0])
dist = (coords[0,0:]-coords[1,:])/np.linalg.norm(coords[0,0:]-coords[1,:])
if min_x != coords[0,0]:
dist *= -1
self.anisotropic_orientations[edge_elements[iedge],:] = dist
if interior_orientation is None:
interior_orientation = [-1.,0.]
for i in range(mesh.nelem):
if np.allclose(self.anisotropic_orientations[i,:],0.):
self.anisotropic_orientations[i,:] = interior_orientation
if plot:
Xs,Ys = [],[]
for i in range(mesh.nelem):
x_avg = np.sum(mesh.points[mesh.elements[i,:],0])/mesh.points[mesh.elements[i,:],0].shape[0]
y_avg = np.sum(mesh.points[mesh.elements[i,:],1])/mesh.points[mesh.elements[i,:],1].shape[0]
Xs=np.append(Xs,x_avg)
Ys=np.append(Ys,y_avg)
import matplotlib.pyplot as plt
figure = plt.figure()
q = plt.quiver(Xs, Ys, self.anisotropic_orientations[:,0],
self.anisotropic_orientations[:,1], color='Teal',
headlength=5,width=0.004)
if mesh.element_type == "tri":
plt.triplot(mesh.points[:,0],mesh.points[:,1], mesh.elements[:,:3],color='k')
else:
from Florence.MeshGeneration.NodeArrangement import NodeArrangementQuad
C = mesh.InferPolynomialDegree() - 1
reference_edges = NodeArrangementQuad(C)[0]
reference_edges = np.concatenate((reference_edges,reference_edges[:,1,None]),axis=1)
reference_edges = np.delete(reference_edges,1,1)
all_edge_elements = mesh.GetElementsEdgeNumberingQuad()
mesh.GetEdgesQuad()
x_edges = np.zeros((C+2,mesh.all_edges.shape[0]))
y_edges = np.zeros((C+2,mesh.all_edges.shape[0]))
BasesOneD = np.eye(2,2)
for iedge in range(mesh.all_edges.shape[0]):
ielem = all_edge_elements[iedge,0]
edge = mesh.elements[ielem,reference_edges[all_edge_elements[iedge,1],:]]
x_edges[:,iedge], y_edges[:,iedge] = mesh.points[edge,:].T
plt.plot(x_edges,y_edges,'-k')
plt.axis('equal')
plt.axis('off')
plt.show()
elif self.ndim == 3:
face_elements = mesh.GetElementsWithBoundaryFaces()
self.anisotropic_orientations = np.zeros((mesh.nelem,self.ndim),dtype=np.float64)
for iface in range(face_elements.shape[0]):
coords = mesh.points[mesh.faces[iface,:],:]
min_x = min(coords[0,0], coords[1,0], coords[2,0])
# ORIENTS THE FIBRE TO ONE OF THE EDGES OF THE FACE
fibre = (coords[0,:]-coords[1,:])/np.linalg.norm(coords[0,:]-coords[1,:])
if min_x != coords[0,0]:
fibre *= -1
self.anisotropic_orientations[face_elements[iface],:] = fibre
if interior_orientation is None:
interior_orientation = [-1.,0.,0.]
for i in range(mesh.nelem):
if np.allclose(self.anisotropic_orientations[i,:],0.):
self.anisotropic_orientations[i,:] = interior_orientation
if plot:
# all_face_elements = mesh.GetElementsFaceNumbering()
Xs = np.zeros(mesh.elements.shape[0])
Ys = np.zeros(mesh.elements.shape[0])
Zs = np.zeros(mesh.elements.shape[0])
# divider = mesh.points[mesh.elements[0,:],0].shape[0]
# for i in range(mesh.nelem):
# Xs[i] = np.sum(mesh.points[mesh.elements[i,:],0])/divider
# Ys[i] = np.sum(mesh.points[mesh.elements[i,:],1])/divider
# Zs[i] = np.sum(mesh.points[mesh.elements[i,:],2])/divider
divider = mesh.points[mesh.faces[0,:],0].shape[0]
for i in range(mesh.faces.shape[0]):
Xs[face_elements[i,0]] = np.sum(mesh.points[mesh.faces[i,:],0])/divider
Ys[face_elements[i,0]] = np.sum(mesh.points[mesh.faces[i,:],1])/divider
Zs[face_elements[i,0]] = np.sum(mesh.points[mesh.faces[i,:],2])/divider
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
from mayavi import mlab
from Florence.PostProcessing import PostProcess
if mesh.element_type == "tet":
tmesh = PostProcess.TessellateTets(mesh,np.zeros_like(mesh.points),
interpolation_degree=0)
elif mesh.element_type == "hex":
tmesh = PostProcess.TessellateHexes(mesh,np.zeros_like(mesh.points),
interpolation_degree=0)
figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))
mlab.quiver3d(Xs, Ys, Zs, self.anisotropic_orientations[:,0],
self.anisotropic_orientations[:,1], self.anisotropic_orientations[:,2],
color=(0.,128./255,128./255),line_width=2)
src = mlab.pipeline.scalar_scatter(tmesh.x_edges.T.copy().flatten(),
tmesh.y_edges.T.copy().flatten(), tmesh.z_edges.T.copy().flatten())
src.mlab_source.dataset.lines = tmesh.connections
lines = mlab.pipeline.stripper(src)
h_edges = mlab.pipeline.surface(lines, color = (0,0,0), line_width=2)
mlab.show()
def Linearise(self,energy):
"""Linearises a material model, by dispatching invariants to self.LineariseInvariant"""
pass
def LineariseInvariant(self,invariant_to_linearise):
"""Give an invariant in the form of a dictionary in order to linearise it.
input:
invariant_to_linearise: [dict] must contain the following keys:
invariant [str] the invariant to linearise
            coefficient [str] a material coefficient
kinematics [str] for instance deformation gradient tensor F
constants [str] constants like kronecker delta d_ij
Linearisation is always carried out with respect to the right Cauchy-Green tensor (C)
>>> material = Material("MooneyRivlin",3)
        >>> material.LineariseInvariant({'invariant':'uC:I','coefficient':'u','kinematics':'C','constants':'I'})
Cauchy stress: 2*u*I
Spatial Hessian: 0
"""
if not isinstance(invariant_to_linearise,dict):
raise ValueError('invariant_to_linearise should be a dictionary')
if 'invariant' not in invariant_to_linearise.keys():
raise ValueError("invariant_to_linearise should have at least one key named 'invariant' with no spaces")
strip_invariant = "".join(invariant_to_linearise['invariant'].split())
if 'coefficient' not in invariant_to_linearise.keys():
coefficient = ''
invariant = strip_invariant
else:
coefficient = "".join(invariant_to_linearise['coefficient'].split())
invariant = strip_invariant.split(coefficient)
if len(invariant) > 1:
if invariant[0] == '':
if invariant[1][0] == '*':
invariant = invariant[1][1:]
elif invariant[1] == '':
if invariant[0][-1] == '*':
invariant = invariant[0][:-1]
        delta = u'\u03B4'  # Greek small delta; encoding it to bytes would break string concatenation on Python 3
if "C:I" in invariant or "F:F" in invariant or "trC" in invariant or "II_F" in invariant or "I_C" in invariant:
cauchy = "2.0/J*"+coefficient+"*I"
elasticity = "0"
if "G:I" in invariant or "H:H" in invariant or "trG" in invariant or "II_H" in invariant or "I_G" in invariant:
cauchy = "2.0/J*"+coefficient+"*(trace(b)*I-b)*b"
elasticity = "4.0/J*"+coefficient+"*(b_ij*b_kl - b_ikb_jl)"
if "lnJ" in invariant:
cauchy = "2.0/J*"+coefficient+"*I"
elasticity = "4.0/J*"+coefficient+"*"+delta+"_ik*"+delta+"_jl"
if "(J-1)**2" in invariant:
cauchy = "2.0*"+coefficient+"*(J-1)*I"
elasticity = "2.0*"+coefficient+"*(2*J-1)"+delta+"_ij*"+delta+"_jk"+\
"-4.0*"+coefficient+"*(J-1)"+delta+"_ik*"+delta+"_jl"
if "NCN" in invariant or "FNFN" in invariant or "II_FN" in invariant:
cauchy = "2.0/J*"+coefficient+"*(FN)_i(FN)_j"
elasticity = "0"
if "NGN" in invariant or "HNHN" in invariant or "II_HN" in invariant:
cauchy = "2.0/J*"+coefficient+"*( ((HN)_k(HN)_k)*I -(HN)_i(HN)_j )"
elasticity = "4.0/J*"+coefficient+"*( -"+delta+"_ij*(HN)_k(HN)_l +(HN)_i(HN)_j*"+\
delta+"_kl" + "(HN)_m(HN)_m*"+delta+"_ij"+delta+"_kl" + "-(HN)_m(HN)_m*"+delta+"_ik"+delta+"_jl" +\
delta+"_il"+"(HN)_j(HN)_k"+delta+"_jl"+"(HN)_i(HN)_k )"
if "cauchy" not in locals() or "elasticity" not in locals():
cauchy = "NIL"
elasticity = "NIL"
warn("I could not linearise the invariant %s" % invariant)
print("Cauchy stress tensor:\t\t\t", cauchy)
print("Spatial Hessian:\t\t\t\t", elasticity)
return cauchy, elasticity
def GetYoungsPoissonsFromLameParameters(self):
        assert self.mu is not None
        assert self.lamb is not None
self.E = self.mu*(3.0*self.lamb + 2.*self.mu)/(self.lamb + self.mu)
self.nu = self.lamb/2.0/(self.lamb + self.mu)
def GetLameParametersFromYoungsPoisson(self):
        assert self.nu is not None
        assert self.E is not None
self.lamb = self.E*self.nu/(1.+self.nu)/(1.-2.0*self.nu)
self.mu = self.E/2./(1+self.nu)
@property
def Types(self):
"""Returns available material types"""
import os
pwd = os.path.dirname(os.path.realpath(__file__))
list_of_materials = os.listdir(pwd)
list_of_materials = [list_of_materials[i].split(".")[0] for i in range(len(list_of_materials))]
list_of_materials = list(np.unique(list_of_materials))
if "__init__" in list_of_materials:
idx = list_of_materials.index("__init__")
del list_of_materials[idx]
return np.asarray(list_of_materials).reshape(-1,1)
def GetType(self):
"""Get the type of material used"""
if self.mtype is None:
raise ValueError("You have not specified a material type. "
"Call the 'Types' property for a list of available material models")
return self.mtype
def SetType(self,mtype):
"""Set the type of material to be used"""
self.mtype = mtype
def pprint(self):
"""Pretty print"""
import pandas
from copy import deepcopy
Dict = deepcopy(self.__dict__)
        for key in list(Dict.keys()):  # copy the keys, since entries may be deleted while iterating
if Dict[key] is None:
Dict[key] = np.NAN
if isinstance(Dict[key],np.ndarray):
del Dict[key]
print(pandas.DataFrame(Dict,index=["Available parameters:"]))
```
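The two conversion helpers near the end of `Material` are simple closed-form maps between (E, nu) and (mu, lamb). A minimal standalone sketch of the same formulas, useful as a round-trip sanity check:
```python
def lame_from_youngs_poisson(E, nu):
    lamb = E*nu/((1. + nu)*(1. - 2.*nu))
    mu = E/(2.*(1. + nu))
    return mu, lamb

def youngs_poisson_from_lame(mu, lamb):
    E = mu*(3.*lamb + 2.*mu)/(lamb + mu)
    nu = lamb/(2.*(lamb + mu))
    return E, nu

# Round trip with steel-like values E = 210e9 Pa, nu = 0.3
mu, lamb = lame_from_youngs_poisson(210e9, 0.3)
E, nu = youngs_poisson_from_lame(mu, lamb)
assert abs(E/210e9 - 1.) < 1e-12 and abs(nu - 0.3) < 1e-12
```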
#### File: Florence/MaterialLibrary/NearlyIncompressibleMooneyRivlin.py
```python
from __future__ import division
import numpy as np
from numpy import einsum
from .MaterialBase import Material
from Florence.Tensor import trace, Voigt
from math import sqrt
#####################################################################################################
# NEARLY INCOMPRESSIBLE MOONEY-RIVLIN
#####################################################################################################
class NearlyIncompressibleMooneyRivlin(Material):
""" A nearly incompressible Mooney-Rivlin material model whose energy functional is given by:
W(C,G,J**2) = alpha*J**(-2/3)*(C:I) + beta*J**(-2)*(G:I)**(3/2) + kappa/2*(J-1)**2
Note that this energy is decomposed into deviatoric and volumetric components such that
C:I and (G:I)**(3/2) contain only deviatoric contribution and the volumetric contribution
is taken care of by the bulk modulus (kappa) term (J-1)**2
"""
def __init__(self, ndim, **kwargs):
mtype = type(self).__name__
super(NearlyIncompressibleMooneyRivlin, self).__init__(mtype, ndim, **kwargs)
self.gamma=1.
self.alpha = self.gamma*self.mu/2.
self.beta = (self.mu - 2.*self.alpha)/3./sqrt(3.)
# self.kappa = self.lamb+2.0*self.mu/3.0 # or
self.kappa = self.lamb+4.0/3.0*self.alpha+2.0*sqrt(3.0)*self.beta
self.is_transversely_isotropic = False
self.energy_type = "internal_energy"
self.nature = "nonlinear"
self.fields = "mechanics"
if self.ndim==3:
self.H_VoigtSize = 6
elif self.ndim==2:
self.H_VoigtSize = 3
# LOW LEVEL DISPATCHER
self.has_low_level_dispatcher = True
# self.has_low_level_dispatcher = False
def KineticMeasures(self,F,ElectricFieldx=0, elem=0):
from Florence.MaterialLibrary.LLDispatch._NearlyIncompressibleMooneyRivlin_ import KineticMeasures
return KineticMeasures(self,np.ascontiguousarray(F))
def Hessian(self,StrainTensors,ElectricFieldx=0,elem=0,gcounter=0):
alpha = self.alpha
beta = self.beta
kappa = self.kappa
I = StrainTensors['I']
J = StrainTensors['J'][gcounter]
b = StrainTensors['b'][gcounter]
F = StrainTensors['F'][gcounter]
# b=np.dot(F,F.T)
H = J*np.linalg.inv(F).T
g = np.dot(H,H.T)
if self.ndim == 2:
trb = trace(b)+1
trg = trace(g)+J**2
elif self.ndim == 3:
trb = trace(b)
trg = trace(g)
H_Voigt = -4/3.*alpha*J**(-5/3.)*( einsum('ij,kl',b,I) + einsum('ij,kl',I,b) ) + \
4.*alpha/9.*J**(-5/3.)*trb*einsum('ij,kl',I,I) + \
2/3.*alpha*J**(-5/3.)*trb*( einsum('il,jk',I,I) + einsum('ik,jl',I,I) ) + \
beta*J**(-3)*trg**(3./2.)* ( einsum('ij,kl',I,I) - einsum('ik,jl',I,I) - einsum('il,jk',I,I) ) - \
3.*beta*J**(-3)*trg**(1./2.)*( einsum('ij,kl',I,g) + einsum('ij,kl',g,I) ) + \
6.*beta*J**(-3)*trg**(1./2.)*( einsum('ik,jl',I,g) + einsum('il,jk',g,I) ) + \
3.*beta*J**(-3)*trg**(-1./2.)*( einsum('ij,kl',g,g) ) + \
kappa*(2.0*J-1)*einsum('ij,kl',I,I) - kappa*(J-1)*(einsum('ik,jl',I,I)+einsum('il,jk',I,I)) # #
# # WITH PRE-COMPUTED IDENTITY TENSORS
# H_Voigt = -4/3.*alpha*J**(-5/3.)*( einsum('ij,kl',b,I) + einsum('ij,kl',I,b) ) + \
# 4.*alpha/9.*J**(-5/3.)*trb*self.Iijkl + \
# 2/3.*alpha*J**(-5/3.)*trb*self.Iikjl + \
# beta*J**(-3)*trg**(3./2.)*( self.Iijkl - self.Iikjl ) - \
# 3.*beta*J**(-3)*trg**(1./2.)*( einsum('ij,kl',I,g) + einsum('ij,kl',g,I) ) + \
# 6.*beta*J**(-3)*trg**(1./2.)*( einsum('ik,jl',I,g) + einsum('il,jk',g,I) ) + \
# 3.*beta*J**(-3)*trg**(-1./2.)*( einsum('ij,kl',g,g) ) + \
# kappa*(2.0*J-1)*self.Iijkl - kappa*(J-1)*self.Iikjl
H_Voigt = Voigt( H_Voigt ,1)
self.H_VoigtSize = H_Voigt.shape[0]
return H_Voigt
def CauchyStress(self,StrainTensors,ElectricFieldx,elem=0,gcounter=0):
alpha = self.alpha
beta = self.beta
kappa = self.kappa
I = StrainTensors['I']
J = StrainTensors['J'][gcounter]
b = StrainTensors['b'][gcounter]
F = StrainTensors['F'][gcounter]
H = J*np.linalg.inv(F).T
g = np.dot(H,H.T)
bcross = trace(b)*b-np.dot(b,b)
# b=np.dot(F,F.T)
# stress = 2.*alpha*J**(-5/3.)*b - 2./3.*alpha*J**(-5/3.)*trace(b)*I + \
# beta*J**(-3)*trace(g)**(3./2.)*I - 3*beta*J**(-3)*trace(g)**(1./2.)*g + \
# +(kappa*(J-1.0))*I #####
if self.ndim == 2:
trb = trace(b)+1
trg = trace(g)+J**2
elif self.ndim == 3:
trb = trace(b)
trg = trace(g)
stress = 2.*alpha*J**(-5/3.)*b - 2./3.*alpha*J**(-5/3.)*(trb)*I + \
beta*J**(-3)*(trg)**(3./2.)*I - 3*beta*J**(-3)*(trg)**(1./2.)*g + \
+(kappa*(J-1.0))*I
return stress
```
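The constructor above derives (alpha, beta, kappa) from (mu, lamb) so that the deviatoric terms reproduce the shear modulus while the (J-1)**2 term carries the volumetric stiffness. A minimal sketch of that derivation, assuming gamma = 1 as hard-coded in the class:
```python
from math import sqrt

mu, lamb = 1.0, 10.0   # assumed Lame parameters
gamma = 1.

alpha = gamma*mu/2.
beta = (mu - 2.*alpha)/3./sqrt(3.)      # zero when gamma = 1, so the C:I term carries all of mu
kappa = lamb + 4./3.*alpha + 2.*sqrt(3.)*beta

# With gamma = 1 this collapses to the familiar bulk modulus kappa = lamb + 2*mu/3,
# matching the commented-out alternative in __init__
assert abs(kappa - (lamb + 2.*mu/3.)) < 1e-12
```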
#### File: Florence/MaterialLibrary/NeoHookean.py
```python
import numpy as np
from .MaterialBase import Material
from Florence.Tensor import trace
class NeoHookean(Material):
"""The fundamental Neo-Hookean internal energy, described in Bonet et. al.
W(C) = mu/2*(C:I-3)- mu*lnJ + lamb/2*(J-1)**2
"""
def __init__(self, ndim, **kwargs):
mtype = type(self).__name__
super(NeoHookean, self).__init__(mtype, ndim, **kwargs)
self.is_transversely_isotropic = False
self.energy_type = "internal_energy"
self.nature = "nonlinear"
self.fields = "mechanics"
if self.ndim==3:
self.H_VoigtSize = 6
elif self.ndim==2:
self.H_VoigtSize = 3
# LOW LEVEL DISPATCHER
self.has_low_level_dispatcher = True
# self.has_low_level_dispatcher = False
def KineticMeasures(self,F,ElectricFieldx=0, elem=0):
from Florence.MaterialLibrary.LLDispatch._NeoHookean_ import KineticMeasures
return KineticMeasures(self,F)
def Hessian(self,StrainTensors,ElectricFieldx=None,elem=0,gcounter=0):
I = StrainTensors['I']
J = StrainTensors['J'][gcounter]
mu2 = self.mu/J- self.lamb*(J-1.0)
lamb2 = self.lamb*(2*J-1.0)
H_Voigt = lamb2*self.vIijIkl+mu2*self.vIikIjl
self.H_VoigtSize = H_Voigt.shape[0]
return H_Voigt
def CauchyStress(self,StrainTensors,ElectricFieldx=None,elem=0,gcounter=0):
I = StrainTensors['I']
J = StrainTensors['J'][gcounter]
b = StrainTensors['b'][gcounter]
mu = self.mu
lamb = self.lamb
return 1.0*mu/J*b + (lamb*(J-1.0)-mu/J)*I
def InternalEnergy(self,StrainTensors,elem=0,gcounter=0):
mu = self.mu
lamb = self.lamb
I = StrainTensors['I']
J = StrainTensors['J'][gcounter]
F = StrainTensors['F'][gcounter]
C = np.dot(F.T,F)
energy = mu/2.*(trace(C) - 3.) - mu*np.log(J) + lamb/2.*(J-1.)**2
return energy
```
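The `InternalEnergy` expression above is easy to sanity-check standalone: at F = I it should vanish, and it should grow under any non-trivial deformation. A minimal numpy sketch of the same energy, with assumed constants:
```python
import numpy as np

def neo_hookean_energy(F, mu=1.0, lamb=2.0):
    # W(C) = mu/2*(C:I - 3) - mu*lnJ + lamb/2*(J-1)**2
    J = np.linalg.det(F)
    C = F.T @ F
    return mu/2.*(np.trace(C) - 3.) - mu*np.log(J) + lamb/2.*(J - 1.)**2

assert abs(neo_hookean_energy(np.eye(3))) < 1e-14          # stress-free reference state
assert neo_hookean_energy(np.diag([1.2, 1.0, 1.0])) > 0.   # uniaxial stretch stores energy
```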
#### File: Florence/QuadratureRules/FeketePointsTet.py
```python
import numpy as np
def FeketePointsTet(C):
if C==0:
feketeNodes = np.array([
[-1., -1., -1.],
[1., -1., -1.],
[-1., 1., -1.],
[-1., -1., 1.]
])
elif C==1:
feketeNodes = np.array([
[-1., -1., -1.],
[1., -1., -1.],
[-1., 1., -1.],
[-1., -1., 1.],
[0., -1., -1.],
[-1., 0., -1.],
[0., 0., -1.],
[-1., -1., 0.],
[0., -1., 0.],
[-1., 0., 0.]
])
elif C==2:
feketeNodes = np.array([
[-1.000000000000000, -1.000000000000000, -1.000000000000000],
[1.000000000000000, -1.000000000000000, -1.000000000000000],
[-1.000000000000000, 1.000000000000000, -1.000000000000000],
[-1.000000000000000, -1.000000000000000, 1.000000000000000],
[-0.447213595499958, -1.000000000000000, -1.000000000000000],
[0.447213595499958, -1.000000000000000, -1.000000000000000],
[-1.000000000000000, -0.447213595499958, -1.000000000000000],
[-0.333333333333333, -0.333333333333333, -1.000000000000000],
[0.447213595499958, -0.447213595499958, -1.000000000000000],
[-1.000000000000000, 0.447213595499958, -1.000000000000000],
[-0.447213595499958, 0.447213595499958, -1.000000000000000],
[-1.000000000000000, -1.000000000000000, -0.447213595499958],
[-0.333333333333333, -1.000000000000000, -0.333333333333333],
[0.447213595499958, -1.000000000000000, -0.447213595499958],
[-1.000000000000000, -0.333333333333333, -0.333333333333333],
[-0.333333333333333, -0.333333333333333, -0.333333333333333],
[-1.000000000000000, 0.447213595499958, -0.447213595499958],
[-1.000000000000000, -1.000000000000000, 0.447213595499958],
[-0.447213595499958, -1.000000000000000, 0.447213595499958],
[-1.000000000000000, -0.447213595499958, 0.447213595499958]
])
elif C==3:
feketeNodes = np.array([
[-1.000000000000000, -1.000000000000000, -1.000000000000000],
[1.000000000000000, -1.000000000000000, -1.000000000000000],
[-1.000000000000000, 1.000000000000000, -1.000000000000000],
[-1.000000000000000, -1.000000000000000, 1.000000000000000],
[-0.654653670707977, -1.000000000000000, -1.000000000000000],
[0.0000, -1.000000000000000, -1.000000000000000],
[0.654653670707977, -1.000000000000000, -1.000000000000000],
[-1.000000000000000, -0.654653670707977, -1.000000000000000],
[-0.551551223569326, -0.551551223569326, -1.000000000000000],
[0.103102447138651, -0.551551223569326, -1.000000000000000],
[0.654653670707977, -0.654653670707977, -1.000000000000000],
[-1.000000000000000, 0.00000000000000, -1.000000000000000],
[-0.551551223569326, 0.103102447138651, -1.000000000000000],
[0.000, 0.000000000000000, -1.000000000000000],
[-1.000000000000000, 0.654653670707977, -1.000000000000000],
[-0.654653670707977, 0.654653670707977, -1.000000000000000],
[-1.000000000000000, -1.000000000000000, -0.654653670707977],
[-0.551551223569326, -1.000000000000000, -0.551551223569326],
[0.103102447138651, -1.000000000000000, -0.551551223569326],
[0.654653670707977, -1.000000000000000, -0.654653670707977],
[-1.000000000000000, -0.551551223569326, -0.551551223569326],
[-0.500000000000000, -0.500000000000000, -0.500000000000000],
[0.103102447138651, -0.551551223569326, -0.551551223569326],
[-1.000000000000000, 0.103102447138651, -0.551551223569326],
[-0.551551223569326, 0.103102447138651, -0.551551223569326],
[-1.000000000000000, 0.654653670707977, -0.654653670707977],
[-1.000000000000000, -1.000000000000000, 0.00000000000000],
[-0.551551223569326, -1.000000000000000, 0.103102447138651],
[0.0, -1.000000000000000, 0.000000000000],
[-1.000000000000000, -0.551551223569326, 0.103102447138651],
[-0.551551223569326, -0.551551223569326, 0.103102447138651],
[-1.000000000000000, 0., 0.0],
[-1.000000000000000, -1.000000000000000, 0.654653670707977],
[-0.654653670707977, -1.000000000000000, 0.654653670707977],
[-1.000000000000000, -0.654653670707977, 0.654653670707977]
])
elif C==4:
feketeNodes = np.array([
[-1.000000000000000, -1.000000000000000, -1.000000000000000],
[1.000000000000000, -1.000000000000000, -1.000000000000000],
[-1.000000000000000, 1.000000000000000, -1.000000000000000],
[-1.000000000000000, -1.000000000000000, 1.000000000000000],
[-0.765055323929465, -1.000000000000000, -1.000000000000000],
[-0.285231516480645, -1.000000000000000, -1.000000000000000],
[0.285231516480645, -1.000000000000000, -1.000000000000000],
[0.765055323929465, -1.000000000000000, -1.000000000000000],
[-1.000000000000000, -0.765055323929465, -1.000000000000000],
[-0.683428946803370, -0.683428946803370, -1.000000000000000],
[-0.173392064183727, -0.653215871632546, -1.000000000000000],
[0.366857893606740, -0.683428946803370, -1.000000000000000],
[0.765055323929465, -0.765055323929465, -1.000000000000000],
[-1.000000000000000, -0.285231516480645, -1.000000000000000],
[-0.653215871632546, -0.173392064183727, -1.000000000000000],
[-0.173392064183727, -0.173392064183727, -1.000000000000000],
[0.285231516480645, -0.285231516480645, -1.000000000000000],
[-1.000000000000000, 0.285231516480645, -1.000000000000000],
[-0.683428946803370, 0.366857893606740, -1.000000000000000],
[-0.285231516480645, 0.285231516480645, -1.000000000000000],
[-1.000000000000000, 0.765055323929465, -1.000000000000000],
[-0.765055323929465, 0.765055323929465, -1.000000000000000],
[-1.000000000000000, -1.000000000000000, -0.765055323929465],
[-0.683428946803370, -1.000000000000000, -0.683428946803370],
[-0.173392064183727, -1.000000000000000, -0.653215871632546],
[0.366857893606740, -1.000000000000000, -0.683428946803370],
[0.765055323929465, -1.000000000000000, -0.765055323929465],
[-1.000000000000000, -0.683428946803370, -0.683428946803370],
[-0.619955951862205, -0.619955951862205, -0.619955951862205],
[-0.140132144413385, -0.619955951862205, -0.619955951862205],
[0.366857893606740, -0.683428946803370, -0.683428946803370],
[-1.000000000000000, -0.173392064183727, -0.653215871632546],
[-0.619955951862205, -0.140132144413385, -0.619955951862205],
[-0.173392064183727, -0.173392064183727, -0.653215871632546],
[-1.000000000000000, 0.366857893606740, -0.683428946803370],
[-0.683428946803370, 0.366857893606740, -0.683428946803370],
[-1.000000000000000, 0.765055323929465, -0.765055323929465],
[-1.000000000000000, -1.000000000000000, -0.285231516480645],
[-0.653215871632546, -1.000000000000000, -0.173392064183727],
[-0.173392064183727, -1.000000000000000, -0.173392064183727],
[0.285231516480645, -1.000000000000000, -0.285231516480645],
[-1.000000000000000, -0.653215871632546, -0.173392064183727],
[-0.619955951862205, -0.619955951862205, -0.140132144413385],
[-0.173392064183727, -0.653215871632546, -0.173392064183727],
[-1.000000000000000, -0.173392064183727, -0.173392064183727],
[-0.653215871632546, -0.173392064183727, -0.173392064183727],
[-1.000000000000000, 0.285231516480645, -0.285231516480645],
[-1.000000000000000, -1.000000000000000, 0.285231516480645],
[-0.683428946803370, -1.000000000000000, 0.366857893606740],
[-0.285231516480645, -1.000000000000000, 0.285231516480645],
[-1.000000000000000, -0.683428946803370, 0.366857893606740],
[-0.683428946803370, -0.683428946803370, 0.366857893606740],
[-1.000000000000000, -0.285231516480645, 0.285231516480645],
[-1.000000000000000, -1.000000000000000, 0.765055323929465],
[-0.765055323929465, -1.000000000000000, 0.765055323929465],
[-1.000000000000000, -0.765055323929465, 0.765055323929465]
])
elif C==5:
feketeNodes = np.array([
[-1.000000000000000e+000, -1.000000000000000e+000, -1.000000000000000e+000],
[1.000000000000000e+000, -1.000000000000000e+000, -1.000000000000000e+000],
[-1.000000000000000e+000, 1.000000000000000e+000, -1.000000000000000e+000],
[-1.000000000000000e+000, -1.000000000000000e+000, 1.000000000000000e+000],
[-8.302238962785670e-001, -1.000000000000000e+000, -1.000000000000000e+000],
[-4.688487934707142e-001, -1.000000000000000e+000, -1.000000000000000e+000],
[0., -1.000000000000000e+000, -1.000000000000000e+000],
[4.688487934707142e-001, -1.000000000000000e+000, -1.000000000000000e+000],
[8.302238962785671e-001, -1.000000000000000e+000, -1.000000000000000e+000],
[-1.000000000000000e+000, -8.302238962785671e-001, -1.000000000000000e+000],
[-7.663575632497603e-001, -7.663575632497603e-001, -1.000000000000000e+000],
[-3.691578968876205e-001, -7.305329996954733e-001, -1.000000000000000e+000],
[9.969089658309382e-002, -7.305329996954733e-001, -1.000000000000000e+000],
[5.327151264995207e-001, -7.663575632497605e-001, -1.000000000000000e+000],
[8.302238962785671e-001, -8.302238962785670e-001, -1.000000000000000e+000],
[-1.000000000000000e+000, -4.688487934707142e-001, -1.000000000000000e+000],
[-7.305329996954733e-001, -3.691578968876205e-001, -1.000000000000000e+000],
[-3.333333333333334e-001, -3.333333333333334e-001, -1.000000000000000e+000],
[9.969089658309382e-002, -3.691578968876205e-001, -1.000000000000000e+000],
[4.688487934707142e-001, -4.688487934707142e-001, -1.000000000000000e+000],
[-1.000000000000000e+000, 0., -1.000000000000000e+000],
[-7.305329996954733e-001, 9.969089658309382e-002, -1.000000000000000e+000],
[-3.691578968876205e-001, 9.969089658309382e-002, -1.000000000000000e+000],
[0., 0., -1.000000000000000e+000],
[-1.000000000000000e+000, 4.688487934707142e-001, -1.000000000000000e+000],
[-7.663575632497605e-001, 5.327151264995207e-001, -1.000000000000000e+000],
[-4.688487934707142e-001, 4.688487934707142e-001, -1.000000000000000e+000],
[-1.000000000000000e+000, 8.302238962785671e-001, -1.000000000000000e+000],
[-8.302238962785671e-001, 8.302238962785671e-001, -1.000000000000000e+000],
[-1.000000000000000e+000, -1.000000000000000e+000, -8.302238962785670e-001],
[-7.663575632497603e-001, -1.000000000000000e+000, -7.663575632497603e-001],
[-3.691578968876205e-001, -1.000000000000000e+000, -7.305329996954733e-001],
[9.969089658309382e-002, -1.000000000000000e+000, -7.305329996954733e-001],
[5.327151264995207e-001, -1.000000000000000e+000, -7.663575632497605e-001],
[8.302238962785671e-001, -1.000000000000000e+000, -8.302238962785670e-001],
[-1.000000000000000e+000, -7.663575632497603e-001, -7.663575632497603e-001],
[-7.075559740696417e-001, -7.075559740696417e-001, -7.075559740696417e-001],
[-3.193124485960736e-001, -6.806875514039265e-001, -6.806875514039265e-001],
[1.226679222089253e-001, -7.075559740696417e-001, -7.075559740696417e-001],
[5.327151264995207e-001, -7.663575632497603e-001, -7.663575632497603e-001],
[-1.000000000000000e+000, -3.691578968876205e-001, -7.305329996954733e-001],
[-6.806875514039265e-001, -3.193124485960736e-001, -6.806875514039263e-001],
[-3.193124485960734e-001, -3.193124485960734e-001, -6.806875514039263e-001],
[9.969089658309382e-002, -3.691578968876205e-001, -7.305329996954733e-001],
[-1.000000000000000e+000, 9.969089658309382e-002, -7.305329996954733e-001],
[-7.075559740696417e-001, 1.226679222089253e-001, -7.075559740696417e-001],
[-3.691578968876206e-001, 9.969089658309382e-002, -7.305329996954733e-001],
[-1.000000000000000e+000, 5.327151264995207e-001, -7.663575632497605e-001],
[-7.663575632497602e-001, 5.327151264995207e-001, -7.663575632497605e-001],
[-1.000000000000000e+000, 8.302238962785671e-001, -8.302238962785670e-001],
[-1.000000000000000e+000, -1.000000000000000e+000, -4.688487934707142e-001],
[-7.305329996954733e-001, -1.000000000000000e+000, -3.691578968876205e-001],
[-3.333333333333334e-001, -1.000000000000000e+000, -3.333333333333334e-001],
[9.969089658309382e-002, -1.000000000000000e+000, -3.691578968876205e-001],
[4.688487934707142e-001, -1.000000000000000e+000, -4.688487934707142e-001],
[-1.000000000000000e+000, -7.305329996954733e-001, -3.691578968876205e-001],
[-6.806875514039263e-001, -6.806875514039263e-001, -3.193124485960736e-001],
[-3.193124485960734e-001, -6.806875514039263e-001, -3.193124485960734e-001],
[9.969089658309382e-002, -7.305329996954733e-001, -3.691578968876205e-001],
[-1.000000000000000e+000, -3.333333333333334e-001, -3.333333333333334e-001],
[-6.806875514039263e-001, -3.193124485960734e-001, -3.193124485960734e-001],
[-3.333333333333332e-001, -3.333333333333334e-001, -3.333333333333334e-001],
[-1.000000000000000e+000, 9.969089658309382e-002, -3.691578968876205e-001],
[-7.305329996954734e-001, 9.969089658309382e-002, -3.691578968876205e-001],
[-1.000000000000000e+000, 4.688487934707142e-001, -4.688487934707142e-001],
[-1.000000000000000e+000, -1.000000000000000e+000, 0.],
[-7.305329996954733e-001, -1.000000000000000e+000, 9.969089658309382e-002],
[-3.691578968876205e-001, -1.000000000000000e+000, 9.969089658309382e-002],
[0., -1.000000000000000e+000, 0.],
[-1.000000000000000e+000, -7.305329996954733e-001, 9.969089658309382e-002],
[-7.075559740696417e-001, -7.075559740696417e-001, 1.226679222089253e-001],
[-3.691578968876206e-001, -7.305329996954733e-001, 9.969089658309382e-002],
[-1.000000000000000e+000, -3.691578968876205e-001, 9.969089658309382e-002],
[-7.305329996954733e-001, -3.691578968876205e-001, 9.969089658309382e-002],
[-1.000000000000000e+000, 0., 0.],
[-1.000000000000000e+000, -1.000000000000000e+000, 4.688487934707142e-001],
[-7.663575632497605e-001, -1.000000000000000e+000, 5.327151264995207e-001],
[-4.688487934707142e-001, -1.000000000000000e+000, 4.688487934707142e-001],
[-1.000000000000000e+000, -7.663575632497605e-001, 5.327151264995207e-001],
[-7.663575632497603e-001, -7.663575632497605e-001, 5.327151264995207e-001],
[-1.000000000000000e+000, -4.688487934707142e-001, 4.688487934707142e-001],
[-1.000000000000000e+000, -1.000000000000000e+000, 8.302238962785671e-001],
[-8.302238962785671e-001, -1.000000000000000e+000, 8.302238962785671e-001],
[-1.000000000000000e+000, -8.302238962785671e-001, 8.302238962785671e-001]
])
elif C==6:
feketeNodes = np.array([
[-1.00000000000000, -1.00000000000000, -1.00000000000000],
[1.00000000000000, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, 1.00000000000000, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, 1.00000000000000],
[-0.87174014850961, -1.00000000000000, -1.00000000000000],
[-0.59170018143314, -1.00000000000000, -1.00000000000000],
[-0.20929921790248, -1.00000000000000, -1.00000000000000],
[0.20929921790248, -1.00000000000000, -1.00000000000000],
[0.59170018143314, -1.00000000000000, -1.00000000000000],
[0.87174014850961, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, -0.87174014850961, -1.00000000000000],
[-0.82528381670207, -0.82528381670207, -1.00000000000000],
[-0.50221137036112, -0.80669930545461, -1.00000000000000],
[-0.09895829633478, -0.80208340733043, -1.00000000000000],
[0.30891067581573, -0.80669930545461, -1.00000000000000],
[0.65056763340413, -0.82528381670207, -1.00000000000000],
[0.87174014850961, -0.87174014850961, -1.00000000000000],
[-1.00000000000000, -0.59170018143314, -1.00000000000000],
[-0.80669930545461, -0.50221137036112, -1.00000000000000],
[-0.46667144513197, -0.46667144513197, -1.00000000000000],
[-0.06665710973605, -0.46667144513197, -1.00000000000000],
[0.30891067581573, -0.50221137036112, -1.00000000000000],
[0.59170018143314, -0.59170018143314, -1.00000000000000],
[-1.00000000000000, -0.20929921790248, -1.00000000000000],
[-0.80208340733043, -0.09895829633478, -1.00000000000000],
[-0.46667144513197, -0.06665710973605, -1.00000000000000],
[-0.09895829633478, -0.09895829633478, -1.00000000000000],
[0.20929921790248, -0.20929921790248, -1.00000000000000],
[-1.00000000000000, 0.20929921790248, -1.00000000000000],
[-0.80669930545461, 0.30891067581573, -1.00000000000000],
[-0.50221137036112, 0.30891067581573, -1.00000000000000],
[-0.20929921790248, 0.20929921790248, -1.00000000000000],
[-1.00000000000000, 0.59170018143314, -1.00000000000000],
[-0.82528381670207, 0.65056763340413, -1.00000000000000],
[-0.59170018143314, 0.59170018143314, -1.00000000000000],
[-1.00000000000000, 0.87174014850961, -1.00000000000000],
[-0.87174014850961, 0.87174014850961, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, -0.87174014850961],
[-0.82528381670207, -1.00000000000000, -0.82528381670207],
[-0.50221137036112, -1.00000000000000, -0.80669930545461],
[-0.09895829633478, -1.00000000000000, -0.80208340733043],
[0.30891067581573, -1.00000000000000, -0.80669930545461],
[0.65056763340413, -1.00000000000000, -0.82528381670207],
[0.87174014850961, -1.00000000000000, -0.87174014850961],
[-1.00000000000000, -0.82528381670207, -0.82528381670207],
[-0.76969787609152, -0.76969787609152, -0.76969787609152],
[-0.44445869174761, -0.74795442580198, -0.74795442580198],
[-0.05963245664842, -0.74795442580198, -0.74795442580198],
[0.30909362827455, -0.76969787609152, -0.76969787609152],
[0.65056763340413, -0.82528381670207, -0.82528381670207],
[-1.00000000000000, -0.50221137036112, -0.80669930545461],
[-0.74795442580198, -0.44445869174761, -0.74795442580198],
[-0.42209801484566, -0.42209801484566, -0.73370595546302],
[-0.05963245664842, -0.44445869174761, -0.74795442580198],
[0.30891067581573, -0.50221137036112, -0.80669930545461],
[-1.00000000000000, -0.09895829633478, -0.80208340733043],
[-0.74795442580198, -0.05963245664842, -0.74795442580198],
[-0.44445869174761, -0.05963245664842, -0.74795442580198],
[-0.09895829633478, -0.09895829633478, -0.80208340733043],
[-1.00000000000000, 0.30891067581573, -0.80669930545461],
[-0.76969787609152, 0.30909362827455, -0.76969787609152],
[-0.50221137036112, 0.30891067581573, -0.80669930545461],
[-1.00000000000000, 0.65056763340413, -0.82528381670207],
[-0.82528381670207, 0.65056763340413, -0.82528381670207],
[-1.00000000000000, 0.87174014850961, -0.87174014850961],
[-1.00000000000000, -1.00000000000000, -0.59170018143314],
[-0.80669930545461, -1.00000000000000, -0.50221137036112],
[-0.46667144513197, -1.00000000000000, -0.46667144513197],
[-0.06665710973605, -1.00000000000000, -0.46667144513197],
[0.30891067581573, -1.00000000000000, -0.50221137036112],
[0.59170018143314, -1.00000000000000, -0.59170018143314],
[-1.00000000000000, -0.80669930545461, -0.50221137036112],
[-0.74795442580198, -0.74795442580198, -0.44445869174761],
[-0.42209801484566, -0.73370595546302, -0.42209801484566],
[-0.05963245664842, -0.74795442580198, -0.44445869174761],
[0.30891067581573, -0.80669930545461, -0.50221137036112],
[-1.00000000000000, -0.46667144513197, -0.46667144513197],
[-0.73370595546302, -0.42209801484566, -0.42209801484566],
[-0.42209801484566, -0.42209801484566, -0.42209801484566],
[-0.06665710973605, -0.46667144513197, -0.46667144513197],
[-1.00000000000000, -0.06665710973605, -0.46667144513197],
[-0.74795442580198, -0.05963245664842, -0.44445869174761],
[-0.46667144513197, -0.06665710973605, -0.46667144513197],
[-1.00000000000000, 0.30891067581573, -0.50221137036112],
[-0.80669930545461, 0.30891067581573, -0.50221137036112],
[-1.00000000000000, 0.59170018143314, -0.59170018143314],
[-1.00000000000000, -1.00000000000000, -0.20929921790248],
[-0.80208340733043, -1.00000000000000, -0.09895829633478],
[-0.46667144513197, -1.00000000000000, -0.06665710973605],
[-0.09895829633478, -1.00000000000000, -0.09895829633478],
[0.20929921790248, -1.00000000000000, -0.20929921790248],
[-1.00000000000000, -0.80208340733043, -0.09895829633478],
[-0.74795442580198, -0.74795442580198, -0.05963245664842],
[-0.44445869174761, -0.74795442580198, -0.05963245664842],
[-0.09895829633478, -0.80208340733043, -0.09895829633478],
[-1.00000000000000, -0.46667144513197, -0.06665710973605],
[-0.74795442580198, -0.44445869174761, -0.05963245664842],
[-0.46667144513197, -0.46667144513197, -0.06665710973605],
[-1.00000000000000, -0.09895829633478, -0.09895829633478],
[-0.80208340733043, -0.09895829633478, -0.09895829633478],
[-1.00000000000000, 0.20929921790248, -0.20929921790248],
[-1.00000000000000, -1.00000000000000, 0.20929921790248],
[-0.80669930545461, -1.00000000000000, 0.30891067581573],
[-0.50221137036112, -1.00000000000000, 0.30891067581573],
[-0.20929921790248, -1.00000000000000, 0.20929921790248],
[-1.00000000000000, -0.80669930545461, 0.30891067581573],
[-0.76969787609152, -0.76969787609152, 0.30909362827455],
[-0.50221137036112, -0.80669930545461, 0.30891067581573],
[-1.00000000000000, -0.50221137036112, 0.30891067581573],
[-0.80669930545461, -0.50221137036112, 0.30891067581573],
[-1.00000000000000, -0.20929921790248, 0.20929921790248],
[-1.00000000000000, -1.00000000000000, 0.59170018143314],
[-0.82528381670207, -1.00000000000000, 0.65056763340413],
[-0.59170018143314, -1.00000000000000, 0.59170018143314],
[-1.00000000000000, -0.82528381670207, 0.65056763340413],
[-0.82528381670207, -0.82528381670207, 0.65056763340413],
[-1.00000000000000, -0.59170018143314, 0.59170018143314],
[-1.00000000000000, -1.00000000000000, 0.87174014850961],
[-0.87174014850961, -1.00000000000000, 0.87174014850961],
[-1.00000000000000, -0.87174014850961, 0.87174014850961],
])
elif C==7:
feketeNodes = np.array([
[-1.00000000000000, -1.00000000000000, -1.00000000000000],
[1.00000000000000, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, 1.00000000000000, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, 1.00000000000000],
[-0.89975799541146, -1.00000000000000, -1.00000000000000],
[-0.67718627951074, -1.00000000000000, -1.00000000000000],
[-0.36311746382618, -1.00000000000000, -1.00000000000000],
[0.00000000000000, -1.00000000000000, -1.00000000000000],
[0.36311746382618, -1.00000000000000, -1.00000000000000],
[0.67718627951074, -1.00000000000000, -1.00000000000000],
[0.89975799541146, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, -0.89975799541146, -1.00000000000000],
[-0.86170949449417, -0.86170949449417, -1.00000000000000],
[-0.60262188608665, -0.84339274864735, -1.00000000000000],
[-0.26504420712991, -0.83639267859385, -1.00000000000000],
[0.10143688572376, -0.83639267859385, -1.00000000000000],
[0.44601463473400, -0.84339274864735, -1.00000000000000],
[0.72341898898835, -0.86170949449417, -1.00000000000000],
[0.89975799541146, -0.89975799541146, -1.00000000000000],
[-1.00000000000000, -0.67718627951074, -1.00000000000000],
[-0.84339274864735, -0.60262188608665, -1.00000000000000],
[-0.56629365587868, -0.56629365587868, -1.00000000000000],
[-0.22219562253010, -0.55560875493980, -1.00000000000000],
[0.13258731175736, -0.56629365587868, -1.00000000000000],
[0.44601463473400, -0.60262188608665, -1.00000000000000],
[0.67718627951074, -0.67718627951074, -1.00000000000000],
[-1.00000000000000, -0.36311746382618, -1.00000000000000],
[-0.83639267859385, -0.26504420712991, -1.00000000000000],
[-0.55560875493980, -0.22219562253010, -1.00000000000000],
[-0.22219562253010, -0.22219562253010, -1.00000000000000],
[0.10143688572376, -0.26504420712991, -1.00000000000000],
[0.36311746382618, -0.36311746382618, -1.00000000000000],
[-1.00000000000000, 0.00000000000000, -1.00000000000000],
[-0.83639267859385, 0.10143688572376, -1.00000000000000],
[-0.56629365587868, 0.13258731175736, -1.00000000000000],
[-0.26504420712991, 0.10143688572376, -1.00000000000000],
[0.00000000000000, 0.00000000000000, -1.00000000000000],
[-1.00000000000000, 0.36311746382618, -1.00000000000000],
[-0.84339274864735, 0.44601463473400, -1.00000000000000],
[-0.60262188608665, 0.44601463473400, -1.00000000000000],
[-0.36311746382618, 0.36311746382618, -1.00000000000000],
[-1.00000000000000, 0.67718627951074, -1.00000000000000],
[-0.86170949449417, 0.72341898898835, -1.00000000000000],
[-0.67718627951074, 0.67718627951074, -1.00000000000000],
[-1.00000000000000, 0.89975799541146, -1.00000000000000],
[-0.89975799541146, 0.89975799541146, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, -0.89975799541146],
[-0.86170949449417, -1.00000000000000, -0.86170949449417],
[-0.60262188608665, -1.00000000000000, -0.84339274864735],
[-0.26504420712991, -1.00000000000000, -0.83639267859385],
[0.10143688572376, -1.00000000000000, -0.83639267859385],
[0.44601463473400, -1.00000000000000, -0.84339274864735],
[0.72341898898835, -1.00000000000000, -0.86170949449417],
[0.89975799541146, -1.00000000000000, -0.89975799541146],
[-1.00000000000000, -0.86170949449417, -0.86170949449417],
[-0.81264414406961, -0.81264414406961, -0.81264414406961],
[-0.54857984257542, -0.78978165180393, -0.78978165180393],
[-0.21619316488673, -0.78380683511327, -0.78380683511327],
[0.12814314618328, -0.78978165180393, -0.78978165180393],
[0.43793243220883, -0.81264414406961, -0.81264414406961],
[0.72341898898835, -0.86170949449417, -0.86170949449417],
[-1.00000000000000, -0.60262188608665, -0.84339274864735],
[-0.78978165180393, -0.54857984257542, -0.78978165180393],
[-0.51657615948891, -0.51657615948891, -0.77141155022715],
[-0.19543613079503, -0.51657615948891, -0.77141155022715],
[0.12814314618328, -0.54857984257542, -0.78978165180393],
[0.44601463473400, -0.60262188608665, -0.84339274864735],
[-1.00000000000000, -0.26504420712991, -0.83639267859385],
[-0.78380683511327, -0.21619316488673, -0.78380683511327],
[-0.51657615948891, -0.19543613079503, -0.77141155022715],
[-0.21619316488673, -0.21619316488673, -0.78380683511327],
[0.10143688572376, -0.26504420712991, -0.83639267859385],
[-1.00000000000000, 0.10143688572376, -0.83639267859385],
[-0.78978165180393, 0.12814314618328, -0.78978165180393],
[-0.54857984257542, 0.12814314618328, -0.78978165180393],
[-0.26504420712991, 0.10143688572376, -0.83639267859385],
[-1.00000000000000, 0.44601463473400, -0.84339274864735],
[-0.81264414406961, 0.43793243220883, -0.81264414406961],
[-0.60262188608665, 0.44601463473400, -0.84339274864735],
[-1.00000000000000, 0.72341898898835, -0.86170949449417],
[-0.86170949449417, 0.72341898898835, -0.86170949449417],
[-1.00000000000000, 0.89975799541146, -0.89975799541146],
[-1.00000000000000, -1.00000000000000, -0.67718627951074],
[-0.84339274864735, -1.00000000000000, -0.60262188608665],
[-0.56629365587868, -1.00000000000000, -0.56629365587868],
[-0.22219562253010, -1.00000000000000, -0.55560875493980],
[0.13258731175736, -1.00000000000000, -0.56629365587868],
[0.44601463473400, -1.00000000000000, -0.60262188608665],
[0.67718627951074, -1.00000000000000, -0.67718627951074],
[-1.00000000000000, -0.84339274864735, -0.60262188608665],
[-0.78978165180393, -0.78978165180393, -0.54857984257542],
[-0.51657615948891, -0.77141155022715, -0.51657615948891],
[-0.19543613079503, -0.77141155022715, -0.51657615948891],
[0.12814314618328, -0.78978165180393, -0.54857984257542],
[0.44601463473400, -0.84339274864735, -0.60262188608665],
[-1.00000000000000, -0.56629365587868, -0.56629365587868],
[-0.77141155022715, -0.51657615948891, -0.51657615948891],
[-0.50000000000000, -0.50000000000000, -0.50000000000000],
[-0.19543613079503, -0.51657615948891, -0.51657615948891],
[0.13258731175736, -0.56629365587868, -0.56629365587868],
[-1.00000000000000, -0.22219562253010, -0.55560875493980],
[-0.77141155022715, -0.19543613079503, -0.51657615948891],
[-0.51657615948891, -0.19543613079503, -0.51657615948891],
[-0.22219562253010, -0.22219562253010, -0.55560875493980],
[-1.00000000000000, 0.13258731175736, -0.56629365587868],
[-0.78978165180393, 0.12814314618328, -0.54857984257542],
[-0.56629365587868, 0.13258731175736, -0.56629365587868],
[-1.00000000000000, 0.44601463473400, -0.60262188608665],
[-0.84339274864735, 0.44601463473400, -0.60262188608665],
[-1.00000000000000, 0.67718627951074, -0.67718627951074],
[-1.00000000000000, -1.00000000000000, -0.36311746382618],
[-0.83639267859385, -1.00000000000000, -0.26504420712991],
[-0.55560875493980, -1.00000000000000, -0.22219562253010],
[-0.22219562253010, -1.00000000000000, -0.22219562253010],
[0.10143688572376, -1.00000000000000, -0.26504420712991],
[0.36311746382618, -1.00000000000000, -0.36311746382618],
[-1.00000000000000, -0.83639267859385, -0.26504420712991],
[-0.78380683511327, -0.78380683511327, -0.21619316488673],
[-0.51657615948891, -0.77141155022715, -0.19543613079503],
[-0.21619316488673, -0.78380683511327, -0.21619316488673],
[0.10143688572376, -0.83639267859385, -0.26504420712991],
[-1.00000000000000, -0.55560875493980, -0.22219562253010],
[-0.77141155022715, -0.51657615948891, -0.19543613079503],
[-0.51657615948891, -0.51657615948891, -0.19543613079503],
[-0.22219562253010, -0.55560875493980, -0.22219562253010],
[-1.00000000000000, -0.22219562253010, -0.22219562253010],
[-0.78380683511327, -0.21619316488673, -0.21619316488673],
[-0.55560875493980, -0.22219562253010, -0.22219562253010],
[-1.00000000000000, 0.10143688572376, -0.26504420712991],
[-0.83639267859385, 0.10143688572376, -0.26504420712991],
[-1.00000000000000, 0.36311746382618, -0.36311746382618],
[-1.00000000000000, -1.00000000000000, -0.00000000000000],
[-0.83639267859385, -1.00000000000000, 0.10143688572376],
[-0.56629365587868, -1.00000000000000, 0.13258731175736],
[-0.26504420712991, -1.00000000000000, 0.10143688572376],
[0.00000000000000, -1.00000000000000, -0.00000000000000],
[-1.00000000000000, -0.83639267859385, 0.10143688572376],
[-0.78978165180393, -0.78978165180393, 0.12814314618328],
[-0.54857984257542, -0.78978165180393, 0.12814314618328],
[-0.26504420712991, -0.83639267859385, 0.10143688572376],
[-1.00000000000000, -0.56629365587868, 0.13258731175736],
[-0.78978165180393, -0.54857984257542, 0.12814314618328],
[-0.56629365587868, -0.56629365587868, 0.13258731175736],
[-1.00000000000000, -0.26504420712991, 0.10143688572376],
[-0.83639267859385, -0.26504420712991, 0.10143688572376],
[-1.00000000000000, 0.00000000000000, -0.00000000000000],
[-1.00000000000000, -1.00000000000000, 0.36311746382618],
[-0.84339274864735, -1.00000000000000, 0.44601463473400],
[-0.60262188608665, -1.00000000000000, 0.44601463473400],
[-0.36311746382618, -1.00000000000000, 0.36311746382618],
[-1.00000000000000, -0.84339274864735, 0.44601463473400],
[-0.81264414406961, -0.81264414406961, 0.43793243220883],
[-0.60262188608665, -0.84339274864735, 0.44601463473400],
[-1.00000000000000, -0.60262188608665, 0.44601463473400],
[-0.84339274864735, -0.60262188608665, 0.44601463473400],
[-1.00000000000000, -0.36311746382618, 0.36311746382618],
[-1.00000000000000, -1.00000000000000, 0.67718627951074],
[-0.86170949449417, -1.00000000000000, 0.72341898898835],
[-0.67718627951074, -1.00000000000000, 0.67718627951074],
[-1.00000000000000, -0.86170949449417, 0.72341898898835],
[-0.86170949449417, -0.86170949449417, 0.72341898898835],
[-1.00000000000000, -0.67718627951074, 0.67718627951074],
[-1.00000000000000, -1.00000000000000, 0.89975799541146],
[-0.89975799541146, -1.00000000000000, 0.89975799541146],
[-1.00000000000000, -0.89975799541146, 0.89975799541146],
])
elif C==8:
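# 220 Fekete nodes.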
feketeNodes = np.array([
[-1.00000000000000, -1.00000000000000, -1.00000000000000],
[1.00000000000000, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, 1.00000000000000, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, 1.00000000000000],
[-0.91953390816646, -1.00000000000000, -1.00000000000000],
[-0.73877386510550, -1.00000000000000, -1.00000000000000],
[-0.47792494981044, -1.00000000000000, -1.00000000000000],
[-0.16527895766639, -1.00000000000000, -1.00000000000000],
[0.16527895766639, -1.00000000000000, -1.00000000000000],
[0.47792494981044, -1.00000000000000, -1.00000000000000],
[0.73877386510550, -1.00000000000000, -1.00000000000000],
[0.91953390816646, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, -0.91953390816646, -1.00000000000000],
[-0.88782128766618, -0.88782128766618, -1.00000000000000],
[-0.67626256165966, -0.87017829530514, -1.00000000000000],
[-0.39267792331144, -0.86163425910518, -1.00000000000000],
[-0.07043475725321, -0.85913048549358, -1.00000000000000],
[0.25431218241661, -0.86163425910518, -1.00000000000000],
[0.54644085696480, -0.87017829530514, -1.00000000000000],
[0.77564257533236, -0.88782128766618, -1.00000000000000],
[0.91953390816646, -0.91953390816646, -1.00000000000000],
[-1.00000000000000, -0.73877386510550, -1.00000000000000],
[-0.87017829530514, -0.67626256165966, -1.00000000000000],
[-0.64122478358221, -0.64122478358221, -1.00000000000000],
[-0.34742291811411, -0.62573097059544, -1.00000000000000],
[-0.02684611129045, -0.62573097059544, -1.00000000000000],
[0.28244956716441, -0.64122478358221, -1.00000000000000],
[0.54644085696480, -0.67626256165966, -1.00000000000000],
[0.73877386510550, -0.73877386510550, -1.00000000000000],
[-1.00000000000000, -0.47792494981044, -1.00000000000000],
[-0.86163425910518, -0.39267792331144, -1.00000000000000],
[-0.62573097059544, -0.34742291811411, -1.00000000000000],
[-0.33333333333333, -0.33333333333333, -1.00000000000000],
[-0.02684611129045, -0.34742291811411, -1.00000000000000],
[0.25431218241661, -0.39267792331144, -1.00000000000000],
[0.47792494981044, -0.47792494981044, -1.00000000000000],
[-1.00000000000000, -0.16527895766639, -1.00000000000000],
[-0.85913048549358, -0.07043475725321, -1.00000000000000],
[-0.62573097059544, -0.02684611129045, -1.00000000000000],
[-0.34742291811411, -0.02684611129045, -1.00000000000000],
[-0.07043475725321, -0.07043475725321, -1.00000000000000],
[0.16527895766639, -0.16527895766639, -1.00000000000000],
[-1.00000000000000, 0.16527895766639, -1.00000000000000],
[-0.86163425910518, 0.25431218241661, -1.00000000000000],
[-0.64122478358221, 0.28244956716441, -1.00000000000000],
[-0.39267792331144, 0.25431218241661, -1.00000000000000],
[-0.16527895766639, 0.16527895766639, -1.00000000000000],
[-1.00000000000000, 0.47792494981044, -1.00000000000000],
[-0.87017829530514, 0.54644085696480, -1.00000000000000],
[-0.67626256165966, 0.54644085696480, -1.00000000000000],
[-0.47792494981044, 0.47792494981044, -1.00000000000000],
[-1.00000000000000, 0.73877386510550, -1.00000000000000],
[-0.88782128766618, 0.77564257533236, -1.00000000000000],
[-0.73877386510550, 0.73877386510550, -1.00000000000000],
[-1.00000000000000, 0.91953390816646, -1.00000000000000],
[-0.91953390816646, 0.91953390816646, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, -0.91953390816646],
[-0.88782128766618, -1.00000000000000, -0.88782128766618],
[-0.67626256165966, -1.00000000000000, -0.87017829530514],
[-0.39267792331144, -1.00000000000000, -0.86163425910518],
[-0.07043475725321, -1.00000000000000, -0.85913048549358],
[0.25431218241661, -1.00000000000000, -0.86163425910518],
[0.54644085696480, -1.00000000000000, -0.87017829530514],
[0.77564257533236, -1.00000000000000, -0.88782128766618],
[0.91953390816646, -1.00000000000000, -0.91953390816646],
[-1.00000000000000, -0.88782128766618, -0.88782128766618],
[-0.84436571464720, -0.84436571464720, -0.84436571464720],
[-0.62793646547893, -0.82168802074045, -0.82168802074045],
[-0.34352508237476, -0.81318719011147, -0.81318719011147],
[-0.03010053740230, -0.81318719011147, -0.81318719011147],
[0.27131250695983, -0.82168802074045, -0.82168802074045],
[0.53309714394159, -0.84436571464720, -0.84436571464720],
[0.77564257533236, -0.88782128766618, -0.88782128766618],
[-1.00000000000000, -0.67626256165966, -0.87017829530514],
[-0.82168802074045, -0.62793646547893, -0.82168802074045],
[-0.59208531429249, -0.59208531429249, -0.80238818113213],
[-0.31067450382622, -0.58148680722995, -0.79716418511761],
[-0.01344119028290, -0.59208531429249, -0.80238818113213],
[0.27131250695983, -0.62793646547893, -0.82168802074045],
[0.54644085696480, -0.67626256165966, -0.87017829530514],
[-1.00000000000000, -0.39267792331144, -0.86163425910518],
[-0.81318719011147, -0.34352508237476, -0.81318719011147],
[-0.58148680722995, -0.31067450382622, -0.79716418511761],
[-0.31067450382622, -0.31067450382622, -0.79716418511761],
[-0.03010053740230, -0.34352508237476, -0.81318719011147],
[0.25431218241661, -0.39267792331144, -0.86163425910518],
[-1.00000000000000, -0.07043475725321, -0.85913048549358],
[-0.81318719011147, -0.03010053740230, -0.81318719011147],
[-0.59208531429249, -0.01344119028290, -0.80238818113213],
[-0.34352508237476, -0.03010053740230, -0.81318719011147],
[-0.07043475725321, -0.07043475725321, -0.85913048549358],
[-1.00000000000000, 0.25431218241661, -0.86163425910518],
[-0.82168802074045, 0.27131250695983, -0.82168802074045],
[-0.62793646547893, 0.27131250695983, -0.82168802074045],
[-0.39267792331144, 0.25431218241661, -0.86163425910518],
[-1.00000000000000, 0.54644085696480, -0.87017829530514],
[-0.84436571464720, 0.53309714394159, -0.84436571464720],
[-0.67626256165966, 0.54644085696480, -0.87017829530514],
[-1.00000000000000, 0.77564257533236, -0.88782128766618],
[-0.88782128766618, 0.77564257533236, -0.88782128766618],
[-1.00000000000000, 0.91953390816646, -0.91953390816646],
[-1.00000000000000, -1.00000000000000, -0.73877386510550],
[-0.87017829530514, -1.00000000000000, -0.67626256165966],
[-0.64122478358221, -1.00000000000000, -0.64122478358221],
[-0.34742291811411, -1.00000000000000, -0.62573097059544],
[-0.02684611129045, -1.00000000000000, -0.62573097059544],
[0.28244956716441, -1.00000000000000, -0.64122478358221],
[0.54644085696480, -1.00000000000000, -0.67626256165966],
[0.73877386510550, -1.00000000000000, -0.73877386510550],
[-1.00000000000000, -0.87017829530514, -0.67626256165966],
[-0.82168802074045, -0.82168802074045, -0.62793646547893],
[-0.59208531429249, -0.80238818113213, -0.59208531429249],
[-0.31067450382622, -0.79716418511761, -0.58148680722995],
[-0.01344119028290, -0.80238818113213, -0.59208531429249],
[0.27131250695983, -0.82168802074045, -0.62793646547893],
[0.54644085696480, -0.87017829530514, -0.67626256165966],
[-1.00000000000000, -0.64122478358221, -0.64122478358221],
[-0.80238818113213, -0.59208531429249, -0.59208531429249],
[-0.56787752300869, -0.56787752300869, -0.56787752300869],
[-0.29636743097392, -0.56787752300869, -0.56787752300869],
[-0.01344119028290, -0.59208531429249, -0.59208531429249],
[0.28244956716441, -0.64122478358221, -0.64122478358221],
[-1.00000000000000, -0.34742291811411, -0.62573097059544],
[-0.79716418511761, -0.31067450382622, -0.58148680722995],
[-0.56787752300869, -0.29636743097392, -0.56787752300869],
[-0.31067450382622, -0.31067450382622, -0.58148680722995],
[-0.02684611129045, -0.34742291811411, -0.62573097059544],
[-1.00000000000000, -0.02684611129045, -0.62573097059544],
[-0.80238818113213, -0.01344119028290, -0.59208531429249],
[-0.59208531429249, -0.01344119028290, -0.59208531429249],
[-0.34742291811411, -0.02684611129045, -0.62573097059544],
[-1.00000000000000, 0.28244956716441, -0.64122478358221],
[-0.82168802074045, 0.27131250695983, -0.62793646547893],
[-0.64122478358221, 0.28244956716441, -0.64122478358221],
[-1.00000000000000, 0.54644085696480, -0.67626256165966],
[-0.87017829530514, 0.54644085696480, -0.67626256165966],
[-1.00000000000000, 0.73877386510550, -0.73877386510550],
[-1.00000000000000, -1.00000000000000, -0.47792494981044],
[-0.86163425910518, -1.00000000000000, -0.39267792331144],
[-0.62573097059544, -1.00000000000000, -0.34742291811411],
[-0.33333333333333, -1.00000000000000, -0.33333333333333],
[-0.02684611129045, -1.00000000000000, -0.34742291811411],
[0.25431218241661, -1.00000000000000, -0.39267792331144],
[0.47792494981044, -1.00000000000000, -0.47792494981044],
[-1.00000000000000, -0.86163425910518, -0.39267792331144],
[-0.81318719011147, -0.81318719011147, -0.34352508237476],
[-0.58148680722995, -0.79716418511761, -0.31067450382622],
[-0.31067450382622, -0.79716418511761, -0.31067450382622],
[-0.03010053740230, -0.81318719011147, -0.34352508237476],
[0.25431218241661, -0.86163425910518, -0.39267792331144],
[-1.00000000000000, -0.62573097059544, -0.34742291811411],
[-0.79716418511761, -0.58148680722995, -0.31067450382622],
[-0.56787752300869, -0.56787752300869, -0.29636743097392],
[-0.31067450382622, -0.58148680722995, -0.31067450382622],
[-0.02684611129045, -0.62573097059544, -0.34742291811411],
[-1.00000000000000, -0.33333333333333, -0.33333333333333],
[-0.79716418511761, -0.31067450382622, -0.31067450382622],
[-0.58148680722995, -0.31067450382622, -0.31067450382622],
[-0.33333333333333, -0.33333333333333, -0.33333333333333],
[-1.00000000000000, -0.02684611129045, -0.34742291811411],
[-0.81318719011147, -0.03010053740230, -0.34352508237476],
[-0.62573097059544, -0.02684611129045, -0.34742291811411],
[-1.00000000000000, 0.25431218241661, -0.39267792331144],
[-0.86163425910518, 0.25431218241661, -0.39267792331144],
[-1.00000000000000, 0.47792494981044, -0.47792494981044],
[-1.00000000000000, -1.00000000000000, -0.16527895766639],
[-0.85913048549358, -1.00000000000000, -0.07043475725321],
[-0.62573097059544, -1.00000000000000, -0.02684611129045],
[-0.34742291811411, -1.00000000000000, -0.02684611129045],
[-0.07043475725321, -1.00000000000000, -0.07043475725321],
[0.16527895766639, -1.00000000000000, -0.16527895766639],
[-1.00000000000000, -0.85913048549358, -0.07043475725321],
[-0.81318719011147, -0.81318719011147, -0.03010053740230],
[-0.59208531429249, -0.80238818113213, -0.01344119028290],
[-0.34352508237476, -0.81318719011147, -0.03010053740230],
[-0.07043475725321, -0.85913048549358, -0.07043475725321],
[-1.00000000000000, -0.62573097059544, -0.02684611129045],
[-0.80238818113213, -0.59208531429249, -0.01344119028290],
[-0.59208531429249, -0.59208531429249, -0.01344119028290],
[-0.34742291811411, -0.62573097059544, -0.02684611129045],
[-1.00000000000000, -0.34742291811411, -0.02684611129045],
[-0.81318719011147, -0.34352508237476, -0.03010053740230],
[-0.62573097059544, -0.34742291811411, -0.02684611129045],
[-1.00000000000000, -0.07043475725321, -0.07043475725321],
[-0.85913048549358, -0.07043475725321, -0.07043475725321],
[-1.00000000000000, 0.16527895766639, -0.16527895766639],
[-1.00000000000000, -1.00000000000000, 0.16527895766639],
[-0.86163425910518, -1.00000000000000, 0.25431218241661],
[-0.64122478358221, -1.00000000000000, 0.28244956716441],
[-0.39267792331144, -1.00000000000000, 0.25431218241661],
[-0.16527895766639, -1.00000000000000, 0.16527895766639],
[-1.00000000000000, -0.86163425910518, 0.25431218241661],
[-0.82168802074045, -0.82168802074045, 0.27131250695983],
[-0.62793646547893, -0.82168802074045, 0.27131250695983],
[-0.39267792331144, -0.86163425910518, 0.25431218241661],
[-1.00000000000000, -0.64122478358221, 0.28244956716441],
[-0.82168802074045, -0.62793646547893, 0.27131250695983],
[-0.64122478358221, -0.64122478358221, 0.28244956716441],
[-1.00000000000000, -0.39267792331144, 0.25431218241661],
[-0.86163425910518, -0.39267792331144, 0.25431218241661],
[-1.00000000000000, -0.16527895766639, 0.16527895766639],
[-1.00000000000000, -1.00000000000000, 0.47792494981044],
[-0.87017829530514, -1.00000000000000, 0.54644085696480],
[-0.67626256165966, -1.00000000000000, 0.54644085696480],
[-0.47792494981044, -1.00000000000000, 0.47792494981044],
[-1.00000000000000, -0.87017829530514, 0.54644085696480],
[-0.84436571464720, -0.84436571464720, 0.53309714394159],
[-0.67626256165966, -0.87017829530514, 0.54644085696480],
[-1.00000000000000, -0.67626256165966, 0.54644085696480],
[-0.87017829530514, -0.67626256165966, 0.54644085696480],
[-1.00000000000000, -0.47792494981044, 0.47792494981044],
[-1.00000000000000, -1.00000000000000, 0.73877386510550],
[-0.88782128766618, -1.00000000000000, 0.77564257533236],
[-0.73877386510550, -1.00000000000000, 0.73877386510550],
[-1.00000000000000, -0.88782128766618, 0.77564257533236],
[-0.88782128766618, -0.88782128766618, 0.77564257533236],
[-1.00000000000000, -0.73877386510550, 0.73877386510550],
[-1.00000000000000, -1.00000000000000, 0.91953390816646],
[-0.91953390816646, -1.00000000000000, 0.91953390816646],
[-1.00000000000000, -0.91953390816646, 0.91953390816646],
])
elif C==9:
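# 286 Fekete nodes.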
feketeNodes = np.array([
[-1.00000000000000, -1.00000000000000, -1.00000000000000],
[1.00000000000000, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, 1.00000000000000, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, 1.00000000000000],
[-0.93400143040806, -1.00000000000000, -1.00000000000000],
[-0.78448347366314, -1.00000000000000, -1.00000000000000],
[-0.56523532699620, -1.00000000000000, -1.00000000000000],
[-0.29575813558694, -1.00000000000000, -1.00000000000000],
[-0.00000000000000, -1.00000000000000, -1.00000000000000],
[0.29575813558694, -1.00000000000000, -1.00000000000000],
[0.56523532699621, -1.00000000000000, -1.00000000000000],
[0.78448347366314, -1.00000000000000, -1.00000000000000],
[0.93400143040806, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, -0.93400143040806, -1.00000000000000],
[-0.90729952697735, -0.90729952697735, -1.00000000000000],
[-0.73152230491144, -0.89101338351023, -1.00000000000000],
[-0.49119920657729, -0.88196956377807, -1.00000000000000],
[-0.20950668229487, -0.87799847850836, -1.00000000000000],
[0.08750516080323, -0.87799847850836, -1.00000000000000],
[0.37316877035536, -0.88196956377807, -1.00000000000000],
[0.62253568842167, -0.89101338351023, -1.00000000000000],
[0.81459905395471, -0.90729952697735, -1.00000000000000],
[0.93400143040806, -0.93400143040806, -1.00000000000000],
[-1.00000000000000, -0.78448347366314, -1.00000000000000],
[-0.89101338351023, -0.73152230491144, -1.00000000000000],
[-0.69900284547384, -0.69900284547384, -1.00000000000000],
[-0.44709790670022, -0.68165204574864, -1.00000000000000],
[-0.16188382733210, -0.67623234533581, -1.00000000000000],
[0.12874995244886, -0.68165204574864, -1.00000000000000],
[0.39800569094768, -0.69900284547384, -1.00000000000000],
[0.62253568842167, -0.73152230491144, -1.00000000000000],
[0.78448347366314, -0.78448347366314, -1.00000000000000],
[-1.00000000000000, -0.56523532699620, -1.00000000000000],
[-0.88196956377807, -0.49119920657729, -1.00000000000000],
[-0.68165204574864, -0.44709790670022, -1.00000000000000],
[-0.42664737762904, -0.42664737762904, -1.00000000000000],
[-0.14670524474192, -0.42664737762904, -1.00000000000000],
[0.12874995244886, -0.44709790670022, -1.00000000000000],
[0.37316877035536, -0.49119920657729, -1.00000000000000],
[0.56523532699620, -0.56523532699620, -1.00000000000000],
[-1.00000000000000, -0.29575813558694, -1.00000000000000],
[-0.87799847850836, -0.20950668229487, -1.00000000000000],
[-0.67623234533581, -0.16188382733210, -1.00000000000000],
[-0.42664737762904, -0.14670524474192, -1.00000000000000],
[-0.16188382733210, -0.16188382733210, -1.00000000000000],
[0.08750516080323, -0.20950668229487, -1.00000000000000],
[0.29575813558694, -0.29575813558694, -1.00000000000000],
[-1.00000000000000, -0.00000000000000, -1.00000000000000],
[-0.87799847850836, 0.08750516080323, -1.00000000000000],
[-0.68165204574864, 0.12874995244886, -1.00000000000000],
[-0.44709790670022, 0.12874995244886, -1.00000000000000],
[-0.20950668229487, 0.08750516080322, -1.00000000000000],
[0.00000000000000, -0.00000000000000, -1.00000000000000],
[-1.00000000000000, 0.29575813558694, -1.00000000000000],
[-0.88196956377807, 0.37316877035535, -1.00000000000000],
[-0.69900284547384, 0.39800569094768, -1.00000000000000],
[-0.49119920657729, 0.37316877035536, -1.00000000000000],
[-0.29575813558694, 0.29575813558694, -1.00000000000000],
[-1.00000000000000, 0.56523532699620, -1.00000000000000],
[-0.89101338351023, 0.62253568842167, -1.00000000000000],
[-0.73152230491144, 0.62253568842167, -1.00000000000000],
[-0.56523532699621, 0.56523532699620, -1.00000000000000],
[-1.00000000000000, 0.78448347366314, -1.00000000000000],
[-0.90729952697735, 0.81459905395471, -1.00000000000000],
[-0.78448347366314, 0.78448347366314, -1.00000000000000],
[-1.00000000000000, 0.93400143040806, -1.00000000000000],
[-0.93400143040806, 0.93400143040806, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, -0.93400143040806],
[-0.90729952697735, -1.00000000000000, -0.90729952697735],
[-0.73152230491144, -1.00000000000000, -0.89101338351023],
[-0.49119920657729, -1.00000000000000, -0.88196956377807],
[-0.20950668229487, -1.00000000000000, -0.87799847850836],
[0.08750516080323, -1.00000000000000, -0.87799847850836],
[0.37316877035536, -1.00000000000000, -0.88196956377807],
[0.62253568842167, -1.00000000000000, -0.89101338351023],
[0.81459905395471, -1.00000000000000, -0.90729952697735],
[0.93400143040806, -1.00000000000000, -0.93400143040806],
[-1.00000000000000, -0.90729952697735, -0.90729952697735],
[-0.86859548568613, -0.86859548568613, -0.86859548568613],
[-0.68921299955661, -0.84680080912920, -0.84680080912920],
[-0.44588737305619, -0.83728581509126, -0.83728581509126],
[-0.16543486454076, -0.83456513545924, -0.83456513545924],
[0.12045900323870, -0.83728581509126, -0.83728581509126],
[0.38281461781500, -0.84680080912920, -0.84680080912920],
[0.60578645705840, -0.86859548568613, -0.86859548568613],
[0.81459905395471, -0.90729952697735, -0.90729952697735],
[-1.00000000000000, -0.73152230491144, -0.89101338351023],
[-0.84680080912920, -0.68921299955661, -0.84680080912920],
[-0.65244925135376, -0.65244925135376, -0.82768715531655],
[-0.40689302311094, -0.63668791072659, -0.82031213075495],
[-0.13610693540751, -0.63668791072659, -0.82031213075495],
[0.13258565802407, -0.65244925135376, -0.82768715531655],
[0.38281461781500, -0.68921299955661, -0.84680080912920],
[0.62253568842167, -0.73152230491144, -0.89101338351023],
[-1.00000000000000, -0.49119920657729, -0.88196956377807],
[-0.83728581509126, -0.44588737305619, -0.83728581509126],
[-0.63668791072659, -0.40689302311095, -0.82031213075495],
[-0.39476381053366, -0.39476381053366, -0.81570856839903],
[-0.13610693540751, -0.40689302311095, -0.82031213075495],
[0.12045900323870, -0.44588737305619, -0.83728581509126],
[0.37316877035536, -0.49119920657729, -0.88196956377807],
[-1.00000000000000, -0.20950668229487, -0.87799847850836],
[-0.83456513545924, -0.16543486454076, -0.83456513545924],
[-0.63668791072659, -0.13610693540751, -0.82031213075495],
[-0.40689302311094, -0.13610693540751, -0.82031213075495],
[-0.16543486454076, -0.16543486454076, -0.83456513545924],
[0.08750516080323, -0.20950668229487, -0.87799847850836],
[-1.00000000000000, 0.08750516080322, -0.87799847850836],
[-0.83728581509126, 0.12045900323870, -0.83728581509126],
[-0.65244925135376, 0.13258565802407, -0.82768715531655],
[-0.44588737305619, 0.12045900323870, -0.83728581509126],
[-0.20950668229487, 0.08750516080322, -0.87799847850836],
[-1.00000000000000, 0.37316877035536, -0.88196956377807],
[-0.84680080912920, 0.38281461781500, -0.84680080912920],
[-0.68921299955661, 0.38281461781500, -0.84680080912920],
[-0.49119920657729, 0.37316877035536, -0.88196956377807],
[-1.00000000000000, 0.62253568842167, -0.89101338351023],
[-0.86859548568613, 0.60578645705840, -0.86859548568613],
[-0.73152230491144, 0.62253568842167, -0.89101338351023],
[-1.00000000000000, 0.81459905395471, -0.90729952697735],
[-0.90729952697735, 0.81459905395471, -0.90729952697735],
[-1.00000000000000, 0.93400143040806, -0.93400143040806],
[-1.00000000000000, -1.00000000000000, -0.78448347366314],
[-0.89101338351023, -1.00000000000000, -0.73152230491144],
[-0.69900284547384, -1.00000000000000, -0.69900284547384],
[-0.44709790670022, -1.00000000000000, -0.68165204574864],
[-0.16188382733210, -1.00000000000000, -0.67623234533581],
[0.12874995244886, -1.00000000000000, -0.68165204574864],
[0.39800569094768, -1.00000000000000, -0.69900284547384],
[0.62253568842167, -1.00000000000000, -0.73152230491144],
[0.78448347366314, -1.00000000000000, -0.78448347366314],
[-1.00000000000000, -0.89101338351023, -0.73152230491144],
[-0.84680080912920, -0.84680080912920, -0.68921299955661],
[-0.65244925135376, -0.82768715531655, -0.65244925135376],
[-0.40689302311095, -0.82031213075495, -0.63668791072659],
[-0.13610693540751, -0.82031213075495, -0.63668791072659],
[0.13258565802407, -0.82768715531655, -0.65244925135376],
[0.38281461781500, -0.84680080912920, -0.68921299955661],
[0.62253568842167, -0.89101338351023, -0.73152230491144],
[-1.00000000000000, -0.69900284547384, -0.69900284547384],
[-0.82768715531655, -0.65244925135376, -0.65244925135376],
[-0.62495499771497, -0.62495499771497, -0.62495499771497],
[-0.38343990765303, -0.61656009234697, -0.61656009234697],
[-0.12513500685510, -0.62495499771497, -0.62495499771497],
[0.13258565802407, -0.65244925135376, -0.65244925135376],
[0.39800569094768, -0.69900284547384, -0.69900284547384],
[-1.00000000000000, -0.44709790670022, -0.68165204574864],
[-0.82031213075495, -0.40689302311095, -0.63668791072659],
[-0.61656009234697, -0.38343990765303, -0.61656009234697],
[-0.38343990765303, -0.38343990765303, -0.61656009234697],
[-0.13610693540751, -0.40689302311095, -0.63668791072659],
[0.12874995244886, -0.44709790670022, -0.68165204574864],
[-1.00000000000000, -0.16188382733210, -0.67623234533581],
[-0.82031213075495, -0.13610693540751, -0.63668791072659],
[-0.62495499771497, -0.12513500685510, -0.62495499771497],
[-0.40689302311094, -0.13610693540751, -0.63668791072659],
[-0.16188382733210, -0.16188382733210, -0.67623234533581],
[-1.00000000000000, 0.12874995244886, -0.68165204574864],
[-0.82768715531655, 0.13258565802407, -0.65244925135376],
[-0.65244925135376, 0.13258565802407, -0.65244925135376],
[-0.44709790670022, 0.12874995244886, -0.68165204574864],
[-1.00000000000000, 0.39800569094768, -0.69900284547384],
[-0.84680080912920, 0.38281461781500, -0.68921299955661],
[-0.69900284547384, 0.39800569094768, -0.69900284547384],
[-1.00000000000000, 0.62253568842167, -0.73152230491144],
[-0.89101338351023, 0.62253568842167, -0.73152230491144],
[-1.00000000000000, 0.78448347366314, -0.78448347366314],
[-1.00000000000000, -1.00000000000000, -0.56523532699621],
[-0.88196956377807, -1.00000000000000, -0.49119920657729],
[-0.68165204574864, -1.00000000000000, -0.44709790670022],
[-0.42664737762904, -1.00000000000000, -0.42664737762904],
[-0.14670524474192, -1.00000000000000, -0.42664737762904],
[0.12874995244886, -1.00000000000000, -0.44709790670022],
[0.37316877035536, -1.00000000000000, -0.49119920657729],
[0.56523532699621, -1.00000000000000, -0.56523532699621],
[-1.00000000000000, -0.88196956377807, -0.49119920657729],
[-0.83728581509126, -0.83728581509126, -0.44588737305619],
[-0.63668791072659, -0.82031213075495, -0.40689302311095],
[-0.39476381053366, -0.81570856839903, -0.39476381053366],
[-0.13610693540751, -0.82031213075495, -0.40689302311095],
[0.12045900323870, -0.83728581509126, -0.44588737305619],
[0.37316877035536, -0.88196956377807, -0.49119920657729],
[-1.00000000000000, -0.68165204574864, -0.44709790670022],
[-0.82031213075495, -0.63668791072659, -0.40689302311095],
[-0.61656009234697, -0.61656009234697, -0.38343990765303],
[-0.38343990765303, -0.61656009234697, -0.38343990765303],
[-0.13610693540751, -0.63668791072659, -0.40689302311095],
[0.12874995244886, -0.68165204574864, -0.44709790670022],
[-1.00000000000000, -0.42664737762904, -0.42664737762904],
[-0.81570856839903, -0.39476381053366, -0.39476381053366],
[-0.61656009234697, -0.38343990765303, -0.38343990765303],
[-0.39476381053366, -0.39476381053366, -0.39476381053366],
[-0.14670524474192, -0.42664737762904, -0.42664737762904],
[-1.00000000000000, -0.14670524474192, -0.42664737762904],
[-0.82031213075495, -0.13610693540751, -0.40689302311095],
[-0.63668791072659, -0.13610693540751, -0.40689302311095],
[-0.42664737762904, -0.14670524474192, -0.42664737762904],
[-1.00000000000000, 0.12874995244886, -0.44709790670022],
[-0.83728581509126, 0.12045900323870, -0.44588737305619],
[-0.68165204574864, 0.12874995244886, -0.44709790670022],
[-1.00000000000000, 0.37316877035536, -0.49119920657729],
[-0.88196956377807, 0.37316877035536, -0.49119920657729],
[-1.00000000000000, 0.56523532699620, -0.56523532699621],
[-1.00000000000000, -1.00000000000000, -0.29575813558694],
[-0.87799847850836, -1.00000000000000, -0.20950668229487],
[-0.67623234533581, -1.00000000000000, -0.16188382733210],
[-0.42664737762904, -1.00000000000000, -0.14670524474192],
[-0.16188382733210, -1.00000000000000, -0.16188382733210],
[0.08750516080323, -1.00000000000000, -0.20950668229487],
[0.29575813558694, -1.00000000000000, -0.29575813558694],
[-1.00000000000000, -0.87799847850836, -0.20950668229487],
[-0.83456513545924, -0.83456513545924, -0.16543486454076],
[-0.63668791072659, -0.82031213075495, -0.13610693540751],
[-0.40689302311095, -0.82031213075495, -0.13610693540751],
[-0.16543486454076, -0.83456513545924, -0.16543486454076],
[0.08750516080323, -0.87799847850836, -0.20950668229487],
[-1.00000000000000, -0.67623234533581, -0.16188382733210],
[-0.82031213075495, -0.63668791072659, -0.13610693540751],
[-0.62495499771497, -0.62495499771497, -0.12513500685510],
[-0.40689302311095, -0.63668791072659, -0.13610693540751],
[-0.16188382733210, -0.67623234533581, -0.16188382733210],
[-1.00000000000000, -0.42664737762904, -0.14670524474192],
[-0.82031213075495, -0.40689302311094, -0.13610693540751],
[-0.63668791072659, -0.40689302311094, -0.13610693540751],
[-0.42664737762904, -0.42664737762904, -0.14670524474192],
[-1.00000000000000, -0.16188382733210, -0.16188382733210],
[-0.83456513545924, -0.16543486454076, -0.16543486454076],
[-0.67623234533581, -0.16188382733210, -0.16188382733210],
[-1.00000000000000, 0.08750516080323, -0.20950668229487],
[-0.87799847850836, 0.08750516080323, -0.20950668229487],
[-1.00000000000000, 0.29575813558694, -0.29575813558694],
[-1.00000000000000, -1.00000000000000, 0.00000000000000],
[-0.87799847850836, -1.00000000000000, 0.08750516080323],
[-0.68165204574864, -1.00000000000000, 0.12874995244886],
[-0.44709790670022, -1.00000000000000, 0.12874995244886],
[-0.20950668229487, -1.00000000000000, 0.08750516080323],
[-0.00000000000000, -1.00000000000000, 0.00000000000000],
[-1.00000000000000, -0.87799847850836, 0.08750516080323],
[-0.83728581509126, -0.83728581509126, 0.12045900323870],
[-0.65244925135376, -0.82768715531655, 0.13258565802407],
[-0.44588737305619, -0.83728581509126, 0.12045900323870],
[-0.20950668229487, -0.87799847850836, 0.08750516080323],
[-1.00000000000000, -0.68165204574864, 0.12874995244886],
[-0.82768715531655, -0.65244925135376, 0.13258565802407],
[-0.65244925135376, -0.65244925135376, 0.13258565802407],
[-0.44709790670022, -0.68165204574864, 0.12874995244886],
[-1.00000000000000, -0.44709790670022, 0.12874995244886],
[-0.83728581509126, -0.44588737305619, 0.12045900323870],
[-0.68165204574864, -0.44709790670022, 0.12874995244886],
[-1.00000000000000, -0.20950668229487, 0.08750516080323],
[-0.87799847850836, -0.20950668229487, 0.08750516080323],
[-1.00000000000000, 0.00000000000000, -0.00000000000000],
[-1.00000000000000, -1.00000000000000, 0.29575813558694],
[-0.88196956377807, -1.00000000000000, 0.37316877035536],
[-0.69900284547384, -1.00000000000000, 0.39800569094768],
[-0.49119920657729, -1.00000000000000, 0.37316877035536],
[-0.29575813558694, -1.00000000000000, 0.29575813558694],
[-1.00000000000000, -0.88196956377807, 0.37316877035536],
[-0.84680080912920, -0.84680080912920, 0.38281461781500],
[-0.68921299955661, -0.84680080912920, 0.38281461781500],
[-0.49119920657729, -0.88196956377807, 0.37316877035536],
[-1.00000000000000, -0.69900284547384, 0.39800569094768],
[-0.84680080912920, -0.68921299955661, 0.38281461781500],
[-0.69900284547384, -0.69900284547384, 0.39800569094768],
[-1.00000000000000, -0.49119920657729, 0.37316877035536],
[-0.88196956377807, -0.49119920657729, 0.37316877035536],
[-1.00000000000000, -0.29575813558694, 0.29575813558694],
[-1.00000000000000, -1.00000000000000, 0.56523532699620],
[-0.89101338351023, -1.00000000000000, 0.62253568842167],
[-0.73152230491144, -1.00000000000000, 0.62253568842167],
[-0.56523532699621, -1.00000000000000, 0.56523532699620],
[-1.00000000000000, -0.89101338351023, 0.62253568842167],
[-0.86859548568613, -0.86859548568613, 0.60578645705840],
[-0.73152230491144, -0.89101338351023, 0.62253568842167],
[-1.00000000000000, -0.73152230491144, 0.62253568842167],
[-0.89101338351023, -0.73152230491144, 0.62253568842167],
[-1.00000000000000, -0.56523532699621, 0.56523532699621],
[-1.00000000000000, -1.00000000000000, 0.78448347366314],
[-0.90729952697735, -1.00000000000000, 0.81459905395471],
[-0.78448347366314, -1.00000000000000, 0.78448347366314],
[-1.00000000000000, -0.90729952697735, 0.81459905395471],
[-0.90729952697735, -0.90729952697735, 0.81459905395471],
[-1.00000000000000, -0.78448347366314, 0.78448347366314],
[-1.00000000000000, -1.00000000000000, 0.93400143040806],
[-0.93400143040806, -1.00000000000000, 0.93400143040806],
[-1.00000000000000, -0.93400143040806, 0.93400143040806],
])
elif C==10:
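# 364 Fekete nodes.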
feketeNodes = np.array([
[-1.00000000000000, -1.00000000000000, -1.00000000000000],
[1.00000000000000, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, 1.00000000000000, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, 1.00000000000000],
[-0.94489927222288, -1.00000000000000, -1.00000000000000],
[-0.81927932164401, -1.00000000000000, -1.00000000000000],
[-0.63287615303186, -1.00000000000000, -1.00000000000000],
[-0.39953094096535, -1.00000000000000, -1.00000000000000],
[-0.13655293285493, -1.00000000000000, -1.00000000000000],
[0.13655293285493, -1.00000000000000, -1.00000000000000],
[0.39953094096535, -1.00000000000000, -1.00000000000000],
[0.63287615303186, -1.00000000000000, -1.00000000000000],
[0.81927932164401, -1.00000000000000, -1.00000000000000],
[0.94489927222288, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, -0.94489927222288, -1.00000000000000],
[-0.92151786043038, -0.92151786043038, -1.00000000000000],
[-0.77429591383879, -0.90410902745121, -1.00000000000000],
[-0.57040226533050, -0.89209241318327, -1.00000000000000],
[-0.32565807981549, -0.88505087202618, -1.00000000000000],
[-0.05863404688654, -0.88273190622692, -1.00000000000000],
[0.21070895184167, -0.88505087202618, -1.00000000000000],
[0.46249467851378, -0.89209241318327, -1.00000000000000],
[0.67840494128999, -0.90410902745121, -1.00000000000000],
[0.84303572086077, -0.92151786043038, -1.00000000000000],
[0.94489927222288, -0.94489927222288, -1.00000000000000],
[-1.00000000000000, -0.81927932164401, -1.00000000000000],
[-0.90410902745121, -0.77429591383879, -1.00000000000000],
[-0.74166110875971, -0.74166110875971, -1.00000000000000],
[-0.52692548127315, -0.72043884349009, -1.00000000000000],
[-0.27740272398980, -0.70998997530133, -1.00000000000000],
[-0.01260730070887, -0.70998997530133, -1.00000000000000],
[0.24736432476324, -0.72043884349009, -1.00000000000000],
[0.48332221751941, -0.74166110875971, -1.00000000000000],
[0.67840494128999, -0.77429591383879, -1.00000000000000],
[0.81927932164401, -0.81927932164401, -1.00000000000000],
[-1.00000000000000, -0.63287615303186, -1.00000000000000],
[-0.89209241318327, -0.57040226533050, -1.00000000000000],
[-0.72043884349009, -0.52692548127315, -1.00000000000000],
[-0.50130644558671, -0.50130644558671, -1.00000000000000],
[-0.25357728582324, -0.49284542835353, -1.00000000000000],
[0.00261289117342, -0.50130644558671, -1.00000000000000],
[0.24736432476324, -0.52692548127315, -1.00000000000000],
[0.46249467851378, -0.57040226533050, -1.00000000000000],
[0.63287615303186, -0.63287615303186, -1.00000000000000],
[-1.00000000000000, -0.39953094096535, -1.00000000000000],
[-0.88505087202618, -0.32565807981549, -1.00000000000000],
[-0.70998997530133, -0.27740272398980, -1.00000000000000],
[-0.49284542835353, -0.25357728582324, -1.00000000000000],
[-0.25357728582324, -0.25357728582324, -1.00000000000000],
[-0.01260730070887, -0.27740272398980, -1.00000000000000],
[0.21070895184167, -0.32565807981549, -1.00000000000000],
[0.39953094096535, -0.39953094096535, -1.00000000000000],
[-1.00000000000000, -0.13655293285493, -1.00000000000000],
[-0.88273190622692, -0.05863404688654, -1.00000000000000],
[-0.70998997530133, -0.01260730070887, -1.00000000000000],
[-0.50130644558671, 0.00261289117342, -1.00000000000000],
[-0.27740272398980, -0.01260730070887, -1.00000000000000],
[-0.05863404688654, -0.05863404688654, -1.00000000000000],
[0.13655293285493, -0.13655293285493, -1.00000000000000],
[-1.00000000000000, 0.13655293285493, -1.00000000000000],
[-0.88505087202618, 0.21070895184167, -1.00000000000000],
[-0.72043884349009, 0.24736432476324, -1.00000000000000],
[-0.52692548127315, 0.24736432476324, -1.00000000000000],
[-0.32565807981549, 0.21070895184167, -1.00000000000000],
[-0.13655293285493, 0.13655293285493, -1.00000000000000],
[-1.00000000000000, 0.39953094096535, -1.00000000000000],
[-0.89209241318327, 0.46249467851378, -1.00000000000000],
[-0.74166110875971, 0.48332221751941, -1.00000000000000],
[-0.57040226533050, 0.46249467851378, -1.00000000000000],
[-0.39953094096535, 0.39953094096535, -1.00000000000000],
[-1.00000000000000, 0.63287615303186, -1.00000000000000],
[-0.90410902745121, 0.67840494128999, -1.00000000000000],
[-0.77429591383879, 0.67840494128999, -1.00000000000000],
[-0.63287615303186, 0.63287615303186, -1.00000000000000],
[-1.00000000000000, 0.81927932164401, -1.00000000000000],
[-0.92151786043038, 0.84303572086077, -1.00000000000000],
[-0.81927932164401, 0.81927932164401, -1.00000000000000],
[-1.00000000000000, 0.94489927222288, -1.00000000000000],
[-0.94489927222288, 0.94489927222288, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, -0.94489927222288],
[-0.92151786043038, -1.00000000000000, -0.92151786043038],
[-0.77429591383879, -1.00000000000000, -0.90410902745121],
[-0.57040226533050, -1.00000000000000, -0.89209241318327],
[-0.32565807981549, -1.00000000000000, -0.88505087202618],
[-0.05863404688654, -1.00000000000000, -0.88273190622692],
[0.21070895184167, -1.00000000000000, -0.88505087202618],
[0.46249467851378, -1.00000000000000, -0.89209241318327],
[0.67840494128999, -1.00000000000000, -0.90410902745121],
[0.84303572086077, -1.00000000000000, -0.92151786043038],
[0.94489927222288, -1.00000000000000, -0.94489927222288],
[-1.00000000000000, -0.92151786043038, -0.92151786043038],
[-0.88661024062098, -0.88661024062098, -0.88661024062098],
[-0.73694173023916, -0.86491347593435, -0.86491347593435],
[-0.52974325723302, -0.85396178016912, -0.85396178016912],
[-0.28267608212559, -0.84919912434147, -0.84919912434147],
[-0.01892566919147, -0.84919912434147, -0.84919912434147],
[0.23766681757125, -0.85396178016912, -0.85396178016912],
[0.46676868210785, -0.86491347593435, -0.86491347593435],
[0.65983072186294, -0.88661024062098, -0.88661024062098],
[0.84303572086077, -0.92151786043038, -0.92151786043038],
[-1.00000000000000, -0.77429591383879, -0.90410902745121],
[-0.86491347593435, -0.73694173023916, -0.86491347593435],
[-0.69929094490663, -0.69929094490663, -0.84651122824852],
[-0.48717651513984, -0.67966769800228, -0.83802132950146],
[-0.24545730477403, -0.67357697905978, -0.83550841139217],
[0.00486554264358, -0.67966769800228, -0.83802132950146],
[0.24509311806177, -0.69929094490663, -0.84651122824852],
[0.46676868210785, -0.73694173023916, -0.86491347593435],
[0.67840494128999, -0.77429591383879, -0.90410902745121],
[-1.00000000000000, -0.57040226533050, -0.89209241318327],
[-0.85396178016912, -0.52974325723302, -0.85396178016912],
[-0.67966769800228, -0.48717651513984, -0.83802132950146],
[-0.46740379986922, -0.46740379986922, -0.83175894856653],
[-0.23343345169504, -0.46740379986922, -0.83175894856653],
[0.00486554264358, -0.48717651513984, -0.83802132950146],
[0.23766681757125, -0.52974325723302, -0.85396178016912],
[0.46249467851378, -0.57040226533050, -0.89209241318327],
[-1.00000000000000, -0.32565807981549, -0.88505087202618],
[-0.84919912434147, -0.28267608212560, -0.84919912434147],
[-0.67357697905978, -0.24545730477403, -0.83550841139217],
[-0.46740379986922, -0.23343345169504, -0.83175894856653],
[-0.24545730477403, -0.24545730477403, -0.83550841139217],
[-0.01892566919147, -0.28267608212559, -0.84919912434147],
[0.21070895184167, -0.32565807981549, -0.88505087202618],
[-1.00000000000000, -0.05863404688654, -0.88273190622692],
[-0.84919912434147, -0.01892566919147, -0.84919912434147],
[-0.67966769800228, 0.00486554264358, -0.83802132950146],
[-0.48717651513984, 0.00486554264358, -0.83802132950146],
[-0.28267608212559, -0.01892566919147, -0.84919912434147],
[-0.05863404688654, -0.05863404688654, -0.88273190622692],
[-1.00000000000000, 0.21070895184167, -0.88505087202618],
[-0.85396178016912, 0.23766681757125, -0.85396178016912],
[-0.69929094490663, 0.24509311806177, -0.84651122824852],
[-0.52974325723302, 0.23766681757125, -0.85396178016912],
[-0.32565807981549, 0.21070895184167, -0.88505087202618],
[-1.00000000000000, 0.46249467851378, -0.89209241318327],
[-0.86491347593435, 0.46676868210785, -0.86491347593435],
[-0.73694173023916, 0.46676868210785, -0.86491347593435],
[-0.57040226533050, 0.46249467851378, -0.89209241318327],
[-1.00000000000000, 0.67840494128999, -0.90410902745121],
[-0.88661024062098, 0.65983072186294, -0.88661024062098],
[-0.77429591383879, 0.67840494128999, -0.90410902745121],
[-1.00000000000000, 0.84303572086077, -0.92151786043038],
[-0.92151786043038, 0.84303572086077, -0.92151786043038],
[-1.00000000000000, 0.94489927222288, -0.94489927222288],
[-1.00000000000000, -1.00000000000000, -0.81927932164401],
[-0.90410902745121, -1.00000000000000, -0.77429591383879],
[-0.74166110875971, -1.00000000000000, -0.74166110875971],
[-0.52692548127315, -1.00000000000000, -0.72043884349009],
[-0.27740272398980, -1.00000000000000, -0.70998997530133],
[-0.01260730070887, -1.00000000000000, -0.70998997530133],
[0.24736432476324, -1.00000000000000, -0.72043884349009],
[0.48332221751941, -1.00000000000000, -0.74166110875971],
[0.67840494128999, -1.00000000000000, -0.77429591383879],
[0.81927932164401, -1.00000000000000, -0.81927932164401],
[-1.00000000000000, -0.90410902745121, -0.77429591383879],
[-0.86491347593435, -0.86491347593435, -0.73694173023916],
[-0.69929094490663, -0.84651122824852, -0.69929094490663],
[-0.48717651513984, -0.83802132950146, -0.67966769800228],
[-0.24545730477403, -0.83550841139217, -0.67357697905978],
[0.00486554264358, -0.83802132950146, -0.67966769800228],
[0.24509311806177, -0.84651122824852, -0.69929094490663],
[0.46676868210785, -0.86491347593435, -0.73694173023916],
[0.67840494128999, -0.90410902745121, -0.77429591383879],
[-1.00000000000000, -0.74166110875971, -0.74166110875971],
[-0.84651122824852, -0.69929094490663, -0.69929094490663],
[-0.67073447278009, -0.67073447278009, -0.67073447278009],
[-0.45884944222678, -0.65778667667481, -0.65778667667481],
[-0.22557720442361, -0.65778667667481, -0.65778667667481],
[0.01220341834026, -0.67073447278009, -0.67073447278009],
[0.24509311806177, -0.69929094490663, -0.69929094490663],
[0.48332221751941, -0.74166110875971, -0.74166110875971],
[-1.00000000000000, -0.52692548127315, -0.72043884349009],
[-0.83802132950146, -0.48717651513984, -0.67966769800228],
[-0.65778667667481, -0.45884944222678, -0.65778667667481],
[-0.44965012855595, -0.44965012855595, -0.65104961433215],
[-0.22557720442361, -0.45884944222678, -0.65778667667481],
[0.00486554264358, -0.48717651513984, -0.67966769800228],
[0.24736432476324, -0.52692548127315, -0.72043884349009],
[-1.00000000000000, -0.27740272398980, -0.70998997530133],
[-0.83550841139217, -0.24545730477403, -0.67357697905978],
[-0.65778667667481, -0.22557720442361, -0.65778667667481],
[-0.45884944222678, -0.22557720442361, -0.65778667667481],
[-0.24545730477403, -0.24545730477403, -0.67357697905978],
[-0.01260730070887, -0.27740272398980, -0.70998997530133],
[-1.00000000000000, -0.01260730070887, -0.70998997530133],
[-0.83802132950146, 0.00486554264358, -0.67966769800228],
[-0.67073447278009, 0.01220341834026, -0.67073447278009],
[-0.48717651513984, 0.00486554264358, -0.67966769800228],
[-0.27740272398980, -0.01260730070887, -0.70998997530133],
[-1.00000000000000, 0.24736432476324, -0.72043884349009],
[-0.84651122824852, 0.24509311806177, -0.69929094490663],
[-0.69929094490663, 0.24509311806177, -0.69929094490663],
[-0.52692548127315, 0.24736432476324, -0.72043884349009],
[-1.00000000000000, 0.48332221751941, -0.74166110875971],
[-0.86491347593435, 0.46676868210785, -0.73694173023916],
[-0.74166110875971, 0.48332221751941, -0.74166110875971],
[-1.00000000000000, 0.67840494128999, -0.77429591383879],
[-0.90410902745121, 0.67840494128999, -0.77429591383879],
[-1.00000000000000, 0.81927932164401, -0.81927932164401],
[-1.00000000000000, -1.00000000000000, -0.63287615303186],
[-0.89209241318327, -1.00000000000000, -0.57040226533050],
[-0.72043884349009, -1.00000000000000, -0.52692548127315],
[-0.50130644558671, -1.00000000000000, -0.50130644558671],
[-0.25357728582324, -1.00000000000000, -0.49284542835353],
[0.00261289117342, -1.00000000000000, -0.50130644558671],
[0.24736432476324, -1.00000000000000, -0.52692548127315],
[0.46249467851378, -1.00000000000000, -0.57040226533050],
[0.63287615303186, -1.00000000000000, -0.63287615303186],
[-1.00000000000000, -0.89209241318327, -0.57040226533050],
[-0.85396178016912, -0.85396178016912, -0.52974325723302],
[-0.67966769800228, -0.83802132950146, -0.48717651513984],
[-0.46740379986922, -0.83175894856653, -0.46740379986922],
[-0.23343345169504, -0.83175894856653, -0.46740379986922],
[0.00486554264358, -0.83802132950146, -0.48717651513984],
[0.23766681757125, -0.85396178016912, -0.52974325723302],
[0.46249467851378, -0.89209241318327, -0.57040226533050],
[-1.00000000000000, -0.72043884349009, -0.52692548127315],
[-0.83802132950146, -0.67966769800228, -0.48717651513984],
[-0.65778667667481, -0.65778667667481, -0.45884944222678],
[-0.44965012855595, -0.65104961433215, -0.44965012855595],
[-0.22557720442361, -0.65778667667481, -0.45884944222678],
[0.00486554264358, -0.67966769800228, -0.48717651513984],
[0.24736432476324, -0.72043884349009, -0.52692548127315],
[-1.00000000000000, -0.50130644558671, -0.50130644558671],
[-0.83175894856653, -0.46740379986922, -0.46740379986922],
[-0.65104961433215, -0.44965012855595, -0.44965012855595],
[-0.44965012855595, -0.44965012855595, -0.44965012855595],
[-0.23343345169504, -0.46740379986922, -0.46740379986922],
[0.00261289117342, -0.50130644558671, -0.50130644558671],
[-1.00000000000000, -0.25357728582324, -0.49284542835353],
[-0.83175894856653, -0.23343345169504, -0.46740379986922],
[-0.65778667667481, -0.22557720442361, -0.45884944222678],
[-0.46740379986922, -0.23343345169504, -0.46740379986922],
[-0.25357728582324, -0.25357728582324, -0.49284542835353],
[-1.00000000000000, 0.00261289117342, -0.50130644558671],
[-0.83802132950146, 0.00486554264358, -0.48717651513984],
[-0.67966769800228, 0.00486554264358, -0.48717651513984],
[-0.50130644558671, 0.00261289117342, -0.50130644558671],
[-1.00000000000000, 0.24736432476324, -0.52692548127315],
[-0.85396178016912, 0.23766681757125, -0.52974325723302],
[-0.72043884349009, 0.24736432476324, -0.52692548127315],
[-1.00000000000000, 0.46249467851378, -0.57040226533050],
[-0.89209241318327, 0.46249467851378, -0.57040226533050],
[-1.00000000000000, 0.63287615303186, -0.63287615303186],
[-1.00000000000000, -1.00000000000000, -0.39953094096535],
[-0.88505087202618, -1.00000000000000, -0.32565807981549],
[-0.70998997530133, -1.00000000000000, -0.27740272398980],
[-0.49284542835353, -1.00000000000000, -0.25357728582324],
[-0.25357728582324, -1.00000000000000, -0.25357728582324],
[-0.01260730070887, -1.00000000000000, -0.27740272398980],
[0.21070895184167, -1.00000000000000, -0.32565807981549],
[0.39953094096535, -1.00000000000000, -0.39953094096535],
[-1.00000000000000, -0.88505087202618, -0.32565807981549],
[-0.84919912434147, -0.84919912434147, -0.28267608212560],
[-0.67357697905978, -0.83550841139217, -0.24545730477403],
[-0.46740379986922, -0.83175894856653, -0.23343345169504],
[-0.24545730477403, -0.83550841139217, -0.24545730477403],
[-0.01892566919147, -0.84919912434147, -0.28267608212559],
[0.21070895184167, -0.88505087202618, -0.32565807981549],
[-1.00000000000000, -0.70998997530133, -0.27740272398980],
[-0.83550841139217, -0.67357697905978, -0.24545730477403],
[-0.65778667667481, -0.65778667667481, -0.22557720442361],
[-0.45884944222678, -0.65778667667481, -0.22557720442361],
[-0.24545730477403, -0.67357697905978, -0.24545730477403],
[-0.01260730070887, -0.70998997530133, -0.27740272398980],
[-1.00000000000000, -0.49284542835353, -0.25357728582324],
[-0.83175894856653, -0.46740379986922, -0.23343345169504],
[-0.65778667667481, -0.45884944222678, -0.22557720442361],
[-0.46740379986922, -0.46740379986922, -0.23343345169504],
[-0.25357728582324, -0.49284542835353, -0.25357728582324],
[-1.00000000000000, -0.25357728582324, -0.25357728582324],
[-0.83550841139217, -0.24545730477403, -0.24545730477403],
[-0.67357697905978, -0.24545730477403, -0.24545730477403],
[-0.49284542835353, -0.25357728582324, -0.25357728582324],
[-1.00000000000000, -0.01260730070887, -0.27740272398980],
[-0.84919912434147, -0.01892566919147, -0.28267608212559],
[-0.70998997530133, -0.01260730070887, -0.27740272398980],
[-1.00000000000000, 0.21070895184167, -0.32565807981549],
[-0.88505087202618, 0.21070895184167, -0.32565807981549],
[-1.00000000000000, 0.39953094096535, -0.39953094096535],
[-1.00000000000000, -1.00000000000000, -0.13655293285493],
[-0.88273190622692, -1.00000000000000, -0.05863404688654],
[-0.70998997530133, -1.00000000000000, -0.01260730070887],
[-0.50130644558671, -1.00000000000000, 0.00261289117342],
[-0.27740272398980, -1.00000000000000, -0.01260730070887],
[-0.05863404688654, -1.00000000000000, -0.05863404688654],
[0.13655293285493, -1.00000000000000, -0.13655293285493],
[-1.00000000000000, -0.88273190622692, -0.05863404688654],
[-0.84919912434147, -0.84919912434147, -0.01892566919147],
[-0.67966769800228, -0.83802132950146, 0.00486554264358],
[-0.48717651513984, -0.83802132950146, 0.00486554264358],
[-0.28267608212559, -0.84919912434147, -0.01892566919147],
[-0.05863404688654, -0.88273190622692, -0.05863404688654],
[-1.00000000000000, -0.70998997530133, -0.01260730070887],
[-0.83802132950146, -0.67966769800228, 0.00486554264358],
[-0.67073447278009, -0.67073447278009, 0.01220341834026],
[-0.48717651513984, -0.67966769800228, 0.00486554264358],
[-0.27740272398980, -0.70998997530133, -0.01260730070887],
[-1.00000000000000, -0.50130644558671, 0.00261289117342],
[-0.83802132950146, -0.48717651513984, 0.00486554264358],
[-0.67966769800228, -0.48717651513984, 0.00486554264358],
[-0.50130644558671, -0.50130644558671, 0.00261289117342],
[-1.00000000000000, -0.27740272398980, -0.01260730070887],
[-0.84919912434147, -0.28267608212560, -0.01892566919147],
[-0.70998997530133, -0.27740272398980, -0.01260730070887],
[-1.00000000000000, -0.05863404688654, -0.05863404688654],
[-0.88273190622692, -0.05863404688654, -0.05863404688654],
[-1.00000000000000, 0.13655293285493, -0.13655293285493],
[-1.00000000000000, -1.00000000000000, 0.13655293285493],
[-0.88505087202618, -1.00000000000000, 0.21070895184167],
[-0.72043884349009, -1.00000000000000, 0.24736432476324],
[-0.52692548127315, -1.00000000000000, 0.24736432476324],
[-0.32565807981549, -1.00000000000000, 0.21070895184167],
[-0.13655293285493, -1.00000000000000, 0.13655293285493],
[-1.00000000000000, -0.88505087202618, 0.21070895184167],
[-0.85396178016912, -0.85396178016912, 0.23766681757125],
[-0.69929094490663, -0.84651122824852, 0.24509311806177],
[-0.52974325723302, -0.85396178016912, 0.23766681757125],
[-0.32565807981549, -0.88505087202618, 0.21070895184167],
[-1.00000000000000, -0.72043884349009, 0.24736432476324],
[-0.84651122824852, -0.69929094490663, 0.24509311806177],
[-0.69929094490663, -0.69929094490663, 0.24509311806177],
[-0.52692548127315, -0.72043884349009, 0.24736432476324],
[-1.00000000000000, -0.52692548127315, 0.24736432476324],
[-0.85396178016912, -0.52974325723302, 0.23766681757125],
[-0.72043884349009, -0.52692548127315, 0.24736432476324],
[-1.00000000000000, -0.32565807981549, 0.21070895184167],
[-0.88505087202618, -0.32565807981549, 0.21070895184167],
[-1.00000000000000, -0.13655293285493, 0.13655293285493],
[-1.00000000000000, -1.00000000000000, 0.39953094096535],
[-0.89209241318327, -1.00000000000000, 0.46249467851378],
[-0.74166110875971, -1.00000000000000, 0.48332221751941],
[-0.57040226533050, -1.00000000000000, 0.46249467851378],
[-0.39953094096535, -1.00000000000000, 0.39953094096535],
[-1.00000000000000, -0.89209241318327, 0.46249467851378],
[-0.86491347593435, -0.86491347593435, 0.46676868210785],
[-0.73694173023916, -0.86491347593435, 0.46676868210785],
[-0.57040226533050, -0.89209241318327, 0.46249467851378],
[-1.00000000000000, -0.74166110875971, 0.48332221751941],
[-0.86491347593435, -0.73694173023916, 0.46676868210785],
[-0.74166110875971, -0.74166110875971, 0.48332221751941],
[-1.00000000000000, -0.57040226533050, 0.46249467851378],
[-0.89209241318327, -0.57040226533050, 0.46249467851378],
[-1.00000000000000, -0.39953094096535, 0.39953094096535],
[-1.00000000000000, -1.00000000000000, 0.63287615303186],
[-0.90410902745121, -1.00000000000000, 0.67840494128999],
[-0.77429591383879, -1.00000000000000, 0.67840494128999],
[-0.63287615303186, -1.00000000000000, 0.63287615303186],
[-1.00000000000000, -0.90410902745121, 0.67840494128999],
[-0.88661024062098, -0.88661024062098, 0.65983072186294],
[-0.77429591383879, -0.90410902745121, 0.67840494128999],
[-1.00000000000000, -0.77429591383879, 0.67840494128999],
[-0.90410902745121, -0.77429591383879, 0.67840494128999],
[-1.00000000000000, -0.63287615303186, 0.63287615303186],
[-1.00000000000000, -1.00000000000000, 0.81927932164401],
[-0.92151786043038, -1.00000000000000, 0.84303572086077],
[-0.81927932164401, -1.00000000000000, 0.81927932164401],
[-1.00000000000000, -0.92151786043038, 0.84303572086077],
[-0.92151786043038, -0.92151786043038, 0.84303572086077],
[-1.00000000000000, -0.81927932164401, 0.81927932164401],
[-1.00000000000000, -1.00000000000000, 0.94489927222288],
[-0.94489927222288, -1.00000000000000, 0.94489927222288],
[-1.00000000000000, -0.94489927222288, 0.94489927222288],
])
elif C == 11:
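# Assumed: C denotes the polynomial order of the nodal set, so this branch
# should hold (C+1)(C+2)(C+3)/6 = 364 Fekete nodes on the reference
# tetrahedron with vertices (-1,-1,-1), (1,-1,-1), (-1,1,-1), (-1,-1,1).
# A sanity check after the array is built could be:
#   assert feketeNodes.shape[0] == (C + 1) * (C + 2) * (C + 3) // 6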
feketeNodes = np.array([
[-1.00000000000000, -1.00000000000000, -1.00000000000000],
[1.00000000000000, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, 1.00000000000000, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, 1.00000000000000],
[-0.95330984664216, -1.00000000000000, -1.00000000000000],
[-0.84634756465187, -1.00000000000000, -1.00000000000000],
[-0.68618846908176, -1.00000000000000, -1.00000000000000],
[-0.48290982109134, -1.00000000000000, -1.00000000000000],
[-0.24928693010624, -1.00000000000000, -1.00000000000000],
[0.00000000000000, -1.00000000000000, -1.00000000000000],
[0.24928693010624, -1.00000000000000, -1.00000000000000],
[0.48290982109134, -1.00000000000000, -1.00000000000000],
[0.68618846908176, -1.00000000000000, -1.00000000000000],
[0.84634756465187, -1.00000000000000, -1.00000000000000],
[0.95330984664216, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, -0.95330984664216, -1.00000000000000],
[-0.93314929852058, -0.93314929852058, -1.00000000000000],
[-0.80749278205939, -0.91728801647872, -1.00000000000000],
[-0.63165500712680, -0.90554479118684, -1.00000000000000],
[-0.41711976131788, -0.89778817877123, -1.00000000000000],
[-0.17764768825164, -0.89393150746962, -1.00000000000000],
[0.07157919572125, -0.89393150746962, -1.00000000000000],
[0.31490794008910, -0.89778817877123, -1.00000000000000],
[0.53719979831364, -0.90554479118684, -1.00000000000000],
[0.72478079853811, -0.91728801647872, -1.00000000000000],
[0.86629859704117, -0.93314929852058, -1.00000000000000],
[0.95330984664216, -0.95330984664216, -1.00000000000000],
[-1.00000000000000, -0.84634756465187, -1.00000000000000],
[-0.91728801647872, -0.80749278205939, -1.00000000000000],
[-0.77762756291359, -0.77762756291359, -1.00000000000000],
[-0.59112372335382, -0.75648843684164, -1.00000000000000],
[-0.37045870845417, -0.74388329902467, -1.00000000000000],
[-0.13015261776080, -0.73969476447840, -1.00000000000000],
[0.11434200747884, -0.74388329902467, -1.00000000000000],
[0.34761216019546, -0.75648843684164, -1.00000000000000],
[0.55525512582717, -0.77762756291359, -1.00000000000000],
[0.72478079853811, -0.80749278205939, -1.00000000000000],
[0.84634756465187, -0.84634756465187, -1.00000000000000],
[-1.00000000000000, -0.68618846908176, -1.00000000000000],
[-0.90554479118684, -0.63165500712680, -1.00000000000000],
[-0.75648843684164, -0.59112372335382, -1.00000000000000],
[-0.56428255450949, -0.56428255450949, -1.00000000000000],
[-0.34259753977192, -0.55091672680826, -1.00000000000000],
[-0.10648573341982, -0.55091672680826, -1.00000000000000],
[0.12856510901897, -0.56428255450949, -1.00000000000000],
[0.34761216019546, -0.59112372335382, -1.00000000000000],
[0.53719979831364, -0.63165500712680, -1.00000000000000],
[0.68618846908176, -0.68618846908176, -1.00000000000000],
[-1.00000000000000, -0.48290982109134, -1.00000000000000],
[-0.89778817877123, -0.41711976131788, -1.00000000000000],
[-0.74388329902467, -0.37045870845417, -1.00000000000000],
[-0.55091672680826, -0.34259753977192, -1.00000000000000],
[-0.33333333333333, -0.33333333333333, -1.00000000000000],
[-0.10648573341982, -0.34259753977192, -1.00000000000000],
[0.11434200747884, -0.37045870845417, -1.00000000000000],
[0.31490794008910, -0.41711976131788, -1.00000000000000],
[0.48290982109134, -0.48290982109134, -1.00000000000000],
[-1.00000000000000, -0.24928693010624, -1.00000000000000],
[-0.89393150746962, -0.17764768825164, -1.00000000000000],
[-0.73969476447840, -0.13015261776080, -1.00000000000000],
[-0.55091672680826, -0.10648573341982, -1.00000000000000],
[-0.34259753977192, -0.10648573341982, -1.00000000000000],
[-0.13015261776080, -0.13015261776080, -1.00000000000000],
[0.07157919572125, -0.17764768825164, -1.00000000000000],
[0.24928693010624, -0.24928693010624, -1.00000000000000],
[-1.00000000000000, 0.00000000000000, -1.00000000000000],
[-0.89393150746961, 0.07157919572125, -1.00000000000000],
[-0.74388329902467, 0.11434200747884, -1.00000000000000],
[-0.56428255450949, 0.12856510901897, -1.00000000000000],
[-0.37045870845417, 0.11434200747884, -1.00000000000000],
[-0.17764768825164, 0.07157919572125, -1.00000000000000],
[0.00000000000000, 0.00000000000000, -1.00000000000000],
[-1.00000000000000, 0.24928693010624, -1.00000000000000],
[-0.89778817877123, 0.31490794008910, -1.00000000000000],
[-0.75648843684164, 0.34761216019546, -1.00000000000000],
[-0.59112372335382, 0.34761216019546, -1.00000000000000],
[-0.41711976131788, 0.31490794008910, -1.00000000000000],
[-0.24928693010624, 0.24928693010624, -1.00000000000000],
[-1.00000000000000, 0.48290982109134, -1.00000000000000],
[-0.90554479118684, 0.53719979831364, -1.00000000000000],
[-0.77762756291359, 0.55525512582717, -1.00000000000000],
[-0.63165500712680, 0.53719979831364, -1.00000000000000],
[-0.48290982109134, 0.48290982109134, -1.00000000000000],
[-1.00000000000000, 0.68618846908176, -1.00000000000000],
[-0.91728801647872, 0.72478079853811, -1.00000000000000],
[-0.80749278205939, 0.72478079853811, -1.00000000000000],
[-0.68618846908176, 0.68618846908176, -1.00000000000000],
[-1.00000000000000, 0.84634756465187, -1.00000000000000],
[-0.93314929852058, 0.86629859704117, -1.00000000000000],
[-0.84634756465187, 0.84634756465187, -1.00000000000000],
[-1.00000000000000, 0.95330984664216, -1.00000000000000],
[-0.95330984664216, 0.95330984664216, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, -0.95330984664216],
[-0.93314929852058, -1.00000000000000, -0.93314929852058],
[-0.80749278205939, -1.00000000000000, -0.91728801647872],
[-0.63165500712680, -1.00000000000000, -0.90554479118684],
[-0.41711976131788, -1.00000000000000, -0.89778817877123],
[-0.17764768825164, -1.00000000000000, -0.89393150746962],
[0.07157919572125, -1.00000000000000, -0.89393150746962],
[0.31490794008910, -1.00000000000000, -0.89778817877123],
[0.53719979831364, -1.00000000000000, -0.90554479118684],
[0.72478079853811, -1.00000000000000, -0.91728801647872],
[0.86629859704117, -1.00000000000000, -0.93314929852058],
[0.95330984664216, -1.00000000000000, -0.95330984664216],
[-1.00000000000000, -0.93314929852058, -0.93314929852058],
[-0.90161653879272, -0.90161653879272, -0.90161653879272],
[-0.77508380848694, -0.88110342063780, -0.88110342063780],
[-0.59656429957776, -0.87011349861293, -0.87011349861293],
[-0.37818640578667, -0.86444870279201, -0.86444870279201],
[-0.13732266869294, -0.86267733130706, -0.86267733130706],
[0.10708381137069, -0.86444870279201, -0.86444870279201],
[0.33679129680362, -0.87011349861293, -0.87011349861293],
[0.53729064976254, -0.88110342063780, -0.88110342063780],
[0.70484961637815, -0.90161653879272, -0.90161653879272],
[0.86629859704117, -0.93314929852058, -0.93314929852058],
[-1.00000000000000, -0.80749278205939, -0.91728801647872],
[-0.88110342063780, -0.77508380848694, -0.88110342063780],
[-0.73854061486702, -0.73854061486702, -0.86344693882162],
[-0.55312273716793, -0.71759540924990, -0.85457290260458],
[-0.33645081609683, -0.70800860312878, -0.85074609043347],
[-0.10479449034092, -0.70800860312878, -0.85074609043347],
[0.12529104902241, -0.71759540924990, -0.85457290260458],
[0.34052816855565, -0.73854061486702, -0.86344693882162],
[0.53729064976254, -0.77508380848694, -0.88110342063780],
[0.72478079853811, -0.80749278205939, -0.91728801647872],
[-1.00000000000000, -0.63165500712680, -0.90554479118684],
[-0.87011349861293, -0.59656429957776, -0.87011349861293],
[-0.71759540924990, -0.55312273716793, -0.85457290260458],
[-0.52919780292488, -0.52919780292488, -0.84747798944968],
[-0.31650668955869, -0.52160010820411, -0.84538651267852],
[-0.09412640470056, -0.52919780292488, -0.84747798944968],
[0.12529104902241, -0.55312273716793, -0.85457290260458],
[0.33679129680362, -0.59656429957776, -0.87011349861293],
[0.53719979831364, -0.63165500712680, -0.90554479118684],
[-1.00000000000000, -0.41711976131788, -0.89778817877123],
[-0.86444870279201, -0.37818640578667, -0.86444870279201],
[-0.70800860312878, -0.33645081609683, -0.85074609043347],
[-0.52160010820411, -0.31650668955869, -0.84538651267852],
[-0.31650668955869, -0.31650668955869, -0.84538651267852],
[-0.10479449034092, -0.33645081609683, -0.85074609043347],
[0.10708381137069, -0.37818640578667, -0.86444870279201],
[0.31490794008910, -0.41711976131788, -0.89778817877123],
[-1.00000000000000, -0.17764768825164, -0.89393150746962],
[-0.86267733130706, -0.13732266869294, -0.86267733130706],
[-0.70800860312878, -0.10479449034092, -0.85074609043347],
[-0.52919780292488, -0.09412640470056, -0.84747798944968],
[-0.33645081609683, -0.10479449034092, -0.85074609043347],
[-0.13732266869294, -0.13732266869294, -0.86267733130706],
[0.07157919572125, -0.17764768825164, -0.89393150746962],
[-1.00000000000000, 0.07157919572125, -0.89393150746962],
[-0.86444870279201, 0.10708381137068, -0.86444870279201],
[-0.71759540924990, 0.12529104902241, -0.85457290260458],
[-0.55312273716793, 0.12529104902241, -0.85457290260458],
[-0.37818640578667, 0.10708381137068, -0.86444870279201],
[-0.17764768825164, 0.07157919572125, -0.89393150746962],
[-1.00000000000000, 0.31490794008910, -0.89778817877123],
[-0.87011349861293, 0.33679129680362, -0.87011349861293],
[-0.73854061486702, 0.34052816855565, -0.86344693882162],
[-0.59656429957776, 0.33679129680362, -0.87011349861293],
[-0.41711976131788, 0.31490794008910, -0.89778817877123],
[-1.00000000000000, 0.53719979831364, -0.90554479118684],
[-0.88110342063780, 0.53729064976254, -0.88110342063780],
[-0.77508380848694, 0.53729064976254, -0.88110342063780],
[-0.63165500712680, 0.53719979831364, -0.90554479118684],
[-1.00000000000000, 0.72478079853811, -0.91728801647872],
[-0.90161653879272, 0.70484961637815, -0.90161653879272],
[-0.80749278205939, 0.72478079853811, -0.91728801647872],
[-1.00000000000000, 0.86629859704117, -0.93314929852058],
[-0.93314929852058, 0.86629859704117, -0.93314929852058],
[-1.00000000000000, 0.95330984664216, -0.95330984664216],
[-1.00000000000000, -1.00000000000000, -0.84634756465187],
[-0.91728801647872, -1.00000000000000, -0.80749278205939],
[-0.77762756291359, -1.00000000000000, -0.77762756291359],
[-0.59112372335382, -1.00000000000000, -0.75648843684164],
[-0.37045870845417, -1.00000000000000, -0.74388329902467],
[-0.13015261776080, -1.00000000000000, -0.73969476447840],
[0.11434200747884, -1.00000000000000, -0.74388329902467],
[0.34761216019546, -1.00000000000000, -0.75648843684164],
[0.55525512582717, -1.00000000000000, -0.77762756291359],
[0.72478079853811, -1.00000000000000, -0.80749278205939],
[0.84634756465187, -1.00000000000000, -0.84634756465187],
[-1.00000000000000, -0.91728801647872, -0.80749278205939],
[-0.88110342063780, -0.88110342063780, -0.77508380848694],
[-0.73854061486702, -0.86344693882162, -0.73854061486702],
[-0.55312273716793, -0.85457290260458, -0.71759540924990],
[-0.33645081609683, -0.85074609043347, -0.70800860312878],
[-0.10479449034092, -0.85074609043347, -0.70800860312878],
[0.12529104902241, -0.85457290260458, -0.71759540924990],
[0.34052816855565, -0.86344693882162, -0.73854061486702],
[0.53729064976254, -0.88110342063780, -0.77508380848694],
[0.72478079853811, -0.91728801647872, -0.80749278205939],
[-1.00000000000000, -0.77762756291359, -0.77762756291359],
[-0.86344693882162, -0.73854061486702, -0.73854061486702],
[-0.70988989486536, -0.70988989486536, -0.70988989486536],
[-0.52208922493361, -0.69452976296383, -0.69452976296383],
[-0.31028394193009, -0.68971605806991, -0.68971605806991],
[-0.08885124913873, -0.69452976296383, -0.69452976296383],
[0.12966968459608, -0.70988989486536, -0.70988989486536],
[0.34052816855565, -0.73854061486702, -0.73854061486702],
[0.55525512582717, -0.77762756291359, -0.77762756291359],
[-1.00000000000000, -0.59112372335382, -0.75648843684164],
[-0.85457290260458, -0.55312273716793, -0.71759540924990],
[-0.69452976296383, -0.52208922493361, -0.69452976296383],
[-0.50715376456902, -0.50715376456902, -0.68412224658597],
[-0.30157022427599, -0.50715376456902, -0.68412224658597],
[-0.08885124913873, -0.52208922493361, -0.69452976296383],
[0.12529104902241, -0.55312273716793, -0.71759540924990],
[0.34761216019546, -0.59112372335382, -0.75648843684164],
[-1.00000000000000, -0.37045870845417, -0.74388329902467],
[-0.85074609043347, -0.33645081609683, -0.70800860312878],
[-0.68971605806991, -0.31028394193009, -0.68971605806991],
[-0.50715376456902, -0.30157022427599, -0.68412224658597],
[-0.31028394193009, -0.31028394193009, -0.68971605806991],
[-0.10479449034092, -0.33645081609683, -0.70800860312878],
[0.11434200747884, -0.37045870845417, -0.74388329902467],
[-1.00000000000000, -0.13015261776080, -0.73969476447840],
[-0.85074609043347, -0.10479449034092, -0.70800860312878],
[-0.69452976296383, -0.08885124913873, -0.69452976296383],
[-0.52208922493361, -0.08885124913873, -0.69452976296383],
[-0.33645081609683, -0.10479449034092, -0.70800860312878],
[-0.13015261776080, -0.13015261776080, -0.73969476447840],
[-1.00000000000000, 0.11434200747884, -0.74388329902467],
[-0.85457290260458, 0.12529104902241, -0.71759540924990],
[-0.70988989486536, 0.12966968459608, -0.70988989486536],
[-0.55312273716793, 0.12529104902241, -0.71759540924990],
[-0.37045870845417, 0.11434200747884, -0.74388329902467],
[-1.00000000000000, 0.34761216019546, -0.75648843684164],
[-0.86344693882162, 0.34052816855565, -0.73854061486702],
[-0.73854061486702, 0.34052816855565, -0.73854061486702],
[-0.59112372335382, 0.34761216019546, -0.75648843684164],
[-1.00000000000000, 0.55525512582717, -0.77762756291359],
[-0.88110342063780, 0.53729064976254, -0.77508380848694],
[-0.77762756291359, 0.55525512582717, -0.77762756291359],
[-1.00000000000000, 0.72478079853811, -0.80749278205939],
[-0.91728801647872, 0.72478079853811, -0.80749278205939],
[-1.00000000000000, 0.84634756465187, -0.84634756465187],
[-1.00000000000000, -1.00000000000000, -0.68618846908176],
[-0.90554479118684, -1.00000000000000, -0.63165500712680],
[-0.75648843684164, -1.00000000000000, -0.59112372335382],
[-0.56428255450949, -1.00000000000000, -0.56428255450949],
[-0.34259753977192, -1.00000000000000, -0.55091672680826],
[-0.10648573341982, -1.00000000000000, -0.55091672680826],
[0.12856510901897, -1.00000000000000, -0.56428255450949],
[0.34761216019546, -1.00000000000000, -0.59112372335382],
[0.53719979831364, -1.00000000000000, -0.63165500712680],
[0.68618846908176, -1.00000000000000, -0.68618846908176],
[-1.00000000000000, -0.90554479118684, -0.63165500712680],
[-0.87011349861293, -0.87011349861293, -0.59656429957776],
[-0.71759540924990, -0.85457290260458, -0.55312273716793],
[-0.52919780292488, -0.84747798944968, -0.52919780292488],
[-0.31650668955869, -0.84538651267852, -0.52160010820411],
[-0.09412640470056, -0.84747798944968, -0.52919780292488],
[0.12529104902241, -0.85457290260458, -0.55312273716793],
[0.33679129680362, -0.87011349861293, -0.59656429957776],
[0.53719979831364, -0.90554479118684, -0.63165500712680],
[-1.00000000000000, -0.75648843684164, -0.59112372335382],
[-0.85457290260458, -0.71759540924990, -0.55312273716793],
[-0.69452976296383, -0.69452976296383, -0.52208922493361],
[-0.50715376456902, -0.68412224658597, -0.50715376456902],
[-0.30157022427599, -0.68412224658597, -0.50715376456902],
[-0.08885124913873, -0.69452976296383, -0.52208922493361],
[0.12529104902241, -0.71759540924990, -0.55312273716793],
[0.34761216019546, -0.75648843684164, -0.59112372335382],
[-1.00000000000000, -0.56428255450949, -0.56428255450949],
[-0.84747798944968, -0.52919780292488, -0.52919780292488],
[-0.68412224658597, -0.50715376456902, -0.50715376456902],
[-0.50000000000000, -0.50000000000000, -0.50000000000000],
[-0.30157022427599, -0.50715376456902, -0.50715376456902],
[-0.09412640470056, -0.52919780292488, -0.52919780292488],
[0.12856510901897, -0.56428255450949, -0.56428255450949],
[-1.00000000000000, -0.34259753977192, -0.55091672680826],
[-0.84538651267852, -0.31650668955869, -0.52160010820411],
[-0.68412224658597, -0.30157022427599, -0.50715376456902],
[-0.50715376456902, -0.30157022427599, -0.50715376456902],
[-0.31650668955869, -0.31650668955869, -0.52160010820411],
[-0.10648573341982, -0.34259753977192, -0.55091672680826],
[-1.00000000000000, -0.10648573341982, -0.55091672680826],
[-0.84747798944968, -0.09412640470056, -0.52919780292488],
[-0.69452976296383, -0.08885124913873, -0.52208922493361],
[-0.52919780292488, -0.09412640470056, -0.52919780292488],
[-0.34259753977192, -0.10648573341982, -0.55091672680826],
[-1.00000000000000, 0.12856510901897, -0.56428255450949],
[-0.85457290260458, 0.12529104902241, -0.55312273716793],
[-0.71759540924990, 0.12529104902241, -0.55312273716793],
[-0.56428255450949, 0.12856510901897, -0.56428255450949],
[-1.00000000000000, 0.34761216019546, -0.59112372335382],
[-0.87011349861293, 0.33679129680362, -0.59656429957776],
[-0.75648843684164, 0.34761216019546, -0.59112372335382],
[-1.00000000000000, 0.53719979831364, -0.63165500712680],
[-0.90554479118684, 0.53719979831364, -0.63165500712680],
[-1.00000000000000, 0.68618846908176, -0.68618846908176],
[-1.00000000000000, -1.00000000000000, -0.48290982109134],
[-0.89778817877123, -1.00000000000000, -0.41711976131788],
[-0.74388329902467, -1.00000000000000, -0.37045870845417],
[-0.55091672680826, -1.00000000000000, -0.34259753977192],
[-0.33333333333333, -1.00000000000000, -0.33333333333333],
[-0.10648573341982, -1.00000000000000, -0.34259753977192],
[0.11434200747884, -1.00000000000000, -0.37045870845417],
[0.31490794008910, -1.00000000000000, -0.41711976131788],
[0.48290982109134, -1.00000000000000, -0.48290982109134],
[-1.00000000000000, -0.89778817877123, -0.41711976131788],
[-0.86444870279201, -0.86444870279201, -0.37818640578667],
[-0.70800860312878, -0.85074609043347, -0.33645081609683],
[-0.52160010820411, -0.84538651267852, -0.31650668955869],
[-0.31650668955869, -0.84538651267852, -0.31650668955869],
[-0.10479449034092, -0.85074609043347, -0.33645081609683],
[0.10708381137069, -0.86444870279201, -0.37818640578667],
[0.31490794008910, -0.89778817877123, -0.41711976131788],
[-1.00000000000000, -0.74388329902467, -0.37045870845417],
[-0.85074609043347, -0.70800860312878, -0.33645081609683],
[-0.68971605806991, -0.68971605806991, -0.31028394193009],
[-0.50715376456902, -0.68412224658597, -0.30157022427599],
[-0.31028394193009, -0.68971605806991, -0.31028394193009],
[-0.10479449034092, -0.70800860312878, -0.33645081609683],
[0.11434200747884, -0.74388329902467, -0.37045870845417],
[-1.00000000000000, -0.55091672680826, -0.34259753977192],
[-0.84538651267852, -0.52160010820411, -0.31650668955869],
[-0.68412224658597, -0.50715376456902, -0.30157022427599],
[-0.50715376456902, -0.50715376456902, -0.30157022427599],
[-0.31650668955869, -0.52160010820411, -0.31650668955869],
[-0.10648573341982, -0.55091672680826, -0.34259753977192],
[-1.00000000000000, -0.33333333333333, -0.33333333333333],
[-0.84538651267852, -0.31650668955869, -0.31650668955869],
[-0.68971605806991, -0.31028394193009, -0.31028394193009],
[-0.52160010820411, -0.31650668955869, -0.31650668955869],
[-0.33333333333333, -0.33333333333333, -0.33333333333333],
[-1.00000000000000, -0.10648573341982, -0.34259753977192],
[-0.85074609043347, -0.10479449034092, -0.33645081609683],
[-0.70800860312878, -0.10479449034092, -0.33645081609683],
[-0.55091672680826, -0.10648573341982, -0.34259753977192],
[-1.00000000000000, 0.11434200747884, -0.37045870845417],
[-0.86444870279201, 0.10708381137068, -0.37818640578667],
[-0.74388329902467, 0.11434200747884, -0.37045870845417],
[-1.00000000000000, 0.31490794008910, -0.41711976131788],
[-0.89778817877123, 0.31490794008910, -0.41711976131788],
[-1.00000000000000, 0.48290982109134, -0.48290982109134],
[-1.00000000000000, -1.00000000000000, -0.24928693010624],
[-0.89393150746962, -1.00000000000000, -0.17764768825164],
[-0.73969476447840, -1.00000000000000, -0.13015261776080],
[-0.55091672680826, -1.00000000000000, -0.10648573341982],
[-0.34259753977192, -1.00000000000000, -0.10648573341982],
[-0.13015261776080, -1.00000000000000, -0.13015261776080],
[0.07157919572125, -1.00000000000000, -0.17764768825164],
[0.24928693010624, -1.00000000000000, -0.24928693010624],
[-1.00000000000000, -0.89393150746962, -0.17764768825164],
[-0.86267733130706, -0.86267733130706, -0.13732266869294],
[-0.70800860312878, -0.85074609043347, -0.10479449034092],
[-0.52919780292488, -0.84747798944968, -0.09412640470056],
[-0.33645081609683, -0.85074609043347, -0.10479449034092],
[-0.13732266869294, -0.86267733130706, -0.13732266869294],
[0.07157919572125, -0.89393150746962, -0.17764768825164],
[-1.00000000000000, -0.73969476447840, -0.13015261776080],
[-0.85074609043347, -0.70800860312878, -0.10479449034092],
[-0.69452976296383, -0.69452976296383, -0.08885124913873],
[-0.52208922493361, -0.69452976296383, -0.08885124913873],
[-0.33645081609683, -0.70800860312878, -0.10479449034092],
[-0.13015261776080, -0.73969476447840, -0.13015261776080],
[-1.00000000000000, -0.55091672680826, -0.10648573341982],
[-0.84747798944968, -0.52919780292488, -0.09412640470056],
[-0.69452976296383, -0.52208922493361, -0.08885124913873],
[-0.52919780292488, -0.52919780292488, -0.09412640470056],
[-0.34259753977192, -0.55091672680826, -0.10648573341982],
[-1.00000000000000, -0.34259753977192, -0.10648573341982],
[-0.85074609043347, -0.33645081609683, -0.10479449034092],
[-0.70800860312878, -0.33645081609683, -0.10479449034092],
[-0.55091672680826, -0.34259753977192, -0.10648573341982],
[-1.00000000000000, -0.13015261776080, -0.13015261776080],
[-0.86267733130706, -0.13732266869294, -0.13732266869294],
[-0.73969476447840, -0.13015261776080, -0.13015261776080],
[-1.00000000000000, 0.07157919572125, -0.17764768825164],
[-0.89393150746962, 0.07157919572125, -0.17764768825164],
[-1.00000000000000, 0.24928693010624, -0.24928693010624],
[-1.00000000000000, -1.00000000000000, -0.00000000000000],
[-0.89393150746961, -1.00000000000000, 0.07157919572125],
[-0.74388329902467, -1.00000000000000, 0.11434200747884],
[-0.56428255450949, -1.00000000000000, 0.12856510901897],
[-0.37045870845417, -1.00000000000000, 0.11434200747884],
[-0.17764768825164, -1.00000000000000, 0.07157919572125],
[0.00000000000000, -1.00000000000000, -0.00000000000000],
[-1.00000000000000, -0.89393150746961, 0.07157919572125],
[-0.86444870279201, -0.86444870279201, 0.10708381137068],
[-0.71759540924990, -0.85457290260458, 0.12529104902241],
[-0.55312273716793, -0.85457290260458, 0.12529104902241],
[-0.37818640578667, -0.86444870279201, 0.10708381137068],
[-0.17764768825164, -0.89393150746961, 0.07157919572125],
[-1.00000000000000, -0.74388329902467, 0.11434200747884],
[-0.85457290260458, -0.71759540924990, 0.12529104902241],
[-0.70988989486536, -0.70988989486536, 0.12966968459608],
[-0.55312273716793, -0.71759540924990, 0.12529104902241],
[-0.37045870845417, -0.74388329902467, 0.11434200747884],
[-1.00000000000000, -0.56428255450949, 0.12856510901897],
[-0.85457290260458, -0.55312273716793, 0.12529104902241],
[-0.71759540924990, -0.55312273716793, 0.12529104902241],
[-0.56428255450949, -0.56428255450949, 0.12856510901897],
[-1.00000000000000, -0.37045870845417, 0.11434200747884],
[-0.86444870279201, -0.37818640578667, 0.10708381137068],
[-0.74388329902467, -0.37045870845417, 0.11434200747884],
[-1.00000000000000, -0.17764768825164, 0.07157919572125],
[-0.89393150746962, -0.17764768825164, 0.07157919572125],
[-1.00000000000000, -0.00000000000000, 0.00000000000000],
[-1.00000000000000, -1.00000000000000, 0.24928693010624],
[-0.89778817877123, -1.00000000000000, 0.31490794008910],
[-0.75648843684164, -1.00000000000000, 0.34761216019546],
[-0.59112372335382, -1.00000000000000, 0.34761216019546],
[-0.41711976131788, -1.00000000000000, 0.31490794008910],
[-0.24928693010624, -1.00000000000000, 0.24928693010624],
[-1.00000000000000, -0.89778817877123, 0.31490794008910],
[-0.87011349861293, -0.87011349861293, 0.33679129680362],
[-0.73854061486702, -0.86344693882162, 0.34052816855565],
[-0.59656429957776, -0.87011349861293, 0.33679129680362],
[-0.41711976131788, -0.89778817877123, 0.31490794008910],
[-1.00000000000000, -0.75648843684164, 0.34761216019546],
[-0.86344693882162, -0.73854061486702, 0.34052816855565],
[-0.73854061486702, -0.73854061486702, 0.34052816855565],
[-0.59112372335382, -0.75648843684164, 0.34761216019546],
[-1.00000000000000, -0.59112372335382, 0.34761216019546],
[-0.87011349861293, -0.59656429957776, 0.33679129680362],
[-0.75648843684164, -0.59112372335382, 0.34761216019546],
[-1.00000000000000, -0.41711976131788, 0.31490794008910],
[-0.89778817877123, -0.41711976131788, 0.31490794008910],
[-1.00000000000000, -0.24928693010624, 0.24928693010624],
[-1.00000000000000, -1.00000000000000, 0.48290982109134],
[-0.90554479118684, -1.00000000000000, 0.53719979831364],
[-0.77762756291359, -1.00000000000000, 0.55525512582717],
[-0.63165500712680, -1.00000000000000, 0.53719979831364],
[-0.48290982109134, -1.00000000000000, 0.48290982109134],
[-1.00000000000000, -0.90554479118684, 0.53719979831364],
[-0.88110342063780, -0.88110342063780, 0.53729064976254],
[-0.77508380848694, -0.88110342063780, 0.53729064976254],
[-0.63165500712680, -0.90554479118684, 0.53719979831364],
[-1.00000000000000, -0.77762756291359, 0.55525512582717],
[-0.88110342063780, -0.77508380848694, 0.53729064976254],
[-0.77762756291359, -0.77762756291359, 0.55525512582717],
[-1.00000000000000, -0.63165500712680, 0.53719979831364],
[-0.90554479118684, -0.63165500712680, 0.53719979831364],
[-1.00000000000000, -0.48290982109134, 0.48290982109134],
[-1.00000000000000, -1.00000000000000, 0.68618846908176],
[-0.91728801647872, -1.00000000000000, 0.72478079853811],
[-0.80749278205939, -1.00000000000000, 0.72478079853811],
[-0.68618846908176, -1.00000000000000, 0.68618846908176],
[-1.00000000000000, -0.91728801647872, 0.72478079853811],
[-0.90161653879272, -0.90161653879272, 0.70484961637815],
[-0.80749278205939, -0.91728801647872, 0.72478079853811],
[-1.00000000000000, -0.80749278205939, 0.72478079853811],
[-0.91728801647872, -0.80749278205939, 0.72478079853811],
[-1.00000000000000, -0.68618846908176, 0.68618846908176],
[-1.00000000000000, -1.00000000000000, 0.84634756465187],
[-0.93314929852058, -1.00000000000000, 0.86629859704117],
[-0.84634756465187, -1.00000000000000, 0.84634756465187],
[-1.00000000000000, -0.93314929852058, 0.86629859704117],
[-0.93314929852058, -0.93314929852058, 0.86629859704117],
[-1.00000000000000, -0.84634756465187, 0.84634756465187],
[-1.00000000000000, -1.00000000000000, 0.95330984664216],
[-0.95330984664216, -1.00000000000000, 0.95330984664216],
[-1.00000000000000, -0.95330984664216, 0.95330984664216],
])
elif C == 12:
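# Assumed: for order C = 12 the same construction gives 13*14*15/6 = 455
# Fekete nodes on the same reference tetrahedron; the shape assertion noted
# in the C == 11 branch applies here as well.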
feketeNodes = np.array([
[-1.00000000000000, -1.00000000000000, -1.00000000000000],
[1.00000000000000, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, 1.00000000000000, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, 1.00000000000000],
[-0.95993504526726, -1.00000000000000, -1.00000000000000],
[-0.86780105383035, -1.00000000000000, -1.00000000000000],
[-0.72886859909133, -1.00000000000000, -1.00000000000000],
[-0.55063940292865, -1.00000000000000, -1.00000000000000],
[-0.34272401334271, -1.00000000000000, -1.00000000000000],
[-0.11633186888370, -1.00000000000000, -1.00000000000000],
[0.11633186888370, -1.00000000000000, -1.00000000000000],
[0.34272401334271, -1.00000000000000, -1.00000000000000],
[0.55063940292865, -1.00000000000000, -1.00000000000000],
[0.72886859909133, -1.00000000000000, -1.00000000000000],
[0.86780105383035, -1.00000000000000, -1.00000000000000],
[0.95993504526726, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, -0.95993504526726, -1.00000000000000],
[-0.94280318420554, -0.94280318420554, -1.00000000000000],
[-0.83375815972819, -0.93015181941672, -1.00000000000000],
[-0.67967319908614, -0.92117037483149, -1.00000000000000],
[-0.48946894926468, -0.91521985405486, -1.00000000000000],
[-0.27382614524558, -0.91183707059936, -1.00000000000000],
[-0.04462954783073, -0.91074090433855, -1.00000000000000],
[0.18566321584494, -0.91183707059936, -1.00000000000000],
[0.40468880331954, -0.91521985405486, -1.00000000000000],
[0.60084357391763, -0.92117037483149, -1.00000000000000],
[0.76390997914491, -0.93015181941672, -1.00000000000000],
[0.88560636841107, -0.94280318420554, -1.00000000000000],
[0.95993504526726, -0.95993504526726, -1.00000000000000],
[-1.00000000000000, -0.86780105383035, -1.00000000000000],
[-0.93015181941672, -0.83375815972819, -1.00000000000000],
[-0.80867264609489, -0.80867264609489, -1.00000000000000],
[-0.64389688899633, -0.79113271241773, -1.00000000000000],
[-0.44605118773572, -0.78005396215852, -1.00000000000000],
[-0.22677621402734, -0.77470054561703, -1.00000000000000],
[0.00147675964437, -0.77470054561703, -1.00000000000000],
[0.22610514989424, -0.78005396215852, -1.00000000000000],
[0.43502960141406, -0.79113271241773, -1.00000000000000],
[0.61734529218978, -0.80867264609489, -1.00000000000000],
[0.76390997914491, -0.83375815972819, -1.00000000000000],
[0.86780105383035, -0.86780105383035, -1.00000000000000],
[-1.00000000000000, -0.72886859909133, -1.00000000000000],
[-0.92117037483149, -0.67967319908614, -1.00000000000000],
[-0.79113271241773, -0.64389688899633, -1.00000000000000],
[-0.61969584469643, -0.61969584469643, -1.00000000000000],
[-0.41829388073718, -0.60572275652627, -1.00000000000000],
[-0.19942153173453, -0.60115693653094, -1.00000000000000],
[0.02401663726344, -0.60572275652627, -1.00000000000000],
[0.23939168939285, -0.61969584469643, -1.00000000000000],
[0.43502960141406, -0.64389688899633, -1.00000000000000],
[0.60084357391763, -0.67967319908614, -1.00000000000000],
[0.72886859909133, -0.72886859909133, -1.00000000000000],
[-1.00000000000000, -0.55063940292865, -1.00000000000000],
[-0.91521985405486, -0.48946894926468, -1.00000000000000],
[-0.78005396215852, -0.44605118773572, -1.00000000000000],
[-0.60572275652627, -0.41829388073718, -1.00000000000000],
[-0.40477489692834, -0.40477489692834, -1.00000000000000],
[-0.19045020614332, -0.40477489692834, -1.00000000000000],
[0.02401663726344, -0.41829388073718, -1.00000000000000],
[0.22610514989424, -0.44605118773572, -1.00000000000000],
[0.40468880331954, -0.48946894926468, -1.00000000000000],
[0.55063940292865, -0.55063940292865, -1.00000000000000],
[-1.00000000000000, -0.34272401334271, -1.00000000000000],
[-0.91183707059936, -0.27382614524558, -1.00000000000000],
[-0.77470054561703, -0.22677621402734, -1.00000000000000],
[-0.60115693653094, -0.19942153173453, -1.00000000000000],
[-0.40477489692834, -0.19045020614332, -1.00000000000000],
[-0.19942153173453, -0.19942153173453, -1.00000000000000],
[0.00147675964437, -0.22677621402734, -1.00000000000000],
[0.18566321584494, -0.27382614524558, -1.00000000000000],
[0.34272401334271, -0.34272401334271, -1.00000000000000],
[-1.00000000000000, -0.11633186888370, -1.00000000000000],
[-0.91074090433855, -0.04462954783073, -1.00000000000000],
[-0.77470054561703, 0.00147675964437, -1.00000000000000],
[-0.60572275652627, 0.02401663726344, -1.00000000000000],
[-0.41829388073718, 0.02401663726344, -1.00000000000000],
[-0.22677621402734, 0.00147675964437, -1.00000000000000],
[-0.04462954783073, -0.04462954783073, -1.00000000000000],
[0.11633186888370, -0.11633186888370, -1.00000000000000],
[-1.00000000000000, 0.11633186888370, -1.00000000000000],
[-0.91183707059936, 0.18566321584494, -1.00000000000000],
[-0.78005396215852, 0.22610514989424, -1.00000000000000],
[-0.61969584469643, 0.23939168939285, -1.00000000000000],
[-0.44605118773572, 0.22610514989424, -1.00000000000000],
[-0.27382614524558, 0.18566321584494, -1.00000000000000],
[-0.11633186888370, 0.11633186888370, -1.00000000000000],
[-1.00000000000000, 0.34272401334271, -1.00000000000000],
[-0.91521985405486, 0.40468880331954, -1.00000000000000],
[-0.79113271241773, 0.43502960141406, -1.00000000000000],
[-0.64389688899633, 0.43502960141406, -1.00000000000000],
[-0.48946894926468, 0.40468880331954, -1.00000000000000],
[-0.34272401334271, 0.34272401334271, -1.00000000000000],
[-1.00000000000000, 0.55063940292865, -1.00000000000000],
[-0.92117037483149, 0.60084357391763, -1.00000000000000],
[-0.80867264609489, 0.61734529218978, -1.00000000000000],
[-0.67967319908614, 0.60084357391763, -1.00000000000000],
[-0.55063940292865, 0.55063940292865, -1.00000000000000],
[-1.00000000000000, 0.72886859909133, -1.00000000000000],
[-0.93015181941672, 0.76390997914491, -1.00000000000000],
[-0.83375815972819, 0.76390997914491, -1.00000000000000],
[-0.72886859909133, 0.72886859909133, -1.00000000000000],
[-1.00000000000000, 0.86780105383035, -1.00000000000000],
[-0.94280318420554, 0.88560636841107, -1.00000000000000],
[-0.86780105383035, 0.86780105383035, -1.00000000000000],
[-1.00000000000000, 0.95993504526726, -1.00000000000000],
[-0.95993504526726, 0.95993504526726, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, -0.95993504526726],
[-0.94280318420554, -1.00000000000000, -0.94280318420554],
[-0.83375815972819, -1.00000000000000, -0.93015181941672],
[-0.67967319908614, -1.00000000000000, -0.92117037483149],
[-0.48946894926468, -1.00000000000000, -0.91521985405486],
[-0.27382614524558, -1.00000000000000, -0.91183707059936],
[-0.04462954783073, -1.00000000000000, -0.91074090433855],
[0.18566321584494, -1.00000000000000, -0.91183707059936],
[0.40468880331954, -1.00000000000000, -0.91521985405486],
[0.60084357391763, -1.00000000000000, -0.92117037483149],
[0.76390997914491, -1.00000000000000, -0.93015181941672],
[0.88560636841107, -1.00000000000000, -0.94280318420554],
[0.95993504526726, -1.00000000000000, -0.95993504526726],
[-1.00000000000000, -0.94280318420554, -0.94280318420554],
[-0.91431199293741, -0.91431199293741, -0.91431199293741],
[-0.80615441916401, -0.89575662617359, -0.89575662617359],
[-0.65049802947159, -0.88595656041477, -0.88595656041477],
[-0.45611282442131, -0.88068055165978, -0.88068055165978],
[-0.23650087718621, -0.87832626052912, -0.87832626052912],
[-0.00684660175554, -0.87832626052912, -0.87832626052912],
[0.21747392774087, -0.88068055165978, -0.88068055165978],
[0.42241115030113, -0.88595656041477, -0.88595656041477],
[0.59766767151118, -0.89575662617359, -0.89575662617359],
[0.74293597881223, -0.91431199293741, -0.91431199293741],
[0.88560636841107, -0.94280318420554, -0.94280318420554],
[-1.00000000000000, -0.83375815972819, -0.93015181941672],
[-0.89575662617359, -0.80615441916401, -0.89575662617359],
[-0.77230644419553, -0.77230644419553, -0.87894963349278],
[-0.60844335283895, -0.75243530658995, -0.87032102456111],
[-0.41271389426298, -0.74198989111028, -0.86607109044172],
[-0.19825030613490, -0.73872048496583, -0.86477890276437],
[0.02077487581497, -0.74198989111028, -0.86607109044172],
[0.23119968399001, -0.75243530658995, -0.87032102456111],
[0.42356252188384, -0.77230644419553, -0.87894963349278],
[0.59766767151118, -0.80615441916401, -0.89575662617359],
[0.76390997914491, -0.83375815972819, -0.93015181941672],
[-1.00000000000000, -0.67967319908614, -0.92117037483149],
[-0.88595656041477, -0.65049802947159, -0.88595656041477],
[-0.75243530658995, -0.60844335283895, -0.87032102456111],
[-0.58358469123934, -0.58358469123934, -0.86273477213625],
[-0.38849852269218, -0.57213603480564, -0.85954072529241],
[-0.17982471720977, -0.57213603480564, -0.85954072529241],
[0.02990415461493, -0.58358469123934, -0.86273477213625],
[0.23119968399001, -0.60844335283895, -0.87032102456111],
[0.42241115030113, -0.65049802947159, -0.88595656041477],
[0.60084357391763, -0.67967319908614, -0.92117037483149],
[-1.00000000000000, -0.48946894926468, -0.91521985405486],
[-0.88068055165978, -0.45611282442131, -0.88068055165978],
[-0.74198989111028, -0.41271389426298, -0.86607109044172],
[-0.57213603480564, -0.38849852269218, -0.85954072529241],
[-0.38078730234989, -0.38078730234989, -0.85763809295034],
[-0.17982471720977, -0.38849852269218, -0.85954072529241],
[0.02077487581497, -0.41271389426298, -0.86607109044172],
[0.21747392774087, -0.45611282442131, -0.88068055165978],
[0.40468880331954, -0.48946894926468, -0.91521985405486],
[-1.00000000000000, -0.27382614524558, -0.91183707059936],
[-0.87832626052912, -0.23650087718621, -0.87832626052912],
[-0.73872048496583, -0.19825030613490, -0.86477890276437],
[-0.57213603480564, -0.17982471720977, -0.85954072529241],
[-0.38849852269218, -0.17982471720977, -0.85954072529241],
[-0.19825030613490, -0.19825030613490, -0.86477890276437],
[-0.00684660175554, -0.23650087718621, -0.87832626052912],
[0.18566321584494, -0.27382614524558, -0.91183707059936],
[-1.00000000000000, -0.04462954783073, -0.91074090433855],
[-0.87832626052912, -0.00684660175554, -0.87832626052912],
[-0.74198989111028, 0.02077487581497, -0.86607109044172],
[-0.58358469123934, 0.02990415461493, -0.86273477213625],
[-0.41271389426298, 0.02077487581497, -0.86607109044172],
[-0.23650087718621, -0.00684660175554, -0.87832626052912],
[-0.04462954783073, -0.04462954783073, -0.91074090433855],
[-1.00000000000000, 0.18566321584494, -0.91183707059936],
[-0.88068055165978, 0.21747392774087, -0.88068055165978],
[-0.75243530658995, 0.23119968399001, -0.87032102456111],
[-0.60844335283895, 0.23119968399001, -0.87032102456111],
[-0.45611282442131, 0.21747392774087, -0.88068055165978],
[-0.27382614524558, 0.18566321584494, -0.91183707059936],
[-1.00000000000000, 0.40468880331954, -0.91521985405486],
[-0.88595656041477, 0.42241115030113, -0.88595656041477],
[-0.77230644419553, 0.42356252188384, -0.87894963349278],
[-0.65049802947159, 0.42241115030113, -0.88595656041477],
[-0.48946894926468, 0.40468880331954, -0.91521985405486],
[-1.00000000000000, 0.60084357391763, -0.92117037483149],
[-0.89575662617359, 0.59766767151118, -0.89575662617359],
[-0.80615441916401, 0.59766767151118, -0.89575662617359],
[-0.67967319908614, 0.60084357391763, -0.92117037483149],
[-1.00000000000000, 0.76390997914491, -0.93015181941672],
[-0.91431199293741, 0.74293597881223, -0.91431199293741],
[-0.83375815972819, 0.76390997914491, -0.93015181941672],
[-1.00000000000000, 0.88560636841107, -0.94280318420554],
[-0.94280318420554, 0.88560636841107, -0.94280318420554],
[-1.00000000000000, 0.95993504526726, -0.95993504526726],
[-1.00000000000000, -1.00000000000000, -0.86780105383035],
[-0.93015181941672, -1.00000000000000, -0.83375815972819],
[-0.80867264609489, -1.00000000000000, -0.80867264609489],
[-0.64389688899633, -1.00000000000000, -0.79113271241773],
[-0.44605118773572, -1.00000000000000, -0.78005396215852],
[-0.22677621402734, -1.00000000000000, -0.77470054561703],
[0.00147675964437, -1.00000000000000, -0.77470054561703],
[0.22610514989424, -1.00000000000000, -0.78005396215852],
[0.43502960141406, -1.00000000000000, -0.79113271241773],
[0.61734529218978, -1.00000000000000, -0.80867264609489],
[0.76390997914491, -1.00000000000000, -0.83375815972819],
[0.86780105383035, -1.00000000000000, -0.86780105383035],
[-1.00000000000000, -0.93015181941672, -0.83375815972819],
[-0.89575662617359, -0.89575662617359, -0.80615441916401],
[-0.77230644419553, -0.87894963349278, -0.77230644419553],
[-0.60844335283895, -0.87032102456111, -0.75243530658995],
[-0.41271389426298, -0.86607109044172, -0.74198989111028],
[-0.19825030613490, -0.86477890276437, -0.73872048496583],
[0.02077487581497, -0.86607109044172, -0.74198989111028],
[0.23119968399001, -0.87032102456111, -0.75243530658995],
[0.42356252188384, -0.87894963349278, -0.77230644419553],
[0.59766767151118, -0.89575662617359, -0.80615441916401],
[0.76390997914491, -0.93015181941672, -0.83375815972819],
[-1.00000000000000, -0.80867264609489, -0.80867264609489],
[-0.87894963349278, -0.77230644419553, -0.77230644419553],
[-0.74440650078788, -0.74440650078788, -0.74440650078788],
[-0.57645509571103, -0.72832567346454, -0.72832567346454],
[-0.38264436830895, -0.72102609867905, -0.72102609867905],
[-0.17530343433295, -0.72102609867905, -0.72102609867905],
[0.03310644264010, -0.72832567346454, -0.72832567346454],
[0.23321950236363, -0.74440650078788, -0.74440650078788],
[0.42356252188384, -0.77230644419553, -0.77230644419553],
[0.61734529218978, -0.80867264609489, -0.80867264609489],
[-1.00000000000000, -0.64389688899633, -0.79113271241773],
[-0.87032102456111, -0.60844335283895, -0.75243530658995],
[-0.72832567346454, -0.57645509571103, -0.72832567346454],
[-0.55838329786539, -0.55838329786539, -0.71567735134879],
[-0.36781739729085, -0.55261829739424, -0.71174690802405],
[-0.16755605292043, -0.55838329786539, -0.71567735134879],
[0.03310644264010, -0.57645509571103, -0.72832567346454],
[0.23119968399001, -0.60844335283895, -0.75243530658995],
[0.43502960141406, -0.64389688899633, -0.79113271241773],
[-1.00000000000000, -0.44605118773572, -0.78005396215852],
[-0.86607109044172, -0.41271389426298, -0.74198989111028],
[-0.72102609867905, -0.38264436830895, -0.72102609867905],
[-0.55261829739424, -0.36781739729085, -0.71174690802405],
[-0.36781739729085, -0.36781739729085, -0.71174690802405],
[-0.17530343433295, -0.38264436830895, -0.72102609867905],
[0.02077487581497, -0.41271389426298, -0.74198989111028],
[0.22610514989424, -0.44605118773572, -0.78005396215852],
[-1.00000000000000, -0.22677621402734, -0.77470054561703],
[-0.86477890276437, -0.19825030613490, -0.73872048496583],
[-0.72102609867905, -0.17530343433295, -0.72102609867905],
[-0.55838329786539, -0.16755605292043, -0.71567735134879],
[-0.38264436830895, -0.17530343433295, -0.72102609867905],
[-0.19825030613490, -0.19825030613490, -0.73872048496583],
[0.00147675964437, -0.22677621402734, -0.77470054561703],
[-1.00000000000000, 0.00147675964437, -0.77470054561703],
[-0.86607109044172, 0.02077487581497, -0.74198989111028],
[-0.72832567346454, 0.03310644264010, -0.72832567346454],
[-0.57645509571103, 0.03310644264010, -0.72832567346454],
[-0.41271389426298, 0.02077487581497, -0.74198989111028],
[-0.22677621402734, 0.00147675964437, -0.77470054561703],
[-1.00000000000000, 0.22610514989424, -0.78005396215852],
[-0.87032102456111, 0.23119968399001, -0.75243530658995],
[-0.74440650078788, 0.23321950236363, -0.74440650078788],
[-0.60844335283895, 0.23119968399001, -0.75243530658995],
[-0.44605118773572, 0.22610514989424, -0.78005396215852],
[-1.00000000000000, 0.43502960141406, -0.79113271241773],
[-0.87894963349278, 0.42356252188384, -0.77230644419553],
[-0.77230644419553, 0.42356252188384, -0.77230644419553],
[-0.64389688899633, 0.43502960141406, -0.79113271241773],
[-1.00000000000000, 0.61734529218978, -0.80867264609489],
[-0.89575662617359, 0.59766767151118, -0.80615441916401],
[-0.80867264609489, 0.61734529218978, -0.80867264609489],
[-1.00000000000000, 0.76390997914491, -0.83375815972819],
[-0.93015181941672, 0.76390997914491, -0.83375815972819],
[-1.00000000000000, 0.86780105383035, -0.86780105383035],
[-1.00000000000000, -1.00000000000000, -0.72886859909133],
[-0.92117037483149, -1.00000000000000, -0.67967319908614],
[-0.79113271241773, -1.00000000000000, -0.64389688899633],
[-0.61969584469643, -1.00000000000000, -0.61969584469643],
[-0.41829388073718, -1.00000000000000, -0.60572275652627],
[-0.19942153173453, -1.00000000000000, -0.60115693653094],
[0.02401663726344, -1.00000000000000, -0.60572275652627],
[0.23939168939285, -1.00000000000000, -0.61969584469643],
[0.43502960141406, -1.00000000000000, -0.64389688899633],
[0.60084357391763, -1.00000000000000, -0.67967319908614],
[0.72886859909133, -1.00000000000000, -0.72886859909133],
[-1.00000000000000, -0.92117037483149, -0.67967319908614],
[-0.88595656041477, -0.88595656041477, -0.65049802947159],
[-0.75243530658995, -0.87032102456111, -0.60844335283895],
[-0.58358469123934, -0.86273477213625, -0.58358469123934],
[-0.38849852269218, -0.85954072529241, -0.57213603480564],
[-0.17982471720977, -0.85954072529241, -0.57213603480564],
[0.02990415461493, -0.86273477213625, -0.58358469123934],
[0.23119968399001, -0.87032102456111, -0.60844335283895],
[0.42241115030113, -0.88595656041477, -0.65049802947159],
[0.60084357391763, -0.92117037483149, -0.67967319908614],
[-1.00000000000000, -0.79113271241773, -0.64389688899633],
[-0.87032102456111, -0.75243530658995, -0.60844335283895],
[-0.72832567346454, -0.72832567346454, -0.57645509571103],
[-0.55838329786539, -0.71567735134879, -0.55838329786539],
[-0.36781739729085, -0.71174690802405, -0.55261829739424],
[-0.16755605292043, -0.71567735134879, -0.55838329786539],
[0.03310644264010, -0.72832567346454, -0.57645509571103],
[0.23119968399001, -0.75243530658995, -0.60844335283895],
[0.43502960141406, -0.79113271241773, -0.64389688899633],
[-1.00000000000000, -0.61969584469643, -0.61969584469643],
[-0.86273477213625, -0.58358469123934, -0.58358469123934],
[-0.71567735134879, -0.55838329786539, -0.55838329786539],
[-0.54636124336296, -0.54636124336296, -0.54636124336296],
[-0.36091626991113, -0.54636124336296, -0.54636124336296],
[-0.16755605292043, -0.55838329786539, -0.55838329786539],
[0.02990415461493, -0.58358469123934, -0.58358469123934],
[0.23939168939285, -0.61969584469643, -0.61969584469643],
[-1.00000000000000, -0.41829388073718, -0.60572275652627],
[-0.85954072529241, -0.38849852269218, -0.57213603480564],
[-0.71174690802405, -0.36781739729085, -0.55261829739424],
[-0.54636124336296, -0.36091626991113, -0.54636124336296],
[-0.36781739729085, -0.36781739729085, -0.55261829739424],
[-0.17982471720977, -0.38849852269218, -0.57213603480564],
[0.02401663726344, -0.41829388073718, -0.60572275652627],
[-1.00000000000000, -0.19942153173453, -0.60115693653094],
[-0.85954072529241, -0.17982471720977, -0.57213603480564],
[-0.71567735134879, -0.16755605292043, -0.55838329786539],
[-0.55838329786539, -0.16755605292043, -0.55838329786539],
[-0.38849852269218, -0.17982471720977, -0.57213603480564],
[-0.19942153173453, -0.19942153173453, -0.60115693653094],
[-1.00000000000000, 0.02401663726344, -0.60572275652627],
[-0.86273477213625, 0.02990415461493, -0.58358469123934],
[-0.72832567346454, 0.03310644264010, -0.57645509571103],
[-0.58358469123934, 0.02990415461493, -0.58358469123934],
[-0.41829388073718, 0.02401663726344, -0.60572275652627],
[-1.00000000000000, 0.23939168939285, -0.61969584469643],
[-0.87032102456111, 0.23119968399001, -0.60844335283895],
[-0.75243530658995, 0.23119968399001, -0.60844335283895],
[-0.61969584469643, 0.23939168939285, -0.61969584469643],
[-1.00000000000000, 0.43502960141406, -0.64389688899633],
[-0.88595656041477, 0.42241115030113, -0.65049802947159],
[-0.79113271241773, 0.43502960141406, -0.64389688899633],
[-1.00000000000000, 0.60084357391763, -0.67967319908614],
[-0.92117037483149, 0.60084357391763, -0.67967319908614],
[-1.00000000000000, 0.72886859909133, -0.72886859909133],
[-1.00000000000000, -1.00000000000000, -0.55063940292865],
[-0.91521985405486, -1.00000000000000, -0.48946894926468],
[-0.78005396215852, -1.00000000000000, -0.44605118773572],
[-0.60572275652627, -1.00000000000000, -0.41829388073718],
[-0.40477489692834, -1.00000000000000, -0.40477489692834],
[-0.19045020614332, -1.00000000000000, -0.40477489692834],
[0.02401663726344, -1.00000000000000, -0.41829388073718],
[0.22610514989424, -1.00000000000000, -0.44605118773572],
[0.40468880331954, -1.00000000000000, -0.48946894926468],
[0.55063940292865, -1.00000000000000, -0.55063940292865],
[-1.00000000000000, -0.91521985405486, -0.48946894926468],
[-0.88068055165978, -0.88068055165978, -0.45611282442131],
[-0.74198989111028, -0.86607109044172, -0.41271389426298],
[-0.57213603480564, -0.85954072529241, -0.38849852269218],
[-0.38078730234989, -0.85763809295034, -0.38078730234989],
[-0.17982471720977, -0.85954072529241, -0.38849852269218],
[0.02077487581497, -0.86607109044172, -0.41271389426298],
[0.21747392774087, -0.88068055165978, -0.45611282442131],
[0.40468880331954, -0.91521985405486, -0.48946894926468],
[-1.00000000000000, -0.78005396215852, -0.44605118773572],
[-0.86607109044172, -0.74198989111028, -0.41271389426298],
[-0.72102609867905, -0.72102609867905, -0.38264436830895],
[-0.55261829739424, -0.71174690802405, -0.36781739729085],
[-0.36781739729085, -0.71174690802405, -0.36781739729085],
[-0.17530343433295, -0.72102609867905, -0.38264436830895],
[0.02077487581497, -0.74198989111028, -0.41271389426298],
[0.22610514989424, -0.78005396215852, -0.44605118773572],
[-1.00000000000000, -0.60572275652627, -0.41829388073718],
[-0.85954072529241, -0.57213603480564, -0.38849852269218],
[-0.71174690802405, -0.55261829739424, -0.36781739729085],
[-0.54636124336296, -0.54636124336296, -0.36091626991113],
[-0.36781739729085, -0.55261829739424, -0.36781739729085],
[-0.17982471720977, -0.57213603480564, -0.38849852269218],
[0.02401663726344, -0.60572275652627, -0.41829388073718],
[-1.00000000000000, -0.40477489692834, -0.40477489692834],
[-0.85763809295034, -0.38078730234989, -0.38078730234989],
[-0.71174690802405, -0.36781739729085, -0.36781739729085],
[-0.55261829739424, -0.36781739729085, -0.36781739729085],
[-0.38078730234989, -0.38078730234989, -0.38078730234989],
[-0.19045020614332, -0.40477489692834, -0.40477489692834],
[-1.00000000000000, -0.19045020614332, -0.40477489692834],
[-0.85954072529241, -0.17982471720977, -0.38849852269218],
[-0.72102609867905, -0.17530343433295, -0.38264436830895],
[-0.57213603480564, -0.17982471720977, -0.38849852269218],
[-0.40477489692834, -0.19045020614332, -0.40477489692834],
[-1.00000000000000, 0.02401663726344, -0.41829388073718],
[-0.86607109044172, 0.02077487581497, -0.41271389426298],
[-0.74198989111028, 0.02077487581497, -0.41271389426298],
[-0.60572275652627, 0.02401663726344, -0.41829388073718],
[-1.00000000000000, 0.22610514989424, -0.44605118773572],
[-0.88068055165978, 0.21747392774087, -0.45611282442131],
[-0.78005396215852, 0.22610514989424, -0.44605118773572],
[-1.00000000000000, 0.40468880331954, -0.48946894926468],
[-0.91521985405486, 0.40468880331954, -0.48946894926468],
[-1.00000000000000, 0.55063940292865, -0.55063940292865],
[-1.00000000000000, -1.00000000000000, -0.34272401334271],
[-0.91183707059936, -1.00000000000000, -0.27382614524558],
[-0.77470054561703, -1.00000000000000, -0.22677621402734],
[-0.60115693653094, -1.00000000000000, -0.19942153173453],
[-0.40477489692834, -1.00000000000000, -0.19045020614332],
[-0.19942153173453, -1.00000000000000, -0.19942153173453],
[0.00147675964437, -1.00000000000000, -0.22677621402734],
[0.18566321584494, -1.00000000000000, -0.27382614524558],
[0.34272401334271, -1.00000000000000, -0.34272401334271],
[-1.00000000000000, -0.91183707059936, -0.27382614524558],
[-0.87832626052912, -0.87832626052912, -0.23650087718621],
[-0.73872048496583, -0.86477890276437, -0.19825030613490],
[-0.57213603480564, -0.85954072529241, -0.17982471720977],
[-0.38849852269218, -0.85954072529241, -0.17982471720977],
[-0.19825030613490, -0.86477890276437, -0.19825030613490],
[-0.00684660175554, -0.87832626052912, -0.23650087718621],
[0.18566321584494, -0.91183707059936, -0.27382614524558],
[-1.00000000000000, -0.77470054561703, -0.22677621402734],
[-0.86477890276437, -0.73872048496583, -0.19825030613490],
[-0.72102609867905, -0.72102609867905, -0.17530343433295],
[-0.55838329786539, -0.71567735134879, -0.16755605292043],
[-0.38264436830895, -0.72102609867905, -0.17530343433295],
[-0.19825030613490, -0.73872048496583, -0.19825030613490],
[0.00147675964437, -0.77470054561703, -0.22677621402734],
[-1.00000000000000, -0.60115693653094, -0.19942153173453],
[-0.85954072529241, -0.57213603480564, -0.17982471720977],
[-0.71567735134879, -0.55838329786539, -0.16755605292043],
[-0.55838329786539, -0.55838329786539, -0.16755605292043],
[-0.38849852269218, -0.57213603480564, -0.17982471720977],
[-0.19942153173453, -0.60115693653094, -0.19942153173453],
[-1.00000000000000, -0.40477489692834, -0.19045020614332],
[-0.85954072529241, -0.38849852269218, -0.17982471720977],
[-0.72102609867905, -0.38264436830895, -0.17530343433295],
[-0.57213603480564, -0.38849852269218, -0.17982471720977],
[-0.40477489692834, -0.40477489692834, -0.19045020614332],
[-1.00000000000000, -0.19942153173453, -0.19942153173453],
[-0.86477890276437, -0.19825030613490, -0.19825030613490],
[-0.73872048496583, -0.19825030613490, -0.19825030613490],
[-0.60115693653094, -0.19942153173453, -0.19942153173453],
[-1.00000000000000, 0.00147675964437, -0.22677621402734],
[-0.87832626052912, -0.00684660175554, -0.23650087718621],
[-0.77470054561703, 0.00147675964437, -0.22677621402734],
[-1.00000000000000, 0.18566321584494, -0.27382614524558],
[-0.91183707059936, 0.18566321584494, -0.27382614524558],
[-1.00000000000000, 0.34272401334271, -0.34272401334271],
[-1.00000000000000, -1.00000000000000, -0.11633186888370],
[-0.91074090433855, -1.00000000000000, -0.04462954783073],
[-0.77470054561703, -1.00000000000000, 0.00147675964437],
[-0.60572275652627, -1.00000000000000, 0.02401663726344],
[-0.41829388073718, -1.00000000000000, 0.02401663726344],
[-0.22677621402734, -1.00000000000000, 0.00147675964437],
[-0.04462954783073, -1.00000000000000, -0.04462954783073],
[0.11633186888370, -1.00000000000000, -0.11633186888370],
[-1.00000000000000, -0.91074090433855, -0.04462954783073],
[-0.87832626052912, -0.87832626052912, -0.00684660175554],
[-0.74198989111028, -0.86607109044172, 0.02077487581497],
[-0.58358469123934, -0.86273477213625, 0.02990415461493],
[-0.41271389426298, -0.86607109044172, 0.02077487581497],
[-0.23650087718621, -0.87832626052912, -0.00684660175554],
[-0.04462954783073, -0.91074090433855, -0.04462954783073],
[-1.00000000000000, -0.77470054561703, 0.00147675964437],
[-0.86607109044172, -0.74198989111028, 0.02077487581497],
[-0.72832567346454, -0.72832567346454, 0.03310644264010],
[-0.57645509571103, -0.72832567346454, 0.03310644264010],
[-0.41271389426298, -0.74198989111028, 0.02077487581497],
[-0.22677621402734, -0.77470054561703, 0.00147675964437],
[-1.00000000000000, -0.60572275652627, 0.02401663726344],
[-0.86273477213625, -0.58358469123934, 0.02990415461493],
[-0.72832567346454, -0.57645509571103, 0.03310644264010],
[-0.58358469123934, -0.58358469123934, 0.02990415461493],
[-0.41829388073718, -0.60572275652627, 0.02401663726344],
[-1.00000000000000, -0.41829388073718, 0.02401663726344],
[-0.86607109044172, -0.41271389426298, 0.02077487581497],
[-0.74198989111028, -0.41271389426298, 0.02077487581497],
[-0.60572275652627, -0.41829388073718, 0.02401663726344],
[-1.00000000000000, -0.22677621402734, 0.00147675964437],
[-0.87832626052912, -0.23650087718621, -0.00684660175554],
[-0.77470054561703, -0.22677621402734, 0.00147675964437],
[-1.00000000000000, -0.04462954783073, -0.04462954783073],
[-0.91074090433855, -0.04462954783073, -0.04462954783073],
[-1.00000000000000, 0.11633186888370, -0.11633186888370],
[-1.00000000000000, -1.00000000000000, 0.11633186888370],
[-0.91183707059936, -1.00000000000000, 0.18566321584494],
[-0.78005396215852, -1.00000000000000, 0.22610514989424],
[-0.61969584469643, -1.00000000000000, 0.23939168939285],
[-0.44605118773572, -1.00000000000000, 0.22610514989424],
[-0.27382614524558, -1.00000000000000, 0.18566321584494],
[-0.11633186888370, -1.00000000000000, 0.11633186888370],
[-1.00000000000000, -0.91183707059936, 0.18566321584494],
[-0.88068055165978, -0.88068055165978, 0.21747392774087],
[-0.75243530658995, -0.87032102456111, 0.23119968399001],
[-0.60844335283895, -0.87032102456111, 0.23119968399001],
[-0.45611282442131, -0.88068055165978, 0.21747392774087],
[-0.27382614524558, -0.91183707059936, 0.18566321584494],
[-1.00000000000000, -0.78005396215852, 0.22610514989424],
[-0.87032102456111, -0.75243530658995, 0.23119968399001],
[-0.74440650078788, -0.74440650078788, 0.23321950236363],
[-0.60844335283895, -0.75243530658995, 0.23119968399001],
[-0.44605118773572, -0.78005396215852, 0.22610514989424],
[-1.00000000000000, -0.61969584469643, 0.23939168939285],
[-0.87032102456111, -0.60844335283895, 0.23119968399001],
[-0.75243530658995, -0.60844335283895, 0.23119968399001],
[-0.61969584469643, -0.61969584469643, 0.23939168939285],
[-1.00000000000000, -0.44605118773572, 0.22610514989424],
[-0.88068055165978, -0.45611282442131, 0.21747392774087],
[-0.78005396215852, -0.44605118773572, 0.22610514989424],
[-1.00000000000000, -0.27382614524558, 0.18566321584494],
[-0.91183707059936, -0.27382614524558, 0.18566321584494],
[-1.00000000000000, -0.11633186888370, 0.11633186888370],
[-1.00000000000000, -1.00000000000000, 0.34272401334271],
[-0.91521985405486, -1.00000000000000, 0.40468880331954],
[-0.79113271241773, -1.00000000000000, 0.43502960141406],
[-0.64389688899633, -1.00000000000000, 0.43502960141406],
[-0.48946894926468, -1.00000000000000, 0.40468880331954],
[-0.34272401334271, -1.00000000000000, 0.34272401334271],
[-1.00000000000000, -0.91521985405486, 0.40468880331954],
[-0.88595656041477, -0.88595656041477, 0.42241115030113],
[-0.77230644419553, -0.87894963349278, 0.42356252188384],
[-0.65049802947159, -0.88595656041477, 0.42241115030113],
[-0.48946894926468, -0.91521985405486, 0.40468880331954],
[-1.00000000000000, -0.79113271241773, 0.43502960141406],
[-0.87894963349278, -0.77230644419553, 0.42356252188384],
[-0.77230644419553, -0.77230644419553, 0.42356252188384],
[-0.64389688899633, -0.79113271241773, 0.43502960141406],
[-1.00000000000000, -0.64389688899633, 0.43502960141406],
[-0.88595656041477, -0.65049802947159, 0.42241115030113],
[-0.79113271241773, -0.64389688899633, 0.43502960141406],
[-1.00000000000000, -0.48946894926468, 0.40468880331954],
[-0.91521985405486, -0.48946894926468, 0.40468880331954],
[-1.00000000000000, -0.34272401334271, 0.34272401334271],
[-1.00000000000000, -1.00000000000000, 0.55063940292865],
[-0.92117037483149, -1.00000000000000, 0.60084357391763],
[-0.80867264609489, -1.00000000000000, 0.61734529218978],
[-0.67967319908614, -1.00000000000000, 0.60084357391763],
[-0.55063940292865, -1.00000000000000, 0.55063940292865],
[-1.00000000000000, -0.92117037483149, 0.60084357391763],
[-0.89575662617359, -0.89575662617359, 0.59766767151118],
[-0.80615441916401, -0.89575662617359, 0.59766767151118],
[-0.67967319908614, -0.92117037483149, 0.60084357391763],
[-1.00000000000000, -0.80867264609489, 0.61734529218978],
[-0.89575662617359, -0.80615441916401, 0.59766767151118],
[-0.80867264609489, -0.80867264609489, 0.61734529218978],
[-1.00000000000000, -0.67967319908614, 0.60084357391763],
[-0.92117037483149, -0.67967319908614, 0.60084357391763],
[-1.00000000000000, -0.55063940292865, 0.55063940292865],
[-1.00000000000000, -1.00000000000000, 0.72886859909133],
[-0.93015181941672, -1.00000000000000, 0.76390997914491],
[-0.83375815972819, -1.00000000000000, 0.76390997914491],
[-0.72886859909133, -1.00000000000000, 0.72886859909133],
[-1.00000000000000, -0.93015181941672, 0.76390997914491],
[-0.91431199293741, -0.91431199293741, 0.74293597881223],
[-0.83375815972819, -0.93015181941672, 0.76390997914491],
[-1.00000000000000, -0.83375815972819, 0.76390997914491],
[-0.93015181941672, -0.83375815972819, 0.76390997914491],
[-1.00000000000000, -0.72886859909133, 0.72886859909133],
[-1.00000000000000, -1.00000000000000, 0.86780105383035],
[-0.94280318420554, -1.00000000000000, 0.88560636841107],
[-0.86780105383035, -1.00000000000000, 0.86780105383035],
[-1.00000000000000, -0.94280318420554, 0.88560636841107],
[-0.94280318420554, -0.94280318420554, 0.88560636841107],
[-1.00000000000000, -0.86780105383035, 0.86780105383035],
[-1.00000000000000, -1.00000000000000, 0.95993504526726],
[-0.95993504526726, -1.00000000000000, 0.95993504526726],
[-1.00000000000000, -0.95993504526726, 0.95993504526726],
])
elif C==13:
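# Presumably C is the polynomial order of the nodal set: each branch
# hard-codes the precomputed Fekete node coordinates, one (x, y, z) row
# per node, on the reference tetrahedron with vertices at
# (-1,-1,-1), (1,-1,-1), (-1,1,-1) and (-1,-1,1). This branch is order 13.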
feketeNodes = np.array([
[-1.00000000000000, -1.00000000000000, -1.00000000000000],
[1.00000000000000, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, 1.00000000000000, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, 1.00000000000000],
[-0.96524592650384, -1.00000000000000, -1.00000000000000],
[-0.88508204422298, -1.00000000000000, -1.00000000000000],
[-0.76351968995182, -1.00000000000000, -1.00000000000000],
[-0.60625320546985, -1.00000000000000, -1.00000000000000],
[-0.42063805471367, -1.00000000000000, -1.00000000000000],
[-0.21535395536379, -1.00000000000000, -1.00000000000000],
[0.00000000000000, -1.00000000000000, -1.00000000000000],
[0.21535395536379, -1.00000000000000, -1.00000000000000],
[0.42063805471367, -1.00000000000000, -1.00000000000000],
[0.60625320546985, -1.00000000000000, -1.00000000000000],
[0.76351968995182, -1.00000000000000, -1.00000000000000],
[0.88508204422298, -1.00000000000000, -1.00000000000000],
[0.96524592650384, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, -0.96524592650384, -1.00000000000000],
[-0.95024449979566, -0.95024449979566, -1.00000000000000],
[-0.85517906185435, -0.93886143433224, -1.00000000000000],
[-0.71996344038064, -0.93049320805776, -1.00000000000000],
[-0.55139362345669, -0.92465192960797, -1.00000000000000],
[-0.35769379661959, -0.92096665100107, -1.00000000000000],
[-0.14814655196915, -0.91918708296369, -1.00000000000000],
[0.06733363493284, -0.91918708296369, -1.00000000000000],
[0.27866044762067, -0.92096665100107, -1.00000000000000],
[0.47604555306467, -0.92465192960797, -1.00000000000000],
[0.65045664843840, -0.93049320805776, -1.00000000000000],
[0.79404049618658, -0.93886143433224, -1.00000000000000],
[0.90048899959132, -0.95024449979566, -1.00000000000000],
[0.96524592650384, -0.96524592650384, -1.00000000000000],
[-1.00000000000000, -0.88508204422298, -1.00000000000000],
[-0.93886143433224, -0.85517906185435, -1.00000000000000],
[-0.83250992375907, -0.83250992375907, -1.00000000000000],
[-0.68725865323697, -0.81601417373865, -1.00000000000000],
[-0.51092432541689, -0.80484778780174, -1.00000000000000],
[-0.31252079446822, -0.79839678416746, -1.00000000000000],
[-0.10185592197161, -0.79628815605678, -1.00000000000000],
[0.11091757863568, -0.79839678416746, -1.00000000000000],
[0.31577211321863, -0.80484778780174, -1.00000000000000],
[0.50327282697562, -0.81601417373865, -1.00000000000000],
[0.66501984751815, -0.83250992375907, -1.00000000000000],
[0.79404049618658, -0.85517906185435, -1.00000000000000],
[0.88508204422298, -0.88508204422298, -1.00000000000000],
[-1.00000000000000, -0.76351968995181, -1.00000000000000],
[-0.93049320805776, -0.71996344038064, -1.00000000000000],
[-0.81601417373865, -0.68725865323697, -1.00000000000000],
[-0.66399823129102, -0.66399823129102, -1.00000000000000],
[-0.48321257795145, -0.64910231144316, -1.00000000000000],
[-0.28338651030471, -0.64183909826484, -1.00000000000000],
[-0.07477439143046, -0.64183909826484, -1.00000000000000],
[0.13231488939461, -0.64910231144316, -1.00000000000000],
[0.32799646258203, -0.66399823129102, -1.00000000000000],
[0.50327282697562, -0.68725865323697, -1.00000000000000],
[0.65045664843840, -0.71996344038064, -1.00000000000000],
[0.76351968995182, -0.76351968995181, -1.00000000000000],
[-1.00000000000000, -0.60625320546985, -1.00000000000000],
[-0.92465192960797, -0.55139362345669, -1.00000000000000],
[-0.80484778780174, -0.51092432541689, -1.00000000000000],
[-0.64910231144316, -0.48321257795145, -1.00000000000000],
[-0.46706830390530, -0.46706830390530, -1.00000000000000],
[-0.26911608702977, -0.46176782594047, -1.00000000000000],
[-0.06586339218939, -0.46706830390530, -1.00000000000000],
[0.13231488939461, -0.48321257795145, -1.00000000000000],
[0.31577211321863, -0.51092432541689, -1.00000000000000],
[0.47604555306467, -0.55139362345669, -1.00000000000000],
[0.60625320546985, -0.60625320546985, -1.00000000000000],
[-1.00000000000000, -0.42063805471367, -1.00000000000000],
[-0.92096665100107, -0.35769379661959, -1.00000000000000],
[-0.79839678416746, -0.31252079446822, -1.00000000000000],
[-0.64183909826484, -0.28338651030471, -1.00000000000000],
[-0.46176782594047, -0.26911608702977, -1.00000000000000],
[-0.26911608702977, -0.26911608702977, -1.00000000000000],
[-0.07477439143046, -0.28338651030471, -1.00000000000000],
[0.11091757863568, -0.31252079446821, -1.00000000000000],
[0.27866044762067, -0.35769379661959, -1.00000000000000],
[0.42063805471367, -0.42063805471367, -1.00000000000000],
[-1.00000000000000, -0.21535395536379, -1.00000000000000],
[-0.91918708296369, -0.14814655196915, -1.00000000000000],
[-0.79628815605678, -0.10185592197161, -1.00000000000000],
[-0.64183909826484, -0.07477439143046, -1.00000000000000],
[-0.46706830390530, -0.06586339218939, -1.00000000000000],
[-0.28338651030470, -0.07477439143046, -1.00000000000000],
[-0.10185592197161, -0.10185592197161, -1.00000000000000],
[0.06733363493284, -0.14814655196915, -1.00000000000000],
[0.21535395536379, -0.21535395536379, -1.00000000000000],
[-1.00000000000000, 0.00000000000000, -1.00000000000000],
[-0.91918708296369, 0.06733363493284, -1.00000000000000],
[-0.79839678416746, 0.11091757863568, -1.00000000000000],
[-0.64910231144316, 0.13231488939461, -1.00000000000000],
[-0.48321257795145, 0.13231488939461, -1.00000000000000],
[-0.31252079446822, 0.11091757863568, -1.00000000000000],
[-0.14814655196915, 0.06733363493284, -1.00000000000000],
[0.00000000000000, 0.00000000000000, -1.00000000000000],
[-1.00000000000000, 0.21535395536379, -1.00000000000000],
[-0.92096665100107, 0.27866044762067, -1.00000000000000],
[-0.80484778780174, 0.31577211321863, -1.00000000000000],
[-0.66399823129102, 0.32799646258203, -1.00000000000000],
[-0.51092432541689, 0.31577211321863, -1.00000000000000],
[-0.35769379661959, 0.27866044762067, -1.00000000000000],
[-0.21535395536379, 0.21535395536379, -1.00000000000000],
[-1.00000000000000, 0.42063805471367, -1.00000000000000],
[-0.92465192960797, 0.47604555306467, -1.00000000000000],
[-0.81601417373865, 0.50327282697562, -1.00000000000000],
[-0.68725865323697, 0.50327282697562, -1.00000000000000],
[-0.55139362345669, 0.47604555306467, -1.00000000000000],
[-0.42063805471367, 0.42063805471367, -1.00000000000000],
[-1.00000000000000, 0.60625320546985, -1.00000000000000],
[-0.93049320805776, 0.65045664843840, -1.00000000000000],
[-0.83250992375907, 0.66501984751815, -1.00000000000000],
[-0.71996344038064, 0.65045664843840, -1.00000000000000],
[-0.60625320546985, 0.60625320546985, -1.00000000000000],
[-1.00000000000000, 0.76351968995182, -1.00000000000000],
[-0.93886143433224, 0.79404049618658, -1.00000000000000],
[-0.85517906185435, 0.79404049618658, -1.00000000000000],
[-0.76351968995182, 0.76351968995182, -1.00000000000000],
[-1.00000000000000, 0.88508204422298, -1.00000000000000],
[-0.95024449979566, 0.90048899959132, -1.00000000000000],
[-0.88508204422298, 0.88508204422298, -1.00000000000000],
[-1.00000000000000, 0.96524592650384, -1.00000000000000],
[-0.96524592650384, 0.96524592650384, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, -0.96524592650384],
[-0.95024449979566, -1.00000000000000, -0.95024449979566],
[-0.85517906185435, -1.00000000000000, -0.93886143433224],
[-0.71996344038064, -1.00000000000000, -0.93049320805776],
[-0.55139362345669, -1.00000000000000, -0.92465192960797],
[-0.35769379661959, -1.00000000000000, -0.92096665100107],
[-0.14814655196915, -1.00000000000000, -0.91918708296369],
[0.06733363493284, -1.00000000000000, -0.91918708296369],
[0.27866044762067, -1.00000000000000, -0.92096665100107],
[0.47604555306467, -1.00000000000000, -0.92465192960797],
[0.65045664843840, -1.00000000000000, -0.93049320805776],
[0.79404049618658, -1.00000000000000, -0.93886143433224],
[0.90048899959132, -1.00000000000000, -0.95024449979566],
[0.96524592650384, -1.00000000000000, -0.96524592650384],
[-1.00000000000000, -0.95024449979566, -0.95024449979566],
[-0.92421824168052, -0.92421824168052, -0.92421824168052],
[-0.83129642224555, -0.90679943727255, -0.90679943727255],
[-0.69566262743558, -0.89738579484922, -0.89738579484922],
[-0.52334831268129, -0.89203484196476, -0.89203484196476],
[-0.32468202391085, -0.88922094785977, -0.88922094785977],
[-0.11166178154818, -0.88833821845182, -0.88833821845182],
[0.10312391963040, -0.88922094785977, -0.88922094785977],
[0.30741799661080, -0.89203484196476, -0.89203484196476],
[0.49043421713403, -0.89738579484922, -0.89738579484922],
[0.64489529679064, -0.90679943727255, -0.90679943727255],
[0.77265472504156, -0.92421824168052, -0.92421824168052],
[0.90048899959132, -0.95024449979566, -0.95024449979566],
[-1.00000000000000, -0.85517906185435, -0.93886143433224],
[-0.90679943727255, -0.83129642224555, -0.90679943727255],
[-0.79914165143507, -0.79914165143507, -0.89083941074747],
[-0.65476354947990, -0.77955255265301, -0.88237984073213],
[-0.47942955477061, -0.76829026258645, -0.87784903872726],
[-0.28327162633948, -0.76310720981260, -0.87584012489183],
[-0.07778103895609, -0.76310720981260, -0.87584012489183],
[0.12556885608432, -0.76829026258645, -0.87784903872726],
[0.31669594286504, -0.77955255265301, -0.88237984073213],
[0.48912271361761, -0.79914165143507, -0.89083941074747],
[0.64489529679064, -0.83129642224555, -0.90679943727255],
[0.79404049618658, -0.85517906185435, -0.93886143433224],
[-1.00000000000000, -0.71996344038064, -0.93049320805776],
[-0.89738579484922, -0.69566262743558, -0.89738579484922],
[-0.77955255265301, -0.65476354947990, -0.88237984073213],
[-0.62915975650256, -0.62915975650256, -0.87474869409317],
[-0.45253382144816, -0.61524460897785, -0.87100425864223],
[-0.25965393684947, -0.61082413224023, -0.86986799406083],
[-0.06121731093176, -0.61524460897785, -0.87100425864223],
[0.13306820709830, -0.62915975650256, -0.87474869409317],
[0.31669594286504, -0.65476354947990, -0.88237984073213],
[0.49043421713403, -0.69566262743558, -0.89738579484922],
[0.65045664843840, -0.71996344038064, -0.93049320805776],
[-1.00000000000000, -0.55139362345669, -0.92465192960797],
[-0.89203484196476, -0.52334831268129, -0.89203484196476],
[-0.76829026258645, -0.47942955477061, -0.87784903872726],
[-0.61524460897785, -0.45253382144816, -0.87100425864223],
[-0.43991079707006, -0.43991079707006, -0.86812931714831],
[-0.25204908871156, -0.43991079707006, -0.86812931714831],
[-0.06121731093176, -0.45253382144816, -0.87100425864223],
[0.12556885608432, -0.47942955477061, -0.87784903872726],
[0.30741799661080, -0.52334831268129, -0.89203484196476],
[0.47604555306467, -0.55139362345669, -0.92465192960797],
[-1.00000000000000, -0.35769379661959, -0.92096665100107],
[-0.88922094785977, -0.32468202391085, -0.88922094785977],
[-0.76310720981260, -0.28327162633948, -0.87584012489183],
[-0.61082413224023, -0.25965393684947, -0.86986799406083],
[-0.43991079707006, -0.25204908871156, -0.86812931714831],
[-0.25965393684947, -0.25965393684947, -0.86986799406083],
[-0.07778103895609, -0.28327162633948, -0.87584012489183],
[0.10312391963040, -0.32468202391085, -0.88922094785977],
[0.27866044762067, -0.35769379661959, -0.92096665100107],
[-1.00000000000000, -0.14814655196915, -0.91918708296369],
[-0.88833821845182, -0.11166178154818, -0.88833821845182],
[-0.76310720981260, -0.07778103895609, -0.87584012489183],
[-0.61524460897785, -0.06121731093176, -0.87100425864223],
[-0.45253382144816, -0.06121731093176, -0.87100425864223],
[-0.28327162633948, -0.07778103895609, -0.87584012489183],
[-0.11166178154818, -0.11166178154818, -0.88833821845182],
[0.06733363493284, -0.14814655196915, -0.91918708296369],
[-1.00000000000000, 0.06733363493284, -0.91918708296369],
[-0.88922094785977, 0.10312391963040, -0.88922094785977],
[-0.76829026258645, 0.12556885608432, -0.87784903872726],
[-0.62915975650256, 0.13306820709830, -0.87474869409317],
[-0.47942955477061, 0.12556885608432, -0.87784903872726],
[-0.32468202391085, 0.10312391963040, -0.88922094785977],
[-0.14814655196915, 0.06733363493284, -0.91918708296369],
[-1.00000000000000, 0.27866044762067, -0.92096665100107],
[-0.89203484196476, 0.30741799661080, -0.89203484196476],
[-0.77955255265301, 0.31669594286504, -0.88237984073213],
[-0.65476354947990, 0.31669594286504, -0.88237984073213],
[-0.52334831268129, 0.30741799661080, -0.89203484196476],
[-0.35769379661959, 0.27866044762067, -0.92096665100107],
[-1.00000000000000, 0.47604555306467, -0.92465192960797],
[-0.89738579484922, 0.49043421713403, -0.89738579484922],
[-0.79914165143507, 0.48912271361761, -0.89083941074747],
[-0.69566262743558, 0.49043421713403, -0.89738579484922],
[-0.55139362345669, 0.47604555306467, -0.92465192960797],
[-1.00000000000000, 0.65045664843840, -0.93049320805776],
[-0.90679943727255, 0.64489529679064, -0.90679943727255],
[-0.83129642224555, 0.64489529679064, -0.90679943727255],
[-0.71996344038064, 0.65045664843840, -0.93049320805776],
[-1.00000000000000, 0.79404049618658, -0.93886143433224],
[-0.92421824168052, 0.77265472504156, -0.92421824168052],
[-0.85517906185435, 0.79404049618658, -0.93886143433224],
[-1.00000000000000, 0.90048899959132, -0.95024449979566],
[-0.95024449979566, 0.90048899959132, -0.95024449979566],
[-1.00000000000000, 0.96524592650384, -0.96524592650384],
[-1.00000000000000, -1.00000000000000, -0.88508204422298],
[-0.93886143433224, -1.00000000000000, -0.85517906185435],
[-0.83250992375907, -1.00000000000000, -0.83250992375907],
[-0.68725865323697, -1.00000000000000, -0.81601417373865],
[-0.51092432541689, -1.00000000000000, -0.80484778780174],
[-0.31252079446822, -1.00000000000000, -0.79839678416746],
[-0.10185592197161, -1.00000000000000, -0.79628815605678],
[0.11091757863568, -1.00000000000000, -0.79839678416746],
[0.31577211321863, -1.00000000000000, -0.80484778780174],
[0.50327282697562, -1.00000000000000, -0.81601417373865],
[0.66501984751815, -1.00000000000000, -0.83250992375907],
[0.79404049618658, -1.00000000000000, -0.85517906185435],
[0.88508204422298, -1.00000000000000, -0.88508204422298],
[-1.00000000000000, -0.93886143433224, -0.85517906185435],
[-0.90679943727255, -0.90679943727255, -0.83129642224555],
[-0.79914165143507, -0.89083941074747, -0.79914165143507],
[-0.65476354947990, -0.88237984073213, -0.77955255265301],
[-0.47942955477061, -0.87784903872726, -0.76829026258645],
[-0.28327162633948, -0.87584012489183, -0.76310720981260],
[-0.07778103895609, -0.87584012489183, -0.76310720981260],
[0.12556885608432, -0.87784903872726, -0.76829026258645],
[0.31669594286504, -0.88237984073213, -0.77955255265301],
[0.48912271361761, -0.89083941074747, -0.79914165143507],
[0.64489529679064, -0.90679943727255, -0.83129642224555],
[0.79404049618658, -0.93886143433224, -0.85517906185435],
[-1.00000000000000, -0.83250992375907, -0.83250992375907],
[-0.89083941074747, -0.79914165143507, -0.79914165143507],
[-0.77219710443544, -0.77219710443544, -0.77219710443544],
[-0.62277494247244, -0.75575726251690, -0.75575726251690],
[-0.44740767553975, -0.74697095263856, -0.74697095263856],
[-0.25579354296770, -0.74420645703230, -0.74420645703230],
[-0.05865041918313, -0.74697095263856, -0.74697095263856],
[0.13428946750623, -0.75575726251690, -0.75575726251690],
[0.31659131330632, -0.77219710443544, -0.77219710443544],
[0.48912271361761, -0.79914165143507, -0.79914165143507],
[0.66501984751815, -0.83250992375907, -0.83250992375907],
[-1.00000000000000, -0.68725865323697, -0.81601417373865],
[-0.88237984073213, -0.65476354947990, -0.77955255265301],
[-0.75575726251690, -0.62277494247244, -0.75575726251690],
[-0.60288144882472, -0.60288144882472, -0.74210707686992],
[-0.42860980905977, -0.59352446837910, -0.73589358673193],
[-0.24197213582920, -0.59352446837910, -0.73589358673193],
[-0.05213002548064, -0.60288144882472, -0.74210707686992],
[0.13428946750623, -0.62277494247244, -0.75575726251690],
[0.31669594286504, -0.65476354947990, -0.77955255265301],
[0.50327282697562, -0.68725865323697, -0.81601417373865],
[-1.00000000000000, -0.51092432541689, -0.80484778780174],
[-0.87784903872726, -0.47942955477061, -0.76829026258645],
[-0.74697095263856, -0.44740767553975, -0.74697095263856],
[-0.59352446837910, -0.42860980905977, -0.73589358673193],
[-0.42251441883243, -0.42251441883243, -0.73245674350271],
[-0.24197213582920, -0.42860980905977, -0.73589358673193],
[-0.05865041918313, -0.44740767553975, -0.74697095263856],
[0.12556885608432, -0.47942955477061, -0.76829026258645],
[0.31577211321863, -0.51092432541689, -0.80484778780174],
[-1.00000000000000, -0.31252079446821, -0.79839678416746],
[-0.87584012489183, -0.28327162633948, -0.76310720981260],
[-0.74420645703230, -0.25579354296770, -0.74420645703230],
[-0.59352446837910, -0.24197213582920, -0.73589358673193],
[-0.42860980905977, -0.24197213582920, -0.73589358673193],
[-0.25579354296770, -0.25579354296770, -0.74420645703230],
[-0.07778103895609, -0.28327162633948, -0.76310720981260],
[0.11091757863568, -0.31252079446821, -0.79839678416746],
[-1.00000000000000, -0.10185592197161, -0.79628815605678],
[-0.87584012489183, -0.07778103895609, -0.76310720981260],
[-0.74697095263856, -0.05865041918313, -0.74697095263856],
[-0.60288144882472, -0.05213002548064, -0.74210707686992],
[-0.44740767553975, -0.05865041918313, -0.74697095263856],
[-0.28327162633948, -0.07778103895609, -0.76310720981260],
[-0.10185592197161, -0.10185592197161, -0.79628815605677],
[-1.00000000000000, 0.11091757863568, -0.79839678416746],
[-0.87784903872726, 0.12556885608432, -0.76829026258645],
[-0.75575726251690, 0.13428946750623, -0.75575726251690],
[-0.62277494247244, 0.13428946750623, -0.75575726251690],
[-0.47942955477061, 0.12556885608432, -0.76829026258645],
[-0.31252079446821, 0.11091757863568, -0.79839678416746],
[-1.00000000000000, 0.31577211321863, -0.80484778780174],
[-0.88237984073213, 0.31669594286504, -0.77955255265301],
[-0.77219710443544, 0.31659131330632, -0.77219710443544],
[-0.65476354947990, 0.31669594286504, -0.77955255265301],
[-0.51092432541689, 0.31577211321863, -0.80484778780174],
[-1.00000000000000, 0.50327282697562, -0.81601417373865],
[-0.89083941074747, 0.48912271361761, -0.79914165143507],
[-0.79914165143507, 0.48912271361761, -0.79914165143507],
[-0.68725865323697, 0.50327282697562, -0.81601417373865],
[-1.00000000000000, 0.66501984751815, -0.83250992375907],
[-0.90679943727255, 0.64489529679064, -0.83129642224555],
[-0.83250992375907, 0.66501984751815, -0.83250992375907],
[-1.00000000000000, 0.79404049618658, -0.85517906185435],
[-0.93886143433224, 0.79404049618658, -0.85517906185435],
[-1.00000000000000, 0.88508204422298, -0.88508204422298],
[-1.00000000000000, -1.00000000000000, -0.76351968995182],
[-0.93049320805776, -1.00000000000000, -0.71996344038064],
[-0.81601417373865, -1.00000000000000, -0.68725865323697],
[-0.66399823129102, -1.00000000000000, -0.66399823129102],
[-0.48321257795145, -1.00000000000000, -0.64910231144316],
[-0.28338651030471, -1.00000000000000, -0.64183909826484],
[-0.07477439143046, -1.00000000000000, -0.64183909826484],
[0.13231488939461, -1.00000000000000, -0.64910231144316],
[0.32799646258203, -1.00000000000000, -0.66399823129102],
[0.50327282697562, -1.00000000000000, -0.68725865323697],
[0.65045664843840, -1.00000000000000, -0.71996344038064],
[0.76351968995182, -1.00000000000000, -0.76351968995182],
[-1.00000000000000, -0.93049320805776, -0.71996344038064],
[-0.89738579484922, -0.89738579484922, -0.69566262743558],
[-0.77955255265301, -0.88237984073213, -0.65476354947990],
[-0.62915975650256, -0.87474869409317, -0.62915975650256],
[-0.45253382144816, -0.87100425864223, -0.61524460897785],
[-0.25965393684947, -0.86986799406083, -0.61082413224023],
[-0.06121731093176, -0.87100425864223, -0.61524460897785],
[0.13306820709830, -0.87474869409317, -0.62915975650256],
[0.31669594286504, -0.88237984073213, -0.65476354947990],
[0.49043421713403, -0.89738579484922, -0.69566262743558],
[0.65045664843840, -0.93049320805776, -0.71996344038064],
[-1.00000000000000, -0.81601417373865, -0.68725865323697],
[-0.88237984073213, -0.77955255265301, -0.65476354947990],
[-0.75575726251690, -0.75575726251690, -0.62277494247244],
[-0.60288144882472, -0.74210707686992, -0.60288144882472],
[-0.42860980905977, -0.73589358673193, -0.59352446837910],
[-0.24197213582920, -0.73589358673193, -0.59352446837910],
[-0.05213002548064, -0.74210707686992, -0.60288144882472],
[0.13428946750623, -0.75575726251690, -0.62277494247244],
[0.31669594286504, -0.77955255265301, -0.65476354947990],
[0.50327282697562, -0.81601417373865, -0.68725865323697],
[-1.00000000000000, -0.66399823129102, -0.66399823129102],
[-0.87474869409317, -0.62915975650256, -0.62915975650256],
[-0.74210707686992, -0.60288144882472, -0.60288144882472],
[-0.58801919688477, -0.58801919688477, -0.58801919688477],
[-0.41674448786197, -0.58325551213803, -0.58325551213803],
[-0.23594240934570, -0.58801919688477, -0.58801919688477],
[-0.05213002548064, -0.60288144882472, -0.60288144882472],
[0.13306820709830, -0.62915975650256, -0.62915975650256],
[0.32799646258203, -0.66399823129102, -0.66399823129102],
[-1.00000000000000, -0.48321257795145, -0.64910231144316],
[-0.87100425864223, -0.45253382144816, -0.61524460897785],
[-0.73589358673193, -0.42860980905977, -0.59352446837910],
[-0.58325551213803, -0.41674448786197, -0.58325551213803],
[-0.41674448786197, -0.41674448786197, -0.58325551213803],
[-0.24197213582920, -0.42860980905977, -0.59352446837910],
[-0.06121731093176, -0.45253382144816, -0.61524460897785],
[0.13231488939461, -0.48321257795145, -0.64910231144316],
[-1.00000000000000, -0.28338651030471, -0.64183909826484],
[-0.86986799406083, -0.25965393684947, -0.61082413224023],
[-0.73589358673193, -0.24197213582920, -0.59352446837910],
[-0.58801919688477, -0.23594240934570, -0.58801919688477],
[-0.42860980905977, -0.24197213582920, -0.59352446837910],
[-0.25965393684947, -0.25965393684947, -0.61082413224023],
[-0.07477439143046, -0.28338651030471, -0.64183909826484],
[-1.00000000000000, -0.07477439143046, -0.64183909826484],
[-0.87100425864223, -0.06121731093176, -0.61524460897785],
[-0.74210707686992, -0.05213002548064, -0.60288144882472],
[-0.60288144882472, -0.05213002548064, -0.60288144882472],
[-0.45253382144816, -0.06121731093176, -0.61524460897785],
[-0.28338651030471, -0.07477439143046, -0.64183909826484],
[-1.00000000000000, 0.13231488939461, -0.64910231144316],
[-0.87474869409317, 0.13306820709830, -0.62915975650256],
[-0.75575726251690, 0.13428946750623, -0.62277494247244],
[-0.62915975650256, 0.13306820709830, -0.62915975650256],
[-0.48321257795145, 0.13231488939461, -0.64910231144316],
[-1.00000000000000, 0.32799646258203, -0.66399823129102],
[-0.88237984073213, 0.31669594286504, -0.65476354947990],
[-0.77955255265301, 0.31669594286504, -0.65476354947990],
[-0.66399823129102, 0.32799646258203, -0.66399823129102],
[-1.00000000000000, 0.50327282697562, -0.68725865323697],
[-0.89738579484922, 0.49043421713403, -0.69566262743558],
[-0.81601417373865, 0.50327282697562, -0.68725865323697],
[-1.00000000000000, 0.65045664843840, -0.71996344038064],
[-0.93049320805776, 0.65045664843840, -0.71996344038064],
[-1.00000000000000, 0.76351968995181, -0.76351968995182],
[-1.00000000000000, -1.00000000000000, -0.60625320546985],
[-0.92465192960797, -1.00000000000000, -0.55139362345669],
[-0.80484778780174, -1.00000000000000, -0.51092432541689],
[-0.64910231144316, -1.00000000000000, -0.48321257795145],
[-0.46706830390530, -1.00000000000000, -0.46706830390530],
[-0.26911608702977, -1.00000000000000, -0.46176782594047],
[-0.06586339218939, -1.00000000000000, -0.46706830390530],
[0.13231488939461, -1.00000000000000, -0.48321257795145],
[0.31577211321863, -1.00000000000000, -0.51092432541689],
[0.47604555306467, -1.00000000000000, -0.55139362345669],
[0.60625320546985, -1.00000000000000, -0.60625320546985],
[-1.00000000000000, -0.92465192960797, -0.55139362345669],
[-0.89203484196476, -0.89203484196476, -0.52334831268129],
[-0.76829026258645, -0.87784903872726, -0.47942955477061],
[-0.61524460897785, -0.87100425864223, -0.45253382144816],
[-0.43991079707006, -0.86812931714831, -0.43991079707006],
[-0.25204908871156, -0.86812931714831, -0.43991079707006],
[-0.06121731093176, -0.87100425864223, -0.45253382144816],
[0.12556885608432, -0.87784903872726, -0.47942955477061],
[0.30741799661080, -0.89203484196476, -0.52334831268129],
[0.47604555306467, -0.92465192960797, -0.55139362345669],
[-1.00000000000000, -0.80484778780174, -0.51092432541689],
[-0.87784903872726, -0.76829026258645, -0.47942955477061],
[-0.74697095263856, -0.74697095263856, -0.44740767553975],
[-0.59352446837910, -0.73589358673193, -0.42860980905977],
[-0.42251441883243, -0.73245674350271, -0.42251441883243],
[-0.24197213582920, -0.73589358673193, -0.42860980905977],
[-0.05865041918313, -0.74697095263856, -0.44740767553975],
[0.12556885608432, -0.76829026258645, -0.47942955477061],
[0.31577211321863, -0.80484778780174, -0.51092432541689],
[-1.00000000000000, -0.64910231144316, -0.48321257795145],
[-0.87100425864223, -0.61524460897785, -0.45253382144816],
[-0.73589358673193, -0.59352446837910, -0.42860980905977],
[-0.58325551213803, -0.58325551213803, -0.41674448786197],
[-0.41674448786197, -0.58325551213803, -0.41674448786197],
[-0.24197213582920, -0.59352446837910, -0.42860980905977],
[-0.06121731093176, -0.61524460897785, -0.45253382144816],
[0.13231488939461, -0.64910231144316, -0.48321257795145],
[-1.00000000000000, -0.46706830390530, -0.46706830390530],
[-0.86812931714831, -0.43991079707006, -0.43991079707006],
[-0.73245674350271, -0.42251441883243, -0.42251441883243],
[-0.58325551213803, -0.41674448786197, -0.41674448786197],
[-0.42251441883243, -0.42251441883243, -0.42251441883243],
[-0.25204908871156, -0.43991079707006, -0.43991079707006],
[-0.06586339218939, -0.46706830390530, -0.46706830390530],
[-1.00000000000000, -0.26911608702977, -0.46176782594047],
[-0.86812931714831, -0.25204908871156, -0.43991079707006],
[-0.73589358673193, -0.24197213582920, -0.42860980905977],
[-0.59352446837910, -0.24197213582920, -0.42860980905977],
[-0.43991079707006, -0.25204908871156, -0.43991079707006],
[-0.26911608702977, -0.26911608702977, -0.46176782594047],
[-1.00000000000000, -0.06586339218939, -0.46706830390530],
[-0.87100425864223, -0.06121731093176, -0.45253382144816],
[-0.74697095263856, -0.05865041918313, -0.44740767553975],
[-0.61524460897785, -0.06121731093176, -0.45253382144816],
[-0.46706830390530, -0.06586339218939, -0.46706830390530],
[-1.00000000000000, 0.13231488939461, -0.48321257795145],
[-0.87784903872726, 0.12556885608432, -0.47942955477061],
[-0.76829026258645, 0.12556885608432, -0.47942955477061],
[-0.64910231144316, 0.13231488939461, -0.48321257795145],
[-1.00000000000000, 0.31577211321863, -0.51092432541689],
[-0.89203484196476, 0.30741799661080, -0.52334831268129],
[-0.80484778780174, 0.31577211321863, -0.51092432541689],
[-1.00000000000000, 0.47604555306467, -0.55139362345669],
[-0.92465192960797, 0.47604555306467, -0.55139362345669],
[-1.00000000000000, 0.60625320546985, -0.60625320546985],
[-1.00000000000000, -1.00000000000000, -0.42063805471367],
[-0.92096665100107, -1.00000000000000, -0.35769379661959],
[-0.79839678416746, -1.00000000000000, -0.31252079446822],
[-0.64183909826484, -1.00000000000000, -0.28338651030471],
[-0.46176782594047, -1.00000000000000, -0.26911608702977],
[-0.26911608702977, -1.00000000000000, -0.26911608702977],
[-0.07477439143046, -1.00000000000000, -0.28338651030471],
[0.11091757863568, -1.00000000000000, -0.31252079446821],
[0.27866044762067, -1.00000000000000, -0.35769379661959],
[0.42063805471367, -1.00000000000000, -0.42063805471367],
[-1.00000000000000, -0.92096665100107, -0.35769379661959],
[-0.88922094785977, -0.88922094785977, -0.32468202391085],
[-0.76310720981260, -0.87584012489183, -0.28327162633948],
[-0.61082413224023, -0.86986799406083, -0.25965393684947],
[-0.43991079707006, -0.86812931714831, -0.25204908871156],
[-0.25965393684947, -0.86986799406083, -0.25965393684947],
[-0.07778103895609, -0.87584012489183, -0.28327162633948],
[0.10312391963040, -0.88922094785977, -0.32468202391085],
[0.27866044762067, -0.92096665100107, -0.35769379661959],
[-1.00000000000000, -0.79839678416746, -0.31252079446822],
[-0.87584012489183, -0.76310720981260, -0.28327162633948],
[-0.74420645703230, -0.74420645703230, -0.25579354296770],
[-0.59352446837910, -0.73589358673193, -0.24197213582920],
[-0.42860980905977, -0.73589358673193, -0.24197213582920],
[-0.25579354296770, -0.74420645703230, -0.25579354296770],
[-0.07778103895609, -0.76310720981260, -0.28327162633948],
[0.11091757863568, -0.79839678416746, -0.31252079446822],
[-1.00000000000000, -0.64183909826484, -0.28338651030471],
[-0.86986799406083, -0.61082413224023, -0.25965393684947],
[-0.73589358673193, -0.59352446837910, -0.24197213582920],
[-0.58801919688477, -0.58801919688477, -0.23594240934570],
[-0.42860980905977, -0.59352446837910, -0.24197213582920],
[-0.25965393684947, -0.61082413224023, -0.25965393684947],
[-0.07477439143046, -0.64183909826484, -0.28338651030471],
[-1.00000000000000, -0.46176782594047, -0.26911608702977],
[-0.86812931714831, -0.43991079707006, -0.25204908871156],
[-0.73589358673193, -0.42860980905977, -0.24197213582920],
[-0.59352446837910, -0.42860980905977, -0.24197213582920],
[-0.43991079707006, -0.43991079707006, -0.25204908871156],
[-0.26911608702977, -0.46176782594047, -0.26911608702977],
[-1.00000000000000, -0.26911608702977, -0.26911608702977],
[-0.86986799406083, -0.25965393684947, -0.25965393684947],
[-0.74420645703230, -0.25579354296770, -0.25579354296770],
[-0.61082413224023, -0.25965393684947, -0.25965393684947],
[-0.46176782594047, -0.26911608702977, -0.26911608702977],
[-1.00000000000000, -0.07477439143046, -0.28338651030471],
[-0.87584012489183, -0.07778103895609, -0.28327162633948],
[-0.76310720981260, -0.07778103895609, -0.28327162633948],
[-0.64183909826484, -0.07477439143046, -0.28338651030471],
[-1.00000000000000, 0.11091757863568, -0.31252079446821],
[-0.88922094785977, 0.10312391963040, -0.32468202391085],
[-0.79839678416746, 0.11091757863568, -0.31252079446821],
[-1.00000000000000, 0.27866044762067, -0.35769379661959],
[-0.92096665100107, 0.27866044762067, -0.35769379661959],
[-1.00000000000000, 0.42063805471367, -0.42063805471367],
[-1.00000000000000, -1.00000000000000, -0.21535395536379],
[-0.91918708296369, -1.00000000000000, -0.14814655196915],
[-0.79628815605678, -1.00000000000000, -0.10185592197161],
[-0.64183909826484, -1.00000000000000, -0.07477439143046],
[-0.46706830390530, -1.00000000000000, -0.06586339218939],
[-0.28338651030470, -1.00000000000000, -0.07477439143046],
[-0.10185592197161, -1.00000000000000, -0.10185592197161],
[0.06733363493284, -1.00000000000000, -0.14814655196915],
[0.21535395536379, -1.00000000000000, -0.21535395536379],
[-1.00000000000000, -0.91918708296369, -0.14814655196915],
[-0.88833821845182, -0.88833821845182, -0.11166178154818],
[-0.76310720981260, -0.87584012489183, -0.07778103895609],
[-0.61524460897785, -0.87100425864223, -0.06121731093176],
[-0.45253382144816, -0.87100425864223, -0.06121731093176],
[-0.28327162633948, -0.87584012489183, -0.07778103895609],
[-0.11166178154818, -0.88833821845182, -0.11166178154818],
[0.06733363493284, -0.91918708296369, -0.14814655196915],
[-1.00000000000000, -0.79628815605678, -0.10185592197161],
[-0.87584012489183, -0.76310720981260, -0.07778103895609],
[-0.74697095263856, -0.74697095263856, -0.05865041918313],
[-0.60288144882472, -0.74210707686992, -0.05213002548064],
[-0.44740767553975, -0.74697095263856, -0.05865041918313],
[-0.28327162633948, -0.76310720981260, -0.07778103895609],
[-0.10185592197161, -0.79628815605677, -0.10185592197161],
[-1.00000000000000, -0.64183909826484, -0.07477439143046],
[-0.87100425864223, -0.61524460897785, -0.06121731093176],
[-0.74210707686992, -0.60288144882472, -0.05213002548064],
[-0.60288144882472, -0.60288144882472, -0.05213002548064],
[-0.45253382144816, -0.61524460897785, -0.06121731093176],
[-0.28338651030471, -0.64183909826484, -0.07477439143046],
[-1.00000000000000, -0.46706830390530, -0.06586339218939],
[-0.87100425864223, -0.45253382144816, -0.06121731093176],
[-0.74697095263856, -0.44740767553975, -0.05865041918313],
[-0.61524460897785, -0.45253382144816, -0.06121731093176],
[-0.46706830390530, -0.46706830390530, -0.06586339218939],
[-1.00000000000000, -0.28338651030471, -0.07477439143046],
[-0.87584012489183, -0.28327162633948, -0.07778103895609],
[-0.76310720981260, -0.28327162633948, -0.07778103895609],
[-0.64183909826484, -0.28338651030471, -0.07477439143046],
[-1.00000000000000, -0.10185592197161, -0.10185592197161],
[-0.88833821845182, -0.11166178154818, -0.11166178154818],
[-0.79628815605677, -0.10185592197161, -0.10185592197161],
[-1.00000000000000, 0.06733363493284, -0.14814655196915],
[-0.91918708296369, 0.06733363493284, -0.14814655196915],
[-1.00000000000000, 0.21535395536379, -0.21535395536379],
[-1.00000000000000, -1.00000000000000, -0.00000000000000],
[-0.91918708296369, -1.00000000000000, 0.06733363493284],
[-0.79839678416746, -1.00000000000000, 0.11091757863568],
[-0.64910231144316, -1.00000000000000, 0.13231488939461],
[-0.48321257795145, -1.00000000000000, 0.13231488939461],
[-0.31252079446822, -1.00000000000000, 0.11091757863568],
[-0.14814655196915, -1.00000000000000, 0.06733363493284],
[0.00000000000000, -1.00000000000000, -0.00000000000000],
[-1.00000000000000, -0.91918708296369, 0.06733363493284],
[-0.88922094785977, -0.88922094785977, 0.10312391963040],
[-0.76829026258645, -0.87784903872726, 0.12556885608432],
[-0.62915975650256, -0.87474869409317, 0.13306820709830],
[-0.47942955477061, -0.87784903872726, 0.12556885608432],
[-0.32468202391085, -0.88922094785977, 0.10312391963040],
[-0.14814655196915, -0.91918708296369, 0.06733363493284],
[-1.00000000000000, -0.79839678416746, 0.11091757863568],
[-0.87784903872726, -0.76829026258645, 0.12556885608432],
[-0.75575726251690, -0.75575726251690, 0.13428946750623],
[-0.62277494247244, -0.75575726251690, 0.13428946750623],
[-0.47942955477061, -0.76829026258645, 0.12556885608432],
[-0.31252079446821, -0.79839678416746, 0.11091757863568],
[-1.00000000000000, -0.64910231144316, 0.13231488939461],
[-0.87474869409317, -0.62915975650256, 0.13306820709830],
[-0.75575726251690, -0.62277494247244, 0.13428946750623],
[-0.62915975650256, -0.62915975650256, 0.13306820709830],
[-0.48321257795145, -0.64910231144316, 0.13231488939461],
[-1.00000000000000, -0.48321257795145, 0.13231488939461],
[-0.87784903872726, -0.47942955477061, 0.12556885608432],
[-0.76829026258645, -0.47942955477061, 0.12556885608432],
[-0.64910231144316, -0.48321257795145, 0.13231488939461],
[-1.00000000000000, -0.31252079446822, 0.11091757863568],
[-0.88922094785977, -0.32468202391085, 0.10312391963040],
[-0.79839678416746, -0.31252079446822, 0.11091757863568],
[-1.00000000000000, -0.14814655196915, 0.06733363493284],
[-0.91918708296369, -0.14814655196915, 0.06733363493284],
[-1.00000000000000, 0.00000000000000, -0.00000000000000],
[-1.00000000000000, -1.00000000000000, 0.21535395536379],
[-0.92096665100107, -1.00000000000000, 0.27866044762067],
[-0.80484778780174, -1.00000000000000, 0.31577211321863],
[-0.66399823129102, -1.00000000000000, 0.32799646258203],
[-0.51092432541689, -1.00000000000000, 0.31577211321863],
[-0.35769379661959, -1.00000000000000, 0.27866044762067],
[-0.21535395536379, -1.00000000000000, 0.21535395536379],
[-1.00000000000000, -0.92096665100107, 0.27866044762067],
[-0.89203484196476, -0.89203484196476, 0.30741799661080],
[-0.77955255265301, -0.88237984073213, 0.31669594286504],
[-0.65476354947990, -0.88237984073213, 0.31669594286504],
[-0.52334831268129, -0.89203484196476, 0.30741799661080],
[-0.35769379661959, -0.92096665100107, 0.27866044762067],
[-1.00000000000000, -0.80484778780174, 0.31577211321863],
[-0.88237984073213, -0.77955255265301, 0.31669594286504],
[-0.77219710443544, -0.77219710443544, 0.31659131330632],
[-0.65476354947990, -0.77955255265301, 0.31669594286504],
[-0.51092432541689, -0.80484778780174, 0.31577211321863],
[-1.00000000000000, -0.66399823129102, 0.32799646258203],
[-0.88237984073213, -0.65476354947990, 0.31669594286504],
[-0.77955255265301, -0.65476354947990, 0.31669594286504],
[-0.66399823129102, -0.66399823129102, 0.32799646258203],
[-1.00000000000000, -0.51092432541689, 0.31577211321863],
[-0.89203484196476, -0.52334831268129, 0.30741799661080],
[-0.80484778780174, -0.51092432541689, 0.31577211321863],
[-1.00000000000000, -0.35769379661959, 0.27866044762067],
[-0.92096665100107, -0.35769379661959, 0.27866044762067],
[-1.00000000000000, -0.21535395536379, 0.21535395536379],
[-1.00000000000000, -1.00000000000000, 0.42063805471367],
[-0.92465192960797, -1.00000000000000, 0.47604555306467],
[-0.81601417373865, -1.00000000000000, 0.50327282697562],
[-0.68725865323697, -1.00000000000000, 0.50327282697562],
[-0.55139362345669, -1.00000000000000, 0.47604555306467],
[-0.42063805471367, -1.00000000000000, 0.42063805471367],
[-1.00000000000000, -0.92465192960797, 0.47604555306467],
[-0.89738579484922, -0.89738579484922, 0.49043421713403],
[-0.79914165143507, -0.89083941074747, 0.48912271361761],
[-0.69566262743558, -0.89738579484922, 0.49043421713403],
[-0.55139362345669, -0.92465192960797, 0.47604555306467],
[-1.00000000000000, -0.81601417373865, 0.50327282697562],
[-0.89083941074747, -0.79914165143507, 0.48912271361761],
[-0.79914165143507, -0.79914165143507, 0.48912271361761],
[-0.68725865323697, -0.81601417373865, 0.50327282697562],
[-1.00000000000000, -0.68725865323697, 0.50327282697562],
[-0.89738579484922, -0.69566262743558, 0.49043421713403],
[-0.81601417373865, -0.68725865323697, 0.50327282697562],
[-1.00000000000000, -0.55139362345669, 0.47604555306467],
[-0.92465192960797, -0.55139362345669, 0.47604555306467],
[-1.00000000000000, -0.42063805471367, 0.42063805471367],
[-1.00000000000000, -1.00000000000000, 0.60625320546985],
[-0.93049320805776, -1.00000000000000, 0.65045664843840],
[-0.83250992375907, -1.00000000000000, 0.66501984751815],
[-0.71996344038064, -1.00000000000000, 0.65045664843840],
[-0.60625320546985, -1.00000000000000, 0.60625320546985],
[-1.00000000000000, -0.93049320805776, 0.65045664843840],
[-0.90679943727255, -0.90679943727255, 0.64489529679064],
[-0.83129642224555, -0.90679943727255, 0.64489529679064],
[-0.71996344038064, -0.93049320805776, 0.65045664843840],
[-1.00000000000000, -0.83250992375907, 0.66501984751815],
[-0.90679943727255, -0.83129642224555, 0.64489529679064],
[-0.83250992375907, -0.83250992375907, 0.66501984751815],
[-1.00000000000000, -0.71996344038064, 0.65045664843840],
[-0.93049320805776, -0.71996344038064, 0.65045664843840],
[-1.00000000000000, -0.60625320546985, 0.60625320546985],
[-1.00000000000000, -1.00000000000000, 0.76351968995182],
[-0.93886143433224, -1.00000000000000, 0.79404049618658],
[-0.85517906185435, -1.00000000000000, 0.79404049618658],
[-0.76351968995182, -1.00000000000000, 0.76351968995182],
[-1.00000000000000, -0.93886143433224, 0.79404049618658],
[-0.92421824168052, -0.92421824168052, 0.77265472504156],
[-0.85517906185435, -0.93886143433224, 0.79404049618658],
[-1.00000000000000, -0.85517906185435, 0.79404049618658],
[-0.93886143433224, -0.85517906185435, 0.79404049618658],
[-1.00000000000000, -0.76351968995182, 0.76351968995182],
[-1.00000000000000, -1.00000000000000, 0.88508204422298],
[-0.95024449979566, -1.00000000000000, 0.90048899959132],
[-0.88508204422298, -1.00000000000000, 0.88508204422298],
[-1.00000000000000, -0.95024449979566, 0.90048899959132],
[-0.95024449979566, -0.95024449979566, 0.90048899959132],
[-1.00000000000000, -0.88508204422298, 0.88508204422298],
[-1.00000000000000, -1.00000000000000, 0.96524592650384],
[-0.96524592650384, -1.00000000000000, 0.96524592650384],
[-1.00000000000000, -0.96524592650384, 0.96524592650384],
])
elif C==14:
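# Fekete nodes for order 14, using the same reference-tetrahedron
# convention as the order-13 table above.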
feketeNodes = np.array([
[-1.00000000000000, -1.00000000000000, -1.00000000000000],
[1.00000000000000, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, 1.00000000000000, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, 1.00000000000000],
[-0.96956804627022, -1.00000000000000, -1.00000000000000],
[-0.89920053309347, -1.00000000000000, -1.00000000000000],
[-0.79200829186182, -1.00000000000000, -1.00000000000000],
[-0.65238870288249, -1.00000000000000, -1.00000000000000],
[-0.48605942188714, -1.00000000000000, -1.00000000000000],
[-0.29983046890076, -1.00000000000000, -1.00000000000000],
[-0.10132627352195, -1.00000000000000, -1.00000000000000],
[0.10132627352195, -1.00000000000000, -1.00000000000000],
[0.29983046890076, -1.00000000000000, -1.00000000000000],
[0.48605942188714, -1.00000000000000, -1.00000000000000],
[0.65238870288249, -1.00000000000000, -1.00000000000000],
[0.79200829186181, -1.00000000000000, -1.00000000000000],
[0.89920053309347, -1.00000000000000, -1.00000000000000],
[0.96956804627022, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, -0.96956804627022, -1.00000000000000],
[-0.95640415365904, -0.95640415365904, -1.00000000000000],
[-0.87271392966596, -0.94646577650942, -1.00000000000000],
[-0.75298782540723, -0.93915987605377, -1.00000000000000],
[-0.60255555753341, -0.93399645965197, -1.00000000000000],
[-0.42789455771167, -0.93058892225476, -1.00000000000000],
[-0.23637879061862, -0.92865745388385, -1.00000000000000],
[-0.03598372132386, -0.92803255735228, -1.00000000000000],
[0.16503624450246, -0.92865745388385, -1.00000000000000],
[0.35848347996644, -0.93058892225476, -1.00000000000000],
[0.53655201718538, -0.93399645965197, -1.00000000000000],
[0.69214770146100, -0.93915987605377, -1.00000000000000],
[0.81917970617538, -0.94646577650942, -1.00000000000000],
[0.91280830731808, -0.95640415365904, -1.00000000000000],
[0.96956804627022, -0.96956804627022, -1.00000000000000],
[-1.00000000000000, -0.89920053309347, -1.00000000000000],
[-0.94646577650942, -0.87271392966596, -1.00000000000000],
[-0.85261629770175, -0.85261629770175, -1.00000000000000],
[-0.72347511589086, -0.83785050813163, -1.00000000000000],
[-0.56524732972705, -0.82755144253353, -1.00000000000000],
[-0.38511822139871, -0.82105815385500, -1.00000000000000],
[-0.19097623971153, -0.81792406058543, -1.00000000000000],
[0.00890030029696, -0.81792406058543, -1.00000000000000],
[0.20617637525371, -0.82105815385500, -1.00000000000000],
[0.39279877226058, -0.82755144253353, -1.00000000000000],
[0.56132562402249, -0.83785050813163, -1.00000000000000],
[0.70523259540349, -0.85261629770175, -1.00000000000000],
[0.81917970617538, -0.87271392966596, -1.00000000000000],
[0.89920053309347, -0.89920053309347, -1.00000000000000],
[-1.00000000000000, -0.79200829186182, -1.00000000000000],
[-0.93915987605377, -0.75298782540723, -1.00000000000000],
[-0.83785050813163, -0.72347511589086, -1.00000000000000],
[-0.70204520622319, -0.70204520622319, -1.00000000000000],
[-0.53877930230999, -0.68756446761854, -1.00000000000000],
[-0.35589943054091, -0.67920935366037, -1.00000000000000],
[-0.16175992424576, -0.67648015150848, -1.00000000000000],
[0.03510878420129, -0.67920935366037, -1.00000000000000],
[0.22634376992852, -0.68756446761854, -1.00000000000000],
[0.40409041244638, -0.70204520622319, -1.00000000000000],
[0.56132562402249, -0.72347511589086, -1.00000000000000],
[0.69214770146100, -0.75298782540723, -1.00000000000000],
[0.79200829186182, -0.79200829186182, -1.00000000000000],
[-1.00000000000000, -0.65238870288249, -1.00000000000000],
[-0.93399645965197, -0.60255555753341, -1.00000000000000],
[-0.82755144253353, -0.56524732972705, -1.00000000000000],
[-0.68756446761854, -0.53877930230999, -1.00000000000000],
[-0.52186199814718, -0.52186199814718, -1.00000000000000],
[-0.33890713269884, -0.51362327113555, -1.00000000000000],
[-0.14746959616561, -0.51362327113555, -1.00000000000000],
[0.04372399629437, -0.52186199814718, -1.00000000000000],
[0.22634376992852, -0.53877930230999, -1.00000000000000],
[0.39279877226058, -0.56524732972705, -1.00000000000000],
[0.53655201718538, -0.60255555753341, -1.00000000000000],
[0.65238870288249, -0.65238870288249, -1.00000000000000],
[-1.00000000000000, -0.48605942188714, -1.00000000000000],
[-0.93058892225476, -0.42789455771167, -1.00000000000000],
[-0.82105815385500, -0.38511822139871, -1.00000000000000],
[-0.67920935366037, -0.35589943054091, -1.00000000000000],
[-0.51362327113555, -0.33890713269884, -1.00000000000000],
[-0.33333333333333, -0.33333333333333, -1.00000000000000],
[-0.14746959616561, -0.33890713269884, -1.00000000000000],
[0.03510878420129, -0.35589943054091, -1.00000000000000],
[0.20617637525371, -0.38511822139871, -1.00000000000000],
[0.35848347996644, -0.42789455771167, -1.00000000000000],
[0.48605942188714, -0.48605942188714, -1.00000000000000],
[-1.00000000000000, -0.29983046890076, -1.00000000000000],
[-0.92865745388385, -0.23637879061862, -1.00000000000000],
[-0.81792406058543, -0.19097623971153, -1.00000000000000],
[-0.67648015150848, -0.16175992424576, -1.00000000000000],
[-0.51362327113555, -0.14746959616561, -1.00000000000000],
[-0.33890713269884, -0.14746959616561, -1.00000000000000],
[-0.16175992424576, -0.16175992424576, -1.00000000000000],
[0.00890030029696, -0.19097623971153, -1.00000000000000],
[0.16503624450246, -0.23637879061862, -1.00000000000000],
[0.29983046890076, -0.29983046890076, -1.00000000000000],
[-1.00000000000000, -0.10132627352195, -1.00000000000000],
[-0.92803255735228, -0.03598372132386, -1.00000000000000],
[-0.81792406058543, 0.00890030029696, -1.00000000000000],
[-0.67920935366037, 0.03510878420129, -1.00000000000000],
[-0.52186199814718, 0.04372399629437, -1.00000000000000],
[-0.35589943054091, 0.03510878420129, -1.00000000000000],
[-0.19097623971153, 0.00890030029696, -1.00000000000000],
[-0.03598372132386, -0.03598372132386, -1.00000000000000],
[0.10132627352195, -0.10132627352195, -1.00000000000000],
[-1.00000000000000, 0.10132627352195, -1.00000000000000],
[-0.92865745388385, 0.16503624450246, -1.00000000000000],
[-0.82105815385500, 0.20617637525371, -1.00000000000000],
[-0.68756446761854, 0.22634376992852, -1.00000000000000],
[-0.53877930230999, 0.22634376992852, -1.00000000000000],
[-0.38511822139871, 0.20617637525371, -1.00000000000000],
[-0.23637879061862, 0.16503624450246, -1.00000000000000],
[-0.10132627352195, 0.10132627352195, -1.00000000000000],
[-1.00000000000000, 0.29983046890076, -1.00000000000000],
[-0.93058892225476, 0.35848347996644, -1.00000000000000],
[-0.82755144253353, 0.39279877226058, -1.00000000000000],
[-0.70204520622319, 0.40409041244638, -1.00000000000000],
[-0.56524732972705, 0.39279877226058, -1.00000000000000],
[-0.42789455771167, 0.35848347996644, -1.00000000000000],
[-0.29983046890076, 0.29983046890076, -1.00000000000000],
[-1.00000000000000, 0.48605942188714, -1.00000000000000],
[-0.93399645965197, 0.53655201718538, -1.00000000000000],
[-0.83785050813163, 0.56132562402249, -1.00000000000000],
[-0.72347511589086, 0.56132562402249, -1.00000000000000],
[-0.60255555753341, 0.53655201718538, -1.00000000000000],
[-0.48605942188714, 0.48605942188714, -1.00000000000000],
[-1.00000000000000, 0.65238870288249, -1.00000000000000],
[-0.93915987605376, 0.69214770146100, -1.00000000000000],
[-0.85261629770175, 0.70523259540349, -1.00000000000000],
[-0.75298782540723, 0.69214770146100, -1.00000000000000],
[-0.65238870288249, 0.65238870288249, -1.00000000000000],
[-1.00000000000000, 0.79200829186181, -1.00000000000000],
[-0.94646577650942, 0.81917970617538, -1.00000000000000],
[-0.87271392966596, 0.81917970617538, -1.00000000000000],
[-0.79200829186181, 0.79200829186181, -1.00000000000000],
[-1.00000000000000, 0.89920053309347, -1.00000000000000],
[-0.95640415365904, 0.91280830731808, -1.00000000000000],
[-0.89920053309347, 0.89920053309347, -1.00000000000000],
[-1.00000000000000, 0.96956804627022, -1.00000000000000],
[-0.96956804627022, 0.96956804627022, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, -0.96956804627022],
[-0.95640415365904, -1.00000000000000, -0.95640415365904],
[-0.87271392966596, -1.00000000000000, -0.94646577650942],
[-0.75298782540723, -1.00000000000000, -0.93915987605377],
[-0.60255555753341, -1.00000000000000, -0.93399645965197],
[-0.42789455771167, -1.00000000000000, -0.93058892225476],
[-0.23637879061862, -1.00000000000000, -0.92865745388385],
[-0.03598372132386, -1.00000000000000, -0.92803255735228],
[0.16503624450246, -1.00000000000000, -0.92865745388385],
[0.35848347996644, -1.00000000000000, -0.93058892225476],
[0.53655201718538, -1.00000000000000, -0.93399645965197],
[0.69214770146100, -1.00000000000000, -0.93915987605377],
[0.81917970617538, -1.00000000000000, -0.94646577650942],
[0.91280830731808, -1.00000000000000, -0.95640415365904],
[0.96956804627022, -1.00000000000000, -0.96956804627022],
[-1.00000000000000, -0.95640415365904, -0.95640415365904],
[-0.93253524807174, -0.93253524807174, -0.93253524807174],
[-0.85211907148290, -0.91632317508641, -0.91632317508641],
[-0.73313096691257, -0.90754940375318, -0.90754940375318],
[-0.57971495229269, -0.90246572869321, -0.90246572869321],
[-0.39990809159211, -0.89958234781404, -0.89958234781404],
[-0.20329598101562, -0.89826430138753, -0.89826430138753],
[-0.00017541620933, -0.89826430138753, -0.89826430138753],
[0.19907278722019, -0.89958234781404, -0.89958234781404],
[0.38464640967911, -0.90246572869321, -0.90246572869321],
[0.54822977441893, -0.90754940375318, -0.90754940375318],
[0.68476542165573, -0.91632317508641, -0.91632317508641],
[0.79760574421522, -0.93253524807174, -0.93253524807174],
[0.91280830731808, -0.95640415365904, -0.95640415365904],
[-1.00000000000000, -0.87271392966596, -0.94646577650942],
[-0.91632317508641, -0.85211907148290, -0.91632317508641],
[-0.82191335999262, -0.82191335999262, -0.90119859261617],
[-0.69397031419099, -0.80321920064737, -0.89307484646220],
[-0.53634765932070, -0.79197859234110, -0.88853826764551],
[-0.35699270264769, -0.78592237254801, -0.88619583543133],
[-0.16526308498746, -0.78400505161966, -0.88546877840541],
[0.02911091062702, -0.78592237254801, -0.88619583543133],
[0.21686451930730, -0.79197859234110, -0.88853826764551],
[0.39026436130056, -0.80321920064737, -0.89307484646220],
[0.54502531260140, -0.82191335999262, -0.90119859261617],
[0.68476542165573, -0.85211907148290, -0.91632317508641],
[0.81917970617538, -0.87271392966596, -0.94646577650942],
[-1.00000000000000, -0.75298782540723, -0.93915987605377],
[-0.90754940375318, -0.73313096691257, -0.90754940375318],
[-0.80321920064737, -0.69397031419099, -0.89307484646220],
[-0.66867465527363, -0.66867465527363, -0.88552720830084],
[-0.50833800122192, -0.65372263209158, -0.88152861658099],
[-0.33020177117561, -0.64675294725547, -0.87976925408025],
[-0.14327602748867, -0.64675294725547, -0.87976925408025],
[0.04358924989449, -0.65372263209158, -0.88152861658099],
[0.22287651884810, -0.66867465527363, -0.88552720830084],
[0.39026436130056, -0.69397031419099, -0.89307484646220],
[0.54822977441893, -0.73313096691257, -0.90754940375318],
[0.69214770146100, -0.75298782540723, -0.93915987605377],
[-1.00000000000000, -0.60255555753341, -0.93399645965197],
[-0.90246572869321, -0.57971495229269, -0.90246572869321],
[-0.79197859234110, -0.53634765932070, -0.88853826764551],
[-0.65372263209158, -0.50833800122192, -0.88152861658099],
[-0.49287144180154, -0.49287144180154, -0.87811221336653],
[-0.31749794317662, -0.48792484669986, -0.87707926694690],
[-0.13614490303039, -0.49287144180154, -0.87811221336653],
[0.04358924989449, -0.50833800122192, -0.88152861658099],
[0.21686451930730, -0.53634765932070, -0.88853826764551],
[0.38464640967911, -0.57971495229269, -0.90246572869321],
[0.53655201718538, -0.60255555753341, -0.93399645965197],
[-1.00000000000000, -0.42789455771167, -0.93058892225476],
[-0.89958234781404, -0.39990809159211, -0.89958234781404],
[-0.78592237254801, -0.35699270264769, -0.88619583543133],
[-0.64675294725547, -0.33020177117561, -0.87976925408025],
[-0.48792484669986, -0.31749794317662, -0.87707926694690],
[-0.31749794317662, -0.31749794317662, -0.87707926694690],
[-0.14327602748867, -0.33020177117561, -0.87976925408025],
[0.02911091062702, -0.35699270264769, -0.88619583543133],
[0.19907278722019, -0.39990809159211, -0.89958234781404],
[0.35848347996644, -0.42789455771167, -0.93058892225476],
[-1.00000000000000, -0.23637879061862, -0.92865745388385],
[-0.89826430138753, -0.20329598101562, -0.89826430138753],
[-0.78400505161966, -0.16526308498746, -0.88546877840541],
[-0.64675294725547, -0.14327602748867, -0.87976925408025],
[-0.49287144180154, -0.13614490303039, -0.87811221336653],
[-0.33020177117561, -0.14327602748867, -0.87976925408025],
[-0.16526308498746, -0.16526308498746, -0.88546877840541],
[-0.00017541620933, -0.20329598101562, -0.89826430138753],
[0.16503624450246, -0.23637879061862, -0.92865745388385],
[-1.00000000000000, -0.03598372132386, -0.92803255735228],
[-0.89826430138753, -0.00017541620933, -0.89826430138753],
[-0.78592237254801, 0.02911091062702, -0.88619583543133],
[-0.65372263209158, 0.04358924989449, -0.88152861658099],
[-0.50833800122192, 0.04358924989449, -0.88152861658099],
[-0.35699270264769, 0.02911091062702, -0.88619583543133],
[-0.20329598101562, -0.00017541620933, -0.89826430138753],
[-0.03598372132386, -0.03598372132386, -0.92803255735228],
[-1.00000000000000, 0.16503624450246, -0.92865745388385],
[-0.89958234781404, 0.19907278722019, -0.89958234781404],
[-0.79197859234110, 0.21686451930730, -0.88853826764551],
[-0.66867465527363, 0.22287651884810, -0.88552720830084],
[-0.53634765932070, 0.21686451930730, -0.88853826764551],
[-0.39990809159211, 0.19907278722019, -0.89958234781404],
[-0.23637879061862, 0.16503624450246, -0.92865745388385],
[-1.00000000000000, 0.35848347996644, -0.93058892225476],
[-0.90246572869321, 0.38464640967911, -0.90246572869321],
[-0.80321920064737, 0.39026436130056, -0.89307484646220],
[-0.69397031419099, 0.39026436130056, -0.89307484646220],
[-0.57971495229269, 0.38464640967911, -0.90246572869321],
[-0.42789455771167, 0.35848347996644, -0.93058892225476],
[-1.00000000000000, 0.53655201718538, -0.93399645965197],
[-0.90754940375318, 0.54822977441893, -0.90754940375318],
[-0.82191335999261, 0.54502531260140, -0.90119859261617],
[-0.73313096691257, 0.54822977441893, -0.90754940375318],
[-0.60255555753341, 0.53655201718538, -0.93399645965197],
[-1.00000000000000, 0.69214770146100, -0.93915987605377],
[-0.91632317508641, 0.68476542165573, -0.91632317508641],
[-0.85211907148290, 0.68476542165573, -0.91632317508641],
[-0.75298782540723, 0.69214770146100, -0.93915987605377],
[-1.00000000000000, 0.81917970617538, -0.94646577650942],
[-0.93253524807174, 0.79760574421522, -0.93253524807174],
[-0.87271392966596, 0.81917970617538, -0.94646577650942],
[-1.00000000000000, 0.91280830731808, -0.95640415365904],
[-0.95640415365904, 0.91280830731808, -0.95640415365904],
[-1.00000000000000, 0.96956804627022, -0.96956804627022],
[-1.00000000000000, -1.00000000000000, -0.89920053309347],
[-0.94646577650942, -1.00000000000000, -0.87271392966596],
[-0.85261629770175, -1.00000000000000, -0.85261629770175],
[-0.72347511589086, -1.00000000000000, -0.83785050813163],
[-0.56524732972705, -1.00000000000000, -0.82755144253353],
[-0.38511822139871, -1.00000000000000, -0.82105815385500],
[-0.19097623971153, -1.00000000000000, -0.81792406058543],
[0.00890030029696, -1.00000000000000, -0.81792406058543],
[0.20617637525371, -1.00000000000000, -0.82105815385500],
[0.39279877226058, -1.00000000000000, -0.82755144253353],
[0.56132562402249, -1.00000000000000, -0.83785050813163],
[0.70523259540349, -1.00000000000000, -0.85261629770175],
[0.81917970617538, -1.00000000000000, -0.87271392966596],
[0.89920053309347, -1.00000000000000, -0.89920053309347],
[-1.00000000000000, -0.94646577650942, -0.87271392966596],
[-0.91632317508641, -0.91632317508641, -0.85211907148290],
[-0.82191335999262, -0.90119859261617, -0.82191335999262],
[-0.69397031419099, -0.89307484646220, -0.80321920064737],
[-0.53634765932070, -0.88853826764551, -0.79197859234110],
[-0.35699270264769, -0.88619583543133, -0.78592237254801],
[-0.16526308498746, -0.88546877840541, -0.78400505161966],
[0.02911091062702, -0.88619583543133, -0.78592237254801],
[0.21686451930730, -0.88853826764551, -0.79197859234110],
[0.39026436130056, -0.89307484646220, -0.80321920064737],
[0.54502531260140, -0.90119859261617, -0.82191335999262],
[0.68476542165573, -0.91632317508641, -0.85211907148290],
[0.81917970617538, -0.94646577650942, -0.87271392966596],
[-1.00000000000000, -0.85261629770175, -0.85261629770175],
[-0.90119859261617, -0.82191335999262, -0.82191335999262],
[-0.79614636727388, -0.79614636727388, -0.79614636727388],
[-0.66264058517509, -0.77990848584661, -0.77990848584661],
[-0.50360413913697, -0.77047334797411, -0.77047334797411],
[-0.32672031374452, -0.76612845527794, -0.76612845527794],
[-0.14102277569960, -0.76612845527794, -0.76612845527794],
[0.04455083508520, -0.77047334797411, -0.77047334797411],
[0.22245755686831, -0.77990848584661, -0.77990848584661],
[0.38843910182164, -0.79614636727388, -0.79614636727388],
[0.54502531260140, -0.82191335999262, -0.82191335999262],
[0.70523259540349, -0.85261629770175, -0.85261629770175],
[-1.00000000000000, -0.72347511589086, -0.83785050813163],
[-0.89307484646220, -0.69397031419099, -0.80321920064737],
[-0.77990848584661, -0.66264058517509, -0.77990848584661],
[-0.64201017683130, -0.64201017683130, -0.76582902128328],
[-0.48243087334908, -0.63057637947346, -0.75831111829833],
[-0.30856494792649, -0.62692323097232, -0.75594687317471],
[-0.12868162887913, -0.63057637947346, -0.75831111829833],
[0.04984937494587, -0.64201017683130, -0.76582902128328],
[0.22245755686831, -0.66264058517509, -0.77990848584661],
[0.39026436130056, -0.69397031419099, -0.80321920064737],
[0.56132562402249, -0.72347511589086, -0.83785050813163],
[-1.00000000000000, -0.56524732972705, -0.82755144253353],
[-0.88853826764551, -0.53634765932070, -0.79197859234110],
[-0.77047334797411, -0.50360413913697, -0.77047334797411],
[-0.63057637947346, -0.48243087334908, -0.75831111829833],
[-0.47228940070891, -0.47228940070891, -0.75279870929496],
[-0.30262248928722, -0.47228940070891, -0.75279870929496],
[-0.12868162887913, -0.48243087334908, -0.75831111829833],
[0.04455083508520, -0.50360413913697, -0.77047334797411],
[0.21686451930730, -0.53634765932070, -0.79197859234110],
[0.39279877226058, -0.56524732972705, -0.82755144253353],
[-1.00000000000000, -0.38511822139871, -0.82105815385500],
[-0.88619583543133, -0.35699270264769, -0.78592237254801],
[-0.76612845527794, -0.32672031374452, -0.76612845527794],
[-0.62692323097232, -0.30856494792649, -0.75594687317471],
[-0.47228940070891, -0.30262248928722, -0.75279870929496],
[-0.30856494792649, -0.30856494792649, -0.75594687317471],
[-0.14102277569960, -0.32672031374452, -0.76612845527794],
[0.02911091062702, -0.35699270264769, -0.78592237254801],
[0.20617637525371, -0.38511822139871, -0.82105815385500],
[-1.00000000000000, -0.19097623971153, -0.81792406058543],
[-0.88546877840541, -0.16526308498746, -0.78400505161966],
[-0.76612845527794, -0.14102277569960, -0.76612845527794],
[-0.63057637947346, -0.12868162887913, -0.75831111829833],
[-0.48243087334908, -0.12868162887913, -0.75831111829833],
[-0.32672031374452, -0.14102277569960, -0.76612845527794],
[-0.16526308498746, -0.16526308498746, -0.78400505161966],
[0.00890030029696, -0.19097623971153, -0.81792406058543],
[-1.00000000000000, 0.00890030029696, -0.81792406058543],
[-0.88619583543133, 0.02911091062702, -0.78592237254801],
[-0.77047334797411, 0.04455083508520, -0.77047334797411],
[-0.64201017683130, 0.04984937494587, -0.76582902128328],
[-0.50360413913697, 0.04455083508520, -0.77047334797411],
[-0.35699270264769, 0.02911091062702, -0.78592237254801],
[-0.19097623971153, 0.00890030029696, -0.81792406058543],
[-1.00000000000000, 0.20617637525371, -0.82105815385500],
[-0.88853826764551, 0.21686451930730, -0.79197859234110],
[-0.77990848584661, 0.22245755686831, -0.77990848584661],
[-0.66264058517509, 0.22245755686831, -0.77990848584661],
[-0.53634765932070, 0.21686451930730, -0.79197859234110],
[-0.38511822139871, 0.20617637525371, -0.82105815385500],
[-1.00000000000000, 0.39279877226058, -0.82755144253353],
[-0.89307484646220, 0.39026436130056, -0.80321920064737],
[-0.79614636727388, 0.38843910182164, -0.79614636727388],
[-0.69397031419099, 0.39026436130056, -0.80321920064737],
[-0.56524732972705, 0.39279877226058, -0.82755144253353],
[-1.00000000000000, 0.56132562402249, -0.83785050813163],
[-0.90119859261617, 0.54502531260140, -0.82191335999262],
[-0.82191335999262, 0.54502531260140, -0.82191335999262],
[-0.72347511589086, 0.56132562402249, -0.83785050813163],
[-1.00000000000000, 0.70523259540349, -0.85261629770175],
[-0.91632317508641, 0.68476542165573, -0.85211907148290],
[-0.85261629770175, 0.70523259540349, -0.85261629770175],
[-1.00000000000000, 0.81917970617538, -0.87271392966596],
[-0.94646577650942, 0.81917970617538, -0.87271392966596],
[-1.00000000000000, 0.89920053309347, -0.89920053309347],
[-1.00000000000000, -1.00000000000000, -0.79200829186181],
[-0.93915987605377, -1.00000000000000, -0.75298782540723],
[-0.83785050813163, -1.00000000000000, -0.72347511589086],
[-0.70204520622319, -1.00000000000000, -0.70204520622319],
[-0.53877930230999, -1.00000000000000, -0.68756446761854],
[-0.35589943054091, -1.00000000000000, -0.67920935366037],
[-0.16175992424576, -1.00000000000000, -0.67648015150848],
[0.03510878420129, -1.00000000000000, -0.67920935366037],
[0.22634376992852, -1.00000000000000, -0.68756446761854],
[0.40409041244638, -1.00000000000000, -0.70204520622319],
[0.56132562402249, -1.00000000000000, -0.72347511589086],
[0.69214770146100, -1.00000000000000, -0.75298782540723],
[0.79200829186182, -1.00000000000000, -0.79200829186181],
[-1.00000000000000, -0.93915987605377, -0.75298782540723],
[-0.90754940375318, -0.90754940375318, -0.73313096691257],
[-0.80321920064737, -0.89307484646220, -0.69397031419099],
[-0.66867465527363, -0.88552720830084, -0.66867465527363],
[-0.50833800122192, -0.88152861658099, -0.65372263209158],
[-0.33020177117561, -0.87976925408025, -0.64675294725547],
[-0.14327602748867, -0.87976925408025, -0.64675294725547],
[0.04358924989449, -0.88152861658099, -0.65372263209158],
[0.22287651884810, -0.88552720830084, -0.66867465527363],
[0.39026436130056, -0.89307484646220, -0.69397031419099],
[0.54822977441893, -0.90754940375318, -0.73313096691257],
[0.69214770146100, -0.93915987605377, -0.75298782540723],
[-1.00000000000000, -0.83785050813163, -0.72347511589086],
[-0.89307484646220, -0.80321920064737, -0.69397031419099],
[-0.77990848584661, -0.77990848584661, -0.66264058517509],
[-0.64201017683130, -0.76582902128328, -0.64201017683130],
[-0.48243087334908, -0.75831111829833, -0.63057637947346],
[-0.30856494792649, -0.75594687317471, -0.62692323097232],
[-0.12868162887913, -0.75831111829833, -0.63057637947346],
[0.04984937494587, -0.76582902128328, -0.64201017683130],
[0.22245755686831, -0.77990848584661, -0.66264058517509],
[0.39026436130056, -0.80321920064737, -0.69397031419099],
[0.56132562402249, -0.83785050813163, -0.72347511589086],
[-1.00000000000000, -0.70204520622319, -0.70204520622319],
[-0.88552720830084, -0.66867465527363, -0.66867465527363],
[-0.76582902128328, -0.64201017683130, -0.64201017683130],
[-0.62546113160473, -0.62546113160473, -0.62546113160473],
[-0.46714639126219, -0.61764668822165, -0.61764668822165],
[-0.29756023229452, -0.61764668822165, -0.61764668822165],
[-0.12361660518582, -0.62546113160473, -0.62546113160473],
[0.04984937494587, -0.64201017683130, -0.64201017683130],
[0.22287651884810, -0.66867465527363, -0.66867465527363],
[0.40409041244638, -0.70204520622319, -0.70204520622319],
[-1.00000000000000, -0.53877930230999, -0.68756446761854],
[-0.88152861658099, -0.50833800122192, -0.65372263209158],
[-0.75831111829833, -0.48243087334908, -0.63057637947346],
[-0.61764668822165, -0.46714639126219, -0.61764668822165],
[-0.46216300760478, -0.46216300760478, -0.61351097718567],
[-0.29756023229452, -0.46714639126219, -0.61764668822165],
[-0.12868162887913, -0.48243087334908, -0.63057637947346],
[0.04358924989449, -0.50833800122192, -0.65372263209158],
[0.22634376992852, -0.53877930230999, -0.68756446761854],
[-1.00000000000000, -0.35589943054091, -0.67920935366037],
[-0.87976925408025, -0.33020177117561, -0.64675294725547],
[-0.75594687317471, -0.30856494792649, -0.62692323097232],
[-0.61764668822165, -0.29756023229452, -0.61764668822165],
[-0.46714639126219, -0.29756023229452, -0.61764668822165],
[-0.30856494792649, -0.30856494792649, -0.62692323097232],
[-0.14327602748867, -0.33020177117561, -0.64675294725547],
[0.03510878420129, -0.35589943054091, -0.67920935366037],
[-1.00000000000000, -0.16175992424576, -0.67648015150848],
[-0.87976925408025, -0.14327602748867, -0.64675294725547],
[-0.75831111829833, -0.12868162887913, -0.63057637947346],
[-0.62546113160473, -0.12361660518582, -0.62546113160473],
[-0.48243087334908, -0.12868162887913, -0.63057637947346],
[-0.33020177117561, -0.14327602748867, -0.64675294725547],
[-0.16175992424576, -0.16175992424576, -0.67648015150848],
[-1.00000000000000, 0.03510878420129, -0.67920935366037],
[-0.88152861658099, 0.04358924989449, -0.65372263209158],
[-0.76582902128328, 0.04984937494587, -0.64201017683130],
[-0.64201017683130, 0.04984937494587, -0.64201017683130],
[-0.50833800122192, 0.04358924989449, -0.65372263209158],
[-0.35589943054091, 0.03510878420129, -0.67920935366037],
[-1.00000000000000, 0.22634376992852, -0.68756446761854],
[-0.88552720830084, 0.22287651884810, -0.66867465527363],
[-0.77990848584661, 0.22245755686831, -0.66264058517509],
[-0.66867465527363, 0.22287651884810, -0.66867465527363],
[-0.53877930230999, 0.22634376992852, -0.68756446761854],
[-1.00000000000000, 0.40409041244638, -0.70204520622319],
[-0.89307484646220, 0.39026436130056, -0.69397031419099],
[-0.80321920064737, 0.39026436130056, -0.69397031419099],
[-0.70204520622319, 0.40409041244638, -0.70204520622319],
[-1.00000000000000, 0.56132562402249, -0.72347511589086],
[-0.90754940375318, 0.54822977441893, -0.73313096691257],
[-0.83785050813163, 0.56132562402249, -0.72347511589086],
[-1.00000000000000, 0.69214770146100, -0.75298782540723],
[-0.93915987605376, 0.69214770146100, -0.75298782540723],
[-1.00000000000000, 0.79200829186181, -0.79200829186182],
[-1.00000000000000, -1.00000000000000, -0.65238870288249],
[-0.93399645965197, -1.00000000000000, -0.60255555753341],
[-0.82755144253353, -1.00000000000000, -0.56524732972705],
[-0.68756446761854, -1.00000000000000, -0.53877930230999],
[-0.52186199814718, -1.00000000000000, -0.52186199814718],
[-0.33890713269884, -1.00000000000000, -0.51362327113555],
[-0.14746959616561, -1.00000000000000, -0.51362327113555],
[0.04372399629437, -1.00000000000000, -0.52186199814718],
[0.22634376992852, -1.00000000000000, -0.53877930230999],
[0.39279877226058, -1.00000000000000, -0.56524732972705],
[0.53655201718538, -1.00000000000000, -0.60255555753341],
[0.65238870288249, -1.00000000000000, -0.65238870288249],
[-1.00000000000000, -0.93399645965197, -0.60255555753341],
[-0.90246572869321, -0.90246572869321, -0.57971495229269],
[-0.79197859234110, -0.88853826764551, -0.53634765932070],
[-0.65372263209158, -0.88152861658099, -0.50833800122192],
[-0.49287144180154, -0.87811221336653, -0.49287144180154],
[-0.31749794317662, -0.87707926694690, -0.48792484669986],
[-0.13614490303039, -0.87811221336653, -0.49287144180154],
[0.04358924989449, -0.88152861658099, -0.50833800122192],
[0.21686451930730, -0.88853826764551, -0.53634765932070],
[0.38464640967911, -0.90246572869321, -0.57971495229269],
[0.53655201718538, -0.93399645965197, -0.60255555753341],
[-1.00000000000000, -0.82755144253353, -0.56524732972705],
[-0.88853826764551, -0.79197859234110, -0.53634765932070],
[-0.77047334797411, -0.77047334797411, -0.50360413913697],
[-0.63057637947346, -0.75831111829833, -0.48243087334908],
[-0.47228940070891, -0.75279870929496, -0.47228940070891],
[-0.30262248928722, -0.75279870929496, -0.47228940070891],
[-0.12868162887913, -0.75831111829833, -0.48243087334908],
[0.04455083508520, -0.77047334797411, -0.50360413913697],
[0.21686451930730, -0.79197859234110, -0.53634765932070],
[0.39279877226058, -0.82755144253353, -0.56524732972705],
[-1.00000000000000, -0.68756446761854, -0.53877930230999],
[-0.88152861658099, -0.65372263209158, -0.50833800122192],
[-0.75831111829833, -0.63057637947346, -0.48243087334908],
[-0.61764668822165, -0.61764668822165, -0.46714639126219],
[-0.46216300760478, -0.61351097718567, -0.46216300760478],
[-0.29756023229452, -0.61764668822165, -0.46714639126219],
[-0.12868162887913, -0.63057637947346, -0.48243087334908],
[0.04358924989449, -0.65372263209158, -0.50833800122192],
[0.22634376992852, -0.68756446761854, -0.53877930230999],
[-1.00000000000000, -0.52186199814718, -0.52186199814718],
[-0.87811221336653, -0.49287144180154, -0.49287144180154],
[-0.75279870929496, -0.47228940070891, -0.47228940070891],
[-0.61351097718567, -0.46216300760478, -0.46216300760478],
[-0.46216300760478, -0.46216300760478, -0.46216300760478],
[-0.30262248928722, -0.47228940070891, -0.47228940070891],
[-0.13614490303039, -0.49287144180154, -0.49287144180154],
[0.04372399629437, -0.52186199814718, -0.52186199814718],
[-1.00000000000000, -0.33890713269884, -0.51362327113555],
[-0.87707926694690, -0.31749794317662, -0.48792484669986],
[-0.75279870929496, -0.30262248928722, -0.47228940070891],
[-0.61764668822165, -0.29756023229452, -0.46714639126219],
[-0.47228940070891, -0.30262248928722, -0.47228940070891],
[-0.31749794317662, -0.31749794317662, -0.48792484669986],
[-0.14746959616561, -0.33890713269884, -0.51362327113555],
[-1.00000000000000, -0.14746959616561, -0.51362327113555],
[-0.87811221336653, -0.13614490303039, -0.49287144180154],
[-0.75831111829833, -0.12868162887913, -0.48243087334908],
[-0.63057637947346, -0.12868162887913, -0.48243087334908],
[-0.49287144180154, -0.13614490303039, -0.49287144180154],
[-0.33890713269884, -0.14746959616561, -0.51362327113555],
[-1.00000000000000, 0.04372399629437, -0.52186199814718],
[-0.88152861658099, 0.04358924989449, -0.50833800122192],
[-0.77047334797411, 0.04455083508520, -0.50360413913697],
[-0.65372263209158, 0.04358924989449, -0.50833800122192],
[-0.52186199814718, 0.04372399629437, -0.52186199814718],
[-1.00000000000000, 0.22634376992852, -0.53877930230999],
[-0.88853826764551, 0.21686451930730, -0.53634765932070],
[-0.79197859234110, 0.21686451930730, -0.53634765932070],
[-0.68756446761854, 0.22634376992852, -0.53877930230999],
[-1.00000000000000, 0.39279877226058, -0.56524732972705],
[-0.90246572869321, 0.38464640967911, -0.57971495229269],
[-0.82755144253353, 0.39279877226058, -0.56524732972705],
[-1.00000000000000, 0.53655201718538, -0.60255555753341],
[-0.93399645965197, 0.53655201718538, -0.60255555753341],
[-1.00000000000000, 0.65238870288249, -0.65238870288249],
[-1.00000000000000, -1.00000000000000, -0.48605942188714],
[-0.93058892225476, -1.00000000000000, -0.42789455771167],
[-0.82105815385500, -1.00000000000000, -0.38511822139871],
[-0.67920935366037, -1.00000000000000, -0.35589943054091],
[-0.51362327113555, -1.00000000000000, -0.33890713269884],
[-0.33333333333333, -1.00000000000000, -0.33333333333333],
[-0.14746959616561, -1.00000000000000, -0.33890713269884],
[0.03510878420129, -1.00000000000000, -0.35589943054091],
[0.20617637525371, -1.00000000000000, -0.38511822139871],
[0.35848347996644, -1.00000000000000, -0.42789455771167],
[0.48605942188714, -1.00000000000000, -0.48605942188714],
[-1.00000000000000, -0.93058892225476, -0.42789455771167],
[-0.89958234781404, -0.89958234781404, -0.39990809159211],
[-0.78592237254801, -0.88619583543133, -0.35699270264769],
[-0.64675294725547, -0.87976925408025, -0.33020177117561],
[-0.48792484669986, -0.87707926694690, -0.31749794317662],
[-0.31749794317662, -0.87707926694690, -0.31749794317662],
[-0.14327602748867, -0.87976925408025, -0.33020177117561],
[0.02911091062702, -0.88619583543133, -0.35699270264769],
[0.19907278722019, -0.89958234781404, -0.39990809159211],
[0.35848347996644, -0.93058892225476, -0.42789455771167],
[-1.00000000000000, -0.82105815385500, -0.38511822139871],
[-0.88619583543133, -0.78592237254801, -0.35699270264769],
[-0.76612845527794, -0.76612845527794, -0.32672031374452],
[-0.62692323097232, -0.75594687317471, -0.30856494792649],
[-0.47228940070891, -0.75279870929496, -0.30262248928722],
[-0.30856494792649, -0.75594687317471, -0.30856494792649],
[-0.14102277569960, -0.76612845527794, -0.32672031374452],
[0.02911091062702, -0.78592237254801, -0.35699270264769],
[0.20617637525371, -0.82105815385500, -0.38511822139871],
[-1.00000000000000, -0.67920935366037, -0.35589943054091],
[-0.87976925408025, -0.64675294725547, -0.33020177117561],
[-0.75594687317471, -0.62692323097232, -0.30856494792649],
[-0.61764668822165, -0.61764668822165, -0.29756023229452],
[-0.46714639126219, -0.61764668822165, -0.29756023229452],
[-0.30856494792649, -0.62692323097232, -0.30856494792649],
[-0.14327602748867, -0.64675294725547, -0.33020177117561],
[0.03510878420129, -0.67920935366037, -0.35589943054091],
[-1.00000000000000, -0.51362327113555, -0.33890713269884],
[-0.87707926694690, -0.48792484669986, -0.31749794317662],
[-0.75279870929496, -0.47228940070891, -0.30262248928722],
[-0.61764668822165, -0.46714639126219, -0.29756023229452],
[-0.47228940070891, -0.47228940070891, -0.30262248928722],
[-0.31749794317662, -0.48792484669986, -0.31749794317662],
[-0.14746959616561, -0.51362327113555, -0.33890713269884],
[-1.00000000000000, -0.33333333333333, -0.33333333333333],
[-0.87707926694690, -0.31749794317662, -0.31749794317662],
[-0.75594687317471, -0.30856494792649, -0.30856494792649],
[-0.62692323097232, -0.30856494792649, -0.30856494792649],
[-0.48792484669986, -0.31749794317662, -0.31749794317662],
[-0.33333333333333, -0.33333333333333, -0.33333333333333],
[-1.00000000000000, -0.14746959616561, -0.33890713269884],
[-0.87976925408025, -0.14327602748867, -0.33020177117561],
[-0.76612845527794, -0.14102277569960, -0.32672031374452],
[-0.64675294725547, -0.14327602748867, -0.33020177117561],
[-0.51362327113555, -0.14746959616561, -0.33890713269884],
[-1.00000000000000, 0.03510878420129, -0.35589943054091],
[-0.88619583543133, 0.02911091062702, -0.35699270264769],
[-0.78592237254801, 0.02911091062702, -0.35699270264769],
[-0.67920935366037, 0.03510878420129, -0.35589943054091],
[-1.00000000000000, 0.20617637525371, -0.38511822139871],
[-0.89958234781404, 0.19907278722019, -0.39990809159211],
[-0.82105815385500, 0.20617637525371, -0.38511822139871],
[-1.00000000000000, 0.35848347996644, -0.42789455771167],
[-0.93058892225476, 0.35848347996644, -0.42789455771167],
[-1.00000000000000, 0.48605942188714, -0.48605942188714],
[-1.00000000000000, -1.00000000000000, -0.29983046890076],
[-0.92865745388385, -1.00000000000000, -0.23637879061862],
[-0.81792406058543, -1.00000000000000, -0.19097623971153],
[-0.67648015150848, -1.00000000000000, -0.16175992424576],
[-0.51362327113555, -1.00000000000000, -0.14746959616561],
[-0.33890713269884, -1.00000000000000, -0.14746959616561],
[-0.16175992424576, -1.00000000000000, -0.16175992424576],
[0.00890030029696, -1.00000000000000, -0.19097623971153],
[0.16503624450246, -1.00000000000000, -0.23637879061862],
[0.29983046890076, -1.00000000000000, -0.29983046890076],
[-1.00000000000000, -0.92865745388385, -0.23637879061862],
[-0.89826430138753, -0.89826430138753, -0.20329598101562],
[-0.78400505161966, -0.88546877840541, -0.16526308498746],
[-0.64675294725547, -0.87976925408025, -0.14327602748867],
[-0.49287144180154, -0.87811221336653, -0.13614490303039],
[-0.33020177117561, -0.87976925408025, -0.14327602748867],
[-0.16526308498746, -0.88546877840541, -0.16526308498746],
[-0.00017541620933, -0.89826430138753, -0.20329598101562],
[0.16503624450246, -0.92865745388385, -0.23637879061862],
[-1.00000000000000, -0.81792406058543, -0.19097623971153],
[-0.88546877840541, -0.78400505161966, -0.16526308498746],
[-0.76612845527794, -0.76612845527794, -0.14102277569960],
[-0.63057637947346, -0.75831111829833, -0.12868162887913],
[-0.48243087334908, -0.75831111829833, -0.12868162887913],
[-0.32672031374452, -0.76612845527794, -0.14102277569960],
[-0.16526308498746, -0.78400505161966, -0.16526308498746],
[0.00890030029696, -0.81792406058543, -0.19097623971153],
[-1.00000000000000, -0.67648015150848, -0.16175992424576],
[-0.87976925408025, -0.64675294725547, -0.14327602748867],
[-0.75831111829833, -0.63057637947346, -0.12868162887913],
[-0.62546113160473, -0.62546113160473, -0.12361660518582],
[-0.48243087334908, -0.63057637947346, -0.12868162887913],
[-0.33020177117561, -0.64675294725547, -0.14327602748867],
[-0.16175992424576, -0.67648015150848, -0.16175992424576],
[-1.00000000000000, -0.51362327113555, -0.14746959616561],
[-0.87811221336653, -0.49287144180154, -0.13614490303039],
[-0.75831111829833, -0.48243087334908, -0.12868162887913],
[-0.63057637947346, -0.48243087334908, -0.12868162887913],
[-0.49287144180154, -0.49287144180154, -0.13614490303039],
[-0.33890713269884, -0.51362327113555, -0.14746959616561],
[-1.00000000000000, -0.33890713269884, -0.14746959616561],
[-0.87976925408025, -0.33020177117561, -0.14327602748867],
[-0.76612845527794, -0.32672031374452, -0.14102277569960],
[-0.64675294725547, -0.33020177117561, -0.14327602748867],
[-0.51362327113555, -0.33890713269884, -0.14746959616561],
[-1.00000000000000, -0.16175992424576, -0.16175992424576],
[-0.88546877840541, -0.16526308498746, -0.16526308498746],
[-0.78400505161966, -0.16526308498746, -0.16526308498746],
[-0.67648015150848, -0.16175992424576, -0.16175992424576],
[-1.00000000000000, 0.00890030029696, -0.19097623971153],
[-0.89826430138753, -0.00017541620933, -0.20329598101562],
[-0.81792406058543, 0.00890030029696, -0.19097623971153],
[-1.00000000000000, 0.16503624450246, -0.23637879061862],
[-0.92865745388385, 0.16503624450246, -0.23637879061862],
[-1.00000000000000, 0.29983046890076, -0.29983046890076],
[-1.00000000000000, -1.00000000000000, -0.10132627352195],
[-0.92803255735228, -1.00000000000000, -0.03598372132386],
[-0.81792406058543, -1.00000000000000, 0.00890030029696],
[-0.67920935366037, -1.00000000000000, 0.03510878420129],
[-0.52186199814718, -1.00000000000000, 0.04372399629437],
[-0.35589943054091, -1.00000000000000, 0.03510878420129],
[-0.19097623971153, -1.00000000000000, 0.00890030029696],
[-0.03598372132386, -1.00000000000000, -0.03598372132386],
[0.10132627352195, -1.00000000000000, -0.10132627352195],
[-1.00000000000000, -0.92803255735228, -0.03598372132386],
[-0.89826430138753, -0.89826430138753, -0.00017541620933],
[-0.78592237254801, -0.88619583543133, 0.02911091062702],
[-0.65372263209158, -0.88152861658099, 0.04358924989449],
[-0.50833800122192, -0.88152861658099, 0.04358924989449],
[-0.35699270264769, -0.88619583543133, 0.02911091062702],
[-0.20329598101562, -0.89826430138753, -0.00017541620933],
[-0.03598372132386, -0.92803255735228, -0.03598372132386],
[-1.00000000000000, -0.81792406058543, 0.00890030029696],
[-0.88619583543133, -0.78592237254801, 0.02911091062702],
[-0.77047334797411, -0.77047334797411, 0.04455083508520],
[-0.64201017683130, -0.76582902128328, 0.04984937494587],
[-0.50360413913697, -0.77047334797411, 0.04455083508520],
[-0.35699270264769, -0.78592237254801, 0.02911091062702],
[-0.19097623971153, -0.81792406058543, 0.00890030029696],
[-1.00000000000000, -0.67920935366037, 0.03510878420129],
[-0.88152861658099, -0.65372263209158, 0.04358924989449],
[-0.76582902128328, -0.64201017683130, 0.04984937494587],
[-0.64201017683130, -0.64201017683130, 0.04984937494587],
[-0.50833800122192, -0.65372263209158, 0.04358924989449],
[-0.35589943054091, -0.67920935366037, 0.03510878420129],
[-1.00000000000000, -0.52186199814718, 0.04372399629437],
[-0.88152861658099, -0.50833800122192, 0.04358924989449],
[-0.77047334797411, -0.50360413913697, 0.04455083508520],
[-0.65372263209158, -0.50833800122192, 0.04358924989449],
[-0.52186199814718, -0.52186199814718, 0.04372399629437],
[-1.00000000000000, -0.35589943054091, 0.03510878420129],
[-0.88619583543133, -0.35699270264769, 0.02911091062702],
[-0.78592237254801, -0.35699270264769, 0.02911091062702],
[-0.67920935366037, -0.35589943054091, 0.03510878420129],
[-1.00000000000000, -0.19097623971153, 0.00890030029696],
[-0.89826430138753, -0.20329598101562, -0.00017541620933],
[-0.81792406058543, -0.19097623971153, 0.00890030029696],
[-1.00000000000000, -0.03598372132386, -0.03598372132386],
[-0.92803255735228, -0.03598372132386, -0.03598372132386],
[-1.00000000000000, 0.10132627352195, -0.10132627352195],
[-1.00000000000000, -1.00000000000000, 0.10132627352195],
[-0.92865745388385, -1.00000000000000, 0.16503624450246],
[-0.82105815385500, -1.00000000000000, 0.20617637525371],
[-0.68756446761854, -1.00000000000000, 0.22634376992852],
[-0.53877930230999, -1.00000000000000, 0.22634376992852],
[-0.38511822139871, -1.00000000000000, 0.20617637525371],
[-0.23637879061862, -1.00000000000000, 0.16503624450246],
[-0.10132627352195, -1.00000000000000, 0.10132627352195],
[-1.00000000000000, -0.92865745388385, 0.16503624450246],
[-0.89958234781404, -0.89958234781404, 0.19907278722019],
[-0.79197859234110, -0.88853826764551, 0.21686451930730],
[-0.66867465527363, -0.88552720830084, 0.22287651884810],
[-0.53634765932070, -0.88853826764551, 0.21686451930730],
[-0.39990809159211, -0.89958234781404, 0.19907278722019],
[-0.23637879061862, -0.92865745388385, 0.16503624450246],
[-1.00000000000000, -0.82105815385500, 0.20617637525371],
[-0.88853826764551, -0.79197859234110, 0.21686451930730],
[-0.77990848584661, -0.77990848584661, 0.22245755686831],
[-0.66264058517509, -0.77990848584661, 0.22245755686831],
[-0.53634765932070, -0.79197859234110, 0.21686451930730],
[-0.38511822139871, -0.82105815385500, 0.20617637525371],
[-1.00000000000000, -0.68756446761854, 0.22634376992852],
[-0.88552720830084, -0.66867465527363, 0.22287651884810],
[-0.77990848584661, -0.66264058517509, 0.22245755686831],
[-0.66867465527363, -0.66867465527363, 0.22287651884810],
[-0.53877930230999, -0.68756446761854, 0.22634376992852],
[-1.00000000000000, -0.53877930230999, 0.22634376992852],
[-0.88853826764551, -0.53634765932070, 0.21686451930730],
[-0.79197859234110, -0.53634765932070, 0.21686451930730],
[-0.68756446761854, -0.53877930230999, 0.22634376992852],
[-1.00000000000000, -0.38511822139871, 0.20617637525371],
[-0.89958234781404, -0.39990809159211, 0.19907278722019],
[-0.82105815385500, -0.38511822139871, 0.20617637525371],
[-1.00000000000000, -0.23637879061862, 0.16503624450246],
[-0.92865745388385, -0.23637879061862, 0.16503624450246],
[-1.00000000000000, -0.10132627352195, 0.10132627352195],
[-1.00000000000000, -1.00000000000000, 0.29983046890076],
[-0.93058892225476, -1.00000000000000, 0.35848347996644],
[-0.82755144253353, -1.00000000000000, 0.39279877226058],
[-0.70204520622319, -1.00000000000000, 0.40409041244638],
[-0.56524732972705, -1.00000000000000, 0.39279877226058],
[-0.42789455771167, -1.00000000000000, 0.35848347996644],
[-0.29983046890076, -1.00000000000000, 0.29983046890076],
[-1.00000000000000, -0.93058892225476, 0.35848347996644],
[-0.90246572869321, -0.90246572869321, 0.38464640967911],
[-0.80321920064737, -0.89307484646220, 0.39026436130056],
[-0.69397031419099, -0.89307484646220, 0.39026436130056],
[-0.57971495229269, -0.90246572869321, 0.38464640967911],
[-0.42789455771167, -0.93058892225476, 0.35848347996644],
[-1.00000000000000, -0.82755144253353, 0.39279877226058],
[-0.89307484646220, -0.80321920064737, 0.39026436130056],
[-0.79614636727388, -0.79614636727388, 0.38843910182164],
[-0.69397031419099, -0.80321920064737, 0.39026436130056],
[-0.56524732972705, -0.82755144253353, 0.39279877226058],
[-1.00000000000000, -0.70204520622319, 0.40409041244638],
[-0.89307484646220, -0.69397031419099, 0.39026436130056],
[-0.80321920064737, -0.69397031419099, 0.39026436130056],
[-0.70204520622319, -0.70204520622319, 0.40409041244638],
[-1.00000000000000, -0.56524732972705, 0.39279877226058],
[-0.90246572869321, -0.57971495229269, 0.38464640967911],
[-0.82755144253353, -0.56524732972705, 0.39279877226058],
[-1.00000000000000, -0.42789455771167, 0.35848347996644],
[-0.93058892225476, -0.42789455771167, 0.35848347996644],
[-1.00000000000000, -0.29983046890076, 0.29983046890076],
[-1.00000000000000, -1.00000000000000, 0.48605942188714],
[-0.93399645965197, -1.00000000000000, 0.53655201718538],
[-0.83785050813163, -1.00000000000000, 0.56132562402249],
[-0.72347511589086, -1.00000000000000, 0.56132562402249],
[-0.60255555753341, -1.00000000000000, 0.53655201718538],
[-0.48605942188714, -1.00000000000000, 0.48605942188714],
[-1.00000000000000, -0.93399645965197, 0.53655201718538],
[-0.90754940375318, -0.90754940375318, 0.54822977441893],
[-0.82191335999261, -0.90119859261617, 0.54502531260140],
[-0.73313096691257, -0.90754940375318, 0.54822977441893],
[-0.60255555753341, -0.93399645965197, 0.53655201718538],
[-1.00000000000000, -0.83785050813163, 0.56132562402249],
[-0.90119859261617, -0.82191335999262, 0.54502531260140],
[-0.82191335999262, -0.82191335999262, 0.54502531260140],
[-0.72347511589086, -0.83785050813163, 0.56132562402249],
[-1.00000000000000, -0.72347511589086, 0.56132562402249],
[-0.90754940375318, -0.73313096691257, 0.54822977441893],
[-0.83785050813163, -0.72347511589086, 0.56132562402249],
[-1.00000000000000, -0.60255555753341, 0.53655201718538],
[-0.93399645965197, -0.60255555753341, 0.53655201718538],
[-1.00000000000000, -0.48605942188714, 0.48605942188714],
[-1.00000000000000, -1.00000000000000, 0.65238870288249],
[-0.93915987605376, -1.00000000000000, 0.69214770146100],
[-0.85261629770175, -1.00000000000000, 0.70523259540349],
[-0.75298782540723, -1.00000000000000, 0.69214770146100],
[-0.65238870288249, -1.00000000000000, 0.65238870288249],
[-1.00000000000000, -0.93915987605376, 0.69214770146100],
[-0.91632317508641, -0.91632317508641, 0.68476542165573],
[-0.85211907148290, -0.91632317508641, 0.68476542165573],
[-0.75298782540723, -0.93915987605376, 0.69214770146100],
[-1.00000000000000, -0.85261629770175, 0.70523259540349],
[-0.91632317508641, -0.85211907148290, 0.68476542165573],
[-0.85261629770175, -0.85261629770175, 0.70523259540349],
[-1.00000000000000, -0.75298782540723, 0.69214770146100],
[-0.93915987605377, -0.75298782540723, 0.69214770146100],
[-1.00000000000000, -0.65238870288249, 0.65238870288249],
[-1.00000000000000, -1.00000000000000, 0.79200829186182],
[-0.94646577650942, -1.00000000000000, 0.81917970617538],
[-0.87271392966596, -1.00000000000000, 0.81917970617538],
[-0.79200829186182, -1.00000000000000, 0.79200829186182],
[-1.00000000000000, -0.94646577650942, 0.81917970617538],
[-0.93253524807174, -0.93253524807174, 0.79760574421522],
[-0.87271392966596, -0.94646577650942, 0.81917970617538],
[-1.00000000000000, -0.87271392966596, 0.81917970617538],
[-0.94646577650942, -0.87271392966596, 0.81917970617538],
[-1.00000000000000, -0.79200829186181, 0.79200829186181],
[-1.00000000000000, -1.00000000000000, 0.89920053309347],
[-0.95640415365904, -1.00000000000000, 0.91280830731808],
[-0.89920053309347, -1.00000000000000, 0.89920053309347],
[-1.00000000000000, -0.95640415365904, 0.91280830731808],
[-0.95640415365904, -0.95640415365904, 0.91280830731808],
[-1.00000000000000, -0.89920053309347, 0.89920053309347],
[-1.00000000000000, -1.00000000000000, 0.96956804627022],
[-0.96956804627022, -1.00000000000000, 0.96956804627022],
[-1.00000000000000, -0.96956804627022, 0.96956804627022],
])
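# NOTE (added comment, a hedged annotation of the data blocks): each branch
# below tabulates precomputed Fekete interpolation nodes for the given C
# (presumably the polynomial order). Every row is an (x, y, z) node on the
# reference tetrahedron with vertices (-1,-1,-1), (1,-1,-1), (-1,1,-1) and
# (-1,-1,1) -- the first four rows of the C == 15 table are exactly those
# vertices. Hardcoding the tables means the node sets do not have to be
# recomputed at runtime.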
elif C == 15:
feketeNodes = np.array([
[-1.00000000000000, -1.00000000000000, -1.00000000000000],
[1.00000000000000, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, 1.00000000000000, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, 1.00000000000000],
[-0.97313217663142, -1.00000000000000, -1.00000000000000],
[-0.91087999591557, -1.00000000000000, -1.00000000000000],
[-0.81569625122177, -1.00000000000000, -1.00000000000000],
[-0.69102898062768, -1.00000000000000, -1.00000000000000],
[-0.54138539933010, -1.00000000000000, -1.00000000000000],
[-0.37217443356548, -1.00000000000000, -1.00000000000000],
[-0.18951197351832, -1.00000000000000, -1.00000000000000],
[0.00000000000000, -1.00000000000000, -1.00000000000000],
[0.18951197351832, -1.00000000000000, -1.00000000000000],
[0.37217443356548, -1.00000000000000, -1.00000000000000],
[0.54138539933010, -1.00000000000000, -1.00000000000000],
[0.69102898062768, -1.00000000000000, -1.00000000000000],
[0.81569625122177, -1.00000000000000, -1.00000000000000],
[0.91087999591557, -1.00000000000000, -1.00000000000000],
[0.97313217663142, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, -0.97313217663142, -1.00000000000000],
[-0.96145683115080, -0.96145683115081, -1.00000000000000],
[-0.88728571819659, -0.95256383018706, -1.00000000000000],
[-0.78069706583309, -0.94594521938428, -1.00000000000000],
[-0.64590401105551, -0.94117406944344, -1.00000000000000],
[-0.48805336347952, -0.93790410083952, -1.00000000000000],
[-0.31305116691461, -0.93587209559668, -1.00000000000000],
[-0.12735504764429, -0.93490071879782, -1.00000000000000],
[0.06225576644211, -0.93490071879782, -1.00000000000000],
[0.24892326251128, -0.93587209559668, -1.00000000000000],
[0.42595746431903, -0.93790410083952, -1.00000000000000],
[0.58707808049895, -0.94117406944344, -1.00000000000000],
[0.72664228521737, -0.94594521938428, -1.00000000000000],
[0.83984954838364, -0.95256383018706, -1.00000000000000],
[0.92291366230160, -0.96145683115080, -1.00000000000000],
[0.97313217663142, -0.97313217663142, -1.00000000000000],
[-1.00000000000000, -0.91087999591557, -1.00000000000000],
[-0.95256383018706, -0.88728571819659, -1.00000000000000],
[-0.86918515701361, -0.86918515701361, -1.00000000000000],
[-0.75385397825850, -0.85566517591551, -1.00000000000000],
[-0.61150631099970, -0.84596211695572, -1.00000000000000],
[-0.44789101806497, -0.83947093630474, -1.00000000000000],
[-0.26937637111685, -0.83575330907818, -1.00000000000000],
[-0.08272813026426, -0.83454373947148, -1.00000000000000],
[0.10512968019503, -0.83575330907818, -1.00000000000000],
[0.28736195436971, -0.83947093630474, -1.00000000000000],
[0.45746842795542, -0.84596211695572, -1.00000000000000],
[0.60951915417401, -0.85566517591551, -1.00000000000000],
[0.73837031402721, -0.86918515701361, -1.00000000000000],
[0.83984954838364, -0.88728571819659, -1.00000000000000],
[0.91087999591557, -0.91087999591557, -1.00000000000000],
[-1.00000000000000, -0.81569625122177, -1.00000000000000],
[-0.94594521938428, -0.78069706583309, -1.00000000000000],
[-0.85566517591551, -0.75385397825850, -1.00000000000000],
[-0.73392254063670, -0.73392254063670, -1.00000000000000],
[-0.58634382717501, -0.71988475929362, -1.00000000000000],
[-0.41924757784475, -0.71096380698923, -1.00000000000000],
[-0.23943100407015, -0.70663537739511, -1.00000000000000],
[-0.05393361853474, -0.70663537739511, -1.00000000000000],
[0.13021138483398, -0.71096380698923, -1.00000000000000],
[0.30622858646863, -0.71988475929362, -1.00000000000000],
[0.46784508127340, -0.73392254063670, -1.00000000000000],
[0.60951915417401, -0.75385397825850, -1.00000000000000],
[0.72664228521737, -0.78069706583309, -1.00000000000000],
[0.81569625122177, -0.81569625122177, -1.00000000000000],
[-1.00000000000000, -0.69102898062768, -1.00000000000000],
[-0.94117406944344, -0.64590401105551, -1.00000000000000],
[-0.84596211695572, -0.61150631099970, -1.00000000000000],
[-0.71988475929362, -0.58634382717501, -1.00000000000000],
[-0.56923217902044, -0.56923217902044, -1.00000000000000],
[-0.40086241504471, -0.55931242597197, -1.00000000000000],
[-0.22196801150692, -0.55606397698616, -1.00000000000000],
[-0.03982515898332, -0.55931242597197, -1.00000000000000],
[0.13846435804087, -0.56923217902044, -1.00000000000000],
[0.30622858646863, -0.58634382717501, -1.00000000000000],
[0.45746842795542, -0.61150631099970, -1.00000000000000],
[0.58707808049895, -0.64590401105551, -1.00000000000000],
[0.69102898062768, -0.69102898062768, -1.00000000000000],
[-1.00000000000000, -0.54138539933010, -1.00000000000000],
[-0.93790410083952, -0.48805336347952, -1.00000000000000],
[-0.83947093630474, -0.44789101806497, -1.00000000000000],
[-0.71096380698923, -0.41924757784475, -1.00000000000000],
[-0.55931242597197, -0.40086241504471, -1.00000000000000],
[-0.39188428022937, -0.39188428022937, -1.00000000000000],
[-0.21623143954125, -0.39188428022937, -1.00000000000000],
[-0.03982515898332, -0.40086241504471, -1.00000000000000],
[0.13021138483398, -0.41924757784475, -1.00000000000000],
[0.28736195436971, -0.44789101806497, -1.00000000000000],
[0.42595746431903, -0.48805336347952, -1.00000000000000],
[0.54138539933010, -0.54138539933010, -1.00000000000000],
[-1.00000000000000, -0.37217443356548, -1.00000000000000],
[-0.93587209559668, -0.31305116691461, -1.00000000000000],
[-0.83575330907818, -0.26937637111685, -1.00000000000000],
[-0.70663537739511, -0.23943100407015, -1.00000000000000],
[-0.55606397698616, -0.22196801150692, -1.00000000000000],
[-0.39188428022937, -0.21623143954125, -1.00000000000000],
[-0.22196801150692, -0.22196801150692, -1.00000000000000],
[-0.05393361853474, -0.23943100407015, -1.00000000000000],
[0.10512968019503, -0.26937637111685, -1.00000000000000],
[0.24892326251129, -0.31305116691461, -1.00000000000000],
[0.37217443356548, -0.37217443356548, -1.00000000000000],
[-1.00000000000000, -0.18951197351832, -1.00000000000000],
[-0.93490071879782, -0.12735504764429, -1.00000000000000],
[-0.83454373947148, -0.08272813026426, -1.00000000000000],
[-0.70663537739511, -0.05393361853474, -1.00000000000000],
[-0.55931242597197, -0.03982515898332, -1.00000000000000],
[-0.40086241504471, -0.03982515898332, -1.00000000000000],
[-0.23943100407015, -0.05393361853474, -1.00000000000000],
[-0.08272813026426, -0.08272813026426, -1.00000000000000],
[0.06225576644211, -0.12735504764429, -1.00000000000000],
[0.18951197351832, -0.18951197351832, -1.00000000000000],
[-1.00000000000000, 0.00000000000000, -1.00000000000000],
[-0.93490071879782, 0.06225576644211, -1.00000000000000],
[-0.83575330907818, 0.10512968019503, -1.00000000000000],
[-0.71096380698923, 0.13021138483398, -1.00000000000000],
[-0.56923217902044, 0.13846435804087, -1.00000000000000],
[-0.41924757784475, 0.13021138483398, -1.00000000000000],
[-0.26937637111685, 0.10512968019503, -1.00000000000000],
[-0.12735504764429, 0.06225576644211, -1.00000000000000],
[0.00000000000000, 0.00000000000000, -1.00000000000000],
[-1.00000000000000, 0.18951197351832, -1.00000000000000],
[-0.93587209559668, 0.24892326251129, -1.00000000000000],
[-0.83947093630474, 0.28736195436971, -1.00000000000000],
[-0.71988475929362, 0.30622858646863, -1.00000000000000],
[-0.58634382717501, 0.30622858646863, -1.00000000000000],
[-0.44789101806497, 0.28736195436971, -1.00000000000000],
[-0.31305116691461, 0.24892326251128, -1.00000000000000],
[-0.18951197351832, 0.18951197351832, -1.00000000000000],
[-1.00000000000000, 0.37217443356548, -1.00000000000000],
[-0.93790410083952, 0.42595746431903, -1.00000000000000],
[-0.84596211695572, 0.45746842795542, -1.00000000000000],
[-0.73392254063670, 0.46784508127340, -1.00000000000000],
[-0.61150631099970, 0.45746842795542, -1.00000000000000],
[-0.48805336347951, 0.42595746431903, -1.00000000000000],
[-0.37217443356548, 0.37217443356548, -1.00000000000000],
[-1.00000000000000, 0.54138539933010, -1.00000000000000],
[-0.94117406944344, 0.58707808049895, -1.00000000000000],
[-0.85566517591551, 0.60951915417401, -1.00000000000000],
[-0.75385397825850, 0.60951915417401, -1.00000000000000],
[-0.64590401105551, 0.58707808049895, -1.00000000000000],
[-0.54138539933010, 0.54138539933010, -1.00000000000000],
[-1.00000000000000, 0.69102898062768, -1.00000000000000],
[-0.94594521938428, 0.72664228521737, -1.00000000000000],
[-0.86918515701361, 0.73837031402721, -1.00000000000000],
[-0.78069706583309, 0.72664228521737, -1.00000000000000],
[-0.69102898062768, 0.69102898062768, -1.00000000000000],
[-1.00000000000000, 0.81569625122177, -1.00000000000000],
[-0.95256383018706, 0.83984954838364, -1.00000000000000],
[-0.88728571819659, 0.83984954838364, -1.00000000000000],
[-0.81569625122177, 0.81569625122177, -1.00000000000000],
[-1.00000000000000, 0.91087999591557, -1.00000000000000],
[-0.96145683115081, 0.92291366230160, -1.00000000000000],
[-0.91087999591557, 0.91087999591557, -1.00000000000000],
[-1.00000000000000, 0.97313217663142, -1.00000000000000],
[-0.97313217663142, 0.97313217663142, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, -0.97313217663142],
[-0.96145683115080, -1.00000000000000, -0.96145683115081],
[-0.88728571819659, -1.00000000000000, -0.95256383018706],
[-0.78069706583309, -1.00000000000000, -0.94594521938428],
[-0.64590401105551, -1.00000000000000, -0.94117406944344],
[-0.48805336347952, -1.00000000000000, -0.93790410083952],
[-0.31305116691461, -1.00000000000000, -0.93587209559668],
[-0.12735504764429, -1.00000000000000, -0.93490071879782],
[0.06225576644211, -1.00000000000000, -0.93490071879782],
[0.24892326251128, -1.00000000000000, -0.93587209559668],
[0.42595746431903, -1.00000000000000, -0.93790410083952],
[0.58707808049895, -1.00000000000000, -0.94117406944344],
[0.72664228521737, -1.00000000000000, -0.94594521938428],
[0.83984954838364, -1.00000000000000, -0.95256383018706],
[0.92291366230160, -1.00000000000000, -0.96145683115080],
[0.97313217663142, -1.00000000000000, -0.97313217663142],
[-1.00000000000000, -0.96145683115080, -0.96145683115081],
[-0.93943632227224, -0.93943632227224, -0.93943632227224],
[-0.86944503948562, -0.92422372884499, -0.92422372884499],
[-0.76465149695200, -0.91594133949377, -0.91594133949377],
[-0.62778642191262, -0.91105468070505, -0.91105468070505],
[-0.46512678709536, -0.90814282013366, -0.90814282013366],
[-0.28436225480397, -0.90657978734712, -0.90657978734712],
[-0.09391411664722, -0.90608588335278, -0.90608588335278],
[0.09752182949820, -0.90657978734712, -0.90657978734712],
[0.28141242736269, -0.90814282013366, -0.90814282013366],
[0.44989578332272, -0.91105468070505, -0.91105468070505],
[0.59653417593955, -0.91594133949377, -0.91594133949377],
[0.71789249717560, -0.92422372884499, -0.92422372884499],
[0.81830896681673, -0.93943632227224, -0.93943632227224],
[0.92291366230160, -0.96145683115080, -0.96145683115081],
[-1.00000000000000, -0.88728571819659, -0.95256383018706],
[-0.92422372884499, -0.86944503948562, -0.92422372884499],
[-0.84093333011616, -0.84093333011616, -0.90987534922907],
[-0.72719985604980, -0.82302040254341, -0.90207264264846],
[-0.58541267651694, -0.81189519337880, -0.89758421902605],
[-0.42178213297254, -0.80535532140090, -0.89506610278289],
[-0.24389353969246, -0.80231226689534, -0.89392415739487],
[-0.05987003601733, -0.80231226689534, -0.89392415739487],
[0.12220355715633, -0.80535532140090, -0.89506610278289],
[0.29489208892178, -0.81189519337880, -0.89758421902605],
[0.45229290124167, -0.82302040254341, -0.90207264264846],
[0.59174200946140, -0.84093333011616, -0.90987534922907],
[0.71789249717559, -0.86944503948562, -0.92422372884499],
[0.83984954838364, -0.88728571819659, -0.95256383018706],
[-1.00000000000000, -0.78069706583309, -0.94594521938428],
[-0.91594133949377, -0.76465149695200, -0.91594133949377],
[-0.82302040254341, -0.72719985604980, -0.90207264264846],
[-0.70240299672263, -0.70240299672263, -0.89470267766589],
[-0.55699468121643, -0.68694351447282, -0.89060856982129],
[-0.39311610910852, -0.67844598835119, -0.88850304650231],
[-0.21820887124932, -0.67573128488548, -0.88785097261588],
[-0.03993485603799, -0.67844598835118, -0.88850304650231],
[0.13454676551055, -0.68694351447282, -0.89060856982129],
[0.29950867111115, -0.70240299672263, -0.89470267766589],
[0.45229290124167, -0.72719985604980, -0.90207264264846],
[0.59653417593955, -0.76465149695200, -0.91594133949377],
[0.72664228521737, -0.78069706583309, -0.94594521938428],
[-1.00000000000000, -0.64590401105551, -0.94117406944344],
[-0.91105468070505, -0.62778642191262, -0.91105468070505],
[-0.81189519337880, -0.58541267651694, -0.89758421902605],
[-0.68694351447282, -0.55699468121643, -0.89060856982129],
[-0.53982745207518, -0.53982745207518, -0.88692677958341],
[-0.37705601988688, -0.53173147773860, -0.88531043041236],
[-0.20590207196216, -0.53173147773860, -0.88531043041236],
[-0.03341831626624, -0.53982745207518, -0.88692677958341],
[0.13454676551055, -0.55699468121643, -0.89060856982129],
[0.29489208892178, -0.58541267651694, -0.89758421902605],
[0.44989578332272, -0.62778642191262, -0.91105468070505],
[0.58707808049895, -0.64590401105551, -0.94117406944344],
[-1.00000000000000, -0.48805336347952, -0.93790410083952],
[-0.90814282013366, -0.46512678709536, -0.90814282013366],
[-0.80535532140090, -0.42178213297254, -0.89506610278289],
[-0.67844598835119, -0.39311610910852, -0.88850304650231],
[-0.53173147773860, -0.37705601988688, -0.88531043041236],
[-0.37188461854359, -0.37188461854359, -0.88434614436923],
[-0.20590207196216, -0.37705601988688, -0.88531043041236],
[-0.03993485603799, -0.39311610910852, -0.88850304650231],
[0.12220355715633, -0.42178213297254, -0.89506610278289],
[0.28141242736269, -0.46512678709536, -0.90814282013366],
[0.42595746431903, -0.48805336347952, -0.93790410083952],
[-1.00000000000000, -0.31305116691461, -0.93587209559668],
[-0.90657978734712, -0.28436225480397, -0.90657978734712],
[-0.80231226689534, -0.24389353969246, -0.89392415739487],
[-0.67573128488548, -0.21820887124932, -0.88785097261588],
[-0.53173147773860, -0.20590207196216, -0.88531043041236],
[-0.37705601988688, -0.20590207196216, -0.88531043041236],
[-0.21820887124932, -0.21820887124932, -0.88785097261588],
[-0.05987003601733, -0.24389353969246, -0.89392415739487],
[0.09752182949820, -0.28436225480397, -0.90657978734712],
[0.24892326251129, -0.31305116691461, -0.93587209559668],
[-1.00000000000000, -0.12735504764429, -0.93490071879782],
[-0.90608588335278, -0.09391411664722, -0.90608588335278],
[-0.80231226689534, -0.05987003601733, -0.89392415739487],
[-0.67844598835119, -0.03993485603799, -0.88850304650231],
[-0.53982745207518, -0.03341831626624, -0.88692677958341],
[-0.39311610910852, -0.03993485603799, -0.88850304650231],
[-0.24389353969246, -0.05987003601733, -0.89392415739487],
[-0.09391411664722, -0.09391411664722, -0.90608588335278],
[0.06225576644211, -0.12735504764429, -0.93490071879782],
[-1.00000000000000, 0.06225576644211, -0.93490071879782],
[-0.90657978734712, 0.09752182949820, -0.90657978734712],
[-0.80535532140090, 0.12220355715633, -0.89506610278289],
[-0.68694351447282, 0.13454676551055, -0.89060856982129],
[-0.55699468121643, 0.13454676551055, -0.89060856982129],
[-0.42178213297254, 0.12220355715633, -0.89506610278289],
[-0.28436225480396, 0.09752182949820, -0.90657978734712],
[-0.12735504764429, 0.06225576644211, -0.93490071879782],
[-1.00000000000000, 0.24892326251128, -0.93587209559668],
[-0.90814282013366, 0.28141242736269, -0.90814282013366],
[-0.81189519337880, 0.29489208892178, -0.89758421902605],
[-0.70240299672263, 0.29950867111115, -0.89470267766589],
[-0.58541267651694, 0.29489208892178, -0.89758421902605],
[-0.46512678709536, 0.28141242736269, -0.90814282013366],
[-0.31305116691461, 0.24892326251128, -0.93587209559668],
[-1.00000000000000, 0.42595746431903, -0.93790410083952],
[-0.91105468070505, 0.44989578332272, -0.91105468070505],
[-0.82302040254341, 0.45229290124167, -0.90207264264846],
[-0.72719985604980, 0.45229290124167, -0.90207264264846],
[-0.62778642191262, 0.44989578332272, -0.91105468070505],
[-0.48805336347952, 0.42595746431903, -0.93790410083952],
[-1.00000000000000, 0.58707808049895, -0.94117406944344],
[-0.91594133949377, 0.59653417593955, -0.91594133949377],
[-0.84093333011616, 0.59174200946140, -0.90987534922907],
[-0.76465149695200, 0.59653417593955, -0.91594133949377],
[-0.64590401105551, 0.58707808049895, -0.94117406944344],
[-1.00000000000000, 0.72664228521737, -0.94594521938428],
[-0.92422372884499, 0.71789249717559, -0.92422372884499],
[-0.86944503948562, 0.71789249717560, -0.92422372884499],
[-0.78069706583309, 0.72664228521737, -0.94594521938428],
[-1.00000000000000, 0.83984954838364, -0.95256383018706],
[-0.93943632227224, 0.81830896681673, -0.93943632227224],
[-0.88728571819659, 0.83984954838364, -0.95256383018706],
[-1.00000000000000, 0.92291366230160, -0.96145683115080],
[-0.96145683115081, 0.92291366230160, -0.96145683115080],
[-1.00000000000000, 0.97313217663142, -0.97313217663142],
[-1.00000000000000, -1.00000000000000, -0.91087999591557],
[-0.95256383018706, -1.00000000000000, -0.88728571819659],
[-0.86918515701361, -1.00000000000000, -0.86918515701361],
[-0.75385397825850, -1.00000000000000, -0.85566517591551],
[-0.61150631099970, -1.00000000000000, -0.84596211695572],
[-0.44789101806497, -1.00000000000000, -0.83947093630474],
[-0.26937637111685, -1.00000000000000, -0.83575330907818],
[-0.08272813026426, -1.00000000000000, -0.83454373947148],
[0.10512968019503, -1.00000000000000, -0.83575330907818],
[0.28736195436971, -1.00000000000000, -0.83947093630474],
[0.45746842795542, -1.00000000000000, -0.84596211695572],
[0.60951915417401, -1.00000000000000, -0.85566517591551],
[0.73837031402721, -1.00000000000000, -0.86918515701361],
[0.83984954838364, -1.00000000000000, -0.88728571819659],
[0.91087999591557, -1.00000000000000, -0.91087999591557],
[-1.00000000000000, -0.95256383018706, -0.88728571819659],
[-0.92422372884499, -0.92422372884499, -0.86944503948562],
[-0.84093333011616, -0.90987534922907, -0.84093333011616],
[-0.72719985604980, -0.90207264264846, -0.82302040254341],
[-0.58541267651694, -0.89758421902605, -0.81189519337880],
[-0.42178213297254, -0.89506610278289, -0.80535532140090],
[-0.24389353969246, -0.89392415739487, -0.80231226689534],
[-0.05987003601733, -0.89392415739487, -0.80231226689534],
[0.12220355715633, -0.89506610278289, -0.80535532140090],
[0.29489208892178, -0.89758421902605, -0.81189519337880],
[0.45229290124167, -0.90207264264846, -0.82302040254341],
[0.59174200946140, -0.90987534922907, -0.84093333011616],
[0.71789249717559, -0.92422372884499, -0.86944503948562],
[0.83984954838364, -0.95256383018706, -0.88728571819659],
[-1.00000000000000, -0.86918515701361, -0.86918515701361],
[-0.90987534922907, -0.84093333011616, -0.84093333011616],
[-0.81634521017071, -0.81634521017071, -0.81634521017071],
[-0.69681397788482, -0.80045242943653, -0.80045242943653],
[-0.55271049610410, -0.79071623475418, -0.79071623475418],
[-0.39003105466491, -0.78544012419782, -0.78544012419782],
[-0.21623243063106, -0.78376756936894, -0.78376756936894],
[-0.03908869693945, -0.78544012419782, -0.78544012419782],
[0.13414296561246, -0.79071623475418, -0.79071623475418],
[0.29771883675788, -0.80045242943653, -0.80045242943653],
[0.44903563051214, -0.81634521017071, -0.81634521017071],
[0.59174200946140, -0.84093333011616, -0.84093333011616],
[0.73837031402721, -0.86918515701361, -0.86918515701361],
[-1.00000000000000, -0.75385397825850, -0.85566517591551],
[-0.90207264264846, -0.72719985604980, -0.82302040254341],
[-0.80045242943653, -0.69681397788482, -0.80045242943653],
[-0.67596634711632, -0.67596634711632, -0.78631903813907],
[-0.53018191908037, -0.66330998127617, -0.77808048548239],
[-0.36896982961987, -0.65735005129103, -0.77427857358733],
[-0.19940154550177, -0.65735005129103, -0.77427857358733],
[-0.02842761416107, -0.66330998127617, -0.77808048548239],
[0.13825173237170, -0.67596634711632, -0.78631903813907],
[0.29771883675788, -0.69681397788482, -0.80045242943653],
[0.45229290124167, -0.72719985604980, -0.82302040254341],
[0.60951915417401, -0.75385397825850, -0.85566517591551],
[-1.00000000000000, -0.61150631099970, -0.84596211695572],
[-0.89758421902605, -0.58541267651694, -0.81189519337880],
[-0.79071623475418, -0.55271049610410, -0.79071623475418],
[-0.66330998127617, -0.53018191908037, -0.77808048548239],
[-0.51739938877605, -0.51739938877605, -0.77134978179665],
[-0.35874699582426, -0.51327189188952, -0.76923411646197],
[-0.19385144065125, -0.51739938877605, -0.77134978179665],
[-0.02842761416107, -0.53018191908037, -0.77808048548239],
[0.13414296561246, -0.55271049610410, -0.79071623475418],
[0.29489208892178, -0.58541267651694, -0.81189519337880],
[0.45746842795542, -0.61150631099970, -0.84596211695572],
[-1.00000000000000, -0.44789101806497, -0.83947093630474],
[-0.89506610278289, -0.42178213297254, -0.80535532140090],
[-0.78544012419782, -0.39003105466491, -0.78544012419782],
[-0.65735005129103, -0.36896982961987, -0.77427857358733],
[-0.51327189188952, -0.35874699582426, -0.76923411646197],
[-0.35874699582426, -0.35874699582426, -0.76923411646197],
[-0.19940154550177, -0.36896982961987, -0.77427857358733],
[-0.03908869693945, -0.39003105466491, -0.78544012419782],
[0.12220355715633, -0.42178213297254, -0.80535532140090],
[0.28736195436971, -0.44789101806497, -0.83947093630474],
[-1.00000000000000, -0.26937637111685, -0.83575330907818],
[-0.89392415739487, -0.24389353969246, -0.80231226689534],
[-0.78376756936894, -0.21623243063106, -0.78376756936894],
[-0.65735005129103, -0.19940154550177, -0.77427857358733],
[-0.51739938877605, -0.19385144065125, -0.77134978179665],
[-0.36896982961987, -0.19940154550177, -0.77427857358733],
[-0.21623243063106, -0.21623243063106, -0.78376756936894],
[-0.05987003601733, -0.24389353969246, -0.80231226689534],
[0.10512968019503, -0.26937637111685, -0.83575330907818],
[-1.00000000000000, -0.08272813026426, -0.83454373947148],
[-0.89392415739487, -0.05987003601733, -0.80231226689534],
[-0.78544012419782, -0.03908869693945, -0.78544012419782],
[-0.66330998127617, -0.02842761416107, -0.77808048548239],
[-0.53018191908037, -0.02842761416107, -0.77808048548239],
[-0.39003105466491, -0.03908869693945, -0.78544012419782],
[-0.24389353969246, -0.05987003601733, -0.80231226689534],
[-0.08272813026426, -0.08272813026426, -0.83454373947148],
[-1.00000000000000, 0.10512968019503, -0.83575330907818],
[-0.89506610278289, 0.12220355715633, -0.80535532140090],
[-0.79071623475418, 0.13414296561246, -0.79071623475418],
[-0.67596634711632, 0.13825173237170, -0.78631903813907],
[-0.55271049610410, 0.13414296561246, -0.79071623475418],
[-0.42178213297254, 0.12220355715633, -0.80535532140090],
[-0.26937637111685, 0.10512968019503, -0.83575330907818],
[-1.00000000000000, 0.28736195436971, -0.83947093630474],
[-0.89758421902605, 0.29489208892178, -0.81189519337880],
[-0.80045242943653, 0.29771883675788, -0.80045242943653],
[-0.69681397788482, 0.29771883675788, -0.80045242943653],
[-0.58541267651694, 0.29489208892178, -0.81189519337880],
[-0.44789101806497, 0.28736195436971, -0.83947093630474],
[-1.00000000000000, 0.45746842795542, -0.84596211695572],
[-0.90207264264846, 0.45229290124167, -0.82302040254341],
[-0.81634521017071, 0.44903563051214, -0.81634521017071],
[-0.72719985604980, 0.45229290124167, -0.82302040254341],
[-0.61150631099970, 0.45746842795542, -0.84596211695572],
[-1.00000000000000, 0.60951915417401, -0.85566517591551],
[-0.90987534922907, 0.59174200946140, -0.84093333011616],
[-0.84093333011616, 0.59174200946140, -0.84093333011616],
[-0.75385397825850, 0.60951915417401, -0.85566517591551],
[-1.00000000000000, 0.73837031402721, -0.86918515701361],
[-0.92422372884499, 0.71789249717559, -0.86944503948562],
[-0.86918515701361, 0.73837031402721, -0.86918515701361],
[-1.00000000000000, 0.83984954838364, -0.88728571819659],
[-0.95256383018706, 0.83984954838364, -0.88728571819659],
[-1.00000000000000, 0.91087999591557, -0.91087999591557],
[-1.00000000000000, -1.00000000000000, -0.81569625122177],
[-0.94594521938428, -1.00000000000000, -0.78069706583309],
[-0.85566517591551, -1.00000000000000, -0.75385397825850],
[-0.73392254063670, -1.00000000000000, -0.73392254063670],
[-0.58634382717501, -1.00000000000000, -0.71988475929362],
[-0.41924757784475, -1.00000000000000, -0.71096380698923],
[-0.23943100407015, -1.00000000000000, -0.70663537739511],
[-0.05393361853474, -1.00000000000000, -0.70663537739511],
[0.13021138483398, -1.00000000000000, -0.71096380698923],
[0.30622858646863, -1.00000000000000, -0.71988475929362],
[0.46784508127340, -1.00000000000000, -0.73392254063670],
[0.60951915417401, -1.00000000000000, -0.75385397825850],
[0.72664228521737, -1.00000000000000, -0.78069706583309],
[0.81569625122177, -1.00000000000000, -0.81569625122177],
[-1.00000000000000, -0.94594521938428, -0.78069706583309],
[-0.91594133949377, -0.91594133949377, -0.76465149695200],
[-0.82302040254341, -0.90207264264846, -0.72719985604980],
[-0.70240299672263, -0.89470267766589, -0.70240299672263],
[-0.55699468121643, -0.89060856982129, -0.68694351447282],
[-0.39311610910852, -0.88850304650231, -0.67844598835118],
[-0.21820887124932, -0.88785097261588, -0.67573128488548],
[-0.03993485603799, -0.88850304650231, -0.67844598835118],
[0.13454676551055, -0.89060856982129, -0.68694351447282],
[0.29950867111115, -0.89470267766589, -0.70240299672263],
[0.45229290124167, -0.90207264264846, -0.72719985604980],
[0.59653417593955, -0.91594133949377, -0.76465149695200],
[0.72664228521737, -0.94594521938428, -0.78069706583309],
[-1.00000000000000, -0.85566517591551, -0.75385397825850],
[-0.90207264264846, -0.82302040254341, -0.72719985604980],
[-0.80045242943653, -0.80045242943653, -0.69681397788482],
[-0.67596634711632, -0.78631903813907, -0.67596634711632],
[-0.53018191908037, -0.77808048548239, -0.66330998127617],
[-0.36896982961987, -0.77427857358733, -0.65735005129103],
[-0.19940154550177, -0.77427857358733, -0.65735005129103],
[-0.02842761416107, -0.77808048548239, -0.66330998127617],
[0.13825173237170, -0.78631903813907, -0.67596634711632],
[0.29771883675788, -0.80045242943653, -0.69681397788482],
[0.45229290124167, -0.82302040254341, -0.72719985604980],
[0.60951915417401, -0.85566517591551, -0.75385397825850],
[-1.00000000000000, -0.73392254063670, -0.73392254063670],
[-0.89470267766589, -0.70240299672263, -0.70240299672263],
[-0.78631903813907, -0.67596634711632, -0.67596634711632],
[-0.65854183263918, -0.65854183263918, -0.65854183263918],
[-0.51271767829902, -0.64881302709473, -0.64881302709473],
[-0.35430719405344, -0.64569280594656, -0.64569280594656],
[-0.18965626751152, -0.64881302709473, -0.64881302709473],
[-0.02437450208246, -0.65854183263918, -0.65854183263918],
[0.13825173237170, -0.67596634711632, -0.67596634711632],
[0.29950867111115, -0.70240299672263, -0.70240299672263],
[0.46784508127340, -0.73392254063670, -0.73392254063670],
[-1.00000000000000, -0.58634382717501, -0.71988475929362],
[-0.89060856982129, -0.55699468121643, -0.68694351447282],
[-0.77808048548239, -0.53018191908037, -0.66330998127617],
[-0.64881302709473, -0.51271767829902, -0.64881302709473],
[-0.50427892665174, -0.50427892665174, -0.64197354809406],
[-0.34946859860247, -0.50427892665174, -0.64197354809406],
[-0.18965626751152, -0.51271767829902, -0.64881302709473],
[-0.02842761416107, -0.53018191908037, -0.66330998127617],
[0.13454676551055, -0.55699468121643, -0.68694351447282],
[0.30622858646863, -0.58634382717501, -0.71988475929362],
[-1.00000000000000, -0.41924757784475, -0.71096380698923],
[-0.88850304650231, -0.39311610910852, -0.67844598835118],
[-0.77427857358733, -0.36896982961987, -0.65735005129103],
[-0.64569280594656, -0.35430719405344, -0.64569280594656],
[-0.50427892665174, -0.34946859860247, -0.64197354809406],
[-0.35430719405344, -0.35430719405344, -0.64569280594656],
[-0.19940154550177, -0.36896982961987, -0.65735005129103],
[-0.03993485603799, -0.39311610910852, -0.67844598835119],
[0.13021138483398, -0.41924757784475, -0.71096380698923],
[-1.00000000000000, -0.23943100407015, -0.70663537739511],
[-0.88785097261588, -0.21820887124932, -0.67573128488548],
[-0.77427857358733, -0.19940154550177, -0.65735005129103],
[-0.64881302709473, -0.18965626751152, -0.64881302709473],
[-0.51271767829902, -0.18965626751152, -0.64881302709473],
[-0.36896982961987, -0.19940154550177, -0.65735005129103],
[-0.21820887124932, -0.21820887124932, -0.67573128488548],
[-0.05393361853474, -0.23943100407015, -0.70663537739511],
[-1.00000000000000, -0.05393361853474, -0.70663537739511],
[-0.88850304650231, -0.03993485603799, -0.67844598835118],
[-0.77808048548239, -0.02842761416107, -0.66330998127617],
[-0.65854183263918, -0.02437450208246, -0.65854183263918],
[-0.53018191908037, -0.02842761416107, -0.66330998127617],
[-0.39311610910852, -0.03993485603799, -0.67844598835118],
[-0.23943100407015, -0.05393361853474, -0.70663537739511],
[-1.00000000000000, 0.13021138483398, -0.71096380698923],
[-0.89060856982129, 0.13454676551055, -0.68694351447282],
[-0.78631903813907, 0.13825173237170, -0.67596634711632],
[-0.67596634711632, 0.13825173237170, -0.67596634711632],
[-0.55699468121643, 0.13454676551055, -0.68694351447282],
[-0.41924757784475, 0.13021138483398, -0.71096380698923],
[-1.00000000000000, 0.30622858646863, -0.71988475929362],
[-0.89470267766589, 0.29950867111115, -0.70240299672263],
[-0.80045242943653, 0.29771883675788, -0.69681397788482],
[-0.70240299672263, 0.29950867111115, -0.70240299672263],
[-0.58634382717501, 0.30622858646863, -0.71988475929362],
[-1.00000000000000, 0.46784508127340, -0.73392254063670],
[-0.90207264264846, 0.45229290124167, -0.72719985604980],
[-0.82302040254341, 0.45229290124167, -0.72719985604980],
[-0.73392254063670, 0.46784508127340, -0.73392254063670],
[-1.00000000000000, 0.60951915417401, -0.75385397825850],
[-0.91594133949377, 0.59653417593955, -0.76465149695200],
[-0.85566517591551, 0.60951915417401, -0.75385397825850],
[-1.00000000000000, 0.72664228521737, -0.78069706583309],
[-0.94594521938428, 0.72664228521737, -0.78069706583309],
[-1.00000000000000, 0.81569625122177, -0.81569625122177],
[-1.00000000000000, -1.00000000000000, -0.69102898062768],
[-0.94117406944344, -1.00000000000000, -0.64590401105551],
[-0.84596211695572, -1.00000000000000, -0.61150631099970],
[-0.71988475929362, -1.00000000000000, -0.58634382717501],
[-0.56923217902044, -1.00000000000000, -0.56923217902044],
[-0.40086241504471, -1.00000000000000, -0.55931242597197],
[-0.22196801150692, -1.00000000000000, -0.55606397698616],
[-0.03982515898332, -1.00000000000000, -0.55931242597197],
[0.13846435804087, -1.00000000000000, -0.56923217902044],
[0.30622858646863, -1.00000000000000, -0.58634382717501],
[0.45746842795542, -1.00000000000000, -0.61150631099970],
[0.58707808049895, -1.00000000000000, -0.64590401105551],
[0.69102898062768, -1.00000000000000, -0.69102898062768],
[-1.00000000000000, -0.94117406944344, -0.64590401105551],
[-0.91105468070505, -0.91105468070505, -0.62778642191262],
[-0.81189519337880, -0.89758421902605, -0.58541267651694],
[-0.68694351447282, -0.89060856982129, -0.55699468121643],
[-0.53982745207518, -0.88692677958341, -0.53982745207518],
[-0.37705601988688, -0.88531043041236, -0.53173147773860],
[-0.20590207196216, -0.88531043041236, -0.53173147773860],
[-0.03341831626624, -0.88692677958341, -0.53982745207518],
[0.13454676551055, -0.89060856982129, -0.55699468121643],
[0.29489208892178, -0.89758421902605, -0.58541267651694],
[0.44989578332272, -0.91105468070505, -0.62778642191262],
[0.58707808049895, -0.94117406944344, -0.64590401105551],
[-1.00000000000000, -0.84596211695572, -0.61150631099970],
[-0.89758421902605, -0.81189519337880, -0.58541267651694],
[-0.79071623475418, -0.79071623475418, -0.55271049610410],
[-0.66330998127617, -0.77808048548239, -0.53018191908037],
[-0.51739938877605, -0.77134978179665, -0.51739938877605],
[-0.35874699582426, -0.76923411646197, -0.51327189188952],
[-0.19385144065125, -0.77134978179665, -0.51739938877605],
[-0.02842761416107, -0.77808048548239, -0.53018191908037],
[0.13414296561246, -0.79071623475418, -0.55271049610410],
[0.29489208892178, -0.81189519337880, -0.58541267651694],
[0.45746842795542, -0.84596211695572, -0.61150631099970],
[-1.00000000000000, -0.71988475929362, -0.58634382717501],
[-0.89060856982129, -0.68694351447282, -0.55699468121643],
[-0.77808048548239, -0.66330998127617, -0.53018191908037],
[-0.64881302709473, -0.64881302709473, -0.51271767829902],
[-0.50427892665174, -0.64197354809406, -0.50427892665174],
[-0.34946859860247, -0.64197354809406, -0.50427892665174],
[-0.18965626751152, -0.64881302709473, -0.51271767829902],
[-0.02842761416107, -0.66330998127617, -0.53018191908037],
[0.13454676551055, -0.68694351447282, -0.55699468121643],
[0.30622858646863, -0.71988475929362, -0.58634382717501],
[-1.00000000000000, -0.56923217902044, -0.56923217902044],
[-0.88692677958341, -0.53982745207518, -0.53982745207518],
[-0.77134978179665, -0.51739938877605, -0.51739938877605],
[-0.64197354809406, -0.50427892665174, -0.50427892665174],
[-0.50000000000000, -0.50000000000000, -0.50000000000000],
[-0.34946859860247, -0.50427892665174, -0.50427892665174],
[-0.19385144065125, -0.51739938877605, -0.51739938877605],
[-0.03341831626624, -0.53982745207518, -0.53982745207518],
[0.13846435804087, -0.56923217902044, -0.56923217902044],
[-1.00000000000000, -0.40086241504471, -0.55931242597197],
[-0.88531043041236, -0.37705601988688, -0.53173147773860],
[-0.76923411646197, -0.35874699582426, -0.51327189188952],
[-0.64197354809406, -0.34946859860247, -0.50427892665174],
[-0.50427892665174, -0.34946859860247, -0.50427892665174],
[-0.35874699582426, -0.35874699582426, -0.51327189188952],
[-0.20590207196216, -0.37705601988688, -0.53173147773860],
[-0.03982515898332, -0.40086241504471, -0.55931242597197],
[-1.00000000000000, -0.22196801150692, -0.55606397698616],
[-0.88531043041236, -0.20590207196216, -0.53173147773860],
[-0.77134978179665, -0.19385144065125, -0.51739938877605],
[-0.64881302709473, -0.18965626751152, -0.51271767829902],
[-0.51739938877605, -0.19385144065125, -0.51739938877605],
[-0.37705601988688, -0.20590207196216, -0.53173147773860],
[-0.22196801150692, -0.22196801150692, -0.55606397698616],
[-1.00000000000000, -0.03982515898332, -0.55931242597197],
[-0.88692677958341, -0.03341831626624, -0.53982745207518],
[-0.77808048548239, -0.02842761416107, -0.53018191908037],
[-0.66330998127617, -0.02842761416107, -0.53018191908037],
[-0.53982745207518, -0.03341831626624, -0.53982745207518],
[-0.40086241504471, -0.03982515898332, -0.55931242597197],
[-1.00000000000000, 0.13846435804087, -0.56923217902044],
[-0.89060856982129, 0.13454676551055, -0.55699468121643],
[-0.79071623475418, 0.13414296561246, -0.55271049610410],
[-0.68694351447282, 0.13454676551055, -0.55699468121643],
[-0.56923217902044, 0.13846435804087, -0.56923217902044],
[-1.00000000000000, 0.30622858646863, -0.58634382717501],
[-0.89758421902605, 0.29489208892178, -0.58541267651694],
[-0.81189519337880, 0.29489208892178, -0.58541267651694],
[-0.71988475929362, 0.30622858646863, -0.58634382717501],
[-1.00000000000000, 0.45746842795542, -0.61150631099970],
[-0.91105468070505, 0.44989578332272, -0.62778642191262],
[-0.84596211695572, 0.45746842795542, -0.61150631099970],
[-1.00000000000000, 0.58707808049895, -0.64590401105551],
[-0.94117406944344, 0.58707808049895, -0.64590401105551],
[-1.00000000000000, 0.69102898062768, -0.69102898062768],
[-1.00000000000000, -1.00000000000000, -0.54138539933010],
[-0.93790410083952, -1.00000000000000, -0.48805336347952],
[-0.83947093630474, -1.00000000000000, -0.44789101806497],
[-0.71096380698923, -1.00000000000000, -0.41924757784475],
[-0.55931242597197, -1.00000000000000, -0.40086241504471],
[-0.39188428022937, -1.00000000000000, -0.39188428022937],
[-0.21623143954125, -1.00000000000000, -0.39188428022937],
[-0.03982515898332, -1.00000000000000, -0.40086241504471],
[0.13021138483398, -1.00000000000000, -0.41924757784475],
[0.28736195436971, -1.00000000000000, -0.44789101806497],
[0.42595746431903, -1.00000000000000, -0.48805336347952],
[0.54138539933010, -1.00000000000000, -0.54138539933010],
[-1.00000000000000, -0.93790410083952, -0.48805336347952],
[-0.90814282013366, -0.90814282013366, -0.46512678709536],
[-0.80535532140090, -0.89506610278289, -0.42178213297254],
[-0.67844598835119, -0.88850304650231, -0.39311610910852],
[-0.53173147773860, -0.88531043041236, -0.37705601988688],
[-0.37188461854359, -0.88434614436923, -0.37188461854359],
[-0.20590207196216, -0.88531043041236, -0.37705601988688],
[-0.03993485603799, -0.88850304650231, -0.39311610910852],
[0.12220355715633, -0.89506610278289, -0.42178213297254],
[0.28141242736269, -0.90814282013366, -0.46512678709536],
[0.42595746431903, -0.93790410083952, -0.48805336347952],
[-1.00000000000000, -0.83947093630474, -0.44789101806497],
[-0.89506610278289, -0.80535532140090, -0.42178213297254],
[-0.78544012419782, -0.78544012419782, -0.39003105466491],
[-0.65735005129103, -0.77427857358733, -0.36896982961987],
[-0.51327189188952, -0.76923411646197, -0.35874699582426],
[-0.35874699582426, -0.76923411646197, -0.35874699582426],
[-0.19940154550177, -0.77427857358733, -0.36896982961987],
[-0.03908869693945, -0.78544012419782, -0.39003105466491],
[0.12220355715633, -0.80535532140090, -0.42178213297254],
[0.28736195436971, -0.83947093630474, -0.44789101806497],
[-1.00000000000000, -0.71096380698923, -0.41924757784475],
[-0.88850304650231, -0.67844598835118, -0.39311610910852],
[-0.77427857358733, -0.65735005129103, -0.36896982961987],
[-0.64569280594656, -0.64569280594656, -0.35430719405344],
[-0.50427892665174, -0.64197354809406, -0.34946859860247],
[-0.35430719405344, -0.64569280594656, -0.35430719405344],
[-0.19940154550177, -0.65735005129103, -0.36896982961987],
[-0.03993485603799, -0.67844598835119, -0.39311610910852],
[0.13021138483398, -0.71096380698923, -0.41924757784475],
[-1.00000000000000, -0.55931242597197, -0.40086241504471],
[-0.88531043041236, -0.53173147773860, -0.37705601988688],
[-0.76923411646197, -0.51327189188952, -0.35874699582426],
[-0.64197354809406, -0.50427892665174, -0.34946859860247],
[-0.50427892665174, -0.50427892665174, -0.34946859860247],
[-0.35874699582426, -0.51327189188952, -0.35874699582426],
[-0.20590207196216, -0.53173147773860, -0.37705601988688],
[-0.03982515898332, -0.55931242597197, -0.40086241504471],
[-1.00000000000000, -0.39188428022937, -0.39188428022937],
[-0.88434614436923, -0.37188461854359, -0.37188461854359],
[-0.76923411646197, -0.35874699582426, -0.35874699582426],
[-0.64569280594656, -0.35430719405344, -0.35430719405344],
[-0.51327189188952, -0.35874699582426, -0.35874699582426],
[-0.37188461854359, -0.37188461854359, -0.37188461854359],
[-0.21623143954125, -0.39188428022937, -0.39188428022937],
[-1.00000000000000, -0.21623143954125, -0.39188428022937],
[-0.88531043041236, -0.20590207196216, -0.37705601988688],
[-0.77427857358733, -0.19940154550177, -0.36896982961987],
[-0.65735005129103, -0.19940154550177, -0.36896982961987],
[-0.53173147773860, -0.20590207196216, -0.37705601988688],
[-0.39188428022937, -0.21623143954125, -0.39188428022937],
[-1.00000000000000, -0.03982515898332, -0.40086241504471],
[-0.88850304650231, -0.03993485603799, -0.39311610910852],
[-0.78544012419782, -0.03908869693945, -0.39003105466491],
[-0.67844598835118, -0.03993485603799, -0.39311610910852],
[-0.55931242597197, -0.03982515898332, -0.40086241504471],
[-1.00000000000000, 0.13021138483398, -0.41924757784475],
[-0.89506610278289, 0.12220355715633, -0.42178213297254],
[-0.80535532140090, 0.12220355715633, -0.42178213297254],
[-0.71096380698923, 0.13021138483398, -0.41924757784475],
[-1.00000000000000, 0.28736195436971, -0.44789101806497],
[-0.90814282013366, 0.28141242736269, -0.46512678709536],
[-0.83947093630474, 0.28736195436971, -0.44789101806497],
[-1.00000000000000, 0.42595746431903, -0.48805336347952],
[-0.93790410083952, 0.42595746431903, -0.48805336347952],
[-1.00000000000000, 0.54138539933010, -0.54138539933010],
[-1.00000000000000, -1.00000000000000, -0.37217443356548],
[-0.93587209559668, -1.00000000000000, -0.31305116691461],
[-0.83575330907818, -1.00000000000000, -0.26937637111685],
[-0.70663537739511, -1.00000000000000, -0.23943100407015],
[-0.55606397698616, -1.00000000000000, -0.22196801150692],
[-0.39188428022937, -1.00000000000000, -0.21623143954125],
[-0.22196801150692, -1.00000000000000, -0.22196801150692],
[-0.05393361853474, -1.00000000000000, -0.23943100407015],
[0.10512968019503, -1.00000000000000, -0.26937637111685],
[0.24892326251129, -1.00000000000000, -0.31305116691461],
[0.37217443356548, -1.00000000000000, -0.37217443356548],
[-1.00000000000000, -0.93587209559668, -0.31305116691461],
[-0.90657978734712, -0.90657978734712, -0.28436225480396],
[-0.80231226689534, -0.89392415739487, -0.24389353969246],
[-0.67573128488548, -0.88785097261588, -0.21820887124932],
[-0.53173147773860, -0.88531043041236, -0.20590207196216],
[-0.37705601988688, -0.88531043041236, -0.20590207196216],
[-0.21820887124932, -0.88785097261588, -0.21820887124932],
[-0.05987003601733, -0.89392415739487, -0.24389353969246],
[0.09752182949820, -0.90657978734712, -0.28436225480397],
[0.24892326251128, -0.93587209559668, -0.31305116691461],
[-1.00000000000000, -0.83575330907818, -0.26937637111685],
[-0.89392415739487, -0.80231226689534, -0.24389353969246],
[-0.78376756936894, -0.78376756936894, -0.21623243063106],
[-0.65735005129103, -0.77427857358733, -0.19940154550177],
[-0.51739938877605, -0.77134978179665, -0.19385144065125],
[-0.36896982961987, -0.77427857358733, -0.19940154550177],
[-0.21623243063106, -0.78376756936894, -0.21623243063106],
[-0.05987003601733, -0.80231226689534, -0.24389353969246],
[0.10512968019503, -0.83575330907818, -0.26937637111685],
[-1.00000000000000, -0.70663537739511, -0.23943100407015],
[-0.88785097261588, -0.67573128488548, -0.21820887124932],
[-0.77427857358733, -0.65735005129103, -0.19940154550177],
[-0.64881302709473, -0.64881302709473, -0.18965626751152],
[-0.51271767829902, -0.64881302709473, -0.18965626751152],
[-0.36896982961987, -0.65735005129103, -0.19940154550177],
[-0.21820887124932, -0.67573128488548, -0.21820887124932],
[-0.05393361853474, -0.70663537739511, -0.23943100407015],
[-1.00000000000000, -0.55606397698616, -0.22196801150692],
[-0.88531043041236, -0.53173147773860, -0.20590207196216],
[-0.77134978179665, -0.51739938877605, -0.19385144065125],
[-0.64881302709473, -0.51271767829902, -0.18965626751152],
[-0.51739938877605, -0.51739938877605, -0.19385144065125],
[-0.37705601988688, -0.53173147773860, -0.20590207196216],
[-0.22196801150692, -0.55606397698616, -0.22196801150692],
[-1.00000000000000, -0.39188428022937, -0.21623143954125],
[-0.88531043041236, -0.37705601988688, -0.20590207196216],
[-0.77427857358733, -0.36896982961987, -0.19940154550177],
[-0.65735005129103, -0.36896982961987, -0.19940154550177],
[-0.53173147773860, -0.37705601988688, -0.20590207196216],
[-0.39188428022937, -0.39188428022937, -0.21623143954125],
[-1.00000000000000, -0.22196801150692, -0.22196801150692],
[-0.88785097261588, -0.21820887124932, -0.21820887124932],
[-0.78376756936894, -0.21623243063106, -0.21623243063106],
[-0.67573128488548, -0.21820887124932, -0.21820887124932],
[-0.55606397698616, -0.22196801150692, -0.22196801150692],
[-1.00000000000000, -0.05393361853474, -0.23943100407015],
[-0.89392415739487, -0.05987003601733, -0.24389353969246],
[-0.80231226689534, -0.05987003601733, -0.24389353969246],
[-0.70663537739511, -0.05393361853474, -0.23943100407015],
[-1.00000000000000, 0.10512968019503, -0.26937637111685],
[-0.90657978734712, 0.09752182949820, -0.28436225480397],
[-0.83575330907818, 0.10512968019503, -0.26937637111685],
[-1.00000000000000, 0.24892326251129, -0.31305116691461],
[-0.93587209559668, 0.24892326251129, -0.31305116691461],
[-1.00000000000000, 0.37217443356548, -0.37217443356548],
[-1.00000000000000, -1.00000000000000, -0.18951197351832],
[-0.93490071879782, -1.00000000000000, -0.12735504764429],
[-0.83454373947148, -1.00000000000000, -0.08272813026426],
[-0.70663537739511, -1.00000000000000, -0.05393361853474],
[-0.55931242597197, -1.00000000000000, -0.03982515898332],
[-0.40086241504471, -1.00000000000000, -0.03982515898332],
[-0.23943100407015, -1.00000000000000, -0.05393361853474],
[-0.08272813026426, -1.00000000000000, -0.08272813026426],
[0.06225576644211, -1.00000000000000, -0.12735504764429],
[0.18951197351832, -1.00000000000000, -0.18951197351832],
[-1.00000000000000, -0.93490071879782, -0.12735504764429],
[-0.90608588335278, -0.90608588335278, -0.09391411664722],
[-0.80231226689534, -0.89392415739487, -0.05987003601733],
[-0.67844598835118, -0.88850304650231, -0.03993485603799],
[-0.53982745207518, -0.88692677958341, -0.03341831626624],
[-0.39311610910852, -0.88850304650231, -0.03993485603799],
[-0.24389353969246, -0.89392415739487, -0.05987003601733],
[-0.09391411664722, -0.90608588335278, -0.09391411664722],
[0.06225576644211, -0.93490071879782, -0.12735504764429],
[-1.00000000000000, -0.83454373947148, -0.08272813026426],
[-0.89392415739487, -0.80231226689534, -0.05987003601733],
[-0.78544012419782, -0.78544012419782, -0.03908869693945],
[-0.66330998127617, -0.77808048548239, -0.02842761416107],
[-0.53018191908037, -0.77808048548239, -0.02842761416107],
[-0.39003105466491, -0.78544012419782, -0.03908869693945],
[-0.24389353969246, -0.80231226689534, -0.05987003601733],
[-0.08272813026426, -0.83454373947148, -0.08272813026426],
[-1.00000000000000, -0.70663537739511, -0.05393361853474],
[-0.88850304650231, -0.67844598835118, -0.03993485603799],
[-0.77808048548239, -0.66330998127617, -0.02842761416107],
[-0.65854183263918, -0.65854183263918, -0.02437450208246],
[-0.53018191908037, -0.66330998127617, -0.02842761416107],
[-0.39311610910852, -0.67844598835118, -0.03993485603799],
[-0.23943100407015, -0.70663537739511, -0.05393361853474],
[-1.00000000000000, -0.55931242597197, -0.03982515898332],
[-0.88692677958341, -0.53982745207518, -0.03341831626624],
[-0.77808048548239, -0.53018191908037, -0.02842761416107],
[-0.66330998127617, -0.53018191908037, -0.02842761416107],
[-0.53982745207518, -0.53982745207518, -0.03341831626624],
[-0.40086241504471, -0.55931242597197, -0.03982515898332],
[-1.00000000000000, -0.40086241504471, -0.03982515898332],
[-0.88850304650231, -0.39311610910852, -0.03993485603799],
[-0.78544012419782, -0.39003105466491, -0.03908869693945],
[-0.67844598835118, -0.39311610910852, -0.03993485603799],
[-0.55931242597197, -0.40086241504471, -0.03982515898332],
[-1.00000000000000, -0.23943100407015, -0.05393361853474],
[-0.89392415739487, -0.24389353969246, -0.05987003601733],
[-0.80231226689534, -0.24389353969246, -0.05987003601733],
[-0.70663537739511, -0.23943100407015, -0.05393361853474],
[-1.00000000000000, -0.08272813026426, -0.08272813026426],
[-0.90608588335278, -0.09391411664722, -0.09391411664722],
[-0.83454373947148, -0.08272813026426, -0.08272813026426],
[-1.00000000000000, 0.06225576644211, -0.12735504764429],
[-0.93490071879782, 0.06225576644211, -0.12735504764429],
[-1.00000000000000, 0.18951197351832, -0.18951197351832],
[-1.00000000000000, -1.00000000000000, -0.00000000000000],
[-0.93490071879782, -1.00000000000000, 0.06225576644211],
[-0.83575330907818, -1.00000000000000, 0.10512968019503],
[-0.71096380698923, -1.00000000000000, 0.13021138483398],
[-0.56923217902044, -1.00000000000000, 0.13846435804087],
[-0.41924757784475, -1.00000000000000, 0.13021138483398],
[-0.26937637111685, -1.00000000000000, 0.10512968019503],
[-0.12735504764429, -1.00000000000000, 0.06225576644211],
[0.00000000000000, -1.00000000000000, -0.00000000000000],
[-1.00000000000000, -0.93490071879782, 0.06225576644211],
[-0.90657978734712, -0.90657978734712, 0.09752182949820],
[-0.80535532140090, -0.89506610278289, 0.12220355715633],
[-0.68694351447282, -0.89060856982129, 0.13454676551055],
[-0.55699468121643, -0.89060856982129, 0.13454676551055],
[-0.42178213297254, -0.89506610278289, 0.12220355715633],
[-0.28436225480397, -0.90657978734712, 0.09752182949820],
[-0.12735504764429, -0.93490071879782, 0.06225576644211],
[-1.00000000000000, -0.83575330907818, 0.10512968019503],
[-0.89506610278289, -0.80535532140090, 0.12220355715633],
[-0.79071623475418, -0.79071623475418, 0.13414296561246],
[-0.67596634711632, -0.78631903813907, 0.13825173237170],
[-0.55271049610410, -0.79071623475418, 0.13414296561246],
[-0.42178213297254, -0.80535532140090, 0.12220355715633],
[-0.26937637111685, -0.83575330907818, 0.10512968019503],
[-1.00000000000000, -0.71096380698923, 0.13021138483398],
[-0.89060856982129, -0.68694351447282, 0.13454676551055],
[-0.78631903813907, -0.67596634711632, 0.13825173237170],
[-0.67596634711632, -0.67596634711632, 0.13825173237170],
[-0.55699468121643, -0.68694351447282, 0.13454676551055],
[-0.41924757784475, -0.71096380698923, 0.13021138483398],
[-1.00000000000000, -0.56923217902044, 0.13846435804087],
[-0.89060856982129, -0.55699468121643, 0.13454676551055],
[-0.79071623475418, -0.55271049610410, 0.13414296561246],
[-0.68694351447282, -0.55699468121643, 0.13454676551055],
[-0.56923217902044, -0.56923217902044, 0.13846435804087],
[-1.00000000000000, -0.41924757784475, 0.13021138483398],
[-0.89506610278289, -0.42178213297254, 0.12220355715633],
[-0.80535532140090, -0.42178213297254, 0.12220355715633],
[-0.71096380698923, -0.41924757784475, 0.13021138483398],
[-1.00000000000000, -0.26937637111685, 0.10512968019503],
[-0.90657978734712, -0.28436225480396, 0.09752182949820],
[-0.83575330907818, -0.26937637111685, 0.10512968019503],
[-1.00000000000000, -0.12735504764429, 0.06225576644211],
[-0.93490071879782, -0.12735504764429, 0.06225576644211],
[-1.00000000000000, -0.00000000000000, 0.00000000000000],
[-1.00000000000000, -1.00000000000000, 0.18951197351832],
[-0.93587209559668, -1.00000000000000, 0.24892326251129],
[-0.83947093630474, -1.00000000000000, 0.28736195436971],
[-0.71988475929362, -1.00000000000000, 0.30622858646863],
[-0.58634382717501, -1.00000000000000, 0.30622858646863],
[-0.44789101806497, -1.00000000000000, 0.28736195436971],
[-0.31305116691461, -1.00000000000000, 0.24892326251128],
[-0.18951197351832, -1.00000000000000, 0.18951197351832],
[-1.00000000000000, -0.93587209559668, 0.24892326251129],
[-0.90814282013366, -0.90814282013366, 0.28141242736269],
[-0.81189519337880, -0.89758421902605, 0.29489208892178],
[-0.70240299672263, -0.89470267766589, 0.29950867111115],
[-0.58541267651694, -0.89758421902605, 0.29489208892178],
[-0.46512678709536, -0.90814282013366, 0.28141242736269],
[-0.31305116691461, -0.93587209559668, 0.24892326251129],
[-1.00000000000000, -0.83947093630474, 0.28736195436971],
[-0.89758421902605, -0.81189519337880, 0.29489208892178],
[-0.80045242943653, -0.80045242943653, 0.29771883675788],
[-0.69681397788482, -0.80045242943653, 0.29771883675788],
[-0.58541267651694, -0.81189519337880, 0.29489208892178],
[-0.44789101806497, -0.83947093630474, 0.28736195436971],
[-1.00000000000000, -0.71988475929362, 0.30622858646863],
[-0.89470267766589, -0.70240299672263, 0.29950867111115],
[-0.80045242943653, -0.69681397788482, 0.29771883675788],
[-0.70240299672263, -0.70240299672263, 0.29950867111115],
[-0.58634382717501, -0.71988475929362, 0.30622858646863],
[-1.00000000000000, -0.58634382717501, 0.30622858646863],
[-0.89758421902605, -0.58541267651694, 0.29489208892178],
[-0.81189519337880, -0.58541267651694, 0.29489208892178],
[-0.71988475929362, -0.58634382717501, 0.30622858646863],
[-1.00000000000000, -0.44789101806497, 0.28736195436971],
[-0.90814282013366, -0.46512678709536, 0.28141242736269],
[-0.83947093630474, -0.44789101806497, 0.28736195436971],
[-1.00000000000000, -0.31305116691461, 0.24892326251128],
[-0.93587209559668, -0.31305116691461, 0.24892326251128],
[-1.00000000000000, -0.18951197351832, 0.18951197351832],
[-1.00000000000000, -1.00000000000000, 0.37217443356548],
[-0.93790410083952, -1.00000000000000, 0.42595746431903],
[-0.84596211695572, -1.00000000000000, 0.45746842795542],
[-0.73392254063670, -1.00000000000000, 0.46784508127340],
[-0.61150631099970, -1.00000000000000, 0.45746842795542],
[-0.48805336347952, -1.00000000000000, 0.42595746431903],
[-0.37217443356548, -1.00000000000000, 0.37217443356548],
[-1.00000000000000, -0.93790410083952, 0.42595746431903],
[-0.91105468070505, -0.91105468070505, 0.44989578332272],
[-0.82302040254341, -0.90207264264846, 0.45229290124167],
[-0.72719985604980, -0.90207264264846, 0.45229290124167],
[-0.62778642191262, -0.91105468070505, 0.44989578332272],
[-0.48805336347952, -0.93790410083952, 0.42595746431903],
[-1.00000000000000, -0.84596211695572, 0.45746842795542],
[-0.90207264264846, -0.82302040254341, 0.45229290124167],
[-0.81634521017071, -0.81634521017071, 0.44903563051214],
[-0.72719985604980, -0.82302040254341, 0.45229290124167],
[-0.61150631099970, -0.84596211695572, 0.45746842795542],
[-1.00000000000000, -0.73392254063670, 0.46784508127340],
[-0.90207264264846, -0.72719985604980, 0.45229290124167],
[-0.82302040254341, -0.72719985604980, 0.45229290124167],
[-0.73392254063670, -0.73392254063670, 0.46784508127340],
[-1.00000000000000, -0.61150631099970, 0.45746842795542],
[-0.91105468070505, -0.62778642191262, 0.44989578332272],
[-0.84596211695572, -0.61150631099970, 0.45746842795542],
[-1.00000000000000, -0.48805336347952, 0.42595746431903],
[-0.93790410083952, -0.48805336347952, 0.42595746431903],
[-1.00000000000000, -0.37217443356548, 0.37217443356548],
[-1.00000000000000, -1.00000000000000, 0.54138539933010],
[-0.94117406944344, -1.00000000000000, 0.58707808049895],
[-0.85566517591551, -1.00000000000000, 0.60951915417401],
[-0.75385397825850, -1.00000000000000, 0.60951915417401],
[-0.64590401105551, -1.00000000000000, 0.58707808049895],
[-0.54138539933010, -1.00000000000000, 0.54138539933010],
[-1.00000000000000, -0.94117406944344, 0.58707808049895],
[-0.91594133949377, -0.91594133949377, 0.59653417593955],
[-0.84093333011616, -0.90987534922907, 0.59174200946140],
[-0.76465149695200, -0.91594133949377, 0.59653417593955],
[-0.64590401105551, -0.94117406944344, 0.58707808049895],
[-1.00000000000000, -0.85566517591551, 0.60951915417401],
[-0.90987534922907, -0.84093333011616, 0.59174200946140],
[-0.84093333011616, -0.84093333011616, 0.59174200946140],
[-0.75385397825850, -0.85566517591551, 0.60951915417401],
[-1.00000000000000, -0.75385397825850, 0.60951915417401],
[-0.91594133949377, -0.76465149695200, 0.59653417593955],
[-0.85566517591551, -0.75385397825850, 0.60951915417401],
[-1.00000000000000, -0.64590401105551, 0.58707808049895],
[-0.94117406944344, -0.64590401105551, 0.58707808049895],
[-1.00000000000000, -0.54138539933010, 0.54138539933010],
[-1.00000000000000, -1.00000000000000, 0.69102898062768],
[-0.94594521938428, -1.00000000000000, 0.72664228521737],
[-0.86918515701361, -1.00000000000000, 0.73837031402721],
[-0.78069706583309, -1.00000000000000, 0.72664228521737],
[-0.69102898062768, -1.00000000000000, 0.69102898062768],
[-1.00000000000000, -0.94594521938428, 0.72664228521737],
[-0.92422372884499, -0.92422372884499, 0.71789249717559],
[-0.86944503948562, -0.92422372884499, 0.71789249717559],
[-0.78069706583309, -0.94594521938428, 0.72664228521737],
[-1.00000000000000, -0.86918515701361, 0.73837031402721],
[-0.92422372884499, -0.86944503948562, 0.71789249717560],
[-0.86918515701361, -0.86918515701361, 0.73837031402721],
[-1.00000000000000, -0.78069706583309, 0.72664228521737],
[-0.94594521938428, -0.78069706583309, 0.72664228521737],
[-1.00000000000000, -0.69102898062768, 0.69102898062768],
[-1.00000000000000, -1.00000000000000, 0.81569625122177],
[-0.95256383018706, -1.00000000000000, 0.83984954838364],
[-0.88728571819659, -1.00000000000000, 0.83984954838364],
[-0.81569625122177, -1.00000000000000, 0.81569625122177],
[-1.00000000000000, -0.95256383018706, 0.83984954838364],
[-0.93943632227224, -0.93943632227224, 0.81830896681673],
[-0.88728571819659, -0.95256383018706, 0.83984954838364],
[-1.00000000000000, -0.88728571819659, 0.83984954838364],
[-0.95256383018706, -0.88728571819659, 0.83984954838364],
[-1.00000000000000, -0.81569625122177, 0.81569625122177],
[-1.00000000000000, -1.00000000000000, 0.91087999591557],
[-0.96145683115081, -1.00000000000000, 0.92291366230160],
[-0.91087999591557, -1.00000000000000, 0.91087999591557],
[-1.00000000000000, -0.96145683115081, 0.92291366230160],
[-0.96145683115080, -0.96145683115081, 0.92291366230160],
[-1.00000000000000, -0.91087999591557, 0.91087999591557],
[-1.00000000000000, -1.00000000000000, 0.97313217663142],
[-0.97313217663142, -1.00000000000000, 0.97313217663142],
[-1.00000000000000, -0.97313217663142, 0.97313217663142],
])
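# Note on these branches (inferred from context, not documented in the source):
# each `elif C==N:` case appears to load a precomputed table of Fekete nodes,
# presumably for polynomial order N on the reference tetrahedron [-1, 1]^3,
# with one row per node giving its (r, s, t) coordinates to 14 decimal places.
# The tables include the vertices (entries of +/-1) plus edge, face, and
# interior points, consistent with Fekete point sets; last-digit differences
# between symmetric entries look like rounding noise from the generator.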
elif C==16:
feketeNodes = np.array([
[-1.00000000000000, -1.00000000000000, -1.00000000000000],
[1.00000000000000, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, 1.00000000000000, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, 1.00000000000000],
[-0.97610555741220, -1.00000000000000, -1.00000000000000],
[-0.92064918534753, -1.00000000000000, -1.00000000000000],
[-0.83559353521809, -1.00000000000000, -1.00000000000000],
[-0.72367932928324, -1.00000000000000, -1.00000000000000],
[-0.58850483431866, -1.00000000000000, -1.00000000000000],
[-0.43441503691212, -1.00000000000000, -1.00000000000000],
[-0.26636265287828, -1.00000000000000, -1.00000000000000],
[-0.08974909348465, -1.00000000000000, -1.00000000000000],
[0.08974909348465, -1.00000000000000, -1.00000000000000],
[0.26636265287828, -1.00000000000000, -1.00000000000000],
[0.43441503691212, -1.00000000000000, -1.00000000000000],
[0.58850483431866, -1.00000000000000, -1.00000000000000],
[0.72367932928324, -1.00000000000000, -1.00000000000000],
[0.83559353521809, -1.00000000000000, -1.00000000000000],
[0.92064918534753, -1.00000000000000, -1.00000000000000],
[0.97610555741220, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, -0.97610555741220, -1.00000000000000],
[-0.96566215411209, -0.96566215411209, -1.00000000000000],
[-0.89951569869071, -0.95757537612802, -1.00000000000000],
[-0.80411857775921, -0.95143658722507, -1.00000000000000],
[-0.68283080625434, -0.94689840816918, -1.00000000000000],
[-0.53977863434066, -0.94367298086688, -1.00000000000000],
[-0.37973175758212, -0.94153338637932, -1.00000000000000],
[-0.20795453403575, -0.94031572376258, -1.00000000000000],
[-0.03003955463067, -0.93992089073865, -1.00000000000000],
[0.14827025779834, -0.94031572376258, -1.00000000000000],
[0.32126514396144, -0.94153338637932, -1.00000000000000],
[0.48345161520754, -0.94367298086688, -1.00000000000000],
[0.62972921442352, -0.94689840816918, -1.00000000000000],
[0.75555516498428, -0.95143658722507, -1.00000000000000],
[0.85709107481873, -0.95757537612802, -1.00000000000000],
[0.93132430822417, -0.96566215411208, -1.00000000000000],
[0.97610555741220, -0.97610555741220, -1.00000000000000],
[-1.00000000000000, -0.92064918534753, -1.00000000000000],
[-0.95757537612802, -0.89951569869071, -1.00000000000000],
[-0.88303323724208, -0.88303323724208, -1.00000000000000],
[-0.77955200987756, -0.87046490686619, -1.00000000000000],
[-0.65107787413874, -0.86118354428076, -1.00000000000000],
[-0.50223646595824, -0.85467813358830, -1.00000000000000],
[-0.33819570282874, -0.85055979016974, -1.00000000000000],
[-0.16450620740505, -0.84856652844490, -1.00000000000000],
[0.01307273584995, -0.84856652844490, -1.00000000000000],
[0.18875549299847, -0.85055979016974, -1.00000000000000],
[0.35691459954653, -0.85467813358830, -1.00000000000000],
[0.51226141841950, -0.86118354428076, -1.00000000000000],
[0.65001691674374, -0.87046490686619, -1.00000000000000],
[0.76606647448416, -0.88303323724208, -1.00000000000000],
[0.85709107481873, -0.89951569869071, -1.00000000000000],
[0.92064918534753, -0.92064918534753, -1.00000000000000],
[-1.00000000000000, -0.83559353521809, -1.00000000000000],
[-0.95143658722507, -0.80411857775921, -1.00000000000000],
[-0.87046490686619, -0.77955200987756, -1.00000000000000],
[-0.76088114861788, -0.76088114861788, -1.00000000000000],
[-0.62719310134571, -0.74725908993286, -1.00000000000000],
[-0.47450138980813, -0.73801547866742, -1.00000000000000],
[-0.30834740911645, -0.73266516174362, -1.00000000000000],
[-0.13454275358835, -0.73091449282331, -1.00000000000000],
[0.04101257086006, -0.73266516174362, -1.00000000000000],
[0.21251686847555, -0.73801547866742, -1.00000000000000],
[0.37445219127856, -0.74725908993286, -1.00000000000000],
[0.52176229723576, -0.76088114861788, -1.00000000000000],
[0.65001691674374, -0.77955200987756, -1.00000000000000],
[0.75555516498428, -0.80411857775921, -1.00000000000000],
[0.83559353521809, -0.83559353521809, -1.00000000000000],
[-1.00000000000000, -0.72367932928324, -1.00000000000000],
[-0.94689840816918, -0.68283080625434, -1.00000000000000],
[-0.86118354428076, -0.65107787413874, -1.00000000000000],
[-0.74725908993286, -0.62719310134570, -1.00000000000000],
[-0.61017433892153, -0.61017433892153, -1.00000000000000],
[-0.45548262743559, -0.59925804242534, -1.00000000000000],
[-0.28907450090662, -0.59392944953705, -1.00000000000000],
[-0.11699604955632, -0.59392944953705, -1.00000000000000],
[0.05474066986092, -0.59925804242534, -1.00000000000000],
[0.22034867784306, -0.61017433892153, -1.00000000000000],
[0.37445219127856, -0.62719310134571, -1.00000000000000],
[0.51226141841950, -0.65107787413874, -1.00000000000000],
[0.62972921442352, -0.68283080625434, -1.00000000000000],
[0.72367932928324, -0.72367932928324, -1.00000000000000],
[-1.00000000000000, -0.58850483431866, -1.00000000000000],
[-0.94367298086688, -0.53977863434066, -1.00000000000000],
[-0.85467813358830, -0.50223646595824, -1.00000000000000],
[-0.73801547866742, -0.47450138980813, -1.00000000000000],
[-0.59925804242534, -0.45548262743559, -1.00000000000000],
[-0.44439033087540, -0.44439033087540, -1.00000000000000],
[-0.27962686227842, -0.44074627544316, -1.00000000000000],
[-0.11121933824920, -0.44439033087540, -1.00000000000000],
[0.05474066986092, -0.45548262743559, -1.00000000000000],
[0.21251686847555, -0.47450138980813, -1.00000000000000],
[0.35691459954653, -0.50223646595824, -1.00000000000000],
[0.48345161520754, -0.53977863434066, -1.00000000000000],
[0.58850483431866, -0.58850483431866, -1.00000000000000],
[-1.00000000000000, -0.43441503691212, -1.00000000000000],
[-0.94153338637932, -0.37973175758212, -1.00000000000000],
[-0.85055979016974, -0.33819570282874, -1.00000000000000],
[-0.73266516174362, -0.30834740911645, -1.00000000000000],
[-0.59392944953705, -0.28907450090662, -1.00000000000000],
[-0.44074627544316, -0.27962686227842, -1.00000000000000],
[-0.27962686227842, -0.27962686227842, -1.00000000000000],
[-0.11699604955632, -0.28907450090662, -1.00000000000000],
[0.04101257086006, -0.30834740911645, -1.00000000000000],
[0.18875549299847, -0.33819570282874, -1.00000000000000],
[0.32126514396144, -0.37973175758212, -1.00000000000000],
[0.43441503691212, -0.43441503691212, -1.00000000000000],
[-1.00000000000000, -0.26636265287828, -1.00000000000000],
[-0.94031572376258, -0.20795453403575, -1.00000000000000],
[-0.84856652844490, -0.16450620740505, -1.00000000000000],
[-0.73091449282331, -0.13454275358835, -1.00000000000000],
[-0.59392944953705, -0.11699604955632, -1.00000000000000],
[-0.44439033087540, -0.11121933824920, -1.00000000000000],
[-0.28907450090662, -0.11699604955632, -1.00000000000000],
[-0.13454275358835, -0.13454275358835, -1.00000000000000],
[0.01307273584995, -0.16450620740505, -1.00000000000000],
[0.14827025779833, -0.20795453403575, -1.00000000000000],
[0.26636265287828, -0.26636265287828, -1.00000000000000],
[-1.00000000000000, -0.08974909348465, -1.00000000000000],
[-0.93992089073865, -0.03003955463067, -1.00000000000000],
[-0.84856652844490, 0.01307273584995, -1.00000000000000],
[-0.73266516174362, 0.04101257086006, -1.00000000000000],
[-0.59925804242534, 0.05474066986092, -1.00000000000000],
[-0.45548262743559, 0.05474066986092, -1.00000000000000],
[-0.30834740911645, 0.04101257086006, -1.00000000000000],
[-0.16450620740505, 0.01307273584995, -1.00000000000000],
[-0.03003955463067, -0.03003955463067, -1.00000000000000],
[0.08974909348465, -0.08974909348465, -1.00000000000000],
[-1.00000000000000, 0.08974909348465, -1.00000000000000],
[-0.94031572376258, 0.14827025779833, -1.00000000000000],
[-0.85055979016974, 0.18875549299847, -1.00000000000000],
[-0.73801547866742, 0.21251686847555, -1.00000000000000],
[-0.61017433892153, 0.22034867784306, -1.00000000000000],
[-0.47450138980813, 0.21251686847555, -1.00000000000000],
[-0.33819570282874, 0.18875549299847, -1.00000000000000],
[-0.20795453403575, 0.14827025779834, -1.00000000000000],
[-0.08974909348465, 0.08974909348465, -1.00000000000000],
[-1.00000000000000, 0.26636265287828, -1.00000000000000],
[-0.94153338637932, 0.32126514396144, -1.00000000000000],
[-0.85467813358830, 0.35691459954653, -1.00000000000000],
[-0.74725908993286, 0.37445219127856, -1.00000000000000],
[-0.62719310134570, 0.37445219127856, -1.00000000000000],
[-0.50223646595824, 0.35691459954653, -1.00000000000000],
[-0.37973175758212, 0.32126514396144, -1.00000000000000],
[-0.26636265287828, 0.26636265287828, -1.00000000000000],
[-1.00000000000000, 0.43441503691212, -1.00000000000000],
[-0.94367298086688, 0.48345161520754, -1.00000000000000],
[-0.86118354428076, 0.51226141841950, -1.00000000000000],
[-0.76088114861788, 0.52176229723576, -1.00000000000000],
[-0.65107787413874, 0.51226141841950, -1.00000000000000],
[-0.53977863434066, 0.48345161520754, -1.00000000000000],
[-0.43441503691212, 0.43441503691212, -1.00000000000000],
[-1.00000000000000, 0.58850483431866, -1.00000000000000],
[-0.94689840816918, 0.62972921442352, -1.00000000000000],
[-0.87046490686619, 0.65001691674374, -1.00000000000000],
[-0.77955200987756, 0.65001691674374, -1.00000000000000],
[-0.68283080625434, 0.62972921442352, -1.00000000000000],
[-0.58850483431866, 0.58850483431866, -1.00000000000000],
[-1.00000000000000, 0.72367932928324, -1.00000000000000],
[-0.95143658722507, 0.75555516498428, -1.00000000000000],
[-0.88303323724208, 0.76606647448416, -1.00000000000000],
[-0.80411857775921, 0.75555516498428, -1.00000000000000],
[-0.72367932928324, 0.72367932928324, -1.00000000000000],
[-1.00000000000000, 0.83559353521809, -1.00000000000000],
[-0.95757537612802, 0.85709107481873, -1.00000000000000],
[-0.89951569869071, 0.85709107481873, -1.00000000000000],
[-0.83559353521809, 0.83559353521809, -1.00000000000000],
[-1.00000000000000, 0.92064918534753, -1.00000000000000],
[-0.96566215411209, 0.93132430822417, -1.00000000000000],
[-0.92064918534753, 0.92064918534753, -1.00000000000000],
[-1.00000000000000, 0.97610555741220, -1.00000000000000],
[-0.97610555741220, 0.97610555741220, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, -0.97610555741220],
[-0.96566215411209, -1.00000000000000, -0.96566215411209],
[-0.89951569869071, -1.00000000000000, -0.95757537612802],
[-0.80411857775921, -1.00000000000000, -0.95143658722507],
[-0.68283080625434, -1.00000000000000, -0.94689840816918],
[-0.53977863434066, -1.00000000000000, -0.94367298086688],
[-0.37973175758212, -1.00000000000000, -0.94153338637932],
[-0.20795453403575, -1.00000000000000, -0.94031572376258],
[-0.03003955463067, -1.00000000000000, -0.93992089073865],
[0.14827025779834, -1.00000000000000, -0.94031572376258],
[0.32126514396144, -1.00000000000000, -0.94153338637932],
[0.48345161520754, -1.00000000000000, -0.94367298086688],
[0.62972921442352, -1.00000000000000, -0.94689840816918],
[0.75555516498428, -1.00000000000000, -0.95143658722507],
[0.85709107481873, -1.00000000000000, -0.95757537612802],
[0.93132430822417, -1.00000000000000, -0.96566215411208],
[0.97610555741220, -1.00000000000000, -0.97610555741220],
[-1.00000000000000, -0.96566215411209, -0.96566215411209],
[-0.94524290297158, -0.94524290297158, -0.94524290297158],
[-0.88400423517214, -0.93088733901333, -0.93088733901333],
[-0.79134151435775, -0.92300465044929, -0.92300465044929],
[-0.66892711010229, -0.91826859853484, -0.91826859853484],
[-0.52168268319682, -0.91533769804486, -0.91533769804486],
[-0.35580977157487, -0.91361325731751, -0.91361325731751],
[-0.17821561606993, -0.91281028806768, -0.91281028806768],
[0.00383619220529, -0.91281028806768, -0.91281028806768],
[0.18303628620988, -0.91361325731751, -0.91361325731751],
[0.35235807928655, -0.91533769804486, -0.91533769804486],
[0.50546430717196, -0.91826859853484, -0.91826859853484],
[0.63735081525634, -0.92300465044929, -0.92300465044929],
[0.74577891319880, -0.93088733901333, -0.93088733901333],
[0.83572870891473, -0.94524290297158, -0.94524290297158],
[0.93132430822417, -0.96566215411209, -0.96566215411209],
[-1.00000000000000, -0.89951569869071, -0.95757537612802],
[-0.93088733901333, -0.88400423517214, -0.93088733901333],
[-0.85699505928483, -0.85699505928483, -0.91725218290052],
[-0.75553373590533, -0.83977988741760, -0.90975051014484],
[-0.62778228539838, -0.82880683679752, -0.90533315725650],
[-0.47858570939270, -0.82197902687780, -0.90271908968347],
[-0.31409235901703, -0.81822259638933, -0.90132394717945],
[-0.14104686191484, -0.81702189747097, -0.90088437869935],
[0.03363890258581, -0.81822259638933, -0.90132394717945],
[0.20328382595397, -0.82197902687780, -0.90271908968347],
[0.36192227945240, -0.82880683679752, -0.90533315725650],
[0.50506413346777, -0.83977988741760, -0.90975051014484],
[0.63124230147018, -0.85699505928483, -0.91725218290052],
[0.74577891319880, -0.88400423517214, -0.93088733901333],
[0.85709107481873, -0.89951569869071, -0.95757537612802],
[-1.00000000000000, -0.80411857775921, -0.95143658722507],
[-0.92300465044929, -0.79134151435776, -0.92300465044929],
[-0.83977988741760, -0.75553373590533, -0.90975051014484],
[-0.73132910648164, -0.73132910648164, -0.90259903319141],
[-0.59938063492158, -0.71565649846207, -0.89849390365871],
[-0.44889894801261, -0.70620673063170, -0.89619431421031],
[-0.28598097100158, -0.70174749688164, -0.89515246800868],
[-0.11711906410810, -0.70174749688164, -0.89515246800868],
[0.05129999285461, -0.70620673063170, -0.89619431421031],
[0.21353103704236, -0.71565649846207, -0.89849390365871],
[0.36525724615468, -0.73132910648164, -0.90259903319141],
[0.50506413346777, -0.75553373590533, -0.90975051014484],
[0.63735081525634, -0.79134151435776, -0.92300465044929],
[0.75555516498428, -0.80411857775921, -0.95143658722507],
[-1.00000000000000, -0.68283080625434, -0.94689840816918],
[-0.91826859853484, -0.66892711010229, -0.91826859853484],
[-0.82880683679752, -0.62778228539838, -0.90533315725650],
[-0.71565649846207, -0.59938063492158, -0.89849390365871],
[-0.58120945840524, -0.58120945840524, -0.89470167097220],
[-0.43064547863352, -0.57106333491179, -0.89275319595316],
[-0.27002622797889, -0.56779754185188, -0.89215000219033],
[-0.10553799050153, -0.57106333491179, -0.89275319595316],
[0.05712058778268, -0.58120945840524, -0.89470167097220],
[0.21353103704236, -0.59938063492158, -0.89849390365871],
[0.36192227945240, -0.62778228539838, -0.90533315725650],
[0.50546430717196, -0.66892711010229, -0.91826859853484],
[0.62972921442352, -0.68283080625434, -0.94689840816918],
[-1.00000000000000, -0.53977863434066, -0.94367298086688],
[-0.91533769804486, -0.52168268319682, -0.91533769804486],
[-0.82197902687780, -0.47858570939270, -0.90271908968347],
[-0.70620673063170, -0.44889894801261, -0.89619431421031],
[-0.57106333491179, -0.43064547863352, -0.89275319595316],
[-0.42195032631194, -0.42195032631194, -0.89124316588241],
[-0.26485618149371, -0.42195032631194, -0.89124316588241],
[-0.10553799050153, -0.43064547863352, -0.89275319595316],
[0.05129999285461, -0.44889894801261, -0.89619431421031],
[0.20328382595397, -0.47858570939270, -0.90271908968347],
[0.35235807928655, -0.52168268319682, -0.91533769804486],
[0.48345161520754, -0.53977863434066, -0.94367298086688],
[-1.00000000000000, -0.37973175758212, -0.94153338637932],
[-0.91361325731751, -0.35580977157487, -0.91361325731751],
[-0.81822259638933, -0.31409235901703, -0.90132394717945],
[-0.70174749688164, -0.28598097100158, -0.89515246800868],
[-0.56779754185188, -0.27002622797889, -0.89215000219033],
[-0.42195032631194, -0.26485618149372, -0.89124316588241],
[-0.27002622797889, -0.27002622797889, -0.89215000219033],
[-0.11711906410810, -0.28598097100158, -0.89515246800868],
[0.03363890258581, -0.31409235901703, -0.90132394717945],
[0.18303628620988, -0.35580977157487, -0.91361325731751],
[0.32126514396144, -0.37973175758212, -0.94153338637932],
[-1.00000000000000, -0.20795453403575, -0.94031572376258],
[-0.91281028806768, -0.17821561606993, -0.91281028806768],
[-0.81702189747097, -0.14104686191484, -0.90088437869935],
[-0.70174749688164, -0.11711906410810, -0.89515246800868],
[-0.57106333491179, -0.10553799050153, -0.89275319595316],
[-0.43064547863352, -0.10553799050153, -0.89275319595316],
[-0.28598097100158, -0.11711906410810, -0.89515246800868],
[-0.14104686191484, -0.14104686191484, -0.90088437869935],
[0.00383619220529, -0.17821561606993, -0.91281028806768],
[0.14827025779833, -0.20795453403575, -0.94031572376258],
[-1.00000000000000, -0.03003955463067, -0.93992089073865],
[-0.91281028806768, 0.00383619220529, -0.91281028806768],
[-0.81822259638933, 0.03363890258581, -0.90132394717945],
[-0.70620673063170, 0.05129999285461, -0.89619431421031],
[-0.58120945840524, 0.05712058778268, -0.89470167097220],
[-0.44889894801261, 0.05129999285461, -0.89619431421031],
[-0.31409235901703, 0.03363890258581, -0.90132394717945],
[-0.17821561606993, 0.00383619220529, -0.91281028806768],
[-0.03003955463067, -0.03003955463067, -0.93992089073865],
[-1.00000000000000, 0.14827025779834, -0.94031572376258],
[-0.91361325731751, 0.18303628620988, -0.91361325731751],
[-0.82197902687780, 0.20328382595397, -0.90271908968347],
[-0.71565649846207, 0.21353103704236, -0.89849390365871],
[-0.59938063492158, 0.21353103704236, -0.89849390365871],
[-0.47858570939270, 0.20328382595397, -0.90271908968347],
[-0.35580977157487, 0.18303628620988, -0.91361325731751],
[-0.20795453403575, 0.14827025779834, -0.94031572376258],
[-1.00000000000000, 0.32126514396144, -0.94153338637932],
[-0.91533769804486, 0.35235807928655, -0.91533769804486],
[-0.82880683679752, 0.36192227945240, -0.90533315725650],
[-0.73132910648164, 0.36525724615468, -0.90259903319141],
[-0.62778228539838, 0.36192227945240, -0.90533315725650],
[-0.52168268319682, 0.35235807928655, -0.91533769804486],
[-0.37973175758212, 0.32126514396144, -0.94153338637932],
[-1.00000000000000, 0.48345161520754, -0.94367298086688],
[-0.91826859853484, 0.50546430717196, -0.91826859853484],
[-0.83977988741760, 0.50506413346777, -0.90975051014484],
[-0.75553373590533, 0.50506413346777, -0.90975051014484],
[-0.66892711010229, 0.50546430717196, -0.91826859853484],
[-0.53977863434066, 0.48345161520754, -0.94367298086688],
[-1.00000000000000, 0.62972921442352, -0.94689840816918],
[-0.92300465044929, 0.63735081525634, -0.92300465044929],
[-0.85699505928483, 0.63124230147018, -0.91725218290052],
[-0.79134151435775, 0.63735081525634, -0.92300465044929],
[-0.68283080625434, 0.62972921442352, -0.94689840816918],
[-1.00000000000000, 0.75555516498428, -0.95143658722507],
[-0.93088733901333, 0.74577891319880, -0.93088733901333],
[-0.88400423517214, 0.74577891319880, -0.93088733901333],
[-0.80411857775921, 0.75555516498428, -0.95143658722507],
[-1.00000000000000, 0.85709107481873, -0.95757537612802],
[-0.94524290297158, 0.83572870891473, -0.94524290297158],
[-0.89951569869071, 0.85709107481873, -0.95757537612802],
[-1.00000000000000, 0.93132430822417, -0.96566215411208],
[-0.96566215411209, 0.93132430822417, -0.96566215411208],
[-1.00000000000000, 0.97610555741220, -0.97610555741220],
[-1.00000000000000, -1.00000000000000, -0.92064918534753],
[-0.95757537612802, -1.00000000000000, -0.89951569869071],
[-0.88303323724208, -1.00000000000000, -0.88303323724208],
[-0.77955200987756, -1.00000000000000, -0.87046490686619],
[-0.65107787413874, -1.00000000000000, -0.86118354428076],
[-0.50223646595824, -1.00000000000000, -0.85467813358830],
[-0.33819570282874, -1.00000000000000, -0.85055979016974],
[-0.16450620740505, -1.00000000000000, -0.84856652844490],
[0.01307273584995, -1.00000000000000, -0.84856652844490],
[0.18875549299847, -1.00000000000000, -0.85055979016974],
[0.35691459954653, -1.00000000000000, -0.85467813358830],
[0.51226141841950, -1.00000000000000, -0.86118354428076],
[0.65001691674374, -1.00000000000000, -0.87046490686619],
[0.76606647448416, -1.00000000000000, -0.88303323724208],
[0.85709107481873, -1.00000000000000, -0.89951569869071],
[0.92064918534753, -1.00000000000000, -0.92064918534753],
[-1.00000000000000, -0.95757537612802, -0.89951569869071],
[-0.93088733901333, -0.93088733901333, -0.88400423517214],
[-0.85699505928483, -0.91725218290052, -0.85699505928483],
[-0.75553373590533, -0.90975051014484, -0.83977988741760],
[-0.62778228539838, -0.90533315725650, -0.82880683679752],
[-0.47858570939270, -0.90271908968347, -0.82197902687780],
[-0.31409235901703, -0.90132394717945, -0.81822259638933],
[-0.14104686191484, -0.90088437869935, -0.81702189747097],
[0.03363890258581, -0.90132394717945, -0.81822259638933],
[0.20328382595397, -0.90271908968347, -0.82197902687780],
[0.36192227945240, -0.90533315725650, -0.82880683679752],
[0.50506413346777, -0.90975051014484, -0.83977988741760],
[0.63124230147018, -0.91725218290052, -0.85699505928483],
[0.74577891319880, -0.93088733901333, -0.88400423517214],
[0.85709107481873, -0.95757537612802, -0.89951569869071],
[-1.00000000000000, -0.88303323724208, -0.88303323724208],
[-0.91725218290052, -0.85699505928483, -0.85699505928483],
[-0.83354227525151, -0.83354227525151, -0.83354227525151],
[-0.72622199534603, -0.81806030657087, -0.81806030657087],
[-0.59557793689121, -0.80821469636381, -0.80821469636381],
[-0.44624189631944, -0.80237272611754, -0.80237272611754],
[-0.28431281823316, -0.79964507343425, -0.79964507343425],
[-0.11639703489834, -0.79964507343425, -0.79964507343425],
[0.05098734855452, -0.80237272611754, -0.80237272611754],
[0.21200732961882, -0.80821469636381, -0.80821469636381],
[0.36234260848776, -0.81806030657087, -0.81806030657087],
[0.50062682575452, -0.83354227525151, -0.83354227525151],
[0.63124230147018, -0.85699505928483, -0.85699505928483],
[0.76606647448416, -0.88303323724208, -0.88303323724208],
[-1.00000000000000, -0.77955200987756, -0.87046490686619],
[-0.90975051014484, -0.75553373590533, -0.83977988741760],
[-0.81806030657087, -0.72622199534603, -0.81806030657087],
[-0.70546057011574, -0.70546057011574, -0.80407960155614],
[-0.57235298198835, -0.69209820344694, -0.79546369024868],
[-0.42330908651523, -0.68465203917083, -0.79077416730400],
[-0.26422727435709, -0.68226100768640, -0.78928444359942],
[-0.10126470700993, -0.68465203917083, -0.79077416730400],
[0.05991487568396, -0.69209820344694, -0.79546369024868],
[0.21500074178762, -0.70546057011574, -0.80407960155614],
[0.36234260848776, -0.72622199534603, -0.81806030657087],
[0.50506413346777, -0.75553373590533, -0.83977988741760],
[0.65001691674374, -0.77955200987756, -0.87046490686619],
[-1.00000000000000, -0.65107787413874, -0.86118354428076],
[-0.90533315725650, -0.62778228539838, -0.82880683679752],
[-0.80821469636381, -0.59557793689121, -0.80821469636381],
[-0.69209820344694, -0.57235298198835, -0.79546369024868],
[-0.55785828724523, -0.55785828724523, -0.78803820908470],
[-0.41008062239189, -0.55093034546477, -0.78460952663812],
[-0.25437950550522, -0.55093034546477, -0.78460952663812],
[-0.09624521642484, -0.55785828724523, -0.78803820908470],
[0.05991487568396, -0.57235298198835, -0.79546369024868],
[0.21200732961882, -0.59557793689121, -0.80821469636381],
[0.36192227945240, -0.62778228539838, -0.82880683679752],
[0.51226141841950, -0.65107787413874, -0.86118354428076],
[-1.00000000000000, -0.50223646595824, -0.85467813358830],
[-0.90271908968347, -0.47858570939270, -0.82197902687780],
[-0.80237272611754, -0.44624189631944, -0.80237272611754],
[-0.68465203917083, -0.42330908651523, -0.79077416730400],
[-0.55093034546477, -0.41008062239189, -0.78460952663812],
[-0.40577573935925, -0.40577573935925, -0.78267278192224],
[-0.25437950550522, -0.41008062239189, -0.78460952663812],
[-0.10126470700993, -0.42330908651523, -0.79077416730400],
[0.05098734855452, -0.44624189631944, -0.80237272611754],
[0.20328382595397, -0.47858570939270, -0.82197902687780],
[0.35691459954653, -0.50223646595824, -0.85467813358830],
[-1.00000000000000, -0.33819570282874, -0.85055979016974],
[-0.90132394717945, -0.31409235901703, -0.81822259638933],
[-0.79964507343425, -0.28431281823316, -0.79964507343425],
[-0.68226100768640, -0.26422727435709, -0.78928444359942],
[-0.55093034546477, -0.25437950550522, -0.78460952663812],
[-0.41008062239189, -0.25437950550522, -0.78460952663812],
[-0.26422727435709, -0.26422727435709, -0.78928444359942],
[-0.11639703489834, -0.28431281823316, -0.79964507343425],
[0.03363890258581, -0.31409235901703, -0.81822259638933],
[0.18875549299847, -0.33819570282874, -0.85055979016974],
[-1.00000000000000, -0.16450620740505, -0.84856652844490],
[-0.90088437869935, -0.14104686191484, -0.81702189747097],
[-0.79964507343425, -0.11639703489834, -0.79964507343425],
[-0.68465203917083, -0.10126470700993, -0.79077416730400],
[-0.55785828724523, -0.09624521642484, -0.78803820908470],
[-0.42330908651523, -0.10126470700993, -0.79077416730400],
[-0.28431281823316, -0.11639703489834, -0.79964507343425],
[-0.14104686191484, -0.14104686191484, -0.81702189747097],
[0.01307273584995, -0.16450620740505, -0.84856652844490],
[-1.00000000000000, 0.01307273584995, -0.84856652844490],
[-0.90132394717945, 0.03363890258581, -0.81822259638933],
[-0.80237272611754, 0.05098734855452, -0.80237272611754],
[-0.69209820344694, 0.05991487568396, -0.79546369024868],
[-0.57235298198835, 0.05991487568396, -0.79546369024868],
[-0.44624189631944, 0.05098734855452, -0.80237272611754],
[-0.31409235901703, 0.03363890258581, -0.81822259638933],
[-0.16450620740505, 0.01307273584995, -0.84856652844490],
[-1.00000000000000, 0.18875549299847, -0.85055979016974],
[-0.90271908968347, 0.20328382595397, -0.82197902687780],
[-0.80821469636381, 0.21200732961882, -0.80821469636381],
[-0.70546057011574, 0.21500074178762, -0.80407960155614],
[-0.59557793689120, 0.21200732961882, -0.80821469636381],
[-0.47858570939270, 0.20328382595397, -0.82197902687780],
[-0.33819570282874, 0.18875549299847, -0.85055979016974],
[-1.00000000000000, 0.35691459954653, -0.85467813358830],
[-0.90533315725650, 0.36192227945240, -0.82880683679752],
[-0.81806030657087, 0.36234260848776, -0.81806030657087],
[-0.72622199534603, 0.36234260848776, -0.81806030657087],
[-0.62778228539838, 0.36192227945240, -0.82880683679752],
[-0.50223646595824, 0.35691459954653, -0.85467813358830],
[-1.00000000000000, 0.51226141841950, -0.86118354428076],
[-0.90975051014484, 0.50506413346777, -0.83977988741760],
[-0.83354227525151, 0.50062682575452, -0.83354227525151],
[-0.75553373590533, 0.50506413346777, -0.83977988741760],
[-0.65107787413874, 0.51226141841950, -0.86118354428076],
[-1.00000000000000, 0.65001691674374, -0.87046490686619],
[-0.91725218290052, 0.63124230147018, -0.85699505928483],
[-0.85699505928483, 0.63124230147018, -0.85699505928483],
[-0.77955200987756, 0.65001691674374, -0.87046490686619],
[-1.00000000000000, 0.76606647448416, -0.88303323724208],
[-0.93088733901333, 0.74577891319880, -0.88400423517214],
[-0.88303323724208, 0.76606647448416, -0.88303323724208],
[-1.00000000000000, 0.85709107481873, -0.89951569869071],
[-0.95757537612802, 0.85709107481873, -0.89951569869071],
[-1.00000000000000, 0.92064918534753, -0.92064918534753],
[-1.00000000000000, -1.00000000000000, -0.83559353521809],
[-0.95143658722507, -1.00000000000000, -0.80411857775921],
[-0.87046490686619, -1.00000000000000, -0.77955200987756],
[-0.76088114861788, -1.00000000000000, -0.76088114861788],
[-0.62719310134571, -1.00000000000000, -0.74725908993286],
[-0.47450138980813, -1.00000000000000, -0.73801547866742],
[-0.30834740911645, -1.00000000000000, -0.73266516174362],
[-0.13454275358835, -1.00000000000000, -0.73091449282331],
[0.04101257086006, -1.00000000000000, -0.73266516174362],
[0.21251686847555, -1.00000000000000, -0.73801547866742],
[0.37445219127856, -1.00000000000000, -0.74725908993286],
[0.52176229723576, -1.00000000000000, -0.76088114861788],
[0.65001691674374, -1.00000000000000, -0.77955200987756],
[0.75555516498428, -1.00000000000000, -0.80411857775921],
[0.83559353521809, -1.00000000000000, -0.83559353521809],
[-1.00000000000000, -0.95143658722507, -0.80411857775921],
[-0.92300465044929, -0.92300465044929, -0.79134151435776],
[-0.83977988741760, -0.90975051014484, -0.75553373590533],
[-0.73132910648164, -0.90259903319141, -0.73132910648164],
[-0.59938063492158, -0.89849390365871, -0.71565649846207],
[-0.44889894801261, -0.89619431421031, -0.70620673063170],
[-0.28598097100158, -0.89515246800868, -0.70174749688164],
[-0.11711906410810, -0.89515246800868, -0.70174749688164],
[0.05129999285461, -0.89619431421031, -0.70620673063170],
[0.21353103704236, -0.89849390365871, -0.71565649846207],
[0.36525724615468, -0.90259903319141, -0.73132910648164],
[0.50506413346777, -0.90975051014484, -0.75553373590533],
[0.63735081525634, -0.92300465044929, -0.79134151435776],
[0.75555516498428, -0.95143658722507, -0.80411857775921],
[-1.00000000000000, -0.87046490686619, -0.77955200987756],
[-0.90975051014484, -0.83977988741760, -0.75553373590533],
[-0.81806030657087, -0.81806030657087, -0.72622199534603],
[-0.70546057011574, -0.80407960155614, -0.70546057011574],
[-0.57235298198835, -0.79546369024868, -0.69209820344694],
[-0.42330908651523, -0.79077416730400, -0.68465203917083],
[-0.26422727435709, -0.78928444359942, -0.68226100768640],
[-0.10126470700993, -0.79077416730400, -0.68465203917083],
[0.05991487568396, -0.79546369024868, -0.69209820344694],
[0.21500074178762, -0.80407960155614, -0.70546057011574],
[0.36234260848776, -0.81806030657087, -0.72622199534603],
[0.50506413346777, -0.83977988741760, -0.75553373590533],
[0.65001691674374, -0.87046490686619, -0.77955200987756],
[-1.00000000000000, -0.76088114861788, -0.76088114861788],
[-0.90259903319141, -0.73132910648164, -0.73132910648164],
[-0.80407960155614, -0.70546057011574, -0.70546057011574],
[-0.68766270949317, -0.68766270949317, -0.68766270949317],
[-0.55356600345381, -0.67674763053283, -0.67674763053283],
[-0.40609844374786, -0.67157579256611, -0.67157579256611],
[-0.25074997111992, -0.67157579256611, -0.67157579256611],
[-0.09293873548053, -0.67674763053283, -0.67674763053283],
[0.06298812847952, -0.68766270949317, -0.68766270949317],
[0.21500074178762, -0.70546057011574, -0.70546057011574],
[0.36525724615468, -0.73132910648164, -0.73132910648164],
[0.52176229723576, -0.76088114861788, -0.76088114861788],
[-1.00000000000000, -0.62719310134571, -0.74725908993286],
[-0.89849390365871, -0.59938063492158, -0.71565649846207],
[-0.79546369024868, -0.57235298198835, -0.69209820344694],
[-0.67674763053283, -0.55356600345381, -0.67674763053283],
[-0.54278153811667, -0.54278153811667, -0.66817385848353],
[-0.39764989863294, -0.53927929004885, -0.66542091268526],
[-0.24626306528312, -0.54278153811667, -0.66817385848353],
[-0.09293873548053, -0.55356600345381, -0.67674763053283],
[0.05991487568396, -0.57235298198835, -0.69209820344694],
[0.21353103704236, -0.59938063492158, -0.71565649846207],
[0.37445219127856, -0.62719310134571, -0.74725908993286],
[-1.00000000000000, -0.47450138980813, -0.73801547866742],
[-0.89619431421031, -0.44889894801261, -0.70620673063170],
[-0.79077416730400, -0.42330908651523, -0.68465203917083],
[-0.67157579256611, -0.40609844374786, -0.67157579256611],
[-0.53927929004885, -0.39764989863294, -0.66542091268526],
[-0.39764989863294, -0.39764989863294, -0.66542091268526],
[-0.25074997111992, -0.40609844374786, -0.67157579256611],
[-0.10126470700993, -0.42330908651523, -0.68465203917083],
[0.05129999285461, -0.44889894801261, -0.70620673063170],
[0.21251686847555, -0.47450138980813, -0.73801547866742],
[-1.00000000000000, -0.30834740911645, -0.73266516174362],
[-0.89515246800868, -0.28598097100158, -0.70174749688164],
[-0.78928444359942, -0.26422727435709, -0.68226100768640],
[-0.67157579256611, -0.25074997111992, -0.67157579256611],
[-0.54278153811667, -0.24626306528312, -0.66817385848353],
[-0.40609844374786, -0.25074997111992, -0.67157579256611],
[-0.26422727435709, -0.26422727435709, -0.68226100768640],
[-0.11711906410810, -0.28598097100158, -0.70174749688164],
[0.04101257086006, -0.30834740911645, -0.73266516174362],
[-1.00000000000000, -0.13454275358835, -0.73091449282331],
[-0.89515246800868, -0.11711906410810, -0.70174749688164],
[-0.79077416730400, -0.10126470700993, -0.68465203917083],
[-0.67674763053283, -0.09293873548053, -0.67674763053283],
[-0.55356600345381, -0.09293873548053, -0.67674763053283],
[-0.42330908651523, -0.10126470700993, -0.68465203917083],
[-0.28598097100158, -0.11711906410810, -0.70174749688164],
[-0.13454275358835, -0.13454275358835, -0.73091449282331],
[-1.00000000000000, 0.04101257086006, -0.73266516174362],
[-0.89619431421031, 0.05129999285461, -0.70620673063170],
[-0.79546369024868, 0.05991487568396, -0.69209820344694],
[-0.68766270949317, 0.06298812847952, -0.68766270949317],
[-0.57235298198835, 0.05991487568396, -0.69209820344694],
[-0.44889894801261, 0.05129999285461, -0.70620673063170],
[-0.30834740911645, 0.04101257086006, -0.73266516174362],
[-1.00000000000000, 0.21251686847555, -0.73801547866742],
[-0.89849390365871, 0.21353103704236, -0.71565649846207],
[-0.80407960155614, 0.21500074178762, -0.70546057011574],
[-0.70546057011574, 0.21500074178762, -0.70546057011574],
[-0.59938063492158, 0.21353103704236, -0.71565649846207],
[-0.47450138980813, 0.21251686847555, -0.73801547866742],
[-1.00000000000000, 0.37445219127856, -0.74725908993286],
[-0.90259903319141, 0.36525724615468, -0.73132910648164],
[-0.81806030657087, 0.36234260848776, -0.72622199534603],
[-0.73132910648164, 0.36525724615468, -0.73132910648164],
[-0.62719310134570, 0.37445219127856, -0.74725908993286],
[-1.00000000000000, 0.52176229723576, -0.76088114861788],
[-0.90975051014484, 0.50506413346777, -0.75553373590533],
[-0.83977988741760, 0.50506413346777, -0.75553373590533],
[-0.76088114861788, 0.52176229723576, -0.76088114861788],
[-1.00000000000000, 0.65001691674374, -0.77955200987756],
[-0.92300465044929, 0.63735081525634, -0.79134151435776],
[-0.87046490686619, 0.65001691674374, -0.77955200987756],
[-1.00000000000000, 0.75555516498428, -0.80411857775921],
[-0.95143658722507, 0.75555516498428, -0.80411857775921],
[-1.00000000000000, 0.83559353521809, -0.83559353521809],
[-1.00000000000000, -1.00000000000000, -0.72367932928324],
[-0.94689840816918, -1.00000000000000, -0.68283080625434],
[-0.86118354428076, -1.00000000000000, -0.65107787413874],
[-0.74725908993286, -1.00000000000000, -0.62719310134570],
[-0.61017433892153, -1.00000000000000, -0.61017433892153],
[-0.45548262743559, -1.00000000000000, -0.59925804242534],
[-0.28907450090662, -1.00000000000000, -0.59392944953705],
[-0.11699604955632, -1.00000000000000, -0.59392944953705],
[0.05474066986092, -1.00000000000000, -0.59925804242534],
[0.22034867784306, -1.00000000000000, -0.61017433892153],
[0.37445219127856, -1.00000000000000, -0.62719310134571],
[0.51226141841950, -1.00000000000000, -0.65107787413874],
[0.62972921442352, -1.00000000000000, -0.68283080625434],
[0.72367932928324, -1.00000000000000, -0.72367932928324],
[-1.00000000000000, -0.94689840816918, -0.68283080625434],
[-0.91826859853484, -0.91826859853484, -0.66892711010229],
[-0.82880683679752, -0.90533315725650, -0.62778228539838],
[-0.71565649846207, -0.89849390365871, -0.59938063492158],
[-0.58120945840524, -0.89470167097220, -0.58120945840524],
[-0.43064547863352, -0.89275319595316, -0.57106333491179],
[-0.27002622797889, -0.89215000219033, -0.56779754185188],
[-0.10553799050153, -0.89275319595316, -0.57106333491179],
[0.05712058778268, -0.89470167097220, -0.58120945840524],
[0.21353103704236, -0.89849390365871, -0.59938063492158],
[0.36192227945240, -0.90533315725650, -0.62778228539838],
[0.50546430717196, -0.91826859853484, -0.66892711010229],
[0.62972921442352, -0.94689840816918, -0.68283080625434],
[-1.00000000000000, -0.86118354428076, -0.65107787413874],
[-0.90533315725650, -0.82880683679752, -0.62778228539838],
[-0.80821469636381, -0.80821469636381, -0.59557793689120],
[-0.69209820344694, -0.79546369024868, -0.57235298198835],
[-0.55785828724523, -0.78803820908470, -0.55785828724523],
[-0.41008062239189, -0.78460952663812, -0.55093034546477],
[-0.25437950550522, -0.78460952663812, -0.55093034546477],
[-0.09624521642484, -0.78803820908470, -0.55785828724523],
[0.05991487568396, -0.79546369024868, -0.57235298198835],
[0.21200732961882, -0.80821469636381, -0.59557793689121],
[0.36192227945240, -0.82880683679752, -0.62778228539838],
[0.51226141841950, -0.86118354428076, -0.65107787413874],
[-1.00000000000000, -0.74725908993286, -0.62719310134570],
[-0.89849390365871, -0.71565649846207, -0.59938063492158],
[-0.79546369024868, -0.69209820344694, -0.57235298198835],
[-0.67674763053283, -0.67674763053283, -0.55356600345381],
[-0.54278153811667, -0.66817385848353, -0.54278153811667],
[-0.39764989863294, -0.66542091268526, -0.53927929004885],
[-0.24626306528312, -0.66817385848353, -0.54278153811667],
[-0.09293873548053, -0.67674763053283, -0.55356600345381],
[0.05991487568396, -0.69209820344694, -0.57235298198835],
[0.21353103704236, -0.71565649846207, -0.59938063492158],
[0.37445219127856, -0.74725908993286, -0.62719310134570],
[-1.00000000000000, -0.61017433892153, -0.61017433892153],
[-0.89470167097220, -0.58120945840524, -0.58120945840524],
[-0.78803820908470, -0.55785828724523, -0.55785828724523],
[-0.66817385848353, -0.54278153811667, -0.54278153811667],
[-0.53548665674294, -0.53548665674294, -0.53548665674294],
[-0.39354002977118, -0.53548665674294, -0.53548665674294],
[-0.24626306528312, -0.54278153811667, -0.54278153811667],
[-0.09624521642484, -0.55785828724523, -0.55785828724523],
[0.05712058778268, -0.58120945840524, -0.58120945840524],
[0.22034867784306, -0.61017433892153, -0.61017433892153],
[-1.00000000000000, -0.45548262743559, -0.59925804242534],
[-0.89275319595316, -0.43064547863352, -0.57106333491179],
[-0.78460952663812, -0.41008062239189, -0.55093034546477],
[-0.66542091268526, -0.39764989863294, -0.53927929004885],
[-0.53548665674294, -0.39354002977118, -0.53548665674294],
[-0.39764989863294, -0.39764989863294, -0.53927929004885],
[-0.25437950550522, -0.41008062239189, -0.55093034546477],
[-0.10553799050153, -0.43064547863352, -0.57106333491179],
[0.05474066986092, -0.45548262743559, -0.59925804242534],
[-1.00000000000000, -0.28907450090662, -0.59392944953705],
[-0.89215000219033, -0.27002622797889, -0.56779754185188],
[-0.78460952663812, -0.25437950550522, -0.55093034546477],
[-0.66817385848353, -0.24626306528312, -0.54278153811667],
[-0.54278153811667, -0.24626306528312, -0.54278153811667],
[-0.41008062239189, -0.25437950550522, -0.55093034546477],
[-0.27002622797889, -0.27002622797889, -0.56779754185188],
[-0.11699604955632, -0.28907450090662, -0.59392944953705],
[-1.00000000000000, -0.11699604955632, -0.59392944953705],
[-0.89275319595316, -0.10553799050153, -0.57106333491179],
[-0.78803820908470, -0.09624521642484, -0.55785828724523],
[-0.67674763053283, -0.09293873548053, -0.55356600345381],
[-0.55785828724523, -0.09624521642484, -0.55785828724523],
[-0.43064547863352, -0.10553799050153, -0.57106333491179],
[-0.28907450090662, -0.11699604955632, -0.59392944953705],
[-1.00000000000000, 0.05474066986092, -0.59925804242534],
[-0.89470167097220, 0.05712058778268, -0.58120945840524],
[-0.79546369024868, 0.05991487568396, -0.57235298198835],
[-0.69209820344694, 0.05991487568396, -0.57235298198835],
[-0.58120945840524, 0.05712058778268, -0.58120945840524],
[-0.45548262743559, 0.05474066986092, -0.59925804242534],
[-1.00000000000000, 0.22034867784306, -0.61017433892153],
[-0.89849390365871, 0.21353103704236, -0.59938063492158],
[-0.80821469636381, 0.21200732961882, -0.59557793689121],
[-0.71565649846207, 0.21353103704236, -0.59938063492158],
[-0.61017433892153, 0.22034867784306, -0.61017433892153],
[-1.00000000000000, 0.37445219127856, -0.62719310134571],
[-0.90533315725650, 0.36192227945240, -0.62778228539838],
[-0.82880683679752, 0.36192227945240, -0.62778228539838],
[-0.74725908993286, 0.37445219127856, -0.62719310134571],
[-1.00000000000000, 0.51226141841950, -0.65107787413874],
[-0.91826859853484, 0.50546430717196, -0.66892711010229],
[-0.86118354428076, 0.51226141841950, -0.65107787413874],
[-1.00000000000000, 0.62972921442352, -0.68283080625434],
[-0.94689840816918, 0.62972921442352, -0.68283080625434],
[-1.00000000000000, 0.72367932928324, -0.72367932928324],
[-1.00000000000000, -1.00000000000000, -0.58850483431866],
[-0.94367298086688, -1.00000000000000, -0.53977863434066],
[-0.85467813358830, -1.00000000000000, -0.50223646595824],
[-0.73801547866742, -1.00000000000000, -0.47450138980813],
[-0.59925804242534, -1.00000000000000, -0.45548262743559],
[-0.44439033087540, -1.00000000000000, -0.44439033087540],
[-0.27962686227842, -1.00000000000000, -0.44074627544316],
[-0.11121933824920, -1.00000000000000, -0.44439033087540],
[0.05474066986092, -1.00000000000000, -0.45548262743559],
[0.21251686847555, -1.00000000000000, -0.47450138980813],
[0.35691459954653, -1.00000000000000, -0.50223646595824],
[0.48345161520754, -1.00000000000000, -0.53977863434066],
[0.58850483431866, -1.00000000000000, -0.58850483431866],
[-1.00000000000000, -0.94367298086688, -0.53977863434066],
[-0.91533769804486, -0.91533769804486, -0.52168268319682],
[-0.82197902687780, -0.90271908968347, -0.47858570939270],
[-0.70620673063170, -0.89619431421031, -0.44889894801261],
[-0.57106333491179, -0.89275319595316, -0.43064547863352],
[-0.42195032631194, -0.89124316588241, -0.42195032631194],
[-0.26485618149372, -0.89124316588241, -0.42195032631194],
[-0.10553799050153, -0.89275319595316, -0.43064547863352],
[0.05129999285461, -0.89619431421031, -0.44889894801261],
[0.20328382595397, -0.90271908968347, -0.47858570939270],
[0.35235807928655, -0.91533769804486, -0.52168268319682],
[0.48345161520754, -0.94367298086688, -0.53977863434066],
[-1.00000000000000, -0.85467813358830, -0.50223646595824],
[-0.90271908968347, -0.82197902687780, -0.47858570939270],
[-0.80237272611754, -0.80237272611754, -0.44624189631944],
[-0.68465203917083, -0.79077416730400, -0.42330908651523],
[-0.55093034546477, -0.78460952663812, -0.41008062239189],
[-0.40577573935925, -0.78267278192224, -0.40577573935925],
[-0.25437950550522, -0.78460952663812, -0.41008062239189],
[-0.10126470700993, -0.79077416730400, -0.42330908651523],
[0.05098734855452, -0.80237272611754, -0.44624189631944],
[0.20328382595397, -0.82197902687780, -0.47858570939270],
[0.35691459954653, -0.85467813358830, -0.50223646595824],
[-1.00000000000000, -0.73801547866742, -0.47450138980813],
[-0.89619431421031, -0.70620673063170, -0.44889894801261],
[-0.79077416730400, -0.68465203917083, -0.42330908651523],
[-0.67157579256611, -0.67157579256611, -0.40609844374786],
[-0.53927929004885, -0.66542091268526, -0.39764989863294],
[-0.39764989863294, -0.66542091268526, -0.39764989863294],
[-0.25074997111992, -0.67157579256611, -0.40609844374786],
[-0.10126470700993, -0.68465203917083, -0.42330908651523],
[0.05129999285461, -0.70620673063170, -0.44889894801261],
[0.21251686847555, -0.73801547866742, -0.47450138980813],
[-1.00000000000000, -0.59925804242534, -0.45548262743559],
[-0.89275319595316, -0.57106333491179, -0.43064547863352],
[-0.78460952663812, -0.55093034546477, -0.41008062239189],
[-0.66542091268526, -0.53927929004885, -0.39764989863294],
[-0.53548665674294, -0.53548665674294, -0.39354002977118],
[-0.39764989863294, -0.53927929004885, -0.39764989863294],
[-0.25437950550522, -0.55093034546477, -0.41008062239189],
[-0.10553799050153, -0.57106333491179, -0.43064547863352],
[0.05474066986092, -0.59925804242534, -0.45548262743559],
[-1.00000000000000, -0.44439033087540, -0.44439033087540],
[-0.89124316588241, -0.42195032631194, -0.42195032631194],
[-0.78267278192224, -0.40577573935925, -0.40577573935925],
[-0.66542091268526, -0.39764989863294, -0.39764989863294],
[-0.53927929004885, -0.39764989863294, -0.39764989863294],
[-0.40577573935925, -0.40577573935925, -0.40577573935925],
[-0.26485618149372, -0.42195032631194, -0.42195032631194],
[-0.11121933824920, -0.44439033087540, -0.44439033087540],
[-1.00000000000000, -0.27962686227842, -0.44074627544316],
[-0.89124316588241, -0.26485618149372, -0.42195032631194],
[-0.78460952663812, -0.25437950550522, -0.41008062239189],
[-0.67157579256611, -0.25074997111992, -0.40609844374786],
[-0.55093034546477, -0.25437950550522, -0.41008062239189],
[-0.42195032631194, -0.26485618149372, -0.42195032631194],
[-0.27962686227842, -0.27962686227842, -0.44074627544316],
[-1.00000000000000, -0.11121933824920, -0.44439033087540],
[-0.89275319595316, -0.10553799050153, -0.43064547863352],
[-0.79077416730400, -0.10126470700993, -0.42330908651523],
[-0.68465203917083, -0.10126470700993, -0.42330908651523],
[-0.57106333491179, -0.10553799050153, -0.43064547863352],
[-0.44439033087540, -0.11121933824920, -0.44439033087540],
[-1.00000000000000, 0.05474066986092, -0.45548262743559],
[-0.89619431421031, 0.05129999285461, -0.44889894801261],
[-0.80237272611754, 0.05098734855452, -0.44624189631944],
[-0.70620673063170, 0.05129999285461, -0.44889894801261],
[-0.59925804242534, 0.05474066986092, -0.45548262743559],
[-1.00000000000000, 0.21251686847555, -0.47450138980813],
[-0.90271908968347, 0.20328382595397, -0.47858570939270],
[-0.82197902687780, 0.20328382595397, -0.47858570939270],
[-0.73801547866742, 0.21251686847555, -0.47450138980813],
[-1.00000000000000, 0.35691459954653, -0.50223646595824],
[-0.91533769804486, 0.35235807928655, -0.52168268319682],
[-0.85467813358830, 0.35691459954653, -0.50223646595824],
[-1.00000000000000, 0.48345161520754, -0.53977863434066],
[-0.94367298086688, 0.48345161520754, -0.53977863434066],
[-1.00000000000000, 0.58850483431866, -0.58850483431866],
[-1.00000000000000, -1.00000000000000, -0.43441503691212],
[-0.94153338637932, -1.00000000000000, -0.37973175758212],
[-0.85055979016974, -1.00000000000000, -0.33819570282874],
[-0.73266516174362, -1.00000000000000, -0.30834740911645],
[-0.59392944953705, -1.00000000000000, -0.28907450090662],
[-0.44074627544316, -1.00000000000000, -0.27962686227842],
[-0.27962686227842, -1.00000000000000, -0.27962686227842],
[-0.11699604955632, -1.00000000000000, -0.28907450090662],
[0.04101257086006, -1.00000000000000, -0.30834740911645],
[0.18875549299847, -1.00000000000000, -0.33819570282874],
[0.32126514396144, -1.00000000000000, -0.37973175758212],
[0.43441503691212, -1.00000000000000, -0.43441503691212],
[-1.00000000000000, -0.94153338637932, -0.37973175758212],
[-0.91361325731751, -0.91361325731751, -0.35580977157487],
[-0.81822259638933, -0.90132394717945, -0.31409235901703],
[-0.70174749688164, -0.89515246800868, -0.28598097100158],
[-0.56779754185188, -0.89215000219033, -0.27002622797889],
[-0.42195032631194, -0.89124316588241, -0.26485618149372],
[-0.27002622797889, -0.89215000219033, -0.27002622797889],
[-0.11711906410810, -0.89515246800868, -0.28598097100158],
[0.03363890258581, -0.90132394717945, -0.31409235901703],
[0.18303628620988, -0.91361325731751, -0.35580977157487],
[0.32126514396144, -0.94153338637932, -0.37973175758212],
[-1.00000000000000, -0.85055979016974, -0.33819570282874],
[-0.90132394717945, -0.81822259638933, -0.31409235901703],
[-0.79964507343425, -0.79964507343425, -0.28431281823316],
[-0.68226100768640, -0.78928444359942, -0.26422727435709],
[-0.55093034546477, -0.78460952663812, -0.25437950550522],
[-0.41008062239189, -0.78460952663812, -0.25437950550522],
[-0.26422727435709, -0.78928444359942, -0.26422727435709],
[-0.11639703489834, -0.79964507343425, -0.28431281823316],
[0.03363890258581, -0.81822259638933, -0.31409235901703],
[0.18875549299847, -0.85055979016974, -0.33819570282874],
[-1.00000000000000, -0.73266516174362, -0.30834740911645],
[-0.89515246800868, -0.70174749688164, -0.28598097100158],
[-0.78928444359942, -0.68226100768640, -0.26422727435709],
[-0.67157579256611, -0.67157579256611, -0.25074997111992],
[-0.54278153811667, -0.66817385848353, -0.24626306528312],
[-0.40609844374786, -0.67157579256611, -0.25074997111992],
[-0.26422727435709, -0.68226100768640, -0.26422727435709],
[-0.11711906410810, -0.70174749688164, -0.28598097100158],
[0.04101257086006, -0.73266516174362, -0.30834740911645],
[-1.00000000000000, -0.59392944953705, -0.28907450090662],
[-0.89215000219033, -0.56779754185188, -0.27002622797889],
[-0.78460952663812, -0.55093034546477, -0.25437950550522],
[-0.66817385848353, -0.54278153811667, -0.24626306528312],
[-0.54278153811667, -0.54278153811667, -0.24626306528312],
[-0.41008062239189, -0.55093034546477, -0.25437950550522],
[-0.27002622797889, -0.56779754185188, -0.27002622797889],
[-0.11699604955632, -0.59392944953705, -0.28907450090662],
[-1.00000000000000, -0.44074627544316, -0.27962686227842],
[-0.89124316588241, -0.42195032631194, -0.26485618149372],
[-0.78460952663812, -0.41008062239189, -0.25437950550522],
[-0.67157579256611, -0.40609844374786, -0.25074997111992],
[-0.55093034546477, -0.41008062239189, -0.25437950550522],
[-0.42195032631194, -0.42195032631194, -0.26485618149372],
[-0.27962686227842, -0.44074627544316, -0.27962686227842],
[-1.00000000000000, -0.27962686227842, -0.27962686227842],
[-0.89215000219033, -0.27002622797889, -0.27002622797889],
[-0.78928444359942, -0.26422727435709, -0.26422727435709],
[-0.68226100768640, -0.26422727435709, -0.26422727435709],
[-0.56779754185188, -0.27002622797889, -0.27002622797889],
[-0.44074627544316, -0.27962686227842, -0.27962686227842],
[-1.00000000000000, -0.11699604955632, -0.28907450090662],
[-0.89515246800868, -0.11711906410810, -0.28598097100158],
[-0.79964507343425, -0.11639703489834, -0.28431281823316],
[-0.70174749688164, -0.11711906410810, -0.28598097100158],
[-0.59392944953705, -0.11699604955632, -0.28907450090662],
[-1.00000000000000, 0.04101257086006, -0.30834740911645],
[-0.90132394717945, 0.03363890258581, -0.31409235901703],
[-0.81822259638933, 0.03363890258581, -0.31409235901703],
[-0.73266516174362, 0.04101257086006, -0.30834740911645],
[-1.00000000000000, 0.18875549299847, -0.33819570282874],
[-0.91361325731751, 0.18303628620988, -0.35580977157487],
[-0.85055979016974, 0.18875549299847, -0.33819570282874],
[-1.00000000000000, 0.32126514396144, -0.37973175758212],
[-0.94153338637932, 0.32126514396144, -0.37973175758212],
[-1.00000000000000, 0.43441503691212, -0.43441503691212],
[-1.00000000000000, -1.00000000000000, -0.26636265287828],
[-0.94031572376258, -1.00000000000000, -0.20795453403575],
[-0.84856652844490, -1.00000000000000, -0.16450620740505],
[-0.73091449282331, -1.00000000000000, -0.13454275358835],
[-0.59392944953705, -1.00000000000000, -0.11699604955632],
[-0.44439033087540, -1.00000000000000, -0.11121933824920],
[-0.28907450090662, -1.00000000000000, -0.11699604955632],
[-0.13454275358835, -1.00000000000000, -0.13454275358835],
[0.01307273584995, -1.00000000000000, -0.16450620740505],
[0.14827025779833, -1.00000000000000, -0.20795453403575],
[0.26636265287828, -1.00000000000000, -0.26636265287828],
[-1.00000000000000, -0.94031572376258, -0.20795453403575],
[-0.91281028806768, -0.91281028806768, -0.17821561606993],
[-0.81702189747097, -0.90088437869935, -0.14104686191484],
[-0.70174749688164, -0.89515246800868, -0.11711906410810],
[-0.57106333491179, -0.89275319595316, -0.10553799050153],
[-0.43064547863352, -0.89275319595316, -0.10553799050153],
[-0.28598097100158, -0.89515246800868, -0.11711906410810],
[-0.14104686191484, -0.90088437869935, -0.14104686191484],
[0.00383619220529, -0.91281028806768, -0.17821561606993],
[0.14827025779834, -0.94031572376258, -0.20795453403575],
[-1.00000000000000, -0.84856652844490, -0.16450620740505],
[-0.90088437869935, -0.81702189747097, -0.14104686191484],
[-0.79964507343425, -0.79964507343425, -0.11639703489834],
[-0.68465203917083, -0.79077416730400, -0.10126470700993],
[-0.55785828724523, -0.78803820908470, -0.09624521642484],
[-0.42330908651523, -0.79077416730400, -0.10126470700993],
[-0.28431281823316, -0.79964507343425, -0.11639703489834],
[-0.14104686191484, -0.81702189747097, -0.14104686191484],
[0.01307273584995, -0.84856652844490, -0.16450620740505],
[-1.00000000000000, -0.73091449282331, -0.13454275358835],
[-0.89515246800868, -0.70174749688164, -0.11711906410810],
[-0.79077416730400, -0.68465203917083, -0.10126470700993],
[-0.67674763053283, -0.67674763053283, -0.09293873548053],
[-0.55356600345381, -0.67674763053283, -0.09293873548053],
[-0.42330908651523, -0.68465203917083, -0.10126470700993],
[-0.28598097100158, -0.70174749688164, -0.11711906410810],
[-0.13454275358835, -0.73091449282331, -0.13454275358835],
[-1.00000000000000, -0.59392944953705, -0.11699604955632],
[-0.89275319595316, -0.57106333491179, -0.10553799050153],
[-0.78803820908470, -0.55785828724523, -0.09624521642484],
[-0.67674763053283, -0.55356600345381, -0.09293873548053],
[-0.55785828724523, -0.55785828724523, -0.09624521642484],
[-0.43064547863352, -0.57106333491179, -0.10553799050153],
[-0.28907450090662, -0.59392944953705, -0.11699604955632],
[-1.00000000000000, -0.44439033087540, -0.11121933824920],
[-0.89275319595316, -0.43064547863352, -0.10553799050153],
[-0.79077416730400, -0.42330908651523, -0.10126470700993],
[-0.68465203917083, -0.42330908651523, -0.10126470700993],
[-0.57106333491179, -0.43064547863352, -0.10553799050153],
[-0.44439033087540, -0.44439033087540, -0.11121933824920],
[-1.00000000000000, -0.28907450090662, -0.11699604955632],
[-0.89515246800868, -0.28598097100158, -0.11711906410810],
[-0.79964507343425, -0.28431281823316, -0.11639703489834],
[-0.70174749688164, -0.28598097100158, -0.11711906410810],
[-0.59392944953705, -0.28907450090662, -0.11699604955632],
[-1.00000000000000, -0.13454275358835, -0.13454275358835],
[-0.90088437869935, -0.14104686191484, -0.14104686191484],
[-0.81702189747097, -0.14104686191484, -0.14104686191484],
[-0.73091449282331, -0.13454275358835, -0.13454275358835],
[-1.00000000000000, 0.01307273584995, -0.16450620740505],
[-0.91281028806768, 0.00383619220529, -0.17821561606993],
[-0.84856652844490, 0.01307273584995, -0.16450620740505],
[-1.00000000000000, 0.14827025779833, -0.20795453403575],
[-0.94031572376258, 0.14827025779833, -0.20795453403575],
[-1.00000000000000, 0.26636265287828, -0.26636265287828],
[-1.00000000000000, -1.00000000000000, -0.08974909348465],
[-0.93992089073865, -1.00000000000000, -0.03003955463067],
[-0.84856652844490, -1.00000000000000, 0.01307273584995],
[-0.73266516174362, -1.00000000000000, 0.04101257086006],
[-0.59925804242534, -1.00000000000000, 0.05474066986092],
[-0.45548262743559, -1.00000000000000, 0.05474066986092],
[-0.30834740911645, -1.00000000000000, 0.04101257086006],
[-0.16450620740505, -1.00000000000000, 0.01307273584995],
[-0.03003955463067, -1.00000000000000, -0.03003955463067],
[0.08974909348465, -1.00000000000000, -0.08974909348465],
[-1.00000000000000, -0.93992089073865, -0.03003955463067],
[-0.91281028806768, -0.91281028806768, 0.00383619220529],
[-0.81822259638933, -0.90132394717945, 0.03363890258581],
[-0.70620673063170, -0.89619431421031, 0.05129999285461],
[-0.58120945840524, -0.89470167097220, 0.05712058778268],
[-0.44889894801261, -0.89619431421031, 0.05129999285461],
[-0.31409235901703, -0.90132394717945, 0.03363890258581],
[-0.17821561606993, -0.91281028806768, 0.00383619220529],
[-0.03003955463067, -0.93992089073865, -0.03003955463067],
[-1.00000000000000, -0.84856652844490, 0.01307273584995],
[-0.90132394717945, -0.81822259638933, 0.03363890258581],
[-0.80237272611754, -0.80237272611754, 0.05098734855452],
[-0.69209820344694, -0.79546369024868, 0.05991487568396],
[-0.57235298198835, -0.79546369024868, 0.05991487568396],
[-0.44624189631944, -0.80237272611754, 0.05098734855452],
[-0.31409235901703, -0.81822259638933, 0.03363890258581],
[-0.16450620740505, -0.84856652844490, 0.01307273584995],
[-1.00000000000000, -0.73266516174362, 0.04101257086006],
[-0.89619431421031, -0.70620673063170, 0.05129999285461],
[-0.79546369024868, -0.69209820344694, 0.05991487568396],
[-0.68766270949317, -0.68766270949317, 0.06298812847952],
[-0.57235298198835, -0.69209820344694, 0.05991487568396],
[-0.44889894801261, -0.70620673063170, 0.05129999285461],
[-0.30834740911645, -0.73266516174362, 0.04101257086006],
[-1.00000000000000, -0.59925804242534, 0.05474066986092],
[-0.89470167097220, -0.58120945840524, 0.05712058778268],
[-0.79546369024868, -0.57235298198835, 0.05991487568396],
[-0.69209820344694, -0.57235298198835, 0.05991487568396],
[-0.58120945840524, -0.58120945840524, 0.05712058778268],
[-0.45548262743559, -0.59925804242534, 0.05474066986092],
[-1.00000000000000, -0.45548262743559, 0.05474066986092],
[-0.89619431421031, -0.44889894801261, 0.05129999285461],
[-0.80237272611754, -0.44624189631944, 0.05098734855452],
[-0.70620673063170, -0.44889894801261, 0.05129999285461],
[-0.59925804242534, -0.45548262743559, 0.05474066986092],
[-1.00000000000000, -0.30834740911645, 0.04101257086006],
[-0.90132394717945, -0.31409235901703, 0.03363890258581],
[-0.81822259638933, -0.31409235901703, 0.03363890258581],
[-0.73266516174362, -0.30834740911645, 0.04101257086006],
[-1.00000000000000, -0.16450620740505, 0.01307273584995],
[-0.91281028806768, -0.17821561606993, 0.00383619220529],
[-0.84856652844490, -0.16450620740505, 0.01307273584995],
[-1.00000000000000, -0.03003955463067, -0.03003955463067],
[-0.93992089073865, -0.03003955463067, -0.03003955463067],
[-1.00000000000000, 0.08974909348465, -0.08974909348465],
[-1.00000000000000, -1.00000000000000, 0.08974909348465],
[-0.94031572376258, -1.00000000000000, 0.14827025779833],
[-0.85055979016974, -1.00000000000000, 0.18875549299847],
[-0.73801547866742, -1.00000000000000, 0.21251686847555],
[-0.61017433892153, -1.00000000000000, 0.22034867784306],
[-0.47450138980813, -1.00000000000000, 0.21251686847555],
[-0.33819570282874, -1.00000000000000, 0.18875549299847],
[-0.20795453403575, -1.00000000000000, 0.14827025779834],
[-0.08974909348465, -1.00000000000000, 0.08974909348465],
[-1.00000000000000, -0.94031572376258, 0.14827025779833],
[-0.91361325731751, -0.91361325731751, 0.18303628620988],
[-0.82197902687780, -0.90271908968347, 0.20328382595397],
[-0.71565649846207, -0.89849390365871, 0.21353103704236],
[-0.59938063492158, -0.89849390365871, 0.21353103704236],
[-0.47858570939270, -0.90271908968347, 0.20328382595397],
[-0.35580977157487, -0.91361325731751, 0.18303628620988],
[-0.20795453403575, -0.94031572376258, 0.14827025779833],
[-1.00000000000000, -0.85055979016974, 0.18875549299847],
[-0.90271908968347, -0.82197902687780, 0.20328382595397],
[-0.80821469636381, -0.80821469636381, 0.21200732961882],
[-0.70546057011574, -0.80407960155614, 0.21500074178762],
[-0.59557793689121, -0.80821469636381, 0.21200732961882],
[-0.47858570939270, -0.82197902687780, 0.20328382595397],
[-0.33819570282874, -0.85055979016974, 0.18875549299847],
[-1.00000000000000, -0.73801547866742, 0.21251686847555],
[-0.89849390365871, -0.71565649846207, 0.21353103704236],
[-0.80407960155614, -0.70546057011574, 0.21500074178762],
[-0.70546057011574, -0.70546057011574, 0.21500074178762],
[-0.59938063492158, -0.71565649846207, 0.21353103704236],
[-0.47450138980813, -0.73801547866742, 0.21251686847555],
[-1.00000000000000, -0.61017433892153, 0.22034867784306],
[-0.89849390365871, -0.59938063492158, 0.21353103704236],
[-0.80821469636381, -0.59557793689120, 0.21200732961882],
[-0.71565649846207, -0.59938063492158, 0.21353103704236],
[-0.61017433892153, -0.61017433892153, 0.22034867784306],
[-1.00000000000000, -0.47450138980813, 0.21251686847555],
[-0.90271908968347, -0.47858570939270, 0.20328382595397],
[-0.82197902687780, -0.47858570939270, 0.20328382595397],
[-0.73801547866742, -0.47450138980813, 0.21251686847555],
[-1.00000000000000, -0.33819570282874, 0.18875549299847],
[-0.91361325731751, -0.35580977157487, 0.18303628620988],
[-0.85055979016974, -0.33819570282874, 0.18875549299847],
[-1.00000000000000, -0.20795453403575, 0.14827025779834],
[-0.94031572376258, -0.20795453403575, 0.14827025779834],
[-1.00000000000000, -0.08974909348465, 0.08974909348465],
[-1.00000000000000, -1.00000000000000, 0.26636265287828],
[-0.94153338637932, -1.00000000000000, 0.32126514396144],
[-0.85467813358830, -1.00000000000000, 0.35691459954653],
[-0.74725908993286, -1.00000000000000, 0.37445219127856],
[-0.62719310134570, -1.00000000000000, 0.37445219127856],
[-0.50223646595824, -1.00000000000000, 0.35691459954653],
[-0.37973175758212, -1.00000000000000, 0.32126514396144],
[-0.26636265287828, -1.00000000000000, 0.26636265287828],
[-1.00000000000000, -0.94153338637932, 0.32126514396144],
[-0.91533769804486, -0.91533769804486, 0.35235807928655],
[-0.82880683679752, -0.90533315725650, 0.36192227945240],
[-0.73132910648164, -0.90259903319141, 0.36525724615468],
[-0.62778228539838, -0.90533315725650, 0.36192227945240],
[-0.52168268319682, -0.91533769804486, 0.35235807928655],
[-0.37973175758212, -0.94153338637932, 0.32126514396144],
[-1.00000000000000, -0.85467813358830, 0.35691459954653],
[-0.90533315725650, -0.82880683679752, 0.36192227945240],
[-0.81806030657087, -0.81806030657087, 0.36234260848776],
[-0.72622199534603, -0.81806030657087, 0.36234260848776],
[-0.62778228539838, -0.82880683679752, 0.36192227945240],
[-0.50223646595824, -0.85467813358830, 0.35691459954653],
[-1.00000000000000, -0.74725908993286, 0.37445219127856],
[-0.90259903319141, -0.73132910648164, 0.36525724615468],
[-0.81806030657087, -0.72622199534603, 0.36234260848776],
[-0.73132910648164, -0.73132910648164, 0.36525724615468],
[-0.62719310134571, -0.74725908993286, 0.37445219127856],
[-1.00000000000000, -0.62719310134570, 0.37445219127856],
[-0.90533315725650, -0.62778228539838, 0.36192227945240],
[-0.82880683679752, -0.62778228539838, 0.36192227945240],
[-0.74725908993286, -0.62719310134570, 0.37445219127856],
[-1.00000000000000, -0.50223646595824, 0.35691459954653],
[-0.91533769804486, -0.52168268319682, 0.35235807928655],
[-0.85467813358830, -0.50223646595824, 0.35691459954653],
[-1.00000000000000, -0.37973175758212, 0.32126514396144],
[-0.94153338637932, -0.37973175758212, 0.32126514396144],
[-1.00000000000000, -0.26636265287828, 0.26636265287828],
[-1.00000000000000, -1.00000000000000, 0.43441503691212],
[-0.94367298086688, -1.00000000000000, 0.48345161520754],
[-0.86118354428076, -1.00000000000000, 0.51226141841950],
[-0.76088114861788, -1.00000000000000, 0.52176229723576],
[-0.65107787413874, -1.00000000000000, 0.51226141841950],
[-0.53977863434066, -1.00000000000000, 0.48345161520754],
[-0.43441503691212, -1.00000000000000, 0.43441503691212],
[-1.00000000000000, -0.94367298086688, 0.48345161520754],
[-0.91826859853484, -0.91826859853484, 0.50546430717196],
[-0.83977988741760, -0.90975051014484, 0.50506413346777],
[-0.75553373590533, -0.90975051014484, 0.50506413346777],
[-0.66892711010229, -0.91826859853484, 0.50546430717196],
[-0.53977863434066, -0.94367298086688, 0.48345161520754],
[-1.00000000000000, -0.86118354428076, 0.51226141841950],
[-0.90975051014484, -0.83977988741760, 0.50506413346777],
[-0.83354227525151, -0.83354227525151, 0.50062682575452],
[-0.75553373590533, -0.83977988741760, 0.50506413346777],
[-0.65107787413874, -0.86118354428076, 0.51226141841950],
[-1.00000000000000, -0.76088114861788, 0.52176229723576],
[-0.90975051014484, -0.75553373590533, 0.50506413346777],
[-0.83977988741760, -0.75553373590533, 0.50506413346777],
[-0.76088114861788, -0.76088114861788, 0.52176229723576],
[-1.00000000000000, -0.65107787413874, 0.51226141841950],
[-0.91826859853484, -0.66892711010229, 0.50546430717196],
[-0.86118354428076, -0.65107787413874, 0.51226141841950],
[-1.00000000000000, -0.53977863434066, 0.48345161520754],
[-0.94367298086688, -0.53977863434066, 0.48345161520754],
[-1.00000000000000, -0.43441503691212, 0.43441503691212],
[-1.00000000000000, -1.00000000000000, 0.58850483431866],
[-0.94689840816918, -1.00000000000000, 0.62972921442352],
[-0.87046490686619, -1.00000000000000, 0.65001691674374],
[-0.77955200987756, -1.00000000000000, 0.65001691674374],
[-0.68283080625434, -1.00000000000000, 0.62972921442352],
[-0.58850483431866, -1.00000000000000, 0.58850483431866],
[-1.00000000000000, -0.94689840816918, 0.62972921442352],
[-0.92300465044929, -0.92300465044929, 0.63735081525634],
[-0.85699505928483, -0.91725218290052, 0.63124230147018],
[-0.79134151435775, -0.92300465044929, 0.63735081525634],
[-0.68283080625434, -0.94689840816918, 0.62972921442352],
[-1.00000000000000, -0.87046490686619, 0.65001691674374],
[-0.91725218290052, -0.85699505928483, 0.63124230147018],
[-0.85699505928483, -0.85699505928483, 0.63124230147018],
[-0.77955200987756, -0.87046490686619, 0.65001691674374],
[-1.00000000000000, -0.77955200987756, 0.65001691674374],
[-0.92300465044929, -0.79134151435776, 0.63735081525634],
[-0.87046490686619, -0.77955200987756, 0.65001691674374],
[-1.00000000000000, -0.68283080625434, 0.62972921442352],
[-0.94689840816918, -0.68283080625434, 0.62972921442352],
[-1.00000000000000, -0.58850483431866, 0.58850483431866],
[-1.00000000000000, -1.00000000000000, 0.72367932928324],
[-0.95143658722507, -1.00000000000000, 0.75555516498428],
[-0.88303323724208, -1.00000000000000, 0.76606647448416],
[-0.80411857775921, -1.00000000000000, 0.75555516498428],
[-0.72367932928324, -1.00000000000000, 0.72367932928324],
[-1.00000000000000, -0.95143658722507, 0.75555516498428],
[-0.93088733901333, -0.93088733901333, 0.74577891319880],
[-0.88400423517214, -0.93088733901333, 0.74577891319880],
[-0.80411857775921, -0.95143658722507, 0.75555516498428],
[-1.00000000000000, -0.88303323724208, 0.76606647448416],
[-0.93088733901333, -0.88400423517214, 0.74577891319880],
[-0.88303323724208, -0.88303323724208, 0.76606647448416],
[-1.00000000000000, -0.80411857775921, 0.75555516498428],
[-0.95143658722507, -0.80411857775921, 0.75555516498428],
[-1.00000000000000, -0.72367932928324, 0.72367932928324],
[-1.00000000000000, -1.00000000000000, 0.83559353521809],
[-0.95757537612802, -1.00000000000000, 0.85709107481873],
[-0.89951569869071, -1.00000000000000, 0.85709107481873],
[-0.83559353521809, -1.00000000000000, 0.83559353521809],
[-1.00000000000000, -0.95757537612802, 0.85709107481873],
[-0.94524290297158, -0.94524290297158, 0.83572870891473],
[-0.89951569869071, -0.95757537612802, 0.85709107481873],
[-1.00000000000000, -0.89951569869071, 0.85709107481873],
[-0.95757537612802, -0.89951569869071, 0.85709107481873],
[-1.00000000000000, -0.83559353521809, 0.83559353521809],
[-1.00000000000000, -1.00000000000000, 0.92064918534753],
[-0.96566215411209, -1.00000000000000, 0.93132430822417],
[-0.92064918534753, -1.00000000000000, 0.92064918534753],
[-1.00000000000000, -0.96566215411209, 0.93132430822417],
[-0.96566215411208, -0.96566215411209, 0.93132430822417],
[-1.00000000000000, -0.92064918534753, 0.92064918534753],
[-1.00000000000000, -1.00000000000000, 0.97610555741220],
[-0.97610555741220, -1.00000000000000, 0.97610555741220],
[-1.00000000000000, -0.97610555741220, 0.97610555741220],
])
elif C == 17:
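# Hardcoded Fekete node coordinates for this order on the reference
# tetrahedron [-1, 1]^3: every row appears to satisfy x, y, z >= -1 and
# x + y + z <= -1 (a characterization inferred from the data itself, not
# stated in the source).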
feketeNodes = np.array([
[-1.00000000000000, -1.00000000000000, -1.00000000000000],
[1.00000000000000, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, 1.00000000000000, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, 1.00000000000000],
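# The four tetrahedron vertices are listed first, followed by edge,
# face and interior nodes (ordering inferred from the rows below).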
[-0.97861176622208, -1.00000000000000, -1.00000000000000],
[-0.92890152815259, -1.00000000000000, -1.00000000000000],
[-0.85246057779665, -1.00000000000000, -1.00000000000000],
[-0.75149420255261, -1.00000000000000, -1.00000000000000],
[-0.62890813726522, -1.00000000000000, -1.00000000000000],
[-0.48822928568071, -1.00000000000000, -1.00000000000000],
[-0.33350484782450, -1.00000000000000, -1.00000000000000],
[-0.16918602340928, -1.00000000000000, -1.00000000000000],
[0.00000000000000, -1.00000000000000, -1.00000000000000],
[0.16918602340928, -1.00000000000000, -1.00000000000000],
[0.33350484782450, -1.00000000000000, -1.00000000000000],
[0.48822928568071, -1.00000000000000, -1.00000000000000],
[0.62890813726522, -1.00000000000000, -1.00000000000000],
[0.75149420255261, -1.00000000000000, -1.00000000000000],
[0.85246057779665, -1.00000000000000, -1.00000000000000],
[0.92890152815259, -1.00000000000000, -1.00000000000000],
[0.97861176622208, -1.00000000000000, -1.00000000000000],
[-1.00000000000000, -0.97861176622208, -1.00000000000000],
[-0.96921630071686, -0.96921630071687, -1.00000000000000],
[-0.90986964594991, -0.96183417565652, -1.00000000000000],
[-0.82402439713301, -0.95613320518782, -1.00000000000000],
[-0.71439175458728, -0.95182812061116, -1.00000000000000],
[-0.58431589231501, -0.94867846854126, -1.00000000000000],
[-0.43768590012289, -0.94648934393266, -1.00000000000000],
[-0.27882755855641, -0.94511287539771, -1.00000000000000],
[-0.11238110084642, -0.94444963013195, -1.00000000000000],
[0.05683073097837, -0.94444963013195, -1.00000000000000],
[0.22394043395412, -0.94511287539771, -1.00000000000000],
[0.38417524405555, -0.94648934393266, -1.00000000000000],
[0.53299436085627, -0.94867846854126, -1.00000000000000],
[0.66621987519844, -0.95182812061116, -1.00000000000000],
[0.78015760232083, -0.95613320518782, -1.00000000000000],
[0.87170382160643, -0.96183417565652, -1.00000000000000],
[0.93843260143372, -0.96921630071686, -1.00000000000000],
[0.97861176622208, -0.97861176622208, -1.00000000000000],
[-1.00000000000000, -0.92890152815259, -1.00000000000000],
[-0.96183417565652, -0.90986964594991, -1.00000000000000],
[-0.89481010149642, -0.89481010149641, -1.00000000000000],
[-0.80149102953572, -0.88312209180235, -1.00000000000000],
[-0.68506801977964, -0.87428693174862, -1.00000000000000],
[-0.54930144164810, -0.86787251363754, -1.00000000000000],
[-0.39842216100643, -0.86353773463513, -1.00000000000000],
[-0.23701502595382, -0.86103620266103, -1.00000000000000],
[-0.06989051473883, -0.86021897052234, -1.00000000000000],
[0.09805122861485, -0.86103620266103, -1.00000000000000],
[0.26195989564156, -0.86353773463513, -1.00000000000000],
[0.41717395528564, -0.86787251363754, -1.00000000000000],
[0.55935495152827, -0.87428693174862, -1.00000000000000],
[0.68461312133806, -0.88312209180234, -1.00000000000000],
[0.78962020299283, -0.89481010149641, -1.00000000000000],
[0.87170382160643, -0.90986964594991, -1.00000000000000],
[0.92890152815259, -0.92890152815259, -1.00000000000000],
[-1.00000000000000, -0.85246057779665, -1.00000000000000],
[-0.95613320518782, -0.82402439713301, -1.00000000000000],
[-0.88312209180234, -0.80149102953572, -1.00000000000000],
[-0.78402924082624, -0.78402924082624, -1.00000000000000],
[-0.66250646855334, -0.77093131190284, -1.00000000000000],
[-0.52271163373050, -0.76162101409829, -1.00000000000000],
[-0.36919858094923, -0.75566022473134, -1.00000000000000],
[-0.20679219426527, -0.75275399873620, -1.00000000000000],
[-0.04045380699852, -0.75275399873620, -1.00000000000000],
[0.12485880568057, -0.75566022473134, -1.00000000000000],
[0.28433264782879, -0.76162101409829, -1.00000000000000],
[0.43343778045617, -0.77093131190284, -1.00000000000000],
[0.56805848165248, -0.78402924082624, -1.00000000000000],
[0.68461312133807, -0.80149102953572, -1.00000000000000],
[0.78015760232082, -0.82402439713301, -1.00000000000000],
[0.85246057779665, -0.85246057779665, -1.00000000000000],
[-1.00000000000000, -0.75149420255261, -1.00000000000000],
[-0.95182812061116, -0.71439175458728, -1.00000000000000],
[-0.87428693174862, -0.68506801977964, -1.00000000000000],
[-0.77093131190284, -0.66250646855334, -1.00000000000000],
[-0.64585840843297, -0.64585840843297, -1.00000000000000],
[-0.50360674986693, -0.63445303796278, -1.00000000000000],
[-0.34903506888062, -0.62780542211337, -1.00000000000000],
[-0.18718886587164, -0.62562226825673, -1.00000000000000],
[-0.02315950900602, -0.62780542211337, -1.00000000000000],
[0.13805978782971, -0.63445303796278, -1.00000000000000],
[0.29171681686594, -0.64585840843297, -1.00000000000000],
[0.43343778045617, -0.66250646855334, -1.00000000000000],
[0.55935495152827, -0.68506801977964, -1.00000000000000],
[0.66621987519844, -0.71439175458728, -1.00000000000000],
[0.75149420255261, -0.75149420255261, -1.00000000000000],
[-1.00000000000000, -0.62890813726522, -1.00000000000000],
[-0.94867846854126, -0.58431589231501, -1.00000000000000],
[-0.86787251363754, -0.54930144164810, -1.00000000000000],
[-0.76162101409829, -0.52271163373050, -1.00000000000000],
[-0.63445303796278, -0.50360674986693, -1.00000000000000],
[-0.49127187212102, -0.49127187212102, -1.00000000000000],
[-0.33722289443963, -0.48522553203446, -1.00000000000000],
[-0.17755157352591, -0.48522553203446, -1.00000000000000],
[-0.01745625575796, -0.49127187212102, -1.00000000000000],
[0.13805978782971, -0.50360674986693, -1.00000000000000],
[0.28433264782879, -0.52271163373050, -1.00000000000000],
[0.41717395528564, -0.54930144164810, -1.00000000000000],
[0.53299436085627, -0.58431589231501, -1.00000000000000],
[0.62890813726522, -0.62890813726522, -1.00000000000000],
[-1.00000000000000, -0.48822928568071, -1.00000000000000],
[-0.94648934393266, -0.43768590012289, -1.00000000000000],
[-0.86353773463513, -0.39842216100643, -1.00000000000000],
[-0.75566022473134, -0.36919858094923, -1.00000000000000],
[-0.62780542211337, -0.34903506888062, -1.00000000000000],
[-0.48522553203446, -0.33722289443963, -1.00000000000000],
[-0.33333333333333, -0.33333333333333, -1.00000000000000],
[-0.17755157352591, -0.33722289443963, -1.00000000000000],
[-0.02315950900602, -0.34903506888062, -1.00000000000000],
[0.12485880568057, -0.36919858094923, -1.00000000000000],
[0.26195989564156, -0.39842216100643, -1.00000000000000],
[0.38417524405555, -0.43768590012289, -1.00000000000000],
[0.48822928568071, -0.48822928568071, -1.00000000000000],
[-1.00000000000000, -0.33350484782450, -1.00000000000000],
[-0.94511287539771, -0.27882755855641, -1.00000000000000],
[-0.86103620266103, -0.23701502595382, -1.00000000000000],
[-0.75275399873620, -0.20679219426527, -1.00000000000000],
[-0.62562226825673, -0.18718886587164, -1.00000000000000],
[-0.48522553203446, -0.17755157352591, -1.00000000000000],
[-0.33722289443963, -0.17755157352591, -1.00000000000000],
[-0.18718886587164, -0.18718886587164, -1.00000000000000],
[-0.04045380699852, -0.20679219426527, -1.00000000000000],
[0.09805122861485, -0.23701502595382, -1.00000000000000],
[0.22394043395412, -0.27882755855641, -1.00000000000000],
[0.33350484782450, -0.33350484782450, -1.00000000000000],
[-1.00000000000000, -0.16918602340928, -1.00000000000000],
[-0.94444963013195, -0.11238110084642, -1.00000000000000],
[-0.86021897052234, -0.06989051473883, -1.00000000000000],
[-0.75275399873620, -0.04045380699852, -1.00000000000000],
[-0.62780542211337, -0.02315950900602, -1.00000000000000],
[-0.49127187212102, -0.01745625575796, -1.00000000000000],
[-0.34903506888062, -0.02315950900602, -1.00000000000000],
[-0.20679219426527, -0.04045380699852, -1.00000000000000],
[-0.06989051473883, -0.06989051473883, -1.00000000000000],
[0.05683073097837, -0.11238110084642, -1.00000000000000],
[0.16918602340928, -0.16918602340928, -1.00000000000000],
[-1.00000000000000, 0.00000000000000, -1.00000000000000],
[-0.94444963013195, 0.05683073097837, -1.00000000000000],
[-0.86103620266103, 0.09805122861485, -1.00000000000000],
[-0.75566022473134, 0.12485880568057, -1.00000000000000],
[-0.63445303796278, 0.13805978782971, -1.00000000000000],
[-0.50360674986693, 0.13805978782971, -1.00000000000000],
[-0.36919858094923, 0.12485880568057, -1.00000000000000],
[-0.23701502595382, 0.09805122861485, -1.00000000000000],
[-0.11238110084642, 0.05683073097837, -1.00000000000000],
[0.00000000000000, 0.00000000000000, -1.00000000000000],
[-1.00000000000000, 0.16918602340928, -1.00000000000000],
[-0.94511287539771, 0.22394043395412, -1.00000000000000],
[-0.86353773463513, 0.26195989564156, -1.00000000000000],
[-0.76162101409829, 0.28433264782879, -1.00000000000000],
[-0.64585840843297, 0.29171681686594, -1.00000000000000],
[-0.52271163373050, 0.28433264782879, -1.00000000000000],
[-0.39842216100643, 0.26195989564156, -1.00000000000000],
[-0.27882755855641, 0.22394043395412, -1.00000000000000],
[-0.16918602340928, 0.16918602340928, -1.00000000000000],
[-1.00000000000000, 0.33350484782450, -1.00000000000000],
[-0.94648934393266, 0.38417524405555, -1.00000000000000],
[-0.86787251363754, 0.41717395528564, -1.00000000000000],
[-0.77093131190284, 0.43343778045617, -1.00000000000000],
[-0.66250646855334, 0.43343778045617, -1.00000000000000],
[-0.54930144164810, 0.41717395528564, -1.00000000000000],
[-0.43768590012289, 0.38417524405555, -1.00000000000000],
[-0.33350484782450, 0.33350484782450, -1.00000000000000],
[-1.00000000000000, 0.48822928568071, -1.00000000000000],
[-0.94867846854126, 0.53299436085627, -1.00000000000000],
[-0.87428693174862, 0.55935495152827, -1.00000000000000],
[-0.78402924082624, 0.56805848165248, -1.00000000000000],
[-0.68506801977964, 0.55935495152827, -1.00000000000000],
[-0.58431589231501, 0.53299436085627, -1.00000000000000],
[-0.48822928568071, 0.48822928568071, -1.00000000000000],
[-1.00000000000000, 0.62890813726522, -1.00000000000000],
[-0.95182812061116, 0.66621987519844, -1.00000000000000],
[-0.88312209180235, 0.68461312133806, -1.00000000000000],
[-0.80149102953572, 0.68461312133806, -1.00000000000000],
[-0.71439175458728, 0.66621987519844, -1.00000000000000],
[-0.62890813726522, 0.62890813726522, -1.00000000000000],
[-1.00000000000000, 0.75149420255261, -1.00000000000000],
[-0.95613320518782, 0.78015760232082, -1.00000000000000],
[-0.89481010149642, 0.78962020299283, -1.00000000000000],
[-0.82402439713301, 0.78015760232083, -1.00000000000000],
[-0.75149420255261, 0.75149420255261, -1.00000000000000],
[-1.00000000000000, 0.85246057779665, -1.00000000000000],
[-0.96183417565652, 0.87170382160643, -1.00000000000000],
[-0.90986964594991, 0.87170382160643, -1.00000000000000],
[-0.85246057779665, 0.85246057779665, -1.00000000000000],
[-1.00000000000000, 0.92890152815259, -1.00000000000000],
[-0.96921630071687, 0.93843260143372, -1.00000000000000],
[-0.92890152815259, 0.92890152815259, -1.00000000000000],
[-1.00000000000000, 0.97861176622208, -1.00000000000000],
[-0.97861176622208, 0.97861176622208, -1.00000000000000],
[-1.00000000000000, -1.00000000000000, -0.97861176622208],
[-0.96921630071686, -1.00000000000000, -0.96921630071687],
[-0.90986964594991, -1.00000000000000, -0.96183417565652],
[-0.82402439713301, -1.00000000000000, -0.95613320518782],
[-0.71439175458728, -1.00000000000000, -0.95182812061116],
[-0.58431589231501, -1.00000000000000, -0.94867846854126],
[-0.43768590012289, -1.00000000000000, -0.94648934393266],
[-0.27882755855641, -1.00000000000000, -0.94511287539771],
[-0.11238110084642, -1.00000000000000, -0.94444963013195],
[0.05683073097837, -1.00000000000000, -0.94444963013195],
[0.22394043395412, -1.00000000000000, -0.94511287539771],
[0.38417524405555, -1.00000000000000, -0.94648934393266],
[0.53299436085627, -1.00000000000000, -0.94867846854126],
[0.66621987519844, -1.00000000000000, -0.95182812061116],
[0.78015760232083, -1.00000000000000, -0.95613320518782],
[0.87170382160643, -1.00000000000000, -0.96183417565652],
[0.93843260143372, -1.00000000000000, -0.96921630071686],
[0.97861176622208, -1.00000000000000, -0.97861176622208],
[-1.00000000000000, -0.96921630071686, -0.96921630071687],
[-0.95020261978982, -0.95020261978982, -0.95020261978982],
[-0.89636070430775, -0.93662610527592, -0.93662610527592],
[-0.81407483050090, -0.92912322243421, -0.92912322243421],
[-0.70423159131223, -0.92455283614781, -0.92455283614781],
[-0.57071207463121, -0.92164607327834, -0.92164607327834],
[-0.41855377344348, -0.91983334031592, -0.91983334031592],
[-0.25345243902624, -0.91883441162852, -0.91883441162852],
[-0.08148510339275, -0.91851489660725, -0.91851489660725],
[0.09112126228328, -0.91883441162852, -0.91883441162852],
[0.25822045407531, -0.91983334031592, -0.91983334031592],
[0.41400422118789, -0.92164607327834, -0.92164607327834],
[0.55333726360785, -0.92455283614781, -0.92455283614781],
[0.67232127536933, -0.92912322243421, -0.92912322243421],
[0.76961291485959, -0.93662610527592, -0.93662610527592],
[0.85060785936946, -0.95020261978982, -0.95020261978982],
[0.93843260143372, -0.96921630071686, -0.96921630071687],
[-1.00000000000000, -0.90986964594991, -0.96183417565652],
[-0.93662610527592, -0.89636070430775, -0.93662610527592],
[-0.87075027964005, -0.87075027964005, -0.92365055664618],
[-0.77988120333450, -0.85424182173311, -0.91644803161939],
[-0.66447096686623, -0.84351520488171, -0.91213357461004],
[-0.52830742254336, -0.83657760165055, -0.90948715646774],
[-0.37639651999271, -0.83238749086458, -0.90794227941269],
[-0.21434467481798, -0.83040818807740, -0.90722633956355],
[-0.04802079754107, -0.83040818807740, -0.90722633956355],
[0.11672629026997, -0.83238749086458, -0.90794227941269],
[0.27437218066165, -0.83657760165055, -0.90948715646774],
[0.42011974635798, -0.84351520488171, -0.91213357461004],
[0.55057105668700, -0.85424182173311, -0.91644803161939],
[0.66515111592627, -0.87075027964005, -0.92365055664618],
[0.76961291485958, -0.89636070430775, -0.93662610527592],
[0.87170382160643, -0.90986964594991, -0.96183417565652],
[-1.00000000000000, -0.82402439713301, -0.95613320518782],
[-0.92912322243421, -0.81407483050090, -0.92912322243421],
[-0.85424182173311, -0.77988120333450, -0.91644803161939],
[-0.75639330816958, -0.75639330816958, -0.90953070252583],
[-0.63641853842956, -0.74076899503587, -0.90546778458822],
[-0.49821762933626, -0.73078382574408, -0.90306867344143],
[-0.34679050471314, -0.72520086770922, -0.90179031390690],
[-0.18760481018088, -0.72340251365098, -0.90138786598726],
[-0.02621831367073, -0.72520086770922, -0.90179031390690],
[0.13207012852178, -0.73078382574408, -0.90306867344143],
[0.28265531805365, -0.74076899503587, -0.90546778458822],
[0.42231731886500, -0.75639330816958, -0.90953070252583],
[0.55057105668700, -0.77988120333450, -0.91644803161939],
[0.67232127536933, -0.81407483050090, -0.92912322243421],
[0.78015760232082, -0.82402439713301, -0.95613320518782],
[-1.00000000000000, -0.71439175458728, -0.95182812061116],
[-0.92455283614781, -0.70423159131223, -0.92455283614781],
[-0.84351520488171, -0.66447096686623, -0.91213357461004],
[-0.74076899503587, -0.63641853842956, -0.90546778458822],
[-0.61776031453800, -0.61776031453800, -0.90164936399329],
[-0.47860498936408, -0.60631564176952, -0.89951291559293],
[-0.32833227112600, -0.60086228900483, -0.89854561120942],
[-0.17225982865975, -0.60086228900483, -0.89854561120942],
[-0.01556645327346, -0.60631564176952, -0.89951291559293],
[0.13716999306930, -0.61776031453800, -0.90164936399329],
[0.28265531805365, -0.63641853842956, -0.90546778458822],
[0.42011974635798, -0.66447096686623, -0.91213357461004],
[0.55333726360785, -0.70423159131223, -0.92455283614781],
[0.66621987519844, -0.71439175458728, -0.95182812061116],
[-1.00000000000000, -0.58431589231501, -0.94867846854126],
[-0.92164607327834, -0.57071207463121, -0.92164607327834],
[-0.83657760165055, -0.52830742254336, -0.90948715646774],
[-0.73078382574408, -0.49821762933626, -0.90306867344143],
[-0.60631564176952, -0.47860498936408, -0.89951291559293],
[-0.46752640816546, -0.46752640816546, -0.89768698260900],
[-0.31946880693230, -0.46394050213782, -0.89712188399759],
[-0.16726020106008, -0.46752640816546, -0.89768698260900],
[-0.01556645327346, -0.47860498936408, -0.89951291559293],
[0.13207012852178, -0.49821762933626, -0.90306867344143],
[0.27437218066165, -0.52830742254336, -0.90948715646774],
[0.41400422118789, -0.57071207463121, -0.92164607327834],
[0.53299436085627, -0.58431589231501, -0.94867846854126],
[-1.00000000000000, -0.43768590012289, -0.94648934393266],
[-0.91983334031592, -0.41855377344348, -0.91983334031592],
[-0.83238749086458, -0.37639651999271, -0.90794227941269],
[-0.72520086770922, -0.34679050471314, -0.90179031390690],
[-0.60086228900483, -0.32833227112600, -0.89854561120942],
[-0.46394050213782, -0.31946880693230, -0.89712188399759],
[-0.31946880693230, -0.31946880693230, -0.89712188399759],
[-0.17225982865975, -0.32833227112600, -0.89854561120942],
[-0.02621831367073, -0.34679050471314, -0.90179031390690],
[0.11672629026997, -0.37639651999271, -0.90794227941269],
[0.25822045407531, -0.41855377344348, -0.91983334031592],
[0.38417524405555, -0.43768590012289, -0.94648934393266],
[-1.00000000000000, -0.27882755855641, -0.94511287539771],
[-0.91883441162852, -0.25345243902624, -0.91883441162852],
[-0.83040818807740, -0.21434467481798, -0.90722633956355],
[-0.72340251365098, -0.18760481018088, -0.90138786598726],
[-0.60086228900483, -0.17225982865975, -0.89854561120942],
[-0.46752640816546, -0.16726020106008, -0.89768698260900],
[-0.32833227112600, -0.17225982865975, -0.89854561120942],
[-0.18760481018088, -0.18760481018088, -0.90138786598726],
[-0.04802079754107, -0.21434467481798, -0.90722633956355],
[0.09112126228329, -0.25345243902624, -0.91883441162852],
[0.22394043395412, -0.27882755855641, -0.94511287539771],
[-1.00000000000000, -0.11238110084642, -0.94444963013195],
[-0.91851489660725, -0.08148510339275, -0.91851489660725],
[-0.83040818807740, -0.04802079754107, -0.90722633956355],
[-0.72520086770922, -0.02621831367073, -0.90179031390690],
[-0.60631564176952, -0.01556645327346, -0.89951291559293],
[-0.47860498936408, -0.01556645327346, -0.89951291559293],
[-0.34679050471314, -0.02621831367073, -0.90179031390690],
[-0.21434467481798, -0.04802079754107, -0.90722633956355],
[-0.08148510339275, -0.08148510339275, -0.91851489660725],
[0.05683073097837, -0.11238110084642, -0.94444963013195],
[-1.00000000000000, 0.05683073097837, -0.94444963013195],
[-0.91883441162852, 0.09112126228328, -0.91883441162852],
[-0.83238749086458, 0.11672629026997, -0.90794227941269],
[-0.73078382574408, 0.13207012852178, -0.90306867344143],
[-0.61776031453800, 0.13716999306930, -0.90164936399329],
[-0.49821762933626, 0.13207012852178, -0.90306867344143],
[-0.37639651999271, 0.11672629026997, -0.90794227941269],
[-0.25345243902624, 0.09112126228328, -0.91883441162852],
[-0.11238110084642, 0.05683073097837, -0.94444963013195],
[-1.00000000000000, 0.22394043395412, -0.94511287539771],
[-0.91983334031592, 0.25822045407531, -0.91983334031592],
[-0.83657760165055, 0.27437218066165, -0.90948715646774],
[-0.74076899503587, 0.28265531805365, -0.90546778458822],
[-0.63641853842956, 0.28265531805365, -0.90546778458822],
[-0.52830742254336, 0.27437218066165, -0.90948715646774],
[-0.41855377344348, 0.25822045407531, -0.91983334031592],
[-0.27882755855641, 0.22394043395412, -0.94511287539771],
[-1.00000000000000, 0.38417524405555, -0.94648934393266],
[-0.92164607327834, 0.41400422118789, -0.92164607327834],
[-0.84351520488171, 0.42011974635798, -0.91213357461004],
[-0.75639330816958, 0.42231731886500, -0.90953070252583],
[-0.66447096686623, 0.42011974635798, -0.91213357461004],
[-0.57071207463121, 0.41400422118789, -0.92164607327834],
[-0.43768590012289, 0.38417524405555, -0.94648934393266],
[-1.00000000000000, 0.53299436085627, -0.94867846854126],
[-0.92455283614781, 0.55333726360785, -0.92455283614781],
[-0.85424182173311, 0.55057105668700, -0.91644803161939],
[-0.77988120333450, 0.55057105668700, -0.91644803161939],
[-0.70423159131223, 0.55333726360785, -0.92455283614781],
[-0.58431589231501, 0.53299436085627, -0.94867846854126],
[-1.00000000000000, 0.66621987519844, -0.95182812061116],
[-0.92912322243421, 0.67232127536933, -0.92912322243421],
[-0.87075027964005, 0.66515111592627, -0.92365055664618],
[-0.81407483050090, 0.67232127536933, -0.92912322243421],
[-0.71439175458728, 0.66621987519844, -0.95182812061116],
[-1.00000000000000, 0.78015760232083, -0.95613320518782],
[-0.93662610527592, 0.76961291485958, -0.93662610527592],
[-0.89636070430775, 0.76961291485958, -0.93662610527592],
[-0.82402439713301, 0.78015760232083, -0.95613320518782],
[-1.00000000000000, 0.87170382160643, -0.96183417565652],
[-0.95020261978982, 0.85060785936946, -0.95020261978982],
[-0.90986964594991, 0.87170382160643, -0.96183417565652],
[-1.00000000000000, 0.93843260143372, -0.96921630071686],
[-0.96921630071687, 0.93843260143372, -0.96921630071686],
[-1.00000000000000, 0.97861176622208, -0.97861176622208],
[-1.00000000000000, -1.00000000000000, -0.92890152815259],
[-0.96183417565652, -1.00000000000000, -0.90986964594991],
[-0.89481010149642, -1.00000000000000, -0.89481010149642],
[-0.80149102953572, -1.00000000000000, -0.88312209180235],
[-0.68506801977964, -1.00000000000000, -0.87428693174862],
[-0.54930144164810, -1.00000000000000, -0.86787251363754],
[-0.39842216100643, -1.00000000000000, -0.86353773463513],
[-0.23701502595382, -1.00000000000000, -0.86103620266103],
[-0.06989051473883, -1.00000000000000, -0.86021897052234],
[0.09805122861485, -1.00000000000000, -0.86103620266103],
[0.26195989564156, -1.00000000000000, -0.86353773463513],
[0.41717395528564, -1.00000000000000, -0.86787251363754],
[0.55935495152827, -1.00000000000000, -0.87428693174862],
[0.68461312133806, -1.00000000000000, -0.88312209180234],
[0.78962020299283, -1.00000000000000, -0.89481010149641],
[0.87170382160643, -1.00000000000000, -0.90986964594991],
[0.92890152815259, -1.00000000000000, -0.92890152815259],
[-1.00000000000000, -0.96183417565652, -0.90986964594991],
[-0.93662610527592, -0.93662610527592, -0.89636070430775],
[-0.87075027964005, -0.92365055664618, -0.87075027964005],
[-0.77988120333450, -0.91644803161939, -0.85424182173311],
[-0.66447096686623, -0.91213357461004, -0.84351520488171],
[-0.52830742254336, -0.90948715646774, -0.83657760165055],
[-0.37639651999271, -0.90794227941269, -0.83238749086458],
[-0.21434467481798, -0.90722633956355, -0.83040818807740],
[-0.04802079754107, -0.90722633956355, -0.83040818807740],
[0.11672629026997, -0.90794227941269, -0.83238749086458],
[0.27437218066165, -0.90948715646774, -0.83657760165055],
[0.42011974635798, -0.91213357461004, -0.84351520488171],
[0.55057105668700, -0.91644803161939, -0.85424182173311],
[0.66515111592627, -0.92365055664618, -0.87075027964005],
[0.76961291485958, -0.93662610527592, -0.89636070430775],
[0.87170382160643, -0.96183417565652, -0.90986964594991],
[-1.00000000000000, -0.89481010149642, -0.89481010149642],
[-0.92365055664618, -0.87075027964005, -0.87075027964005],
[-0.84839105335568, -0.84839105335568, -0.84839105335568],
[-0.75171491241389, -0.83338493946422, -0.83338493946422],
[-0.63305540494471, -0.82358348252395, -0.82358348252395],
[-0.49597183364571, -0.81742789416128, -0.81742789416128],
[-0.34544887559606, -0.81403044464816, -0.81403044464816],
[-0.18705655429077, -0.81294344570923, -0.81294344570923],
[-0.02649023510762, -0.81403044464816, -0.81403044464816],
[0.13082762196828, -0.81742789416128, -0.81742789416128],
[0.28022236999262, -0.82358348252395, -0.82358348252395],
[0.41848479134233, -0.83338493946422, -0.83338493946422],
[0.54517316006704, -0.84839105335568, -0.84839105335568],
[0.66515111592627, -0.87075027964005, -0.87075027964005],
[0.78962020299283, -0.89481010149642, -0.89481010149642],
[-1.00000000000000, -0.80149102953572, -0.88312209180235],
[-0.91644803161939, -0.77988120333450, -0.85424182173311],
[-0.83338493946422, -0.75171491241389, -0.83338493946422],
[-0.73126457126186, -0.73126457126186, -0.81967807323490],
[-0.60960230657246, -0.71756638414979, -0.81090684444452],
[-0.47192181765812, -0.70918399460409, -0.80567770832935],
[-0.32310215627271, -0.70520420515783, -0.80322983337844],
[-0.16846380519102, -0.70520420515783, -0.80322983337844],
[-0.01321647940843, -0.70918399460409, -0.80567770832935],
[0.13807553516676, -0.71756638414979, -0.81090684444452],
[0.28220721575863, -0.73126457126186, -0.81967807323490],
[0.41848479134232, -0.75171491241389, -0.83338493946422],
[0.55057105668700, -0.77988120333450, -0.85424182173311],
[0.68461312133807, -0.80149102953572, -0.88312209180235],
[-1.00000000000000, -0.68506801977964, -0.87428693174862],
[-0.91213357461004, -0.66447096686623, -0.84351520488171],
[-0.82358348252395, -0.63305540494471, -0.82358348252395],
[-0.71756638414979, -0.60960230657246, -0.81090684444452],
[-0.59405231379420, -0.59405231379420, -0.80310238773651],
[-0.45663502936177, -0.58523460790996, -0.79885128081555],
[-0.31006008254609, -0.58237982119733, -0.79750001371048],
[-0.15927908191273, -0.58523460790996, -0.79885128081555],
[-0.00879298467510, -0.59405231379420, -0.80310238773651],
[0.13807553516676, -0.60960230657246, -0.81090684444452],
[0.28022236999262, -0.63305540494471, -0.82358348252395],
[0.42011974635798, -0.66447096686623, -0.84351520488171],
[0.55935495152827, -0.68506801977964, -0.87428693174862],
[-1.00000000000000, -0.54930144164810, -0.86787251363754],
[-0.90948715646774, -0.52830742254336, -0.83657760165055],
[-0.81742789416128, -0.49597183364571, -0.81742789416128],
[-0.70918399460409, -0.47192181765812, -0.80567770832935],
[-0.58523460790996, -0.45663502936177, -0.79885128081555],
[-0.44925376666235, -0.44925376666235, -0.79570094663374],
[-0.30579152004156, -0.44925376666235, -0.79570094663374],
[-0.15927908191273, -0.45663502936177, -0.79885128081555],
[-0.01321647940843, -0.47192181765812, -0.80567770832935],
[0.13082762196828, -0.49597183364571, -0.81742789416128],
[0.27437218066165, -0.52830742254336, -0.83657760165055],
[0.41717395528564, -0.54930144164810, -0.86787251363754],
[-1.00000000000000, -0.39842216100643, -0.86353773463513],
[-0.90794227941269, -0.37639651999271, -0.83238749086458],
[-0.81403044464816, -0.34544887559606, -0.81403044464816],
[-0.70520420515783, -0.32310215627271, -0.80322983337844],
[-0.58237982119733, -0.31006008254609, -0.79750001371048],
[-0.44925376666235, -0.30579152004156, -0.79570094663374],
[-0.31006008254609, -0.31006008254609, -0.79750001371048],
[-0.16846380519102, -0.32310215627271, -0.80322983337844],
[-0.02649023510762, -0.34544887559606, -0.81403044464816],
[0.11672629026997, -0.37639651999271, -0.83238749086458],
[0.26195989564156, -0.39842216100643, -0.86353773463513],
[-1.00000000000000, -0.23701502595382, -0.86103620266103],
[-0.90722633956355, -0.21434467481798, -0.83040818807740],
[-0.81294344570923, -0.18705655429077, -0.81294344570923],
[-0.70520420515783, -0.16846380519102, -0.80322983337844],
[-0.58523460790996, -0.15927908191273, -0.79885128081555],
[-0.45663502936177, -0.15927908191273, -0.79885128081555],
[-0.32310215627271, -0.16846380519102, -0.80322983337844],
[-0.18705655429077, -0.18705655429077, -0.81294344570923],
[-0.04802079754107, -0.21434467481798, -0.83040818807740],
[0.09805122861485, -0.23701502595382, -0.86103620266103],
[-1.00000000000000, -0.06989051473883, -0.86021897052234],
[-0.90722633956355, -0.04802079754107, -0.83040818807740],
[-0.81403044464816, -0.02649023510762, -0.81403044464816],
[-0.70918399460409, -0.01321647940843, -0.80567770832935],
[-0.59405231379420, -0.00879298467510, -0.80310238773651],
[-0.47192181765812, -0.01321647940843, -0.80567770832935],
[-0.34544887559606, -0.02649023510762, -0.81403044464816],
[-0.21434467481798, -0.04802079754107, -0.83040818807740],
[-0.06989051473883, -0.06989051473883, -0.86021897052234],
[-1.00000000000000, 0.09805122861485, -0.86103620266103],
[-0.90794227941269, 0.11672629026997, -0.83238749086458],
[-0.81742789416128, 0.13082762196828, -0.81742789416128],
[-0.71756638414979, 0.13807553516676, -0.81090684444452],
[-0.60960230657246, 0.13807553516676, -0.81090684444452],
[-0.49597183364571, 0.13082762196828, -0.81742789416128],
[-0.37639651999271, 0.11672629026997, -0.83238749086458],
[-0.23701502595382, 0.09805122861485, -0.86103620266103],
[-1.00000000000000, 0.26195989564156, -0.86353773463513],
[-0.90948715646774, 0.27437218066165, -0.83657760165055],
[-0.82358348252395, 0.28022236999262, -0.82358348252395],
[-0.73126457126186, 0.28220721575863, -0.81967807323490],
[-0.63305540494471, 0.28022236999262, -0.82358348252395],
[-0.52830742254336, 0.27437218066165, -0.83657760165055],
[-0.39842216100643, 0.26195989564156, -0.86353773463513],
[-1.00000000000000, 0.41717395528564, -0.86787251363754],
[-0.91213357461004, 0.42011974635798, -0.84351520488171],
[-0.83338493946422, 0.41848479134232, -0.83338493946422],
[-0.75171491241389, 0.41848479134232, -0.83338493946422],
[-0.66447096686623, 0.42011974635798, -0.84351520488171],
[-0.54930144164810, 0.41717395528564, -0.86787251363754],
[-1.00000000000000, 0.55935495152827, -0.87428693174862],
[-0.91644803161939, 0.55057105668700, -0.85424182173311],
[-0.84839105335568, 0.54517316006704, -0.84839105335568],
[-0.77988120333450, 0.55057105668700, -0.85424182173311],
[-0.68506801977964, 0.55935495152827, -0.87428693174862],
[-1.00000000000000, 0.68461312133806, -0.88312209180234],
[-0.92365055664618, 0.66515111592627, -0.87075027964005],
[-0.87075027964005, 0.66515111592627, -0.87075027964005],
[-0.80149102953572, 0.68461312133806, -0.88312209180234],
[-1.00000000000000, 0.78962020299283, -0.89481010149641],
[-0.93662610527592, 0.76961291485958, -0.89636070430775],
[-0.89481010149641, 0.78962020299283, -0.89481010149641],
[-1.00000000000000, 0.87170382160643, -0.90986964594991],
[-0.96183417565652, 0.87170382160643, -0.90986964594991],
[-1.00000000000000, 0.92890152815259, -0.92890152815259],
[-1.00000000000000, -1.00000000000000, -0.85246057779665],
[-0.95613320518782, -1.00000000000000, -0.82402439713301],
[-0.88312209180234, -1.00000000000000, -0.80149102953572],
[-0.78402924082624, -1.00000000000000, -0.78402924082624],
[-0.66250646855334, -1.00000000000000, -0.77093131190284],
[-0.52271163373050, -1.00000000000000, -0.76162101409829],
[-0.36919858094923, -1.00000000000000, -0.75566022473134],
[-0.20679219426527, -1.00000000000000, -0.75275399873620],
[-0.04045380699852, -1.00000000000000, -0.75275399873620],
[0.12485880568057, -1.00000000000000, -0.75566022473134],
[0.28433264782879, -1.00000000000000, -0.76162101409829],
[0.43343778045617, -1.00000000000000, -0.77093131190284],
[0.56805848165248, -1.00000000000000, -0.78402924082624],
[0.68461312133807, -1.00000000000000, -0.80149102953572],
[0.78015760232082, -1.00000000000000, -0.82402439713301],
[0.85246057779665, -1.00000000000000, -0.85246057779665],
[-1.00000000000000, -0.95613320518782, -0.82402439713301],
[-0.92912322243421, -0.92912322243421, -0.81407483050090],
[-0.85424182173311, -0.91644803161939, -0.77988120333450],
[-0.75639330816958, -0.90953070252583, -0.75639330816958],
[-0.63641853842956, -0.90546778458822, -0.74076899503587],
[-0.49821762933626, -0.90306867344143, -0.73078382574408],
[-0.34679050471314, -0.90179031390690, -0.72520086770922],
[-0.18760481018088, -0.90138786598726, -0.72340251365098],
[-0.02621831367073, -0.90179031390690, -0.72520086770922],
[0.13207012852178, -0.90306867344143, -0.73078382574408],
[0.28265531805365, -0.90546778458822, -0.74076899503587],
[0.42231731886500, -0.90953070252583, -0.75639330816958],
[0.55057105668700, -0.91644803161939, -0.77988120333450],
[0.67232127536933, -0.92912322243421, -0.81407483050090],
[0.78015760232083, -0.95613320518782, -0.82402439713301],
[-1.00000000000000, -0.88312209180234, -0.80149102953572],
[-0.91644803161939, -0.85424182173311, -0.77988120333450],
[-0.83338493946422, -0.83338493946422, -0.75171491241389],
[-0.73126457126186, -0.81967807323490, -0.73126457126186],
[-0.60960230657246, -0.81090684444452, -0.71756638414979],
[-0.47192181765812, -0.80567770832935, -0.70918399460409],
[-0.32310215627271, -0.80322983337844, -0.70520420515783],
[-0.16846380519102, -0.80322983337844, -0.70520420515783],
[-0.01321647940843, -0.80567770832935, -0.70918399460409],
[0.13807553516676, -0.81090684444452, -0.71756638414979],
[0.28220721575863, -0.81967807323490, -0.73126457126186],
[0.41848479134232, -0.83338493946422, -0.75171491241389],
[0.55057105668700, -0.85424182173311, -0.77988120333450],
[0.68461312133806, -0.88312209180234, -0.80149102953572],
[-1.00000000000000, -0.78402924082624, -0.78402924082624],
[-0.90953070252583, -0.75639330816958, -0.75639330816958],
[-0.81967807323490, -0.73126457126186, -0.73126457126186],
[-0.71341546709955, -0.71341546709955, -0.71341546709955],
[-0.59007928566553, -0.70179897659711, -0.70179897659711],
[-0.45300248894697, -0.69527817901571, -0.69527817901571],
[-0.30682283717191, -0.69317716282809, -0.69317716282809],
[-0.15644115302162, -0.69527817901571, -0.69527817901571],
[-0.00632276114025, -0.70179897659711, -0.70179897659711],
[0.14024640129865, -0.71341546709955, -0.71341546709955],
[0.28220721575863, -0.73126457126186, -0.73126457126186],
[0.42231731886500, -0.75639330816958, -0.75639330816958],
[0.56805848165248, -0.78402924082624, -0.78402924082624],
[-1.00000000000000, -0.66250646855334, -0.77093131190284],
[-0.90546778458822, -0.63641853842956, -0.74076899503587],
[-0.81090684444452, -0.60960230657246, -0.71756638414979],
[-0.70179897659711, -0.59007928566553, -0.70179897659711],
[-0.57773232675737, -0.57773232675737, -0.69212321321649],
[-0.44194112200907, -0.57178609262114, -0.68753058657660],
[-0.29874219879319, -0.57178609262114, -0.68753058657660],
[-0.15241213326878, -0.57773232675737, -0.69212321321649],
[-0.00632276114025, -0.59007928566553, -0.70179897659711],
[0.13807553516676, -0.60960230657246, -0.71756638414979],
[0.28265531805365, -0.63641853842956, -0.74076899503587],
[0.43343778045617, -0.66250646855334, -0.77093131190284],
[-1.00000000000000, -0.52271163373050, -0.76162101409829],
[-0.90306867344143, -0.49821762933626, -0.73078382574408],
[-0.80567770832935, -0.47192181765812, -0.70918399460409],
[-0.69527817901571, -0.45300248894697, -0.69527817901571],
[-0.57178609262114, -0.44194112200907, -0.68753058657660],
[-0.43831873052124, -0.43831873052124, -0.68504380843629],
[-0.29874219879319, -0.44194112200907, -0.68753058657660],
[-0.15644115302162, -0.45300248894697, -0.69527817901571],
[-0.01321647940843, -0.47192181765812, -0.70918399460409],
[0.13207012852178, -0.49821762933626, -0.73078382574408],
[0.28433264782879, -0.52271163373050, -0.76162101409829],
[-1.00000000000000, -0.36919858094923, -0.75566022473134],
[-0.90179031390690, -0.34679050471314, -0.72520086770922],
[-0.80322983337844, -0.32310215627271, -0.70520420515783],
[-0.69317716282809, -0.30682283717191, -0.69317716282809],
[-0.57178609262114, -0.29874219879319, -0.68753058657660],
[-0.44194112200907, -0.29874219879319, -0.68753058657660],
[-0.30682283717191, -0.30682283717191, -0.69317716282809],
[-0.16846380519102, -0.32310215627271, -0.70520420515783],
[-0.02621831367073, -0.34679050471314, -0.72520086770922],
[0.12485880568057, -0.36919858094923, -0.75566022473134],
[-1.00000000000000, -0.20679219426527, -0.75275399873620],
[-0.90138786598726, -0.18760481018088, -0.72340251365098],
[-0.80322983337844, -0.16846380519102, -0.70520420515783],
[-0.69527817901571, -0.15644115302162, -0.69527817901571],
[-0.57773232675737, -0.15241213326878, -0.69212321321649],
[-0.45300248894697, -0.15644115302162, -0.69527817901571],
[-0.32310215627271, -0.16846380519102, -0.70520420515783],
[-0.18760481018088, -0.18760481018088, -0.72340251365098],
[-0.04045380699852, -0.20679219426527, -0.75275399873620],
[-1.00000000000000, -0.04045380699852, -0.75275399873620],
[-0.90179031390690, -0.02621831367073, -0.72520086770922],
[-0.80567770832935, -0.01321647940843, -0.70918399460409],
[-0.70179897659711, -0.00632276114025, -0.70179897659711],
[-0.59007928566553, -0.00632276114025, -0.70179897659711],
[-0.47192181765812, -0.01321647940843, -0.70918399460409],
[-0.34679050471314, -0.02621831367073, -0.72520086770922],
[-0.20679219426527, -0.04045380699852, -0.75275399873620],
[-1.00000000000000, 0.12485880568057, -0.75566022473134],
[-0.90306867344143, 0.13207012852178, -0.73078382574408],
[-0.81090684444452, 0.13807553516676, -0.71756638414979],
[-0.71341546709955, 0.14024640129865, -0.71341546709955],
[-0.60960230657246, 0.13807553516676, -0.71756638414979],
[-0.49821762933626, 0.13207012852178, -0.73078382574408],
[-0.36919858094923, 0.12485880568057, -0.75566022473134],
[-1.00000000000000, 0.28433264782879, -0.76162101409829],
[-0.90546778458822, 0.28265531805365, -0.74076899503587],
[-0.81967807323490, 0.28220721575863, -0.73126457126186],
[-0.73126457126186, 0.28220721575863, -0.73126457126186],
[-0.63641853842956, 0.28265531805365, -0.74076899503587],
[-0.52271163373050, 0.28433264782879, -0.76162101409829],
[-1.00000000000000, 0.43343778045617, -0.77093131190284],
[-0.90953070252583, 0.42231731886500, -0.75639330816958],
[-0.83338493946422, 0.41848479134232, -0.75171491241389],
[-0.75639330816958, 0.42231731886500, -0.75639330816958],
[-0.66250646855334, 0.43343778045617, -0.77093131190284],
[-1.00000000000000, 0.56805848165248, -0.78402924082624],
[-0.91644803161939, 0.55057105668700, -0.77988120333450],
[-0.85424182173311, 0.55057105668700, -0.77988120333450],
[-0.78402924082624, 0.56805848165248, -0.78402924082624],
[-1.00000000000000, 0.68461312133807, -0.80149102953572],
[-0.92912322243421, 0.67232127536933, -0.81407483050090],
[-0.88312209180235, 0.68461312133807, -0.80149102953572],
[-1.00000000000000, 0.78015760232082, -0.82402439713301],
[-0.95613320518782, 0.78015760232082, -0.82402439713301],
[-1.00000000000000, 0.85246057779665, -0.85246057779665],
[-1.00000000000000, -1.00000000000000, -0.75149420255261],
[-0.95182812061116, -1.00000000000000, -0.71439175458728],
[-0.87428693174862, -1.00000000000000, -0.68506801977964],
[-0.77093131190284, -1.00000000000000, -0.66250646855334],
[-0.64585840843297, -1.00000000000000, -0.64585840843297],
[-0.50360674986693, -1.00000000000000, -0.63445303796278],
[-0.34903506888062, -1.00000000000000, -0.62780542211337],
[-0.18718886587164, -1.00000000000000, -0.62562226825673],
[-0.02315950900602, -1.00000000000000, -0.62780542211337],
[0.13805978782971, -1.00000000000000, -0.63445303796278],
[0.29171681686594, -1.00000000000000, -0.64585840843297],
[0.43343778045617, -1.00000000000000, -0.66250646855334],
[0.55935495152827, -1.00000000000000, -0.68506801977964],
[0.66621987519844, -1.00000000000000, -0.71439175458728],
[0.75149420255261, -1.00000000000000, -0.75149420255261],
[-1.00000000000000, -0.95182812061116, -0.71439175458728],
[-0.92455283614781, -0.92455283614781, -0.70423159131223],
[-0.84351520488171, -0.91213357461004, -0.66447096686623],
[-0.74076899503587, -0.90546778458822, -0.63641853842956],
[-0.61776031453800, -0.90164936399329, -0.61776031453800],
[-0.47860498936408, -0.89951291559293, -0.60631564176952],
[-0.32833227112600, -0.89854561120942, -0.60086228900483],
[-0.17225982865975, -0.89854561120942, -0.60086228900483],
[-0.01556645327346, -0.89951291559293, -0.60631564176952],
[0.13716999306930, -0.90164936399329, -0.61776031453800],
[0.28265531805365, -0.90546778458822, -0.63641853842956],
[0.42011974635798, -0.91213357461004, -0.66447096686623],
[0.55333726360785, -0.92455283614781, -0.70423159131223],
[0.66621987519844, -0.95182812061116, -0.71439175458728],
[-1.00000000000000, -0.87428693174862, -0.68506801977964],
[-0.91213357461004, -0.84351520488171, -0.66447096686623],
[-0.82358348252395, -0.82358348252395, -0.63305540494471],
[-0.71756638414979, -0.81090684444452, -0.60960230657246],
[-0.59405231379420, -0.80310238773651, -0.59405231379420],
[-0.45663502936177, -0.79885128081555, -0.58523460790996],
[-0.31006008254609, -0.79750001371048, -0.58237982119733],
[-0.15927908191273, -0.79885128081555, -0.58523460790996],
[-0.00879298467510, -0.80310238773651, -0.59405231379420],
[0.13807553516676, -0.81090684444452, -0.60960230657246],
[0.28022236999262, -0.82358348252395, -0.63305540494471],
[0.42011974635798, -0.84351520488171, -0.66447096686623],
[0.55935495152827, -0.87428693174862, -0.68506801977964],
[-1.00000000000000, -0.77093131190284, -0.66250646855334],
[-0.90546778458822, -0.74076899503587, -0.63641853842956],
[-0.81090684444452, -0.71756638414979, -0.60960230657246],
[-0.70179897659711, -0.70179897659711, -0.59007928566553],
[-0.57773232675737, -0.69212321321649, -0.57773232675737],
[-0.44194112200907, -0.68753058657660, -0.57178609262114],
[-0.29874219879319, -0.68753058657660, -0.57178609262114],
[-0.15241213326878, -0.69212321321649, -0.57773232675737],
[-0.00632276114025, -0.70179897659711, -0.59007928566553],
[0.13807553516676, -0.71756638414979, -0.60960230657246],
[0.28265531805365, -0.74076899503587, -0.63641853842956],
[0.43343778045617, -0.77093131190284, -0.66250646855334],
[-1.00000000000000, -0.64585840843297, -0.64585840843297],
[-0.90164936399329, -0.61776031453800, -0.61776031453800],
[-0.80310238773651, -0.59405231379420, -0.59405231379420],
[-0.69212321321649, -0.57773232675737, -0.57773232675737],
[-0.56834602168433, -0.56834602168433, -0.56834602168433],
[-0.43470792783287, -0.56529207216713, -0.56529207216713],
[-0.29496193494703, -0.56834602168433, -0.56834602168433],
[-0.15241213326878, -0.57773232675737, -0.57773232675737],
[-0.00879298467510, -0.59405231379420, -0.59405231379420],
[0.13716999306930, -0.61776031453800, -0.61776031453800],
[0.29171681686594, -0.64585840843297, -0.64585840843297],
[-1.00000000000000, -0.50360674986693, -0.63445303796278],
[-0.89951291559293, -0.47860498936408, -0.60631564176952],
[-0.79885128081555, -0.45663502936177, -0.58523460790996],
[-0.68753058657660, -0.44194112200907, -0.57178609262114],
[-0.56529207216713, -0.43470792783287, -0.56529207216713],
[-0.43470792783287, -0.43470792783287, -0.56529207216713],
[-0.29874219879319, -0.44194112200907, -0.57178609262114],
[-0.15927908191273, -0.45663502936177, -0.58523460790996],
[-0.01556645327346, -0.47860498936408, -0.60631564176952],
[0.13805978782971, -0.50360674986693, -0.63445303796278],
[-1.00000000000000, -0.34903506888062, -0.62780542211337],
[-0.89854561120942, -0.32833227112600, -0.60086228900483],
[-0.79750001371048, -0.31006008254609, -0.58237982119733],
[-0.68753058657660, -0.29874219879319, -0.57178609262114],
[-0.56834602168433, -0.29496193494703, -0.56834602168433],
[-0.44194112200907, -0.29874219879319, -0.57178609262114],
[-0.31006008254609, -0.31006008254609, -0.58237982119733],
[-0.17225982865975, -0.32833227112600, -0.60086228900483],
[-0.02315950900602, -0.34903506888062, -0.62780542211337],
[-1.00000000000000, -0.18718886587164, -0.62562226825673],
[-0.89854561120942, -0.17225982865975, -0.60086228900483],
[-0.79885128081555, -0.15927908191273, -0.58523460790996],
[-0.69212321321649, -0.15241213326878, -0.57773232675737],
[-0.57773232675737, -0.15241213326878, -0.57773232675737],
[-0.45663502936177, -0.15927908191273, -0.58523460790996],
[-0.32833227112600, -0.17225982865975, -0.60086228900483],
[-0.18718886587164, -0.18718886587164, -0.62562226825673],
[-1.00000000000000, -0.02315950900602, -0.62780542211337],
[-0.89951291559293, -0.01556645327346, -0.60631564176952],
[-0.80310238773651, -0.00879298467510, -0.59405231379420],
[-0.70179897659711, -0.00632276114025, -0.59007928566553],
[-0.59405231379420, -0.00879298467510, -0.59405231379420],
[-0.47860498936408, -0.01556645327346, -0.60631564176952],
[-0.34903506888062, -0.02315950900602, -0.62780542211337],
[-1.00000000000000, 0.13805978782971, -0.63445303796278],
[-0.90164936399329, 0.13716999306930, -0.61776031453800],
[-0.81090684444452, 0.13807553516676, -0.60960230657246],
[-0.71756638414979, 0.13807553516676, -0.60960230657246],
[-0.61776031453800, 0.13716999306930, -0.61776031453800],
[-0.50360674986693, 0.13805978782971, -0.63445303796278],
[-1.00000000000000, 0.29171681686594, -0.64585840843297],
[-0.90546778458822, 0.28265531805365, -0.63641853842956],
[-0.82358348252395, 0.28022236999262, -0.63305540494471],
[-0.74076899503587, 0.28265531805365, -0.63641853842956],
[-0.64585840843297, 0.29171681686594, -0.64585840843297],
[-1.00000000000000, 0.43343778045617, -0.66250646855334],
[-0.91213357461004, 0.42011974635798, -0.66447096686623],
[-0.84351520488171, 0.42011974635798, -0.66447096686623],
[-0.77093131190284, 0.43343778045617, -0.66250646855334],
[-1.00000000000000, 0.55935495152827, -0.68506801977964],
[-0.92455283614781, 0.55333726360785, -0.70423159131223],
[-0.87428693174862, 0.55935495152827, -0.68506801977964],
[-1.00000000000000, 0.66621987519844, -0.71439175458728],
[-0.95182812061116, 0.66621987519844, -0.71439175458728],
[-1.00000000000000, 0.75149420255261, -0.75149420255261],
[-1.00000000000000, -1.00000000000000, -0.62890813726522],
[-0.94867846854126, -1.00000000000000, -0.58431589231501],
[-0.86787251363754, -1.00000000000000, -0.54930144164810],
[-0.76162101409829, -1.00000000000000, -0.52271163373050],
[-0.63445303796278, -1.00000000000000, -0.50360674986693],
[-0.49127187212102, -1.00000000000000, -0.49127187212102],
[-0.33722289443963, -1.00000000000000, -0.48522553203446],
[-0.17755157352591, -1.00000000000000, -0.48522553203446],
[-0.01745625575796, -1.00000000000000, -0.49127187212102],
[0.13805978782971, -1.00000000000000, -0.50360674986693],
[0.28433264782879, -1.00000000000000, -0.52271163373050],
[0.41717395528564, -1.00000000000000, -0.54930144164810],
[0.53299436085627, -1.00000000000000, -0.58431589231501],
[0.62890813726522, -1.00000000000000, -0.62890813726522],
[-1.00000000000000, -0.94867846854126, -0.58431589231501],
[-0.92164607327834, -0.92164607327834, -0.57071207463121],
[-0.83657760165055, -0.90948715646774, -0.52830742254336],
[-0.73078382574408, -0.90306867344143, -0.49821762933626],
[-0.60631564176952, -0.89951291559293, -0.47860498936408],
[-0.46752640816546, -0.89768698260900, -0.46752640816546],
[-0.31946880693230, -0.89712188399759, -0.46394050213782],
[-0.16726020106008, -0.89768698260900, -0.46752640816546],
[-0.01556645327346, -0.89951291559293, -0.47860498936408],
[0.13207012852178, -0.90306867344143, -0.49821762933626],
[0.27437218066165, -0.90948715646774, -0.52830742254336],
[0.41400422118789, -0.92164607327834, -0.57071207463121],
[0.53299436085627, -0.94867846854126, -0.58431589231501],
[-1.00000000000000, -0.86787251363754, -0.54930144164810],
[-0.90948715646774, -0.83657760165055, -0.52830742254336],
[-0.81742789416128, -0.81742789416128, -0.49597183364571],
[-0.70918399460409, -0.80567770832935, -0.47192181765812],
[-0.58523460790996, -0.79885128081555, -0.45663502936177],
[-0.44925376666235, -0.79570094663374, -0.44925376666235],
[-0.30579152004156, -0.79570094663374, -0.44925376666235],
[-0.15927908191273, -0.79885128081555, -0.45663502936177],
[-0.01321647940843, -0.80567770832935, -0.47192181765812],
[0.13082762196828, -0.81742789416128, -0.49597183364571],
[0.27437218066165, -0.83657760165055, -0.52830742254336],
[0.41717395528564, -0.86787251363754, -0.54930144164810],
[-1.00000000000000, -0.76162101409829, -0.52271163373050],
[-0.90306867344143, -0.73078382574408, -0.49821762933626],
[-0.80567770832935, -0.70918399460409, -0.47192181765812],
[-0.69527817901571, -0.69527817901571, -0.45300248894697],
[-0.57178609262114, -0.68753058657660, -0.44194112200907],
[-0.43831873052124, -0.68504380843629, -0.43831873052124],
[-0.29874219879319, -0.68753058657660, -0.44194112200907],
[-0.15644115302162, -0.69527817901571, -0.45300248894697],
[-0.01321647940843, -0.70918399460409, -0.47192181765812],
[0.13207012852178, -0.73078382574408, -0.49821762933626],
[0.28433264782879, -0.76162101409829, -0.52271163373050],
[-1.00000000000000, -0.63445303796278, -0.50360674986693],
[-0.89951291559293, -0.60631564176952, -0.47860498936408],
[-0.79885128081555, -0.58523460790996, -0.45663502936177],
[-0.68753058657660, -0.57178609262114, -0.44194112200907],
[-0.56529207216713, -0.56529207216713, -0.43470792783287],
[-0.43470792783287, -0.56529207216713, -0.43470792783287],
[-0.29874219879319, -0.57178609262114, -0.44194112200907],
[-0.15927908191273, -0.58523460790996, -0.45663502936177],
[-0.01556645327346, -0.60631564176952, -0.47860498936408],
[0.13805978782971, -0.63445303796278, -0.50360674986693],
[-1.00000000000000, -0.49127187212102, -0.49127187212102],
[-0.89768698260900, -0.46752640816546, -0.46752640816546],
[-0.79570094663374, -0.44925376666235, -0.44925376666235],
[-0.68504380843629, -0.43831873052124, -0.43831873052124],
[-0.56529207216713, -0.43470792783287, -0.43470792783287],
[-0.43831873052124, -0.43831873052124, -0.43831873052124],
[-0.30579152004156, -0.44925376666235, -0.44925376666235],
[-0.16726020106008, -0.46752640816546, -0.46752640816546],
[-0.01745625575796, -0.49127187212102, -0.49127187212102],
[-1.00000000000000, -0.33722289443963, -0.48522553203446],
[-0.89712188399759, -0.31946880693230, -0.46394050213782],
[-0.79570094663374, -0.30579152004156, -0.44925376666235],
[-0.68753058657660, -0.29874219879319, -0.44194112200907],
[-0.57178609262114, -0.29874219879319, -0.44194112200907],
[-0.44925376666235, -0.30579152004156, -0.44925376666235],
[-0.31946880693230, -0.31946880693230, -0.46394050213782],
[-0.17755157352591, -0.33722289443963, -0.48522553203446],
[-1.00000000000000, -0.17755157352591, -0.48522553203446],
[-0.89768698260900, -0.16726020106008, -0.46752640816546],
[-0.79885128081555, -0.15927908191273, -0.45663502936177],
[-0.69527817901571, -0.15644115302162, -0.45300248894697],
[-0.58523460790996, -0.15927908191273, -0.45663502936177],
[-0.46752640816546, -0.16726020106008, -0.46752640816546],
[-0.33722289443963, -0.17755157352591, -0.48522553203446],
[-1.00000000000000, -0.01745625575796, -0.49127187212102],
[-0.89951291559293, -0.01556645327346, -0.47860498936408],
[-0.80567770832935, -0.01321647940843, -0.47192181765812],
[-0.70918399460409, -0.01321647940843, -0.47192181765812],
[-0.60631564176952, -0.01556645327346, -0.47860498936408],
[-0.49127187212102, -0.01745625575796, -0.49127187212102],
[-1.00000000000000, 0.13805978782971, -0.50360674986693],
[-0.90306867344143, 0.13207012852178, -0.49821762933626],
[-0.81742789416128, 0.13082762196828, -0.49597183364571],
[-0.73078382574408, 0.13207012852178, -0.49821762933626],
[-0.63445303796278, 0.13805978782971, -0.50360674986693],
[-1.00000000000000, 0.28433264782879, -0.52271163373050],
[-0.90948715646774, 0.27437218066165, -0.52830742254336],
[-0.83657760165055, 0.27437218066165, -0.52830742254336],
[-0.76162101409829, 0.28433264782879, -0.52271163373050],
[-1.00000000000000, 0.41717395528564, -0.54930144164810],
[-0.92164607327834, 0.41400422118789, -0.57071207463121],
[-0.86787251363754, 0.41717395528564, -0.54930144164810],
[-1.00000000000000, 0.53299436085627, -0.58431589231501],
[-0.94867846854126, 0.53299436085627, -0.58431589231501],
[-1.00000000000000, 0.62890813726522, -0.62890813726522],
[-1.00000000000000, -1.00000000000000, -0.48822928568071],
[-0.94648934393266, -1.00000000000000, -0.43768590012289],
[-0.86353773463513, -1.00000000000000, -0.39842216100643],
[-0.75566022473134, -1.00000000000000, -0.36919858094923],
[-0.62780542211337, -1.00000000000000, -0.34903506888062],
[-0.48522553203446, -1.00000000000000, -0.33722289443963],
[-0.33333333333333, -1.00000000000000, -0.33333333333333],
[-0.17755157352591, -1.00000000000000, -0.33722289443963],
[-0.02315950900602, -1.00000000000000, -0.34903506888062],
[0.12485880568057, -1.00000000000000, -0.36919858094923],
[0.26195989564156, -1.00000000000000, -0.39842216100643],
[0.38417524405555, -1.00000000000000, -0.43768590012289],
[0.48822928568071, -1.00000000000000, -0.48822928568071],
[-1.00000000000000, -0.94648934393266, -0.43768590012289],
[-0.91983334031592, -0.91983334031592, -0.41855377344348],
[-0.83238749086458, -0.90794227941269, -0.37639651999271],
[-0.72520086770922, -0.90179031390690, -0.34679050471314],
[-0.60086228900483, -0.89854561120942, -0.32833227112600],
[-0.46394050213782, -0.89712188399759, -0.31946880693230],
[-0.31946880693230, -0.89712188399759, -0.31946880693230],
[-0.17225982865975, -0.89854561120942, -0.32833227112600],
[-0.02621831367073, -0.90179031390690, -0.34679050471314],
[0.11672629026997, -0.90794227941269, -0.37639651999271],
[0.25822045407531, -0.91983334031592, -0.41855377344348],
[0.38417524405555, -0.94648934393266, -0.43768590012289],
[-1.00000000000000, -0.86353773463513, -0.39842216100643],
[-0.90794227941269, -0.83238749086458, -0.37639651999271],
[-0.81403044464816, -0.81403044464816, -0.34544887559606],
[-0.70520420515783, -0.80322983337844, -0.32310215627271],
[-0.58237982119733, -0.79750001371048, -0.31006008254609],
[-0.44925376666235, -0.79570094663374, -0.30579152004156],
[-0.31006008254609, -0.79750001371048, -0.31006008254609],
[-0.16846380519102, -0.80322983337844, -0.32310215627271],
[-0.02649023510762, -0.81403044464816, -0.34544887559606],
[0.11672629026997, -0.83238749086458, -0.37639651999271],
[0.26195989564156, -0.86353773463513, -0.39842216100643],
[-1.00000000000000, -0.75566022473134, -0.36919858094923],
[-0.90179031390690, -0.72520086770922, -0.34679050471314],
[-0.80322983337844, -0.70520420515783, -0.32310215627271],
[-0.69317716282809, -0.69317716282809, -0.30682283717191],
[-0.57178609262114, -0.68753058657660, -0.29874219879319],
[-0.44194112200907, -0.68753058657660, -0.29874219879319],
[-0.30682283717191, -0.69317716282809, -0.30682283717191],
[-0.16846380519102, -0.70520420515783, -0.32310215627271],
[-0.02621831367073, -0.72520086770922, -0.34679050471314],
[0.12485880568057, -0.75566022473134, -0.36919858094923],
[-1.00000000000000, -0.62780542211337, -0.34903506888062],
[-0.89854561120942, -0.60086228900483, -0.32833227112600],
[-0.79750001371048, -0.58237982119733, -0.31006008254609],
[-0.68753058657660, -0.57178609262114, -0.29874219879319],
[-0.56834602168432, -0.56834602168433, -0.29496193494703],
[-0.44194112200907, -0.57178609262114, -0.29874219879319],
[-0.31006008254609, -0.58237982119733, -0.31006008254609],
[-0.17225982865975, -0.60086228900483, -0.32833227112600],
[-0.02315950900602, -0.62780542211337, -0.34903506888062],
[-1.00000000000000, -0.48522553203446, -0.33722289443963],
[-0.89712188399759, -0.46394050213782, -0.31946880693230],
[-0.79570094663374, -0.44925376666235, -0.30579152004156],
[-0.68753058657660, -0.44194112200907, -0.29874219879319],
[-0.57178609262114, -0.44194112200907, -0.29874219879319],
[-0.44925376666235, -0.44925376666235, -0.30579152004156],
[-0.31946880693230, -0.46394050213782, -0.31946880693230],
[-0.17755157352591, -0.48522553203446, -0.33722289443963],
[-1.00000000000000, -0.33333333333333, -0.33333333333333],
[-0.89712188399759, -0.31946880693230, -0.31946880693230],
[-0.79750001371048, -0.31006008254609, -0.31006008254609],
[-0.69317716282809, -0.30682283717191, -0.30682283717191],
[-0.58237982119733, -0.31006008254609, -0.31006008254609],
[-0.46394050213782, -0.31946880693230, -0.31946880693230],
[-0.33333333333333, -0.33333333333333, -0.33333333333333],
[-1.00000000000000, -0.17755157352591, -0.33722289443963],
[-0.89854561120942, -0.17225982865975, -0.32833227112600],
[-0.80322983337844, -0.16846380519102, -0.32310215627271],
[-0.70520420515783, -0.16846380519102, -0.32310215627271],
[-0.60086228900483, -0.17225982865975, -0.32833227112600],
[-0.48522553203446, -0.17755157352591, -0.33722289443963],
[-1.00000000000000, -0.02315950900602, -0.34903506888062],
[-0.90179031390690, -0.02621831367073, -0.34679050471314],
[-0.81403044464816, -0.02649023510762, -0.34544887559606],
[-0.72520086770922, -0.02621831367073, -0.34679050471314],
[-0.62780542211337, -0.02315950900602, -0.34903506888062],
[-1.00000000000000, 0.12485880568057, -0.36919858094923],
[-0.90794227941269, 0.11672629026997, -0.37639651999271],
[-0.83238749086458, 0.11672629026997, -0.37639651999271],
[-0.75566022473134, 0.12485880568057, -0.36919858094923],
[-1.00000000000000, 0.26195989564156, -0.39842216100643],
[-0.91983334031592, 0.25822045407531, -0.41855377344348],
[-0.86353773463513, 0.26195989564156, -0.39842216100643],
[-1.00000000000000, 0.38417524405555, -0.43768590012289],
[-0.94648934393266, 0.38417524405555, -0.43768590012289],
[-1.00000000000000, 0.48822928568071, -0.48822928568071],
[-1.00000000000000, -1.00000000000000, -0.33350484782450],
[-0.94511287539771, -1.00000000000000, -0.27882755855641],
[-0.86103620266103, -1.00000000000000, -0.23701502595382],
[-0.75275399873620, -1.00000000000000, -0.20679219426527],
[-0.62562226825673, -1.00000000000000, -0.18718886587164],
[-0.48522553203446, -1.00000000000000, -0.17755157352591],
[-0.33722289443963, -1.00000000000000, -0.17755157352591],
[-0.18718886587164, -1.00000000000000, -0.18718886587164],
[-0.04045380699852, -1.00000000000000, -0.20679219426527],
[0.09805122861485, -1.00000000000000, -0.23701502595382],
[0.22394043395412, -1.00000000000000, -0.27882755855641],
[0.33350484782450, -1.00000000000000, -0.33350484782450],
[-1.00000000000000, -0.94511287539771, -0.27882755855641],
[-0.91883441162852, -0.91883441162852, -0.25345243902624],
[-0.83040818807740, -0.90722633956355, -0.21434467481798],
[-0.72340251365098, -0.90138786598726, -0.18760481018088],
[-0.60086228900483, -0.89854561120942, -0.17225982865975],
[-0.46752640816546, -0.89768698260900, -0.16726020106008],
[-0.32833227112600, -0.89854561120942, -0.17225982865975],
[-0.18760481018088, -0.90138786598726, -0.18760481018088],
[-0.04802079754107, -0.90722633956355, -0.21434467481798],
[0.09112126228328, -0.91883441162852, -0.25345243902624],
[0.22394043395412, -0.94511287539771, -0.27882755855641],
[-1.00000000000000, -0.86103620266103, -0.23701502595382],
[-0.90722633956355, -0.83040818807740, -0.21434467481798],
[-0.81294344570923, -0.81294344570923, -0.18705655429077],
[-0.70520420515783, -0.80322983337844, -0.16846380519102],
[-0.58523460790996, -0.79885128081555, -0.15927908191273],
[-0.45663502936177, -0.79885128081555, -0.15927908191273],
[-0.32310215627271, -0.80322983337844, -0.16846380519102],
[-0.18705655429077, -0.81294344570923, -0.18705655429077],
[-0.04802079754107, -0.83040818807740, -0.21434467481798],
[0.09805122861485, -0.86103620266103, -0.23701502595382],
[-1.00000000000000, -0.75275399873620, -0.20679219426527],
[-0.90138786598726, -0.72340251365098, -0.18760481018088],
[-0.80322983337844, -0.70520420515783, -0.16846380519102],
[-0.69527817901571, -0.69527817901571, -0.15644115302162],
[-0.57773232675737, -0.69212321321649, -0.15241213326878],
[-0.45300248894697, -0.69527817901571, -0.15644115302162],
[-0.32310215627271, -0.70520420515783, -0.16846380519102],
[-0.18760481018088, -0.72340251365098, -0.18760481018088],
[-0.04045380699852, -0.75275399873620, -0.20679219426527],
[-1.00000000000000, -0.62562226825673, -0.18718886587164],
[-0.89854561120942, -0.60086228900483, -0.17225982865975],
[-0.79885128081555, -0.58523460790996, -0.15927908191273],
[-0.69212321321649, -0.57773232675737, -0.15241213326878],
[-0.57773232675737, -0.57773232675737, -0.15241213326878],
[-0.45663502936177, -0.58523460790996, -0.15927908191273],
[-0.32833227112600, -0.60086228900483, -0.17225982865975],
[-0.18718886587164, -0.62562226825673, -0.18718886587164],
[-1.00000000000000, -0.48522553203446, -0.17755157352591],
[-0.89768698260900, -0.46752640816546, -0.16726020106008],
[-0.79885128081555, -0.45663502936177, -0.15927908191273],
[-0.69527817901571, -0.45300248894697, -0.15644115302162],
[-0.58523460790996, -0.45663502936177, -0.15927908191273],
[-0.46752640816546, -0.46752640816546, -0.16726020106008],
[-0.33722289443963, -0.48522553203446, -0.17755157352591],
[-1.00000000000000, -0.33722289443963, -0.17755157352591],
[-0.89854561120942, -0.32833227112600, -0.17225982865975],
[-0.80322983337844, -0.32310215627271, -0.16846380519102],
[-0.70520420515783, -0.32310215627271, -0.16846380519102],
[-0.60086228900483, -0.32833227112600, -0.17225982865975],
[-0.48522553203446, -0.33722289443963, -0.17755157352591],
[-1.00000000000000, -0.18718886587164, -0.18718886587164],
[-0.90138786598726, -0.18760481018088, -0.18760481018088],
[-0.81294344570923, -0.18705655429077, -0.18705655429077],
[-0.72340251365098, -0.18760481018088, -0.18760481018088],
[-0.62562226825673, -0.18718886587164, -0.18718886587164],
[-1.00000000000000, -0.04045380699852, -0.20679219426527],
[-0.90722633956355, -0.04802079754107, -0.21434467481798],
[-0.83040818807740, -0.04802079754107, -0.21434467481798],
[-0.75275399873620, -0.04045380699852, -0.20679219426527],
[-1.00000000000000, 0.09805122861485, -0.23701502595382],
[-0.91883441162852, 0.09112126228328, -0.25345243902624],
[-0.86103620266103, 0.09805122861485, -0.23701502595382],
[-1.00000000000000, 0.22394043395412, -0.27882755855641],
[-0.94511287539771, 0.22394043395412, -0.27882755855641],
[-1.00000000000000, 0.33350484782450, -0.33350484782450],
[-1.00000000000000, -1.00000000000000, -0.16918602340928],
[-0.94444963013195, -1.00000000000000, -0.11238110084642],
[-0.86021897052234, -1.00000000000000, -0.06989051473883],
[-0.75275399873620, -1.00000000000000, -0.04045380699852],
[-0.62780542211337, -1.00000000000000, -0.02315950900602],
[-0.49127187212102, -1.00000000000000, -0.01745625575796],
[-0.34903506888062, -1.00000000000000, -0.02315950900602],
[-0.20679219426527, -1.00000000000000, -0.04045380699852],
[-0.06989051473883, -1.00000000000000, -0.06989051473883],
[0.05683073097837, -1.00000000000000, -0.11238110084642],
[0.16918602340928, -1.00000000000000, -0.16918602340928],
[-1.00000000000000, -0.94444963013195, -0.11238110084642],
[-0.91851489660725, -0.91851489660725, -0.08148510339275],
[-0.83040818807740, -0.90722633956355, -0.04802079754107],
[-0.72520086770922, -0.90179031390690, -0.02621831367073],
[-0.60631564176952, -0.89951291559293, -0.01556645327346],
[-0.47860498936408, -0.89951291559293, -0.01556645327346],
[-0.34679050471314, -0.90179031390690, -0.02621831367073],
[-0.21434467481798, -0.90722633956355, -0.04802079754107],
[-0.08148510339275, -0.91851489660725, -0.08148510339275],
[0.05683073097837, -0.94444963013195, -0.11238110084642],
[-1.00000000000000, -0.86021897052234, -0.06989051473883],
[-0.90722633956355, -0.83040818807740, -0.04802079754107],
[-0.81403044464816, -0.81403044464816, -0.02649023510762],
[-0.70918399460409, -0.80567770832935, -0.01321647940843],
[-0.59405231379420, -0.80310238773651, -0.00879298467510],
[-0.47192181765812, -0.80567770832935, -0.01321647940843],
[-0.34544887559606, -0.81403044464816, -0.02649023510762],
[-0.21434467481798, -0.83040818807740, -0.04802079754107],
[-0.06989051473883, -0.86021897052234, -0.06989051473883],
[-1.00000000000000, -0.75275399873620, -0.04045380699852],
[-0.90179031390690, -0.72520086770922, -0.02621831367073],
[-0.80567770832935, -0.70918399460409, -0.01321647940843],
[-0.70179897659711, -0.70179897659711, -0.00632276114025],
[-0.59007928566553, -0.70179897659711, -0.00632276114025],
[-0.47192181765812, -0.70918399460409, -0.01321647940843],
[-0.34679050471314, -0.72520086770922, -0.02621831367073],
[-0.20679219426527, -0.75275399873620, -0.04045380699852],
[-1.00000000000000, -0.62780542211337, -0.02315950900602],
[-0.89951291559293, -0.60631564176952, -0.01556645327346],
[-0.80310238773651, -0.59405231379420, -0.00879298467510],
[-0.70179897659711, -0.59007928566553, -0.00632276114025],
[-0.59405231379420, -0.59405231379420, -0.00879298467510],
[-0.47860498936408, -0.60631564176952, -0.01556645327346],
[-0.34903506888062, -0.62780542211337, -0.02315950900602],
[-1.00000000000000, -0.49127187212102, -0.01745625575796],
[-0.89951291559293, -0.47860498936408, -0.01556645327346],
[-0.80567770832935, -0.47192181765812, -0.01321647940843],
[-0.70918399460409, -0.47192181765812, -0.01321647940843],
[-0.60631564176952, -0.47860498936408, -0.01556645327346],
[-0.49127187212102, -0.49127187212102, -0.01745625575796],
[-1.00000000000000, -0.34903506888062, -0.02315950900602],
[-0.90179031390690, -0.34679050471314, -0.02621831367073],
[-0.81403044464816, -0.34544887559606, -0.02649023510762],
[-0.72520086770922, -0.34679050471314, -0.02621831367073],
[-0.62780542211337, -0.34903506888062, -0.02315950900602],
[-1.00000000000000, -0.20679219426527, -0.04045380699852],
[-0.90722633956355, -0.21434467481798, -0.04802079754107],
[-0.83040818807740, -0.21434467481798, -0.04802079754107],
[-0.75275399873620, -0.20679219426527, -0.04045380699852],
[-1.00000000000000, -0.06989051473883, -0.06989051473883],
[-0.91851489660725, -0.08148510339275, -0.08148510339275],
[-0.86021897052234, -0.06989051473883, -0.06989051473883],
[-1.00000000000000, 0.05683073097837, -0.11238110084642],
[-0.94444963013195, 0.05683073097837, -0.11238110084642],
[-1.00000000000000, 0.16918602340928, -0.16918602340928],
[-1.00000000000000, -1.00000000000000, 0.00000000000000],
[-0.94444963013195, -1.00000000000000, 0.05683073097837],
[-0.86103620266103, -1.00000000000000, 0.09805122861485],
[-0.75566022473134, -1.00000000000000, 0.12485880568057],
[-0.63445303796278, -1.00000000000000, 0.13805978782971],
[-0.50360674986693, -1.00000000000000, 0.13805978782971],
[-0.36919858094923, -1.00000000000000, 0.12485880568057],
[-0.23701502595382, -1.00000000000000, 0.09805122861485],
[-0.11238110084642, -1.00000000000000, 0.05683073097837],
[0.00000000000000, -1.00000000000000, 0.00000000000000],
[-1.00000000000000, -0.94444963013195, 0.05683073097837],
[-0.91883441162852, -0.91883441162852, 0.09112126228328],
[-0.83238749086458, -0.90794227941269, 0.11672629026997],
[-0.73078382574408, -0.90306867344143, 0.13207012852178],
[-0.61776031453800, -0.90164936399329, 0.13716999306930],
[-0.49821762933626, -0.90306867344143, 0.13207012852178],
[-0.37639651999271, -0.90794227941269, 0.11672629026997],
[-0.25345243902624, -0.91883441162852, 0.09112126228328],
[-0.11238110084642, -0.94444963013195, 0.05683073097837],
[-1.00000000000000, -0.86103620266103, 0.09805122861485],
[-0.90794227941269, -0.83238749086458, 0.11672629026997],
[-0.81742789416128, -0.81742789416128, 0.13082762196828],
[-0.71756638414979, -0.81090684444452, 0.13807553516676],
[-0.60960230657246, -0.81090684444452, 0.13807553516676],
[-0.49597183364571, -0.81742789416128, 0.13082762196828],
[-0.37639651999271, -0.83238749086458, 0.11672629026997],
[-0.23701502595382, -0.86103620266103, 0.09805122861485],
[-1.00000000000000, -0.75566022473134, 0.12485880568057],
[-0.90306867344143, -0.73078382574408, 0.13207012852178],
[-0.81090684444452, -0.71756638414979, 0.13807553516676],
[-0.71341546709955, -0.71341546709955, 0.14024640129865],
[-0.60960230657246, -0.71756638414979, 0.13807553516676],
[-0.49821762933626, -0.73078382574408, 0.13207012852178],
[-0.36919858094923, -0.75566022473134, 0.12485880568057],
[-1.00000000000000, -0.63445303796278, 0.13805978782971],
[-0.90164936399329, -0.61776031453800, 0.13716999306930],
[-0.81090684444452, -0.60960230657246, 0.13807553516676],
[-0.71756638414979, -0.60960230657246, 0.13807553516676],
[-0.61776031453800, -0.61776031453800, 0.13716999306930],
[-0.50360674986693, -0.63445303796278, 0.13805978782971],
[-1.00000000000000, -0.50360674986693, 0.13805978782971],
[-0.90306867344143, -0.49821762933626, 0.13207012852178],
[-0.81742789416128, -0.49597183364571, 0.13082762196828],
[-0.73078382574408, -0.49821762933626, 0.13207012852178],
[-0.63445303796278, -0.50360674986693, 0.13805978782971],
[-1.00000000000000, -0.36919858094923, 0.12485880568057],
[-0.90794227941269, -0.37639651999271, 0.11672629026997],
[-0.83238749086458, -0.37639651999271, 0.11672629026997],
[-0.75566022473134, -0.36919858094923, 0.12485880568057],
[-1.00000000000000, -0.23701502595382, 0.09805122861485],
[-0.91883441162852, -0.25345243902624, 0.09112126228328],
[-0.86103620266103, -0.23701502595382, 0.09805122861485],
[-1.00000000000000, -0.11238110084642, 0.05683073097837],
[-0.94444963013195, -0.11238110084642, 0.05683073097837],
[-1.00000000000000, 0.00000000000000, 0.00000000000000],
[-1.00000000000000, -1.00000000000000, 0.16918602340928],
[-0.94511287539771, -1.00000000000000, 0.22394043395412],
[-0.86353773463513, -1.00000000000000, 0.26195989564156],
[-0.76162101409829, -1.00000000000000, 0.28433264782879],
[-0.64585840843297, -1.00000000000000, 0.29171681686594],
[-0.52271163373050, -1.00000000000000, 0.28433264782879],
[-0.39842216100643, -1.00000000000000, 0.26195989564156],
[-0.27882755855641, -1.00000000000000, 0.22394043395412],
[-0.16918602340928, -1.00000000000000, 0.16918602340928],
[-1.00000000000000, -0.94511287539771, 0.22394043395412],
[-0.91983334031592, -0.91983334031592, 0.25822045407531],
[-0.83657760165055, -0.90948715646774, 0.27437218066165],
[-0.74076899503587, -0.90546778458822, 0.28265531805365],
[-0.63641853842956, -0.90546778458822, 0.28265531805365],
[-0.52830742254336, -0.90948715646774, 0.27437218066165],
[-0.41855377344348, -0.91983334031592, 0.25822045407531],
[-0.27882755855641, -0.94511287539771, 0.22394043395412],
[-1.00000000000000, -0.86353773463513, 0.26195989564156],
[-0.90948715646774, -0.83657760165055, 0.27437218066165],
[-0.82358348252395, -0.82358348252395, 0.28022236999262],
[-0.73126457126186, -0.81967807323490, 0.28220721575863],
[-0.63305540494471, -0.82358348252395, 0.28022236999262],
[-0.52830742254336, -0.83657760165055, 0.27437218066165],
[-0.39842216100643, -0.86353773463513, 0.26195989564156],
[-1.00000000000000, -0.76162101409829, 0.28433264782879],
[-0.90546778458822, -0.74076899503587, 0.28265531805365],
[-0.81967807323490, -0.73126457126186, 0.28220721575863],
[-0.73126457126186, -0.73126457126186, 0.28220721575863],
[-0.63641853842956, -0.74076899503587, 0.28265531805365],
[-0.52271163373050, -0.76162101409829, 0.28433264782879],
[-1.00000000000000, -0.64585840843297, 0.29171681686594],
[-0.90546778458822, -0.63641853842956, 0.28265531805365],
[-0.82358348252395, -0.63305540494471, 0.28022236999262],
[-0.74076899503587, -0.63641853842956, 0.28265531805365],
[-0.64585840843297, -0.64585840843297, 0.29171681686594],
[-1.00000000000000, -0.52271163373050, 0.28433264782879],
[-0.90948715646774, -0.52830742254336, 0.27437218066165],
[-0.83657760165055, -0.52830742254336, 0.27437218066165],
[-0.76162101409829, -0.52271163373050, 0.28433264782879],
[-1.00000000000000, -0.39842216100643, 0.26195989564156],
[-0.91983334031592, -0.41855377344348, 0.25822045407531],
[-0.86353773463513, -0.39842216100643, 0.26195989564156],
[-1.00000000000000, -0.27882755855641, 0.22394043395412],
[-0.94511287539771, -0.27882755855641, 0.22394043395412],
[-1.00000000000000, -0.16918602340928, 0.16918602340928],
[-1.00000000000000, -1.00000000000000, 0.33350484782450],
[-0.94648934393266, -1.00000000000000, 0.38417524405555],
[-0.86787251363754, -1.00000000000000, 0.41717395528564],
[-0.77093131190284, -1.00000000000000, 0.43343778045617],
[-0.66250646855334, -1.00000000000000, 0.43343778045617],
[-0.54930144164810, -1.00000000000000, 0.41717395528564],
[-0.43768590012289, -1.00000000000000, 0.38417524405555],
[-0.33350484782450, -1.00000000000000, 0.33350484782450],
[-1.00000000000000, -0.94648934393266, 0.38417524405555],
[-0.92164607327834, -0.92164607327834, 0.41400422118789],
[-0.84351520488171, -0.91213357461004, 0.42011974635798],
[-0.75639330816958, -0.90953070252583, 0.42231731886500],
[-0.66447096686623, -0.91213357461004, 0.42011974635798],
[-0.57071207463121, -0.92164607327834, 0.41400422118789],
[-0.43768590012289, -0.94648934393266, 0.38417524405555],
[-1.00000000000000, -0.86787251363754, 0.41717395528564],
[-0.91213357461004, -0.84351520488171, 0.42011974635798],
[-0.83338493946422, -0.83338493946422, 0.41848479134232],
[-0.75171491241389, -0.83338493946422, 0.41848479134232],
[-0.66447096686623, -0.84351520488171, 0.42011974635798],
[-0.54930144164810, -0.86787251363754, 0.41717395528564],
[-1.00000000000000, -0.77093131190284, 0.43343778045617],
[-0.90953070252583, -0.75639330816958, 0.42231731886500],
[-0.83338493946422, -0.75171491241389, 0.41848479134232],
[-0.75639330816958, -0.75639330816958, 0.42231731886500],
[-0.66250646855334, -0.77093131190284, 0.43343778045617],
[-1.00000000000000, -0.66250646855334, 0.43343778045617],
[-0.91213357461004, -0.66447096686623, 0.42011974635798],
[-0.84351520488171, -0.66447096686623, 0.42011974635798],
[-0.77093131190284, -0.66250646855334, 0.43343778045617],
[-1.00000000000000, -0.54930144164810, 0.41717395528564],
[-0.92164607327834, -0.57071207463121, 0.41400422118789],
[-0.86787251363754, -0.54930144164810, 0.41717395528564],
[-1.00000000000000, -0.43768590012289, 0.38417524405555],
[-0.94648934393266, -0.43768590012289, 0.38417524405555],
[-1.00000000000000, -0.33350484782450, 0.33350484782450],
[-1.00000000000000, -1.00000000000000, 0.48822928568071],
[-0.94867846854126, -1.00000000000000, 0.53299436085627],
[-0.87428693174862, -1.00000000000000, 0.55935495152827],
[-0.78402924082624, -1.00000000000000, 0.56805848165248],
[-0.68506801977964, -1.00000000000000, 0.55935495152827],
[-0.58431589231501, -1.00000000000000, 0.53299436085627],
[-0.48822928568071, -1.00000000000000, 0.48822928568071],
[-1.00000000000000, -0.94867846854126, 0.53299436085627],
[-0.92455283614781, -0.92455283614781, 0.55333726360785],
[-0.85424182173311, -0.91644803161939, 0.55057105668700],
[-0.77988120333450, -0.91644803161939, 0.55057105668700],
[-0.70423159131223, -0.92455283614781, 0.55333726360785],
[-0.58431589231501, -0.94867846854126, 0.53299436085627],
[-1.00000000000000, -0.87428693174862, 0.55935495152827],
[-0.91644803161939, -0.85424182173311, 0.55057105668700],
[-0.84839105335568, -0.84839105335568, 0.54517316006704],
[-0.77988120333450, -0.85424182173311, 0.55057105668700],
[-0.68506801977964, -0.87428693174862, 0.55935495152827],
[-1.00000000000000, -0.78402924082624, 0.56805848165248],
[-0.91644803161939, -0.77988120333450, 0.55057105668700],
[-0.85424182173311, -0.77988120333450, 0.55057105668700],
[-0.78402924082624, -0.78402924082624, 0.56805848165248],
[-1.00000000000000, -0.68506801977964, 0.55935495152827],
[-0.92455283614781, -0.70423159131223, 0.55333726360785],
[-0.87428693174862, -0.68506801977964, 0.55935495152827],
[-1.00000000000000, -0.58431589231501, 0.53299436085627],
[-0.94867846854126, -0.58431589231501, 0.53299436085627],
[-1.00000000000000, -0.48822928568071, 0.48822928568071],
[-1.00000000000000, -1.00000000000000, 0.62890813726522],
[-0.95182812061116, -1.00000000000000, 0.66621987519844],
[-0.88312209180235, -1.00000000000000, 0.68461312133807],
[-0.80149102953572, -1.00000000000000, 0.68461312133806],
[-0.71439175458728, -1.00000000000000, 0.66621987519844],
[-0.62890813726522, -1.00000000000000, 0.62890813726522],
[-1.00000000000000, -0.95182812061116, 0.66621987519844],
[-0.92912322243421, -0.92912322243421, 0.67232127536933],
[-0.87075027964005, -0.92365055664618, 0.66515111592627],
[-0.81407483050090, -0.92912322243421, 0.67232127536933],
[-0.71439175458728, -0.95182812061116, 0.66621987519844],
[-1.00000000000000, -0.88312209180235, 0.68461312133807],
[-0.92365055664618, -0.87075027964005, 0.66515111592627],
[-0.87075027964005, -0.87075027964005, 0.66515111592627],
[-0.80149102953572, -0.88312209180235, 0.68461312133807],
[-1.00000000000000, -0.80149102953572, 0.68461312133806],
[-0.92912322243421, -0.81407483050090, 0.67232127536933],
[-0.88312209180234, -0.80149102953572, 0.68461312133806],
[-1.00000000000000, -0.71439175458728, 0.66621987519844],
[-0.95182812061116, -0.71439175458728, 0.66621987519844],
[-1.00000000000000, -0.62890813726522, 0.62890813726522],
[-1.00000000000000, -1.00000000000000, 0.75149420255261],
[-0.95613320518782, -1.00000000000000, 0.78015760232082],
[-0.89481010149641, -1.00000000000000, 0.78962020299283],
[-0.82402439713301, -1.00000000000000, 0.78015760232083],
[-0.75149420255261, -1.00000000000000, 0.75149420255261],
[-1.00000000000000, -0.95613320518782, 0.78015760232082],
[-0.93662610527592, -0.93662610527592, 0.76961291485958],
[-0.89636070430775, -0.93662610527592, 0.76961291485958],
[-0.82402439713301, -0.95613320518782, 0.78015760232082],
[-1.00000000000000, -0.89481010149641, 0.78962020299283],
[-0.93662610527592, -0.89636070430775, 0.76961291485958],
[-0.89481010149641, -0.89481010149641, 0.78962020299283],
[-1.00000000000000, -0.82402439713301, 0.78015760232083],
[-0.95613320518782, -0.82402439713301, 0.78015760232083],
[-1.00000000000000, -0.75149420255261, 0.75149420255261],
[-1.00000000000000, -1.00000000000000, 0.85246057779665],
[-0.96183417565652, -1.00000000000000, 0.87170382160643],
[-0.90986964594991, -1.00000000000000, 0.87170382160643],
[-0.85246057779665, -1.00000000000000, 0.85246057779665],
[-1.00000000000000, -0.96183417565652, 0.87170382160643],
[-0.95020261978982, -0.95020261978982, 0.85060785936946],
[-0.90986964594991, -0.96183417565652, 0.87170382160643],
[-1.00000000000000, -0.90986964594991, 0.87170382160643],
[-0.96183417565652, -0.90986964594991, 0.87170382160643],
[-1.00000000000000, -0.85246057779665, 0.85246057779665],
[-1.00000000000000, -1.00000000000000, 0.92890152815259],
[-0.96921630071687, -1.00000000000000, 0.93843260143372],
[-0.92890152815259, -1.00000000000000, 0.92890152815259],
[-1.00000000000000, -0.96921630071687, 0.93843260143372],
[-0.96921630071686, -0.96921630071687, 0.93843260143372],
[-1.00000000000000, -0.92890152815259, 0.92890152815259],
[-1.00000000000000, -1.00000000000000, 0.97861176622208],
[-0.97861176622208, -1.00000000000000, 0.97861176622208],
[-1.00000000000000, -0.97861176622208, 0.97861176622208]
])
else:
raise NotImplementedError("Tetrahedral Fekete points beyond C=18 not available")
return feketeNodes
# SCRIPT TO PRINT ALL THE POINTS IN THIS FORMAT FROM THE ORIGINAL FILE
# import numpy as np
# with open("warburtonNodes3D.m") as f:
#     content = f.readlines()
# for i in range(len(content)):
#     line = content[i].strip()
#     if line == '':
#         continue
#     if 'case' in line:
#         p = int(line.split(' ')[-1])
#         if p==19:
#             break
#         print('elif C=='+str(p-1)+':')
#     elif 'warburtonNodes' in line:
#         print('\tfeketeNodes = np.array([')
#     elif ']' in line:
#         print('\t\t])')
#     else:
#         points = line.strip().replace(' ','').split('\t')
#         print('\t\t['+points[0]+', '+points[1]+', '+points[2]+'],')
```
#### File: Florence/QuadratureRules/QuadraturePointsWeightsTri.py
```python
from __future__ import print_function
import numpy as np
from .NumericIntegrator import GaussQuadrature
import os
def QuadraturePointsWeightsTri(C,Opt=1):
# Opt IS FOR THE TYPE OF QUADRATURE
# Opt=0 IS FOR GAUSSIAN QUADRATURE TECHNIQUE
# Opt=1 IS FOR OPTIMUM QUADRATURE (WILLIAMS-SHUNN) TECHNIQUE (DEFAULT)
# Opt=2 IS FOR SYMMETRIC OPTIMUM QUADRATURE (WITHERDEN-VINCENT) TECHNIQUE
# Opt=3 IS FOR OPTIMUM QUADRATURE (WILLIAMS-SHUNN) TECHNIQUE (ERRONEOUS)
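# NOTE (ADDED FOR CLARITY): zw IS RETURNED AS AN (n x 3) ARRAY. FOR THE RULES
# TABULATED INLINE BELOW (Opt=3) THE FIRST TWO COLUMNS ARE POINT COORDINATES
# ON THE REFERENCE TRIANGLE WITH VERTICES (-1,-1), (1,-1), (-1,1) AND THE
# LAST COLUMN HOLDS THE WEIGHTS (SUMMING TO ITS AREA, 2), e.g.
# zw = QuadraturePointsWeightsTri(2, Opt=3) # THE 4-POINT RULE BELOW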
zw = []
if Opt==0 or C>19:
# IN CASE OPT WAS CHOSEN TO BE 3
if Opt==3:
print('Optimal quadrature for C>19 is not available. Falling back to Gaussian quadrature')
z1D, w1D = GaussQuadrature(C+1,-1.,1.)
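# COLLAPSED (DUFFY-TYPE) TENSOR-PRODUCT RULE: THE FACTOR (1 - z1D[j])/2
# SCALING THE WEIGHT IS THE JACOBIAN OF MAPPING THE SQUARE TO THE TRIANGLE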
zw = np.zeros((w1D.shape[0]**2,3))
counter=0
for i in range(w1D.shape[0]):
for j in range(0,w1D.shape[0]):
zw[counter,2] = w1D[i]*w1D[j]*(1. - z1D[j])/2.
zw[counter,0] = z1D[i]
zw[counter,1] = z1D[j]
counter +=1
elif Opt==1:
path = os.path.dirname(os.path.realpath(__file__))
path += '/Tables/tri/'
p = C+1
d = 0
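# d SELECTS WHICH TABULATED FILE TO LOAD VIA THE 'd<d>' SUBSTRING IN ITS
# NAME (PRESUMABLY THE RULE'S DEGREE)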
if p==2:
d = 4
elif p==3:
d = 7
elif p==4:
d = 8
elif p==5:
d = 10
elif p==6:
d = 12
elif p==7:
d = 14
if d==0:
raise ValueError('Quadrature rule does not exist. Try QuadratureOpt = 3 for more points')
for i in os.listdir(path):
if 'williams-shunn-n' in i:
if 'd'+str(d) in i:
zw = np.loadtxt(path+i)
elif Opt==2:
path = os.path.dirname(os.path.realpath(__file__))
path += '/Tables/tri/'
d = C+2
for i in os.listdir(path):
if 'witherden-vincent-n' in i:
if 'd'+str(d) in i:
zw = np.loadtxt(path+i)
elif Opt==3:
# # AVOID INACCURATE QUADRATURE POINTS
# if C==4:
# C = 6
# if C==5:
# C = 7 # careful
# if C==6:
# C = 11
if C==0:
zw = np.array([
[-0.333333333333333, -0.333333333333333, 2.000000000000000]
])
elif C==1:
zw = np.array([
[-0.666666666666667, -0.666666666666667, 0.666666666666667],
[-0.666666666666667, 0.333333333333333, 0.666666666666667],
[0.333333333333333, -0.666666666666667, 0.666666666666667]
])
elif C==2:
zw = np.array([
[-0.333333333333333, -0.333333333333333, -1.125000000000000],
[-0.600000000000000, -0.600000000000000, 1.041666666666667],
[-0.600000000000000, 0.200000000000000 , 1.041666666666667],
[0.200000000000000 , -0.600000000000000 , 1.041666666666667]
])
elif C==3:
zw = np.array([
[-0.108103018168070, -0.108103018168070, 0.446763179356022],
[-0.108103018168070, -0.783793963663860, 0.446763179356022],
[-0.783793963663860, -0.108103018168070, 0.446763179356022],
[-0.816847572980458, -0.816847572980458, 0.219903487310644],
[-0.816847572980458, 0.633695145960918 , 0.219903487310644],
[0.633695145960918 , -0.816847572980458 , 0.219903487310644]
])
elif C==4:
zw = np.array([
[-0.333333333333333, -0.333333333333333, 0.450000000000000],
[-0.059715871789770, -0.059715871789770, 0.264788305577012],
[-0.059715871789770, -0.880568256420460, 0.264788305577012],
[-0.880568256420460, -0.059715871789770, 0.264788305577012],
[-0.797426985353088, -0.797426985353088, 0.251878361089654],
[-0.797426985353088, 0.594853970706174 , 0.251878361089654],
[0.594853970706174 , -0.797426985353088 , 0.251878361089654]
])
elif C==5:
zw = np.array([
[-0.501426509658180, -0.501426509658180, 0.233572551452758],
[-0.501426509658180, 0.002853019316358 , 0.233572551452758],
[0.002853019316358 , -0.501426509658180 , 0.233572551452758],
[-0.873821971016996, -0.873821971016996, 0.101689812740414],
[-0.873821971016996, 0.747643942033992 , 0.101689812740414],
[0.747643942033992 , -0.873821971016996 , 0.101689812740414],
[-0.379295097932432, 0.273004998242798 , 0.165702151236748],
[0.273004998242798 , -0.893709900310366 , 0.165702151236748],
[-0.893709900310366, -0.379295097932432, 0.165702151236748],
[-0.379295097932432, -0.893709900310366, 0.165702151236748],
[0.273004998242798 , -0.379295097932432 , 0.165702151236748],
[-0.893709900310366, 0.273004998242798 , 0.165702151236748]
])
elif C==6:
zw = np.array([
[-0.333333333333333, -0.333333333333333, -0.299140088935364],
[-0.479308067841920, -0.479308067841920, 0.351230514866416],
[-0.479308067841920, -0.041383864316160, 0.351230514866416],
[-0.041383864316160, -0.479308067841920, 0.351230514866416],
[-0.869739794195568, -0.869739794195568, 0.106694471217676],
[-0.869739794195568, 0.739479588391136 , 0.106694471217676],
[0.739479588391136 , -0.869739794195568 , 0.106694471217676],
[-0.374269007990252, 0.276888377139620 , 0.154227521780514],
[0.276888377139620 , -0.902619369149368 , 0.154227521780514],
[-0.902619369149368, -0.374269007990252, 0.154227521780514],
[-0.374269007990252, -0.902619369149368, 0.154227521780514],
[0.276888377139620 , -0.374269007990252 , 0.154227521780514],
[-0.902619369149368, 0.276888377139620 , 0.154227521780514]
])
elif C==7:
zw = np.array([
[-0.333333333333333, -0.333333333333333, 0.288631215355574],
[-0.081414823414554, -0.081414823414554, 0.190183268534570],
[-0.081414823414554, -0.837170353170892, 0.190183268534570],
[-0.837170353170892, -0.081414823414554, 0.190183268534570],
[-0.658861384496480, -0.658861384496480, 0.206434741069436],
[-0.658861384496480, 0.317722768992960, 0.206434741069436],
[0.317722768992960, -0.658861384496480, 0.206434741069436],
[-0.898905543365938, -0.898905543365938, 0.064916995246396],
[-0.898905543365938, 0.797811086731876, 0.064916995246395],
[0.797811086731876 , -0.898905543365938, 0.064916995246396],
[-0.473774340730724, 0.456984785910808, 0.054460628348870],
[0.456984785910808 , -0.983210445180084, 0.054460628348870],
[-0.983210445180084, -0.473774340730724, 0.054460628348870],
[-0.473774340730724, -0.983210445180084, 0.054460628348870],
[0.456984785910808 , -0.473774340730724, 0.054460628348870],
[-0.983210445180084, 0.456984785910808, 0.054460628348870]
])
elif C==8:
zw = np.array([
[-0.333333333333333, -0.333333333333333, 0.194271592565598],
[-0.020634961602524, -0.020634961602524, 0.062669400454278],
[-0.020634961602524, -0.958730076794950, 0.062669400454278],
[-0.958730076794950, -0.020634961602524, 0.062669400454278],
[-0.125820817014126, -0.125820817014126, 0.155655082009548],
[-0.125820817014126, -0.748358365971746, 0.155655082009548],
[-0.748358365971746, -0.125820817014126, 0.155655082009548],
[-0.623592928761934, -0.623592928761934, 0.159295477854420],
[-0.623592928761934, 0.247185857523870 , 0.159295477854420],
[0.247185857523870 , -0.623592928761934 , 0.159295477854420],
[-0.910540973211094, -0.910540973211094, 0.051155351317396],
[-0.910540973211094, 0.821081946422190 , 0.051155351317396],
[0.821081946422190 , -0.910540973211094 , 0.051155351317396],
[-0.556074021678468, 0.482397197568996 , 0.086567078754578],
[0.482397197568996 , -0.926323175890528 , 0.086567078754578],
[-0.926323175890528, -0.556074021678468, 0.086567078754578],
[-0.556074021678468, -0.926323175890528, 0.086567078754578],
[0.482397197568996 , -0.556074021678468 , 0.086567078754578],
[-0.926323175890528, 0.482397197568996 , 0.086567078754578]
])
elif C==9:
zw = np.array([
[-0.333333333333333, -0.333333333333333, 0.181635980765508],
[-0.028844733232686, -0.028844733232686, 0.073451915512934],
[-0.028844733232686, -0.942310533534630, 0.073451915512934],
[-0.942310533534630, -0.028844733232686, 0.073451915512934],
[-0.781036849029926, -0.781036849029926, 0.090642118871056],
[-0.781036849029926, 0.562073698059852, 0.090642118871056],
[0.562073698059852 , -0.781036849029926, 0.090642118871056],
[-0.384120322471758, 0.100705883641998, 0.145515833690840],
[0.100705883641998 , -0.716585561170240, 0.145515833690840],
[-0.716585561170240, -0.384120322471758, 0.145515833690840],
[-0.384120322471758, -0.716585561170240, 0.145515833690840],
[0.100705883641998 , -0.384120322471758, 0.145515833690840],
[-0.716585561170240, 0.100705883641998, 0.145515833690840],
[-0.506654878720194, 0.456647809194822, 0.056654485062114],
[0.456647809194822 , -0.949992930474628, 0.056654485062114],
[-0.949992930474628, -0.506654878720194, 0.056654485062114],
[-0.506654878720194, -0.949992930474628, 0.056654485062114],
[0.456647809194822 , -0.506654878720194, 0.056654485062114],
[-0.949992930474628, 0.456647809194822, 0.056654485062114],
[-0.866393497975600, 0.847311867175000, 0.018843333927466],
[0.847311867175000 , -0.980918369199402, 0.018843333927466],
[-0.980918369199402, -0.866393497975600, 0.018843333927466],
[-0.866393497975600, -0.980918369199402, 0.018843333927466],
[0.847311867175000 , -0.866393497975600, 0.018843333927466],
[-0.980918369199402, 0.847311867175000, 0.018843333927466]
])
elif C==10:
zw = np.array([
[0.069222096541516, 0.069222096541516, 0.001854012657922],
[0.069222096541516, -1.138444193083034, 0.001854012657922],
[-1.138444193083034, 0.069222096541516, 0.001854012657922],
[-0.202061394068290, -0.202061394068290, 0.154299069829626],
[-0.202061394068290, -0.595877211863420, 0.154299069829626],
[-0.595877211863420, -0.202061394068290, 0.154299069829626],
[-0.593380199137436, -0.593380199137436, 0.118645954761548],
[-0.593380199137436, 0.186760398274870, 0.118645954761548],
[0.186760398274870, -0.593380199137436, 0.118645954761548],
[-0.761298175434838, -0.761298175434838, 0.072369081006836],
[-0.761298175434838, 0.522596350869674, 0.072369081006836],
[0.522596350869674 , -0.761298175434838, 0.072369081006836],
[-0.935270103777448, -0.935270103777448, 0.027319462005356],
[-0.935270103777448, 0.870540207554896, 0.027319462005356],
[0.870540207554896 , -0.935270103777448, 0.027319462005356],
[-0.286758703477414, 0.186402426856426, 0.104674223924408],
[0.186402426856426 , -0.899643723379010, 0.104674223924408],
[-0.899643723379010, -0.286758703477414, 0.104674223924408],
[-0.286758703477414, -0.899643723379010, 0.104674223924408],
[0.186402426856426 , -0.286758703477414, 0.104674223924408],
[-0.899643723379010, 0.186402426856426, 0.104674223924408],
[-0.657022039391916, 0.614978006319584, 0.041415319278282],
[0.614978006319584 , -0.957955966927668, 0.041415319278282],
[-0.957955966927668, -0.657022039391916, 0.041415319278282],
[-0.657022039391916, -0.957955966927668, 0.041415319278282],
[0.614978006319584 , -0.657022039391916, 0.041415319278282],
[-0.957955966927668, 0.614978006319584, 0.041415319278282]
])
elif C==11:
zw = np.array([
[-0.023565220452390, -0.023565220452390, 0.051462132880910],
[-0.023565220452390, -0.952869559095220, 0.051462132880910],
[-0.952869559095220, -0.023565220452390, 0.051462132880910],
[-0.120551215411080, -0.120551215411080, 0.087385089076076],
[-0.120551215411080, -0.758897569177842, 0.087385089076076],
[-0.758897569177842, -0.120551215411080, 0.087385089076076],
[-0.457579229975768, -0.457579229975768, 0.125716448435770],
[-0.457579229975768, -0.084841540048464, 0.125716448435770],
[-0.084841540048464, -0.457579229975768, 0.125716448435770],
[-0.744847708916828, -0.744847708916828, 0.069592225861418],
[-0.744847708916828, 0.489695417833656, 0.069592225861418],
[0.489695417833656 , -0.744847708916828, 0.069592225861418],
[-0.957365299093580, -0.957365299093580, 0.012332522103118],
[-0.957365299093580, 0.914730598187158, 0.012332522103118],
[0.914730598187158, -0.957365299093580, 0.012332522103118],
[-0.448573460628972, 0.217886471559576, 0.080743115532762],
[0.217886471559576 , -0.769313010930604, 0.080743115532762],
[-0.769313010930604, -0.448573460628972, 0.080743115532762],
[-0.448573460628972, -0.769313010930604, 0.080743115532762],
[0.217886471559576 , -0.448573460628972, 0.080743115532762],
[-0.769313010930604, 0.217886471559576, 0.080743115532762],
[-0.437348838020120, 0.391672173575606, 0.044713546404606],
[0.391672173575606 , -0.954323335555486, 0.044713546404606],
[-0.954323335555486, -0.437348838020120, 0.044713546404606],
[-0.437348838020120, -0.954323335555486, 0.044713546404606],
[0.391672173575606 , -0.437348838020120, 0.044713546404606],
[-0.954323335555486, 0.391672173575606, 0.044713546404606],
[-0.767496168184806, 0.716028067088146, 0.034632462217318],
[0.716028067088146 , -0.948531898903340, 0.034632462217318],
[-0.948531898903340, -0.767496168184806, 0.034632462217318],
[-0.767496168184806, -0.948531898903340, 0.034632462217318],
[0.716028067088146 , -0.767496168184806, 0.034632462217318],
[-0.948531898903340, 0.716028067088146, 0.034632462217318]
])
elif C==12:
zw = np.array([
[-0.333333333333333, -0.333333333333333, 0.105041846801604],
[-0.009903630120590, -0.009903630120590, 0.022560290418660],
[-0.009903630120590, -0.980192739758818, 0.022560290418660],
[-0.980192739758818, -0.009903630120590, 0.022560290418660],
[-0.062566729780852, -0.062566729780852, 0.062847036724908],
[-0.062566729780852, -0.874866540438296, 0.062847036724908],
[-0.874866540438296, -0.062566729780852, 0.062847036724908],
[-0.170957326397446, -0.170957326397446, 0.094145005008388],
[-0.170957326397446, -0.658085347205106, 0.094145005008388],
[-0.658085347205106, -0.170957326397446, 0.094145005008388],
[-0.541200855914338, -0.541200855914338, 0.094727173072710],
[-0.541200855914338, 0.082401711828674 , 0.094727173072710],
[0.082401711828674 , -0.541200855914338 , 0.094727173072710],
[-0.771151009607340, -0.771151009607340, 0.062335058091588],
[-0.771151009607340, 0.542302019214680 , 0.062335058091588],
[0.542302019214680 , -0.771151009607340 , 0.062335058091588],
[-0.950377217273082, -0.950377217273082, 0.015951542930148],
[-0.950377217273082, 0.900754434546164 , 0.015951542930148],
[0.900754434546164 , -0.950377217273082 , 0.015951542930148],
[-0.462410005882478, 0.272702349123320 , 0.073696805457464],
[0.272702349123320 , -0.810292343240842 , 0.073696805457464],
[-0.810292343240842, -0.462410005882478, 0.073696805457464],
[-0.462410005882478, -0.810292343240842, 0.073696805457464],
[0.272702349123320 , -0.462410005882478 , 0.073696805457464],
[-0.810292343240842, 0.272702349123320 , 0.073696805457464],
[-0.416539866531424, 0.380338319973810 , 0.034802926607644],
[0.380338319973810 , -0.963798453442386 , 0.034802926607644],
[-0.963798453442386, -0.416539866531424, 0.034802926607644],
[-0.416539866531424, -0.963798453442386, 0.034802926607644],
[0.380338319973810 , -0.416539866531424 , 0.034802926607644],
[-0.963798453442386, 0.380338319973810 , 0.034802926607644],
[-0.747285229016662, 0.702819075668482 , 0.031043573678090],
[0.702819075668482 , -0.955533846651820 , 0.031043573678090],
[-0.955533846651820, -0.747285229016662, 0.031043573678090],
[-0.747285229016662, -0.955533846651820, 0.031043573678090],
[0.702819075668482 , -0.747285229016662 , 0.031043573678090],
[-0.955533846651820, 0.702819075668482 , 0.031043573678090]
])
elif C==13:
zw = np.array([
[-0.022072179275642, -0.022072179275642, 0.043767162738858],
[-0.022072179275642, -0.955855641448714, 0.043767162738858],
[-0.955855641448714, -0.022072179275642, 0.043767162738858],
[-0.164710561319092, -0.164710561319092, 0.065576707088250],
[-0.164710561319092, -0.670578877361816, 0.065576707088250],
[-0.670578877361816, -0.164710561319092, 0.065576707088250],
[-0.453044943382322, -0.453044943382322, 0.103548209014584],
[-0.453044943382322, -0.093910113235354, 0.103548209014584],
[-0.093910113235354, -0.453044943382322, 0.103548209014584],
[-0.645588935174914, -0.645588935174914, 0.084325177473986],
[-0.645588935174914, 0.291177870349826 , 0.084325177473986],
[0.291177870349826 , -0.645588935174914 , 0.084325177473986],
[-0.876400233818254, -0.876400233818254, 0.028867399339554],
[-0.876400233818254, 0.752800467636510 , 0.028867399339554],
[0.752800467636510 , -0.876400233818254 , 0.028867399339554],
[-0.961218077502598, -0.961218077502598, 0.009846807204800],
[-0.961218077502598, 0.922436155005196 , 0.009846807204800],
[0.922436155005196 , -0.961218077502598, 0.009846807204800],
[-0.655466624357288, 0.541217109549992 , 0.049331506425128],
[0.541217109549992 ,-0.885750485192704 , 0.049331506425128],
[-0.885750485192704, -0.655466624357288, 0.049331506425128],
[-0.655466624357288, -0.885750485192704, 0.049331506425128],
[0.541217109549992 , -0.655466624357288 , 0.049331506425128],
[-0.885750485192704, 0.541217109549992 , 0.049331506425128],
[-0.326277080407310, 0.140444581693366 , 0.077143021574122],
[0.140444581693366 , -0.814167501286056 , 0.077143021574122],
[-0.814167501286056, -0.326277080407310, 0.077143021574122],
[-0.326277080407310, -0.814167501286056, 0.077143021574122],
[0.140444581693366 , -0.326277080407310 , 0.077143021574122],
[-0.814167501286056, 0.140444581693366 , 0.077143021574122],
[-0.403254235727484, 0.373960335616176 , 0.028872616227068],
[0.373960335616176 , -0.970706099888692 , 0.028872616227068],
[-0.970706099888692, -0.403254235727484, 0.028872616227068],
[-0.403254235727484, -0.970706099888692, 0.028872616227068],
[0.373960335616176 , -0.403254235727484 , 0.028872616227068],
[-0.970706099888692, 0.373960335616176 , 0.028872616227068],
[-0.762051004606086, 0.759514342740342 , 0.010020457677002],
[0.759514342740342 , -0.997463338134256 , 0.010020457677002],
[-0.997463338134256, -0.762051004606086, 0.010020457677002],
[-0.762051004606086, -0.997463338134256, 0.010020457677002],
[0.759514342740342 , -0.762051004606086 , 0.010020457677002],
[-0.997463338134256, 0.759514342740342 , 0.010020457677002]
])
elif C==14:
zw = np.array([
[0.013945833716486 , 0.013945833716486 , 0.003833751285698],
[0.013945833716486 , -1.027891667432972, 0.003833751285698],
[-1.027891667432972, 0.013945833716486, 0.003833751285698],
[-0.137187291433954, -0.137187291433954, 0.088498054542290],
[-0.137187291433954, -0.725625417132090, 0.088498054542290],
[-0.725625417132090, -0.137187291433954, 0.088498054542290],
[-0.444612710305712, -0.444612710305712, 0.102373097437704],
[-0.444612710305712, -0.110774579388578, 0.102373097437704],
[-0.110774579388578, -0.444612710305712, 0.102373097437704],
[-0.747070217917492, -0.747070217917492, 0.047375471741376],
[-0.747070217917492, 0.494140435834984 , 0.047375471741376],
[0.494140435834984 , -0.747070217917492 , 0.047375471741376],
[-0.858383228050628, -0.858383228050628, 0.026579551380042],
[-0.858383228050628, 0.716766456101256 , 0.026579551380042],
[0.716766456101256 , -0.858383228050628 , 0.026579551380042],
[-0.962069659517854, -0.962069659517854, 0.009497833216384],
[-0.962069659517854, 0.924139319035706 , 0.009497833216384],
[0.924139319035706 , -0.962069659517854 , 0.009497833216384],
[-0.477377257719826, 0.209908933786582 , 0.077100145199186],
[0.209908933786582 , -0.732531676066758 , 0.077100145199186],
[-0.732531676066758, -0.477377257719826, 0.077100145199186],
[-0.477377257719826, -0.732531676066758, 0.077100145199186],
[0.209908933786582 , -0.477377257719826 , 0.077100145199186],
[-0.732531676066758, 0.209908933786582 , 0.077100145199186],
[-0.223906465819462, 0.151173111025628 , 0.054431628641248],
[0.151173111025628 , -0.927266645206166 , 0.054431628641248],
[-0.927266645206166, -0.223906465819462, 0.054431628641248],
[-0.223906465819462, -0.927266645206166, 0.054431628641248],
[0.151173111025628 , -0.223906465819462 , 0.054431628641248],
[-0.927266645206166, 0.151173111025628 , 0.054431628641248],
[-0.428575559900168, 0.448925326153310 , 0.004364154733594],
[0.448925326153310 , -1.020349766253142 , 0.004364154733594],
[-1.020349766253142, -0.428575559900168, 0.004364154733594],
[-0.428575559900168, -1.020349766253142, 0.004364154733594],
[0.448925326153310 , -0.428575559900168 , 0.004364154733594],
[-1.020349766253142, 0.448925326153310 , 0.004364154733594],
[-0.568800671855432, 0.495112932103676 , 0.043010639695462],
[0.495112932103676 , -0.926312260248244 , 0.043010639695462],
[-0.926312260248244, -0.568800671855432, 0.043010639695462],
[-0.568800671855432, -0.926312260248244, 0.043010639695462],
[0.495112932103676 , -0.568800671855432 , 0.043010639695462],
[-0.926312260248244, 0.495112932103676 , 0.043010639695462],
[-0.792848766847228, 0.767929148184832 , 0.015347885262098],
[0.767929148184832 , -0.975080381337602 , 0.015347885262098],
[-0.975080381337602, -0.792848766847228, 0.015347885262098],
[-0.792848766847228, -0.975080381337602, 0.015347885262098],
[0.767929148184832 , -0.792848766847228 , 0.015347885262098],
[-0.975080381337602, 0.767929148184832 , 0.015347885262098]
])
elif C==15:
zw = np.array([
[-0.333333333333333, -0.333333333333333, 0.093751394855284],
[-0.005238916103124, -0.005238916103124, 0.012811757157170],
[-0.005238916103124, -0.989522167793754, 0.012811757157170],
[-0.989522167793754, -0.005238916103124, 0.012811757157170],
[-0.173061122901296, -0.173061122901296, 0.083420593478774],
[-0.173061122901296, -0.653877754197410, 0.083420593478774],
[-0.653877754197410, -0.173061122901296, 0.083420593478774],
[-0.059082801866018, -0.059082801866018, 0.053782968500128],
[-0.059082801866018, -0.881834396267966, 0.053782968500128],
[-0.881834396267966, -0.059082801866018, 0.053782968500128],
[-0.518892500060958, -0.518892500060958, 0.084265045523300],
[-0.518892500060958, 0.037785000121916 , 0.084265045523300],
[0.037785000121916 , -0.518892500060958 , 0.084265045523300],
[-0.704068411554854, -0.704068411554854, 0.060000533685546],
[-0.704068411554854, 0.408136823109708 , 0.060000533685546],
[0.408136823109708 , -0.704068411554854 , 0.060000533685546],
[-0.849069624685052, -0.849069624685052, 0.028400197850048],
[-0.849069624685052, 0.698139249370104 , 0.028400197850048],
[0.698139249370104 , -0.849069624685052 , 0.028400197850048],
[-0.966807194753950, -0.966807194753950, 0.007164924702546],
[-0.966807194753950, 0.933614389507900 , 0.007164924702546],
[0.933614389507900 , -0.966807194753950 , 0.007164924702546],
[-0.406888806840226, 0.199737422349722 , 0.065546294921254],
[0.199737422349722 , -0.792848615509496 , 0.065546294921254],
[-0.792848615509496, -0.406888806840226, 0.065546294921254],
[-0.406888806840226, -0.792848615509496, 0.065546294921254],
[0.199737422349722 , -0.406888806840226 , 0.065546294921254],
[-0.792848615509496, 0.199737422349722 , 0.065546294921254],
[-0.324553873193842, 0.284387049883010 , 0.030596612496882],
[0.284387049883010 , -0.959833176689168 , 0.030596612496882],
[-0.959833176689168, -0.324553873193842, 0.030596612496882],
[-0.324553873193842, -0.959833176689168, 0.030596612496882],
[0.284387049883010 , -0.324553873193842 , 0.030596612496882],
[-0.959833176689168, 0.284387049883010 , 0.030596612496882],
[-0.590503436714376, 0.599185441942654 , 0.004772488385678],
[0.599185441942654 , -1.008682005228278 , 0.004772488385678],
[-1.008682005228278, -0.590503436714376, 0.004772488385678],
[-0.590503436714376, -1.008682005228278, 0.004772488385678],
[0.599185441942654 , -0.590503436714376 , 0.004772488385678],
[-1.008682005228278, 0.599185441942654 , 0.004772488385678],
[-0.621283015738754, 0.537399442802736 , 0.038169585511798],
[0.537399442802736 , -0.916116427063980 , 0.038169585511798],
[-0.916116427063980, -0.621283015738754, 0.038169585511798],
[-0.621283015738754, -0.916116427063980, 0.038169585511798],
[0.537399442802736 , -0.621283015738754 , 0.038169585511798],
[-0.916116427063980, 0.537399442802736 , 0.038169585511798],
[-0.829432768634686, 0.800798128173322 , 0.013700109093084],
[0.800798128173322 , -0.971365359538638 , 0.013700109093084],
[-0.971365359538638, -0.829432768634686, 0.013700109093084],
[-0.829432768634686, -0.971365359538638, 0.013700109093084],
[0.800798128173322 , -0.829432768634686 , 0.013700109093084],
[-0.971365359538638, 0.800798128173322 , 0.013700109093084]
])
elif C==16:
zw = np.array([
[-0.333333333333333, -0.333333333333333, 0.066874398581606],
[-0.005658918886452, -0.005658918886452, 0.010186830881014],
[-0.005658918886452, -0.988682162227096, 0.010186830881014],
[-0.988682162227096, -0.005658918886452, 0.010186830881014],
[-0.035647354750750, -0.035647354750750, 0.029341729055276],
[-0.035647354750750, -0.928705290498498, 0.029341729055276],
[-0.928705290498498, -0.035647354750750, 0.029341729055276],
[-0.099520061958436, -0.099520061958436, 0.048701756707344],
[-0.099520061958436, -0.800959876083126, 0.048701756707344],
[-0.800959876083126, -0.099520061958436, 0.048701756707344],
[-0.199467521245206, -0.199467521245206, 0.062215101737938],
[-0.199467521245206, -0.601064957509588, 0.062215101737938],
[-0.601064957509588, -0.199467521245206, 0.062215101737938],
[-0.495717464058094, -0.495717464058094, 0.062514222437240],
[-0.495717464058094, -0.008565071883810, 0.062514222437240],
[-0.008565071883810, -0.495717464058094, 0.062514222437240],
[-0.675905990683078, -0.675905990683078, 0.049631308679330],
[-0.675905990683078, 0.351811981366154 , 0.049631308679330],
[0.351811981366154 , -0.675905990683078 , 0.049631308679330],
[-0.848248235478508, -0.848248235478508, 0.028112146141114],
[-0.848248235478508, 0.696496470957016 , 0.028112146141114],
[0.696496470957016 , -0.848248235478508 , 0.028112146141114],
[-0.968690546064356, -0.968690546064356, 0.006389352347558],
[-0.968690546064356, 0.937381092128712 , 0.006389352347558],
[0.937381092128712 , -0.968690546064356 , 0.006389352347558],
[-0.331360265272684, 0.310986407618846 , 0.016239310637986],
[0.310986407618846 , -0.979626142346162 , 0.016239310637986],
[-0.979626142346162, -0.331360265272684, 0.016239310637986],
[-0.331360265272684, -0.979626142346162, 0.016239310637986],
[0.310986407618846 , -0.331360265272684 , 0.016239310637986],
[-0.979626142346162, 0.310986407618846 , 0.016239310637986],
[-0.415556924406112, 0.144675181064040 , 0.053611484566326],
[0.144675181064040 , -0.729118256657928 , 0.053611484566326],
[-0.729118256657928, -0.415556924406112, 0.053611484566326],
[-0.415556924406112, -0.729118256657928, 0.053611484566326],
[0.144675181064040 , -0.415556924406112 , 0.053611484566326],
[-0.729118256657928, 0.144675181064040 , 0.053611484566326],
[-0.360850229153620, 0.252002380572456 , 0.036919986421644],
[0.252002380572456 , -0.891152151418834 , 0.036919986421644],
[-0.891152151418834, -0.360850229153620, 0.036919986421644],
[-0.360850229153620, -0.891152151418834, 0.036919986421644],
[0.252002380572456 , -0.360850229153620 , 0.036919986421644],
[-0.891152151418834, 0.252002380572456 , 0.036919986421644],
[-0.618591551615416, 0.592854429948142 , 0.016953737068656],
[0.592854429948142 , -0.974262878332726 , 0.016953737068656],
[-0.974262878332726, -0.618591551615416, 0.016953737068656],
[-0.618591551615416, -0.974262878332726, 0.016953737068656],
[0.592854429948142 , -0.618591551615416 , 0.016953737068656],
[-0.974262878332726, 0.592854429948142 , 0.016953737068656],
[-0.639033576702508, 0.504702011875458 , 0.036585593540050],
[0.504702011875458 , -0.865668435172952 , 0.036585593540050],
[-0.865668435172952, -0.639033576702508, 0.036585593540050],
[-0.639033576702508, -0.865668435172952, 0.036585593540050],
[0.504702011875458 , -0.639033576702508 , 0.036585593540050],
[-0.865668435172952, 0.504702011875458 , 0.036585593540050],
[-0.838577372640872, 0.809251008191216 , 0.013331264008330],
[0.809251008191216 , -0.970673635550344 , 0.013331264008330],
[-0.970673635550344, -0.838577372640872, 0.013331264008330],
[-0.838577372640872, -0.970673635550344, 0.013331264008330],
[0.809251008191216 , -0.838577372640872 , 0.013331264008330],
[-0.970673635550344, 0.809251008191216 , 0.013331264008330]
])
elif C==17:
zw = np.array([
[-0.333333333333333, -0.333333333333333, 0.061619879875294],
[-0.013310382738158, -0.013310382738158, 0.018144873358808],
[-0.013310382738158, -0.973379234523686, 0.018144873358808],
[-0.973379234523686, -0.013310382738158, 0.018144873358808],
[-0.061578811516086, -0.061578811516086, 0.037522633879188],
[-0.061578811516086, -0.876842376967828, 0.037522633879188],
[-0.876842376967828, -0.061578811516086, 0.037522633879188],
[-0.127437208225988, -0.127437208225988, 0.038882195970954],
[-0.127437208225988, -0.745125583548022, 0.038882195970954],
[-0.745125583548022, -0.127437208225988, 0.038882195970954],
[-0.210307658653168, -0.210307658653168, 0.055507897221620],
[-0.210307658653168, -0.579384682693664, 0.055507897221620],
[-0.579384682693664, -0.210307658653168, 0.055507897221620],
[-0.500410862393686, -0.500410862393686, 0.064512450702914],
[-0.500410862393686, 0.000821724787372 , 0.064512450702914],
[0.000821724787372 , -0.500410862393686 , 0.064512450702914],
[-0.677135612512314, -0.677135612512314, 0.050148065233844],
[-0.677135612512314, 0.354271225024630 , 0.050148065233844],
[0.354271225024630 , -0.677135612512314 , 0.050148065233844],
[-0.846803545029258, -0.846803545029258, 0.030543855943664],
[-0.846803545029258, 0.693607090058514 , 0.030543855943664],
[0.693607090058514 , -0.846803545029258 , 0.030543855943664],
[-0.951495121293100, -0.951495121293100, 0.013587844045926],
[-0.951495121293100, 0.902990242586200 , 0.013587844045926],
[0.902990242586200 , -0.951495121293100 , 0.013587844045926],
[-0.913707265566070, -0.913707265566070, -0.004446197459840],
[-0.913707265566070, 0.827414531132142 , -0.004446197459840],
[0.827414531132142 , -0.913707265566070 , -0.004446197459840],
[-0.282177010118112, 0.265315937713272 , 0.012663828152812],
[0.265315937713272 , -0.983138927595160 , 0.012663828152812],
[-0.983138927595160, -0.282177010118112, 0.012663828152812],
[-0.282177010118112, -0.983138927595160, 0.012663828152812],
[0.265315937713272 , -0.282177010118112 , 0.012663828152812],
[-0.983138927595160, 0.265315937713272 , 0.012663828152812],
[-0.411195046496086, 0.148821943021710 , 0.054515076098276],
[0.148821943021710 , -0.737626896525624 , 0.054515076098276],
[-0.737626896525624, -0.411195046496086, 0.054515076098276],
[-0.411195046496086, -0.737626896525624, 0.054515076098276],
[0.148821943021710 , -0.411195046496086 , 0.054515076098276],
[-0.737626896525624, 0.148821943021710 , 0.054515076098276],
[-0.349964396716372, 0.249558093585024 , 0.035353571298930],
[0.249558093585024 , -0.899593696868650 , 0.035353571298930],
[-0.899593696868650, -0.349964396716372, 0.035353571298930],
[-0.349964396716372, -0.899593696868650, 0.035353571298930],
[0.249558093585024 , -0.349964396716372 , 0.035353571298930],
[-0.899593696868650, 0.249558093585024 , 0.035353571298930],
[-0.630524880667908, 0.497866353046074 , 0.036758969276140],
[0.497866353046074 , -0.867341472378168 , 0.036758969276140],
[-0.867341472378168, -0.630524880667908, 0.036758969276140],
[-0.630524880667908, -0.867341472378168, 0.036758969276140],
[0.497866353046074 , -0.630524880667908 , 0.036758969276140],
[-0.867341472378168, 0.497866353046074 , 0.036758969276140],
[-0.562406399973358, 0.538414010840886 , 0.016209465616384],
[0.538414010840886 , -0.976007610867528 , 0.016209465616384],
[-0.976007610867528, -0.562406399973358, 0.016209465616384],
[-0.562406399973358, -0.976007610867528, 0.016209465616384],
[0.538414010840886 , -0.562406399973358 , 0.016209465616384],
[-0.976007610867528, 0.538414010840886 , 0.016209465616384],
[-0.797640805727184, 0.767924604546934 , 0.015268258141450],
[0.767924604546934 , -0.970283798819750 , 0.015268258141450],
[-0.970283798819750, -0.797640805727184, 0.015268258141450],
[-0.797640805727184, -0.970283798819750, 0.015268258141450],
[0.767924604546934 , -0.797640805727184 , 0.015268258141450],
[-0.970283798819750, 0.767924604546934 , 0.015268258141450],
[-0.958250489434828, 1.028694520010726 , 0.000092375321588],
[1.028694520010726 , -1.070444030575898 , 0.000092375321588],
[-1.070444030575898, -0.958250489434828, 0.000092375321588],
[-0.958250489434828, -1.070444030575898, 0.000092375321588],
[1.028694520010726 , -0.958250489434828 , 0.000092375321588],
[-1.070444030575898, 1.028694520010726, 0.000092375321588]
])
elif C==18:
zw = np.array([
[-0.333333333333333, -0.333333333333333, 0.065812662777838],
[-0.020780025853988, -0.020780025853988, 0.020661463782544],
[-0.020780025853988, -0.958439948292026, 0.020661463782544],
[-0.958439948292026, -0.020780025853988, 0.020661463782544],
[-0.090926214604214, -0.090926214604214, 0.044774494526032],
[-0.090926214604214, -0.818147570791570, 0.044774494526032],
[-0.818147570791570, -0.090926214604214, 0.044774494526032],
[-0.197166638701138, -0.197166638701138, 0.060532251738936],
[-0.197166638701138, -0.605666722597724, 0.060532251738936],
[-0.605666722597724, -0.197166638701138, 0.060532251738936],
[-0.488896691193804, -0.488896691193804, 0.060981935604396],
[-0.488896691193804, -0.022206617612390, 0.060981935604396],
[-0.022206617612390, -0.488896691193804, 0.060981935604396],
[-0.645844115695740, -0.645844115695740, 0.048318425483282],
[-0.645844115695740, 0.291688231391482 , 0.048318425483282],
[0.291688231391482 , -0.645844115695740 , 0.048318425483282],
[-0.779877893544096, -0.779877893544096, 0.032101607173602],
[-0.779877893544096, 0.559755787088192 , 0.032101607173602],
[0.559755787088192 , -0.779877893544096 , 0.032101607173602],
[-0.888942751496320, -0.888942751496320, 0.016169160523568],
[-0.888942751496320, 0.777885502992642 , 0.016169160523568],
[0.777885502992642 , -0.888942751496320 , 0.016169160523568],
[-0.974756272445542, -0.974756272445542, 0.004158724054970],
[-0.974756272445542, 0.949512544891086 , 0.004158724054970],
[0.949512544891086 , -0.974756272445542 , 0.004158724054970],
[-0.208490425286114, 0.201267589589290 , 0.007769753809962],
[0.201267589589290 , -0.992777164303176 , 0.007769753809962],
[-0.992777164303176, -0.208490425286114, 0.007769753809962],
[-0.208490425286114, -0.992777164303176, 0.007769753809962],
[0.201267589589290 , -0.208490425286114 , 0.007769753809962],
[-0.992777164303176, 0.201267589589290 , 0.007769753809962],
[-0.384140032239128, 0.115206523177568 , 0.051148321224044],
[0.115206523177568 , -0.731066490938440 , 0.051148321224044],
[-0.731066490938440, -0.384140032239128, 0.051148321224044],
[-0.384140032239128, -0.731066490938440, 0.051148321224044],
[0.115206523177568 , -0.384140032239128 , 0.051148321224044],
[-0.731066490938440, 0.115206523177568 , 0.051148321224044],
[-0.470866103186960, 0.441974051634730 , 0.017761807146676],
[0.441974051634730 , -0.971107948447770 , 0.017761807146676],
[-0.971107948447770, -0.470866103186960, 0.017761807146676],
[-0.470866103186960, -0.971107948447770, 0.017761807146676],
[0.441974051634730 , -0.470866103186960 , 0.017761807146676],
[-0.971107948447770, 0.441974051634730 , 0.017761807146676],
[-0.282921295588098, 0.189054137911742 , 0.032249093523462],
[0.189054137911742 , -0.906132842323644 , 0.032249093523462],
[-0.906132842323644, -0.282921295588098, 0.032249093523462],
[-0.282921295588098, -0.906132842323644, 0.032249093523462],
[0.189054137911742 , -0.282921295588098 , 0.032249093523462],
[-0.906132842323644, 0.189054137911742 , 0.032249093523462],
[-0.684385188062810, 0.678662947361678 , 0.004983883634982],
[0.678662947361678 , -0.994277759298866 , 0.004983883634982],
[-0.994277759298866, -0.684385188062810, 0.004983883634982],
[-0.684385188062810, -0.994277759298866, 0.004983883634982],
[0.678662947361678 , -0.684385188062810 , 0.004983883634982],
[-0.994277759298866, 0.678662947361678 , 0.004983883634982],
[-0.849898806048178, 0.402175957852346 , 0.036485680237902],
[0.402175957852346 , -0.552277151804168 , 0.036485680237902],
[-0.552277151804168, -0.849898806048178, 0.036485680237902],
[-0.849898806048178, -0.552277151804168, 0.036485680237902],
[0.402175957852346 , -0.849898806048178 , 0.036485680237902],
[-0.552277151804168, 0.402175957852346 , 0.036485680237902],
[-0.715156797773234, 0.645862648139714 , 0.020517127472398],
[0.645862648139714 , -0.930705850366480 , 0.020517127472398],
[-0.930705850366480, -0.715156797773234, 0.020517127472398],
[-0.715156797773234, -0.930705850366480, 0.020517127472398],
[0.645862648139714 , -0.715156797773234 , 0.020517127472398],
[-0.930705850366480, 0.645862648139714 , 0.020517127472398],
[-0.869010743834124, 0.848688505241568 , 0.007599857710604],
[0.848688505241568 , -0.979677761407444 , 0.007599857710604],
[-0.979677761407444, -0.869010743834124, 0.007599857710604],
[-0.869010743834124, -0.979677761407444, 0.007599857710604],
[0.848688505241568 , -0.869010743834124 , 0.007599857710604],
[-0.979677761407444, 0.848688505241568 , 0.007599857710604]
])
elif C==19:
zw = np.array([
[-0.333333333333333, -0.333333333333333, 0.066114111083248],
[0.001900928704400 , 0.001900928704400 , 0.001734038371326],
[0.001900928704400 , -1.003801857408800 , 0.001734038371326],
[-1.003801857408800, 0.001900928704400 , 0.001734038371326],
[-0.023574084130543, -0.023574084130543, 0.023320105432896],
[-0.023574084130543, -0.952851831738914, 0.023320105432896],
[-0.952851831738914, -0.023574084130543, 0.023320105432896],
[-0.089726626099435, -0.089726626099435, 0.045753872712842],
[-0.089726626099435, -0.820546727801130, 0.045753872712842],
[-0.820546727801130, -0.089726626099435, 0.045753872712842],
[-0.196007481363421, -0.196007481363421, 0.060897965347876],
[-0.196007481363421, -0.607985037273158, 0.060897965347876],
[-0.607985037273158, -0.196007481363421, 0.060897965347876],
[-0.488214180481157, -0.488214180481157, 0.061249783450710],
[-0.488214180481157, -0.023571639037686, 0.061249783450710],
[-0.023571639037686, -0.488214180481157, 0.061249783450710],
[-0.647023488009788, -0.647023488009788, 0.048736115353600],
[-0.647023488009788, 0.294046976019576 , 0.048736115353600],
[0.294046976019576 , -0.647023488009788 , 0.048736115353600],
[-0.791658289326483, -0.791658289326483, 0.031994864064048],
[-0.791658289326483, 0.583316578652966 , 0.031994864064048],
[0.583316578652966 , -0.791658289326483 , 0.031994864064048],
[-0.893862072318140, -0.893862072318140, 0.015396603631204],
[-0.893862072318140, 0.787724144636280 , 0.015396603631204],
[0.787724144636280 , -0.893862072318140 , 0.015396603631204],
[-0.916762569607942, -0.916762569607942, -0.001264120994976],
[-0.916762569607942, 0.833525139215884 , -0.001264120994976],
[0.833525139215884 , -0.916762569607942 , -0.001264120994976],
[-0.976836157186356, -0.976836157186356, 0.003502268602386],
[-0.976836157186356, 0.953672314372712 , 0.003502268602386],
[0.953672314372712 , -0.976836157186356 , 0.003502268602386],
[-0.310288459541998, 0.212805292212320 , 0.032931678379152],
[0.212805292212320 , -0.902516832670322 , 0.032931678379152],
[-0.902516832670322, -0.310288459541998, 0.032931678379152],
[-0.310288459541998, -0.902516832670322, 0.032931678379152],
[0.212805292212320 , -0.310288459541998 , 0.032931678379152],
[-0.902516832670322, 0.212805292212320 , 0.032931678379152],
[-0.244313460810292, 0.231685228913082 , 0.009678067080970],
[0.231685228913082 , -0.987371768102790 , 0.009678067080970],
[-0.987371768102790, -0.244313460810292, 0.009678067080970],
[-0.244313460810292, -0.987371768102790, 0.009678067080970],
[0.231685228913082 , -0.244313460810292 , 0.009678067080970],
[-0.987371768102790, 0.231685228913082 , 0.009678067080970],
[-0.386729041875286, 0.118096000780590 , 0.051609813069300],
[0.118096000780590 , -0.731366958905304 , 0.051609813069300],
[-0.731366958905304, -0.386729041875286, 0.051609813069300],
[-0.386729041875286, -0.731366958905304, 0.051609813069300],
[0.118096000780590 , -0.386729041875286 , 0.051609813069300],
[-0.731366958905304, 0.118096000780590 , 0.051609813069300],
[-0.501161274450516, 0.473213486525732 , 0.016942182108882],
[0.473213486525732 , -0.972052212075216 , 0.016942182108882],
[-0.972052212075216, -0.501161274450516, 0.016942182108882],
[-0.501161274450516, -0.972052212075216, 0.016942182108882],
[0.473213486525732 , -0.501161274450516 , 0.016942182108882],
[-0.972052212075216, 0.473213486525732 , 0.016942182108882],
[-0.574448550394396, 0.423350284574868 , 0.036709828212560],
[0.423350284574868 , -0.848901734180472 , 0.036709828212560],
[-0.848901734180472, -0.574448550394396, 0.036709828212560],
[-0.574448550394396, -0.848901734180472, 0.036709828212560],
[0.423350284574868 , -0.574448550394396 , 0.036709828212560],
[-0.848901734180472, 0.423350284574868 , 0.036709828212560],
[-0.706069127893522, 0.722805434309974 , 0.001408809355816],
[0.722805434309974 , -1.016736306416454 , 0.001408809355816],
[-1.016736306416454, -0.706069127893522, 0.001408809355816],
[-0.706069127893522, -1.016736306416454, 0.001408809355816],
[0.722805434309974 , -0.706069127893522 , 0.001408809355816],
[-1.016736306416454, 0.722805434309974 , 0.001408809355816],
[-0.724546042342154, 0.671173915824726 , 0.020225369854924],
[0.671173915824726 , -0.946627873482572 , 0.020225369854924],
[-0.946627873482572, -0.724546042342154, 0.020225369854924],
[-0.724546042342154, -0.946627873482572, 0.020225369854924],
[0.671173915824726 , -0.724546042342154 , 0.020225369854924],
[-0.946627873482572, 0.671173915824726 , 0.020225369854924],
[-0.880607781701986, 0.859512343113706 , 0.007147818771900],
[0.859512343113706 , -0.978904561411718 , 0.007147818771900],
[-0.978904561411718, -0.880607781701986, 0.007147818771900],
[-0.880607781701986, -0.978904561411718, 0.007147818771900],
[0.859512343113706 , -0.880607781701986 , 0.007147818771900],
[-0.978904561411718, 0.859512343113706 , 0.007147818771900]
])
else:
raise ValueError('Unknown option for quadrature rule. Opt must be either 0, 1, 2 or 3')
return zw
```
#### File: Florence/QuadratureRules/QuadratureRule.py
```python
import numpy as np
from warnings import warn
from Florence.QuadratureRules import GaussQuadrature
from Florence.QuadratureRules import QuadraturePointsWeightsTet
from Florence.QuadratureRules import QuadraturePointsWeightsTri
from Florence.QuadratureRules import WVQuadraturePointsWeightsQuad
from Florence.QuadratureRules import WVQuadraturePointsWeightsHex
class QuadratureRule(object):
def __init__(self, qtype="gauss", norder=2, mesh_type="tri", optimal=3, flatten=True, evaluate=True):
"""
input:
flatten: [bool] only used for quads and hexes as tensor based
quadrature is not flattened where tabulated values are.
Optimal quadrature points for all element types are in a
flattened representation
"""
self.qtype = qtype
self.norder = norder
self.element_type = mesh_type
self.points = []
self.weights = []
self.flatten = flatten
# OPTIMAL QUADRATURE POINTS FOR TRIS AND TETS
self.optimal = optimal
if evaluate is False:
return
if optimal is False or optimal is None:
self.qtype = None
z=[]; w=[];
if mesh_type == "hex":
if self.optimal==4:
zw = WVQuadraturePointsWeightsHex.WVQuadraturePointsWeightsHex(self.norder)
z = zw[:,:-1]; z=z.reshape(z.shape[0],z.shape[1]); w=zw[:,-1]
else:
z, w = GaussQuadrature(self.norder,-1.,1.)
elif mesh_type == "quad":
if self.optimal==4:
zw = WVQuadraturePointsWeightsQuad.WVQuadraturePointsWeightsQuad(self.norder)
z = zw[:,:-1]; z=z.reshape(z.shape[0],z.shape[1]); w=zw[:,-1]
else:
z, w = GaussQuadrature(self.norder,-1.,1.)
elif mesh_type == "tet":
zw = QuadraturePointsWeightsTet.QuadraturePointsWeightsTet(self.norder,self.optimal)
z = zw[:,:-1]; z=z.reshape(z.shape[0],z.shape[1]); w=zw[:,-1]
elif mesh_type == "tri":
zw = QuadraturePointsWeightsTri.QuadraturePointsWeightsTri(self.norder,self.optimal)
z = zw[:,:-1]; z=z.reshape(z.shape[0],z.shape[1]); w=zw[:,-1]
elif mesh_type == "line":
z, w = GaussQuadrature(self.norder,-1.,1.)
self.points = z
self.weights = w
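# A ONE-DIMENSIONAL TENSOR RULE HAS AS MANY POINT ENTRIES AS WEIGHTS;
# TABULATED (WITHERDEN-VINCENT) RULES ARRIVE ALREADY FLATTENED, SO THE
# CHECK BELOW SKIPS THEM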
if mesh_type == "quad" or mesh_type == "hex":
if z.ravel().shape[0] == w.ravel().shape[0]:
self.Flatten(mesh_type=mesh_type)
def Flatten(self, mesh_type=None):
"""Flateen a quadrature rule given its tensor product form
"""
if mesh_type == "quad":
w = np.zeros((int(self.points.shape[0]**2)))
z = np.zeros((int(self.points.shape[0]**2),2))
counter = 0
for i in range(self.points.shape[0]):
for j in range(self.points.shape[0]):
w[counter] = self.weights[i]*self.weights[j]
z[counter,0] = self.points[i]
z[counter,1] = self.points[j]
counter += 1
elif mesh_type == "hex":
w = np.zeros((int(self.points.shape[0]**3)))
z = np.zeros((int(self.points.shape[0]**3),3))
counter = 0
for i in range(self.points.shape[0]):
for j in range(self.points.shape[0]):
for k in range(self.points.shape[0]):
w[counter] = self.weights[i]*self.weights[j]*self.weights[k]
z[counter,0] = self.points[i]
z[counter,1] = self.points[j]
z[counter,2] = self.points[k]
counter += 1
else:
raise ValueError("Element type not understood")
self.points = z
self.weights = w
def GetRule(self):
return self.__dict__
def SetRule(self, in_dict):
return self.__dict__.update(in_dict)
```
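For orientation, a minimal usage sketch of the class above; this is an assumption-laden example, not repo code: it presumes `QuadratureRule` is importable from its module path and that the tabulated Williams-Shunn files ship under `Tables/tri/`.
```python
from Florence.QuadratureRules.QuadratureRule import QuadratureRule

# Williams-Shunn rule on a triangle (the optimal=1 branch above)
tri_rule = QuadratureRule(norder=3, mesh_type="tri", optimal=1)
print(tri_rule.points.shape, tri_rule.weights.shape)

# Plain Gauss rule on a quad: optimal=None forces the Gauss branch and the
# tensor-product rule is flattened automatically by Flatten()
quad_rule = QuadratureRule(norder=2, mesh_type="quad", optimal=None)
print(quad_rule.points.shape) # (n**2, 2) for n one-dimensional Gauss points
```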
#### File: Florence/Solver/DetachedParallelFEMSolver.py
```python
from copy import deepcopy
from time import time
import numpy as np
from warnings import warn
from .FEMSolver import FEMSolver
from Florence import BoundaryCondition
__all__ = ["DetachedParallelFEMSolver"]
class DetachedParallelFEMSolver(FEMSolver):
def __init__(self, **kwargs):
if 'number_of_partitions' in kwargs.keys():
self.number_of_partitions = kwargs['number_of_partitions']
del kwargs['number_of_partitions']
else:
self.number_of_partitions = 1
if 'fix_interface' in kwargs.keys():
self.fix_interface = kwargs['fix_interface']
del kwargs['fix_interface']
else:
self.fix_interface = False
if 'interface_fixity' in kwargs.keys():
self.interface_fixity = kwargs['interface_fixity']
del kwargs['interface_fixity']
else:
self.interface_fixity = [0,1,2]
if 'force_solution' in kwargs.keys():
self.force_solution = kwargs['force_solution']
del kwargs['force_solution']
else:
self.force_solution = False
if 'do_not_sync' in kwargs.keys():
self.do_not_sync = kwargs['do_not_sync']
del kwargs['do_not_sync']
else:
self.do_not_sync = False
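# DESCRIPTIVE SUMMARY OF THE DETACHED-SOLVER KWARGS PARSED ABOVE:
# number_of_partitions - REQUESTED NUMBER OF MESH PARTITIONS (DEFAULT 1)
# fix_interface - FULLY FIX PARTITION INTERFACE NODES INSTEAD OF APPLYING
# SYMMETRY/ROLLER CONDITIONS ALONG THE INTERFACE NORMALS
# interface_fixity - WHICH DOFs TO FIX AT THE INTERFACE (DEFAULT ALL THREE)
# force_solution - DOWNGRADE CONSISTENCY ERRORS TO WARNINGS AND PROCEED
# do_not_sync - DO NOT MERGE PARTITION SOLUTIONS BACK ONTO THE FULL MESH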
super(DetachedParallelFEMSolver, self).__init__(**kwargs)
def Solve(self, formulation=None, mesh=None,
material=None, boundary_condition=None,
function_spaces=None, solver=None,
contact_formulation=None,
Eulerx=None, Eulerp=None):
from multiprocessing import Process, Pool, Manager, Queue
from contextlib import closing
from Florence.Tensor import in2d
# CHECK DATA CONSISTENCY
#---------------------------------------------------------------------------#
self.parallel = True
function_spaces, solver = self.__checkdata__(material, boundary_condition,
formulation, mesh, function_spaces, solver, contact_formulation=contact_formulation)
# MORE CHECKS
if boundary_condition.neumann_flags is not None:
raise NotImplementedError("Problems with Neumann BCs are not yet supported by the detached solver")
if boundary_condition.applied_neumann is not None:
raise NotImplementedError("Problems with Neumann BCs are not yet supported by the detached solver")
#---------------------------------------------------------------------------#
#---------------------------------------------------------------------------#
self.PrintPreAnalysisInfo(mesh, formulation)
#---------------------------------------------------------------------------#
self.PartitionMeshForParallelFEM(mesh,self.no_of_cpu_cores,formulation.nvar)
pmesh, pelement_indices, pnode_indices, partitioned_maps = self.pmesh, self.pelement_indices, \
self.pnode_indices, self.partitioned_maps
ndim = mesh.InferSpatialDimension()
if ndim == 3:
boundary = mesh.faces
elif ndim == 2:
boundary = mesh.edges
pboundary_conditions = []
for proc in range(self.no_of_cpu_cores):
imesh = pmesh[proc]
if ndim==3:
imesh.GetBoundaryFaces()
unit_outward_normals = imesh.FaceNormals()
else:
imesh.GetBoundaryEdges()
unit_outward_normals = imesh.Normals()
pnodes = pnode_indices[proc]
# APPLY BOUNDARY CONDITION COMING FROM BIG PROBLEM
pboundary_condition = BoundaryCondition()
pboundary_condition.dirichlet_flags = boundary_condition.dirichlet_flags[pnodes,:]
# CHECK IF THERE ARE REGIONS WHERE THE BOUNDARY CONDITION IS NOT APPLIED AT ALL
bc_not_applied = np.isnan(pboundary_condition.dirichlet_flags).all()
if bc_not_applied:
if self.force_solution:
warn("There are regions where BCs will not be applied properly. The detached solution may be incorrect")
else:
raise RuntimeError("There are regions where BCs will not be applied properly. The detached solution may be incorrect")
# FIND PARTITIONED INTERFACES
if ndim == 3:
pboundary = imesh.faces
elif ndim == 2:
pboundary = imesh.edges
pboundary_mapped = pnodes[pboundary]
boundaries_not_in_big_mesh = ~in2d(pboundary_mapped, boundary, consider_sort=True)
normals_of_boundaries_not_in_big_mesh = unit_outward_normals[boundaries_not_in_big_mesh,:]
# IF NORMALS ARE NOT ORIENTED WITH X/Y/Z WE NEED CONTACT FORMULATION
if self.force_solution is False:
for i in range(ndim):
if not np.all(np.logical_or(np.isclose(unit_outward_normals[:,i],0.),
np.isclose(np.abs(unit_outward_normals[:,i]),1.))):
raise RuntimeError("Cannot run detached parallel solver as a contact formulation is needed")
local_interface_boundary = pboundary[boundaries_not_in_big_mesh]
interface_nodes = np.unique(local_interface_boundary)
if self.fix_interface:
# FIXED BC
self.interface_fixity = np.array(self.interface_fixity).ravel()
for i in self.interface_fixity:
pboundary_condition.dirichlet_flags[interface_nodes,i] = 0.
else:
# SYMMETRY BC
symmetry_direction_to_fix_boundaries = np.nonzero(normals_of_boundaries_not_in_big_mesh)[1]
symmetry_nodes_to_fix = local_interface_boundary.ravel()
symmetry_direction_to_fix_nodes = np.repeat(symmetry_direction_to_fix_boundaries,local_interface_boundary.shape[1])
pboundary_condition.dirichlet_flags[symmetry_nodes_to_fix,symmetry_direction_to_fix_nodes] = 0.
# # LOOP APPROACH
# for i in range(local_interface_boundary.shape[0]):
# pboundary_condition.dirichlet_flags[local_interface_boundary[i,:],symmetry_direction_to_fix_boundaries[i]] = 0.
pboundary_conditions.append(pboundary_condition)
# TURN OFF PARALLELISATION
self.parallel = False
if self.save_incremental_solution is True:
fname = deepcopy(self.incremental_solution_filename)
fnames = []
for proc in range(self.no_of_cpu_cores):
fnames.append(fname.split(".")[0]+"_proc"+str(proc))
self.parallel_model = "context_manager"
if self.parallel_model == "context_manager":
procs = []
            manager = Manager(); solutions = manager.dict() # THE MANAGER SPAWNS A SERVER PROCESS HOLDING THE SHARED DICT
for proc in range(self.no_of_cpu_cores):
if self.save_incremental_solution is True:
self.incremental_solution_filename = fnames[proc]
                p = Process(target=self.__DetachedFEMRunner_ContextManager__,
                    args=(formulation, pmesh[proc],
                        material, pboundary_conditions[proc],
                        function_spaces, solver,
                        contact_formulation,
                        Eulerx, Eulerp, proc, solutions))
                procs.append(p)
                p.start()
            for p in procs:
                p.join()
elif self.parallel_model == "pool":
# with closing(Pool(processes=fem_solver.no_of_cpu_cores)) as pool:
# tups = pool.map(super(DetachedParallelFEMSolver, self).Solve,funcs)
# pool.terminate()
raise RuntimeError("Pool based detached parallelism not implemented yet")
elif self.parallel_model == "mpi":
raise RuntimeError("MPI based detached parallelism not implemented yet")
else:
# SERIAL
procs = []
solutions = [0]*self.no_of_cpu_cores
for proc in range(self.no_of_cpu_cores):
if self.save_incremental_solution is True:
self.incremental_solution_filename = fnames[proc]
self.__DetachedFEMRunner_ContextManager__(
formulation, pmesh[proc],
material, pboundary_conditions[proc],
function_spaces, solver,
contact_formulation,
Eulerx, Eulerp, proc, solutions)
if not self.do_not_sync:
# FIND COMMON AVAILABLE SOLUTION ACROSS ALL PARTITIONS
            min_nincr = min(solutions[proc].sol.shape[2] for proc in range(self.no_of_cpu_cores))
TotalDisp = np.zeros((mesh.points.shape[0], formulation.nvar, min_nincr))
for proc in range(self.no_of_cpu_cores):
pnodes = pnode_indices[proc]
TotalDisp[pnodes,:,:] = solutions[proc].sol[:,:,:min_nincr]
return self.__makeoutput__(mesh, TotalDisp, formulation, function_spaces, material)
else:
return self.__makeoutput__(mesh, np.zeros_like(mesh.points), formulation, function_spaces, material)
def __DetachedFEMRunner_ContextManager__(self, formulation, mesh,
material, boundary_condition,
function_spaces, solver,
contact_formulation,
Eulerx, Eulerp, proc, solutions):
solution = super(DetachedParallelFEMSolver, self).Solve(formulation=formulation, mesh=mesh,
material=material, boundary_condition=boundary_condition,
function_spaces=function_spaces, solver=solver,
contact_formulation=contact_formulation,
Eulerx=Eulerx, Eulerp=Eulerp)
solutions[proc] = solution
def __DetachedFEMRunner_Pool__(self, formulation, mesh,
material, boundary_condition,
function_spaces, solver,
contact_formulation,
Eulerx, Eulerp):
solution = super(DetachedParallelFEMSolver, self).Solve(formulation=formulation, mesh=mesh,
material=material, boundary_condition=boundary_condition,
function_spaces=function_spaces, solver=solver,
contact_formulation=contact_formulation,
            Eulerx=Eulerx, Eulerp=Eulerp)
        return solution
```
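
The `context_manager` path above fans each partitioned problem out to a worker process that writes its solution into a shared `Manager` dict keyed by partition index, after which the parent joins all workers and gathers. A minimal, self-contained sketch of that pattern, where `run_partition` and `n_workers` are illustrative stand-ins for the detached runner and the core count:
```python
from multiprocessing import Process, Manager

def run_partition(proc, solutions):
    # stand-in for __DetachedFEMRunner_ContextManager__: solve the local
    # partition and record the result under this worker's index
    solutions[proc] = proc**2

if __name__ == "__main__":
    n_workers = 4
    manager = Manager()
    solutions = manager.dict()          # proxy dict shared across processes
    procs = []
    for proc in range(n_workers):
        p = Process(target=run_partition, args=(proc, solutions))
        procs.append(p)
        p.start()
    for p in procs:
        p.join()
    # gather in partition order, as the synchronisation step above does
    print([solutions[proc] for proc in range(n_workers)])    # [0, 1, 4, 9]
```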
#### File: Florence/Solver/FEMSolverArcLength.py
```python
from __future__ import print_function
import gc, os, sys
import multiprocessing
from copy import deepcopy
from warnings import warn
from time import time
import numpy as np
import numpy.linalg as la
from numpy.linalg import norm
import scipy as sp
from Florence.Utils import insensitive
from Florence.FiniteElements.Assembly import Assemble
from Florence.PostProcessing import *
from Florence.Solver import LinearSolver
from Florence.TimeIntegrators import StructuralDynamicIntegrators
from Florence import Mesh, FEMSolver
# class FEMSolverArcLength(FEMSolver):
# def __init__(self):
# pass
def StaticSolverArcLength(self, function_spaces, formulation, solver, K,
NeumannForces, NodalForces, Residual,
mesh, TotalDisp, Eulerx, Eulerp, material, boundary_condition):
LoadIncrement = self.number_of_load_increments
# LoadFactor = 1./LoadIncrement
AppliedDirichletInc = np.zeros(boundary_condition.applied_dirichlet.shape[0],dtype=np.float64)
# self.incremental_load_factor = 0.
self.incremental_load_factor = 1./LoadIncrement
self.accumulated_load_factor = 0.
self.arc_length_scaling_factor = 1.0
for Increment in range(LoadIncrement):
# APPLY NEUMANN BOUNDARY CONDITIONS
DeltaF = self.incremental_load_factor*NeumannForces
NodalForces += DeltaF
        # OBTAIN INCREMENTAL RESIDUAL - CONTRIBUTION FROM BOTH NEUMANN AND DIRICHLET
Residual = -boundary_condition.ApplyDirichletGetReducedMatrices(K,Residual,
boundary_condition.applied_dirichlet,LoadFactor=self.incremental_load_factor,only_residual=True)
Residual -= DeltaF
# GET THE INCREMENTAL DISPLACEMENT
AppliedDirichletInc = self.incremental_load_factor*boundary_condition.applied_dirichlet
t_increment = time()
# LET NORM OF THE FIRST RESIDUAL BE THE NORM WITH RESPECT TO WHICH WE
# HAVE TO CHECK THE CONVERGENCE OF NEWTON RAPHSON. TYPICALLY THIS IS
# NORM OF NODAL FORCES
if Increment==0:
self.NormForces = np.linalg.norm(Residual)
# AVOID DIVISION BY ZERO
if np.isclose(self.NormForces,0.0):
self.NormForces = 1e-14
self.norm_residual = np.linalg.norm(Residual)/self.NormForces
Eulerx, Eulerp, K, Residual = NewtonRaphsonArchLength(self, function_spaces, formulation, solver,
Increment, K, NodalForces, Residual, mesh, Eulerx, Eulerp,
material, boundary_condition, AppliedDirichletInc, NeumannForces, TotalDisp)
# UPDATE DISPLACEMENTS FOR THE CURRENT LOAD INCREMENT
TotalDisp[:,:formulation.ndim,Increment] = Eulerx - mesh.points
if formulation.fields == "electro_mechanics":
TotalDisp[:,-1,Increment] = Eulerp
# PRINT LOG IF ASKED FOR
if self.print_incremental_log:
dmesh = Mesh()
dmesh.points = TotalDisp[:,:formulation.ndim,Increment]
dmesh_bounds = dmesh.Bounds
if formulation.fields == "electro_mechanics":
_bounds = np.zeros((2,formulation.nvar))
_bounds[:,:formulation.ndim] = dmesh_bounds
_bounds[:,-1] = [TotalDisp[:,-1,Increment].min(),TotalDisp[:,-1,Increment].max()]
print("\nMinimum and maximum incremental solution values at increment {} are \n".format(Increment),_bounds)
else:
print("\nMinimum and maximum incremental solution values at increment {} are \n".format(Increment),dmesh_bounds)
# SAVE INCREMENTAL SOLUTION IF ASKED FOR
if self.save_incremental_solution:
from scipy.io import savemat
if self.incremental_solution_filename is not None:
savemat(self.incremental_solution_filename+"_"+str(Increment),{'solution':TotalDisp[:,:,Increment]},do_compression=True)
else:
raise ValueError("No file name provided to save incremental solution")
print('\nFinished Load increment', Increment, 'in', time()-t_increment, 'seconds')
try:
print('Norm of Residual is',
np.abs(la.norm(Residual[boundary_condition.columns_in])/self.NormForces), '\n')
except RuntimeWarning:
print("Invalid value encountered in norm of Newton-Raphson residual")
# STORE THE INFORMATION IF NEWTON-RAPHSON FAILS
if self.newton_raphson_failed_to_converge:
            solver.condA = np.nan
TotalDisp = TotalDisp[:,:,:Increment]
self.number_of_load_increments = Increment
break
        # BREAK AT A SPECIFIED LOAD INCREMENT IF ASKED FOR
if self.break_at_increment != -1 and self.break_at_increment is not None:
if self.break_at_increment == Increment:
if self.break_at_increment < LoadIncrement - 1:
print("\nStopping at increment {} as specified\n\n".format(Increment))
TotalDisp = TotalDisp[:,:,:Increment]
break
return TotalDisp
def NewtonRaphsonArchLength(self, function_spaces, formulation, solver,
Increment, K, NodalForces, Residual, mesh, Eulerx, Eulerp, material,
boundary_condition, AppliedDirichletInc, NeumannForces, TotalDisp):
Tolerance = self.newton_raphson_tolerance
LoadIncrement = self.number_of_load_increments
Iter = 0
# APPLY INCREMENTAL DIRICHLET PER LOAD STEP (THIS IS INCREMENTAL NOT ACCUMULATIVE)
IncDirichlet = boundary_condition.UpdateFixDoFs(AppliedDirichletInc,
K.shape[0],formulation.nvar)
# UPDATE EULERIAN COORDINATE
Eulerx += IncDirichlet[:,:formulation.ndim]
Eulerp += IncDirichlet[:,-1]
# Predictor
if Increment == 0:
# GET THE REDUCED SYSTEM OF EQUATIONS
# K_b, F_b = boundary_condition.GetReducedMatrices(K,self.accumulated_load_factor*NeumannForces)[:2]
K_b, F_b = boundary_condition.GetReducedMatrices(K,NeumannForces)[:2]
# SOLVE THE SYSTEM
sol = solver.Solve(K_b,F_b)
# GET ITERATIVE SOLUTION
dU = boundary_condition.UpdateFreeDoFs(sol,K.shape[0],formulation.nvar)
# self.incremental_load_factor = 1./LoadIncrement
else:
dU = TotalDisp[:,:,Increment-1]*self.arc_length_scaling_factor
self.incremental_load_factor *= self.arc_length_scaling_factor
self.accumulated_load_factor += self.incremental_load_factor
# UPDATE THE EULERIAN COMPONENTS
Eulerx += dU[:,:formulation.ndim]
Eulerp += dU[:,-1]
while self.norm_residual > Tolerance or Iter==0:
# GET THE REDUCED SYSTEM OF EQUATIONS
K_b, F_b = boundary_condition.GetReducedMatrices(K,NeumannForces)[:2]
# SOLVE THE SYSTEM
sol = solver.Solve(K_b,F_b)
# GET ITERATIVE SOLUTION
dU1 = boundary_condition.UpdateFreeDoFs(sol,K.shape[0],formulation.nvar)
# GET THE REDUCED SYSTEM OF EQUATIONS
K_b, F_b = boundary_condition.GetReducedMatrices(K,Residual)[:2]
# SOLVE THE SYSTEM
sol = solver.Solve(K_b,-F_b)
# GET ITERATIVE SOLUTION
dU2 = boundary_condition.UpdateFreeDoFs(sol,K.shape[0],formulation.nvar)
iterative_load_factor = -np.dot(dU.flatten(),dU2.flatten())/np.dot(dU.flatten(),dU1.flatten())
ddU = iterative_load_factor*dU1 + dU2
# print(ddlam)
# dU = dU2
# UPDATE THE EULERIAN COMPONENTS
self.incremental_load_factor += iterative_load_factor
self.accumulated_load_factor += iterative_load_factor
dU[:,:] += ddU[:,:]
Eulerx += ddU[:,:formulation.ndim]
Eulerp += ddU[:,-1]
# Eulerx += dU[:,:formulation.ndim]
# Eulerp += dU[:,-1]
# print(self.accumulated_load_factor)
# RE-ASSEMBLE - COMPUTE INTERNAL TRACTION FORCES
K, TractionForces = Assemble(self, function_spaces[0], formulation, mesh, material,
Eulerx,Eulerp)[:2]
# FIND THE RESIDUAL
# Residual[boundary_condition.columns_in] = TractionForces[boundary_condition.columns_in] -\
# NodalForces[boundary_condition.columns_in] - NeumannForces[boundary_condition.columns_in]*self.accumulated_load_factor
Residual[boundary_condition.columns_in] = TractionForces[boundary_condition.columns_in] -\
NeumannForces[boundary_condition.columns_in]*self.accumulated_load_factor
# SAVE THE NORM
self.rel_norm_residual = la.norm(Residual[boundary_condition.columns_in])
if Iter==0:
self.NormForces = la.norm(Residual[boundary_condition.columns_in])
self.norm_residual = np.abs(la.norm(Residual[boundary_condition.columns_in])/self.NormForces)
# SAVE THE NORM
self.NRConvergence['Increment_'+str(Increment)] = np.append(self.NRConvergence['Increment_'+str(Increment)],\
self.norm_residual)
print("Iteration {} for increment {}.".format(Iter, Increment) +\
" Residual (abs) {0:>16.7g}".format(self.rel_norm_residual),
"\t Residual (rel) {0:>16.7g}".format(self.norm_residual))
if np.abs(self.rel_norm_residual) < Tolerance:
break
# UPDATE ITERATION NUMBER
Iter +=1
self.arc_length_scaling_factor = 0.5**(0.25*(Iter-5))
if Iter==self.maximum_iteration_for_newton_raphson and formulation.fields == "electro_mechanics":
# raise StopIteration("\n\nNewton Raphson did not converge! Maximum number of iterations reached.")
warn("\n\nNewton Raphson did not converge! Maximum number of iterations reached.")
self.newton_raphson_failed_to_converge = True
break
if Iter==self.maximum_iteration_for_newton_raphson:
self.newton_raphson_failed_to_converge = True
break
if np.isnan(self.norm_residual) or self.norm_residual>1e06:
self.newton_raphson_failed_to_converge = True
break
# USER DEFINED CRITERIA TO BREAK OUT OF NEWTON-RAPHSON
        if self.user_defined_break_func is not None:
if self.user_defined_break_func(Increment,Iter,self.norm_residual,self.rel_norm_residual, Tolerance):
break
# USER DEFINED CRITERIA TO STOP NEWTON-RAPHSON AND THE WHOLE ANALYSIS
        if self.user_defined_stop_func is not None:
if self.user_defined_stop_func(Increment,Iter,self.norm_residual,self.rel_norm_residual, Tolerance):
self.newton_raphson_failed_to_converge = True
break
return Eulerx, Eulerp, K, Residual
# def NewtonRaphsonArchLength(self, function_spaces, formulation, solver,
# Increment, K, NodalForces, Residual, mesh, Eulerx, Eulerp, material,
# boundary_condition, AppliedDirichletInc, DeltaF, TotalDisp):
# Tolerance = self.newton_raphson_tolerance
# LoadIncrement = self.number_of_load_increments
# LoadFactor = 1./LoadIncrement
# accumulated_load_factor = Increment/LoadIncrement
# Iter = 0
# dL = 1.
# psi = 1.
# # NodalForces = DeltaF
# Dlam = 0.
# dU = np.zeros((mesh.points.shape[0],formulation.nvar))
# dU_b = np.zeros((mesh.points.shape[0],formulation.nvar))
# # SOLVE WITH INCREMENTAL LOAD
# K_b, DF_b = boundary_condition.GetReducedMatrices(K,NodalForces)[:2]
# dU_t = solver.Solve(K_b,DF_b)
# dU_t = boundary_condition.UpdateFreeDoFs(dU_t,K.shape[0],formulation.nvar)
# # print(NodalForces)
# # dU = IncDirichlet
# # GET TOTAL ITERATIVE SOLUTION
# # dU = dU_actual + LoadFactor*dU_current
# # GET ARC LENGTH QUADRATIC EQUATIONS COEFFICIENTS
# # c1 = np.dot(dU.ravel(),dU.ravel()) + psi**2 * np.dot(DeltaF.ravel(),DeltaF.ravel())
# # c2 = 2.*np.dot(DU.ravel()+dU_actual.ravel(),dU_current.ravel()) + 2.*psi**2 * LoadFactor * np.dot(DeltaF.ravel(),DeltaF.ravel())
# # c3 = np.dot((DU+dU_actual).ravel(),(DU+dU_actual).ravel()) + psi**2 * LoadFactor**2 * np.dot(DeltaF.ravel(),DeltaF.ravel()) - dL**2
# # coeffs = [c1,c2,c3]
# # c1 = np.dot(dU_t.ravel(),dU_t.ravel()) + psi**2 * np.dot(NodalForces.ravel(),NodalForces.ravel())
# # c2 = 2.*np.dot(dU.ravel()+dU_b.ravel(),dU_t.ravel()) + 2.*psi**2 * Dlam * np.dot(NodalForces.ravel(),NodalForces.ravel())
# # c3 = np.dot((dU+dU_b).ravel(),(dU+dU_b).ravel()) + psi**2 * Dlam**2 * np.dot(NodalForces.ravel(),NodalForces.ravel()) - dL**2
# # coeffs = [c1,c2,c3]
# # # FIND THE NEW LOAD FACTOR
# # dlams = np.roots(coeffs)
# # dlam = np.real(dlams.max())
# # # print(c1,c2,c3,dlams, dlam)
# # # CORRECTOR
# # dU_iter = dU_b + dlam*dU_t
# # # print (dU_iter)
# # # exit()
# # APPLY INCREMENTAL DIRICHLET PER LOAD STEP (THIS IS INCREMENTAL NOT ACCUMULATIVE)
# IncDirichlet = boundary_condition.UpdateFixDoFs(AppliedDirichletInc,
# K.shape[0],formulation.nvar)
# # UPDATE EULERIAN COORDINATE
# Eulerx += IncDirichlet[:,:formulation.ndim]
# Eulerp += IncDirichlet[:,-1]
# # Eulerx += IncDirichlet[:,:formulation.ndim] + dU_iter[:,:formulation.ndim]
# # Eulerp += IncDirichlet[:,-1] + dU_iter[:,-1]
# # accumulated_load_factor += dlam
# # if Increment>0:
# # DU = TotalDisp[:,:,Increment] - TotalDisp[:,:,Increment-1]
# # else:
# # DU = np.zeros((mesh.points.shape[0],formulation.nvar))
# # DU = np.zeros((mesh.points.shape[0],formulation.nvar))
# while self.norm_residual > Tolerance or Iter==0:
# # GET THE REDUCED SYSTEM OF EQUATIONS
# K_b, F_b = boundary_condition.GetReducedMatrices(K,Residual)[:2]
# # SOLVE THE SYSTEM
# sol = solver.Solve(K_b,-F_b)
# # GET ITERATIVE SOLUTION
# # dU_b = boundary_condition.UpdateFreeDoFs(sol,K.shape[0],formulation.nvar)
# dU = boundary_condition.UpdateFreeDoFs(sol,K.shape[0],formulation.nvar)
# # print(dlams)
# # exit()
# # LoadFactor += np.real(np.max(dlams))
# # print(LoadFactor)
# c1 = np.dot(dU_t.ravel(),dU_t.ravel()) + psi**2 * np.dot(NodalForces.ravel(),NodalForces.ravel())
# c2 = 2.*np.dot(dU.ravel()+dU_b.ravel(),dU_t.ravel()) + 2.*psi**2 * Dlam * np.dot(NodalForces.ravel(),NodalForces.ravel())
# c3 = np.dot((dU+dU_b).ravel(),(dU+dU_b).ravel()) + psi**2 * Dlam**2 * np.dot(NodalForces.ravel(),NodalForces.ravel()) - dL**2
# coeffs = [c1,c2,c3]
# # FIND THE NEW LOAD FACTOR
# dlams = np.roots(coeffs)
# dlam = np.real(dlams.max())
# print(dlam)
# # CORRECTOR
# dU_iter = dU_b + dlam*dU_t
# accumulated_load_factor += dlam
# # UPDATE THE EULERIAN COMPONENTS
# Eulerx += dU[:,:formulation.ndim]
# Eulerp += dU[:,-1]
# # Eulerx += dU_iter[:,:formulation.ndim]
# # Eulerp += dU_iter[:,-1]
# # RE-ASSEMBLE - COMPUTE INTERNAL TRACTION FORCES
# K, TractionForces = Assemble(self, function_spaces[0], formulation, mesh, material,
# Eulerx,Eulerp)[:2]
# # FIND THE RESIDUAL
# Residual[boundary_condition.columns_in] = TractionForces[boundary_condition.columns_in] -\
# NodalForces[boundary_condition.columns_in]
# # SAVE THE NORM
# self.rel_norm_residual = la.norm(Residual[boundary_condition.columns_in])
# if Iter==0:
# self.NormForces = la.norm(Residual[boundary_condition.columns_in])
# self.norm_residual = np.abs(la.norm(Residual[boundary_condition.columns_in])/self.NormForces)
# # SAVE THE NORM
# self.NRConvergence['Increment_'+str(Increment)] = np.append(self.NRConvergence['Increment_'+str(Increment)],\
# self.norm_residual)
# print("Iteration {} for increment {}.".format(Iter, Increment) +\
# " Residual (abs) {0:>16.7g}".format(self.rel_norm_residual),
# "\t Residual (rel) {0:>16.7g}".format(self.norm_residual))
# if np.abs(self.rel_norm_residual) < Tolerance:
# break
# # UPDATE ITERATION NUMBER
# Iter +=1
# if Iter==self.maximum_iteration_for_newton_raphson and formulation.fields == "electro_mechanics":
# # raise StopIteration("\n\nNewton Raphson did not converge! Maximum number of iterations reached.")
# warn("\n\nNewton Raphson did not converge! Maximum number of iterations reached.")
# self.newton_raphson_failed_to_converge = True
# break
# if Iter==self.maximum_iteration_for_newton_raphson:
# self.newton_raphson_failed_to_converge = True
# break
# if np.isnan(self.norm_residual) or self.norm_residual>1e06:
# self.newton_raphson_failed_to_converge = True
# break
# # USER DEFINED CRITERIA TO BREAK OUT OF NEWTON-RAPHSON
# if self.user_defined_break_func != None:
# if self.user_defined_break_func(Increment,Iter,self.norm_residual,self.rel_norm_residual, Tolerance):
# break
# # USER DEFINED CRITERIA TO STOP NEWTON-RAPHSON AND THE WHOLE ANALYSIS
# if self.user_defined_stop_func != None:
# if self.user_defined_stop_func(Increment,Iter,self.norm_residual,self.rel_norm_residual, Tolerance):
# self.newton_raphson_failed_to_converge = True
# break
# return Eulerx, Eulerp, K, Residual
```
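
The corrector in `NewtonRaphsonArchLength` above splits each iteration into a tangential solve `dU1` (driven by the external load) and a residual solve `dU2`, then picks the iterative load factor so that the combined correction is orthogonal to the accumulated increment `dU`. A short numeric sketch of that update; all arrays are illustrative stand-ins, not solver output:
```python
import numpy as np

dU  = np.array([1.0, 2.0, 0.5])        # accumulated incremental displacement
dU1 = np.array([0.8, 1.9, 0.4])        # displacement due to external load
dU2 = np.array([0.01, -0.02, 0.005])   # displacement due to the residual

# iterative_load_factor = -(dU . dU2)/(dU . dU1), cf. the corrector above
lam = -np.dot(dU, dU2)/np.dot(dU, dU1)
ddU = lam*dU1 + dU2

# the combined correction is orthogonal to the accumulated increment
assert np.isclose(np.dot(dU, ddU), 0.0)
```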
#### File: Florence/TimeIntegrators/ImplicitStructuralDynamicIntegrator.py
```python
from __future__ import print_function
import gc, os, sys
import numpy as np
import scipy as sp
import numpy.linalg as la
import scipy.linalg as sla
from numpy.linalg import norm
from time import time
from copy import deepcopy
from warnings import warn
from Florence.FiniteElements.Assembly import Assemble
from Florence import Mesh
from Florence.PostProcessing import PostProcess
from .StructuralDynamicIntegrator import StructuralDynamicIntegrator
__all__ = ["NonlinearImplicitStructuralDynamicIntegrator", "LinearImplicitStructuralDynamicIntegrator"]
class NonlinearImplicitStructuralDynamicIntegrator(StructuralDynamicIntegrator):
"""Implicit dynamic solver for nonlinear problems based on Newmark's beta
"""
def __init__(self):
super(NonlinearImplicitStructuralDynamicIntegrator, self).__init__()
self.gamma = 0.5
self.beta = 0.25
def Solver(self, function_spaces, formulation, solver,
K, M, NeumannForces, NodalForces, Residual,
mesh, TotalDisp, Eulerx, Eulerp, material, boundary_condition, fem_solver):
# COMPUTE DAMPING MATRIX BASED ON MASS
D = 0.0
if fem_solver.include_physical_damping:
D = fem_solver.damping_factor*M
        # GET BOUNDARY CONDITIONS INFORMATION
self.GetBoundaryInfo(mesh, formulation, boundary_condition)
if formulation.fields == "electro_mechanics":
M_mech = M[self.mechanical_dofs,:][:,self.mechanical_dofs]
if fem_solver.include_physical_damping:
D_mech = D[self.mechanical_dofs,:][:,self.mechanical_dofs]
# INITIALISE VELOCITY AND ACCELERATION
velocities = np.zeros((mesh.points.shape[0],formulation.ndim))
accelerations = np.zeros((mesh.points.shape[0],formulation.ndim))
# COMPUTE INITIAL ACCELERATION FOR TIME STEP 0
if NeumannForces.ndim == 2 and NeumannForces.shape[1]>1:
InitResidual = Residual - NeumannForces[:,0][:,None]
else:
InitResidual = Residual
if formulation.fields == "electro_mechanics":
accelerations[:,:] = solver.Solve(M_mech, -InitResidual[self.mechanical_dofs].ravel()
).reshape(mesh.points.shape[0],formulation.ndim)
else:
accelerations[:,:] = solver.Solve(M, -InitResidual.ravel() ).reshape(mesh.points.shape[0],formulation.ndim)
self.NRConvergence = fem_solver.NRConvergence
LoadIncrement = fem_solver.number_of_load_increments
LoadFactor = fem_solver.total_time/LoadIncrement
AppliedDirichletInc = np.zeros(boundary_condition.applied_dirichlet.shape[0],dtype=np.float64)
save_counter = 1
nincr_last = float(LoadIncrement-1) if LoadIncrement !=1 else 1
if boundary_condition.compound_dirichlet_bcs:
ChangedTotalDisp = np.zeros((mesh.nnode, formulation.nvar))
# TIME LOOP
for Increment in range(1,LoadIncrement):
t_increment = time()
# GET INCREMENTAL DIRICHLET BC
if not boundary_condition.has_step_wise_dirichlet_loading:
if boundary_condition.applied_dirichlet.ndim == 2:
AppliedDirichletInc = boundary_condition.applied_dirichlet[:,Increment]
else:
if boundary_condition.make_loading == "ramp":
AppliedDirichletInc = boundary_condition.applied_dirichlet*(1.*Increment/LoadIncrement)
else:
AppliedDirichletInc = boundary_condition.applied_dirichlet/nincr_last
else:
boundary_condition.ApplyStepWiseDirichletFunc(formulation, mesh, increment=Increment)
self.GetBoundaryInfo(mesh, formulation, boundary_condition, increment=Increment)
AppliedDirichletInc = boundary_condition.applied_dirichlet
if self.bc_changed_at_this_step and boundary_condition.compound_dirichlet_bcs:
ChangedTotalDisp += np.copy(U)
            # GET INCREMENTAL NEUMANN BC
if not boundary_condition.has_step_wise_neumann_loading:
if NeumannForces.ndim == 2 and NeumannForces.shape[1]>1:
NodalForces = NeumannForces[:,Increment][:,None]
else:
if boundary_condition.make_loading == "ramp":
NodalForces = NeumannForces*(1.*Increment/LoadIncrement)
else:
NodalForces = NeumannForces/nincr_last
else:
NodalForces = boundary_condition.ApplyStepWiseNeumannFunc(formulation, mesh,
material, increment=Increment)
NodalForces = NodalForces.ravel()[:,None]
            # OBTAIN INCREMENTAL RESIDUAL - CONTRIBUTION FROM BOTH NEUMANN AND DIRICHLET
            # OLD WAY - RESIDUAL WAS GETTING CARRIED OVER FROM THE PREVIOUS NR STEP, BUT AT THAT
            # POINT IT WAS SO TINY (AS NR HAD CONVERGED) THAT IT DIDN'T MATTER AND WORKED AS EXPECTED
# Residual = -boundary_condition.ApplyDirichletGetReducedMatrices(K,Residual,
# AppliedDirichletInc,LoadFactor=1.0,mass=M,only_residual=True)
# ACTUAL WAY
Residual = -boundary_condition.ApplyDirichletGetReducedMatrices(K,np.zeros_like(Residual),
AppliedDirichletInc,LoadFactor=1.0,mass=M,only_residual=True)
Residual -= NodalForces
# COMPUTE INITIAL ACCELERATION - ONLY NEEDED IN CASES OF PRESTRETCHED CONFIGURATIONS
# accelerations[:,:] = solver.Solve(M, Residual.ravel() - \
# K.dot(TotalDisp[:,:,Increment].ravel())).reshape(mesh.points.shape[0],formulation.nvar)
# LET NORM OF THE FIRST RESIDUAL BE THE NORM WITH RESPECT TO WHICH WE
# HAVE TO CHECK THE CONVERGENCE OF NEWTON RAPHSON. TYPICALLY THIS IS
# NORM OF NODAL FORCES
if Increment==1:
self.NormForces = np.linalg.norm(Residual)
# AVOID DIVISION BY ZERO
if np.isclose(self.NormForces,0.0):
self.NormForces = 1e-14
self.norm_residual = np.linalg.norm(Residual)/self.NormForces
Eulerx, Eulerp, K, Residual, velocities, accelerations = self.NewtonRaphson(function_spaces, formulation, solver,
Increment, K, D, M, NodalForces, Residual, mesh, Eulerx, Eulerp,
material,boundary_condition,AppliedDirichletInc, fem_solver, velocities, accelerations)
# UPDATE DISPLACEMENTS FOR THE CURRENT LOAD INCREMENT
U = np.zeros((mesh.points.shape[0], formulation.nvar))
U[:,:formulation.ndim] = Eulerx - mesh.points
if formulation.fields == "electro_mechanics":
U[:,-1] = Eulerp
# SAVE RESULTS
if Increment % fem_solver.save_frequency == 0 or\
(Increment == LoadIncrement - 1 and save_counter<TotalDisp.shape[2]):
TotalDisp[:,:,save_counter] = U
if boundary_condition.compound_dirichlet_bcs:
TotalDisp[:,:,save_counter] += ChangedTotalDisp
save_counter += 1
# COMPUTE DISSIPATION OF ENERGY THROUGH TIME
if fem_solver.compute_energy_dissipation:
energy_info = self.ComputeEnergyDissipation(function_spaces[0], mesh, material, formulation, fem_solver,
Eulerx, U, NodalForces, M, velocities)
formulation.energy_dissipation.append(energy_info[0])
formulation.internal_energy.append(energy_info[1])
formulation.kinetic_energy.append(energy_info[2])
formulation.external_energy.append(energy_info[3])
# COMPUTE DISSIPATION OF LINEAR MOMENTUM THROUGH TIME
if fem_solver.compute_linear_momentum_dissipation:
power_info = self.ComputePowerDissipation(function_spaces[0], mesh, material, formulation, fem_solver,
Eulerx, U, NodalForces, M, velocities, accelerations)
formulation.power_dissipation.append(power_info[0])
formulation.internal_power.append(power_info[1])
formulation.kinetic_power.append(power_info[2])
formulation.external_power.append(power_info[3])
# LOG IF ASKED FOR
self.LogSave(fem_solver, formulation, U[:,:formulation.ndim], Eulerp, Increment)
print('\nFinished Load increment', Increment, 'in', time()-t_increment, 'seconds')
try:
print('Norm of Residual is',
np.abs(la.norm(Residual[boundary_condition.columns_in])/self.NormForces), '\n')
except RuntimeWarning:
print("Invalid value encountered in norm of Newton-Raphson residual")
# STORE THE INFORMATION IF NEWTON-RAPHSON FAILS
if fem_solver.newton_raphson_failed_to_converge:
                solver.condA = np.nan
TotalDisp = TotalDisp[:,:,:save_counter-1]
fem_solver.number_of_load_increments = save_counter - 1
break
            # BREAK AT A SPECIFIED LOAD INCREMENT IF ASKED FOR
if fem_solver.break_at_increment != -1 and fem_solver.break_at_increment is not None:
if fem_solver.break_at_increment == Increment:
if fem_solver.break_at_increment < LoadIncrement - 1:
print("\nStopping at increment {} as specified\n\n".format(Increment))
TotalDisp = TotalDisp[:,:,:save_counter]
fem_solver.number_of_load_increments = save_counter
break
if fem_solver.save_frequency != 1:
if TotalDisp.shape[2] > save_counter:
# IN CASE SOLVER BLEW UP
TotalDisp = TotalDisp[:,:,:save_counter]
fem_solver.number_of_load_increments = TotalDisp.shape[2]
else:
fem_solver.number_of_load_increments = save_counter
return TotalDisp
def NewtonRaphson(self, function_spaces, formulation, solver,
Increment, K, D, M, NodalForces, Residual, mesh, Eulerx, Eulerp, material,
boundary_condition, AppliedDirichletInc, fem_solver, velocities, accelerations):
Tolerance = fem_solver.newton_raphson_tolerance
LoadIncrement = fem_solver.number_of_load_increments
LoadFactor = fem_solver.total_time/fem_solver.number_of_load_increments
Iter = 0
self.iterative_norm_history = []
# EulerxPrev = np.copy(Eulerx)
# EulerVPrev = np.copy(velocities[:,:,Increment-1])
# EulerAPrev = np.copy(accelerations[:,:,Increment-1])
# PREDICTOR STEP
tmpV = (1. - self.gamma/self.beta)*velocities + (1. - self.gamma/2./self.beta)*LoadFactor*accelerations
tmpA = (-1./self.beta/LoadFactor)*velocities - (1./2./self.beta)*(1.- 2.*self.beta)*accelerations
velocities = tmpV
accelerations = tmpA
if formulation.fields == "electro_mechanics":
M_mech = M[self.mechanical_dofs,:][:,self.mechanical_dofs]
InertiaResidual = np.zeros((Residual.shape[0],1))
InertiaResidual[self.mechanical_dofs,0] = M_mech.dot(accelerations.ravel())
if fem_solver.include_physical_damping:
D_mech = D[self.mechanical_dofs,:][:,self.mechanical_dofs]
InertiaResidual[self.mechanical_dofs,0] += D_mech.dot(velocities.ravel())
else:
InertiaResidual = np.zeros((Residual.shape[0],1))
InertiaResidual[:,0] = M.dot(accelerations.ravel())
if fem_solver.include_physical_damping:
InertiaResidual[:,0] += D.dot(velocities.ravel())
Residual[boundary_condition.columns_in] += InertiaResidual[boundary_condition.columns_in]
# APPLY INCREMENTAL DIRICHLET PER LOAD STEP (THIS IS INCREMENTAL NOT ACCUMULATIVE)
IncDirichlet = boundary_condition.UpdateFixDoFs(AppliedDirichletInc,
K.shape[0],formulation.nvar)
# UPDATE EULERIAN COORDINATE
# Eulerx += IncDirichlet[:,:formulation.ndim]
Eulerx[:,:] = mesh.points + IncDirichlet[:,:formulation.ndim]
Eulerp[:] = IncDirichlet[:,-1] # ENSURES Eulerp IS CONTIGUOUS - NECESSARY FOR LOW-LEVEL DISPATCHER
while np.abs(la.norm(Residual[boundary_condition.columns_in])/self.NormForces) > Tolerance or Iter==0:
# GET EFFECTIVE STIFFNESS
# K += (1./self.beta/LoadFactor**2)*M
K += (self.gamma/self.beta/LoadFactor)*D + (1./self.beta/LoadFactor**2)*M
# GET THE REDUCED SYSTEM OF EQUATIONS
K_b, F_b, _ = boundary_condition.GetReducedMatrices(K,Residual)
# SOLVE THE SYSTEM
sol = solver.Solve(K_b,-F_b)
# GET ITERATIVE SOLUTION
dU = boundary_condition.UpdateFreeDoFs(sol,K.shape[0],formulation.nvar)
# UPDATE THE EULERIAN COMPONENTS
# UPDATE THE GEOMETRY
Eulerx += dU[:,:formulation.ndim]
# GET ITERATIVE ELECTRIC POTENTIAL
Eulerp += dU[:,-1]
# UPDATE VELOCITY AND ACCELERATION
velocities += self.gamma/self.beta/LoadFactor*dU[:,:formulation.ndim]
accelerations += 1./self.beta/LoadFactor**2*dU[:,:formulation.ndim]
# OR ALTERNATIVELY
# dumA = 1./self.beta/LoadFactor**2*(Eulerx - EulerxPrev) -\
# 1./self.beta/LoadFactor*(EulerVPrev) -\
# 1./2./self.beta*(1. - 2.*self.beta)*(EulerAPrev)
# dumV = (1. - self.gamma/self.beta)*(EulerVPrev) +\
# (1. - self.gamma/2./self.beta)*LoadFactor*(EulerAPrev) +\
# self.gamma/self.beta/LoadFactor*(Eulerx - EulerxPrev)
# velocities = dumV
# accelerations = dumA
# RE-ASSEMBLE - COMPUTE STIFFNESS AND INTERNAL TRACTION FORCES
K, TractionForces, _, _ = Assemble(fem_solver,function_spaces[0], formulation, mesh, material,
Eulerx, Eulerp)
# FIND INITIAL RESIDUAL
if formulation.fields == "electro_mechanics":
InertiaResidual = np.zeros((TractionForces.shape[0],1))
InertiaResidual[self.mechanical_dofs,0] = M_mech.dot(accelerations.ravel())
if fem_solver.include_physical_damping:
InertiaResidual[self.mechanical_dofs,0] += D_mech.dot(velocities.ravel())
else:
InertiaResidual = np.zeros((TractionForces.shape[0],1))
InertiaResidual[:,0] = M.dot(accelerations.ravel())
if fem_solver.include_physical_damping:
InertiaResidual[:,0] += D.dot(velocities.ravel())
# UPDATE RESIDUAL
Residual[boundary_condition.columns_in] = TractionForces[boundary_condition.columns_in] \
- NodalForces[boundary_condition.columns_in] + InertiaResidual[boundary_condition.columns_in]
# SAVE THE NORM
self.abs_norm_residual = la.norm(Residual[boundary_condition.columns_in])
if Iter==0:
self.NormForces = la.norm(Residual[boundary_condition.columns_in])
self.norm_residual = np.abs(la.norm(Residual[boundary_condition.columns_in])/self.NormForces)
# SAVE THE NORM
self.NRConvergence['Increment_'+str(Increment)] = np.append(self.NRConvergence['Increment_'+str(Increment)],\
self.norm_residual)
print("Iteration {} for increment {}.".format(Iter, Increment) +\
" Residual (abs) {0:>16.7g}".format(self.abs_norm_residual),
"\t Residual (rel) {0:>16.7g}".format(self.norm_residual))
# BREAK BASED ON RELATIVE NORM
if np.abs(self.abs_norm_residual) < Tolerance:
break
# BREAK BASED ON INCREMENTAL SOLUTION - KEEP IT AFTER UPDATE
if norm(dU) <= fem_solver.newton_raphson_solution_tolerance:
print("Incremental solution within tolerance i.e. norm(dU): {}".format(norm(dU)))
break
# UPDATE ITERATION NUMBER
Iter +=1
if Iter==fem_solver.maximum_iteration_for_newton_raphson and formulation.fields == "electro_mechanics":
raise StopIteration("\n\nNewton Raphson did not converge! Maximum number of iterations reached.")
if Iter==fem_solver.maximum_iteration_for_newton_raphson:
fem_solver.newton_raphson_failed_to_converge = True
break
if np.isnan(self.norm_residual) or self.norm_residual>1e06:
fem_solver.newton_raphson_failed_to_converge = True
break
# IF BREAK WHEN NEWTON RAPHSON STAGNATES IS ACTIVATED
if fem_solver.break_at_stagnation:
self.iterative_norm_history.append(self.norm_residual)
if Iter >= 5 and self.abs_norm_residual<1e06:
if np.mean(self.iterative_norm_history) < 1.:
break
# USER DEFINED CRITERIA TO BREAK OUT OF NEWTON-RAPHSON
            if fem_solver.user_defined_break_func is not None:
if fem_solver.user_defined_break_func(Increment,Iter,self.norm_residual,self.abs_norm_residual, Tolerance):
break
# USER DEFINED CRITERIA TO STOP NEWTON-RAPHSON AND THE WHOLE ANALYSIS
            if fem_solver.user_defined_stop_func is not None:
if fem_solver.user_defined_stop_func(Increment,Iter,self.norm_residual,self.abs_norm_residual, Tolerance):
fem_solver.newton_raphson_failed_to_converge = True
break
return Eulerx, Eulerp, K, Residual, velocities, accelerations
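# Illustrative sketch (an editorial addition, not part of Florence): the
# Newmark-beta update above reduced to a single DOF m*u'' + k*u = f with
# gamma = 1/2, beta = 1/4. It mirrors the effective stiffness
# K += (gamma/beta/dt)*D + (1./beta/dt**2)*M and the acceleration/velocity
# corrector used in NewtonRaphson; all names below are local stand-ins.
def _newmark_sdof_sketch(m=1.0, k=4.0, f=1.0, dt=0.01, nsteps=1000,
                         gamma=0.5, beta=0.25):
    u, v = 0.0, 0.0
    a = (f - k*u)/m                          # initial acceleration
    for _ in range(nsteps):
        k_eff = k + m/(beta*dt**2)           # effective stiffness (undamped)
        f_eff = f + m*(u/(beta*dt**2) + v/(beta*dt) + (0.5/beta - 1.)*a)
        u_new = f_eff/k_eff
        a_new = (u_new - u)/(beta*dt**2) - v/(beta*dt) - (0.5/beta - 1.)*a
        v = v + dt*((1. - gamma)*a + gamma*a_new)
        u, a = u_new, a_new
    return u                                 # oscillates about f/k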
#------------------------------------------ LINEAR IMPLICIT SOLVER ----------------------------------------------#
#----------------------------------------------------------------------------------------------------------------#
#----------------------------------------------------------------------------------------------------------------#
class LinearImplicitStructuralDynamicIntegrator(StructuralDynamicIntegrator):
"""Implicit dynamic solver for linear problems based on Newmark's beta
"""
def __init__(self,**kwargs):
super(LinearImplicitStructuralDynamicIntegrator, self).__init__()
self.lump_rhs = False
self.gamma = 0.5
self.beta = 0.25
def Solver(self, function_spaces, formulation, solver,
K, M, NeumannForces, NodalForces, Residual,
mesh, TotalDisp, Eulerx, Eulerp, material, boundary_condition, fem_solver):
# CHECK FORMULATION
if formulation.fields != "mechanics" and formulation.fields != "electro_mechanics":
raise NotImplementedError("Linear implicit solver for {} is not available".format(formulation.fields))
if formulation.fields == "electro_mechanics":
warn("Linear implicit solver for electromechanics formulation is not thoroughly checked and may return incorrect results. "
"Please use nonlinear explicit dynamic solver instead")
        # GET BOUNDARY CONDITIONS INFORMATION
self.GetBoundaryInfo(mesh, formulation, boundary_condition)
LoadIncrement = fem_solver.number_of_load_increments
LoadFactor = fem_solver.total_time/LoadIncrement
post_process = PostProcess(formulation.ndim,formulation.nvar)
post_process.SetAnalysis(analysis_type=fem_solver.analysis_type, analysis_nature=fem_solver.analysis_nature)
if NeumannForces.ndim == 2 and NeumannForces.shape[1]==1:
tmp = np.zeros((NeumannForces.shape[0],LoadIncrement))
tmp[:,0] = NeumannForces[:,0]
NeumannForces = tmp
dU = boundary_condition.UpdateFixDoFs(boundary_condition.applied_dirichlet[:,0],
mesh.points.shape[0]*formulation.nvar, formulation.nvar)
TotalDisp[:,:formulation.nvar,0] = dU
# INITIALISE VELOCITY AND ACCELERATION
velocities = np.zeros((mesh.points.shape[0]*formulation.ndim))
accelerations = np.zeros((mesh.points.shape[0]*formulation.ndim))
# COMPUTE DAMPING MATRIX BASED ON MASS
D = 0.0
if fem_solver.include_physical_damping:
D = fem_solver.damping_factor*M
if formulation.fields == "electro_mechanics":
M_mech = M[self.mechanical_dofs,:][:,self.mechanical_dofs]
if fem_solver.include_physical_damping:
D_mech = D[self.mechanical_dofs,:][:,self.mechanical_dofs]
else:
M_mech = M
D_mech = D
# COMPUTE INITIAL ACCELERATION FOR TIME STEP 0
Residual = np.zeros_like(Residual)
InitResidual = Residual + NeumannForces[:,0][:,None]
if formulation.fields == "electro_mechanics":
accelerations[:] = solver.Solve(M_mech, -InitResidual[self.mechanical_dofs].ravel())
else:
accelerations[:] = solver.Solve(M, InitResidual.ravel() )
# COMPUTE AUGMENTED K (INCLUDES INERTIA EFFECT)
K += (self.gamma/self.beta/LoadFactor)*D + (1./self.beta/LoadFactor**2)*M
# GET REDUCED VARIABLES
K_b, F_b, _ = boundary_condition.GetReducedMatrices(K,Residual)
if self.lump_rhs:
M_mech = M_mech.sum(axis=1).A.ravel() # FOR CSR
# M_mech = M_mech.sum(axis=0).ravel() # FOR CSC
            if fem_solver.include_physical_damping:
D_mech = D_mech.sum(axis=1).A.ravel()
reuse_factorisation = False if formulation.fields == "electro_mechanics" else True
for Increment in range(1,LoadIncrement):
t_increment=time()
# FIXED INCREMENTAL DIRICHLET
AppliedDirichletInc = boundary_condition.applied_dirichlet[:,Increment-1]
# APPLY NEUMANN BOUNDARY CONDITIONS
DeltaF = NeumannForces[:,Increment][:,None]
NodalForces = DeltaF
# ACCUMULATED FORCE
if fem_solver.include_physical_damping:
if self.lump_rhs:
Residual[self.mechanical_dofs,0] = (1./self.beta/LoadFactor**2)*M_mech*TotalDisp[:,:formulation.ndim,Increment-1].ravel() +\
(1./self.beta/LoadFactor)*M_mech*velocities + (0.5/self.beta - 1.)*M_mech*accelerations +\
(self.gamma/self.beta/LoadFactor)*D_mech*TotalDisp[:,:formulation.ndim,Increment-1].ravel() +\
(self.gamma/self.beta - 1.)*D_mech*velocities -\
LoadFactor*((1-self.gamma)-self.gamma*(0.5/self.beta - 1.))*D_mech*accelerations
else:
Residual[self.mechanical_dofs,0] = (1./self.beta/LoadFactor**2)*M_mech.dot(TotalDisp[:,:formulation.ndim,Increment-1].ravel()) +\
(1./self.beta/LoadFactor)*M_mech.dot(velocities) + (0.5/self.beta - 1.)*M_mech.dot(accelerations) +\
(self.gamma/self.beta/LoadFactor)*D_mech.dot(TotalDisp[:,:formulation.ndim,Increment-1].ravel()) +\
(self.gamma/self.beta - 1.)*D_mech.dot(velocities) -\
LoadFactor*((1-self.gamma)-self.gamma*(0.5/self.beta - 1.))*D_mech.dot(accelerations)
else:
if self.lump_rhs:
Residual[self.mechanical_dofs,0] = (1./self.beta/LoadFactor**2)*M_mech*TotalDisp[:,:formulation.ndim,Increment-1].ravel() +\
(1./self.beta/LoadFactor)*M_mech*velocities + (0.5/self.beta - 1.)*M_mech*accelerations
else:
Residual[self.mechanical_dofs,0] = (1./self.beta/LoadFactor**2)*M_mech.dot(TotalDisp[:,:formulation.ndim,Increment-1].ravel()) +\
(1./self.beta/LoadFactor)*M_mech.dot(velocities) + (0.5/self.beta - 1.)*M_mech.dot(accelerations)
Residual += DeltaF
if formulation.fields == "electro_mechanics":
K = Assemble(fem_solver,function_spaces[0], formulation, mesh, material, Eulerx, Eulerp)[0]
K += (self.gamma/self.beta/LoadFactor)*D + (1./self.beta/LoadFactor**2)*M
# CHECK CONTACT AND ASSEMBLE IF DETECTED
if fem_solver.has_contact:
Eulerx = mesh.points + TotalDisp[:,:formulation.ndim,Increment-1]
TractionForcesContact = np.zeros_like(Residual)
TractionForcesContact = fem_solver.contact_formulation.AssembleTractions(mesh,material,Eulerx).ravel()*LoadFactor
if formulation.fields == "electro_mechanics" or formulation.fields == "flexoelectric":
Residual[self.mechanical_dofs,0] -= TractionForcesContact
elif formulation.fields == "mechanics" or formulation.fields == "couple_stress":
Residual[:,0] -= TractionForcesContact
else:
raise NotImplementedError("Contact algorithm for {} is not available".format(formulation.fields))
# REDUCED ACCUMULATED FORCE
if formulation.fields == "mechanics":
F_b = boundary_condition.ApplyDirichletGetReducedMatrices(K,Residual,
boundary_condition.applied_dirichlet[:,Increment],LoadFactor=1.0,
mass=M,only_residual=True)[boundary_condition.columns_in,0]
else:
K_b, F_b = boundary_condition.ApplyDirichletGetReducedMatrices(K,Residual,
boundary_condition.applied_dirichlet[:,Increment],LoadFactor=1.0,
mass=M)[:2]
# SOLVE THE SYSTEM
sol = solver.Solve(K_b, F_b, reuse_factorisation=reuse_factorisation)
dU = post_process.TotalComponentSol(sol, boundary_condition.columns_in,
boundary_condition.columns_out, AppliedDirichletInc,0,K.shape[0])
# STORE TOTAL SOLUTION DATA
TotalDisp[:,:,Increment] += dU
# UPDATE VELOCITY AND ACCELERATION
accelerations_old = np.copy(accelerations)
accelerations = (1./self.beta/LoadFactor**2)*(TotalDisp[:,:formulation.ndim,Increment] -\
TotalDisp[:,:formulation.ndim,Increment-1]).ravel() -\
1./self.beta/LoadFactor*velocities + (1.-0.5/self.beta)*accelerations_old
velocities += LoadFactor*(self.gamma*accelerations + (1-self.gamma)*accelerations_old)
# UPDATE
Eulerx += dU[:,:formulation.ndim]
Eulerp += dU[:,-1]
# LOG REQUESTS
fem_solver.LogSave(formulation, TotalDisp, Increment)
            # BREAK AT A SPECIFIED LOAD INCREMENT IF ASKED FOR
if fem_solver.break_at_increment != -1 and fem_solver.break_at_increment is not None:
if fem_solver.break_at_increment == Increment:
if fem_solver.break_at_increment < LoadIncrement - 1:
print("\nStopping at increment {} as specified\n\n".format(Increment))
TotalDisp = TotalDisp[:,:,:Increment]
fem_solver.number_of_load_increments = Increment
break
# STORE THE INFORMATION IF THE SOLVER BLOWS UP
if Increment > 0:
U0 = TotalDisp[:,:,Increment-1].ravel()
U = TotalDisp[:,:,Increment].ravel()
tol = 1e200 if Increment < 5 else 10.
if np.isnan(norm(U)) or np.abs(U.max()/(U0.max()+1e-14)) > tol:
print("Solver blew up! Norm of incremental solution is too large")
TotalDisp = TotalDisp[:,:,:Increment]
fem_solver.number_of_load_increments = Increment
break
print('Finished Load increment', Increment, 'in', time()-t_increment, 'seconds\n')
solver.CleanUp()
return TotalDisp
```
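
The `lump_rhs` branch in the linear solver above trades consistent mass-matrix products for a diagonal, row-sum lumped mass. A short sketch of that lumping on an illustrative 1D consistent mass matrix; the matrix values are made up, and `np.asarray(...)` stands in for the `.A` matrix attribute used above:
```python
import numpy as np
from scipy.sparse import csr_matrix

# illustrative two-element 1D consistent mass matrix
Mc = csr_matrix(np.array([[2., 1., 0.],
                          [1., 4., 1.],
                          [0., 1., 2.]])/6.)

# row-sum lumping, cf. M_mech = M_mech.sum(axis=1).A.ravel()
M_lumped = np.asarray(Mc.sum(axis=1)).ravel()

# mass-vector products now reduce to elementwise scaling
u = np.array([1., 2., 3.])
print(M_lumped*u)
```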
#### File: Florence/VariationalPrinciple/DisplacementPotentialFormulation.py
```python
import numpy as np
from .VariationalPrinciple import VariationalPrinciple
from Florence import QuadratureRule, FunctionSpace
from Florence.FiniteElements.LocalAssembly.KinematicMeasures import *
from Florence.FiniteElements.LocalAssembly._KinematicMeasures_ import _KinematicMeasures_
from .DisplacementPotentialApproachIndices import *
from ._ConstitutiveStiffnessDPF_ import __ConstitutiveStiffnessIntegrandDPF__
from ._TractionDPF_ import __TractionIntegrandDPF__
from Florence.Tensor import issymetric
from Florence.LegendreTransform import LegendreTransform
__all__ = ["DisplacementPotentialFormulation"]
class DisplacementPotentialFormulation(VariationalPrinciple):
def __init__(self, mesh, variables_order=(1,),
quadrature_rules=None, quadrature_type=None, function_spaces=None, compute_post_quadrature=True,
equally_spaced_bases=False, quadrature_degree=None):
if mesh.element_type != "tet" and mesh.element_type != "tri" and \
mesh.element_type != "quad" and mesh.element_type != "hex":
            raise NotImplementedError("{} has not been implemented for {} elements".format(
                type(self).__name__, mesh.element_type))
        if isinstance(variables_order,int):
            variables_order = (variables_order,)
        self.variables_order = variables_order
super(DisplacementPotentialFormulation, self).__init__(mesh,variables_order=self.variables_order,
quadrature_type=quadrature_type,quadrature_rules=quadrature_rules,function_spaces=function_spaces,
compute_post_quadrature=compute_post_quadrature)
self.fields = "electro_mechanics"
self.nvar = self.ndim+1
self.GetQuadraturesAndFunctionSpaces(mesh, variables_order=variables_order,
quadrature_rules=quadrature_rules, quadrature_type=quadrature_type,
function_spaces=function_spaces, compute_post_quadrature=compute_post_quadrature,
equally_spaced_bases=equally_spaced_bases, quadrature_degree=quadrature_degree)
def GetElementalMatrices(self, elem, function_space, mesh, material, fem_solver, Eulerx, Eulerp):
massel=[]; f = []
# GET THE FIELDS AT THE ELEMENT LEVEL
LagrangeElemCoords = mesh.points[mesh.elements[elem,:],:]
EulerElemCoords = Eulerx[mesh.elements[elem,:],:]
ElectricPotentialElem = Eulerp[mesh.elements[elem,:]]
# COMPUTE THE STIFFNESS MATRIX
if material.has_low_level_dispatcher:
stiffnessel, t = self.__GetLocalStiffness__(function_space, material, LagrangeElemCoords,
EulerElemCoords, ElectricPotentialElem, fem_solver, elem)
else:
stiffnessel, t = self.GetLocalStiffness(function_space, material, LagrangeElemCoords,
EulerElemCoords, ElectricPotentialElem, fem_solver, elem)
I_mass_elem = []; J_mass_elem = []; V_mass_elem = []
if fem_solver.analysis_type != 'static' and fem_solver.is_mass_computed is False:
# COMPUTE THE MASS MATRIX
if material.has_low_level_dispatcher:
massel = self.__GetLocalMass__(function_space,material,LagrangeElemCoords,EulerElemCoords,fem_solver,elem)
else:
massel = self.GetLocalMass(function_space,material,LagrangeElemCoords,EulerElemCoords,fem_solver,elem)
I_stiff_elem, J_stiff_elem, V_stiff_elem = self.FindIndices(stiffnessel)
if fem_solver.analysis_type != 'static' and fem_solver.is_mass_computed is False:
I_mass_elem, J_mass_elem, V_mass_elem = self.FindIndices(massel)
return I_stiff_elem, J_stiff_elem, V_stiff_elem, t, f, I_mass_elem, J_mass_elem, V_mass_elem
def GetElementalMatricesInVectorForm(self, elem, function_space, mesh, material, fem_solver, Eulerx, Eulerp):
massel=[]; f = []
# GET THE FIELDS AT THE ELEMENT LEVEL
LagrangeElemCoords = mesh.points[mesh.elements[elem,:],:]
EulerElemCoords = Eulerx[mesh.elements[elem,:],:]
ElectricPotentialElem = Eulerp[mesh.elements[elem,:]]
# COMPUTE THE TRACTION VECTOR
if material.has_low_level_dispatcher:
t = self.__GetLocalTraction__(function_space, material, LagrangeElemCoords,
EulerElemCoords, ElectricPotentialElem, fem_solver, elem)
else:
t = self.GetLocalTraction(function_space, material, LagrangeElemCoords,
EulerElemCoords, ElectricPotentialElem, fem_solver, elem)
if fem_solver.analysis_type != 'static' and fem_solver.is_mass_computed is False:
# COMPUTE THE MASS MATRIX
if material.has_low_level_dispatcher:
# massel = self.__GetLocalMass__(function_space,material,LagrangeElemCoords,EulerElemCoords,fem_solver,elem)
massel = self.__GetLocalMass_Efficient__(function_space,material,LagrangeElemCoords,EulerElemCoords,fem_solver,elem)
else:
# massel = self.GetLocalMass(function_space,material,LagrangeElemCoords,EulerElemCoords,fem_solver,elem)
massel = self.GetLocalMass_Efficient(function_space,material,LagrangeElemCoords,EulerElemCoords,fem_solver,elem)
if fem_solver.analysis_subtype == "explicit" and fem_solver.mass_type == "lumped":
massel = self.GetLumpedMass(massel)
return t, f, massel
def GetLocalStiffness(self, function_space, material, LagrangeElemCoords,
EulerELemCoords, ElectricPotentialElem, fem_solver, elem=0):
"""Get stiffness matrix of the system"""
nvar = self.nvar
ndim = self.ndim
nodeperelem = function_space.Bases.shape[0]
det = np.linalg.det
inv = np.linalg.inv
Jm = function_space.Jm
AllGauss = function_space.AllGauss
# ALLOCATE
stiffness = np.zeros((nodeperelem*nvar,nodeperelem*nvar),dtype=np.float64)
tractionforce = np.zeros((nodeperelem*nvar,1),dtype=np.float64)
B = np.zeros((nodeperelem*nvar,material.H_VoigtSize),dtype=np.float64)
# COMPUTE KINEMATIC MEASURES AT ALL INTEGRATION POINTS USING EINSUM (AVOIDING THE FOR LOOP)
# MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
ParentGradientX = np.einsum('ijk,jl->kil', Jm, LagrangeElemCoords)
# MATERIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla_0 (N)]
MaterialGradient = np.einsum('ijk,kli->ijl', inv(ParentGradientX), Jm)
# DEFORMATION GRADIENT TENSOR [\vec{x} \otimes \nabla_0 (N)]
F = np.einsum('ij,kli->kjl', EulerELemCoords, MaterialGradient)
# COMPUTE REMAINING KINEMATIC MEASURES
StrainTensors = KinematicMeasures(F, fem_solver.analysis_nature)
# UPDATE/NO-UPDATE GEOMETRY
if fem_solver.requires_geometry_update:
# MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
ParentGradientx = np.einsum('ijk,jl->kil',Jm,EulerELemCoords)
# SPATIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla (N)]
SpatialGradient = np.einsum('ijk,kli->ilj',inv(ParentGradientx),Jm)
# COMPUTE ONCE detJ (GOOD SPEEDUP COMPARED TO COMPUTING TWICE)
detJ = np.einsum('i,i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)),np.abs(StrainTensors['J']))
else:
# SPATIAL GRADIENT AND MATERIAL GRADIENT TENSORS ARE EQUAL
SpatialGradient = np.einsum('ikj',MaterialGradient)
# COMPUTE ONCE detJ
detJ = np.einsum('i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)))
# GET ELECTRIC FIELD
ElectricFieldx = - np.einsum('ijk,j',SpatialGradient,ElectricPotentialElem)
# LOOP OVER GAUSS POINTS
for counter in range(AllGauss.shape[0]):
if material.energy_type == "enthalpy":
# COMPUTE THE HESSIAN AT THIS GAUSS POINT
H_Voigt = material.Hessian(StrainTensors,ElectricFieldx[counter,:], elem, counter)
# COMPUTE ELECTRIC DISPLACEMENT
ElectricDisplacementx = material.ElectricDisplacementx(StrainTensors, ElectricFieldx[counter,:], elem, counter)
# COMPUTE CAUCHY STRESS TENSOR
CauchyStressTensor = []
if fem_solver.requires_geometry_update:
CauchyStressTensor = material.CauchyStress(StrainTensors,ElectricFieldx[counter,:],elem,counter)
elif material.energy_type == "internal_energy":
# THIS REQUIRES LEGENDRE TRANSFORM
# COMPUTE ELECTRIC DISPLACEMENT IMPLICITLY
ElectricDisplacementx = material.ElectricDisplacementx(StrainTensors, ElectricFieldx[counter,:], elem, counter)
# COMPUTE THE HESSIAN AT THIS GAUSS POINT
H_Voigt = material.Hessian(StrainTensors,ElectricDisplacementx, elem, counter)
# COMPUTE CAUCHY STRESS TENSOR
CauchyStressTensor = []
if fem_solver.requires_geometry_update:
CauchyStressTensor = material.CauchyStress(StrainTensors,ElectricDisplacementx,elem,counter)
# COMPUTE THE TANGENT STIFFNESS MATRIX
BDB_1, t = self.ConstitutiveStiffnessIntegrand(B, SpatialGradient[counter,:,:],
ElectricDisplacementx, CauchyStressTensor, H_Voigt, requires_geometry_update=fem_solver.requires_geometry_update)
# COMPUTE GEOMETRIC STIFFNESS MATRIX
if material.nature != "linear":
BDB_1 += self.GeometricStiffnessIntegrand(SpatialGradient[counter,:,:],CauchyStressTensor)
# INTEGRATE TRACTION FORCE
if fem_solver.requires_geometry_update:
tractionforce += t*detJ[counter]
# INTEGRATE STIFFNESS
stiffness += BDB_1*detJ[counter]
return stiffness, tractionforce
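    # ------------------------------------------------------------------
    # Note (editorial, with assumed shapes): the einsum kinematics above
    # vectorise over Gauss points. With Jm of shape (ndim, nodeperelem,
    # ngauss) and nodal coordinates X, x of shape (nodeperelem, ndim):
    #   ParentGradientX  = np.einsum('ijk,jl->kil', Jm, X)    # (ngauss, ndim, ndim)
    #   MaterialGradient = np.einsum('ijk,kli->ijl',
    #                                np.linalg.inv(ParentGradientX), Jm)
    #   F                = np.einsum('ij,kli->kjl', x, MaterialGradient)
    # Sanity check: substituting x = X gives F = I at every Gauss point,
    # since sum_i X[i,j]*dN_i/dX_l = dX_j/dX_l = delta_jl.
    # ------------------------------------------------------------------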
def __GetLocalStiffness__(self, function_space, material, LagrangeElemCoords,
EulerELemCoords, ElectricPotentialElem, fem_solver, elem=0):
"""Get stiffness matrix of the system"""
# GET LOCAL KINEMATICS
SpatialGradient, F, detJ = _KinematicMeasures_(function_space.Jm, function_space.AllGauss[:,0], LagrangeElemCoords,
EulerELemCoords, fem_solver.requires_geometry_update)
# GET ELECTRIC FIELD
ElectricFieldx = - np.einsum('ijk,j',SpatialGradient,ElectricPotentialElem)
# COMPUTE WORK-CONJUGATES AND HESSIAN AT THIS GAUSS POINT
ElectricDisplacementx, CauchyStressTensor, H_Voigt = material.KineticMeasures(F, ElectricFieldx, elem=elem)
# COMPUTE LOCAL CONSTITUTIVE STIFFNESS AND TRACTION
stiffness, tractionforce = __ConstitutiveStiffnessIntegrandDPF__(SpatialGradient,ElectricDisplacementx,
CauchyStressTensor,H_Voigt,detJ,self.nvar,fem_solver.requires_geometry_update)
# COMPUTE LOCAL GEOMETRIC STIFFNESS
if material.nature != "linear":
stiffness += self.__GeometricStiffnessIntegrand__(SpatialGradient,CauchyStressTensor,detJ)
return stiffness, tractionforce
def ConstitutiveStiffnessIntegrand(self, B, SpatialGradient, ElectricDisplacementx,
CauchyStressTensor, H_Voigt, requires_geometry_update=True):
"""Overrides base for electric potential formulation"""
# MATRIX FORM
SpatialGradient = SpatialGradient.T.copy()
ElectricDisplacementx = ElectricDisplacementx.flatten().copy()
FillConstitutiveB(B,SpatialGradient,self.ndim,self.nvar)
BDB = B.dot(H_Voigt.dot(B.T))
t=np.zeros((B.shape[0],1))
if requires_geometry_update:
TotalTraction = GetTotalTraction(CauchyStressTensor,ElectricDisplacementx)
t = np.dot(B,TotalTraction)
return BDB, t
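    # ------------------------------------------------------------------
    # Note (editorial): with B the (nodeperelem*nvar x H_VoigtSize)
    # operator filled by FillConstitutiveB, the two products above are the
    # standard Galerkin contractions
    #     tangent contribution:  BDB = B . H_Voigt . B^T
    #     internal traction:     t   = B . TotalTraction
    # where TotalTraction stacks the Voigt Cauchy stress with the electric
    # displacement (cf. GetTotalTraction); both are weighted by detJ at
    # each Gauss point during integration.
    # ------------------------------------------------------------------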
def GetLocalTraction(self, function_space, material, LagrangeElemCoords,
EulerELemCoords, ElectricPotentialElem, fem_solver, elem=0):
"""Get traction vector of the system"""
nvar = self.nvar
ndim = self.ndim
nodeperelem = function_space.Bases.shape[0]
det = np.linalg.det
inv = np.linalg.inv
Jm = function_space.Jm
AllGauss = function_space.AllGauss
# ALLOCATE
tractionforce = np.zeros((nodeperelem*nvar,1),dtype=np.float64)
B = np.zeros((nodeperelem*nvar,material.H_VoigtSize),dtype=np.float64)
# COMPUTE KINEMATIC MEASURES AT ALL INTEGRATION POINTS USING EINSUM (AVOIDING THE FOR LOOP)
# MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
ParentGradientX = np.einsum('ijk,jl->kil', Jm, LagrangeElemCoords)
# MATERIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla_0 (N)]
MaterialGradient = np.einsum('ijk,kli->ijl', inv(ParentGradientX), Jm)
# DEFORMATION GRADIENT TENSOR [\vec{x} \otimes \nabla_0 (N)]
F = np.einsum('ij,kli->kjl', EulerELemCoords, MaterialGradient)
# COMPUTE REMAINING KINEMATIC MEASURES
StrainTensors = KinematicMeasures(F, fem_solver.analysis_nature)
# UPDATE/NO-UPDATE GEOMETRY
if fem_solver.requires_geometry_update:
# MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
ParentGradientx = np.einsum('ijk,jl->kil',Jm,EulerELemCoords)
# SPATIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla (N)]
SpatialGradient = np.einsum('ijk,kli->ilj',inv(ParentGradientx),Jm)
# COMPUTE ONCE detJ (GOOD SPEEDUP COMPARED TO COMPUTING TWICE)
detJ = np.einsum('i,i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)),np.abs(StrainTensors['J']))
else:
# SPATIAL GRADIENT AND MATERIAL GRADIENT TENSORS ARE EQUAL
SpatialGradient = np.einsum('ikj',MaterialGradient)
# COMPUTE ONCE detJ
detJ = np.einsum('i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)))
# GET ELECTRIC FIELD
ElectricFieldx = - np.einsum('ijk,j',SpatialGradient,ElectricPotentialElem)
# LOOP OVER GAUSS POINTS
for counter in range(AllGauss.shape[0]):
if material.energy_type == "enthalpy":
# COMPUTE THE HESSIAN AT THIS GAUSS POINT
H_Voigt = material.Hessian(StrainTensors,ElectricFieldx[counter,:], elem, counter)
# COMPUTE ELECTRIC DISPLACEMENT
ElectricDisplacementx = material.ElectricDisplacementx(StrainTensors, ElectricFieldx[counter,:], elem, counter)
# COMPUTE CAUCHY STRESS TENSOR
CauchyStressTensor = []
if fem_solver.requires_geometry_update:
CauchyStressTensor = material.CauchyStress(StrainTensors,ElectricFieldx[counter,:],elem,counter)
elif material.energy_type == "internal_energy":
# THIS REQUIRES LEGENDRE TRANSFORM
# COMPUTE ELECTRIC DISPLACEMENT IMPLICITLY
ElectricDisplacementx = material.ElectricDisplacementx(StrainTensors, ElectricFieldx[counter,:], elem, counter)
# COMPUTE THE HESSIAN AT THIS GAUSS POINT
H_Voigt = material.Hessian(StrainTensors,ElectricDisplacementx, elem, counter)
# COMPUTE CAUCHY STRESS TENSOR
CauchyStressTensor = []
if fem_solver.requires_geometry_update:
CauchyStressTensor = material.CauchyStress(StrainTensors,ElectricDisplacementx,elem,counter)
# COMPUTE THE TANGENT STIFFNESS MATRIX
t = self.TractionIntegrand(B, SpatialGradient[counter,:,:],
ElectricDisplacementx, CauchyStressTensor, requires_geometry_update=fem_solver.requires_geometry_update)
if fem_solver.requires_geometry_update:
# INTEGRATE TRACTION FORCE
tractionforce += t*detJ[counter]
return tractionforce
def __GetLocalTraction__(self, function_space, material, LagrangeElemCoords,
EulerELemCoords, ElectricPotentialElem, fem_solver, elem=0):
"""Get traction vector of the system"""
# GET LOCAL KINEMATICS
SpatialGradient, F, detJ = _KinematicMeasures_(function_space.Jm, function_space.AllGauss[:,0], LagrangeElemCoords,
EulerELemCoords, fem_solver.requires_geometry_update)
# GET ELECTRIC FIELD
ElectricFieldx = - np.einsum('ijk,j',SpatialGradient,ElectricPotentialElem)
# COMPUTE WORK-CONJUGATES AND HESSIAN AT THIS GAUSS POINT
ElectricDisplacementx, CauchyStressTensor, _ = material.KineticMeasures(F, ElectricFieldx, elem=elem)
# COMPUTE LOCAL CONSTITUTIVE STIFFNESS AND TRACTION
tractionforce = __TractionIntegrandDPF__(SpatialGradient,ElectricDisplacementx,
CauchyStressTensor,detJ,material.H_VoigtSize,self.nvar,fem_solver.requires_geometry_update)
return tractionforce
def TractionIntegrand(self, B, SpatialGradient, ElectricDisplacementx,
CauchyStressTensor, requires_geometry_update=True):
"""Applies to displacement potential based formulation"""
# MATRIX FORM
SpatialGradient = SpatialGradient.T.copy()
ElectricDisplacementx = ElectricDisplacementx.flatten().copy()
FillConstitutiveB(B,SpatialGradient,self.ndim,self.nvar)
t=np.zeros((B.shape[0],1))
if requires_geometry_update:
TotalTraction = GetTotalTraction(CauchyStressTensor,ElectricDisplacementx)
t = np.dot(B,TotalTraction)
return t
def GetLocalResidual(self):
pass
def GetEnergy(self, function_space, material, LagrangeElemCoords,
EulerELemCoords, ElectricPotentialElem, fem_solver, elem=0):
"""Get virtual energy of the system. For dynamic analysis this is handy for computing conservation of energy.
        The routine computes the global form of the virtual internal energy, i.e. the integral of W(C,G,C). This can be
        computed purely in a Lagrangian configuration.
"""
nvar = self.nvar
ndim = self.ndim
nodeperelem = function_space.Bases.shape[0]
det = np.linalg.det
inv = np.linalg.inv
Jm = function_space.Jm
AllGauss = function_space.AllGauss
internal_energy = 0.
# COMPUTE KINEMATIC MEASURES AT ALL INTEGRATION POINTS USING EINSUM (AVOIDING THE FOR LOOP)
# MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
ParentGradientX = np.einsum('ijk,jl->kil', Jm, LagrangeElemCoords)
# MATERIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla_0 (N)]
MaterialGradient = np.einsum('ijk,kli->ijl', inv(ParentGradientX), Jm)
# DEFORMATION GRADIENT TENSOR [\vec{x} \otimes \nabla_0 (N)]
F = np.einsum('ij,kli->kjl', EulerELemCoords, MaterialGradient)
# COMPUTE REMAINING KINEMATIC MEASURES
StrainTensors = KinematicMeasures(F, fem_solver.analysis_nature)
# SPATIAL GRADIENT AND MATERIAL GRADIENT TENSORS ARE EQUAL
SpatialGradient = np.einsum('ikj',MaterialGradient)
# COMPUTE ONCE detJ
detJ = np.einsum('i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)))
# GET ELECTRIC FIELD
ElectricFieldx = - np.einsum('ijk,j',SpatialGradient,ElectricPotentialElem)
# LOOP OVER GAUSS POINTS
for counter in range(AllGauss.shape[0]):
if material.energy_type == "enthalpy":
# COMPUTE THE INTERNAL ENERGY AT THIS GAUSS POINT
energy = material.InternalEnergy(StrainTensors,ElectricFieldx[counter,:],elem,counter)
elif material.energy_type == "internal_energy":
# COMPUTE ELECTRIC DISPLACEMENT IMPLICITLY
ElectricDisplacementx = material.ElectricDisplacementx(StrainTensors, ElectricFieldx[counter,:], elem, counter)
# COMPUTE THE INTERNAL ENERGY AT THIS GAUSS POINT
energy = material.InternalEnergy(StrainTensors,ElectricDisplacementx,elem,counter)
# INTEGRATE INTERNAL ENERGY
internal_energy += energy*detJ[counter]
return internal_energy
def GetLinearMomentum(self, function_space, material, LagrangeElemCoords,
EulerELemCoords, VelocityElem, ElectricPotentialElem, fem_solver, elem=0):
"""Get linear momentum or virtual power of the system. For dynamic analysis this is handy for computing conservation of linear momentum.
The routine computes the global form of virtual power, i.e. the integral of "P:Grad_0(V)" where P is the first Piola-Kirchhoff
stress tensor and Grad_0(V) is the material gradient of velocity. Alternatively, in an updated Lagrangian format this can be
computed as "Sigma:Grad(V) J" where Sigma is the Cauchy stress tensor and Grad(V) is the spatial gradient of velocity.
The latter approach is followed here; a numerical check of this identity is sketched after this file listing.
"""
nvar = self.nvar
ndim = self.ndim
nodeperelem = function_space.Bases.shape[0]
det = np.linalg.det
inv = np.linalg.inv
Jm = function_space.Jm
AllGauss = function_space.AllGauss
internal_power = 0.
# COMPUTE KINEMATIC MEASURES AT ALL INTEGRATION POINTS USING EINSUM (AVOIDING THE FOR LOOP)
# MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
ParentGradientX = np.einsum('ijk,jl->kil', Jm, LagrangeElemCoords)
# MATERIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla_0 (N)]
MaterialGradient = np.einsum('ijk,kli->ijl', inv(ParentGradientX), Jm)
# DEFORMATION GRADIENT TENSOR [\vec{x} \otimes \nabla_0 (N)]
F = np.einsum('ij,kli->kjl', EulerELemCoords, MaterialGradient)
# TIME DERIVATIVE OF F
Fdot = np.einsum('ij,kli->kjl', VelocityElem, MaterialGradient)
# COMPUTE REMAINING KINEMATIC MEASURES
StrainTensors = KinematicMeasures(F, fem_solver.analysis_nature)
# MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
ParentGradientx = np.einsum('ijk,jl->kil',Jm, EulerELemCoords)
# SPATIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla (N)]
SpatialGradient = np.einsum('ijk,kli->ilj',inv(ParentGradientx),Jm)
# COMPUTE ONCE detJ (GOOD SPEEDUP COMPARED TO COMPUTING TWICE)
detJ = np.einsum('i,i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)),np.abs(StrainTensors['J']))
# GET ELECTRIC FIELD
ElectricFieldx = - np.einsum('ijk,j',SpatialGradient,ElectricPotentialElem)
# LOOP OVER GAUSS POINTS
for counter in range(AllGauss.shape[0]):
GradV = np.dot(Fdot[counter,:,:],np.linalg.inv(F[counter,:,:]))
if material.energy_type == "enthalpy":
# COMPUTE ELECTRIC DISPLACEMENT
ElectricDisplacementx = material.ElectricDisplacementx(StrainTensors, ElectricFieldx[counter,:], elem, counter)
# COMPUTE CAUCHY STRESS TENSOR
CauchyStressTensor = material.CauchyStress(StrainTensors,ElectricFieldx[counter,:],elem,counter)
elif material.energy_type == "internal_energy":
# COMPUTE ELECTRIC DISPLACEMENT IMPLICITLY
ElectricDisplacementx = material.ElectricDisplacementx(StrainTensors, ElectricFieldx[counter,:], elem, counter)
# COMPUTE CAUCHY STRESS TENSOR
CauchyStressTensor = material.CauchyStress(StrainTensors,ElectricDisplacementx,elem,counter)
# INTEGRATE INTERNAL VIRTUAL POWER
internal_power += np.einsum('ij,ij',CauchyStressTensor,GradV)*detJ[counter]
return internal_power
# ##############################################################################################
# ##############################################################################################
# def ConstitutiveStiffnessIntegrand(self, B, SpatialGradient, ElectricDisplacementx,
# CauchyStressTensor, H_Voigt, requires_geometry_update=True):
# ndim = self.ndim
# nvar = self.nvar
# # MATRIX FORM
# SpatialGradient = SpatialGradient.T
# # THREE DIMENSIONS
# if SpatialGradient.shape[0]==3:
# B[0::nvar,0] = SpatialGradient[0,:]
# B[1::nvar,1] = SpatialGradient[1,:]
# B[2::nvar,2] = SpatialGradient[2,:]
# # Mechanical - Shear Terms
# B[1::nvar,5] = SpatialGradient[2,:]
# B[2::nvar,5] = SpatialGradient[1,:]
# B[0::nvar,4] = SpatialGradient[2,:]
# B[2::nvar,4] = SpatialGradient[0,:]
# B[0::nvar,3] = SpatialGradient[1,:]
# B[1::nvar,3] = SpatialGradient[0,:]
# # Electrostatic
# B[3::nvar,6] = SpatialGradient[0,:]
# B[3::nvar,7] = SpatialGradient[1,:]
# B[3::nvar,8] = SpatialGradient[2,:]
# if requires_geometry_update:
# CauchyStressTensor_Voigt = np.array([
# CauchyStressTensor[0,0],CauchyStressTensor[1,1],CauchyStressTensor[2,2],
# CauchyStressTensor[0,1],CauchyStressTensor[0,2],CauchyStressTensor[1,2]
# ]).reshape(6,1)
# # TotalTraction = np.concatenate((CauchyStressTensor_Voigt,ElectricDisplacementx[:,None]),axis=0)
# TotalTraction = np.concatenate((CauchyStressTensor_Voigt,ElectricDisplacementx),axis=0)
# elif SpatialGradient.shape[0]==2:
# B[0::nvar,0] = SpatialGradient[0,:]
# B[1::nvar,1] = SpatialGradient[1,:]
# # Mechanical - Shear Terms
# B[0::nvar,2] = SpatialGradient[1,:]
# B[1::nvar,2] = SpatialGradient[0,:]
# # Electrostatic
# B[2::nvar,3] = SpatialGradient[0,:]
# B[2::nvar,4] = SpatialGradient[1,:]
# if requires_geometry_update:
# CauchyStressTensor_Voigt = np.array([
# CauchyStressTensor[0,0],CauchyStressTensor[1,1],
# CauchyStressTensor[0,1]]).reshape(3,1)
# TotalTraction = np.concatenate((CauchyStressTensor_Voigt,ElectricDisplacementx[:,None]),axis=0)
# BDB = np.dot(np.dot(B,H_Voigt),B.T)
# t=[]
# if requires_geometry_update:
# t = np.dot(B,TotalTraction)
# return BDB, t
# def GeometricStiffnessIntegrand(self,SpatialGradient,CauchyStressTensor):
# ndim = self.ndim
# nvar = self.nvar
# B = np.zeros((nvar*SpatialGradient.shape[0],ndim*ndim))
# SpatialGradient = SpatialGradient.T
# S = 0
# if SpatialGradient.shape[0]==3:
# B[0::nvar,0] = SpatialGradient[0,:]
# B[0::nvar,1] = SpatialGradient[1,:]
# B[0::nvar,2] = SpatialGradient[2,:]
# B[1::nvar,3] = SpatialGradient[0,:]
# B[1::nvar,4] = SpatialGradient[1,:]
# B[1::nvar,5] = SpatialGradient[2,:]
# B[2::nvar,6] = SpatialGradient[0,:]
# B[2::nvar,7] = SpatialGradient[1,:]
# B[2::nvar,8] = SpatialGradient[2,:]
# S = np.zeros((3*ndim,3*ndim))
# S[0:ndim,0:ndim] = CauchyStressTensor
# S[ndim:2*ndim,ndim:2*ndim] = CauchyStressTensor
# S[2*ndim:,2*ndim:] = CauchyStressTensor
# elif SpatialGradient.shape[0]==2:
# B[0::nvar,0] = SpatialGradient[0,:]
# B[0::nvar,1] = SpatialGradient[1,:]
# B[1::nvar,2] = SpatialGradient[0,:]
# B[1::nvar,3] = SpatialGradient[1,:]
# # S = np.zeros((3*ndim,3*ndim))
# S = np.zeros((ndim*ndim,ndim*ndim))
# S[0:ndim,0:ndim] = CauchyStressTensor
# S[ndim:2*ndim,ndim:2*ndim] = CauchyStressTensor
# # S[2*ndim:,2*ndim:] = CauchyStressTensor
# BDB = np.dot(np.dot(B,S),B.T)
# return BDB
```
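The docstring of `GetLinearMomentum` above asserts the equivalence of the total and updated Lagrangian forms of internal virtual power. Below is a minimal self-contained check of that identity, using assumed toy tensors rather than Florence data structures:

```python
import numpy as np

# P : Grad_0(V) = J * sigma : grad(v), with P = J * sigma * F^{-T} (the Piola
# transform of a symmetric Cauchy stress) and grad(v) = Fdot * F^{-1}.
rng = np.random.default_rng(1)
F = np.eye(3) + 0.1 * rng.standard_normal((3, 3))   # deformation gradient
Fdot = rng.standard_normal((3, 3))                  # material velocity gradient Grad_0(V)
S = rng.standard_normal((3, 3))
sigma = 0.5 * (S + S.T)                             # Cauchy stress is symmetric

J = np.linalg.det(F)
P = J * sigma @ np.linalg.inv(F).T                  # first Piola-Kirchhoff stress
grad_v = Fdot @ np.linalg.inv(F)                    # spatial velocity gradient

total_lagrangian = np.einsum('ij,ij', P, Fdot)
updated_lagrangian = J * np.einsum('ij,ij', sigma, grad_v)
assert np.isclose(total_lagrangian, updated_lagrangian)
```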
#### File: Florence/VariationalPrinciple/FlexoelectricFormulation.py
```python
from copy import deepcopy
import gc
import numpy as np
from numpy.linalg import det, inv, norm, cond
try:
    # OPTIONAL: ONLY NEEDED FOR THE PARALLEL ASSEMBLY PATH IN Assemble()
    import parmap, multiprocessing
except ImportError:
    parmap = None
from Florence import QuadratureRule, FunctionSpace
from Florence.FiniteElements.LocalAssembly.KinematicMeasures import *
from Florence.FiniteElements.LocalAssembly._KinematicMeasures_ import _KinematicMeasures_
from ._ConstitutiveStiffnessDPF_ import __ConstitutiveStiffnessIntegrandDPF__
from Florence.Tensor import issymetric
from Florence.LegendreTransform import LegendreTransform
from scipy.sparse import coo_matrix, csc_matrix, csr_matrix
from .VariationalPrinciple import *
from Florence.FiniteElements.Assembly.SparseAssemblyNative import SparseAssemblyNative
from Florence.FiniteElements.Assembly.RHSAssemblyNative import RHSAssemblyNative
__all__ = ["FlexoelectricFormulation"]
class FlexoelectricFormulation(VariationalPrinciple):
def __init__(self, mesh, variables_order=(1,0,0), subtype="lagrange_multiplier",
quadrature_rules=None, quadrature_type=None, function_spaces=None, compute_post_quadrature=False,
equally_spaced_bases=False, save_condensed_matrices=True, quadrature_degree=None):
"""
Input:
subtype: [str] either "lagrange_multiplier", "augmented_lagrange" or "penalty"
"""
if mesh.element_type != "tet" and mesh.element_type != "tri" and \
mesh.element_type != "quad" and mesh.element_type != "hex":
raise NotImplementedError( type(self).__name__, "has not been implemented for", mesh.element_type, "elements")
if isinstance(variables_order,int):
    variables_order = (variables_order,)
self.variables_order = variables_order
super(FlexoelectricFormulation, self).__init__(mesh,variables_order=self.variables_order,
quadrature_type=quadrature_type,quadrature_rules=quadrature_rules,function_spaces=function_spaces,
compute_post_quadrature=compute_post_quadrature)
self.fields = "flexoelectric"
self.nvar = self.ndim + 1
self.subtype = subtype
self.save_condensed_matrices = save_condensed_matrices
C = mesh.InferPolynomialDegree() - 1
mesh.InferBoundaryElementType()
if C < 1:
raise ValueError("Incorrect initial mesh provided for the formulation. Mesh has to be at least order 2")
# CHECK IF MESH IS APPROPRIATE
# if C == 0:
# warn('Mesh not appropriate for formulation')
# raise ValueError('Mesh not appropriate for formulation. p>1 for primary variable (displacement)')
# BUILD MESHES FOR ALL FIELDS
p = C+1
# DISPLACEMENTS
mesh0 = deepcopy(mesh)
# ROTATIONS
mesh1 = deepcopy(mesh)
mesh1 = mesh1.GetLinearMesh(remap=True)
mesh1.GetHighOrderMesh(p=p-1)
# LAGRANGE MULTIPLIER
mesh2 = deepcopy(mesh)
mesh2 = mesh2.GetLinearMesh(remap=True)
mesh2.GetHighOrderMesh(p=p-1)
# ALL MESHES
self.meshes = (mesh0,mesh1,mesh2)
# GET QUADRATURE RULES
norder = C+2
if mesh.element_type == "quad" or mesh.element_type == "hex":
norder = C+1
if quadrature_rules is None and self.quadrature_rules is None:
# FOR DISPLACEMENT
quadrature0 = QuadratureRule(optimal=3, norder=self.GetQuadratureOrder(norder,mesh.element_type)[0],
mesh_type=mesh.element_type)
# FOR ROTATIONS
quadrature1 = QuadratureRule(optimal=3, norder=self.GetQuadratureOrder(norder,mesh.element_type)[0],
mesh_type=mesh.element_type)
# FOR LAGRANGE MULTIPLIER
quadrature2 = QuadratureRule(optimal=3, norder=self.GetQuadratureOrder(norder,mesh.element_type)[0],
mesh_type=mesh.element_type)
# BOUNDARY
bquadrature = QuadratureRule(optimal=3, norder=C+2, mesh_type=mesh.boundary_element_type)
self.quadrature_rules = (quadrature0,quadrature1,quadrature2,bquadrature)
else:
self.quadrature_rules = quadrature_rules
# GET FUNCTIONAL SPACES
if function_spaces is None and self.function_spaces is None:
# FOR DISPLACEMENT
function_space0 = FunctionSpace(mesh0, self.quadrature_rules[0], p=mesh0.degree,
equally_spaced=equally_spaced_bases)
# FOR ROTATIONS
function_space1 = FunctionSpace(mesh1, self.quadrature_rules[1], p=mesh1.degree,
equally_spaced=equally_spaced_bases)
# FOR LAGRANGE MULTIPLIER
function_space2 = FunctionSpace(mesh2, self.quadrature_rules[2], p=mesh2.degree,
equally_spaced=equally_spaced_bases)
# BOUNDARY
bfunction_space = FunctionSpace(mesh0.CreateDummyLowerDimensionalMesh(), self.quadrature_rules[3], p=mesh0.degree,
equally_spaced=equally_spaced_bases)
self.function_spaces = (function_space0, function_space1, function_space2, bfunction_space)
else:
self.function_spaces = function_spaces
# local_size = function_space.Bases.shape[0]*self.nvar
local_size = self.function_spaces[0].Bases.shape[0]*self.nvar
self.local_rows = np.repeat(np.arange(0,local_size),local_size,axis=0)
self.local_columns = np.tile(np.arange(0,local_size),local_size)
self.local_size = local_size
# FOR MASS
local_size_m = self.function_spaces[0].Bases.shape[0]*self.nvar
self.local_rows_mass = np.repeat(np.arange(0,local_size_m),local_size_m,axis=0)
self.local_columns_mass = np.tile(np.arange(0,local_size_m),local_size_m)
self.local_size_m = local_size_m
if self.save_condensed_matrices:
# elist = [0]*mesh.nelem # CANT USE ONE PRE-CREATED LIST AS IT GETS MODIFIED
# KEEP VECTORS AND MATRICES SEPARATE BECAUSE OF THE SAME REASON
if self.subtype == "lagrange_multiplier":
self.condensed_matrices = {'k_uu':[0]*mesh.nelem, 'k_up':[0]*mesh.nelem, 'k_pp':[0]*mesh.nelem, 'k_us':[0]*mesh.nelem,
'k_ww':[0]*mesh.nelem, 'k_wp':[0]*mesh.nelem, 'k_ws':[0]*mesh.nelem,'inv_k_ws':[0]*mesh.nelem}
self.condensed_vectors = {'tu':[0]*mesh.nelem,'tw':[0]*mesh.nelem,'ts':[0]*mesh.nelem,'tp':[0]*mesh.nelem}
elif self.subtype == "augmented_lagrange":
self.condensed_matrices = {'k_uu':[0]*mesh.nelem,'k_us':[0]*mesh.nelem,
'k_ww':[0]*mesh.nelem,'k_ws':[0]*mesh.nelem,'k_ss':[0]*mesh.nelem,'inv_k_ws':[0]*mesh.nelem}
self.condensed_vectors = {'tu':[0]*mesh.nelem,'tw':[0]*mesh.nelem,'ts':[0]*mesh.nelem}
elif self.subtype == "penalty":
self.condensed_matrices = {'k_uu':[0]*mesh.nelem,'k_uw':[0]*mesh.nelem,'k_ww':[0]*mesh.nelem}
self.condensed_vectors = {'tu':[0]*mesh.nelem,'tw':[0]*mesh.nelem}
# COMPUTE THE COMMON/NEIGHBOUR NODES ONCE
self.all_nodes = np.unique(self.meshes[1].elements)
self.Elss, self.Poss = self.meshes[1].GetNodeCommonality()[:2]
def GetElementalMatrices(self, elem, function_space, mesh, material, fem_solver, Eulerx, Eulerw, Eulers, Eulerp):
massel=[]; f = []
# COMPUTE THE STIFFNESS MATRIX
if material.has_low_level_dispatcher:
stiffnessel, t = self.__GetLocalStiffness__(material, fem_solver, Eulerx, Eulerw, Eulers, Eulerp, elem)
else:
stiffnessel, t = self.GetLocalStiffness(material, fem_solver, Eulerx, Eulerw, Eulers, Eulerp, elem)
I_mass_elem = []; J_mass_elem = []; V_mass_elem = []
if fem_solver.analysis_type != 'static' and fem_solver.is_mass_computed is False:
# GET THE FIELDS AT THE ELEMENT LEVEL
LagrangeElemCoords = mesh[0].points[mesh[0].elements[elem,:],:]
EulerElemCoords = Eulerx[mesh[0].elements[elem,:],:]
# COMPUTE THE MASS MATRIX
if material.has_low_level_dispatcher:
massel = self.__GetLocalMass__(material,fem_solver,elem)
else:
# massel = self.GetLocalMass(material,fem_solver,elem)
massel = self.GetLocalMass(function_space[0], material, LagrangeElemCoords, EulerElemCoords, fem_solver, elem)
if fem_solver.has_moving_boundary:
# COMPUTE FORCE VECTOR
# LEGACY CALL: neither "ApplyNeumannBoundaryConditions3D" nor "MainData" is defined in this module
f = ApplyNeumannBoundaryConditions3D(MainData, mesh, elem, LagrangeElemCoords)
I_stiff_elem, J_stiff_elem, V_stiff_elem = self.FindIndices(stiffnessel)
if fem_solver.analysis_type != 'static' and fem_solver.is_mass_computed is False:
I_mass_elem, J_mass_elem, V_mass_elem = self.FindIndices(massel)
return I_stiff_elem, J_stiff_elem, V_stiff_elem, t, f, I_mass_elem, J_mass_elem, V_mass_elem
def GetMassMatrix(self, elem, function_space, mesh, material, fem_solver, Eulerx, Eulerw, Eulerp):
massel=[]
# COMPUTE THE MASS MATRIX
# if material.has_low_level_dispatcher:
# massel = self.__GetLocalMass__(material,fem_solver,elem)
# else:
# massel = self.GetLocalMass(material,fem_solver,elem)
massel = self.__GetLocalMass__(material,fem_solver,elem)
I_mass_elem, J_mass_elem, V_mass_elem = self.FindIndices(massel)
return I_mass_elem, J_mass_elem, V_mass_elem
def GetLocalStiffness(self, material, fem_solver, Eulerx, Eulerw, Eulers, Eulerp=None, elem=0):
"""Get stiffness matrix of the system"""
# return self.K_uu(material, fem_solver, Eulerx, Eulerp, elem=0)
if self.subtype=="lagrange_multiplier" or self.subtype=="augmented_lagrange":
tractionforce = []
k_uupp, tup = self.K_uu(material, fem_solver, Eulerx, Eulerp, elem)
k_uw = self.K_uw(material, fem_solver, Eulerx, Eulerp, elem)
k_us = self.K_us(material, fem_solver, Eulerx, Eulerp, elem)
# k_ww, tw = self.K_ww(material, fem_solver, Eulerw, Eulerp, elem)
k_ww, tw = self.K_ww(material, fem_solver, Eulerx, Eulerp, elem) # CHECK Eulerx vs Eulerw
k_ws = self.K_ws(material, fem_solver, Eulerw, Eulerp, elem)
k_wp = self.K_wp(material, fem_solver, Eulerx, Eulerw, Eulerp, elem)
k_ss, ts = self.K_ss(material, fem_solver, Eulerw, Eulerp, elem)
# SEPARATE MECHANICAL AND ELECTRICAL
k_uu = k_uupp[fem_solver.all_local_mech_dofs,:][:,fem_solver.all_local_mech_dofs]
k_up = k_uupp[fem_solver.all_local_mech_dofs][:,fem_solver.all_local_electric_dofs]
k_pu = k_uupp[fem_solver.all_local_electric_dofs,:][:,fem_solver.all_local_mech_dofs]
k_pp = k_uupp[fem_solver.all_local_electric_dofs,:][:,fem_solver.all_local_electric_dofs]
tu = tup[fem_solver.all_local_mech_dofs]
tp = tup[fem_solver.all_local_electric_dofs]
if fem_solver.static_condensation is True:
    # STATIC CONDENSATION: ELIMINATE w AND s AT THE ELEMENT LEVEL (SEE THE SKETCH AFTER THIS FILE)
if self.subtype=="lagrange_multiplier":
inv_k_ws = inv(k_ws)
k1 = inv_k_ws
k2 = k1.dot(k_ww.dot(inv_k_ws))
kuu_eq = k_uu + k_us.dot(k2.dot(k_us.T))
kup_eq = k_up - k_us.dot(k1.dot(k_wp))
tu_eq = tu - k_us.dot(k1.dot((tw-k_ww.dot(inv_k_ws.dot(ts)))))
tp_eq = tp - k_wp.T.dot(inv_k_ws.dot(ts))
stiffness = np.zeros((self.meshes[0].elements.shape[1]*self.nvar,self.meshes[0].elements.shape[1]*self.nvar))
np.put(stiffness.ravel(),fem_solver.idx_uu,kuu_eq.ravel())
np.put(stiffness.ravel(),fem_solver.idx_up,kup_eq.ravel())
np.put(stiffness.ravel(),fem_solver.idx_pu,kup_eq.T.ravel())
np.put(stiffness.ravel(),fem_solver.idx_pp,k_pp.ravel())
tractionforce = np.zeros((self.meshes[0].elements.shape[1]*self.nvar,1))
tractionforce[fem_solver.all_local_mech_dofs] = tu_eq
tractionforce[fem_solver.all_local_electric_dofs] = tp_eq
if self.save_condensed_matrices:
self.condensed_matrices['k_uu'][elem] = k_uu
self.condensed_matrices['k_up'][elem] = k_up
self.condensed_matrices['k_us'][elem] = k_us
self.condensed_matrices['k_ww'][elem] = k_ww
self.condensed_matrices['k_ws'][elem] = k_ws
self.condensed_matrices['k_wp'][elem] = k_wp
self.condensed_matrices['k_pp'][elem] = k_pp
self.condensed_matrices['inv_k_ws'][elem] = inv_k_ws
self.condensed_vectors['tu'][elem] = tu
self.condensed_vectors['tw'][elem] = tw
self.condensed_vectors['ts'][elem] = ts
self.condensed_vectors['tp'][elem] = tp
elif self.subtype=="augmented_lagrange":
inv_k_ws = inv(k_ws)
k1 = inv(k_ws - k_ww.dot(inv_k_ws.dot(k_ss)))
k2 = k1.dot(k_ww.dot(inv_k_ws))
kuu_eq = k_uu + k_us.dot(k2.dot(k_us.T))
k3 = k_wp.T.dot(inv_k_ws.dot(k_ss))
k4 = k_ww.dot(inv_k_ws.dot(k_us.T))
kup_eq = k_up - k_us.dot(k1.dot(k_wp))
kpu_eq = k_up.T - k_wp.T.dot(inv_k_ws.dot(k_us.T)) - k3.dot(k1.dot(k4))
kpp_eq = k_pp + k3.dot(k1.dot(k_wp))
tu_eq = tu - k_us.dot(k1.dot((tw-k_ww.dot(inv_k_ws.dot(ts)))))
tp_eq = tp - k_wp.T.dot(inv_k_ws.dot(ts)) - k3.dot(k1.dot((tw-k_ww.dot(inv_k_ws.dot(ts)))))
stiffness = np.zeros((self.meshes[0].elements.shape[1]*self.nvar,self.meshes[0].elements.shape[1]*self.nvar))
np.put(stiffness.ravel(),fem_solver.idx_uu,kuu_eq.ravel())
np.put(stiffness.ravel(),fem_solver.idx_up,kup_eq.ravel())
np.put(stiffness.ravel(),fem_solver.idx_pu,kpu_eq.ravel())
np.put(stiffness.ravel(),fem_solver.idx_pp,kpp_eq.ravel())
tractionforce = np.zeros((self.meshes[0].elements.shape[1]*self.nvar,1))
tractionforce[fem_solver.all_local_mech_dofs] = tu_eq
tractionforce[fem_solver.all_local_electric_dofs] = tp_eq
if self.save_condensed_matrices:
self.condensed_matrices['k_uu'][elem] = k_uu
self.condensed_matrices['k_up'][elem] = k_up
self.condensed_matrices['k_us'][elem] = k_us
self.condensed_matrices['k_ww'][elem] = k_ww
self.condensed_matrices['k_ws'][elem] = k_ws
self.condensed_matrices['k_wp'][elem] = k_wp
self.condensed_matrices['k_pp'][elem] = k_pp
self.condensed_matrices['inv_k_ws'][elem] = inv_k_ws
self.condensed_vectors['tu'][elem] = tu
self.condensed_vectors['tw'][elem] = tw
self.condensed_vectors['ts'][elem] = ts
self.condensed_vectors['tp'][elem] = tp
else:
# IF NO STATIC CONDENSATION
raise NotImplementedError("Not implemented yet")
elif self.subtype=="penalty":
tractionforce = []
k_uupp, tup = self.K_uu(material, fem_solver, Eulerx, Eulerp, elem)
k_uu2, tu2 = self.K_uu_Penalty(material, fem_solver, Eulerx, Eulerp, elem)
k_uw = material.kappa*self.K_us(material, fem_solver, Eulerx, Eulerp, elem)
k_ww, tw = self.K_ww_Penalty(material, fem_solver, Eulerw, Eulerp, elem)
k_wp = self.K_wp(material, fem_solver, Eulerx, Eulerw, Eulerp, elem)
# SEPARATE MECHANICAL AND ELECTRICAL
k_uu = k_uupp[fem_solver.all_local_mech_dofs,:][:,fem_solver.all_local_mech_dofs]
k_up = k_uupp[fem_solver.all_local_mech_dofs][:,fem_solver.all_local_electric_dofs]
k_pu = k_uupp[fem_solver.all_local_electric_dofs,:][:,fem_solver.all_local_mech_dofs]
k_pp = k_uupp[fem_solver.all_local_electric_dofs,:][:,fem_solver.all_local_electric_dofs]
tu = tup[fem_solver.all_local_mech_dofs]
tp = tup[fem_solver.all_local_electric_dofs]
# IF NO STATIC CONDENSATION
if fem_solver.static_condensation is False:
raise NotImplementedError("Not implemented yet")
else:
inv_k_ww = inv(k_ww)
kuu_eq = k_uu + k_uu2 - np.dot(np.dot(k_uw,inv_k_ww),k_uw.T)
kup_eq = k_up - np.dot(np.dot(k_uw,inv_k_ww),k_wp)
kpp_eq = k_pp - np.dot(np.dot(k_wp.T,inv_k_ww),k_wp)
tu_eq = tu + tu2 - np.dot(np.dot(k_uw,inv_k_ww),tw)
tp_eq = tp - np.dot(np.dot(k_wp.T,inv_k_ww),tw)
stiffness = np.zeros((self.meshes[0].elements.shape[1]*self.nvar,self.meshes[0].elements.shape[1]*self.nvar))
np.put(stiffness.ravel(),fem_solver.idx_uu,kuu_eq.ravel())
np.put(stiffness.ravel(),fem_solver.idx_up,kup_eq.ravel())
np.put(stiffness.ravel(),fem_solver.idx_pu,kup_eq.T.ravel())
np.put(stiffness.ravel(),fem_solver.idx_pp,kpp_eq.ravel())
tractionforce = np.zeros((self.meshes[0].elements.shape[1]*self.nvar,1))
tractionforce[fem_solver.all_local_mech_dofs] = tu_eq
tractionforce[fem_solver.all_local_electric_dofs] = tp_eq
else:
raise ValueError("subtype of this variational formulation should be 'lagrange_multiplier' or 'penalty'")
return stiffness, tractionforce
def K_uu(self, material, fem_solver, Eulerx, Eulerp=None, elem=0):
"""Get stiffness matrix of the system"""
meshes = self.meshes
mesh = self.meshes[0]
function_spaces = self.function_spaces
function_space = self.function_spaces[0]
ndim = self.ndim
nvar = self.nvar
nodeperelem = meshes[0].elements.shape[1]
# print nodeperelem
# GET THE FIELDS AT THE ELEMENT LEVEL
LagrangeElemCoords = mesh.points[mesh.elements[elem,:],:]
EulerELemCoords = Eulerx[mesh.elements[elem,:],:]
ElectricPotentialElem = Eulerp[mesh.elements[elem,:]]
Jm = function_spaces[0].Jm
AllGauss = function_space.AllGauss
# GET LOCAL KINEMATICS
SpatialGradient, F, detJ = _KinematicMeasures_(Jm, AllGauss[:,0],
LagrangeElemCoords, EulerELemCoords, fem_solver.requires_geometry_update)
# GET ELECTRIC FIELD
ElectricFieldx = - np.einsum('ijk,j',SpatialGradient,ElectricPotentialElem)
# COMPUTE WORK-CONJUGATES AND HESSIAN AT THIS GAUSS POINT
ElectricDisplacementx, CauchyStressTensor, H_Voigt, _, _, _, _, _ = material.KineticMeasures(F,ElectricFieldx,elem=elem)
# COMPUTE LOCAL CONSTITUTIVE STIFFNESS AND TRACTION
stiffness, tractionforce = __ConstitutiveStiffnessIntegrandDPF__(SpatialGradient,ElectricDisplacementx,
CauchyStressTensor,H_Voigt,detJ,self.nvar,fem_solver.requires_geometry_update)
# # COMPUTE GEOMETRIC STIFFNESS
# if fem_solver.requires_geometry_update:
# stiffness += self.__GeometricStiffnessIntegrand__(SpatialGradient,CauchyStressTensor,detJ)
# SAVE AT THIS GAUSS POINT
self.SpatialGradient = SpatialGradient
self.ElectricFieldx = ElectricFieldx
self.detJ = detJ
return stiffness, tractionforce
# # ALLOCATE
# stiffness = np.zeros((nodeperelem*nvar,nodeperelem*nvar),dtype=np.float64)
# tractionforce = np.zeros((nodeperelem*nvar,1),dtype=np.float64)
# B = np.zeros((nodeperelem*nvar,material.H_VoigtSize),dtype=np.float64)
# # COMPUTE KINEMATIC MEASURES AT ALL INTEGRATION POINTS USING EINSUM (AVOIDING THE FOR LOOP)
# # MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
# ParentGradientX = np.einsum('ijk,jl->kil', Jm, LagrangeElemCoords)
# # MATERIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla_0 (N)]
# MaterialGradient = np.einsum('ijk,kli->ijl', inv(ParentGradientX), Jm)
# # DEFORMATION GRADIENT TENSOR [\vec{x} \otimes \nabla_0 (N)]
# F = np.einsum('ij,kli->kjl', EulerELemCoords, MaterialGradient)
# # COMPUTE REMAINING KINEMATIC MEASURES
# StrainTensors = KinematicMeasures(F, fem_solver.analysis_nature)
# # UPDATE/NO-UPDATE GEOMETRY
# if fem_solver.requires_geometry_update:
# # MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
# ParentGradientx = np.einsum('ijk,jl->kil',Jm,EulerELemCoords)
# # SPATIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla (N)]
# SpatialGradient = np.einsum('ijk,kli->ilj',inv(ParentGradientx),Jm)
# # COMPUTE ONCE detJ (GOOD SPEEDUP COMPARED TO COMPUTING TWICE)
# detJ = np.einsum('i,i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)),np.abs(StrainTensors['J']))
# else:
# # SPATIAL GRADIENT AND MATERIAL GRADIENT TENSORS ARE EQUAL
# SpatialGradient = np.einsum('ikj',MaterialGradient)
# # COMPUTE ONCE detJ
# detJ = np.einsum('i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)))
# # GET ELECTRIC FIELD
# ElectricFieldx = - np.einsum('ijk,j',SpatialGradient,ElectricPotentialElem)
# # LOOP OVER GAUSS POINTS
# for counter in range(AllGauss.shape[0]):
# # COMPUTE THE HESSIAN AT THIS GAUSS POINT
# H_Voigt = material.Hessian(StrainTensors,ElectricFieldx[counter,:], elem, counter)
# # COMPUTE CAUCHY STRESS TENSOR
# CauchyStressTensor = []
# if fem_solver.requires_geometry_update:
# CauchyStressTensor = material.CauchyStress(StrainTensors,ElectricFieldx[counter,:],elem,counter)
# # COMPUTE THE TANGENT STIFFNESS MATRIX
# BDB_1, t = self.K_uu_Integrand(B, SpatialGradient[counter,:,:],
# ElectricFieldx[counter,:], CauchyStressTensor, H_Voigt, analysis_nature=fem_solver.analysis_nature,
# has_prestress=fem_solver.has_prestress)
# # COMPUTE GEOMETRIC STIFFNESS MATRIX
# if fem_solver.requires_geometry_update:
# # BDB_1 += self.GeometricStiffnessIntegrand(SpatialGradient[counter,:,:],CauchyStressTensor)
# # INTEGRATE TRACTION FORCE
# tractionforce += t*detJ[counter]
# # INTEGRATE STIFFNESS
# stiffness += BDB_1*detJ[counter]
# # SAVE AT THIS GAUSS POINT
# self.SpatialGradient = SpatialGradient
# self.ElectricFieldx = ElectricFieldx
# self.detJ = detJ
# return stiffness, tractionforce
def K_uw(self, material, fem_solver, Eulerx, Eulerp=None, elem=0):
"""Get stiffness matrix of the system"""
return np.zeros((self.meshes[0].elements.shape[1]*self.ndim,self.meshes[1].elements.shape[1]*self.ndim),dtype=np.float64)
def K_us(self, material, fem_solver, Eulerx, Eulerp=None, elem=0):
"""Get stiffness matrix of the system"""
meshes = self.meshes
function_spaces = self.function_spaces
Bases_s = function_spaces[2].Bases
Ns = np.zeros((self.ndim,Bases_s.shape[0]*self.ndim),dtype=np.float64)
Bu = np.zeros((self.meshes[0].elements.shape[1]*self.ndim,self.ndim),dtype=np.float64)
stiffness = np.zeros((self.meshes[0].elements.shape[1]*self.ndim,self.meshes[2].elements.shape[1]*self.ndim))
AllGauss = function_spaces[0].AllGauss
# LOOP OVER GAUSS POINTS
for counter in range(AllGauss.shape[0]):
# COMPUTE THE TANGENT STIFFNESS MATRIX
Bu_Ns = self.K_us_Integrand(Bu, Ns, self.SpatialGradient[counter,:,:], Bases_s[:,counter])
# INTEGRATE STIFFNESS
stiffness += Bu_Ns*self.detJ[counter]
return stiffness
def K_ww(self, material, fem_solver, Eulerw, Eulerp=None, elem=0):
"""Get stiffness matrix of the system"""
meshes = self.meshes
mesh = self.meshes[1]
function_spaces = self.function_spaces
function_space = self.function_spaces[1]
ndim = self.ndim
nvar = ndim
nodeperelem = meshes[1].elements.shape[1]
# GET THE FIELDS AT THE ELEMENT LEVEL
LagrangeElemCoords = mesh.points[mesh.elements[elem,:],:]
EulerELemCoords = Eulerw[mesh.elements[elem,:],:]
ElectricPotentialElem = Eulerp[mesh.elements[elem,:]]
Jm = function_spaces[1].Jm
AllGauss = function_space.AllGauss
# # GET LOCAL KINEMATICS
# SpatialGradient, F, detJ = _KinematicMeasures_(Jm, AllGauss[:,0],
# LagrangeElemCoords, EulerELemCoords, fem_solver.requires_geometry_update)
# # COMPUTE WORK-CONJUGATES AND HESSIAN AT THIS GAUSS POINT
# CauchyStressTensor, _, H_Voigt = material.KineticMeasures(F,elem=elem)
# ALLOCATE
stiffness = np.zeros((nodeperelem*nvar,nodeperelem*nvar),dtype=np.float64)
tractionforce = np.zeros((nodeperelem*nvar,1),dtype=np.float64)
B = np.zeros((nodeperelem*nvar,material.gradient_elasticity_tensor_size),dtype=np.float64)
# COMPUTE KINEMATIC MEASURES AT ALL INTEGRATION POINTS USING EINSUM (AVOIDING THE FOR LOOP)
# MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
ParentGradientX = np.einsum('ijk,jl->kil', Jm, LagrangeElemCoords)
# MATERIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla_0 (N)]
MaterialGradient = np.einsum('ijk,kli->ijl', inv(ParentGradientX), Jm)
# DEFORMATION GRADIENT TENSOR [\vec{x} \otimes \nabla_0 (N)]
F = np.einsum('ij,kli->kjl', EulerELemCoords, MaterialGradient)
# COMPUTE REMAINING KINEMATIC MEASURES
StrainTensors = KinematicMeasures(F, fem_solver.analysis_nature)
# UPDATE/NO-UPDATE GEOMETRY
if fem_solver.requires_geometry_update:
# MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
ParentGradientx = np.einsum('ijk,jl->kil',Jm,EulerELemCoords)
# SPATIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla (N)]
SpatialGradient = np.einsum('ijk,kli->ilj',inv(ParentGradientx),Jm)
# COMPUTE ONCE detJ (GOOD SPEEDUP COMPARED TO COMPUTING TWICE)
detJ = np.einsum('i,i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)),np.abs(StrainTensors['J']))
else:
# SPATIAL GRADIENT AND MATERIAL GRADIENT TENSORS ARE EQUAL
SpatialGradient = np.einsum('ikj',MaterialGradient)
# COMPUTE ONCE detJ
detJ = np.einsum('i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)))
# GET ELECTRIC FIELD
ElectricFieldx = - np.einsum('ijk,j',SpatialGradient,ElectricPotentialElem)
# LOOP OVER GAUSS POINTS
for counter in range(AllGauss.shape[0]):
# COMPUTE THE HESSIAN AT THIS GAUSS POINT
material.Hessian(StrainTensors,None, elem, counter)
H_Voigt = material.gradient_elasticity_tensor
# COMPUTE COUPLE STRESS VECTOR
CoupleStressVector = []
if fem_solver.requires_geometry_update:
CoupleStressVector = material.CoupleStress(StrainTensors,None,elem,counter).reshape(self.ndim,1)
# COMPUTE THE TANGENT STIFFNESS MATRIX
BDB_1, t = self.K_ww_Integrand(B, SpatialGradient[counter,:,:],
None, CoupleStressVector, H_Voigt, analysis_nature=fem_solver.analysis_nature,
has_prestress=fem_solver.has_prestress)
if fem_solver.requires_geometry_update:
# INTEGRATE TRACTION FORCE
tractionforce += t*detJ[counter]
# INTEGRATE STIFFNESS
stiffness += BDB_1*detJ[counter]
# # SAVE AT THIS GAUSS POINT
# self.SpatialGradient = SpatialGradient
# self.detJ = detJ
return stiffness, tractionforce
def K_ws(self, material, fem_solver, Eulerw, Eulerp=None, elem=0):
"""Get stiffness matrix of the system"""
meshes = self.meshes
function_spaces = self.function_spaces
Bases_w = function_spaces[1].Bases
Bases_s = function_spaces[2].Bases
Nw = np.zeros((Bases_w.shape[0]*self.ndim,self.ndim),dtype=np.float64)
Ns = np.zeros((self.ndim,Bases_s.shape[0]*self.ndim),dtype=np.float64)
stiffness = np.zeros((Bases_w.shape[0]*self.ndim,Bases_s.shape[0]*self.ndim))
AllGauss = function_spaces[0].AllGauss
# LOOP OVER GAUSS POINTS
for counter in range(AllGauss.shape[0]):
# COMPUTE THE TANGENT STIFFNESS MATRIX
Nw_Ns = self.K_ws_Integrand(Nw, Ns, Bases_w[:,counter], Bases_s[:,counter])
# INTEGRATE STIFFNESS
stiffness += Nw_Ns*self.detJ[counter] ## CAREFUL ABOUT [CHECK] self.detJ[counter] ####################
return -stiffness
def K_wp(self, material, fem_solver, Eulerx, Eulerw, Eulerp, elem=0):
"""Get stiffness matrix of the system"""
meshes = self.meshes
mesh = self.meshes[1]
function_spaces = self.function_spaces
function_space = self.function_spaces[1]
ndim = self.ndim
nodeperelem = meshes[1].elements.shape[1]
# GET THE FIELDS AT THE ELEMENT LEVEL
LagrangeElemCoords = mesh.points[mesh.elements[elem,:],:]
EulerELemCoords = Eulerw[mesh.elements[elem,:],:]
ElectricPotentialElem = Eulerp[self.meshes[0].elements[elem,:]]
Jm = function_spaces[1].Jm
AllGauss = function_space.AllGauss
# ALLOCATE
stiffness = np.zeros((nodeperelem*ndim,self.meshes[0].elements.shape[1]),dtype=np.float64)
B_w = np.zeros((nodeperelem*ndim,material.flexoelectric_tensor.shape[0]),dtype=np.float64)
B_p = np.zeros((self.meshes[0].elements.shape[1],ndim),dtype=np.float64)
# GIVES WRONG ANSWER FOR SOME REASON
# # GET LOCAL KINEMATICS - EVALUATED FOR W SHAPE FUNCTIONS
# SpatialGradient_w, F_w, detJ_w = _KinematicMeasures_(Jm, AllGauss[:,0],
# LagrangeElemCoords, EulerELemCoords, fem_solver.requires_geometry_update)
# USE THIS INSTEAD
# COMPUTE KINEMATIC MEASURES AT ALL INTEGRATION POINTS USING EINSUM (AVOIDING THE FOR LOOP)
# MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
ParentGradientX = np.einsum('ijk,jl->kil', Jm, LagrangeElemCoords)
# MATERIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla_0 (N)]
MaterialGradient = np.einsum('ijk,kli->ijl', inv(ParentGradientX), Jm)
# DEFORMATION GRADIENT TENSOR [\vec{x} \otimes \nabla_0 (N)]
F_w = np.einsum('ij,kli->kjl', EulerELemCoords, MaterialGradient)
# COMPUTE REMAINING KINEMATIC MEASURES
StrainTensors = KinematicMeasures(F_w, fem_solver.analysis_nature)
# UPDATE/NO-UPDATE GEOMETRY
if fem_solver.requires_geometry_update:
# MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
ParentGradientx = np.einsum('ijk,jl->kil',Jm,EulerELemCoords)
# SPATIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla (N)]
SpatialGradient_w = np.einsum('ijk,kli->ilj',inv(ParentGradientx),Jm)
# COMPUTE ONCE detJ (GOOD SPEEDUP COMPARED TO COMPUTING TWICE)
detJ_w = np.einsum('i,i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)),np.abs(StrainTensors['J']))
else:
# SPATIAL GRADIENT AND MATERIAL GRADIENT TENSORS ARE EQUAL
SpatialGradient_w = np.einsum('ikj',MaterialGradient)
# COMPUTE ONCE detJ
detJ_w = np.einsum('i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)))
# GET LOCAL KINEMATICS - EVALUATED FOR W SHAPE FUNCTIONS
SpatialGradient_p, F_p, detJ_p = _KinematicMeasures_(function_spaces[0].Jm, function_spaces[0].AllGauss[:,0],
self.meshes[0].points[self.meshes[0].elements[elem,:],:], Eulerx[self.meshes[0].elements[elem,:],:],
fem_solver.requires_geometry_update)
# GET ELECTRIC FIELD
ElectricFieldx = - np.einsum('ijk,j',SpatialGradient_p,ElectricPotentialElem)
# COMPUTE WORK-CONJUGATES AND HESSIAN AT THIS GAUSS POINT
material.KineticMeasures(F_w,ElectricFieldx,elem=elem)
H_Voigt = material.flexoelectric_tensors
# LOOP OVER GAUSS POINTS
for counter in range(function_spaces[0].AllGauss.shape[0]):
# COMPUTE THE TANGENT STIFFNESS MATRIX
BDB = self.K_wp_Integrand(B_w, B_p, SpatialGradient_w[counter,:,:], SpatialGradient_p[counter,:,:],H_Voigt[counter,:,:])
# INTEGRATE STIFFNESS
stiffness += BDB*detJ_p[counter]
return stiffness
def K_ss(self, material, fem_solver, Eulers, Eulerp=None, elem=0):
"""Get stiffness matrix of the system"""
stiffness = np.zeros((self.function_spaces[2].Bases.shape[0]*self.ndim,self.function_spaces[2].Bases.shape[0]*self.ndim),dtype=np.float64)
tractionforce = np.zeros((self.function_spaces[2].Bases.shape[0]*self.ndim,1),dtype=np.float64)
if self.subtype == "lagrange_multiplier":
return stiffness, tractionforce
EulerELemS = Eulers[self.meshes[2].elements[elem,:],:]
Bases_s = self.function_spaces[2].Bases
Ns = np.zeros((self.ndim,Bases_s.shape[0]*self.ndim),dtype=np.float64)
AllGauss = self.function_spaces[2].AllGauss
# FIND LAGRANGE MULTIPLIER AT ALL GAUSS POINTS
EulerGaussS = np.dot(Bases_s.T,EulerELemS)
# LOOP OVER GAUSS POINTS
for counter in range(AllGauss.shape[0]):
# COMPUTE STRESS
LagrangeMultiplierStressVector = material.LagrangeMultiplierStress(EulerGaussS,elem=elem,gcounter=counter)
# COMPUTE THE TANGENT STIFFNESS MATRIX
NDN, t = self.K_ss_Integrand(Ns, Bases_s[:,counter], 0, LagrangeMultiplierStressVector, material.kappa,
analysis_nature=fem_solver.analysis_nature, has_prestress=fem_solver.has_prestress)
# INTEGRATE STIFFNESS
stiffness += NDN*self.detJ[counter] ## CAREFUL ABOUT [CHECK] self.detJ[counter] ####################
# INTEGRATE TRACTION
if fem_solver.requires_geometry_update:
# INTEGRATE TRACTION FORCE
tractionforce += t*self.detJ[counter]
return stiffness, tractionforce
def K_uu_Penalty(self, material, fem_solver, Eulerx, Eulerp=None, elem=0):
"""Get stiffness matrix of the system"""
meshes = self.meshes
function_spaces = self.function_spaces
Bu = np.zeros((self.meshes[0].elements.shape[1]*self.ndim,self.ndim),dtype=np.float64)
stiffness = np.zeros((self.meshes[0].elements.shape[1]*self.ndim,self.meshes[0].elements.shape[1]*self.ndim))
AllGauss = function_spaces[0].AllGauss
# LOOP OVER GAUSS POINTS
for counter in range(AllGauss.shape[0]):
# COMPUTE THE TANGENT STIFFNESS MATRIX
BDB = self.K_uu_Penalty_Integrand(Bu, self.SpatialGradient[counter,:,:])
# INTEGRATE STIFFNESS
stiffness += material.kappa*BDB*self.detJ[counter]
# THIS CONTRIBUTES TO TRACTION AS WELL
tractionforce = np.zeros((self.meshes[0].elements.shape[1]*self.ndim,1))
return stiffness, tractionforce
def K_ww_Penalty(self, material, fem_solver, Eulerw, Eulerp=None, elem=0):
"""Get stiffness matrix of the system"""
meshes = self.meshes
mesh = self.meshes[1]
function_spaces = self.function_spaces
function_space = self.function_spaces[1]
ndim = self.ndim
nvar = ndim
nodeperelem = meshes[1].elements.shape[1]
Jm = function_spaces[1].Jm
AllGauss = function_space.AllGauss
# ALLOCATE
stiffness = np.zeros((nodeperelem*nvar,nodeperelem*nvar),dtype=np.float64)
tractionforce = np.zeros((nodeperelem*nvar,1),dtype=np.float64)
Bases_w = self.function_spaces[1].Bases
Nw = np.zeros((self.ndim,Bases_w.shape[0]*self.ndim),dtype=np.float64)
# detJ = AllGauss[:,0]
detJ = self.detJ
# LOOP OVER GAUSS POINTS
for counter in range(AllGauss.shape[0]):
# COMPUTE COUPLE STRESS VECTOR
CoupleStressVector = []
if fem_solver.requires_geometry_update:
    # NOTE: StrainTensors IS NOT COMPUTED IN THIS ROUTINE; AS WRITTEN THIS LINE
    # RAISES A NameError UNLESS KINEMATICS ARE EVALUATED BEFOREHAND
    CoupleStressVector = material.CoupleStress(StrainTensors,None,elem,counter).reshape(self.ndim,1)
# COMPUTE THE TANGENT STIFFNESS MATRIX
BDB_1, t = self.K_ww_Penalty_Integrand(Nw, Bases_w[:,counter],
0, CoupleStressVector, material.kappa, analysis_nature=fem_solver.analysis_nature,
has_prestress=fem_solver.has_prestress)
if fem_solver.requires_geometry_update:
# INTEGRATE TRACTION FORCE
tractionforce += t*detJ[counter]
# INTEGRATE STIFFNESS
stiffness += material.kappa*BDB_1*detJ[counter]
return stiffness, tractionforce
def GetLocalTraction(self, function_space, material, LagrangeElemCoords,
EulerELemCoords, ElectricPotentialElem, fem_solver, elem=0):
"""Get traction vector of the system"""
pass
def K_uu_Integrand(self, B, SpatialGradient, ElectricDisplacementx,
CauchyStressTensor, H_Voigt, analysis_nature="nonlinear", has_prestress=True):
ndim = self.ndim
nvar = self.nvar
# MATRIX FORM
SpatialGradient = SpatialGradient.T
# THREE DIMENSIONS
if SpatialGradient.shape[0]==3:
B[0::nvar,0] = SpatialGradient[0,:]
B[1::nvar,1] = SpatialGradient[1,:]
B[2::nvar,2] = SpatialGradient[2,:]
# Mechanical - Shear Terms
B[1::nvar,5] = SpatialGradient[2,:]
B[2::nvar,5] = SpatialGradient[1,:]
B[0::nvar,4] = SpatialGradient[2,:]
B[2::nvar,4] = SpatialGradient[0,:]
B[0::nvar,3] = SpatialGradient[1,:]
B[1::nvar,3] = SpatialGradient[0,:]
if analysis_nature == 'nonlinear' or has_prestress:
CauchyStressTensor_Voigt = np.array([
CauchyStressTensor[0,0],CauchyStressTensor[1,1],CauchyStressTensor[2,2],
CauchyStressTensor[0,1],CauchyStressTensor[0,2],CauchyStressTensor[1,2]
]).reshape(6,1)
TotalTraction = CauchyStressTensor_Voigt
elif SpatialGradient.shape[0]==2:
B[0::nvar,0] = SpatialGradient[0,:]
B[1::nvar,1] = SpatialGradient[1,:]
# Mechanical - Shear Terms
B[0::nvar,2] = SpatialGradient[1,:]
B[1::nvar,2] = SpatialGradient[0,:]
if analysis_nature == 'nonlinear' or has_prestress:
CauchyStressTensor_Voigt = np.array([
CauchyStressTensor[0,0],CauchyStressTensor[1,1],
CauchyStressTensor[0,1]]).reshape(3,1)
TotalTraction = CauchyStressTensor_Voigt
BDB = np.dot(np.dot(B,H_Voigt),B.T)
t=[]
if analysis_nature == 'nonlinear' or has_prestress:
t = np.dot(B,TotalTraction)
return BDB, t
def K_us_Integrand(self, Bu, Ns, SpatialGradient, Bases_s):
ndim = self.ndim
nvar = ndim
# MATRIX FORM
SpatialGradient = SpatialGradient.T
# THREE DIMENSIONS
if SpatialGradient.shape[0]==3:
# VORTICITY TERMS
Bu[1::nvar,0] = -SpatialGradient[2,:]
Bu[2::nvar,0] = SpatialGradient[1,:]
Bu[0::nvar,1] = SpatialGradient[2,:]
Bu[2::nvar,1] = -SpatialGradient[0,:]
Bu[0::nvar,2] = -SpatialGradient[1,:]
Bu[1::nvar,2] = SpatialGradient[0,:]
elif SpatialGradient.shape[0]==2:
# VORTICITY TERMS
Bu[0::nvar,0] = -SpatialGradient[1,:]
Bu[1::nvar,0] = SpatialGradient[0,:]
for ivar in range(ndim):
Ns[ivar,ivar::nvar] = Bases_s
Bu_Ns = 0.5*np.dot(Bu,Ns)
return Bu_Ns
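    # Editorial note: the signed entries above assemble the discrete curl of the
    # displacement field; together with the 0.5 factor, Bu pairs the dofs so that
    # the Lagrange multiplier s enforces w = 0.5*curl(u), i.e. the infinitesimal
    # rotation (vorticity) vector. In 2D only the single out-of-plane rotation
    # component survives.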
def K_ww_Integrand(self, B, SpatialGradient, ElectricDisplacementx,
CoupleStressVector, H_Voigt, analysis_nature="nonlinear", has_prestress=True):
ndim = self.ndim
nvar = self.ndim
# MATRIX FORM
SpatialGradient = SpatialGradient.T
# THREE DIMENSIONS
if SpatialGradient.shape[0]==3:
# VORTICITY TERMS
B[1::nvar,0] = -SpatialGradient[2,:]
B[2::nvar,0] = SpatialGradient[1,:]
B[0::nvar,1] = SpatialGradient[2,:]
B[2::nvar,1] = -SpatialGradient[0,:]
B[0::nvar,2] = -SpatialGradient[1,:]
B[1::nvar,2] = SpatialGradient[0,:]
elif SpatialGradient.shape[0]==2:
# VORTICITY TERMS
B[0::nvar,0] = -SpatialGradient[1,:]
B[1::nvar,0] = SpatialGradient[0,:]
BDB = np.dot(np.dot(B,H_Voigt),B.T)
t=[]
if analysis_nature == 'nonlinear' or has_prestress:
t = np.dot(B,CoupleStressVector)
return BDB, t
def K_wp_Integrand(self, B_w, B_p, SpatialGradient_w, SpatialGradient_p, H_Voigt):
ndim = self.ndim
nvar = self.ndim
# MATRIX FORM
SpatialGradient_w = SpatialGradient_w.T
SpatialGradient_p = SpatialGradient_p.T
# THREE DIMENSIONS
if SpatialGradient_w.shape[0]==3:
# VORTICITY TERMS
B_w[1::nvar,0] = -SpatialGradient_w[2,:]
B_w[2::nvar,0] = SpatialGradient_w[1,:]
B_w[0::nvar,1] = SpatialGradient_w[2,:]
B_w[2::nvar,1] = -SpatialGradient_w[0,:]
B_w[0::nvar,2] = -SpatialGradient_w[1,:]
B_w[1::nvar,2] = SpatialGradient_w[0,:]
# Electrostatic
B_p[:,0] = SpatialGradient_p[0,:]
B_p[:,1] = SpatialGradient_p[1,:]
B_p[:,2] = SpatialGradient_p[2,:]
elif SpatialGradient_w.shape[0]==2:
# VORTICITY TERMS
B_w[0::nvar,0] = -SpatialGradient_w[1,:]
B_w[1::nvar,0] = SpatialGradient_w[0,:]
# Electrostatic
B_p[:,0] = SpatialGradient_p[0,:]
B_p[:,1] = SpatialGradient_p[1,:]
BDB = np.dot(np.dot(B_w,H_Voigt),B_p.T)
return BDB
def K_ws_Integrand(self, Nw, Ns, Bases_w, Bases_s):
ndim = self.ndim
nvar = ndim
for ivar in range(ndim):
Nw[ivar::nvar,ivar] = Bases_w
for ivar in range(ndim):
Ns[ivar,ivar::nvar] = Bases_s
Nw_Ns = 0.5*np.dot(Nw,Ns)
return Nw_Ns
def K_ss_Integrand(self, Ns, Bases_s, ElectricDisplacementx,
LagrangeMultiplierStressVector, kappa, analysis_nature="nonlinear", has_prestress=True):
ndim = self.ndim
nvar = ndim
for ivar in range(ndim):
Ns[ivar,ivar::nvar] = Bases_s
if self.subtype == "augmented_lagrange":
NDN = np.dot(Ns.T,Ns)/(1.0*kappa)
else:
NDN = np.zeros((self.function_spaces[2].Bases.shape[0]*self.ndim,self.function_spaces[2].Bases.shape[0]*self.ndim),dtype=np.float64)
t=[]
if analysis_nature == 'nonlinear' or has_prestress:
t = np.dot(Ns,LagrangeMultiplierStressVector)
return NDN, t
def K_uu_Penalty_Integrand(self, Bu, SpatialGradient):
ndim = self.ndim
nvar = ndim
# MATRIX FORM
SpatialGradient = SpatialGradient.T
# THREE DIMENSIONS
if SpatialGradient.shape[0]==3:
# VORTICITY TERMS
Bu[1::nvar,0] = -SpatialGradient[2,:]
Bu[2::nvar,0] = SpatialGradient[1,:]
Bu[0::nvar,1] = SpatialGradient[2,:]
Bu[2::nvar,1] = -SpatialGradient[0,:]
Bu[0::nvar,2] = -SpatialGradient[1,:]
Bu[1::nvar,2] = SpatialGradient[0,:]
elif SpatialGradient.shape[0]==2:
# VORTICITY TERMS
Bu[0::nvar,0] = -SpatialGradient[1,:]
Bu[1::nvar,0] = SpatialGradient[0,:]
BDB = 0.25*np.dot(Bu,Bu.T)
return BDB
def K_ww_Penalty_Integrand(self, Nw, Bases_w, ElectricDisplacementx,
CoupleStressVector, kappa, analysis_nature="nonlinear", has_prestress=True):
ndim = self.ndim
nvar = ndim
for ivar in range(ndim):
Nw[ivar,ivar::nvar] = Bases_w
NDN = kappa*np.dot(Nw.T,Nw)
t=[]
if analysis_nature == 'nonlinear' or has_prestress:
t = np.dot(Nw,CoupleStressVector)
return NDN, t
def TractionIntegrand(self, B, SpatialGradient, ElectricDisplacementx,
CauchyStressTensor, analysis_nature="nonlinear", has_prestress=True):
"""Applies to displacement potential based formulation"""
pass
def GetEnergy(self, function_space, material, LagrangeElemCoords,
EulerELemCoords, ElectricPotentialElem, fem_solver, elem=0):
"""Get virtual energy of the system. For dynamic analysis this is handy for computing conservation of energy.
The routine computes the global form of virtual internal energy, i.e. the integral of "W(C,G,C)". This can be
computed purely in a Lagrangian configuration.
"""
nvar = self.nvar
ndim = self.ndim
nodeperelem = function_space.Bases.shape[0]
det = np.linalg.det
inv = np.linalg.inv
Jm = function_space.Jm
AllGauss = function_space.AllGauss
strain_energy = 0.
electrical_energy = 0.
# COMPUTE KINEMATIC MEASURES AT ALL INTEGRATION POINTS USING EINSUM (AVOIDING THE FOR LOOP)
# MAPPING TENSOR [\partial\vec{X}/ \partial\vec{\varepsilon} (ndim x ndim)]
ParentGradientX = np.einsum('ijk,jl->kil', Jm, LagrangeElemCoords)
# MATERIAL GRADIENT TENSOR IN PHYSICAL ELEMENT [\nabla_0 (N)]
MaterialGradient = np.einsum('ijk,kli->ijl', inv(ParentGradientX), Jm)
# DEFORMATION GRADIENT TENSOR [\vec{x} \otimes \nabla_0 (N)]
F = np.einsum('ij,kli->kjl', EulerELemCoords, MaterialGradient)
# COMPUTE REMAINING KINEMATIC MEASURES
StrainTensors = KinematicMeasures(F, fem_solver.analysis_nature)
# SPATIAL GRADIENT AND MATERIAL GRADIENT TENSORS ARE EQUAL
SpatialGradient = np.einsum('ikj',MaterialGradient)
# COMPUTE ONCE detJ
detJ = np.einsum('i,i->i',AllGauss[:,0],np.abs(det(ParentGradientX)))
# GET ELECTRIC FIELD
ElectricFieldx = - np.einsum('ijk,j',SpatialGradient,ElectricPotentialElem)
# LOOP OVER GAUSS POINTS
for counter in range(AllGauss.shape[0]):
if material.energy_type == "enthalpy":
# COMPUTE THE INTERNAL ENERGY AT THIS GAUSS POINT
energy = material.InternalEnergy(StrainTensors,ElectricFieldx[counter,:],elem,counter)
elif material.energy_type == "internal_energy":
# COMPUTE ELECTRIC DISPLACEMENT IMPLICITLY
ElectricDisplacementx = material.ElectricDisplacementx(StrainTensors, ElectricFieldx[counter,:], elem, counter)
# COMPUTE THE INTERNAL ENERGY AT THIS GAUSS POINT
energy = material.InternalEnergy(StrainTensors,ElectricDisplacementx,elem,counter)
# INTEGRATE INTERNAL ENERGY
strain_energy += energy[0]*detJ[counter]
electrical_energy += energy[1]*detJ[counter]
return strain_energy, electrical_energy
def Assemble(self, fem_solver, material, Eulerx, Eulerw, Eulers, Eulerp):
# GET MESH DETAILS
# C = mesh.InferPolynomialDegree() - 1
formulation = self
meshes = formulation.meshes
mesh = meshes[0]
nvar = formulation.nvar
ndim = formulation.ndim
nelem = meshes[0].nelem
nodeperelem = meshes[0].elements.shape[1]
local_size = int(ndim*meshes[0].elements.shape[1] + ndim*meshes[1].elements.shape[1] + ndim*meshes[2].elements.shape[1])
capacity = local_size**2
# ALLOCATE VECTORS FOR SPARSE ASSEMBLY OF STIFFNESS MATRIX - CHANGE TYPES TO INT64 FOR DoF > 1e09
I_stiffness=np.zeros(int(capacity*nelem),dtype=np.int32)
J_stiffness=np.zeros(int(capacity*nelem),dtype=np.int32)
V_stiffness=np.zeros(int(capacity*nelem),dtype=np.float64)
I_mass=[]; J_mass=[]; V_mass=[]
if fem_solver.analysis_type !='static':
# ALLOCATE VECTORS FOR SPARSE ASSEMBLY OF MASS MATRIX - CHANGE TYPES TO INT64 FOR DoF > 1e09
I_mass=np.zeros(int((nvar*nodeperelem)**2*nelem),dtype=np.int32)
J_mass=np.zeros(int((nvar*nodeperelem)**2*nelem),dtype=np.int32)
V_mass=np.zeros(int((nvar*nodeperelem)**2*nelem),dtype=np.float64)
# T = np.zeros((local_size,1),np.float64)
T = np.zeros((mesh.points.shape[0]*nvar,1),np.float64)
mass, F = [], []
if fem_solver.has_moving_boundary:
F = np.zeros((mesh.points.shape[0]*nvar,1),np.float64)
if fem_solver.parallel:
# COMPUTE ALL LOCAL ELEMENTAL MATRICES (STIFFNESS, MASS, INTERNAL & EXTERNAL TRACTION FORCES)
# ParallelTuple = parmap.map(formulation.GetElementalMatrices,np.arange(0,nelem,dtype=np.int32),
# function_space, mesh, material, fem_solver, Eulerx, Eulerp)
ParallelTuple = parmap.map(formulation.GetElementalMatrices,np.arange(0,nelem,dtype=np.int32),
    formulation.function_spaces, formulation.meshes, material, fem_solver, Eulerx, Eulerw, Eulers, Eulerp,
    processes=int(multiprocessing.cpu_count()/2))
for elem in range(nelem):
if fem_solver.parallel:
# UNPACK PARALLEL TUPLE VALUES
I_stiff_elem = ParallelTuple[elem][0]; J_stiff_elem = ParallelTuple[elem][1]; V_stiff_elem = ParallelTuple[elem][2]
t = ParallelTuple[elem][3]; f = ParallelTuple[elem][4]
I_mass_elem = ParallelTuple[elem][5]; J_mass_elem = ParallelTuple[elem][6]; V_mass_elem = ParallelTuple[elem][7]
else:
# COMPUTE ALL LOCAL ELEMENTAL MATRICES (STIFFNESS, MASS, INTERNAL & EXTERNAL TRACTION FORCES)
I_stiff_elem, J_stiff_elem, V_stiff_elem, t, f, \
I_mass_elem, J_mass_elem, V_mass_elem = formulation.GetElementalMatrices(elem,
formulation.function_spaces, formulation.meshes, material, fem_solver, Eulerx, Eulerw, Eulers, Eulerp)
# SPARSE ASSEMBLY - STIFFNESS MATRIX
SparseAssemblyNative(I_stiff_elem,J_stiff_elem,V_stiff_elem,I_stiffness,J_stiffness,V_stiffness,
elem,nvar,nodeperelem,mesh.elements)
if fem_solver.analysis_type != 'static' and fem_solver.is_mass_computed==False:
# SPARSE ASSEMBLY - MASS MATRIX
SparseAssemblyNative(I_mass_elem,J_mass_elem,V_mass_elem,I_mass,J_mass,V_mass,
elem,nvar,nodeperelem,mesh.elements)
if fem_solver.has_moving_boundary:
# RHS ASSEMBLY
RHSAssemblyNative(F,f,elem,nvar,nodeperelem,mesh.elements)
# INTERNAL TRACTION FORCE ASSEMBLY
RHSAssemblyNative(T,t,elem,nvar,nodeperelem,mesh.elements)
if (elem % fem_solver.assembly_print_counter == 0 or elem==nelem-1) and elem != 0:
nume = elem+1 if elem==nelem-1 else elem
print(('Assembled {} element matrices').format(nume))
if fem_solver.parallel:
del ParallelTuple
gc.collect()
# REALLY DANGEROUS FOR MULTIPHYSICS PROBLEMS - NOTE THAT SCIPY RUNS A PRUNE ANYWAY
# V_stiffness[np.isclose(V_stiffness,0.)] = 0.
stiffness = coo_matrix((V_stiffness,(I_stiffness,J_stiffness)),
shape=((nvar*mesh.points.shape[0],nvar*mesh.points.shape[0])),dtype=np.float64).tocsr()
# GET STORAGE/MEMORY DETAILS
fem_solver.spmat = stiffness.data.nbytes/1024./1024.
fem_solver.ijv = (I_stiffness.nbytes + J_stiffness.nbytes + V_stiffness.nbytes)/1024./1024.
del I_stiffness, J_stiffness, V_stiffness
gc.collect()
if fem_solver.analysis_type != 'static' and fem_solver.is_mass_computed==False:
mass = csr_matrix((V_mass,(I_mass,J_mass)),shape=((nvar*mesh.points.shape[0],
nvar*mesh.points.shape[0])),dtype=np.float64)
fem_solver.is_mass_computed = True
return stiffness, T, F, mass
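    # Editorial note: the I/J/V triplets assembled above deliberately contain
    # duplicate (row, col) pairs from elements sharing nodes; scipy's
    # coo_matrix(...).tocsr() sums duplicates, which is exactly what finite
    # element assembly requires. A toy illustration:
    #     coo_matrix(([1., 1.], ([1, 1], [1, 1])), shape=(2, 2)).toarray()
    #     -> [[0., 0.], [0., 2.]]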
def GetAugmentedSolution(self, fem_solver, material, TotalDisp, Eulerx, Eulerw, Eulers, Eulerp):
"""Get condensed variables
"""
if self.save_condensed_matrices is False:
return 0., 0.
mesh = self.meshes[0]
elements = mesh.elements
points = mesh.points
nelem = mesh.nelem
nodeperelem = mesh.elements.shape[1]
C = mesh.InferPolynomialDegree() - 1
ndim = mesh.InferSpatialDimension()
function_space = FunctionSpace(mesh, p=C+1, evaluate_at_nodes=True)
Jm = function_space.Jm
AllGauss = function_space.AllGauss
AllEulerW = np.zeros((nelem,self.meshes[1].elements.shape[1],ndim))
AllEulerS = np.zeros((nelem,self.meshes[2].elements.shape[1],ndim))
NodalEulerW = np.zeros((self.meshes[1].points.shape[0],self.ndim))
NodalEulerS = np.zeros((self.meshes[2].points.shape[0],self.ndim))
# LOOP OVER ELEMENTS
for elem in range(nelem):
# GET THE FIELDS AT THE ELEMENT LEVEL
LagrangeElemCoords = points[elements[elem,:],:]
EulerELemCoords = Eulerx[elements[elem,:],:]
ElectricPotentialElem = Eulerp[elements[elem,:]]
if self.subtype == "lagrange_multiplier" or self.subtype == "augmented_lagrange":
k_uu = self.condensed_matrices['k_uu'][elem]
k_up = self.condensed_matrices['k_up'][elem]
k_us = self.condensed_matrices['k_us'][elem]
k_ww = self.condensed_matrices['k_ww'][elem]
k_ws = self.condensed_matrices['k_ws'][elem]
k_wp = self.condensed_matrices['k_wp'][elem]
k_pp = self.condensed_matrices['k_pp'][elem]
inv_k_ws = self.condensed_matrices['inv_k_ws'][elem]
tu = self.condensed_vectors['tu'][elem]
tw = self.condensed_vectors['tw'][elem]
ts = self.condensed_vectors['ts'][elem]
tp = self.condensed_vectors['tp'][elem]
if self.subtype == "lagrange_multiplier":
EulerElemW = np.dot(inv_k_ws,(ts - np.dot(k_us.T,EulerELemCoords.ravel())[:,None])).ravel()
EulerElemS = np.dot(inv_k_ws,(tw - np.dot(k_ww,EulerElemW)[:,None] -\
np.dot(k_wp,ElectricPotentialElem)[:,None])).ravel()
elif self.subtype == "augmented_lagrange":
raise RuntimeError("Not implemented yet")
# NOTE: THE TWO LINES BELOW ARE CURRENTLY UNREACHABLE PLACEHOLDERS
EulerElemW = np.dot(inv_k_ws,(ts - np.dot(k_us.T,EulerELemCoords.ravel())[:,None])).ravel()
EulerElemS = np.dot(inv_k_ws,(tw - np.dot(k_ww,EulerElemW)[:,None] -\
np.dot(k_wp,ElectricPotentialElem)[:,None])).ravel()
else:
raise RuntimeError("Not implemented yet")
# SAVE
AllEulerW[elem,:,:] = EulerElemW.reshape(self.meshes[1].elements.shape[1],ndim)
AllEulerS[elem,:,:] = EulerElemS.reshape(self.meshes[2].elements.shape[1],ndim)
for inode in self.all_nodes:
Els, Pos = self.Elss[inode], self.Poss[inode]
ncommon_nodes = Els.shape[0]
for uelem in range(ncommon_nodes):
NodalEulerW[inode,:] += AllEulerW[Els[uelem],Pos[uelem],:]
NodalEulerS[inode,:] += AllEulerS[Els[uelem],Pos[uelem],:]
# AVERAGE OUT
NodalEulerW[inode,:] /= ncommon_nodes
NodalEulerS[inode,:] /= ncommon_nodes
# MAKE SURE TO UPDATE THESE IN PLACE; CREATING NEW ARRAYS WOULD REQUIRE RETURNING THEM
Eulerw[:,:] += NodalEulerW
Eulers[:,:] += NodalEulerS
# if self.fields != 'electro_mechanics':
# TotalDisp[:,ndim:,Increment] = NodalEulerW
# TotalDisp[:,2*ndim:,Increment] = NodalEulerS
# else:
# TotalDisp[:,ndim+1:,Increment] = NodalEulerW
# TotalDisp[:,2*ndim+1:,Increment] = NodalEulerS
return NodalEulerW, NodalEulerS
```
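The `lagrange_multiplier` branch of `GetLocalStiffness` above condenses the rotation (w) and Lagrange-multiplier (s) degrees of freedom out of the element system before assembly, and `GetAugmentedSolution` later recovers them. Below is a minimal numpy sketch of that elimination algebra on an assumed toy block system; the block names mirror the source, but the sizes and data are made up:

```python
import numpy as np

rng = np.random.default_rng(0)
nu, nw, npot = 6, 3, 4             # u-, w/s- and potential-dof counts (assumed)

def spd(n):                        # random symmetric positive-definite block
    A = rng.standard_normal((n, n))
    return A @ A.T + n * np.eye(n)

k_uu, k_ww, k_pp = spd(nu), spd(nw), spd(npot)
k_ws = spd(nw)                     # symmetric, invertible w-s coupling
k_us = rng.standard_normal((nu, nw))
k_wp = rng.standard_normal((nw, npot))
k_up = rng.standard_normal((nu, npot))
tu, tw, ts, tp = (rng.standard_normal((n, 1)) for n in (nu, nw, nw, npot))

# Full saddle-point system in the unknowns (u, w, s, p)
Z = np.zeros
K = np.block([
    [k_uu,        Z((nu, nw)), k_us,          k_up],
    [Z((nw, nu)), k_ww,        k_ws,          k_wp],
    [k_us.T,      k_ws.T,      Z((nw, nw)),   Z((nw, npot))],
    [k_up.T,      k_wp.T,      Z((npot, nw)), k_pp]])
rhs = np.vstack((tu, tw, ts, tp))
u, w, s, p = np.vsplit(np.linalg.solve(K, rhs), [nu, nu + nw, nu + 2 * nw])

# Condensed (u, p) system: the same expressions as in GetLocalStiffness
inv_k_ws = np.linalg.inv(k_ws)
k2 = inv_k_ws @ k_ww @ inv_k_ws
kuu_eq = k_uu + k_us @ k2 @ k_us.T
kup_eq = k_up - k_us @ inv_k_ws @ k_wp
tu_eq = tu - k_us @ inv_k_ws @ (tw - k_ww @ inv_k_ws @ ts)
tp_eq = tp - k_wp.T @ inv_k_ws @ ts
Kc = np.block([[kuu_eq, kup_eq], [kup_eq.T, k_pp]])
uc, pc = np.vsplit(np.linalg.solve(Kc, np.vstack((tu_eq, tp_eq))), [nu])
assert np.allclose(u, uc) and np.allclose(p, pc)

# Recovery of the condensed fields, as in GetAugmentedSolution
w_rec = inv_k_ws @ (ts - k_us.T @ u)
s_rec = inv_k_ws @ (tw - k_ww @ w_rec - k_wp @ p)
assert np.allclose(w, w_rec) and np.allclose(s, s_rec)
```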
#### File: tests/test_basics/test_BEM.py
```python
import numpy as np
def test_BEM():
"""Unnecessary test for the ugly and non-working and legacy BEM
for the sake of coverage
"""
from Florence.BoundaryElements import GetBasesBEM2D
from Florence.BoundaryElements import GenerateCoordinates
from Florence.BoundaryElements import CoordsJacobianRadiusatGaussPoints, CoordsJacobianRadiusatGaussPoints_LM
from Florence.BoundaryElements import AssemblyBEM2D
from Florence.BoundaryElements.Assembly import AssemblyBEM2D_Sparse
from Florence.BoundaryElements import Sort_BEM
from Florence import QuadratureRule, FunctionSpace, Mesh
# Unnecessary loop
for i in range(10):
mesh = Mesh()
mesh.element_type = "line"
mesh.points = np.array([
[0.,0.],
[1.,0.],
[1.,1.],
[0.,1.],
])
mesh.elements = np.array([
[0,1],
[1,2],
[2,3],
[3,0],
])
mesh.nelem = 4
q = QuadratureRule(mesh_type="line")
for C in range(10):
N, dN = GetBasesBEM2D(C,q.points)
N, dN = GetBasesBEM2D(2,q.points)
global_coord = np.zeros((mesh.points.shape[0],3))
global_coord[:,:2] = mesh.points
Jacobian = 2*np.ones((q.weights.shape[0],mesh.nelem))
nx = 4*np.ones((q.weights.shape[0],mesh.nelem))
ny = 3*np.ones((q.weights.shape[0],mesh.nelem))
XCO = 2*np.ones((q.weights.shape[0],mesh.nelem))
YCO = np.ones((q.weights.shape[0],mesh.nelem))
N = np.ones((mesh.elements.shape[1],q.weights.shape[0]))
dN = 0.5*np.ones((mesh.elements.shape[1],q.weights.shape[0]))
GenerateCoordinates(mesh.elements,mesh.points,0,q.points)
CoordsJacobianRadiusatGaussPoints(mesh.elements,global_coord,0,N,dN,q.weights)
# Not working
# CoordsJacobianRadiusatGaussPoints_LM(mesh.elements,global_coord[:,:3],0,N,dN,q.weights,mesh.elements)
class GeoArgs(object):
Lagrange_Multipliers = "activated"
def __init__(self):
Lagrange_Multipliers = "activated"
geo_args = GeoArgs()
K1, K2 = AssemblyBEM2D(0,global_coord,mesh.elements,mesh.elements,dN,N,
q.weights,q.points,Jacobian, nx, ny, XCO, YCO, geo_args)
AssemblyBEM2D_Sparse(0,global_coord,mesh.elements,mesh.elements,dN,N,
q.weights,q.points,Jacobian, nx, ny, XCO, YCO, geo_args)
bdata = np.zeros((2*mesh.points.shape[0],2))
bdata[:4,1] = -1
bdata[4:,0] = -1
Sort_BEM(bdata,K1, K2)
if __name__ == "__main__":
test_BEM()
```
|
{
"source": "jdlaubrie/Kuru",
"score": 2
}
|
#### File: Kuru/BoundaryCondition/BoundaryCondition.py
```python
from __future__ import print_function
import sys
import numpy as np #, scipy as sp, os, gc
from copy import deepcopy
#from warnings import warn
from time import time
class BoundaryCondition(object):
"""Base class for applying all types of boundary conditions"""
def __init__(self,
surface_identification_algorithm='minimisation',
modify_linear_mesh_on_projection=False,
project_on_curves=True,
activate_bounding_box=False,
bounding_box_padding=1e-3,
has_planar_surfaces=True,
solve_for_planar_faces=True,
save_dirichlet_data=False,
save_nurbs_data=False,
filename=None,
read_dirichlet_from_file=False,
make_loading="ramp",
compound_dirichlet_bcs=False
):
# TYPE OF BOUNDARY: straight or nurbs
self.boundary_type = 'straight'
self.dirichlet_data_applied_at = 'node' # or 'faces'
self.neumann_data_applied_at = 'node' # or 'faces'
self.requires_cad = False
self.cad_file = None
# PROJECTION TYPE FOR CAD EITHER orthogonal OR arc_length
self.projection_type = 'orthogonal'
# WHAT TYPE OF ARC LENGTH BASED PROJECTION, EITHER 'equal' OR 'fekete'
self.nodal_spacing_for_cad = 'equal'
self.project_on_curves = project_on_curves
self.scale_mesh_on_projection = False
self.scale_value_on_projection = 1.0
self.condition_for_projection = 1.0e20
        self.has_planar_surfaces = has_planar_surfaces
self.solve_for_planar_faces = solve_for_planar_faces
self.projection_flags = None
# FIX DEGREES OF FREEDOM EVERY WHERE CAD PROJECTION IS NOT APPLIED
self.fix_dof_elsewhere = True
# FOR 3D ARC-LENGTH PROJECTION
self.orthogonal_fallback_tolerance = 1.0
# WHICH ALGORITHM TO USE FOR SURFACE IDENTIFICATION, EITHER 'minimisation' or 'pure_projection'
self.surface_identification_algorithm = surface_identification_algorithm
# MODIFY LINEAR MESH ON PROJECTION
self.modify_linear_mesh_on_projection = modify_linear_mesh_on_projection
# COMPUTE A BOUNDING BOX FOR EACH CAD SURFACE
self.activate_bounding_box = activate_bounding_box
self.bounding_box_padding = float(bounding_box_padding)
# FOR IGAKit WRAPPER
self.nurbs_info = None
self.nurbs_condition = None
self.analysis_type = 'static'
self.analysis_nature = 'linear'
self.dirichlet_flags = None
self.applied_dirichlet = None
self.is_dirichlet_computed = False
self.columns_out = None
self.columns_in = None
self.save_dirichlet_data = save_dirichlet_data
self.save_nurbs_data = save_nurbs_data
self.filename = filename
self.read_dirichlet_from_file = read_dirichlet_from_file
self.neumann_flags = None
self.applied_neumann = None
self.is_applied_neumann_shape_functions_computed = False
self.pressure_flags = None
self.applied_pressure = None
self.pressure_increment = 1.0
self.spring_flags = None
self.applied_spring = None
self.master_faces = None
self.slave_faces = None
self.applied_connector = None
self.connector_flags = None
self.connector_elements = None
self.connector_faces = None
self.is_body_force_shape_functions_computed = False
self.make_loading = make_loading # "ramp" or "constant"
self.has_step_wise_dirichlet_loading = False
self.step_wise_dirichlet_data = None
self.has_step_wise_neumann_loading = False
self.step_wise_neumann_data = None
self.compound_dirichlet_bcs = compound_dirichlet_bcs
# STORE A COPY OF SELF AT THE START TO RESET TO AT THE END
self.__save_state__()
        # FOR INTERNAL PURPOSES WHEN WE DO NOT WANT TO RESET
self.do_not_reset = True
def __save_state__(self):
self.__initialdict__ = deepcopy(self.__dict__)
def SetDirichletCriteria(self, func, *args, **kwargs):
"""Applies user defined Dirichlet data to self
"""
if "apply" in kwargs.keys():
del kwargs["apply"]
self.has_step_wise_dirichlet_loading = True
self.step_wise_dirichlet_data = {'func':func, 'args': args, 'kwargs': kwargs}
self.dirichlet_flags = func(0, *args, **kwargs)
return self.dirichlet_flags
self.dirichlet_flags = func(*args, **kwargs)
return self.dirichlet_flags
def SetNeumannCriteria(self, func, *args, **kwargs):
"""Applies user defined Neumann data to self
"""
if "apply" in kwargs.keys():
del kwargs["apply"]
self.has_step_wise_neumann_loading = True
self.step_wise_neumann_data = {'func':func, 'args': args, 'kwargs': kwargs}
tups = func(0, *args, **kwargs)
else:
tups = func(*args, **kwargs)
if not isinstance(tups,tuple) and self.neumann_data_applied_at == "node":
self.neumann_flags = tups
return self.neumann_flags
else:
            self.neumann_data_applied_at = "face"
if len(tups) !=2:
raise ValueError("User-defined Neumann criterion function {} "
"should return one flag and one data array".format(func.__name__))
self.neumann_flags = tups[0]
self.applied_neumann = tups[1]
return tups
def SetRobinCriteria(self, func, *args, **kwargs):
"""Applies user defined Robin data to self, just working on surfaces
"""
dics = func(*args, **kwargs)
if isinstance(dics,dict):
self.RobinLoadSelector(dics)
elif isinstance(dics,tuple):
for idic in range(len(dics)):
if isinstance(dics[idic],dict):
self.RobinLoadSelector(dics[idic])
else:
raise ValueError("User-defined Robin criterion function {} "
"should return dictionary or tuple(dict,dict,...)".format(func.__name__))
else:
raise ValueError("User-defined Robin criterion function {} "
"should return dictionary or tuple".format(func.__name__))
return dics
def RobinLoadSelector(self, tups):
if tups['type'] == 'Pressure':
self.pressure_flags = tups['flags']
self.applied_pressure = tups['data']
elif tups['type'] == 'Spring':
self.spring_flags = tups['flags']
self.applied_spring = tups['data']
elif tups['type'] == 'Connector':
self.master_faces = tups['master_faces']
self.slave_faces = tups['slave_faces']
self.applied_connector = tups['data']
self.connector_flags = tups['flags']
if self.master_faces.shape[0] != self.slave_faces.shape[0]:
raise ValueError("The size of master_faces and slave_faces should be equal")
elif tups['type'] == 'Dashpot':
raise ValueError("Surrounding viscoelastic effects not implemented yet")
else:
raise ValueError("Type force {} not understood or not available. "
"Types are Pressure, Spring, SpringJoint and Dashpot.".format(tups['type']))
def GetConnectorElements(self, mesh):
""" Receive the faces along the surfaces interacting """
# gets the points in the dissection surfaces
master_points = np.unique(mesh.faces[self.master_faces,:])
slave_points = np.unique(mesh.faces[self.slave_faces,:])
# array with the coordinate of the master and slave points
master_points_coor = mesh.points[master_points]
slave_points_coor = mesh.points[slave_points]
# look for a connection between master and slave points
from scipy.spatial import cKDTree
tree = cKDTree(master_points_coor)
distance, id_point = tree.query(slave_points_coor,k=1)
pair_node_master_slave = np.c_[master_points[id_point],slave_points]
# build the elements
nodeperface = mesh.faces.shape[1]
connector_elements = np.zeros((self.master_faces.shape[0],2*nodeperface),np.uint64)
connector_elements[:,:4] = mesh.faces[self.master_faces]
# match the master nodes with its slave within the element
faces_s = np.zeros(self.master_faces.shape[0],dtype=np.uint64)
for i in range(self.master_faces.shape[0]):
iface = self.master_faces[i]
jnode_array = np.zeros(nodeperface,dtype=np.uint64)
for j in range(nodeperface):
inode = mesh.faces[iface,j]
idx = np.where(pair_node_master_slave[:,0]==inode)[0]
jnode = pair_node_master_slave[idx,1]
connector_elements[i,j+nodeperface] = jnode
jnode_array[j] = jnode
# use the slave point to recover the slave face respect a master face
jface_array = np.where(mesh.faces==jnode_array[0])[0]
for k in range(1,jnode_array.shape[0]):
jface_array = np.append(jface_array, np.where(mesh.faces==jnode_array[k])[0])
values, counts = np.unique(jface_array,return_counts=True)
jface = values[np.where(counts==nodeperface)[0]]
faces_s[i] = jface
pair_face_master_slave = np.c_[self.master_faces,faces_s]
pair_face_master_slave = np.array(pair_face_master_slave, dtype=np.uint64, copy=True)
self.connector_elements = connector_elements
self.connector_faces = pair_face_master_slave
return
def GetDirichletBoundaryConditions(self, formulation, mesh, materials=None, solver=None, fem_solver=None):
nvar = formulation.nvar
ndim = formulation.ndim
self.columns_in, self.applied_dirichlet = [], []
#----------------------------------------------------------------------------------------------------#
#-------------------------------------- NURBS BASED SOLUTION ----------------------------------------#
#----------------------------------------------------------------------------------------------------#
if self.boundary_type == 'nurbs':
tCAD = time()
if self.read_dirichlet_from_file is False:
if not self.is_dirichlet_computed:
# GET DIRICHLET BOUNDARY CONDITIONS BASED ON THE EXACT GEOMETRY FROM CAD
if self.requires_cad:
# CALL POSTMESH WRAPPER
nodesDBC, Dirichlet = self.PostMeshWrapper(formulation, mesh, materials, solver, fem_solver)
else:
nodesDBC, Dirichlet = self.nodesDBC, self.Dirichlet
# GET DIRICHLET DoFs
self.columns_out = (np.repeat(nodesDBC,nvar,axis=1)*nvar +\
np.tile(np.arange(nvar)[None,:],nodesDBC.shape[0]).reshape(nodesDBC.shape[0],formulation.ndim)).ravel()
self.applied_dirichlet = Dirichlet.ravel()
# FIX THE DOF IN THE REST OF THE BOUNDARY
if self.fix_dof_elsewhere:
if ndim==2:
rest_dofs = np.setdiff1d(np.unique(mesh.edges),nodesDBC)
elif ndim==3:
rest_dofs = np.setdiff1d(np.unique(mesh.faces),nodesDBC)
rest_out = np.repeat(rest_dofs,nvar)*nvar + np.tile(np.arange(nvar),rest_dofs.shape[0])
rest_app = np.zeros(rest_dofs.shape[0]*nvar)
self.columns_out = np.concatenate((self.columns_out,rest_out)).astype(np.int64)
self.applied_dirichlet = np.concatenate((self.applied_dirichlet,rest_app))
print('Finished identifying Dirichlet boundary conditions from CAD geometry.',
' Time taken', time()-tCAD, 'seconds')
else:
end = -3
self.applied_dirichlet = np.loadtxt(mesh.filename.split(".")[0][:end]+"_dirichlet.dat", dtype=np.float64)
self.columns_out = np.loadtxt(mesh.filename.split(".")[0][:end]+"_columns_out.dat")
print('Finished identifying Dirichlet boundary conditions from CAD geometry.',
' Time taken', time()-tCAD, 'seconds')
#----------------------------------------------------------------------------------------------------#
#------------------------------------- NON-NURBS BASED SOLUTION -------------------------------------#
#----------------------------------------------------------------------------------------------------#
elif self.boundary_type == 'straight' or self.boundary_type == 'mixed':
# IF DIRICHLET BOUNDARY CONDITIONS ARE APPLIED DIRECTLY AT NODES
if self.dirichlet_flags is None:
raise RuntimeError("Dirichlet boundary conditions are not set for the analysis")
if self.dirichlet_data_applied_at == 'node':
if self.analysis_type == "dynamic":
# FOR DYNAMIC ANALYSIS IT IS ASSUMED THAT
# self.columns_in and self.columns_out DO NOT CHANGE
# DURING THE ANALYSIS
if self.dirichlet_flags.ndim == 3:
flat_dirich = self.dirichlet_flags[:,:,0].ravel()
self.columns_out = np.arange(self.dirichlet_flags[:,:,0].size)[~np.isnan(flat_dirich)]
self.applied_dirichlet = np.zeros((self.columns_out.shape[0],self.dirichlet_flags.shape[2]))
for step in range(self.dirichlet_flags.shape[2]):
flat_dirich = self.dirichlet_flags[:,:,step].ravel()
self.applied_dirichlet[:,step] = flat_dirich[~np.isnan(flat_dirich)]
elif self.dirichlet_flags.ndim == 2:
flat_dirich = self.dirichlet_flags.ravel()
self.columns_out = np.arange(self.dirichlet_flags.size)[~np.isnan(flat_dirich)]
self.applied_dirichlet = flat_dirich[~np.isnan(flat_dirich)]
else:
raise ValueError("Incorrect Dirichlet flags for dynamic analysis")
else:
flat_dirich = self.dirichlet_flags.ravel()
self.columns_out = np.arange(self.dirichlet_flags.size)[~np.isnan(flat_dirich)]
self.applied_dirichlet = flat_dirich[~np.isnan(flat_dirich)]
# GENERAL PROCEDURE - GET REDUCED MATRICES FOR FINAL SOLUTION
self.columns_out = self.columns_out.astype(np.int64)
self.columns_in = np.delete(np.arange(0,nvar*mesh.points.shape[0]),self.columns_out)
if self.columns_in.shape[0] == 0:
warn("No Dirichlet boundary conditions have been applied. The system is unconstrained")
if self.columns_out.shape[0] == 0:
warn("Dirichlet boundary conditions have been applied on the entire mesh")
if self.save_dirichlet_data:
from scipy.io import savemat
diri_dict = {'columns_in':self.columns_in,
'columns_out':self.columns_out,
'applied_dirichlet':self.applied_dirichlet}
savemat(self.filename,diri_dict, do_compression=True)
def ComputeNeumannForces(self, mesh, materials, function_spaces, compute_traction_forces=True, compute_body_forces=False):
"""Compute/assemble traction and body forces"""
if self.neumann_flags is None:
return np.zeros((mesh.points.shape[0]*materials[0].nvar,1),dtype=np.float64)
nvar = materials[0].nvar
ndim = mesh.InferSpatialDimension()
if self.neumann_flags.shape[0] == mesh.points.shape[0]:
self.neumann_data_applied_at = "node"
else:
if ndim==3:
if self.neumann_flags.shape[0] == mesh.faces.shape[0]:
self.neumann_data_applied_at = "face"
elif ndim==2:
if self.neumann_flags.shape[0] == mesh.edges.shape[0]:
self.neumann_data_applied_at = "face"
if self.neumann_data_applied_at == 'face':
from Kuru.FiniteElements.Assembly import AssembleForces
if not isinstance(function_spaces,tuple):
raise ValueError("Boundary functional spaces not available for computing Neumman and body forces")
else:
# CHECK IF A FUNCTION SPACE FOR BOUNDARY EXISTS - SAFEGAURDS AGAINST FORMULATIONS THAT DO NO PROVIDE ONE
has_boundary_spaces = False
for fs in function_spaces:
if ndim == 3 and fs.ndim == 2:
has_boundary_spaces = True
break
elif ndim == 2 and fs.ndim == 1:
has_boundary_spaces = True
break
if not has_boundary_spaces:
from Kuru import QuadratureRule, FunctionSpace
# COMPUTE BOUNDARY FUNCTIONAL SPACES
p = mesh.InferPolynomialDegree()
bquadrature = QuadratureRule(optimal=3, norder=2*p+1,
mesh_type=mesh.boundary_element_type, is_flattened=False)
bfunction_space = FunctionSpace(mesh.CreateDummyLowerDimensionalMesh(),
bquadrature, p=p, equally_spaced=mesh.IsEquallySpaced, use_optimal_quadrature=False)
function_spaces = (function_spaces[0],bfunction_space)
                    # raise ValueError("Boundary functional spaces not available for computing Neumann and body forces")
t_tassembly = time()
if self.analysis_type == "static":
F = AssembleForces(self, mesh, materials, function_spaces,
compute_traction_forces=compute_traction_forces, compute_body_forces=compute_body_forces)
elif self.analysis_type == "dynamic":
if self.neumann_flags.ndim==2:
# THE POSITION OF NEUMANN DATA APPLIED AT FACES CAN CHANGE DYNAMICALLY
tmp_flags = np.copy(self.neumann_flags)
tmp_data = np.copy(self.applied_neumann)
F = np.zeros((mesh.points.shape[0]*nvar,self.neumann_flags.shape[1]))
for step in range(self.neumann_flags.shape[1]):
self.neumann_flags = tmp_flags[:,step]
self.applied_neumann = tmp_data[:,:,step]
F[:,step] = AssembleForces(self, mesh, materials, function_spaces,
compute_traction_forces=compute_traction_forces, compute_body_forces=compute_body_forces).flatten()
self.neumann_flags = tmp_flags
self.applied_neumann = tmp_data
else:
# THE POSITION OF NEUMANN DATA APPLIED AT FACES CAN CHANGE DYNAMICALLY
F = AssembleForces(self, mesh, materials, function_spaces,
compute_traction_forces=compute_traction_forces, compute_body_forces=compute_body_forces).flatten()
print("Assembled external traction forces. Time elapsed is {} seconds".format(time()-t_tassembly))
elif self.neumann_data_applied_at == 'node':
            # A DIRICHLET TYPE METHODOLOGY FOR APPLYING NEUMANN BOUNDARY CONDITIONS (i.e. AT NODES)
if self.analysis_type == "dynamic":
if self.neumann_flags.ndim ==3:
# FOR DYNAMIC ANALYSIS IT IS ASSUMED THAT
                    # to_apply DOES NOT CHANGE DURING THE ANALYSIS
flat_neu = self.neumann_flags[:,:,0].ravel()
to_apply = np.arange(self.neumann_flags[:,:,0].size)[~np.isnan(flat_neu)]
F = np.zeros((mesh.points.shape[0]*nvar,self.neumann_flags.shape[2]))
for step in range(self.neumann_flags.shape[2]):
flat_neu = self.neumann_flags[:,:,step].ravel()
to_apply = np.arange(self.neumann_flags[:,:,step].size)[~np.isnan(flat_neu)]
F[to_apply,step] = flat_neu[~np.isnan(flat_neu)]
else:
F = np.zeros((mesh.points.shape[0]*nvar,1))
flat_neu = self.neumann_flags.ravel()
to_apply = np.arange(self.neumann_flags.size)[~np.isnan(flat_neu)]
applied_neumann = flat_neu[~np.isnan(flat_neu)]
F[to_apply,0] = applied_neumann
else:
F = np.zeros((mesh.points.shape[0]*nvar,1))
flat_neu = self.neumann_flags.ravel()
to_apply = np.arange(self.neumann_flags.size)[~np.isnan(flat_neu)]
applied_neumann = flat_neu[~np.isnan(flat_neu)]
F[to_apply,0] = applied_neumann
return F
def ComputeRobinForces(self, mesh, materials, function_spaces, fem_solver, Eulerx, stiffness, F):
"""Compute/assemble traction and body forces"""
from Kuru.FiniteElements.Assembly import AssembleRobinForces
if not self.pressure_flags is None:
K_pressure, F_pressure = AssembleRobinForces(self, mesh,
materials[0], function_spaces, fem_solver, Eulerx, 'pressure')
stiffness -= K_pressure
F -= F_pressure[:,None]
if not self.spring_flags is None:
K_spring, F_spring = AssembleRobinForces(self, mesh,
materials[0], function_spaces, fem_solver, Eulerx, 'spring')
stiffness += K_spring
F += F_spring[:,None]
if not self.connector_elements is None:
K_connector, F_connector = AssembleRobinForces(self, mesh,
materials[0], function_spaces, fem_solver, Eulerx, 'connector')
stiffness += K_connector
F += F_connector[:,None]
return stiffness, F
def GetReducedMatrices(self, stiffness, F, mass=None, only_residual=False):
# GET REDUCED FORCE VECTOR
F_b = F[self.columns_in,0]
if only_residual:
return F_b
# GET REDUCED STIFFNESS MATRIX
stiffness_b = stiffness[self.columns_in,:][:,self.columns_in]
# GET REDUCED MASS MATRIX
mass_b = np.array([])
return stiffness_b, F_b, mass_b
def ApplyDirichletGetReducedMatrices(self, stiffness, F, AppliedDirichlet, LoadFactor=1., mass=None, only_residual=False):
"""AppliedDirichlet is a non-member because it can be external incremental Dirichlet,
which is currently not implemented as member of BoundaryCondition. F also does not
correspond to Dirichlet forces, as it can be residual in incrementally linearised
framework.
"""
# # APPLY DIRICHLET BOUNDARY CONDITIONS
# for i in range(self.columns_out.shape[0]):
# F = F - LoadFactor*AppliedDirichlet[i]*stiffness.getcol(self.columns_out[i])
# MUCH FASTER APPROACH
# F = F - (stiffness[:,self.columns_out]*AppliedDirichlet*LoadFactor)[:,None]
nnz_cols = ~np.isclose(AppliedDirichlet,0.0)
if self.columns_out[nnz_cols].shape[0]==0:
F[self.columns_in] = F[self.columns_in]
else:
F[self.columns_in] = F[self.columns_in] - (stiffness[self.columns_in,:]\
[:,self.columns_out[nnz_cols]]*AppliedDirichlet[nnz_cols]*LoadFactor)[:,None]
if only_residual:
return F
# GET REDUCED FORCE VECTOR
F_b = F[self.columns_in,0]
# GET REDUCED STIFFNESS
stiffness_b = stiffness[self.columns_in,:][:,self.columns_in]
# GET REDUCED MASS MATRIX
if self.analysis_type != 'static':
mass_b = mass[self.columns_in,:][:,self.columns_in]
return stiffness_b, F_b, F, mass_b
return stiffness_b, F_b, F
def UpdateFixDoFs(self, AppliedDirichletInc, fsize, nvar):
"""Updates the geometry (DoFs) with incremental Dirichlet boundary conditions
for fixed/constrained degrees of freedom only. Needs to be applied per time steps"""
# GET TOTAL SOLUTION
TotalSol = np.zeros((fsize,1))
TotalSol[self.columns_out,0] = AppliedDirichletInc
# RE-ORDER SOLUTION COMPONENTS
dU = TotalSol.reshape(int(TotalSol.shape[0]/nvar),nvar)
return dU
def UpdateFreeDoFs(self, sol, fsize, nvar):
"""Updates the geometry with iterative solutions of Newton-Raphson
for free degrees of freedom only. Needs to be applied per time NR iteration"""
# GET TOTAL SOLUTION
TotalSol = np.zeros((fsize,1))
TotalSol[self.columns_in,0] = sol
# RE-ORDER SOLUTION COMPONENTS
dU = TotalSol.reshape(int(TotalSol.shape[0]/nvar),nvar)
return dU
```
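`GetDirichletBoundaryConditions` above treats every non-NaN entry of `dirichlet_flags` as a prescribed value and every NaN entry as a free degree of freedom. A minimal sketch of a user criterion fed through `SetDirichletCriteria`, using a made-up 2D mesh object (only a `points` attribute is assumed; nothing here is taken from the library's examples):
```python
import numpy as np

def fix_base(mesh):
    # one row per mesh point, one column per unknown; NaN marks a free DoF
    flags = np.full((mesh.points.shape[0], 2), np.nan)
    base = np.isclose(mesh.points[:, 1], 0.0)  # nodes lying on y = 0
    flags[base, :] = 0.0                       # clamp both displacement components
    return flags

# boundary_condition = BoundaryCondition()
# boundary_condition.SetDirichletCriteria(fix_base, mesh)
```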
|
{
"source": "jdlauret/RaceGame",
"score": 3
}
|
#### File: jdlauret/RaceGame/game_start.py
```python
import pygame
import random
import os
import time
# directories
top_dir = os.path.dirname(os.path.abspath(__file__))
assets_dir = os.path.join(top_dir, 'assets')
image_dir = os.path.join(assets_dir, 'images')
pygame.init()
display_width = 800
display_height = 1000
# colors values (red, green, blue)
black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
color_list = [
black,
red,
green,
blue
]
game_display = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('A bit Racey')
clock = pygame.time.Clock()
#images
car_image_path = os.path.join(image_dir, 'acura-sports-car-vertical-small.png')
car_image = pygame.image.load(car_image_path)
car_rect = car_image.get_rect()
car_center = car_rect.centerx = 25
class ImageObject:
def __init__(self, params):
image, x, y = params
self.x = x
self.y = y
self.speed = 0
self.image = image
self.size = image.get_rect()
self.width = self.size.width
self.height = self.size.height
self.sides()
def sides(self):
self.left = self.x
self.right = self.x + self.width
self.top = self.y
self.bottom = self.y + self.height
def draw(self):
game_display.blit(self.image, (self.x, self.y))
class RectObject:
def __init__(self, params):
x, y, w, h, color = params
self.x = x
self.y = y
self.width = w
self.height = h
self.color = color
self.sides()
def sides(self):
self.left = self.x
self.right = self.x + self.width
self.top = self.y
self.bottom = self.y + self.height
def draw(self):
self.sides()
pygame.draw.rect(game_display, self.color, [self.x, self.y, self.width, self.height])
def quit_game():
pygame.quit()
quit()
def blocks_dodged(count):
font = pygame.font.SysFont(None, 25)
text = font.render("Dodged: " + str(count), True, black)
game_display.blit(text, (0, 0))
def text_objects(text, font):
text_surface = font.render(text, True, black)
return text_surface, text_surface.get_rect()
def message_display(text):
largeText = pygame.font.Font('freesansbold.ttf', 115)
text_surf, text_rect = text_objects(text, largeText)
text_rect.center = ((display_width/2, display_height/2))
game_display.blit(text_surf, text_rect)
pygame.display.update()
time.sleep(2)
game_loop()
def crash():
message_display('You Crashed')
def create_obj(block_count):
# default block parameters
obj_list = []
for obj_count in range(block_count):
block_width = random.randrange(50, 100)
block_height = random.randrange(50, 100)
block_color = color_list[random.randrange(0, len(color_list) - 1)]
block_x_start = random.randrange(0, display_width - block_width)
block_y_start = random.randrange(-800, -600)
new_block = RectObject((block_x_start, block_y_start, block_width, block_height, block_color))
obj_list.append(new_block)
return obj_list
def game_loop():
# car starting position
x = display_width * 0.45
y = (display_height * 0.8)
# default location change
x_change = 0
# Number of objects that can be on screen
block_count = 2
obj_list = create_obj(block_count)
block_speed = 3
# dodged block counter
dodged_counter = 0
# game loop run variable
exit_game = False
# game loop
while not exit_game:
# event handling loop
for event in pygame.event.get():
if event.type == pygame.QUIT:
quit_game()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
x_change = -5
elif event.key == pygame.K_RIGHT:
x_change = 5
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT \
or event.key == pygame.K_RIGHT:
x_change = 0
# update car position
x += x_change
# create background
game_display.fill(white)
        # print(obj_list)  # debug output; printing every frame floods the console
# draw block
for obj in obj_list:
obj.draw()
# change block location
obj.y += block_speed
# draw car
car = ImageObject((car_image, x, y))
car.draw()
blocks_dodged(dodged_counter)
# reset block once it leaves display
for obj in obj_list:
if obj.y > display_height:
                # pick the new size first so the x position respects the new width
                obj.width = random.randrange(50, 100)
                obj.height = random.randrange(50, 100)
                obj.x = random.randrange(0, display_width - obj.width)
                obj.y = random.randrange(-800, -600)
dodged_counter += 1
block_speed += 0.25
            # check for collision with this block
if car.top < obj.bottom and obj.top < car.bottom:
# print('Y Crossover')
if obj.left < car.left < obj.right \
or obj.left < car.right < obj.right:
# print('X Crossover')
crash()
# update frame
pygame.display.update()
# frames per second
clock.tick(60)
if __name__ == '__main__':
game_loop()
```
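The crash test in the game loop is an axis-aligned bounding-box (AABB) check: vertical intervals first, then horizontal. Testing only whether the car's left or right edge falls inside the block misses a block narrower than the car sitting entirely between its edges; the symmetric interval test below covers that case too. This is an illustrative standalone function, not part of the original game:
```python
def aabb_overlap(a, b):
    """Return True when rectangles a and b overlap; each is (left, top, right, bottom)."""
    a_left, a_top, a_right, a_bottom = a
    b_left, b_top, b_right, b_bottom = b
    return a_left < b_right and b_left < a_right \
        and a_top < b_bottom and b_top < a_bottom

assert aabb_overlap((0, 0, 10, 10), (5, 5, 15, 15))        # corner overlap
assert aabb_overlap((0, 0, 10, 10), (3, -5, 7, 15))        # narrow block inside
assert not aabb_overlap((0, 0, 10, 10), (20, 20, 30, 30))  # disjoint
```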
|
{
"source": "jdlauret/SudokuSolver",
"score": 4
}
|
#### File: jdlauret/SudokuSolver/Sudoku Solver.py
```python
assignments = []
rows = 'ABCDEFGHI'
cols = '123456789'
def cross(a, b):
# returns box notation for grid ie. A1, B1, A2, B2
return [s+t for s in a for t in b]
# contains all boxes for grid
boxes = cross(rows, cols)
# contains all rows in grid
row_units = [cross(r, cols) for r in rows]
# contains all columns in grid
col_units = [cross(rows, c) for c in cols]
# contains all squares in grid
square_units = [cross(rs, cs) for rs in ('ABC', 'DEF', 'GHI') for cs in ('123', '456', '789')]
# contains first diagonal
diagonal1 = [a[0]+a[1] for a in zip(rows, cols)]
# contains second diagonal
diagonal2 = [a[0]+a[1] for a in zip(rows, cols[::-1])]
# contains both diagonal
diagonal_units = [diagonal1, diagonal2]
def assign_value(values, box, value):
# Assigns a value to a given box. If it updates the board record it.
if values[box] == value:
return values
values[box] = value
if len(value) == 1:
assignments.append(values.copy())
return values
def grid_values(grid):
# converts a string containing the board layout into a dictionary
grid_dict = {}
values = '123456789'
for i, char in enumerate(grid):
if char == '.':
grid_dict[boxes[i]] = values
else:
grid_dict[boxes[i]] = char
return grid_dict
def display(values):
    # prints a representation of the sudoku board based on the values contained within the dictionary
width = 1+max(len(values[s]) for s in boxes)
line = '+'.join(['-'*(width*3)]*3)
for r in rows:
print(''.join(values[r+c].center(width)+('|' if c in '36' else '') for c in cols))
if r in 'CF':
print(line)
return
def naked_twins(values):
# naked_twins searches for naked twins and removes values from the relevant peers
# finds twin candidates
solved_values = [box for box in values.keys() if len(values[box]) == 1]
twin_candidates = []
for box in boxes:
if len(values[box]) == 2:
if box not in twin_candidates:
twin_candidates.append(box)
# finds if any of the candidates are peers of each other
pairs = []
for candidate in twin_candidates:
for i in range(0, len(twin_candidates)):
if candidate != twin_candidates[i]:
if twin_candidates[i] in peers[candidate]:
if values[twin_candidates[i]] == values[candidate]:
if sorted([twin_candidates[i], candidate]) not in pairs:
pairs.append(sorted([twin_candidates[i], candidate]))
# finds all peers of a twins and removes the values found in the twin from the peers
for pair in pairs:
box_1 = pair[0]
box_2 = pair[1]
for unit in unit_list:
if box_1 in unit\
and box_2 in unit:
for box in unit:
if box not in solved_values\
and box not in pair:
for digit in values[box_1]:
new_value = values[box].replace(digit, '')
assign_value(values, box, new_value)
# returns the adjusted values
return values
def eliminate(values):
    # eliminate finds solved boxes and removes the solved value from all of its peers
solved_values = [box for box in values.keys() if len(values[box]) == 1]
for box in solved_values:
value = values[box]
for peer in peers[box]:
new_value = values[peer].replace(value, '')
assign_value(values, peer, new_value)
return values
def only_choice(values):
# only_choice searches for if there is only one box in a unit which would allow a certain value,
# then that box is assigned that value
for unit in unit_list:
for digit in '123456789':
digits_found = []
for cell in unit:
if digit in values[cell]:
digits_found.append(cell)
if len(digits_found) == 1:
assign_value(values, digits_found[0], digit)
return values
def reduce_puzzle(values):
# reduce_puzzle runs a set of values through eliminate(), only_choice(), and naked_twins()
# until the values before and after are the same
# if the values are the same it exits the loop and returns the values
# if any values are completely removed resulting in a length of 0
# the function returns a False
stalled = False
while not stalled:
if isinstance(values, str):
values = grid_values(values)
solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])
values = only_choice(
naked_twins(
eliminate(values)
)
)
solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])
stalled = solved_values_before == solved_values_after
if len([box for box in values.keys() if len(values[box]) == 0]):
return False
return values
def search(values):
# uses reduce_puzzle
# creates a search tree by finding the box with the minimum number of possible options
# creates a copy for each possible options contained in the box
# attempts to solve each of the possible options recursively with the left most option first
values = reduce_puzzle(values)
if values is False:
return False
if all(len(values[s]) == 1 for s in boxes):
return values
num, box = min(
# creates list of tuples and searches for the min value in the list
(len(values[box]), box)
for box in boxes if len(values[box]) > 1
)
for value in values[box]:
new_sudoku = values.copy()
new_sudoku[box] = value
attempt = search(new_sudoku)
if attempt:
return attempt
def solve(grid):
    # takes a string input and converts it to a grid
    # then hands off the grid to search to be solved
values = grid_values(grid)
return search(values)
if __name__ == '__main__':
"""
HOW TO USE:
Find any sudoku puzzle you want to solve
A good place to look is http://sudoku.menu/
If you select a puzzle where the diagonals can be solved make sure to change solve_diagonals to True
"""
solve_diagonals = False
# Example Puzzles
diagonal_sudoku = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
very_hard_sudoku = '.46.1......28.....1.32.......872.4...9.....2...7.613.......71.2.....58......9.73.'
if solve_diagonals:
# list with all units
unit_list = row_units + col_units + square_units + diagonal_units
else:
unit_list = row_units + col_units + square_units
units = dict((s, [u for u in unit_list if s in u]) for s in boxes)
peers = dict((s, set(sum(units[s], [])) - set([s])) for s in boxes)
# contains the grid in a string format
# displays solved grid
# visualizes the solving of the grid
display(solve(very_hard_sudoku))
```
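All three strategies lean on `unit_list`, `units`, and `peers`. A quick standalone check of what those structures contain for a single box, assuming the non-diagonal unit list:
```python
rows, cols = 'ABCDEFGHI', '123456789'
cross = lambda a, b: [s + t for s in a for t in b]
boxes = cross(rows, cols)
unit_list = ([cross(r, cols) for r in rows]
             + [cross(rows, c) for c in cols]
             + [cross(rs, cs) for rs in ('ABC', 'DEF', 'GHI')
                              for cs in ('123', '456', '789')])
units = {s: [u for u in unit_list if s in u] for s in boxes}
peers = {s: set(sum(units[s], [])) - {s} for s in boxes}

assert len(units['A1']) == 3    # its row, its column, its 3x3 square
assert len(peers['A1']) == 20   # 8 row + 8 column + 4 extra square boxes
```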
|
{
"source": "jdlee6/EchoBot",
"score": 3
}
|
#### File: jdlee6/EchoBot/cmcAPI.py
```python
from requests import Request, Session
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
import json, os, threading
with open('config.json', 'r') as config_file:
config_data = json.load(config_file)
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'
parameters = {
'start':'1',
'limit':'200',
'convert':'USD'
}
headers = {
'Accepts': 'application/json',
'X-CMC_PRO_API_KEY': os.environ.get('APIkey')
}
session = Session()
session.headers.update(headers)
def get_data():
response = session.get(url, params=parameters)
data = json.loads(response.text)
return data
```
|
{
"source": "jdlee6/Skoogle",
"score": 3
}
|
#### File: website/sort/routes.py
```python
from flask import Blueprint, render_template, request
from website.models import Result
# create instance of Blueprint; 'sort' is the name
sort = Blueprint('sort', __name__)
# highest to lowest rating sort route
@sort.route('/rate_high', methods=['GET', 'POST'])
def rate_high():
city = Result.query.with_entities(Result.city).limit(1).scalar()
page = request.args.get('page', 1, type=int)
page_results = Result.query.order_by(Result.rating.desc())\
.paginate(page=page, per_page=2)
return render_template('rate_high.html', results=page_results, origin=city)
# lowest to highest rating sort route
@sort.route('/rate_low', methods=['GET', 'POST'])
def rate_low():
city = Result.query.with_entities(Result.city).limit(1).scalar()
page = request.args.get('page', 1, type=int)
page_results = Result.query.order_by(Result.rating.asc())\
.paginate(page=page, per_page=2)
return render_template('rate_low.html',
results=page_results,
origin=city)
# fastest to slowest time sort route
@sort.route('/time_fast', methods=['GET', 'POST'])
def time_fast():
city = Result.query.with_entities(Result.city).limit(1).scalar()
page = request.args.get('page', 1, type=int)
page_results = Result.query.order_by(Result.duration.asc())\
.paginate(page=page, per_page=2)
return render_template('time_fast.html',
results=page_results,
origin=city)
# slowest to fastest time sort route
@sort.route('/time_slow', methods=['GET', 'POST'])
def time_slow():
city = Result.query.with_entities(Result.city).limit(1).scalar()
page = request.args.get('page', 1, type=int)
page_results = Result.query.order_by(Result.duration.desc())\
.paginate(page=page, per_page=2)
return render_template('time_slow.html',
results=page_results,
origin=city)
```
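The four routes differ only in the ordering expression and the template name, so they collapse naturally into one helper. A refactoring sketch, not part of the original app:
```python
def render_sorted(template, order):
    city = Result.query.with_entities(Result.city).limit(1).scalar()
    page = request.args.get('page', 1, type=int)
    page_results = Result.query.order_by(order).paginate(page=page, per_page=2)
    return render_template(template, results=page_results, origin=city)

@sort.route('/rate_high', methods=['GET', 'POST'])
def rate_high():
    return render_sorted('rate_high.html', Result.rating.desc())
```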
|
{
"source": "jdleesmiller/carnd-cloning",
"score": 2
}
|
#### File: jdleesmiller/carnd-cloning/common.py
```python
import os
IMAGE_COLUMNS = ['center_image', 'left_image', 'right_image']
CONTROL_COLUMNS = ['steering_angle', 'throttle', 'brake']
TELEMETRY_COLUMNS = ['speed']
IMAGE_SHAPE = (160, 320, 3)
DRIVING_LOG_CSV = 'driving_log.csv'
DRIVING_LOG_PICKLE = 'driving_log.p'
BOTTLENECK_PICKLE = 'bottleneck.p'
def base_model_stem(cut_index):
return 'base_model_%d' % cut_index
def make_filestem(prefix, params):
stem = prefix
for param in sorted(params):
value = params[param]
if value is None: value = 'None'
stem += '.' + param + '-' + str(value)
return stem
```
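`make_filestem` sorts the parameter names, so identical settings always map to the same artifact name on disk regardless of dict ordering. Assuming the function above is importable:
```python
stem = make_filestem('model', {'lr': 0.01, 'epochs': 20, 'dropout': None})
assert stem == 'model.dropout-None.epochs-20.lr-0.01'
```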
#### File: jdleesmiller/carnd-cloning/preprocess.py
```python
import os
import numpy as np
import pandas as pd
from scipy.ndimage.filters import gaussian_filter1d
from common import *
import bottleneck_features
def load_driving_log(path, header):
"""
Read in the driving log CSV and do some basic transforms.
"""
log = pd.read_csv(
path,
header=header,
names=IMAGE_COLUMNS + CONTROL_COLUMNS + TELEMETRY_COLUMNS)
# Get rid of the original image paths. (I've moved the files.)
log[IMAGE_COLUMNS] = log[IMAGE_COLUMNS].applymap(os.path.basename)
# Find delta t between frames from the image path names for smoothing.
log['time'] = pd.to_datetime(
log['center_image'], format='center_%Y_%m_%d_%H_%M_%S_%f.jpg')
# Add the correct paths, based on the location of the CSV file.
path_root = os.path.dirname(path)
log[IMAGE_COLUMNS] = log[IMAGE_COLUMNS].applymap(
lambda basename: os.path.join(path_root, 'IMG', basename))
# Add the path as a tag.
log['dataset'] = os.path.basename(path_root)
return log
def smooth(values, dt, tau):
"""
Apply smoothing for an unevenly spaced timeseries. Formula is from
http://www.eckner.com/papers/ts_alg.pdf
"""
result = np.empty(len(values))
result[0] = values[0]
weights = np.exp(-dt / tau)
for i in range(1, len(values)):
result[i] = weights[i] * result[i - 1] + (1 - weights[i]) * values[i]
return result
def smooth_control_inputs(log, tau):
"""
Bind smoothed control inputs to the driving log. This uses an exponential
moving average with time constant tau, and it averages both a forward and
a backward average. The weight for a measurement is $1 - exp(dt / tau)$,
where dt is the time since the last measurement.
"""
dt_prev = log['time'].diff( 1).map(lambda t: t.total_seconds())
dt_next = -log['time'].diff(-1).map(lambda t: t.total_seconds())
for control_column in CONTROL_COLUMNS:
smooth_forward = smooth(log[control_column], dt_prev, tau)
smooth_backward = smooth(
np.array(log[control_column])[::-1],
np.array(dt_next)[::-1],
tau)[::-1]
smooth_stack = np.vstack((smooth_forward, smooth_backward))
column_name = 'smooth_%s_%g' % (control_column, tau)
log[column_name] = np.mean(smooth_stack, 0)
return log
def smooth_control_inputs_gaussian(log, sigma):
"""
Bind smoothed control inputs to the driving log using a Gaussian filter.
This more closely preserves the mean than the exponential smoothing (but
the outputs have so far been not that different).
"""
for control_column in CONTROL_COLUMNS:
log['smooth_%s_gaussian_%g' % (control_column, sigma)] = \
gaussian_filter1d(log[control_column], sigma)
return log
def run(data_dir, cut_index, header=None, smooth=True):
"""
Load and smooth the driving log in the given directory and generate
bottleneck features.
"""
log = load_driving_log(os.path.join(data_dir, DRIVING_LOG_CSV), header)
if smooth:
log = smooth_control_inputs(log, 1)
log = smooth_control_inputs_gaussian(log, 3)
log = smooth_control_inputs_gaussian(log, 5)
else:
# The udacity data appears to be pretty smooth already, so just copy
# it over without smoothing.
log['smooth_steering_angle_1'] = log['steering_angle']
log['smooth_steering_angle_gaussian_3'] = log['steering_angle']
log['smooth_steering_angle_gaussian_5'] = log['steering_angle']
log = bottleneck_features.run(log, data_dir, cut_index)
return log
```
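`smooth` implements an exponential moving average for unevenly spaced samples: each observation enters with weight `1 - exp(-dt/tau)`, so a value arriving after a long gap nearly replaces the running average while rapid-fire samples barely move it. A small numeric check of that behaviour, using a standalone copy of the recursion rather than an import from this module:
```python
import numpy as np

def ema_uneven(values, dt, tau):
    out = np.empty(len(values))
    out[0] = values[0]
    weights = np.exp(-dt / tau)
    for i in range(1, len(values)):
        out[i] = weights[i] * out[i - 1] + (1 - weights[i]) * values[i]
    return out

values = np.array([0.0, 1.0, 1.0])
dt = np.array([np.nan, 0.01, 5.0])  # dt[0] is never used
print(ema_uneven(values, dt, tau=1.0))
# -> [0.      0.00995 0.99333] (approximately)
```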
|
{
"source": "jdlesage/tf-yarn",
"score": 2
}
|
#### File: tf-yarn/examples/linear_classifier_experiment.py
```python
import logging
import os
import pwd
import sys
from functools import partial
from subprocess import check_output
import tensorflow as tf
import winequality
from tf_yarn import Experiment, run_on_yarn, TaskSpec
def experiment_fn(dataset_path: str) -> Experiment:
train_data, test_data = winequality.get_train_eval_datasets(dataset_path)
def train_input_fn():
return (train_data.shuffle(1000)
.batch(128)
.repeat()
.make_one_shot_iterator()
.get_next())
def eval_input_fn():
return (test_data.shuffle(1000)
.batch(128)
.make_one_shot_iterator()
.get_next())
fs = check_output(
"hdfs getconf -confKey fs.defaultFS".split()).strip().decode()
user = pwd.getpwuid(os.getuid()).pw_name
config = tf.estimator.RunConfig(
tf_random_seed=42,
model_dir=f"{fs}/user/{user}/{__name__}")
estimator = tf.estimator.LinearClassifier(
winequality.get_feature_columns(),
n_classes=winequality.get_n_classes(),
config=config)
return Experiment(
estimator,
tf.estimator.TrainSpec(train_input_fn, max_steps=10),
tf.estimator.EvalSpec(
eval_input_fn,
steps=10,
start_delay_secs=0,
throttle_secs=30))
if __name__ == "__main__":
try:
[dataset_path] = sys.argv[1:]
except ValueError:
sys.exit(winequality.__doc__)
logging.basicConfig(level="INFO")
run_on_yarn(
partial(experiment_fn, dataset_path),
task_specs={
"chief": TaskSpec(memory=2 * 2 ** 10, vcores=4),
"evaluator": TaskSpec(memory=2 ** 10, vcores=1)
},
files={
os.path.basename(winequality.__file__): winequality.__file__,
}
)
```
|
{
"source": "jdlin/post-Layout-Verification",
"score": 3
}
|
#### File: jdlin/post-Layout-Verification/summary.py
```python
import sys, os, re, string, getopt, fileinput
def parseDRC(filename):
    #pattern = re.compile(r"RULECHECK\s+.+\s+TOTAL\s+Result\s+Count\s+=\s+\d*(\d*)", re.VERBOSE)
    pattern = re.compile(r"RULECHECK\s*.+\s*TOTAL\s+Result\s+Count\s*=\s*\d*[(]*\d*[(]*\d*[)]*", re.VERBOSE)
    pattern2 = re.compile(r"RULECHECK\s+.+\s+NOT\s+EXECUTED", re.VERBOSE)
    rulecheck = []
    for line in fileinput.input(filename):
        match = pattern.search(line[:-1])
        match2 = pattern2.search(line[:-1])
        if match:
            s = line.split()
            if s[7] != '0':
                try:
                    rulecheck.append([s[1], s[7], s[8]])
                except:
                    rulecheck.append([s[1], s[7], "-1"])
        elif match2:
            s = line.split()
            rulecheck.append([s[1], "-1", "-1"])
    return rulecheck
def parseDRCDB(filename, comment):
    pattern = re.compile(r"\s*{.*@\s*", re.VERBOSE)
    for line in fileinput.input(filename):
        match = pattern.search(line[:-1])
        if match:
            s = pattern.split(line[:-1])
            if s[0] not in comment.keys():
                comment[s[0]] = s[1]
    return comment
def outputDRC(celllist):
    cell_list = []
    rulecheck = {}
    comment = {}
    for cell in fileinput.input(celllist):
        cell_list.append(cell[:-1])
    for cell in cell_list:
        reportpath = "run_drc/" + cell + "/"
        os.chdir(reportpath)
        reportname = cell + ".rep"
        dbname = cell + ".db"
        #print "Scanning ...", reportpath + reportname
        rulecheck[cell] = parseDRC(reportname)
        comment = parseDRCDB(dbname, comment)
        os.chdir("../..")
    rule = {}
    for cell in cell_list:
        #print "CELL", cell
        for r in rulecheck[cell]:
            if r[0] in rule.keys():
                rule[r[0]].append(cell)
            else:
                rule[r[0]] = [cell]
            #print "RULECHECK", r[0], " = ", r[1], r[2], comment[r[0]]
        #print "-----------------------------"
    #print "======================================================"
    for r in rule.keys():
        print "RULECHECK %-16s %6s" % (r, len(rule[r]))
        for cell in rule[r]:
            print cell,
        print
    print "======================================================"
    print "DRC Summary"
    print "======================================================"
    i = 0
    for r in rule.keys():
        summary = (r, len(rule[r]), comment[r])
        print "RULECHECK %-16s %6s %s" % summary
        i = i + 1
    print "Total :", i, "RULECHECK"
if __name__ == '__main__':
    opts, args = getopt.getopt(sys.argv[1:], "i")
    filename = args
    outputDRC(filename)
```
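The DRC report lines that `parseDRC` targets look like `RULECHECK <name> ... TOTAL Result Count = N (M)`; the parser then slices the whitespace-split fields. A quick demonstration against a fabricated report line (the line itself is invented, not taken from a real report):
```python
import re

pattern = re.compile(r"RULECHECK\s*.+\s*TOTAL\s+Result\s+Count\s*=\s*\d*[(]*\d*[(]*\d*[)]*")
line = "RULECHECK M1.S.1 ........ TOTAL Result Count = 3 (3)"
s = line.split()
print(pattern.search(line) is not None, s[1], s[7], s[8])
# -> True M1.S.1 3 (3)
```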
|
{
"source": "jdlivingston/Miriad_Multicore",
"score": 2
}
|
#### File: jdlivingston/Miriad_Multicore/MM_cleaner.py
```python
import os
import subprocess,shlex,shutil
import sys,getopt
import glob
from multiprocessing import Pool
import tqdm
# Miriad Multicore Cleaner
# Work through making images from .uvaver miriad files
# <NAME> 10 Oct 2019
# Modified from RC polarimetry script from 10 August 2016 and <NAME> 10 Dec 2018
# WORKS FOR PYTHON 3
def get_noise(source,freq,chan):
'''
Generates noise cutoff from stokes v image to use for cleaning
Auto Inputs:
args = source,freq,chan
Outputs:
float that will be used as cutoff for cleaning process
'''
from astropy.io import fits
import numpy as np
noise=-100.0
stokes='v'
maps = f'{source}.{freq}.{chan}.{stokes}.map'
log_file = 'error_noise.log'
if os.path.isdir(maps):
fitsfile = f'{maps}.fits'
cmd = f'fits in={maps} out={fitsfile} op=xyout '
#print(cmd)
args=shlex.split(cmd)
with open(log_file,'a') as log:
p=subprocess.Popen(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# Print the output
#for line in p.stdout:
# print(line)
p.wait()
# Open the FITS file
hdul = fits.open(fitsfile)
data = hdul[0].data
noise=np.std(data)/2
#print(f"RMS = {noise}")
os.remove(fitsfile)
return noise
def clean_images(args):
'''
Takes inputs and runs miriad invert from command line producing dirty maps and beams
User Inputs:
args = chan, source, freq, region, nit
Outputs:
cleans dirty maps and produces fits images for each stokes parameter
'''
chan, source, freq, region, nit = args
stokespars = ['i','q','u','v']
# Cycle over the channels
# Gets noise for clean cutoff from stokes v
cut_noise = get_noise(source,freq,chan)
# Create names for files
for stokes in stokespars:
mod = f'{source}.{freq}.{chan:04d}.{stokes}.mod'
cln = f'{source}.{freq}.{chan:04d}.{stokes}.cln'
pbcorr = f'{source}.{freq}.{chan:04d}.{stokes}.pbcorr'
maps = f'{source}.{freq}.{chan:04d}.{stokes}.map'
beam = f'{source}.{freq}.{chan:04d}.beam'
rms = f'{source}.{freq}.{chan:04d}.{stokes}.rms'
outfile = f'{source}.{freq}.{chan:04d}.{stokes}.cln.fits'
log_file = 'error_cln.log'
# Check that the map exists before trying
if not os.path.isdir(maps) and not os.path.isdir(beam):
#print(f"Map {maps} does not exist")
pass
else:
# Run through clean
cmd = f'clean map={maps} beam={beam} region=percentage({region}) niters={nit} cutoff={cut_noise} out={mod}'
#print(cmd)
args=shlex.split(cmd) # Splits the cmd into a string for subprocess
with open(log_file,'a') as log:
p=subprocess.Popen(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# Print the output
#for line in p.stdout:
# print(line)
p.wait()
# Restor the images
cmd = f'restor map={maps} beam={beam} model={mod} out={pbcorr}'
#print(cmd)
args=shlex.split(cmd)
with open(log_file,'a') as log:
p=subprocess.Popen(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# #Print the output
#for line in p.stdout:
# print(line)
p.wait()
# # Primary Beam Correction
cmd = f'linmos in={pbcorr} out={cln}'
#print(cmd)
args=shlex.split(cmd)
with open(log_file,'a') as log:
p=subprocess.Popen(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# #Print the output
#for line in p.stdout:
# print(line)
p.wait()
# # Copy missing RMS after primary beam correction
cmd = f'gethd in={pbcorr}/rms log={rms}'
#print(cmd)
args=shlex.split(cmd)
with open(log_file,'a') as log:
p=subprocess.Popen(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# #Print the output
#for line in p.stdout:
# print(line)
p.wait()
# # Paste missing RMS onto primary beam correction
cmd = f'puthd in={cln}/rms value=@{rms}'
#print(cmd)
args=shlex.split(cmd)
with open(log_file,'a') as log:
p=subprocess.Popen(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# #Print the output
#for line in p.stdout:
# print(line)
p.wait()
# #convert to fits
cmd =f'fits in={cln} out={outfile} op=xyout'
#print(cmd)
args=shlex.split(cmd) # Splits the cmd into a string for subprocess
with open(log_file,'a') as log:
p=subprocess.Popen(args, stdout=subprocess.DEVNULL, stderr=log)
# Print the output
#for line in p.stdout:
# print(line)
p.wait()
return
def main(pool, args):
inputs = [[i, args.source, args.freq, args.region, args.n_iters] for i in range(args.start_chan, args.end_chan, args.step_size)]
#Runs each chunk of freq on new processor
print('Cleaning Images')
for _ in tqdm.tqdm(pool.imap(clean_images, inputs),total=len(inputs)):
pass
pool.close()
if __name__ == "__main__":
import argparse
import schwimmbad
# Help string to be shown using the -h option
descStr = """
Takes dirty maps produced using MM_inverter.py and uses miriad clean, linmos,
and restor to generate beam corrected clean images.
"""
# Parse the command line options
parser = argparse.ArgumentParser(description=descStr,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-s", dest="source",
type=str, help="Source name in RA-DEC convention from miriad")
parser.add_argument("-f", dest="freq", type=int, default=2100,
help="centre frequency in MHz")
parser.add_argument("-1", dest="start_chan", type=int, default=1,
help="starting channel number")
parser.add_argument("-2", dest="end_chan", type=int, default=1500,
help="final channel number")
parser.add_argument("-d", dest="step_size", type=int, default=5,
help="channel step_size for images")
parser.add_argument("-i", dest="n_iters", type=int, default=1000,
help="number of iterations to clean")
parser.add_argument("-r", dest="region", type=float, default=95,
help="region (percentage) to clean as percentage of image")
group = parser.add_mutually_exclusive_group()
group.add_argument("--ncores", dest="n_cores", default=1,
type=int, help="Number of processes (uses multiprocessing).")
group.add_argument("--mpi", dest="mpi", default=False,
action="store_true", help="Run with MPI.")
args = parser.parse_args()
pool = schwimmbad.choose_pool(mpi=args.mpi, processes=args.n_cores)
#pool = schwimmbad.SerialPool()
if args.mpi:
if not pool.is_master():
pool.wait()
sys.exit(0)
# Clean the images
main(pool, args)
```
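`get_noise` estimates the clean cutoff as half the standard deviation of the Stokes V map, on the assumption that V carries essentially no signal for most continuum fields and therefore measures the noise floor. The core of that estimate, separated from the miriad FITS round-trip (the path below is hypothetical):
```python
import numpy as np
from astropy.io import fits

def rms_cutoff(fits_path):
    # half the pixel standard deviation of a (presumed signal-free) Stokes V image
    data = fits.open(fits_path)[0].data
    return np.std(data) / 2

# cutoff = rms_cutoff('source.2100.0001.v.map.fits')
```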
|
{
"source": "JDLopes/iob-tex",
"score": 3
}
|
#### File: iob-tex/software/block2tex.py
```python
import sys
import os.path
import re
def block_parse (program) :
program_out = []
for line in program :
flds_out = ['']
subline = line
flds = subline.split()
if not flds : continue #empty line
#print flds[0]
if (flds[0] != '//BLOCK'): continue #not a block description
#print flds
        flds_out[0] = re.sub('_', r'\_', " ".join(flds[1:])) + " \\vspace{2mm}" #block
program_out.append(flds_out)
return program_out
def main () :
#parse command line
if len(sys.argv) < 3:
print("Usage: ./block2tex.py outfile [infiles]")
exit()
else:
outfile = sys.argv[1]
infiles = sys.argv[2:]
pass
print(sys.argv)
#add input files
program = []
for infile in infiles:
fin = open (infile, 'r')
program.extend(fin.readlines())
fin.close()
#parse input files
program = block_parse (program)
#write output file
fout = open (outfile, 'w')
for i in range(len(program)):
if ((i%2) != 0): fout.write("\\rowcolor{iob-blue}\n")
line = program[i]
line_out = str(line[0])
for l in range(1,len(line)):
line_out = line_out + (' & %s' % line[l])
        fout.write(line_out + ' \\\\ \\hline\n')
#Close output file
fout.close()
if __name__ == "__main__" : main ()
```
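`block_parse` keeps only lines starting with `//BLOCK`, escapes underscores for LaTeX, and appends vertical spacing, turning a `//BLOCK` annotation into a LaTeX table cell. For instance (the input line is invented for illustration):
```python
program = ['//BLOCK cpu_core & Main RISC-V core\n', 'wire clk;\n']
print(block_parse(program))
# -> [['cpu\\_core & Main RISC-V core \\vspace{2mm}']]
```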
|
{
"source": "jdlourenco/taxi-fare-deep",
"score": 3
}
|
#### File: taxi-fare-deep/taxifare_deep/data.py
```python
import pandas as pd
AWS_BUCKET_PATH = "s3://wagon-public-datasets/taxi-fare-train.csv"
def get_data(nrows=10_000):
'''returns a DataFrame with nrows from s3 bucket'''
df = pd.read_csv(AWS_BUCKET_PATH, nrows=nrows)
return df
def clean_data(df, test=False):
df = df.dropna(how='any', axis='rows')
df = df[(df.dropoff_latitude != 0) | (df.dropoff_longitude != 0)]
df = df[(df.pickup_latitude != 0) | (df.pickup_longitude != 0)]
if "fare_amount" in list(df):
df = df[df.fare_amount.between(0, 4000)]
df = df[df.passenger_count < 8]
df = df[df.passenger_count > 0]
df = df[df["pickup_latitude"].between(left=40, right=42)]
df = df[df["pickup_longitude"].between(left=-74.3, right=-72.9)]
df = df[df["dropoff_latitude"].between(left=40, right=42)]
df = df[df["dropoff_longitude"].between(left=-74, right=-72.9)]
return df
if __name__ == '__main__':
df = get_data()
```
#### File: taxi-fare-deep/taxifare_deep/utils.py
```python
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import time
def haversine_vectorized(
df,
start_lat="pickup_latitude",
start_lon="pickup_longitude",
end_lat="dropoff_latitude",
end_lon="dropoff_longitude",
):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees).
Vectorized version of the haversine distance for pandas df
Computes distance in kms
"""
lat_1_rad, lon_1_rad = np.radians(df[start_lat].astype(float)), np.radians(
df[start_lon].astype(float)
)
lat_2_rad, lon_2_rad = np.radians(df[end_lat].astype(float)), np.radians(
df[end_lon].astype(float)
)
dlon = lon_2_rad - lon_1_rad
dlat = lat_2_rad - lat_1_rad
a = (
np.sin(dlat / 2.0) ** 2
+ np.cos(lat_1_rad) * np.cos(lat_2_rad) * np.sin(dlon / 2.0) ** 2
)
c = 2 * np.arcsin(np.sqrt(a))
return 6371 * c
def compute_rmse(y_pred, y_true):
return np.sqrt(((y_pred - y_true) ** 2).mean())
def sinuser(X, period):
return np.sin(2 * math.pi / period * X)
def cosinuser(X, period):
return np.cos(2 * math.pi / period * X)
def plot_model_history(history):
"""Plot a Keras-fitted model history"""
plt.figure(figsize=(12, 10))
plt.subplot(2, 1, 1)
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("Model loss")
plt.ylabel("Mean Square Error - Loss")
plt.xlabel("Epoch")
plt.legend(["Train", "Validation"], loc="best")
plt.subplot(2, 1, 2)
plt.plot(history.history["mae"])
plt.plot(history.history["val_mae"])
plt.title("Model mae")
plt.ylabel("Mean Absolute Error")
plt.xlabel("Epoch")
plt.legend(["Train", "Validation"], loc="best")
plt.show()
def df_optimized(df, verbose=True, **kwargs):
"""
Reduces size of dataframe by downcasting numerical columns
:param df: input dataframe
:param verbose: print size reduction if set to True
:param kwargs:
:return:
"""
in_size = df.memory_usage(index=True).sum()
for type in ["float", "integer"]:
l_cols = list(df.select_dtypes(include=type))
for col in l_cols:
df[col] = pd.to_numeric(df[col], downcast=type)
if type == "float":
df[col] = pd.to_numeric(df[col], downcast="integer")
out_size = df.memory_usage(index=True).sum()
ratio = (1 - round(out_size / in_size, 2)) * 100
GB = out_size / 1000000000
if verbose:
print("optimized size by {} % | {} GB".format(ratio, GB))
return df
################
# DECORATORS #
################
def simple_time_tracker(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
if 'log_time' in kw:
name = kw.get('log_name', method.__name__.upper())
kw['log_time'][name] = int(te - ts)
else:
print(method.__name__, round(te - ts, 2))
return result
return timed
```
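`haversine_vectorized` is the haversine great-circle formula, `d = 2R * arcsin(sqrt(sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlon/2)))` with R = 6371 km. A quick sanity check on one well-known pair of points (JFK airport to Times Square is roughly 22 km as the crow flies):
```python
import pandas as pd

trip = pd.DataFrame({
    "pickup_latitude": [40.6413], "pickup_longitude": [-73.7781],    # JFK
    "dropoff_latitude": [40.7580], "dropoff_longitude": [-73.9855],  # Times Square
})
print(haversine_vectorized(trip).iloc[0])  # ~21.8 km
```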
|
{
"source": "jdlovins/knights381",
"score": 2
}
|
#### File: knights381/home/views.py
```python
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate, logout
from .forms import SignUpForm, LoginForm, UserForm, ProfileForm, ContactForm
from .decorators import custom_login_required
from .models import Profile, Book, ShoppingCart
from django.contrib.auth.decorators import login_required
# Create your views here.
def index(request):
if request.user.is_authenticated():
pass
next_url = request.GET.get('next')
signup_form = SignUpForm()
login_form = LoginForm()
return render(request, 'index.html', {'signup_form': signup_form, 'login_form': login_form, 'next': next_url})
def login_user(request):
if request.user.is_authenticated():
        return redirect(index)
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
user_name = form.cleaned_data.get('user_name')
password = form.cleaned_data.get('password')
user = authenticate(username=user_name, password=password)
if user is not None:
if not request.POST.get('remember_me', None):
request.session.set_expiry(0)
login(request, user)
else:
messages.error(request, "Username or Password is invalid, please try again.")
else:
messages.error(request, form.errors)
next_url = request.POST.get('next')
if next_url is not None:
return redirect(next_url)
else:
return redirect(index)
def register_user(request):
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
user_name = form.cleaned_data.get('user_name')
email = form.cleaned_data.get('email')
password = form.cleaned_data.get('password')
user = User.objects.create_user(user_name, email, password)
user.save()
login(request, user)
return redirect(index)
else:
# for e in form.errors:
# for ee in form[e].errors:
messages.error(request, form.errors)
return redirect(index)
def logout_user(request):
if request.user.is_authenticated():
logout(request)
return redirect(index)
@custom_login_required
def user_profile(request):
signup_form = SignUpForm()
login_form = LoginForm()
if request.method == 'POST':
user_form = UserForm(request.POST, instance=request.user)
profile_form = ProfileForm(request.POST, instance=request.user.profile)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
messages.success(request, 'Your profile was successfully updated!')
return redirect(index)
else:
messages.error(request, 'Please correct the error below.')
else:
user_form = UserForm(instance=request.user)
profile_form = ProfileForm(instance=request.user.profile)
return render(request, 'account/profile.html', {
'user_form': user_form,
'profile_form': profile_form,
'signup_form': signup_form,
'login_form': login_form,
})
def catalog(request):
signup_form = SignUpForm()
login_form = LoginForm()
book_list = Book.objects.all()
return render(request, 'catalog.html', {'book_list': book_list,
'signup_form': signup_form,
'login_form': login_form})
def contact(request):
signup_form = SignUpForm()
login_form = LoginForm()
if request.method == 'POST':
contact_form = ContactForm(request.POST)
else:
contact_form = ContactForm()
return render(request, 'contact.html', {'form': contact_form, 'signup_form': signup_form, 'login_form': login_form})
@login_required()
def cart(request):
if request.method == 'POST':
book_id = request.POST['book_id']
if book_id is not None:
book = Book.objects.get(id=book_id)
item_exist = ShoppingCart.objects.filter(user_id=request.user.id, book=book).first()
if item_exist is not None:
item_exist.quantity += 1
item_exist.save()
else:
cart_item = ShoppingCart(user_id=request.user.id, book=book, quantity=1)
cart_item.save()
total_cart = ShoppingCart.objects.filter(user_id=request.user.id).all()
total_price = 0
for item in total_cart:
total_price += (item.book.book_retailPrice * item.quantity)
return render(request, 'cart.html', {'cart_items': total_cart, 'total_price': total_price})
```
|
{
"source": "jdlph/MIOCSV",
"score": 3
}
|
#### File: MIOCSV/test/test.py
```python
import csv
from time import time
from time import sleep
def test_reader():
ts = time()
with open('csvreader.csv', 'r') as f:
reader = csv.reader(f)
for line in reader:
continue
te = time()
print(f'Python csv.reader parses {reader.line_num} lines '
f'in {(te-ts)*1000:.0f} milliseconds')
def test_dictreader():
# sleep for 3 seconds
sleep(3)
ts = time()
with open('csvreader.csv', 'r') as f:
reader = csv.DictReader(f)
for line in reader:
continue
te = time()
print(f'Python csv.DictReader parses {reader.line_num} lines '
f'in {(te-ts)*1000:.0f} milliseconds')
if __name__ == '__main__':
test_reader()
test_dictreader()
```
|
{
"source": "jdlph/shortest-path-problem",
"score": 3
}
|
#### File: shortest-path-problem/src/classes.py
```python
class Node:
def __init__(self, nodeID, nodeUID):
# internal node id used for sp calculation
self.id = nodeID
# user-defined node id defined by user or input file
self.uid = nodeUID
self.outgoingLinks = []
def AddOutgoingLinks(self, linkID):
self.outgoingLinks.append(linkID)
def GetOutgoingLinks(self):
return self.outgoingLinks
def GetOutgoingLinksIter(self):
for i in self.outgoingLinks:
yield i
class Link:
def __init__(self, linkID, linkUID, origNodeID_, destNodeID_, linkLen_):
# internal link id used for sp calculation
self.id = linkID
# user-defined link id defined by user or input file
self.uid = linkUID
self.origNodeID = origNodeID_
self.destNodeID = destNodeID_
self.linkLen = linkLen_
def GetOrigNodeID(self):
return self.origNodeID
def GetDestNodeID(self):
return self.destNodeID
def GetLen(self):
return self.linkLen
class SimpleDequePy:
""" Special implementation of deque using fix-length array
the interface utilized for shortest-path algorithms is exactly the same as
the built-in deque.
"""
def __init__(self, size_):
self.nodes = [-1 for i in range(size_)]
self.head = -1
self.tail = -1
    def __len__(self):
        # non-empty check: bool is an int subclass, so len() accepts it
        return self.head != -1
def appendleft(self, nodeID):
if self.head == -1:
self.nodes[nodeID] = -1
self.head = nodeID
self.tail = nodeID
else:
self.nodes[nodeID] = self.head
self.head = nodeID
def append(self, nodeID):
if self.head == -1:
self.head = nodeID
self.tail = nodeID
self.nodes[nodeID] = -1
else:
self.nodes[self.tail] = nodeID
self.nodes[nodeID] = -1
self.tail = nodeID
def popleft(self):
left = self.head
self.head = self.nodes[left]
self.nodes[left] = -1
return left
def clear(self):
self.head = -1
self.tail = -1
```
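A short usage sketch of the classes above, assuming node IDs are dense integers starting at 0 (the fixed-length array inside `SimpleDequePy` requires this):
```python
# Build a tiny two-node, one-link network and exercise the deque.
n0 = Node(nodeID=0, nodeUID=100)
n1 = Node(nodeID=1, nodeUID=101)
link = Link(linkID=0, linkUID=200, origNodeID_=0, destNodeID_=1, linkLen_=2.5)
n0.AddOutgoingLinks(link.id)

dq = SimpleDequePy(size_=2)
dq.append(0)        # enqueue node 0 at the tail
dq.appendleft(1)    # push node 1 at the head (deque/SLF-style)
while len(dq):
    node_id = dq.popleft()
    print(node_id)  # prints 1, then 0
```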
|
{
"source": "jdlrobson/tiddlyspace",
"score": 2
}
|
#### File: tiddlyspace/test/fixtures.py
```python
import os
import sys
import shutil
import httplib2
import Cookie
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.recipe import Recipe
from tiddlyweb.config import config
from tiddlyweb.store import HOOKS
from tiddlywebplugins.utils import get_store
from tiddlywebplugins.instancer.util import spawn
from tiddlywebplugins.tiddlyspace import instance as instance_module
from tiddlywebplugins.tiddlyspace.config import config as init_config
SESSION_COUNT = 1
def get_auth(username, password):
http = httplib2.Http()
response, _ = http.request(
'http://0.0.0.0:8080/challenge/tiddlywebplugins.tiddlyspace.cookie_form',
body='user=%s&password=%s' % (username, password),
method='POST',
headers={'Content-Type': 'application/x-www-form-urlencoded'})
assert response.previous['status'] == '303'
user_cookie = response.previous['set-cookie']
cookie = Cookie.SimpleCookie()
cookie.load(user_cookie)
return cookie['tiddlyweb_user'].value
def make_test_env(module):
global SESSION_COUNT
    try:
        shutil.rmtree('test_instance')
    except OSError:
        # the test instance directory may not exist yet
        pass
os.system('mysqladmin -f drop tiddlyspacetest create tiddlyspacetest')
if SESSION_COUNT > 1:
del sys.modules['tiddlywebplugins.tiddlyspace.store']
del sys.modules['tiddlywebplugins.mysql2']
del sys.modules['tiddlywebplugins.sqlalchemy2']
import tiddlywebplugins.tiddlyspace.store
import tiddlywebplugins.mysql2
import tiddlywebplugins.sqlalchemy2
clear_hooks(HOOKS)
SESSION_COUNT += 1
db_config = init_config['server_store'][1]['db_config']
db_config = db_config.replace('///tiddlyspace?', '///tiddlyspacetest?')
init_config['server_store'][1]['db_config'] = db_config
init_config['log_level'] = 'DEBUG'
if sys.path[0] != os.getcwd():
sys.path.insert(0, os.getcwd())
spawn('test_instance', init_config, instance_module)
os.symlink('../tiddlywebplugins/templates', 'templates')
from tiddlyweb.web import serve
module.store = get_store(config)
app = serve.load_app()
def app_fn():
return app
module.app_fn = app_fn
def make_fake_space(store, name):
def set_policy(policy, private=False):
for policy_attr in policy.attributes:
if policy_attr not in ['read', 'owner']:
setattr(policy, policy_attr, [name])
if private:
policy.read = [name]
public_recipe = Recipe('%s_public' % name)
private_recipe = Recipe('%s_private' % name)
public_bag = Bag('%s_public' % name)
private_bag = Bag('%s_private' % name)
archive_bag = Bag('%s_archive' % name)
set_policy(public_recipe.policy)
set_policy(private_recipe.policy, private=True)
set_policy(public_bag.policy)
set_policy(private_bag.policy, private=True)
set_policy(archive_bag.policy, private=True)
public_recipe.set_recipe([('system', ''), ('tiddlyspace', ''), ('%s_public' % name, '')])
private_recipe.set_recipe([('system', ''), ('tiddlyspace', ''), ('%s_public' % name, ''),
('%s_private' % name, '')])
for entity in [public_recipe, private_recipe, public_bag,
private_bag, archive_bag]:
store.put(entity)
def clear_hooks(hooks): # XXX: temporary workaround?
for entity, actions in hooks.items():
actions['put'] = []
actions['delete'] = []
actions['get'] = []
```
#### File: tiddlyspace/test/test_web_status.py
```python
from test.fixtures import make_test_env, make_fake_space, get_auth
from wsgi_intercept import httplib2_intercept
import wsgi_intercept
import httplib2
import py.test
import simplejson
from tiddlyweb.model.user import User
from tiddlywebplugins.tiddlyspace import __version__ as VERSION
from tiddlywebplugins.tiddlyspace.spaces import change_space_member
def setup_module(module):
make_test_env(module)
httplib2_intercept.install()
wsgi_intercept.add_wsgi_intercept('0.0.0.0', 8080, app_fn)
wsgi_intercept.add_wsgi_intercept('thing.0.0.0.0', 8080, app_fn)
module.http = httplib2.Http()
make_fake_space(store, 'thing')
def teardown_module(module):
import os
os.chdir('..')
def test_status_base():
response, content = http.request('http://0.0.0.0:8080/status')
assert response['status'] == '200'
info = simplejson.loads(content)
assert info['username'] == 'GUEST'
assert info['tiddlyspace_version'] == VERSION
assert info['server_host']['host'] == '0.0.0.0'
assert info['server_host']['port'] == '8080'
assert 'space' not in info
def test_status_space():
response, content = http.request('http://thing.0.0.0.0:8080/status')
assert response['status'] == '200'
info = simplejson.loads(content)
assert info['username'] == 'GUEST'
assert info['tiddlyspace_version'] == VERSION
assert info['server_host']['host'] == '0.0.0.0'
assert info['server_host']['port'] == '8080'
assert info['space']['name'] == 'thing'
assert info['space']['recipe'] == 'thing_public'
def test_status_base_auth():
user = User('foo')
    user.set_password('foobar')
store.put(user)
user_cookie = get_auth('foo', 'foobar')
change_space_member(store, 'thing', add='foo')
response, content = http.request('http://0.0.0.0:8080/status',
headers={'Cookie': 'tiddlyweb_user="%s"' % user_cookie})
assert response['status'] == '200'
info = simplejson.loads(content)
assert info['username'] == 'foo'
assert 'space' not in info
def test_status_space_auth():
user_cookie = get_auth('foo', 'foobar')
response, content = http.request('http://thing.0.0.0.0:8080/status',
headers={'Cookie': 'tiddlyweb_user="%s"' % user_cookie})
assert response['status'] == '200'
info = simplejson.loads(content)
assert info['username'] == 'foo'
assert info['space']['name'] == 'thing'
assert info['space']['recipe'] == 'thing_private'
```
|
{
"source": "jdlubrano/cad_volume",
"score": 2
}
|
#### File: jdlubrano/cad_volume/step_to_stl.py
```python
import getopt
import os
import sys
from OCC.StlAPI import StlAPI_Writer
from OCC.STEPControl import STEPControl_Reader
from OCC.IFSelect import IFSelect_RetDone, IFSelect_ItemsByEntity
def usage():
print('step_to_stl.py -i source -o dest')
sys.exit(2)
def convert(source, dest):
step_reader = STEPControl_Reader()
status = step_reader.ReadFile(source)
if status == IFSelect_RetDone:
i = 1
ok = False
number_of_roots = step_reader.NbRootsForTransfer()
while i <= number_of_roots and not ok:
ok = step_reader.TransferRoot(i)
i += 1
if (not ok):
return { 'error': 'Failed to find a suitable root for the STEP file' }
shape = step_reader.Shape(1)
output = os.path.abspath(dest)
stl_ascii = False
stl_writer = StlAPI_Writer()
stl_writer.SetASCIIMode(stl_ascii)
stl_writer.Write(shape, output)
print "STL FILE: %s" % output
else:
print "Error, can't read file: %s" % './demo.stp'
def main(argv):
try:
opts, args = getopt.getopt(argv, "hi:o:", ["infile=", "outfile="])
except getopt.GetoptError:
usage()
source = None
dest = None
for opt, arg in opts:
if opt in ("-i", "--infile"):
source = arg
if opt in ("-o", "--outfile"):
dest = arg
if source != None and dest != None:
convert(source, dest)
else:
usage()
sys.exit(0)
if __name__ == '__main__':
main(sys.argv[1:])
```
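A usage sketch, assuming pythonocc (the `OCC` bindings imported above) is installed and a `demo.stp` file exists alongside the script:
```python
# Programmatic use, bypassing the CLI wrapper:
convert('demo.stp', 'demo.stl')

# Equivalent command-line invocation:
#   python step_to_stl.py -i demo.stp -o demo.stl
```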
#### File: jdlubrano/cad_volume/volume.py
```python
import getopt
import json
import math
import pdb
import sys
from OCC.Bnd import Bnd_Box
from OCC.BRepMesh import BRepMesh_IncrementalMesh
from OCC.BRepBndLib import brepbndlib_Add
from OCC.GProp import GProp_GProps
from OCC.gp import *
from OCC.BRepGProp import brepgprop_VolumeProperties
from OCC.BRepAlgoAPI import BRepAlgoAPI_Cut
from OCC.BRepPrimAPI import BRepPrimAPI_MakeCylinder
from OCC.TColStd import TColStd_SequenceOfAsciiString
from OCC.STEPControl import STEPControl_Reader
from OCC.IFSelect import IFSelect_RetDone, IFSelect_ItemsByEntity
def display_shapes(shapes):
from OCC.Display.SimpleGui import init_display
display, start_display, add_menu, add_function_to_menu = init_display()
[display.DisplayShape(shape, update=True) for shape in shapes]
start_display()
def calculate_bnd_box(bbox):
xmin, ymin, zmin, xmax, ymax, zmax = bbox.Get()
x = xmax - xmin
y = ymax - ymin
z = zmax - zmin
return {
'volume': x * y * z,
'x_length': x,
'y_length': y,
'z_length': z,
'x_min': xmin,
'x_max': xmax,
'y_min': ymin,
'y_max': ymax,
'z_min': zmin,
'z_max': zmax
}
def pick_lengths(bounding_box):
lengths = [ i for i in bounding_box.keys() if i.endswith('length') ]
return { key: bounding_box[key] for key in lengths }
def get_longest_dimension(bounding_box):
    lengths_only = pick_lengths(bounding_box)
    longest = max(lengths_only.values())
    longest_length = list(lengths_only.keys())[list(lengths_only.values()).index(longest)]
return longest, longest_length[0]
def x_axis(bounding_box):
axis_direction = gp_Dir(gp_XYZ(1,0,0))
axis_origin = gp_Pnt(
bounding_box['x_min'],
(bounding_box['y_min'] + bounding_box['y_max']) / 2,
(bounding_box['z_min'] + bounding_box['z_max']) / 2
)
return gp_Ax2(axis_origin, axis_direction)
def y_axis(bounding_box):
axis_direction = gp_Dir(gp_XYZ(0,1,0))
axis_origin = gp_Pnt(
(bounding_box['x_min'] + bounding_box['x_max']) / 2,
bounding_box['y_min'],
(bounding_box['z_min'] + bounding_box['z_max']) / 2
)
return gp_Ax2(axis_origin, axis_direction)
def z_axis(bounding_box):
axis_direction = gp_Dir(gp_XYZ(0,0,1))
axis_origin = gp_Pnt(
(bounding_box['x_min'] + bounding_box['x_max']) / 2,
(bounding_box['y_min'] + bounding_box['y_max']) / 2,
bounding_box['z_min']
)
return gp_Ax2(axis_origin, axis_direction)
def determine_axis(bounding_box):
l, longest_dimension = get_longest_dimension(bounding_box)
axis = None
if longest_dimension == 'x_length':
axis = x_axis(bounding_box)
elif longest_dimension == 'y_length':
axis = y_axis(bounding_box)
else:
axis = z_axis(bounding_box)
return axis
def get_axis(dimension, bounding_box):
axis_fn = dimension + '_axis'
return globals()[axis_fn](bounding_box)
def cylinder_dict(cylinder, cut, radius, height):
return {
'radius': radius,
'height': height,
'cylinder_volume': calculate_volume(cylinder.Shape()),
'cylinder': cylinder,
'cut': cut,
'cut_vol': calculate_volume(cut.Shape())
}
def min_cylinder(height_dimension, shape, bounding_box):
axis = get_axis(height_dimension, bounding_box)
lengths = pick_lengths(bounding_box)
height_length = height_dimension + '_length'
height = bounding_box[height_length]
    radius = max([ value for key, value in lengths.items() if key != height_length ]) / 2
cylinder = BRepPrimAPI_MakeCylinder(axis, radius, height)
cut = BRepAlgoAPI_Cut(shape, cylinder.Shape())
return cylinder_dict(cylinder, cut, radius, height)
def try_min_cylinders(shape, bounding_box):
x = min_cylinder('x', shape, bounding_box)
y = min_cylinder('y', shape, bounding_box)
z = min_cylinder('z', shape, bounding_box)
bounding = [ i for i in [x,y,z] if i['cut_vol'] == 0.0 ]
if bounding:
min_volume = min([ i['cylinder_volume'] for i in bounding ])
min_bounding = [ i for i in bounding if i['cylinder_volume'] == min_volume ]
return min_bounding[0]
else:
return None
def smallest_max_cylinder(shape, bounding_box):
# cylinder with diagonal of smaller face of bounding box
height, longest_dimension = get_longest_dimension(bounding_box)
longest_length = longest_dimension + '_length'
lengths = pick_lengths(bounding_box)
    face_sides = [ value for key, value in lengths.items() if key != longest_length ]
radius = math.sqrt(sum([i ** 2 for i in face_sides])) / 2 # diagonal / 2
axis = get_axis(longest_dimension, bounding_box)
cylinder = BRepPrimAPI_MakeCylinder(axis, radius, height)
cut = BRepAlgoAPI_Cut(shape, cylinder.Shape())
return cylinder_dict(cylinder, cut, radius, height)
def calculate_bounding_cylinder(shape, bounding_box):
cylinder = try_min_cylinders(shape, bounding_box)
if cylinder:
return cylinder
else:
return smallest_max_cylinder(shape, bounding_box)
def calculate_volume(shape):
props = GProp_GProps()
brepgprop_VolumeProperties(shape, props)
return props.Mass()
def analyze_file(filename):
step_reader = STEPControl_Reader()
status = step_reader.ReadFile(filename)
result = None
if status == IFSelect_RetDone: # check status
number_of_roots = step_reader.NbRootsForTransfer()
ok = False
i = 1
while i <= number_of_roots and not ok:
ok = step_reader.TransferRoot(i)
i += 1
if (not ok):
return { 'error': 'Failed to find a suitable root for the STEP file' }
number_of_shapes = step_reader.NbShapes()
if (number_of_shapes > 1):
return { 'error': 'Cannot handle more than one shape in a file' }
aResShape = step_reader.Shape(1)
# Units
length = TColStd_SequenceOfAsciiString()
angles = TColStd_SequenceOfAsciiString()
solid_angles = TColStd_SequenceOfAsciiString()
step_reader.FileUnits(length, angles, solid_angles)
# bounding box
bbox = Bnd_Box()
deflection = 0.01
BRepMesh_IncrementalMesh(aResShape, deflection)
brepbndlib_Add(aResShape, bbox)
xmin, ymin, zmin, xmax, ymax, zmax = bbox.Get()
bounding_box = calculate_bnd_box(bbox)
bounding_cylinder = calculate_bounding_cylinder(aResShape, bounding_box)
result = {'bounding_box_volume': bounding_box['volume'],
'bounding_box_x_length': bounding_box['x_length'],
'bounding_box_y_length': bounding_box['y_length'],
'bounding_box_z_length': bounding_box['z_length'],
'mesh_volume': calculate_volume(aResShape),
'mesh_surface_area': None,
'cylinder_volume': bounding_cylinder['cylinder_volume'],
'cylinder_diameter': bounding_cylinder['radius'] * 2,
'cylinder_length': bounding_cylinder['height'],
'convex_hull_volume': None,
'euler_number': None,
'units': length.First().ToCString().lower()}
else:
result = { 'error': 'Cannot read file' }
return result
def usage():
    print('volume.py -f <inputfile>')
sys.exit(0)
def main(argv):
try:
opts, args = getopt.getopt(argv, "hf:", ["file="])
except getopt.GetoptError:
usage()
filename = None
for opt, arg in opts:
if opt in ("-f", "--file"):
filename = arg
if filename != None:
try:
result = analyze_file(filename)
except RuntimeError as e:
            result = { 'error': str(e), 'filename': filename }
print(json.dumps(result))
else:
result = { 'error': 'No filename provided' }
print(json.dumps(result))
sys.exit(0)
if __name__ == '__main__':
main(sys.argv[1:])
```
|
{
"source": "jdlubrano/dep-appearances",
"score": 2
}
|
#### File: src/dep_appearances/appearances_report.py
```python
import os
import pdb
import pipfile
from dep_appearances.dependency import Dependency
from dep_appearances.import_statement import ImportStatement
class AppearancesReport:
def __init__(self, project_root):
self.project_root = os.path.abspath(project_root)
self.dependencies = []
def compile(self):
self.dependencies = self._dependencies_with_imports()
return self
def unused_dependencies(self):
unused_deps = [dep for dep in self.dependencies if dep.unused()]
return sorted(unused_deps, key=lambda dep: dep.name)
def underused_dependencies(self, usage_threshold):
deps = [dep for dep in self.dependencies if dep.underused(usage_threshold=usage_threshold)]
return sorted(deps, key=lambda dep: dep.name)
def _dependencies_with_imports(self):
dependencies = self._extract_dependencies()
import_statements = self._extract_import_statements()
for dep in dependencies:
for import_statement in import_statements:
if dep.imported_by(import_statement):
dep.add_import_statement(import_statement)
return dependencies
def _extract_dependencies(self):
dependencies = []
pfile = pipfile.load(os.path.join(self.project_root, "Pipfile"))
for package in pfile.data["default"].keys():
dependencies.append(package)
for package in pfile.data["develop"].keys():
dependencies.append(package)
return [Dependency(dependency) for dependency in dependencies]
def _extract_import_statements(self):
import_statements = []
for root, _dirs, files in os.walk(self.project_root):
if root.startswith(os.path.abspath(f"{self.project_root}/.venv")):
continue
for file in files:
if os.path.splitext(file)[1].lower() == ".py":
import_statements += self._extract_imports_from_py(os.path.join(root, file))
return import_statements
def _extract_imports_from_py(self, file):
imports = []
with open(file) as f:
line_number = 0
for line in f:
line_number += 1
if ImportStatement.test(line):
import_statement = ImportStatement(
source_file=file,
source_code=line,
line_number=line_number
)
imports.append(import_statement)
return imports
```
#### File: src/dep_appearances/cli.py
```python
from argparse import ArgumentParser
import os
import pdb
import sys
from dep_appearances.appearances_report import AppearancesReport
def main():
parser = ArgumentParser(description='Find dependencies that are unused and underused in your codebase.')
parser.add_argument(
'project_root',
metavar='PATH',
type=str,
nargs='?',
default=os.getcwd(),
help="The path to your project's root (defaults to your current working directory)"
)
parser.add_argument(
'--underused_threshold',
type=int,
default=2,
help='The threshold to set for marking dependencies as underused (default: 2)'
)
args = parser.parse_args()
report = AppearancesReport(project_root=args.project_root).compile()
unused_dependencies = report.unused_dependencies()
underused_dependencies = report.underused_dependencies(usage_threshold=args.underused_threshold)
if len(unused_dependencies) == 0:
print("No unused dependencies found")
else:
print("Unused dependencies:")
for dep in unused_dependencies:
print(f"\t{dep.name}")
print("")
if len(underused_dependencies) == 0:
print("No underused dependencies found")
else:
print(f"Underused dependencies (usage threshold = {args.underused_threshold}):")
for dep in underused_dependencies:
print(f"\t{dep.name}\n\t\timported in:")
for import_statement in dep.import_statements:
print(f"\t\t{os.path.relpath(import_statement.source_file)}:{import_statement.line_number}")
print("")
if __name__ == "__main__":
main()
```
#### File: src/dep_appearances/import_statement.py
```python
import re
class ImportStatement:
IMPORT_REGEX = re.compile(r'^\s*import\s+(\w+)|^\s*from\s+(\w+)(\.\w+)*\s+import')
@classmethod
def test(cls, source_code):
return cls.IMPORT_REGEX.match(source_code)
def __init__(self, source_file, source_code, line_number):
self.source_file = source_file
self.source_code = source_code
self.line_number = line_number
def package_name(self):
match = self.IMPORT_REGEX.match(self.source_code)
if match is None:
return None
return match.group(1) or match.group(2)
```
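A quick demonstration of what the regex above does and does not capture (standard library only; no assumptions beyond the class as defined):
```python
# The regex captures the top-level package from both import styles.
examples = [
    'import numpy',                 # -> 'numpy'
    'from os.path import join',     # -> 'os'
    '    import json  # indented',  # -> 'json'
    'x = "import nothing"',         # no match -> None
]
for line in examples:
    stmt = ImportStatement(source_file='demo.py', source_code=line, line_number=1)
    print(repr(line), '->', stmt.package_name())
```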
|
{
"source": "jdlubrano/step_maker_api",
"score": 3
}
|
#### File: step_maker_api/app/dimension.py
```python
class Dimension:
CONVERSION_FACTORS = { 'in': 25.4, 'cm': 10, 'mm': 1 }
def __init__(self, value, units):
self.value = float(value)
self.units = units
def to_string(self):
return str(self.value) + self.units
def in_mm(self):
return Dimension.CONVERSION_FACTORS[self.units] * self.value
```
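A brief usage sketch of the converter above:
```python
d = Dimension(2, 'in')
print(d.to_string())   # "2.0in"
print(d.in_mm())       # 50.8 (2 inches * 25.4 mm/inch)
```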
|
{
"source": "jdm7dv/Microsoft-Biology-Foundation",
"score": 3
}
|
#### File: Samples/Python/MBFDebug.py
```python
import clr
import os
from os import path
from System.IO import File
from System.IO import Directory
import sys
build_dir = "bin\\Debug"
def deploy_file(filename):
"Copies a file to the bin\Debug folder, replacing any file of the same name already there."
new_filename = build_dir + "\\" + filename[filename.rfind("\\") + 1 :]
try:
if File.Exists(new_filename):
File.Delete(new_filename)
except:
# don't worry about replacing read-only files that we can't delete
pass
else:
File.Copy(filename, new_filename)
try:
# make build dir if needed
if not path.exists(build_dir):
os.mkdir(build_dir)
# get list of files to put in dll
filenames = os.listdir("MBFIronPython")
for i in range(0, len(filenames)):
filenames[i] = "MBFIronPython\\" + filenames[i]
# build dll
clr.CompileModules(build_dir + "\\MBF.IronPython.dll", *filenames)
# copy demo file
deploy_file("MBFMenu.py")
deploy_file("..\\..\\..\\Build\\Binaries\\Debug\\MBF.dll")
deploy_file("..\\..\\..\\Build\\Binaries\\Debug\\MBF.WebServiceHandlers.dll")
# copy test file
deploy_file("Data\\Small_Size.gbk")
# run the demo
import MBFMenu
except:
print "An error occurred: " + `sys.exc_info()` + "\n"
raw_input("Press enter to exit: ")
```
#### File: Python/MBFIronPython/IO.py
```python
import os
import Util
Util.add_mbfdotnet_reference("MBF")
from MBF.IO import *
from System.IO import *
def open_seq(filename):
"Parses a sequence file, returning a list of ISequence objects."
filename = filename.Trim('"').Trim('\'')
if not File.Exists(filename):
print "\nFile does not exists: " + filename
return None
parser = SequenceParsers.FindParserByFile(filename)
if parser == None:
print "\nInvalid file extension: " + filename
return None
return parser.Parse(filename)
def open_all_seq(dir_name):
"Parses all of the sequence files in a directory, returning a list of ISequence objects."
seq_list = []
    for filename in os.listdir(dir_name):
        seqs = open_seq(os.path.join(dir_name, filename))
        if seqs is not None:
            seq_list.extend(seqs)
return seq_list
def save_seq(seq_list, filename):
"Saves a list of ISequence objects to file."
filename = filename.Trim('"').Trim('\'')
formatter = SequenceFormatters.FindFormatterByFile(filename)
if formatter == None:
raise Exception, "Failed to recognize sequence file extension: " + filename
formatter.Format(seq_list, filename)
def save_all_seq(seq_list, dir_name, file_extension):
"Saves a list of ISequence objects to separate files."
for seq in seq_list:
        save_seq([seq], dir_name + "\\" + seq.ID + file_extension)
```
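A usage sketch under IronPython, assuming the MBF assemblies are deployed as in MBFDebug.py and a GenBank file is present in the working directory:
```python
# Round-trip a sequence file through the MBF parsers/formatters.
seqs = open_seq("Small_Size.gbk")
if seqs is not None:
    for seq in seqs:
        print(seq.ID)
    # save_seq expects a list of ISequence objects plus a destination path
    save_seq(seqs, "Small_Size_copy.gbk")
```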
|
{
"source": "jdm7dv/visual-studio",
"score": 2
}
|
#### File: Python/audio.python/media.py
```python
from System import *
from System.Windows import *
from System.Windows.Controls import *
from System.Windows.Input import *
from System.Windows.Media import *
from System.Windows.Media.Imaging import * # for bitmap
from System.Collections.Generic import *
from System.Windows.Threading import DispatcherTimer
from System.Windows.Browser import *
from System.IO import StringReader
import clr
clr.AddReference("System.Xml")
from System.Xml import *
_isScrubberLocked = False
_positionTimer = None
_loop = False
_src = ""
_poster = ""
_volume = 0.5
_width = 300;
_height = 150
_autoPlay = True
_muted = False
_controls = False
_autoBuffer = False
_ended = False
class SourceElement(object):
src = ""
type = ""
title = ""
artist = ""
class MediaInfo(object):
def __init__(self, xml):
self._xml = xml
self.Sources = List[String]()
self.Video = True
self.Loop = True
self.Autoplay = True
self.Volume = .5
self.Width = 300
self.Height = 150
self.Poster = ""
self.Controls = True
self.Autobuffer = True
self.Muted = False
reader = XmlReader.Create(StringReader(self._xml))
while (reader.Read()):
if reader.Name == "video":
self.Video = reader.ReadElementContentAsBoolean()
elif reader.Name == "width":
self.Width = reader.ReadElementContentAsDouble()
elif reader.Name == "height":
self.Height = reader.ReadElementContentAsDouble()
elif reader.Name == "autoplay":
                self.Autoplay = reader.ReadElementContentAsBoolean()
elif reader.Name == "volume":
self.Volume = reader.ReadElementContentAsDouble()
elif reader.Name == "poster":
self.Poster = reader.ReadElementContentAsString()
elif reader.Name == "loop":
self.Loop = reader.ReadElementContentAsBoolean()
elif reader.Name == "controls":
self.Controls = reader.ReadElementContentAsBoolean()
elif reader.Name == "autobuffer":
self.Autobuffer = reader.ReadElementContentAsBoolean()
elif reader.Name == "muted":
self.Muted = reader.ReadElementContentAsBoolean()
elif reader.Name == "sources":
item = None
while (reader.Read()):
if reader.Name == "source":
item = reader.ReadElementContentAsString()
self.Sources.Add(item)
class SelectableSourceElementList (List[SourceElement]):
LastItem = True
def __init__(self):
self._SelectedIndex = 0
def Next(self):
self._SelectedIndex = self._SelectedIndex + 1
        if self._SelectedIndex + 1 > self.Count:
            self._SelectedIndex = 0
            self.LastItem = True
        else:
            self.LastItem = False
def Previous(self):
self._SelectedIndex = self._SelectedIndex - 1
if self._SelectedIndex < 0:
self._SelectedIndex = self.Count - 1
def SetSelectedItem(self, value):
pass
def GetSelectedItem(self):
if self[self._SelectedIndex] != None:
return self[self._SelectedIndex]
else:
return None
SelectedItem = property(GetSelectedItem, SetSelectedItem)
def SetSelectedIndex(self, value):
self._SelectedIndex = value
def GetSelectedIndex(self):
return self._SelectedIndex
SelectedIndex = property(GetSelectedIndex, SetSelectedIndex)
def ConvertHexToColor(hexColor):
c = Color()
c = Color.FromArgb(
Convert.ToUInt32(hexColor.Substring(1, 2), 16),
Convert.ToUInt32(hexColor.Substring(3, 2), 16),
Convert.ToUInt32(hexColor.Substring(5, 2), 16),
Convert.ToUInt32(hexColor.Substring(7, 2), 16))
return c
def DomGetFullPathToDir():
content = ""
try:
path = HtmlPage.Document.DocumentUri.ToString()
segments = path.Split('/')
content = path.Replace(segments[segments.Length - 1], "")
except:
pass
return content
def EnsureAbsoluteFilePath(initialPath):
if String.IsNullOrEmpty(initialPath):
return String.Empty
if initialPath.ToLower().Contains("http://"):
return initialPath
else:
s = DomGetFullPathToDir()
return s + initialPath
def Opened(s, e):
me.Player.Play()
def _Play():
if me.Player.CurrentState != MediaElementState.Playing:
me.Poster.Visibility = Visibility.Collapsed
if MediaCollection.Count > 0:
if me.Player.Position.TotalSeconds == 0: # only queue up next video if the current one is finished playing
me.Player.Source = Uri(MediaCollection.SelectedItem.src, UriKind.Absolute)
me.Caption.Text = ""
if me.Player.AutoPlay == False:
me.Player.MediaOpened += Opened
else:
if me.Player.CurrentState != MediaElementState.Playing: # don't try to play if it's already playing
me.Player.Play()
def _Stop():
    me.Poster.Visibility = Visibility.Visible  # make any present poster visible
def Next():
me.Player.Pause()
me.Player.Position = TimeSpan(0, 0, 0)
if MediaCollection.Count > 1:
MediaCollection.Next()
_Play()
def Previous():
me.Player.Pause()
me.Player.Position = TimeSpan(0, 0, 0)
if MediaCollection.Count > 1:
MediaCollection.Previous()
_Play()
# event handlers
def positionTimer_Tick(s,e):
if me.Player.Position.TotalSeconds > 0 and not _isScrubberLocked:
me.Scrubber.Value = Convert.ToDouble(me.Player.Position.Ticks) / Convert.ToDouble(me.Player.NaturalDuration.TimeSpan.Ticks)
me.MsgCurrentTime.Text = String.Format("{0:00}:{1:00}:{2:00}",
me.Player.Position.Hours, me.Player.Position.Minutes, me.Player.Position.Seconds)
def Player_MediaOpened(s, e):
me.Scrubber.Value = 0
me.MsgTotalTime.Text = String.Format("{0:00}:{1:00}:{2:00}",
me.Player.NaturalDuration.TimeSpan.Hours,
me.Player.NaturalDuration.TimeSpan.Minutes,
me.Player.NaturalDuration.TimeSpan.Seconds)
def Player_CurrentStateChanged(s, e):
if me.Player.CurrentState == MediaElementState.Playing:
me.ShowPauseButton.Begin()
_positionTimer.Start()
elif me.Player.CurrentState == MediaElementState.Paused:
me.ShowPlayButton.Begin()
_positionTimer.Stop()
if me.Player.CurrentState == MediaElementState.Stopped:
me.ShowPlayButton.Begin()
_positionTimer.Stop()
me.Scrubber.Value = 0
def Player_DownloadProgressChanged(s, e):
me.DownloadProgressTrack.RenderTransform.ScaleX = me.Player.DownloadProgress
def Scrubber_MouseLeave(s, e):
global _isScrubberLocked
_isScrubberLocked = False
def Scrubber_MouseMove(s, e):
global _isScrubberLocked
_isScrubberLocked = True
def Scrubber_MouseLeftButtonUp(s, e):
global _isScrubberLocked
me.Player.Position = TimeSpan.FromSeconds(me.Scrubber.Value * me.Player.NaturalDuration.TimeSpan.TotalSeconds)
_isScrubberLocked = False
def BtnPlayPause_MouseLeftButtonUp(s, e):
if me.Player.CurrentState == MediaElementState.Playing:
me.Player.Pause()
elif me.Player.CurrentState == MediaElementState.Paused:
_Play()
elif me.Player.CurrentState == MediaElementState.Stopped:
_Play()
def Poster_MouseLeftButtonDown(s, e):
_Play()
def BtnPlayPause_MouseLeave(s, e):
me.PlayPauseSymbol_MouseLeave.Begin()
def BtnPlayPause_MouseEnter(s, e):
me.PlayPauseSymbol_MouseEnter.Begin()
def VolumeSlider_ValueChanged(s, e):
me.Player.Volume = me.VolumeSlider.Value
def ShowControlPanel_Completed(s, e):
me.ControlPanelTimer.Begin()
def ShowVolumeSlider_Completed(s, e):
me.VolumeSliderTimer.Begin()
def VolumeSliderCanvas_MouseMove(s, e):
me.VolumeSliderTimer.Begin()
def VolumeSliderTimer_Completed(s, e):
me.HideVolumeSlider.Begin()
def ControlPanelTimer_Completed(s, e):
me.HideControlPanel.Begin()
def BtnVolume_MouseLeftButtonUp(s, e):
me.ShowVolumeSlider.Begin()
def Player_MouseMove(s, e):
if settings.Video:
me.ShowControlPanel.Begin()
def Player_MouseLeave(s, e):
me.ControlPanelTimer.Begin()
def BtnVolume_MouseEnter(s, e):
me.VolumeSymbol_MouseEnter.Begin()
def BtnVolume_MouseLeave(s, e):
me.VolumeSymbol_MouseLeave.Begin()
def NextSymbol_MouseEnter(s, e):
if mediaCount > 1:
me.NextSymbol_MouseEnter.Begin()
def NextSymbol_MouseLeave(s, e):
if mediaCount > 1:
me.NextSymbol_MouseLeave.Begin()
def PreviousSymbol_MouseEnter(s, e):
if mediaCount > 1:
me.PreviousSymbol_MouseEnter.Begin()
def PreviousSymbol_MouseLeave(s, e):
if mediaCount > 1:
me.PreviousSymbol_MouseLeave.Begin()
def FullSymbol_MouseEnter(s, e):
me.FullSymbol_MouseEnter.Begin()
def FullSymbol_MouseLeave(s, e):
me.FullSymbol_MouseLeave.Begin()
def Player_MarkerReached(s, e):
me.Caption.Text = e.Marker.Text
def FullSymbol_MouseLeftButtonDown(s, e):
Application.Current.Host.Content.IsFullScreen = not Application.Current.Host.Content.IsFullScreen
me.Width = Application.Current.Host.Content.ActualWidth
me.Height = Application.Current.Host.Content.ActualHeight
def BrowserHost_Resize(s, e):
me.Width = Application.Current.Host.Content.ActualWidth
me.Height = Application.Current.Host.Content.ActualHeight
def Player_MediaEnded(s, e):
me.Player.Position = TimeSpan(0, 0, 0)
me.Poster.Visibility = Visibility.Collapsed
if MediaCollection.Count > 0: # is there a playlist?
if _loop: # just keep looping
MediaCollection.Next()
_Play()
else:
MediaCollection.Next()
if MediaCollection.SelectedIndex > 0: # check to see if we've finished the playlist
_Play()
else:
_Stop()
elif _loop:
_Play()
else:
_Stop()
def Player_MediaFailed(s, e):
me.Caption.Text = "Issue loading file: " + MediaCollection.SelectedItem.src
def GoNext(s, e):
if mediaCount > 1:
Next()
def GoPrevious(s, e):
if mediaCount > 1:
Previous()
# if XAML was not loaded do not process further
if me != None:
# register for events
me.Player.MediaOpened += Player_MediaOpened
me.Player.CurrentStateChanged += Player_CurrentStateChanged
me.Player.DownloadProgressChanged += Player_DownloadProgressChanged
me.Player.MarkerReached += Player_MarkerReached
me.Player.MediaEnded += Player_MediaEnded
me.Player.MediaFailed += Player_MediaFailed
me.Player.MouseMove += Player_MouseMove
me.Scrubber.MouseLeftButtonUp += Scrubber_MouseLeftButtonUp
me.Scrubber.MouseMove += Scrubber_MouseMove
me.Scrubber.MouseLeave += Scrubber_MouseLeave
me.BtnPlayPause.MouseEnter += BtnPlayPause_MouseEnter
me.BtnPlayPause.MouseLeave += BtnPlayPause_MouseLeave
me.BtnPlayPause.MouseLeftButtonUp += BtnPlayPause_MouseLeftButtonUp
me.BtnVolume.MouseEnter += BtnVolume_MouseEnter
me.BtnVolume.MouseLeave += BtnVolume_MouseLeave
me.BtnVolume.MouseLeftButtonUp += BtnVolume_MouseLeftButtonUp
me.VolumeSlider.ValueChanged += VolumeSlider_ValueChanged
me.ShowVolumeSlider.Completed += ShowVolumeSlider_Completed
me.VolumeSliderTimer.Completed += VolumeSliderTimer_Completed
me.VolumeSliderCanvas.MouseMove += VolumeSliderCanvas_MouseMove
me.ControlPanel.MouseMove += Player_MouseMove
me.ShowControlPanel.Completed += ShowControlPanel_Completed
me.Player.MouseLeave += Player_MouseLeave
me.ControlPanelTimer.Completed += ControlPanelTimer_Completed
me.NextSymbol.MouseEnter += NextSymbol_MouseEnter
me.NextSymbol.MouseLeave += NextSymbol_MouseLeave
me.NextSymbol.MouseLeftButtonDown += GoNext
me.PreviousSymbol.MouseLeftButtonDown += GoPrevious
me.PreviousSymbol.MouseEnter += PreviousSymbol_MouseEnter
me.PreviousSymbol.MouseLeave += PreviousSymbol_MouseLeave
me.Poster.MouseLeftButtonDown += Poster_MouseLeftButtonDown
me.FullSymbol.MouseLeftButtonDown += FullSymbol_MouseLeftButtonDown
me.FullSymbol.MouseEnter += FullSymbol_MouseEnter
me.FullSymbol.MouseLeave += FullSymbol_MouseLeave
Application.Current.Host.Content.Resized += BrowserHost_Resize
# set to True if you want to override the element colors defined in the XAML
if False:
# must be ARGB format
ButtonOffHexValue = "#ffbbbbbb" # color of buttons when mouse is not over
ButtonOverHexValue = "#ffeeeeee" # color of buttons when mouse is hovering
PanelBackgroundHexValue = "#66ffffff" # The control panel background color
TextColorHexValue = "#ff808080" # the color of the timecode and caption
MediaBackDropHexValue = "#ff000000" # the color of the overall media player background
# set colors
fillColor = SolidColorBrush(ConvertHexToColor(ButtonOffHexValue))
me.PlaySymbol.Fill = fillColor
me.PauseSymbol.Fill = fillColor
me.SpeakerShape.Fill = fillColor
me.VolumeShape1.Stroke = fillColor
me.VolumeShape2.Stroke = fillColor
me.NextA.Fill = fillColor
me.NextB.Fill = fillColor
me.PreviousA.Fill = fillColor
me.PreviousB.Fill = fillColor
me.FullA.Fill = fillColor
me.FullB.Fill = fillColor
me.FullC.Fill = fillColor
me.FullD.Fill = fillColor
background = SolidColorBrush(ConvertHexToColor(PanelBackgroundHexValue))
me.VolumeSliderBackground.Fill = background
me.ControlPanelBackground.Fill = background
foreground = SolidColorBrush(ConvertHexToColor(TextColorHexValue))
me.MsgCurrentTime.Foreground = foreground
me.Caption.Foreground = foreground
me.TimeDivider.Foreground = foreground
me.MsgTotalTime.Foreground = foreground
backdrop = SolidColorBrush(ConvertHexToColor(MediaBackDropHexValue))
me.MediaBackground.Fill = backdrop
me.LayoutRoot.Background = backdrop
# set the storyboard To values for mouseleave events
buttonOffHexValue = ConvertHexToColor(ButtonOffHexValue)
me.Stop_MouseLeaveValue.To = buttonOffHexValue
me.Play_MouseLeaveValue.To = buttonOffHexValue
me.Pause_MouseLeaveValue.To = buttonOffHexValue
me.Volume_MouseLeaveValue.To = buttonOffHexValue
me.Volume1_MouseLeaveValue.To = buttonOffHexValue
me.Volume2_MouseLeaveValue.To = buttonOffHexValue
me.NextA_MouseLeaveValue.To = buttonOffHexValue
me.NextB_MouseLeaveValue.To = buttonOffHexValue
me.PreviousA_MouseLeaveValue.To = buttonOffHexValue
me.PreviousB_MouseLeaveValue.To = buttonOffHexValue
me.FullA_MouseLeaveValue.To = buttonOffHexValue
me.FullB_MouseLeaveValue.To = buttonOffHexValue
me.FullC_MouseLeaveValue.To = buttonOffHexValue
me.FullD_MouseLeaveValue.To = buttonOffHexValue
# set the storyboard To values for mouseenter events
buttonOverHexValue = ConvertHexToColor(ButtonOverHexValue)
me.Stop_MouseEnterValue.To = buttonOverHexValue
me.Play_MouseEnterValue.To = buttonOverHexValue
me.Pause_MouseEnterValue.To = buttonOverHexValue
me.Volume_MouseEnterValue.To = buttonOverHexValue
me.Volume1_MouseEnterValue.To = buttonOverHexValue
me.Volume2_MouseEnterValue.To = buttonOverHexValue
me.NextA_MouseEnterValue.To = buttonOverHexValue
me.NextB_MouseEnterValue.To = buttonOverHexValue
me.PreviousA_MouseEnterValue.To = buttonOverHexValue
me.PreviousB_MouseEnterValue.To = buttonOverHexValue
me.FullA_MouseEnterValue.To = buttonOverHexValue
me.FullB_MouseEnterValue.To = buttonOverHexValue
me.FullC_MouseEnterValue.To = buttonOverHexValue
me.FullD_MouseEnterValue.To = buttonOverHexValue
# UI update timer
_positionTimer = DispatcherTimer()
_positionTimer.Interval = TimeSpan(0, 0, 0, 0, 100)
_positionTimer.Tick += positionTimer_Tick
_positionTimer.Start()
#get XML from page DOM
name = Application.Current.Host.InitParams["xamlid"].Split("-")[0]
xmlSettings = HtmlPage.Document.GetElementById(name + "-settings").text
settings = MediaInfo(xmlSettings)
# assign values declared in markup
_loop = settings.Loop
me.Width = settings.Width
me.Height = settings.Height
if settings.Poster != "":
me.Poster.Source = BitmapImage(Uri(EnsureAbsoluteFilePath(settings.Poster), UriKind.Absolute))
ap = settings.Autoplay
me.Player.AutoPlay = ap
if not ap:
me.Poster.Visibility = Visibility.Visible
me.Player.Volume = settings.Volume
me.Player.IsMuted = settings.Muted
if not settings.Controls:
me.ControlPanel.Visibility = Visibility.Collapsed
if not settings.Video:
me.Player.Visibility = Visibility.Collapsed
me.LayoutRoot.RowDefinitions[0].Height = GridLength(0)
me.MediaBackground.Visibility = Visibility.Collapsed
me.Poster.Visibility = Visibility.Collapsed
me.FullSymbol.Visibility = Visibility.Collapsed
me.SplitterCD.Width = GridLength(0)
me.FullCD.Width = GridLength(0)
me.ControlPanel.Opacity = 1
MediaCollection = SelectableSourceElementList()
for i in range( 0, settings.Sources.Count):
s = SourceElement()
s.src = EnsureAbsoluteFilePath(settings.Sources[i])
MediaCollection.Add(s)
mediaCount = settings.Sources.Count
if mediaCount == 1:
me.PreviousSymbol.Cursor = Cursors.Arrow
me.PreviousA.Fill = SolidColorBrush(ConvertHexToColor("#FF333333"))
me.PreviousB.Fill = SolidColorBrush(ConvertHexToColor("#FF333333"))
me.NextSymbol.Cursor = Cursors.Arrow
me.NextA.Fill = SolidColorBrush(ConvertHexToColor("#FF333333"))
me.NextB.Fill = SolidColorBrush(ConvertHexToColor("#FF333333"))
me.Player.Source = Uri(MediaCollection.SelectedItem.src, UriKind.RelativeOrAbsolute)
```
|
{
"source": "jdmaguire/gstreamer-pravega",
"score": 2
}
|
#### File: plugins/python/example_python_transform_tensorflow.py
```python
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GstVideo', '1.0')
from gi.repository import Gst, GObject, GstBase, GstVideo
import tensorflow as tf
import numpy as np
FIXED_CAPS_SRC = Gst.Caps.from_string('video/x-raw,format=GRAY8,width=[1,2147483647],height=[1,2147483647]')
FIXED_CAPS_SINK = Gst.Caps.from_string('video/x-raw,format=GRAY8,width=[1,2147483647],height=[1,2147483647]')
class ExampleTransform(GstBase.BaseTransform):
__gstmetadata__ = (
'example_python_transform_tensorflow',
'Transform',
'Demonstrates how to run a simple Python Tensorflow transformation on a video',
'<NAME>')
__gsttemplates__ = (Gst.PadTemplate.new("src",
Gst.PadDirection.SRC,
Gst.PadPresence.ALWAYS,
FIXED_CAPS_SRC),
Gst.PadTemplate.new("sink",
Gst.PadDirection.SINK,
Gst.PadPresence.ALWAYS,
FIXED_CAPS_SINK))
def do_set_caps(self, incaps, outcaps):
struct = incaps.get_structure(0)
self.width = struct.get_int("width").value
self.height = struct.get_int("height").value
Gst.info("width=%d, height=%d" % (self.width, self.height))
return True
def do_transform_ip(self, buf):
try:
with buf.map(Gst.MapFlags.READ | Gst.MapFlags.WRITE) as info:
Gst.trace('info=%s, size=%d' % (str(info), info.size))
# Create a NumPy ndarray from the memoryview and modify it in place.
buf_np = np.ndarray(shape=(self.height, self.width), dtype=np.uint8, buffer=info.data)
Gst.trace("buf_np=%s" % (str(buf_np)))
# Create tensors.
t1 = tf.constant(buf_np)
Gst.trace("t1=%s" % (str(t1)))
t2 = t1 / 4
Gst.trace("t2=%s" % (str(t2)))
# Copy tensor to overwrite input/output buffer.
buf_np[:] = t2
return Gst.FlowReturn.OK
except Gst.MapError as e:
Gst.error("Mapping error: %s" % e)
return Gst.FlowReturn.ERROR
GObject.type_register(ExampleTransform)
__gstelementfactory__ = ("example_python_transform_tensorflow", Gst.Rank.NONE, ExampleTransform)
```
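A sketch of exercising the element from Python, assuming this file lives in a directory on `GST_PLUGIN_PATH` and the gst-python plugin loader is installed (plugin discovery details vary by install):
```python
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)
# videotestsrc -> GRAY8 caps -> our transform -> sink; the element name
# matches __gstelementfactory__ above.
pipeline = Gst.parse_launch(
    'videotestsrc num-buffers=30 ! video/x-raw,format=GRAY8 '
    '! example_python_transform_tensorflow ! fakesink'
)
pipeline.set_state(Gst.State.PLAYING)
bus = pipeline.get_bus()
bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE,
                       Gst.MessageType.ERROR | Gst.MessageType.EOS)
pipeline.set_state(Gst.State.NULL)
```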
|
{
"source": "jdmanton/pyOTF",
"score": 2
}
|
#### File: pyOTF/tests/integration_tests.py
```python
from nose.tools import *
import unittest
from pyotf.otf import *
from pyotf.phaseretrieval import *
import numpy as np
class TestHanserPhaseRetrieval(unittest.TestCase):
"""Test for self consistency, generate a pupil with random zernike
coefficients generate a psf and phase retrieve it."""
def setUp(self):
"""Set up the test"""
        # seed the RNG so the coefficients are random but reproducible
np.random.seed(12345)
# model kwargs
self.model_kwargs = dict(
wl=525,
na=1.27,
ni=1.33,
res=100,
size=128,
zrange=[-1000, -500, 0, 250, 1000, 3000],
vec_corr="none",
condition="none",
)
# make the model
model = HanserPSF(**self.model_kwargs)
# extract kr
model._gen_kr()
kr = model._kr
theta = model._phi
# make zernikes (need to convert kr to r where r = 1 when kr is at
# diffraction limit)
r = kr * model.wl / model.na
self.mask = r <= 1
zerns = zernike(r, theta, np.arange(5, 16))
# make fake phase and magnitude coefs
self.pcoefs = np.random.rand(zerns.shape[0])
self.mcoefs = np.random.rand(zerns.shape[0])
self.pupil_phase = (zerns * self.pcoefs[:, np.newaxis, np.newaxis]).sum(0)
self.pupil_mag = (zerns * self.mcoefs[:, np.newaxis, np.newaxis]).sum(0)
self.pupil_mag = self.pupil_mag + model._gen_pupil() * (2.0 - self.pupil_mag.min())
# phase only test
model._gen_psf(self.pupil_mag * np.exp(1j * self.pupil_phase) * model._gen_pupil())
self.PSFi = model.PSFi
# we have to converge really close for this to work.
self.PR_result = retrieve_phase(
self.PSFi, self.model_kwargs, max_iters=200, pupil_tol=0, mse_tol=0, phase_only=False
)
def test_mag(self):
"""Make sure phase retrieval returns same magnitude"""
np.testing.assert_allclose(
fftshift(self.pupil_mag), self.PR_result.mag, err_msg="Mag failed"
)
def test_phase(self):
"""Make sure phase retrieval returns same phase"""
# from the unwrap_phase docs:
# >>> np.std(image_unwrapped - image) < 1e-6 # A constant offset is normal
np.testing.assert_allclose(
(fftshift(self.pupil_phase) - self.PR_result.phase) * self.mask,
0,
err_msg="Phase failed",
)
def test_zernike_modes(self):
"""Make sure the fitted zernike modes agree"""
self.PR_result.fit_to_zernikes(15)
np.testing.assert_allclose(
self.PR_result.zd_result.pcoefs[4:], self.pcoefs, err_msg="Phase coefs failed"
)
np.testing.assert_allclose(
self.PR_result.zd_result.mcoefs[4:], self.mcoefs, err_msg="Mag coefs failed"
)
def test_psf_mse(self):
"""Does the phase retrieved PSF converge to the fake PSF"""
np.testing.assert_allclose(self.PR_result.model.PSFi, self.PSFi)
```
|
{
"source": "jdmartin86/dopamine",
"score": 2
}
|
#### File: agents/sdqn/sdqn_agent_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dopamine.agents.dqn import dqn_agent
from dopamine.agents.sdqn import sdqn_agent
import numpy as np
import tensorflow as tf
slim = tf.contrib.slim
class DominatingQuantileAgentTest(tf.test.TestCase):
def setUp(self):
self._num_actions = 4
self.observation_shape = dqn_agent.OBSERVATION_SHAPE
self.stack_size = dqn_agent.STACK_SIZE
self.ones_state = np.ones(
[1, self.observation_shape, self.observation_shape, self.stack_size])
def _create_test_agent(self, sess):
class MockDominatingQuantileAgent(
sdqn_agent.DominatingQuantileAgent):
def _network_template(self, state, num_quantiles):
# This dummy network allows us to deterministically anticipate that the
# state-action-quantile outputs will be equal to sum of the
# corresponding quantile inputs.
# State/Quantile shapes will be k x 1, (N x batch_size) x 1,
# or (N' x batch_size) x 1.
state_net = slim.flatten(state)
state_net = tf.ones(shape=state_net.shape)
state_net = tf.cast(state_net[:, 0:self.num_actions], tf.float32)
state_net_tiled = tf.tile(state_net, [num_quantiles, 1])
batch_size = state_net.get_shape().as_list()[0]
quantiles_shape = [num_quantiles * batch_size, 1]
quantiles = tf.ones(quantiles_shape)
quantile_net = tf.tile(quantiles, [1, self.num_actions])
quantile_values = state_net_tiled * quantile_net
quantile_values = slim.fully_connected(
quantile_values, self.num_actions, activation_fn=None,
weights_initializer=tf.ones_initializer(),
biases_initializer=tf.zeros_initializer())
return self._get_network_type()(quantile_values=quantile_values,
quantiles=quantiles)
agent = MockDominatingQuantileAgent(
sess=sess,
num_actions=self._num_actions,
ssd_lambda=1.0,
num_samples=3,
num_quantiles=4)
# This ensures non-random action choices (since epsilon_eval = 0.0) and
# skips the train_step.
agent.eval_mode = True
sess.run(tf.global_variables_initializer())
return agent
def testCreateAgentWithDefaults(self):
# Verifies that we can create and train an agent with the default values.
with self.test_session(use_gpu=False) as sess:
agent = sdqn_agent.DominatingQuantileAgent(sess, num_actions=4)
sess.run(tf.global_variables_initializer())
observation = np.ones([84, 84, 1])
agent.begin_episode(observation)
agent.step(reward=1, observation=observation)
agent.end_episode(reward=1)
def testShapes(self):
with self.test_session(use_gpu=False) as sess:
agent = self._create_test_agent(sess)
# Replay buffer batch size:
self.assertEqual(agent._replay.batch_size, 32)
# quantile values, q-values, q-argmax at sample action time:
self.assertEqual(agent._net_outputs.quantile_values.shape[0],
agent.num_quantiles)
self.assertEqual(agent._net_outputs.quantile_values.shape[1],
agent.num_actions)
self.assertEqual(agent._q_values.shape[0], agent.num_actions)
# Check the setting of num_actions.
self.assertEqual(self._num_actions, agent.num_actions)
# input quantiles, quantile values, and output q-values at loss
# computation time.
self.assertEqual(agent._replay_net_quantile_values.shape[0],
agent.num_quantiles * agent._replay.batch_size)
self.assertEqual(agent._replay_net_quantile_values.shape[1],
agent.num_actions)
# num_target_quantile values: (num_quantiles*batch_size, num_actions)
self.assertEqual(agent._replay_net_target_quantile_values.shape[0],
agent.num_quantiles * agent._replay.batch_size)
self.assertEqual(agent._replay_net_target_quantile_values.shape[1],
agent.num_actions)
# num_target_q values: (batch_size, num_actions)
self.assertEqual(agent._replay_net_target_q_values.shape[0],
agent._replay.batch_size)
self.assertEqual(agent._replay_net_target_q_values.shape[1],
agent.num_actions)
# num_reference_quantile values: (num_quantiles*batch_size, num_actions)
self.assertEqual(agent._replay_net_reference_quantile_values.shape[0],
agent.num_quantiles * agent._replay.batch_size)
self.assertEqual(agent._replay_net_reference_quantile_values.shape[1],
agent.num_actions)
def test_q_value_computation(self):
with self.test_session(use_gpu=False) as sess:
agent = self._create_test_agent(sess)
quantiles = np.ones(agent.num_quantiles)
q_value = np.sum(quantiles)
quantiles = quantiles.reshape([agent.num_quantiles, 1])
state = self.ones_state
feed_dict = {agent.state_ph: state}
q_values, q_argmax = sess.run([agent._q_values, agent._q_argmax],
feed_dict)
q_values_arr = np.ones([agent.num_actions]) * q_value
      for i in range(agent.num_actions):
self.assertEqual(q_values[i], q_values_arr[i])
self.assertEqual(q_argmax, 0)
q_values_target = sess.run(agent._replay_net_target_q_values, feed_dict)
batch_size = agent._replay.batch_size
      for i in range(batch_size):
        for j in range(agent.num_actions):
self.assertEqual(q_values_target[i][j], q_values[j])
def test_replay_quantile_value_computation(self):
with self.test_session(use_gpu=False) as sess:
agent = self._create_test_agent(sess)
replay_quantile_vals, replay_target_quantile_vals = sess.run(
[agent._replay_net_quantile_values,
agent._replay_net_target_quantile_values])
batch_size = agent._replay.batch_size
replay_quantile_vals = replay_quantile_vals.reshape([
agent.num_quantiles, batch_size, agent.num_actions])
replay_target_quantile_vals = replay_target_quantile_vals.reshape([
agent.num_quantiles, batch_size, agent.num_actions])
      for i in range(agent.num_quantiles):
        for j in range(agent._replay.batch_size):
self.assertEqual(replay_quantile_vals[i][j][0], agent.num_actions)
      for i in range(agent.num_quantiles):
        for j in range(agent._replay.batch_size):
self.assertEqual(replay_target_quantile_vals[i][j][0],
agent.num_actions)
if __name__ == '__main__':
tf.test.main()
```
|
{
"source": "jdmartinez36/azure-batch-cli-extensions",
"score": 2
}
|
#### File: samples/sdk/ffmpeg.py
```python
import os
import json
import time
import sys
import datetime
from azure.common.credentials import ServicePrincipalCredentials
import azext.batch as batch
from azext.batch import models, operations
OUTPUT_CONTAINER_SAS = ""
BATCH_ENDPOINT = ""
BATCH_ACCOUNT = ""
SUBSCRIPTION_ID = ""
BATCH_CLIENT_ID = ""
BATCH_SECRET = ""
BATCH_TENANT = ""
BATCH_RESOURCE = "https://batch.core.windows.net/"
SAMPLE_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if __name__ == '__main__':
# Authentication.
# Note that providing credentials and subscription ID is not required
# if the Azure CLI is installed and already authenticated.
creds = ServicePrincipalCredentials(
client_id=BATCH_CLIENT_ID,
secret=BATCH_SECRET,
tenant=BATCH_TENANT,
resource=BATCH_RESOURCE
)
# Setup client
client = batch.BatchExtensionsClient(
credentials=creds,
base_url=BATCH_ENDPOINT,
batch_account=BATCH_ACCOUNT,
subscription_id=SUBSCRIPTION_ID)
# Setup test input data
input_data = os.path.join(SAMPLE_DIR, 'ffmpeg', 'data')
filegroup = 'music-data'
client.file.upload(input_data, filegroup)
## Create pool from template
pool_template = os.path.join(SAMPLE_DIR, 'ffmpeg', 'pool.json')
pool_json = client.pool.expand_template(pool_template)
pool_param = operations.ExtendedPoolOperations.poolparameter_from_json(pool_json)
client.pool.add(pool_param)
# Create task-per-file job from template file with json parameters
job_template = os.path.join(SAMPLE_DIR, 'ffmpeg', 'job.perFile.json')
parameters = {
"jobId": {
"value": "ffmpeg-task-per-file-test"
},
"inputFileGroup": {
"value": filegroup
},
"outputFileStorageUrl": {
"value": OUTPUT_CONTAINER_SAS
},
"poolId": {
"value": pool_param.properties.id
}
}
job_def = client.job.expand_template(job_template, parameters)
job_param = operations.ExtendedJobOperations.jobparameter_from_json(job_def)
client.job.add(job_param)
# Create parametric sweep job using models
job_id = "ffmpeg-parametric-sweep-test"
task_factory = models.ParametricSweepTaskFactory(
parameter_sets=[models.ParameterSet(start=1, end=5)],
repeat_task=models.RepeatTask(
command_line="ffmpeg -y -i sample{0}.mp3 -acodec libmp3lame output.mp3",
resource_files=[models.ExtendedResourceFile(source=models.FileSource(file_group=filegroup))],
output_files=[models.OutputFile(
file_pattern="output.mp3",
destination=models.ExtendedOutputFileDestination(
auto_storage=models.OutputFileAutoStorageDestination(job_id, path="audio{0}.mp3")),
upload_options=models.OutputFileUploadOptions(models.OutputFileUploadCondition.task_success))],
package_references=[models.AptPackageReference(id="ffmpeg")]))
job = models.ExtendedJobParameter(
id=job_id,
pool_info=models.PoolInformation(pool_id=pool_param.properties.id),
constraints=models.JobConstraints(
max_wall_clock_time=datetime.timedelta(hours=5),
max_task_retry_count=1),
on_all_tasks_complete = models.OnAllTasksComplete.terminate_job,
task_factory=task_factory)
client.job.add(job)
# Wait for job to complete and download outputs from file group.
while True:
time.sleep(15)
job = client.job.get(job_id)
print("Watching job: {}".format(job.state))
if job.state == models.JobState.completed:
client.file.download(SAMPLE_DIR, job_id)
break
```
#### File: automation/setup/install_modules.py
```python
import sys
import os
import subprocess
import automation.utilities.path as automation_path
INSTALL_COMMAND = 'python -m pip install -e {}'
def install_modules():
    all_modules = automation_path.get_command_modules_paths()
print('Installing command modules')
print('Modules: {}'.format(', '.join(name for name, _ in all_modules)))
failures = []
for name, path in all_modules:
try:
subprocess.check_call(INSTALL_COMMAND.format(path).split())
except subprocess.CalledProcessError as err:
# exit code is not zero
failures.append('Failed to install {}. Error message: {}'.format(name, err.output))
for f in failures:
print(f)
return not any(failures)
if __name__ == '__main__':
sys.exit(0 if install_modules() else 1)
```
#### File: automation/style/run.py
```python
import argparse
import multiprocessing
import os
import os.path
import sys
from subprocess import call
from distutils.sysconfig import get_python_lib
import automation.utilities.path as automation_path
def run_pylint():
print('\n\nRun pylint')
modules = [os.path.join(automation_path.get_repo_root(), 'azext')]
modules.append(os.path.join(automation_path.get_repo_root(), 'batch-cli-extensions', 'azext_batch'))
modules_list = ' '.join(modules)
print(modules_list)
arguments = '{} --rcfile={} -j {} -r n -d I0013'.format(
modules_list,
os.path.join(automation_path.get_repo_root(), 'pylintrc'),
multiprocessing.cpu_count())
return_code = call(('python -m pylint ' + arguments).split())
if return_code:
print('Pylint failed')
else:
print('Pylint passed')
return return_code
def run_pep8():
print('\n\nRun flake8 for PEP8 compliance')
    # lint the same modules that run_pylint checks (MODULES was undefined)
    modules = [os.path.join(automation_path.get_repo_root(), 'azext'),
               os.path.join(automation_path.get_repo_root(), 'batch-cli-extensions', 'azext_batch')]
    modules_list = ' '.join(modules)
print(modules_list)
command = 'flake8 --statistics --append-config={} {}'.format(
os.path.join(automation_path.get_repo_root(), '.flake8'), modules_list)
return_code = call(command.split())
if return_code:
print('Flake8 failed')
else:
print('Flake8 passed')
return return_code
if __name__ == '__main__':
parser = argparse.ArgumentParser('Code style tools')
parser.add_argument('--ci', action='store_true', help='Run in CI mode')
parser.add_argument('--pep8', dest='suites', action='append_const', const='pep8',
help='Run flake8 to check PEP8')
parser.add_argument('--pylint', dest='suites', action='append_const', const='pylint',
help='Run pylint')
args = parser.parse_args()
if args.ci:
# Run pylint on all modules
return_code_sum = run_pylint()
sys.exit(return_code_sum)
if not args.suites or not any(args.suites):
return_code_sum = run_pylint()
else:
return_code_sum = 0
if 'pep8' in args.suites:
return_code_sum += run_pep8()
if 'pylint' in args.suites:
return_code_sum += run_pylint()
sys.exit(return_code_sum)
```
#### File: azure-batch-cli-extensions/scripts/dev_setup.py
```python
from __future__ import print_function
import sys
import os
from subprocess import check_call, CalledProcessError
root_dir = os.path.abspath(os.path.join(os.path.abspath(__file__), '..', '..'))
def exec_command(command):
try:
print('Executing: ' + command)
check_call(command.split(), cwd=root_dir)
print()
except CalledProcessError as err:
print(err, file=sys.stderr)
sys.exit(1)
print('Running dev setup...')
print(os.environ)
print('Root directory \'{}\'\n'.format(root_dir))
# install general requirements
exec_command('pip install -r requirements.txt')
# install to edge build of azure-cli
exec_command('pip install --pre azure-cli --extra-index-url https://azurecliprod.blob.core.windows.net/edge --no-cache-dir')
# upgrade to latest azure-batch
exec_command('pip install --upgrade azure-batch')
# install automation package
exec_command('pip install -e ./scripts')
# install reference to extension module package
exec_command('pip install -e {}'.format(root_dir))
exec_command('pip install --upgrade --target ./.azure/devcliextensions/azure-batch-cli-extensions {0}'.format(root_dir))
exec_command('pip install --no-deps --upgrade --target ./.azure/devcliextensions/azure-batch-cli-extensions {0}/batch-cli-extensions'.format(root_dir))
print('Finished dev setup.')
```
|
{
"source": "jdmartinez36/azure-iot-cli-extension",
"score": 2
}
|
#### File: tests/digitaltwins/test_dt_resource_lifecycle_int.py
```python
import pytest
from time import sleep
from knack.log import get_logger
from azext_iot.digitaltwins.common import ADTEndpointType
from ..settings import DynamoSettings
from . import DTLiveScenarioTest
from . import (
MOCK_RESOURCE_TAGS,
MOCK_RESOURCE_TAGS_DICT,
MOCK_DEAD_LETTER_SECRET,
generate_resource_id,
)
logger = get_logger(__name__)
resource_test_env_vars = [
"azext_dt_ep_eventhub_namespace",
"azext_dt_ep_eventhub_policy",
"azext_dt_ep_eventhub_topic",
"azext_dt_ep_servicebus_namespace",
"azext_dt_ep_servicebus_policy",
"azext_dt_ep_servicebus_topic",
"azext_dt_ep_eventgrid_topic",
"azext_dt_ep_rg",
]
settings = DynamoSettings(opt_env_set=resource_test_env_vars)
run_resource_tests = False
run_endpoint_route_tests = False
if all(
[
settings.env.azext_dt_ep_eventhub_namespace,
settings.env.azext_dt_ep_eventhub_policy,
settings.env.azext_dt_ep_eventhub_topic,
settings.env.azext_dt_ep_servicebus_namespace,
settings.env.azext_dt_ep_servicebus_policy,
settings.env.azext_dt_ep_servicebus_topic,
settings.env.azext_dt_ep_eventgrid_topic,
settings.env.azext_dt_ep_rg,
]
):
run_endpoint_route_tests = True
class TestDTResourceLifecycle(DTLiveScenarioTest):
def __init__(self, test_case):
super(TestDTResourceLifecycle, self).__init__(test_case)
def test_dt_resource(self):
instance_names = [generate_resource_id(), generate_resource_id()]
dt_location_custom = "eastus2euap"
create_output = self.cmd(
"dt create -n {} -g {} -l {} --tags {}".format(
instance_names[0],
self.dt_resource_group,
self.dt_location,
MOCK_RESOURCE_TAGS,
)
).get_output_in_json()
assert_common_resource_attributes(
create_output,
instance_names[0],
self.dt_resource_group,
self.dt_location,
MOCK_RESOURCE_TAGS_DICT,
)
        # Explicitly assert that create prevents provisioning on a name conflict (across regions)
self.cmd(
"dt create -n {} -g {} -l {} --tags {}".format(
instance_names[0],
self.dt_resource_group,
dt_location_custom,
MOCK_RESOURCE_TAGS,
),
expect_failure=True,
)
# No location specified. Use the resource group location.
create_output = self.cmd(
"dt create -n {} -g {}".format(
instance_names[1], self.dt_resource_group
)
).get_output_in_json()
assert_common_resource_attributes(
create_output,
instance_names[1],
self.dt_resource_group,
self.dt_resource_group_loc,
None,
)
show_output = self.cmd(
"dt show -n {}".format(instance_names[0])
).get_output_in_json()
assert_common_resource_attributes(
show_output,
instance_names[0],
self.dt_resource_group,
self.dt_location,
MOCK_RESOURCE_TAGS_DICT,
)
show_output = self.cmd(
"dt show -n {} -g {}".format(instance_names[1], self.dt_resource_group)
).get_output_in_json()
assert_common_resource_attributes(
show_output,
instance_names[1],
self.dt_resource_group,
self.dt_location,
None,
)
list_output = self.cmd("dt list").get_output_in_json()
filtered_list = filter_dt_list(list_output, instance_names)
assert len(filtered_list) == len(instance_names)
list_group_output = self.cmd(
"dt list -g {}".format(self.dt_resource_group)
).get_output_in_json()
filtered_group_list = filter_dt_list(list_group_output, instance_names)
assert len(filtered_group_list) == len(instance_names)
# Delete does not currently return output
self.cmd("dt delete -n {}".format(instance_names[0]))
self.cmd(
"dt delete -n {} -g {}".format(instance_names[1], self.dt_resource_group)
)
def test_dt_rbac(self):
rbac_assignee_owner = self.current_user
rbac_assignee_reader = self.current_user
rbac_instance_name = generate_resource_id()
self.cmd(
"dt create -n {} -g {} -l {}".format(
rbac_instance_name, self.dt_resource_group, self.dt_location,
)
)
assert (
len(
self.cmd(
"dt role-assignment list -n {}".format(rbac_instance_name)
).get_output_in_json()
)
== 0
)
assign_output = self.cmd(
"dt role-assignment create -n {} --assignee {} --role '{}'".format(
rbac_instance_name, rbac_assignee_owner, self.role_map["owner"]
)
).get_output_in_json()
assert_common_rbac_attributes(
assign_output, rbac_instance_name, "owner", rbac_assignee_owner,
)
assign_output = self.cmd(
"dt role-assignment create -n {} --assignee {} --role '{}' -g {}".format(
rbac_instance_name,
rbac_assignee_reader,
self.role_map["reader"],
self.dt_resource_group,
)
).get_output_in_json()
assert_common_rbac_attributes(
assign_output, rbac_instance_name, "reader", rbac_assignee_reader,
)
list_assigned_output = self.cmd(
"dt role-assignment list -n {}".format(rbac_instance_name)
).get_output_in_json()
assert len(list_assigned_output) == 2
# role-assignment delete does not currently return output
# Remove specific role assignment (reader) for assignee
self.cmd(
"dt role-assignment delete -n {} --assignee {} --role '{}'".format(
rbac_instance_name, rbac_assignee_owner, self.role_map["reader"],
)
)
list_assigned_output = self.cmd(
"dt role-assignment list -n {} -g {}".format(
rbac_instance_name, self.dt_resource_group
)
).get_output_in_json()
assert len(list_assigned_output) == 1
# Remove all role assignments for assignee
self.cmd(
"dt role-assignment delete -n {} --assignee {}".format(
rbac_instance_name, rbac_assignee_reader
)
)
list_assigned_output = self.cmd(
"dt role-assignment list -n {} -g {}".format(
rbac_instance_name, self.dt_resource_group
)
).get_output_in_json()
assert len(list_assigned_output) == 0
self.cmd("dt delete -n {}".format(rbac_instance_name))
@pytest.mark.skipif(
not run_endpoint_route_tests,
reason="All azext_dt_ep_* env vars are required for endpoint and route tests.",
)
def test_dt_endpoints_routes(self):
endpoints_instance_name = generate_resource_id()
self.cmd(
"dt create -n {} -g {} -l {}".format(
endpoints_instance_name, self.dt_resource_group, self.dt_location,
)
)
# Setup RBAC so we can interact with routes
self.cmd(
"dt role-assignment create -n {} --assignee {} --role '{}' -g {}".format(
endpoints_instance_name,
self.current_user,
self.role_map["owner"],
self.dt_resource_group,
)
)
sleep(20) # Wait for service to catch-up
list_ep_output = self.cmd(
"dt endpoint list -n {}".format(endpoints_instance_name)
).get_output_in_json()
assert len(list_ep_output) == 0
eventgrid_rg = settings.env.azext_dt_ep_rg
eventgrid_topic = settings.env.azext_dt_ep_eventgrid_topic
eventgrid_endpoint = "myeventgridendpoint"
logger.debug("Adding eventgrid endpoint...")
add_ep_output = self.cmd(
"dt endpoint create eventgrid -n {} -g {} --egg {} --egt {} --en {} --dsu {}".format(
endpoints_instance_name,
self.dt_resource_group,
eventgrid_rg,
eventgrid_topic,
eventgrid_endpoint,
MOCK_DEAD_LETTER_SECRET
)
).get_output_in_json()
assert_common_endpoint_attributes(
add_ep_output,
eventgrid_endpoint,
ADTEndpointType.eventgridtopic,
)
servicebus_rg = settings.env.azext_dt_ep_rg
servicebus_namespace = settings.env.azext_dt_ep_servicebus_namespace
servicebus_policy = settings.env.azext_dt_ep_servicebus_policy
servicebus_topic = settings.env.azext_dt_ep_servicebus_topic
servicebus_endpoint = "myservicebusendpoint"
logger.debug("Adding servicebus topic endpoint...")
add_ep_output = self.cmd(
"dt endpoint create servicebus -n {} --sbg {} --sbn {} --sbp {} --sbt {} --en {} --dsu {}".format(
endpoints_instance_name,
servicebus_rg,
servicebus_namespace,
servicebus_policy,
servicebus_topic,
servicebus_endpoint,
MOCK_DEAD_LETTER_SECRET
)
).get_output_in_json()
assert_common_endpoint_attributes(
add_ep_output,
servicebus_endpoint,
ADTEndpointType.servicebus,
)
eventhub_rg = settings.env.azext_dt_ep_rg
eventhub_namespace = settings.env.azext_dt_ep_eventhub_namespace
eventhub_policy = settings.env.azext_dt_ep_eventhub_policy
eventhub_topic = settings.env.azext_dt_ep_eventhub_topic
eventhub_endpoint = "myeventhubendpoint"
logger.debug("Adding eventhub endpoint...")
add_ep_output = self.cmd(
"dt endpoint create eventhub -n {} --ehg {} --ehn {} --ehp {} --eh {} --ehs {} --en {} --dsu {}".format(
endpoints_instance_name,
eventhub_rg,
eventhub_namespace,
eventhub_policy,
eventhub_topic,
self.current_subscription,
eventhub_endpoint,
MOCK_DEAD_LETTER_SECRET
)
).get_output_in_json()
assert_common_endpoint_attributes(
add_ep_output, eventhub_endpoint, ADTEndpointType.eventhub
)
show_ep_output = self.cmd(
"dt endpoint show -n {} --en {}".format(
endpoints_instance_name, eventhub_endpoint,
)
).get_output_in_json()
assert_common_endpoint_attributes(
show_ep_output, eventhub_endpoint, ADTEndpointType.eventhub
)
show_ep_output = self.cmd(
"dt endpoint show -n {} -g {} --en {}".format(
endpoints_instance_name, self.dt_resource_group, servicebus_endpoint,
)
).get_output_in_json()
assert_common_endpoint_attributes(
show_ep_output,
servicebus_endpoint,
ADTEndpointType.servicebus,
)
list_ep_output = self.cmd(
"dt endpoint list -n {} -g {}".format(
endpoints_instance_name, self.dt_resource_group
)
).get_output_in_json()
assert len(list_ep_output) == 3
endpoint_names = [eventgrid_endpoint, servicebus_endpoint, eventhub_endpoint]
filter_values = ["", "false", "type = Microsoft.DigitalTwins.Twin.Create"]
# Test Routes
list_routes_output = self.cmd(
"dt route list -n {}".format(endpoints_instance_name)
).get_output_in_json()
assert len(list_routes_output) == 0
for endpoint_name in endpoint_names:
is_last = endpoint_name == endpoint_names[-1]
route_name = "routefor{}".format(endpoint_name)
filter_value = filter_values.pop()
add_route_output = self.cmd(
"dt route create -n {} --rn {} --en {} --filter '{}' {}".format(
endpoints_instance_name,
route_name,
endpoint_name,
filter_value,
"-g {}".format(self.dt_resource_group) if is_last else "",
)
).get_output_in_json()
assert_common_route_attributes(
add_route_output, route_name, endpoint_name, filter_value
)
show_route_output = self.cmd(
"dt route show -n {} --rn {} {}".format(
endpoints_instance_name,
route_name,
"-g {}".format(self.dt_resource_group) if is_last else "",
)
).get_output_in_json()
assert_common_route_attributes(
show_route_output, route_name, endpoint_name, filter_value
)
list_routes_output = self.cmd(
"dt route list -n {} -g {}".format(
endpoints_instance_name, self.dt_resource_group
)
).get_output_in_json()
assert len(list_routes_output) == 3
for endpoint_name in endpoint_names:
is_last = endpoint_name == endpoint_names[-1]
route_name = "routefor{}".format(endpoint_name)
self.cmd(
"dt route delete -n {} --rn {} {}".format(
endpoints_instance_name,
route_name,
"-g {}".format(self.dt_resource_group) if is_last else "",
)
)
list_routes_output = self.cmd(
"dt route list -n {} -g {}".format(
endpoints_instance_name, self.dt_resource_group
)
).get_output_in_json()
assert len(list_routes_output) == 0
        # Unfortunately the service does not yet know how to delete child resources
        # of a dt parent automatically, so we have to explicitly delete every endpoint first.
for endpoint_name in endpoint_names:
logger.debug("Cleaning up {} endpoint...".format(endpoint_name))
is_last = endpoint_name == endpoint_names[-1]
self.cmd(
"dt endpoint delete -n {} --en {} {}".format(
endpoints_instance_name,
endpoint_name,
"-g {}".format(self.dt_resource_group) if is_last else "",
)
)
list_endpoint_output = self.cmd(
"dt endpoint list -n {} -g {}".format(
endpoints_instance_name, self.dt_resource_group
)
).get_output_in_json()
assert len(list_endpoint_output) == 0
self.cmd(
"dt delete -n {} -g {}".format(
endpoints_instance_name, self.dt_resource_group
)
)
def assert_common_resource_attributes(
instance_output, resource_id, group_id, location, tags
):
assert instance_output["createdTime"]
assert instance_output["hostName"].startswith(resource_id)
assert instance_output["location"] == location
assert instance_output["id"].endswith(resource_id)
assert instance_output["lastUpdatedTime"]
assert instance_output["name"] == resource_id
assert instance_output["provisioningState"] == "Succeeded"
assert instance_output["resourceGroup"] == group_id
assert instance_output["type"] == "Microsoft.DigitalTwins/digitalTwinsInstances"
assert instance_output["tags"] == tags
def assert_common_route_attributes(
route_output, route_name, endpoint_name, filter_value
):
assert route_output["endpointName"] == endpoint_name
assert route_output["id"] == route_name
assert route_output["filter"] == filter_value if filter_value else "true"
def assert_common_endpoint_attributes(
endpoint_output, endpoint_name, endpoint_type, dead_letter_secret=None
):
assert endpoint_output["id"].endswith("/{}".format(endpoint_name))
assert (
endpoint_output["type"]
== "Microsoft.DigitalTwins/digitalTwinsInstances/endpoints"
)
assert endpoint_output["resourceGroup"]
assert endpoint_output["properties"]["provisioningState"]
assert endpoint_output["properties"]["createdTime"]
if dead_letter_secret:
assert endpoint_output["properties"]["deadLetterSecret"]
if endpoint_type == ADTEndpointType.eventgridtopic:
assert endpoint_output["properties"]["topicEndpoint"]
assert endpoint_output["properties"]["accessKey1"]
assert endpoint_output["properties"]["accessKey2"]
assert endpoint_output["properties"]["endpointType"] == "EventGrid"
return
if endpoint_type == ADTEndpointType.servicebus:
assert endpoint_output["properties"]["primaryConnectionString"]
assert endpoint_output["properties"]["secondaryConnectionString"]
assert endpoint_output["properties"]["endpointType"] == "ServiceBus"
return
if endpoint_type == ADTEndpointType.eventhub:
assert endpoint_output["properties"]["connectionStringPrimaryKey"]
assert endpoint_output["properties"]["connectionStringSecondaryKey"]
assert endpoint_output["properties"]["endpointType"] == "EventHub"
return
def assert_common_rbac_attributes(rbac_output, instance_name, role_name, assignee):
role_def_id = None
if role_name == "owner":
role_def_id = "/bcd981a7-7f74-457b-83e1-cceb9e632ffe"
elif role_name == "reader":
role_def_id = "/d57506d4-4c8d-48b1-8587-93c323f6a5a3"
assert rbac_output["roleDefinitionId"].endswith(role_def_id)
assert rbac_output["type"] == "Microsoft.Authorization/roleAssignments"
assert rbac_output["scope"].endswith("/{}".format(instance_name))
def filter_dt_list(list_output, valid_names):
return [inst for inst in list_output if inst["name"] in valid_names]
```
|
{
"source": "jdmartinez36/azure-keyvault-cli-extension",
"score": 2
}
|
#### File: keyvault/models/deleted_sas_definition_bundle_py3.py
```python
from .sas_definition_bundle import SasDefinitionBundle
class DeletedSasDefinitionBundle(SasDefinitionBundle):
"""A deleted SAS definition bundle consisting of its previous id, attributes
and its tags, as well as information on when it will be purged.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The SAS definition id.
:vartype id: str
:ivar secret_id: Storage account SAS definition secret id.
:vartype secret_id: str
:ivar template_uri: The SAS definition token template signed with an
arbitrary key. Tokens created according to the SAS definition will have
the same properties as the template.
:vartype template_uri: str
:ivar sas_type: The type of SAS token the SAS definition will create.
Possible values include: 'account', 'service'
:vartype sas_type: str or ~azure.keyvault.models.SasTokenType
:ivar validity_period: The validity period of SAS tokens created according
to the SAS definition.
:vartype validity_period: str
:ivar attributes: The SAS definition attributes.
:vartype attributes: ~azure.keyvault.models.SasDefinitionAttributes
:ivar tags: Application specific metadata in the form of key-value pairs
:vartype tags: dict[str, str]
:param recovery_id: The url of the recovery object, used to identify and
recover the deleted SAS definition.
:type recovery_id: str
:ivar scheduled_purge_date: The time when the SAS definition is scheduled
to be purged, in UTC
:vartype scheduled_purge_date: datetime
:ivar deleted_date: The time when the SAS definition was deleted, in UTC
:vartype deleted_date: datetime
"""
_validation = {
'id': {'readonly': True},
'secret_id': {'readonly': True},
'template_uri': {'readonly': True},
'sas_type': {'readonly': True},
'validity_period': {'readonly': True},
'attributes': {'readonly': True},
'tags': {'readonly': True},
'scheduled_purge_date': {'readonly': True},
'deleted_date': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'secret_id': {'key': 'sid', 'type': 'str'},
'template_uri': {'key': 'templateUri', 'type': 'str'},
'sas_type': {'key': 'sasType', 'type': 'str'},
'validity_period': {'key': 'validityPeriod', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SasDefinitionAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'recovery_id': {'key': 'recoveryId', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
}
def __init__(self, *, recovery_id: str=None, **kwargs) -> None:
        super(DeletedSasDefinitionBundle, self).__init__(**kwargs)
self.recovery_id = recovery_id
self.scheduled_purge_date = None
self.deleted_date = None
```
#### File: keyvault/models/deleted_storage_account_item_py3.py
```python
from .storage_account_item import StorageAccountItem
class DeletedStorageAccountItem(StorageAccountItem):
"""The deleted storage account item containing metadata about the deleted
storage account.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Storage identifier.
:vartype id: str
:ivar resource_id: Storage account resource Id.
:vartype resource_id: str
:ivar attributes: The storage account management attributes.
:vartype attributes: ~azure.keyvault.models.StorageAccountAttributes
:ivar tags: Application specific metadata in the form of key-value pairs.
:vartype tags: dict[str, str]
:param recovery_id: The url of the recovery object, used to identify and
recover the deleted storage account.
:type recovery_id: str
:ivar scheduled_purge_date: The time when the storage account is scheduled
to be purged, in UTC
:vartype scheduled_purge_date: datetime
:ivar deleted_date: The time when the storage account was deleted, in UTC
:vartype deleted_date: datetime
"""
_validation = {
'id': {'readonly': True},
'resource_id': {'readonly': True},
'attributes': {'readonly': True},
'tags': {'readonly': True},
'scheduled_purge_date': {'readonly': True},
'deleted_date': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'StorageAccountAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'recovery_id': {'key': 'recoveryId', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
}
def __init__(self, *, recovery_id: str=None, **kwargs) -> None:
        super(DeletedStorageAccountItem, self).__init__(**kwargs)
self.recovery_id = recovery_id
self.scheduled_purge_date = None
self.deleted_date = None
```
#### File: keyvault/models/sas_definition_create_parameters_py3.py
```python
from msrest.serialization import Model
class SasDefinitionCreateParameters(Model):
"""The SAS definition create parameters.
All required parameters must be populated in order to send to Azure.
:param template_uri: Required. The SAS definition token template signed
with an arbitrary key. Tokens created according to the SAS definition
will have the same properties as the template.
:type template_uri: str
:param sas_type: Required. The type of SAS token the SAS definition will
create. Possible values include: 'account', 'service'
:type sas_type: str or ~azure.keyvault.models.SasTokenType
:param validity_period: Required. The validity period of SAS tokens
created according to the SAS definition.
:type validity_period: str
:param sas_definition_attributes: The attributes of the SAS definition.
:type sas_definition_attributes:
~azure.keyvault.models.SasDefinitionAttributes
:param tags: Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
"""
_validation = {
'template_uri': {'required': True},
'sas_type': {'required': True},
'validity_period': {'required': True},
}
_attribute_map = {
'template_uri': {'key': 'templateUri', 'type': 'str'},
'sas_type': {'key': 'sasType', 'type': 'str'},
'validity_period': {'key': 'validityPeriod', 'type': 'str'},
'sas_definition_attributes': {'key': 'attributes', 'type': 'SasDefinitionAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, *, template_uri: str, sas_type, validity_period: str, sas_definition_attributes=None, tags=None, **kwargs) -> None:
super(SasDefinitionCreateParameters, self).__init__(**kwargs)
self.template_uri = template_uri
self.sas_type = sas_type
self.validity_period = validity_period
self.sas_definition_attributes = sas_definition_attributes
self.tags = tags
```
|
{
"source": "jdmartin/thesis-codesamples",
"score": 3
}
|
#### File: thesis-codesamples/Appendix A/stats.py
```python
import os
# utils
import re
files = os.listdir('data/flattened')
#Remove unwanted files:
files.remove('.DS_Store')
files.remove('DHSI20_flat.csv')
# plotting packages
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# model building package
import sklearn
for file in files:
print(file)
current_file = file.split('.')[0]
filename = 'data/flattened/' + current_file + '.csv'
df = pd.read_csv(filename)
df.text.unique().shape
# make a new column to highlight retweets
df['is_retweet'] = df['text'].apply(lambda x: x[:2]=='RT')
df['is_retweet'].sum() # number of retweets
# number of unique retweets
df.loc[df['is_retweet']].text.unique().size
# 10 most repeated tweets
highRT = df.groupby(['text']).size().reset_index(name='counts').sort_values('counts', ascending=False).head(10)
#Store It
pd.DataFrame(highRT).to_csv('output' + '/' + 'highRT' + '/' + current_file + '.csv', index = False)
# number of times each tweet appears
counts = df.groupby(['text']).size().reset_index(name='counts').counts
# define bins for histogram
my_bins = np.arange(0,counts.max()+2, 1)-0.5
def find_retweeted(text):
'''This function will extract the twitter handles of retweeted people'''
return re.findall('(?<=RT\s)(@[A-Za-z]+[A-Za-z0-9-_]+)', text)
def find_mentioned(text):
'''This function will extract the twitter handles of people mentioned in the tweet'''
return re.findall('(?<!RT\s)(@[A-Za-z]+[A-Za-z0-9-_]+)', text)
def find_hashtags(text):
'''This function will extract hashtags'''
return re.findall('(#[A-Za-z]+[A-Za-z0-9-_]+)', text)
# make new columns for retweeted usernames, mentioned usernames and hashtags
df['retweeted'] = df.text.apply(find_retweeted)
df['mentioned'] = df.text.apply(find_mentioned)
df['hashtags'] = df.text.apply(find_hashtags)
# take the rows from the hashtag columns where there are actually hashtags
hashtags_list_df = df.loc[df.hashtags.apply(lambda hashtags_list: hashtags_list !=[]),['hashtags']]
# create dataframe where each use of hashtag gets its own row
flattened_hashtags_df = pd.DataFrame([hashtag for hashtags_list in hashtags_list_df.hashtags for hashtag in hashtags_list], columns=['hashtag'])
# number of unique hashtags
print('Number of Unique Hashtags: ' + str(flattened_hashtags_df['hashtag'].unique().size))
# count of appearances of each hashtag
print('Appearances of Each Hashtag')
print(flattened_hashtags_df.groupby('hashtag').size().reset_index(name='counts').sort_values('counts', ascending=False).reset_index(drop=True))
popular_hashtags = flattened_hashtags_df.groupby('hashtag').size().reset_index(name='counts').sort_values('counts', ascending=False).reset_index(drop=True)
#Store It
pd.DataFrame(popular_hashtags).to_csv('output' + '/' + 'hashtags' + '/' + current_file + '.csv', index = False)
###Visualizing
# take hashtags which appear at least this amount of times
min_appearance = 25
# find popular hashtags - make into python set for efficiency
popular_hashtags_set = set(popular_hashtags[popular_hashtags.counts>=min_appearance]['hashtag'])
# make a new column with only the popular hashtags
hashtags_list_df['popular_hashtags'] = hashtags_list_df.hashtags.apply(lambda hashtag_list: [hashtag for hashtag in hashtag_list if hashtag in popular_hashtags_set])
# drop rows without popular hashtag
popular_hashtags_list_df = hashtags_list_df.loc[hashtags_list_df.popular_hashtags.apply(lambda hashtag_list: hashtag_list !=[])]
# make new dataframe
hashtag_vector_df = popular_hashtags_list_df.loc[:, ['popular_hashtags']]
#Just the first 18
i = 0
for hashtag in popular_hashtags_set:
if i <= 17:
# make columns to encode presence of hashtags
hashtag_vector_df['{}'.format(hashtag)] = hashtag_vector_df.popular_hashtags.apply(lambda hashtag_list: int(hashtag in hashtag_list))
i+=1
hashtag_matrix = hashtag_vector_df.drop('popular_hashtags', axis=1)
# calculate the correlation matrix
correlations = hashtag_matrix.corr()
# # plot the correlation matrix
plt.figure(figsize=(25, 25))
ax=plt.subplot(111)
    sns.set(font_scale=1.5)  # scale up fonts for the large heatmap
sns.color_palette("icefire", as_cmap=True)
sns.heatmap(correlations, annot=True, fmt=".1g", center=0, linecolor='white', linewidths=.5, robust=True, vmin=-1, vmax=1, cbar_kws={'label':'correlation'}, ax=ax)
ax.tick_params(labelsize='19', width=3)
ax.tick_params(axis='x', which='minor', labelsize=9, width=3)
plt.xticks(rotation=90)
plt.yticks(rotation=0)
plt.savefig('output' + '/' + 'plots' + '/' + current_file + '.png')
```
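The three regex helpers above drive all of the per-tweet feature extraction. A standalone sketch of what they return on a made-up tweet (the handles and hashtags are invented):
```python
import re

def find_retweeted(text):
    return re.findall(r'(?<=RT\s)(@[A-Za-z]+[A-Za-z0-9-_]+)', text)

def find_mentioned(text):
    return re.findall(r'(?<!RT\s)(@[A-Za-z]+[A-Za-z0-9-_]+)', text)

def find_hashtags(text):
    return re.findall(r'(#[A-Za-z]+[A-Za-z0-9-_]+)', text)

tweet = 'RT @dhconf: great talk by @jane_doe on #DH2020 #networks'
print(find_retweeted(tweet))  # ['@dhconf']
print(find_mentioned(tweet))  # ['@jane_doe']
print(find_hashtags(tweet))   # ['#DH2020', '#networks']
```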
|
{
"source": "jdmcbr/blaze",
"score": 2
}
|
#### File: compute/tests/test_optimize_compute.py
```python
from blaze.expr import Expr, symbol
from blaze.dispatch import dispatch
from blaze import compute
class Foo(object):
def __init__(self, data):
self.data = data
@dispatch(Expr, Foo)
def compute_up(expr, data, **kwargs):
return data
def optimize(expr, data):
""" Renames leaf """
leaf = expr._leaves()[0]
return expr._subs({leaf: symbol('newname', leaf.dshape)})
def test_scope_gets_updated_after_optimize_call():
a = symbol('a', 'int')
result = compute(a + 1, Foo('foo'), optimize=optimize)
assert result.data == 'foo'
```
#### File: compute/tests/test_pmap.py
```python
from blaze import compute, resource, symbol, discover
from blaze.utils import example
flag = [False]
def mymap(func, *args):
flag[0] = True
return map(func, *args)
def test_map_called_on_resource_star():
r = resource(example('accounts_*.csv'))
s = symbol('s', discover(r))
flag[0] = False
a = compute(s.count(), r)
b = compute(s.count(), r, map=mymap)
assert a == b
assert flag[0]
```
#### File: compute/tests/test_postgresql_compute.py
```python
from datetime import timedelta
import itertools
import re
import pytest
sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('psycopg2')
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from odo import odo, resource, drop, discover
from blaze import symbol, compute, concat, join
names = ('tbl%d' % i for i in itertools.count())
def normalize(s):
s = ' '.join(s.strip().split()).lower()
s = re.sub(r'(alias)_?\d*', r'\1', s)
return re.sub(r'__([A-Za-z_][A-Za-z_0-9]*)', r'\1', s)
@pytest.fixture
def url():
return 'postgresql://postgres@localhost/test::%s'
@pytest.yield_fixture
def sql(url):
try:
t = resource(url % next(names), dshape='var * {A: string, B: int64}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo([('a', 1), ('b', 2)], t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sqla(url):
try:
t = resource(url % next(names), dshape='var * {A: ?string, B: ?int32}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo([('a', 1), (None, 1), ('c', None)], t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sqlb(url):
try:
t = resource(url % next(names), dshape='var * {A: string, B: int64}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo([('a', 1), ('b', 2)], t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sql_with_dts(url):
try:
t = resource(url % next(names), dshape='var * {A: datetime}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo([(d,) for d in pd.date_range('2014-01-01', '2014-02-01')], t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sql_two_tables(url):
dshape = 'var * {a: int32}'
try:
t = resource(url % next(names), dshape=dshape)
u = resource(url % next(names), dshape=dshape)
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield u, t
finally:
drop(t)
drop(u)
@pytest.yield_fixture
def sql_with_float(url):
try:
t = resource(url % next(names), dshape='var * {c: float64}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield t
finally:
drop(t)
def test_postgres_create(sql):
assert odo(sql, list) == [('a', 1), ('b', 2)]
def test_postgres_isnan(sql_with_float):
data = (1.0,), (float('nan'),)
table = odo(data, sql_with_float)
sym = symbol('s', discover(data))
assert odo(compute(sym.isnan(), table), list) == [(False,), (True,)]
def test_insert_from_subselect(sql_with_float):
data = pd.DataFrame([{'c': 2.0}, {'c': 1.0}])
tbl = odo(data, sql_with_float)
s = symbol('s', discover(data))
odo(compute(s[s.c.isin((1.0, 2.0))].sort(), tbl), sql_with_float),
tm.assert_frame_equal(
odo(sql_with_float, pd.DataFrame).iloc[2:].reset_index(drop=True),
pd.DataFrame([{'c': 1.0}, {'c': 2.0}]),
)
def test_concat(sql_two_tables):
t_table, u_table = sql_two_tables
t_data = pd.DataFrame(np.arange(5), columns=['a'])
u_data = pd.DataFrame(np.arange(5, 10), columns=['a'])
odo(t_data, t_table)
odo(u_data, u_table)
t = symbol('t', discover(t_data))
u = symbol('u', discover(u_data))
tm.assert_frame_equal(
odo(
compute(concat(t, u).sort('a'), {t: t_table, u: u_table}),
pd.DataFrame,
),
pd.DataFrame(np.arange(10), columns=['a']),
)
def test_concat_invalid_axis(sql_two_tables):
t_table, u_table = sql_two_tables
t_data = pd.DataFrame(np.arange(5), columns=['a'])
u_data = pd.DataFrame(np.arange(5, 10), columns=['a'])
odo(t_data, t_table)
odo(u_data, u_table)
# We need to force the shape to not be a record here so we can
# create the `Concat` node with an axis=1.
t = symbol('t', '5 * 1 * int32')
u = symbol('u', '5 * 1 * int32')
with pytest.raises(ValueError) as e:
compute(concat(t, u, axis=1), {t: t_table, u: u_table})
# Preserve the suggestion to use merge.
assert "'merge'" in str(e.value)
def test_timedelta_arith(sql_with_dts):
delta = timedelta(days=1)
dates = pd.Series(pd.date_range('2014-01-01', '2014-02-01'))
sym = symbol('s', discover(dates))
assert (
odo(compute(sym + delta, sql_with_dts), pd.Series) == dates + delta
).all()
assert (
odo(compute(sym - delta, sql_with_dts), pd.Series) == dates - delta
).all()
def test_coerce_bool_and_sum(sql):
n = sql.name
t = symbol(n, discover(sql))
expr = (t.B > 1.0).coerce(to='int32').sum()
result = compute(expr, sql).scalar()
expected = odo(compute(t.B, sql), pd.Series).gt(1).sum()
assert result == expected
def test_distinct_on(sql):
t = symbol('t', discover(sql))
computation = compute(t[['A', 'B']].sort('A').distinct('A'), sql)
assert normalize(str(computation)) == normalize("""
SELECT DISTINCT ON (anon_1."A") anon_1."A", anon_1."B"
FROM (SELECT {tbl}."A" AS "A", {tbl}."B" AS "B"
FROM {tbl}) AS anon_1 ORDER BY anon_1."A" ASC
""".format(tbl=sql.name))
assert odo(computation, tuple) == (('a', 1), ('b', 2))
def test_join_type_promotion(sqla, sqlb):
t, s = symbol(sqla.name, discover(sqla)), symbol(sqlb.name, discover(sqlb))
expr = join(t, s, 'B', how='inner')
result = set(map(tuple, compute(expr, {t: sqla, s: sqlb}).execute().fetchall()))
expected = set([(1, 'a', 'a'), (1, None, 'a')])
assert result == expected
```
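The `normalize` helper near the top of this file keeps the generated-SQL assertions stable: it collapses whitespace, lowercases, and strips the numeric suffixes appended to anonymous aliases. The same idea in isolation, with an invented input string:
```python
import re

def normalize(s):
    s = ' '.join(s.strip().split()).lower()
    s = re.sub(r'(alias)_?\d*', r'\1', s)
    return re.sub(r'__([A-Za-z_][A-Za-z_0-9]*)', r'\1', s)

print(normalize('SELECT  anon_1."A"\nFROM tbl AS  ALIAS_2'))
# -> select anon_1."a" from tbl as alias
```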
#### File: expr/tests/test_broadcast.py
```python
from blaze.expr import *
from blaze.expr.broadcast import *
from blaze.expr.broadcast import leaves_of_type, broadcast_collect
from blaze.compatibility import builtins
from toolz import isdistinct
x = symbol('x', '5 * 3 * int32')
xx = symbol('xx', 'int32')
y = symbol('y', '5 * 3 * int32')
yy = symbol('yy', 'int32')
a = symbol('a', 'int32')
def test_broadcast_basic():
b = Broadcast((x, y), (xx, yy), xx + yy)
assert b.shape == x.shape
assert b.schema == (xx + yy).dshape
assert eval(str(b)).isidentical(b)
def test_scalar_symbols():
exprs = [x, y]
scalars = scalar_symbols(exprs)
assert len(scalars) == len(exprs)
assert isdistinct([s._name for s in scalars])
assert builtins.all(s.dshape == e.schema for s, e in zip(scalars, exprs))
def test_broadcast_function():
expr = Pow(Add(x, Mult(2, y)), 2) # (x + (2 * y)) ** 2
b = broadcast(expr, [x, y])
xx, yy = b._scalars
assert b._scalar_expr.isidentical((xx + (2 * yy)) ** 2)
# A different set of leaves
b = broadcast(expr, [x, Mult(2, y)])
xx, yy = b._scalars
assert b._scalar_expr.isidentical((xx + yy) ** 2)
t = symbol('t', 'var * {x: int, y: int, z: int}')
def test_tabular_case():
expr = t.x + t.y * 2
b = broadcast(expr, [t])
tt, = b._scalars
assert b._scalar_expr.isidentical(tt.x + tt.y * 2)
def test_optimize_broadcast():
expr = (t.distinct().x + 1).distinct()
expected = broadcast(t.distinct().x + 1, [t.distinct()]).distinct()
result = broadcast_collect(expr, Broadcastable=(Field, Arithmetic),
WantToBroadcast=(Field, Arithmetic))
assert result.isidentical(expected)
def test_leaves_of_type():
expr = Distinct(Distinct(Distinct(t.x)))
result = leaves_of_type((Distinct,), expr)
assert len(result) == 1
assert list(result)[0].isidentical(t.x)
def test_broadcast_collect_doesnt_collect_scalars():
expr = xx + yy * a
assert broadcast_collect(expr, Broadcastable=Arithmetic,
WantToBroadcast=Arithmetic).isidentical(expr)
def test_table_broadcast():
t = symbol('t', 'var * {x: int, y: int, z: int}')
expr = t.distinct()
expr = (2 * expr.x + expr.y + 1).distinct()
expected = t.distinct()
expected = broadcast(2 * expected.x + expected.y + 1, [expected]).distinct()
assert broadcast_collect(expr).isidentical(expected)
expr = (t.x + t.y).sum()
result = broadcast_collect(expr)
expected = broadcast(t.x + t.y, [t]).sum()
assert result.isidentical(expected)
def test_broadcast_doesnt_affect_scalars():
t = symbol('t', '{x: int, y: int, z: int}')
expr = (2 * t.x + t.y + 1)
assert broadcast_collect(expr).isidentical(expr)
def test_full_expr():
b = Broadcast((x, y), (xx, yy), xx + yy)
assert b._full_expr.isidentical(x + y)
def test_broadcast_naming():
t = symbol('t', 'var * {x: int, y: int, z: int}')
for expr in [t.x, t.x + 1]:
assert broadcast(expr, [t])._name == 'x'
```
#### File: expr/tests/test_reductions.py
```python
from itertools import product
import pytest
from blaze.expr import symbol, summary
from datashape import dshape
def test_reduction_dshape():
x = symbol('x', '5 * 3 * float32')
assert x.sum().dshape == dshape('float64')
assert x.sum(axis=0).dshape == dshape('3 * float64')
assert x.sum(axis=1).dshape == dshape('5 * float64')
assert x.sum(axis=(0, 1)).dshape == dshape('float64')
def test_keepdims():
x = symbol('x', '5 * 3 * float32')
assert x.sum(axis=0, keepdims=True).dshape == dshape('1 * 3 * float64')
assert x.sum(axis=1, keepdims=True).dshape == dshape('5 * 1 * float64')
assert x.sum(axis=(0, 1), keepdims=True).dshape == dshape(
'1 * 1 * float64')
assert x.std(axis=0, keepdims=True).shape == (1, 3)
def test_summary_keepdims():
x = symbol('x', '5 * 3 * float32')
assert summary(a=x.min(), b=x.max()).dshape == \
dshape('{a: float32, b: float32}')
assert summary(a=x.min(), b=x.max(), keepdims=True).dshape == \
dshape('1 * 1 * {a: float32, b: float32}')
def test_summary_axis():
x = symbol('x', '5 * 3 * float32')
assert summary(a=x.min(), b=x.max(), axis=0).dshape == \
dshape('3 * {a: float32, b: float32}')
assert summary(a=x.min(), b=x.max(), axis=1).dshape == \
dshape('5 * {a: float32, b: float32}')
assert summary(a=x.min(), b=x.max(), axis=1, keepdims=True).dshape == \
dshape('5 * 1 * {a: float32, b: float32}')
def test_summary_str():
x = symbol('x', '5 * 3 * float32')
assert 'keepdims' not in str(summary(a=x.min(), b=x.max()))
def test_axis_kwarg_is_normalized_to_tuple():
x = symbol('x', '5 * 3 * float32')
exprs = [x.sum(), x.sum(axis=1), x.sum(axis=[1]), x.std(), x.mean(axis=1)]
for expr in exprs:
assert isinstance(expr.axis, tuple)
def test_summary_with_multiple_children():
t = symbol('t', 'var * {x: int, y: int, z: int}')
assert summary(a=t.x.sum() + t.y.sum())._child.isidentical(t)
def test_dir():
t = symbol('t', '10 * int')
assert 'mean' in dir(t)
t = symbol('t', 'int')
assert 'mean' not in dir(t)
def test_norms():
x = symbol('x', '5 * 3 * float32')
assert x.vnorm().isidentical(x.vnorm('fro'))
assert x.vnorm().isidentical(x.vnorm(2))
assert x.vnorm(axis=0).shape == (3,)
assert x.vnorm(axis=0, keepdims=True).shape == (1, 3)
@pytest.mark.parametrize('reduc', ['max', 'min', 'sum', 'mean', 'std', 'var'])
def test_reductions_on_record_dshape(reduc):
t = symbol('t', '10 * {a: int64, b: string}')
with pytest.raises(AttributeError):
getattr(t, reduc)
@pytest.mark.parametrize('reduc', ['max', 'min', 'sum', 'mean', 'std', 'var'])
def test_boolean_has_reductions(reduc):
assert hasattr(symbol('t', 'var * bool'), reduc)
@pytest.mark.parametrize(['reduc', 'measure'],
product(['max', 'min'],
['date', 'datetime', 'timedelta']))
def test_max_min_on_datetime_and_timedelta(reduc, measure):
assert hasattr(symbol('t', 'var * %s' % measure), reduc)
def test_reduction_naming_with_generated_leaves():
assert symbol('_', 'var * float64').sum()._name == 'sum'
```
#### File: expr/tests/test_strings.py
```python
import datashape
from blaze.expr import TableSymbol, like, Like
def test_like():
t = TableSymbol('t', '{name: string, amount: int, city: string}')
expr = like(t, name='Alice*')
assert eval(str(expr)).isidentical(expr)
assert expr.schema == t.schema
assert expr.dshape[0] == datashape.var
```
#### File: server/tests/test_client.py
```python
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('flask')
from pandas import DataFrame
from blaze import compute, Data, by, into, discover
from blaze.expr import Expr, symbol, Field
from blaze.dispatch import dispatch
from blaze.server import Server
from blaze.server.client import Client, resource
df = DataFrame([['Alice', 100], ['Bob', 200]],
columns=['name', 'amount'])
df2 = DataFrame([['Charlie', 100], ['Dan', 200]],
columns=['name', 'amount'])
data = {'accounts': df, 'accounts2': df}
server = Server(data)
test = server.app.test_client()
from blaze.server import client
client.requests = test # OMG monkey patching
def test_client():
c = Client('localhost:6363')
assert str(discover(c)) == str(discover(data))
t = symbol('t', discover(c))
expr = t.accounts.amount.sum()
assert compute(expr, c) == 300
assert 'name' in t.accounts.fields
assert isinstance(t.accounts.name, Field)
assert compute(t.accounts.name, c) == ['Alice', 'Bob']
def test_expr_client_interactive():
c = Client('localhost:6363')
t = Data(c)
assert compute(t.accounts.name) == ['Alice', 'Bob']
assert (into(set, compute(by(t.accounts.name, min=t.accounts.amount.min(),
max=t.accounts.amount.max())))
== set([('Alice', 100, 100), ('Bob', 200, 200)]))
def test_compute_client_with_multiple_datasets():
c = resource('blaze://localhost:6363')
s = symbol('s', discover(c))
assert compute(s.accounts.amount.sum() + s.accounts2.amount.sum(),
{s: c}) == 600
def test_resource():
c = resource('blaze://localhost:6363')
assert isinstance(c, Client)
assert str(discover(c)) == str(discover(data))
def test_resource_default_port():
ec = resource('blaze://localhost')
assert str(discover(ec)) == str(discover(data))
def test_resource_non_default_port():
ec = resource('blaze://localhost:6364')
assert ec.url == 'http://localhost:6364'
def test_resource_all_in_one():
ec = resource('blaze://localhost:6363')
assert str(discover(ec)) == str(discover(data))
class CustomExpr(Expr):
__slots__ = '_hash', '_child'
@property
def dshape(self):
return self._child.dshape
@dispatch(CustomExpr, DataFrame)
def compute_up(expr, data, **kwargs):
return data
def test_custom_expressions():
ec = Client('localhost:6363')
t = symbol('t', discover(ec))
assert list(map(tuple, compute(CustomExpr(t.accounts), ec))) == into(list, df)
def test_client_dataset_fails():
with pytest.raises(ValueError):
Data('blaze://localhost::accounts')
with pytest.raises(ValueError):
resource('blaze://localhost::accounts')
def test_client_dataset():
d = Data('blaze://localhost')
assert list(map(tuple, into(list, d.accounts))) == into(list, df)
```
|
{
"source": "jdmccaffrey/keras-succinctly",
"score": 3
}
|
#### File: keras-succinctly/MNIST/make_data.py
```python
def generate(img_bin_file, lbl_bin_file,
result_file, n_images):
img_bf = open(img_bin_file, "rb") # binary image pixels
lbl_bf = open(lbl_bin_file, "rb") # binary labels
res_tf = open(result_file, "w") # result file
img_bf.read(16) # discard image header info
lbl_bf.read(8) # discard label header info
for i in range(n_images): # number images requested
# digit label first
lbl = ord(lbl_bf.read(1)) # get label like '3' (one byte)
res_tf.write(str(lbl))
# encoded = [0] * 10 # make one-hot vector
# encoded[lbl] = 1
# for i in range(10):
# res_tf.write(str(encoded[i]))
# res_tf.write(" ") # like 0 0 0 1 0 0 0 0 0 0
res_tf.write(" ** ") # arbitrary seperator char for readibility
# now do the image pixels
for j in range(784): # get 784 vals for each image file
val = ord(img_bf.read(1))
res_tf.write(str(val))
if j != 783: res_tf.write(" ") # avoid trailing space
res_tf.write("\n") # next image
img_bf.close(); lbl_bf.close(); # close the binary files
res_tf.close() # close the result text file
# ================================================================
def main():
# generate(".\\UnzippedBinary\\train-images.idx3-ubyte.bin",
# ".\\UnzippedBinary\\train-labels.idx1-ubyte.bin",
# ".\\mnist_train_keras_1000.txt",
# n_images = 1000) # first n images
generate(".\\UnzippedBinary\\t10k-images.idx3-ubyte.bin",
".\\UnzippedBinary\\t10k-labels.idx1-ubyte.bin",
".\\mnist_test_keras_foo.txt",
n_images = 100) # first n images
if __name__ == "__main__":
main()
```
|
{
"source": "jdmcgraw/4CTracker",
"score": 2
}
|
#### File: jdmcgraw/4CTracker/label_video.py
```python
import numpy as np
from threading import Timer
import cv2
import pickle
import pyrealsense2 as rs
import os.path
from matplotlib import pyplot as plt
#from kmeans_clustering import KmeansClassifier
def clamp(value, min_value, max_value):
return max(min(value, max_value), min_value)
class LabelingTool:
def __init__(self, overwrite=False, time_length=0, perform_sampling=True, frames_to_label=100):
self.project = "marker"
self.distance_mult = 8
self.video_path = f"{self.project}.rgbd"
self.label_path = f"{self.project}.labels"
self.current_frame_index = 0
self.current_key_index = 0
self.current_frame = None
self.current_display = None
self.playback_speed = 0.2
self.key_colors = [(0, 0, 255),
(0, 106, 255),
(0, 216, 255),
(0, 255, 182),
(144, 255, 0),
(255, 148, 0),
(255, 0, 72)]
self.display_size = 512 # The size "we" view the image, regardless of actual image dimensions underneath
self.frames = []
self.model_frames = []
self.key_points = ['head', 'tail']
self.perform_sampling = perform_sampling
self.playback = False
self.color_view = True
self.overwrite = overwrite
cv2.namedWindow('Tool')
cv2.setMouseCallback('Tool', self.on_mouse)
if os.path.isfile(self.video_path):
print(f"[INFO] Loading {self.video_path}")
self.read_frames()
else:
raise FileNotFoundError(f"{self.video_path} not found. (Was it spelled correctly?)")
if self.perform_sampling:
clustered_frames = KmeansClassifier(self.frames, clusters=frames_to_label)
new_frames = []
# This allows us to optionally sample around the clustered points, rather than just individually
if time_length > 0:
for i in clustered_frames.get_clusters():
for j in range(-time_length, time_length):
time_frame = i+j
if 0 <= time_frame < len(self.frames):
new_frames.append(self.frames[time_frame])
self.frames = np.array(new_frames)
else:
self.frames = np.array([self.frames[k] for k in clustered_frames.get_clusters()])
# Negative Ones Array
self.frame_labels = np.ones(shape=(len(self.frames), len(self.key_points), 2)) * -1
if os.path.exists(self.label_path):
self.load_labels()
self.current_frame_index = 0
self.current_frame = self.frames[self.current_frame_index]
self.deliver_preview_frame(self.current_frame_index)
while True:
key = cv2.waitKey(0)
print(key)
if key == 8: # Backspace
self.frame_labels[self.current_frame_index][self.current_key_index] = np.array([-1, -1])
                self.deliver_preview_frame(self.current_frame_index)
if key == ord(' '):
self.playback = not self.playback
t = Timer(self.playback_speed, self.play)
t.start()
if key == ord('1'):
print(f"[MODE] View Toggled to {'Color' if not self.color_view else 'Depth'}")
self.color_view = not self.color_view
self.deliver_preview_frame(self.current_frame_index)
if key == ord('.'): # >
self.current_key_index += 1
self.current_key_index = clamp(self.current_key_index, 0, len(self.key_points)-1)
self.deliver_preview_frame(self.current_frame_index)
if key == ord(','): # <
self.current_key_index -= 1
self.current_key_index = clamp(self.current_key_index, 0, len(self.key_points)-1)
self.deliver_preview_frame(self.current_frame_index)
if key == ord('b'):
self.blur_current_frame()
            if key == 45 and not self.color_view:  # '-' key: decrease depth display gain
self.distance_mult = max(self.distance_mult - 1, 1)
self.deliver_preview_frame(self.current_frame_index)
            if key == 61 and not self.color_view:  # '=' key: increase depth display gain
self.distance_mult = min(self.distance_mult + 1, 10)
self.deliver_preview_frame(self.current_frame_index)
if key == ord('q'):
self.playback = False
print("[QUIT] Closing")
return
def play(self):
if self.current_frame_index >= len(self.frames):
self.current_frame_index = 0
self.deliver_preview_frame(self.current_frame_index)
self.current_frame_index += 1
if self.playback:
t = Timer(self.playback_speed, self.play)
t.start()
def blur_current_frame(self):
frame = self.frames[self.current_frame_index, :, :, :].copy()
blur = cv2.GaussianBlur(frame, (5, 5), 0)
self.frames[self.current_frame_index] = blur
        self.deliver_preview_frame(self.current_frame_index)
def get_color_frame(self, frame):
return self.frames[frame, :, :, :-1].copy()
def get_depth_frame(self, frame):
return self.frames[frame, :, :, -1].copy()
def save_labels(self):
with open(self.label_path, 'wb') as labels:
pickle.dump(self.frame_labels, labels)
def load_labels(self):
if self.overwrite:
return
with open(self.label_path, 'rb') as labels:
self.frame_labels = pickle.load(labels)
if len(self.frame_labels) > len(self.frames):
self.frames = []
def read_frames(self):
print("[INFO] Reading Frames...")
with open(self.video_path, 'rb') as in_file:
self.frames = np.array(pickle.load(in_file), dtype=np.uint8)
# Remove any black frames where the RealSense camera is initializing
self.frames = np.array([i for k, i in enumerate(self.frames) if np.mean(i[:2]) > 0])
print(f"[INFO] RGBD Video: {len(self.frames)} Frames")
def deliver_preview_frame(self, frame=0):
if self.color_view:
self.current_display = cv2.resize(self.get_color_frame(frame), (self.display_size, self.display_size))
else:
depth_frame = self.get_depth_frame(frame) * self.distance_mult
colored_depth = cv2.applyColorMap(depth_frame, cv2.COLORMAP_JET)
self.current_display = cv2.resize(colored_depth, (self.display_size, self.display_size))
#self.current_display = 1 - cv2.cvtColor(self.current_display, cv2.COLOR_GRAY2RGB)
text_pos = (int(0.03 * self.display_size), int(0.05 * self.display_size))
color = (255, 255, 255) if self.current_frame_index < len(self.frames)-1 else (0, 0, 255)
if self.playback:
color = (0, 255, 0)
cv2.putText(self.current_display, f'[Frame {self.current_frame_index+1}/{len(self.frames)}]',
text_pos, cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 1, cv2.LINE_AA)
text_pos = (int(0.03 * self.display_size), int(0.11 * self.display_size))
text_color = (0, 144, 255) if self.color_view else (255, 0, 255)
cv2.putText(self.current_display, f"[{'RGB' if self.color_view else 'Depth'} View]",
text_pos, cv2.FONT_HERSHEY_SIMPLEX, 0.8, text_color, 1, cv2.LINE_AA)
if self.current_key_index > 0:
text_pos = (int(0.1 * self.display_size), int(0.95 * self.display_size))
text_color = self.key_colors[self.current_key_index - 1]
cv2.putText(self.current_display, f"{self.key_points[self.current_key_index - 1]}".ljust(10),
text_pos, cv2.FONT_HERSHEY_SIMPLEX, 0.8, text_color, 1, cv2.LINE_AA)
text_pos = (int(0.4 * self.display_size), int(0.95 * self.display_size))
text_color = self.key_colors[self.current_key_index]
cv2.putText(self.current_display, f"<{self.key_points[self.current_key_index]}>".ljust(10),
text_pos, cv2.FONT_HERSHEY_SIMPLEX, 0.8, text_color, 1, cv2.LINE_AA)
if self.current_key_index < len(self.key_points)-1:
text_pos = (int(0.7 * self.display_size), int(0.95 * self.display_size))
text_color = self.key_colors[self.current_key_index + 1]
cv2.putText(self.current_display, f"{self.key_points[self.current_key_index + 1]}".ljust(10),
text_pos, cv2.FONT_HERSHEY_SIMPLEX, 0.8, text_color, 1, cv2.LINE_AA)
# Draw the current frame keypoints
for i, keypoint in enumerate(self.frame_labels[self.current_frame_index]):
pos = tuple(int(p * self.display_size) for p in keypoint)
key_color = self.key_colors[i]
cv2.circle(self.current_display, pos, 4, key_color, 1)
cv2.line(self.current_display, (pos[0] + 3, pos[1]), (pos[0] + 8, pos[1]), key_color)
cv2.line(self.current_display, (pos[0] - 3, pos[1]), (pos[0] - 8, pos[1]), key_color)
cv2.line(self.current_display, (pos[0], pos[1] + 3), (pos[0], pos[1] + 8), key_color)
cv2.line(self.current_display, (pos[0], pos[1] - 3), (pos[0], pos[1] - 8), key_color)
cv2.imshow('Tool', self.current_display)
def on_mouse(self, event, x, y, flags, param):
if not (x and y) or self.playback:
return
self.current_display = self.current_frame.copy()
        if event == 1:  # Click (cv2.EVENT_LBUTTONDOWN)
self.current_frame = self.frames[self.current_frame_index]
self.frame_labels[self.current_frame_index][self.current_key_index] = (x/self.display_size,
y/self.display_size)
self.current_frame_index = self.current_frame_index + 1
self.current_frame_index = int(clamp(self.current_frame_index, 0, len(self.frames) - 1))
self.deliver_preview_frame(self.current_frame_index)
self.save_labels()
if self.current_frame_index == len(self.frame_labels) and self.current_key_index < len(self.key_points)-1:
self.current_frame_index = 0
self.current_key_index += 1
self.current_key_index = clamp(self.current_key_index, 0, len(self.key_points) - 1)
self.deliver_preview_frame(self.current_frame_index)
if abs(flags) > 1: # Scroll
self.current_frame = self.frames[self.current_frame_index]
self.current_frame_index = self.current_frame_index + (np.sign(flags))
self.current_frame_index = int(clamp(self.current_frame_index, 0, len(self.frames) - 1))
self.deliver_preview_frame(self.current_frame_index)
tool = LabelingTool(overwrite=False, perform_sampling=False)
```
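The `.labels` pickle written by `save_labels` is just the `frame_labels` array: shape `(n_frames, n_keypoints, 2)`, click coordinates normalized to [0, 1], with -1 marking unlabeled points. A small sketch of loading it and counting labeled points per keypoint:
```python
import pickle
import numpy as np

with open('marker.labels', 'rb') as f:   # the label_path used by the tool above
    frame_labels = pickle.load(f)        # (n_frames, n_keypoints, 2)

labeled = (frame_labels >= 0).all(axis=2)          # True where both x and y were set
print('labeled points per keypoint:', labeled.sum(axis=0))
```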
#### File: jdmcgraw/4CTracker/predict_video.py
```python
import numpy as np
from threading import Timer
import cv2
import pickle
import torch
import torch.nn as nn
import numpy as np
import time
TARGET_MODEL_SIZE = 128
class VideoWriter:
def __init__(self, path, frame_size, codec="mp4v", fps=60.0, color=True):
codec = cv2.VideoWriter_fourcc(*codec)
self.stream = cv2.VideoWriter(path, codec, fps, frame_size, color)
def write(self, frame):
self.stream.write(frame)
def close(self):
self.stream.release()
return not self.stream.isOpened()
class DepthNet(nn.Module):
def __init__(self):
super(DepthNet, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(4, 16, kernel_size=5, stride=1, padding=1),
nn.LeakyReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=1),
nn.LeakyReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.fc1 = nn.Sequential(
nn.Linear((30**2) * 32, 512, bias=True),
nn.LeakyReLU())
self.drop_out = nn.Dropout()
self.fc2 = nn.Sequential(
nn.Linear(512, 256, bias=True),
nn.LeakyReLU())
self.fc3 = nn.Linear(256, 2)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.reshape(out.size(0), -1)
out = self.drop_out(out)
out = self.fc1(out)
out = self.drop_out(out)
out = self.fc2(out)
out = self.drop_out(out)
out = self.fc3(out)
return out
class ModelViewer:
def __init__(self, model_name, frame_source):
self.model = DepthNet()
self.project_model = model_name
self.project_frames = frame_source
self.model.load_state_dict(torch.load(f'{self.project_model}.net'))
self.model.eval()
with open(f'{self.project_frames}.rgbd', 'rb') as frames_in:
self.frames = np.array(pickle.load(frames_in))
self.current_frame_index = 0
cv2.namedWindow('Tool')
self.play()
while True:
key = cv2.waitKey(0)
if key == ord('q'):
return
def play(self):
if self.current_frame_index >= len(self.frames):
self.current_frame_index = 0
self.deliver_preview_frame(preview_size=512)
self.current_frame_index += 1
t = Timer(0.03, self.play)
t.start()
def deliver_preview_frame(self, preview_size, verbose=True):
torch_frames = torch.from_numpy(self.frames).type(torch.float).reshape(-1, 4, TARGET_MODEL_SIZE,
TARGET_MODEL_SIZE)
current_torch_frame = torch_frames[self.current_frame_index].reshape(1, 4, TARGET_MODEL_SIZE,
TARGET_MODEL_SIZE)
start_time = time.time()
x, y = self.model(current_torch_frame).data[0]
#if verbose:
# print(f"Executing Model at {round(60/((time.time() - start_time) * 1000), 1)}Hz")
im = self.frames[self.current_frame_index, :, :, :-1]
im_resize = cv2.resize(im, (preview_size, preview_size))
x_resize, y_resize = int(x * preview_size), int(y * preview_size)
cv2.circle(im_resize, (x_resize, y_resize), 3, (0, 0, 255), 2)
cv2.imshow('Tool', im_resize)
preview = ModelViewer("test2", "test2")
```
#### File: jdmcgraw/4CTracker/train_model - DenseNet.py
```python
import torch
import pickle
import torch.nn as nn
import numpy as np
import torch.optim as optim
from random import sample
import matplotlib.pyplot as plt
import cv2
from augment_image import *
from DenseNet import *
project = "marker"
print(f"CUDA available: {torch.cuda.is_available()}")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
with open(f'{project}.labels', 'rb') as labels_in:
labels = pickle.load(labels_in)
#num_keypoints = labels_dict.shape[1]
num_keypoints = labels.shape[1]
# Create dictionary temporarily; implement this in annotation script
labels_dict = {'head': labels[:,0,:], 'body': labels[:,1,:]}
# labels_dict = {'head': labels[:,0,:], 'body': labels[:,1,:], 'tail': labels[:,2,:]}
print(labels.shape)
with open(f'{project}.rgbd', 'rb') as frames_in:
frames = pickle.load(frames_in)
# Depth frame normalization and clipping for converting into uint8; implement user input functionality
clip_dist = 2000
np.clip(frames[:,:,:,3], 0, clip_dist, frames[:,:,:,3])
frames[:,:,:,3] = (((frames[:,:,:,3]/clip_dist))*255).astype(np.uint8)
frames = np.uint8(frames)
#frames = [frames[k] for k in range(len(frames)) if k in labels_dict.keys()]
print(len(frames))
frames = np.array(frames)
labels = np.array(labels)
num_frames = len(labels)
frame_size = frames[0].shape[0]
print(f"Frame Size: {frames.shape}")
print(f"Label Size: {labels.shape}")
#class DepthNet(nn.Module):
# def __init__(self):
# super(DepthNet, self).__init__()
# self.layer1 = nn.Sequential(
# nn.Conv2d(4, 16, kernel_size=5, stride=1, padding=1),
# nn.LeakyReLU(),
# nn.MaxPool2d(kernel_size=2, stride=2))
# self.layer2 = nn.Sequential(
# nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=1),
# nn.LeakyReLU(),
# nn.MaxPool2d(kernel_size=2, stride=2))
# self.fc1 = nn.Sequential(
# nn.Linear((30**2) * 32, 512, bias=True),
# nn.LeakyReLU())
# self.drop_out = nn.Dropout()
# self.fc2 = nn.Sequential(
# nn.Linear(512, 256, bias=True),
# nn.LeakyReLU())
# self.fc3 = nn.Linear(256, num_keypoints * 2)
#
# def forward(self, x):
# out = self.layer1(x)
# out = self.layer2(out)
# out = out.reshape(out.size(0), -1)
# out = self.drop_out(out)
# out = self.fc1(out)
# out = self.drop_out(out)
# out = self.fc2(out)
# out = self.drop_out(out)
# out = self.fc3(out)
# return out
#
#
#model = DepthNet()
model_name = 'se_densenet'
model = se_densenet(num_classes = 4)
model.to(device)
# Model Hyper-parameters
num_epochs = 500
batch_size = 32
learning_rate = 0.0001
loss_function = nn.MSELoss()
optimizer = optim.AdamW(model.parameters(), lr=learning_rate)
running_loss = float('inf')
loss_history = [float('inf')]
for epoch in range(num_epochs):
sample_size = min(batch_size, num_frames)
batch_indices = sample([k for k in range(num_frames)], sample_size)
frame_batch = np.array([frames[index] for index in batch_indices])
label_batch = np.array([labels[index] for index in batch_indices])
print(f'[Epoch: {epoch + 1}/{num_epochs}]\tLoss: {round(running_loss, 3)}')
running_loss = 0.0
for i in range(batch_size):
print("|", end='')
frame_batch_i = torch.from_numpy(frame_batch).type(torch.float).reshape(-1, 4, frame_size, frame_size)
# label_batch_i = torch.from_numpy(label_batch).type(torch.float).reshape(-1, 2)
# Reshape to have a format of "num_keypoints * 2" values per image as per predictions
label_batch_i = torch.from_numpy(label_batch).type(torch.float).reshape(-1, num_keypoints * 2)
inputs_i, labels_i = frame_batch_i.to(device), label_batch_i.to(device)
optimizer.zero_grad()
output = model(inputs_i)
# print(frames.shape, labels.shape, frame_batch.shape, label_batch.shape, frame_batch_i.shape, label_batch_i.shape, output.shape, labels_i.shape)
loss = loss_function(output, labels_i)
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % batch_size == batch_size - 1:
print("")
if running_loss < min(loss_history):
torch.save(model.state_dict(), f'{project}_{model_name}.net')
loss_history.append(running_loss)
print('[INFO] Finished Training')
torch.save(model.state_dict(), f'{project}_{model_name}.net')
with open(f'{project}_{model_name}.loss_hist', 'wb') as f:
np.save(f, loss_history)
plt.loglog(loss_history)
plt.title("Log-Log Loss History (Epoch vs. Loss)")
plt.show()
```
|
{
"source": "JDMcIninch/PantryDriveUp",
"score": 2
}
|
#### File: src/PantryDriveUp/server.py
```python
from flask import Flask, render_template, request
from pandas import read_excel
from datetime import datetime
import os
import pdfkit # pdfkit requires that wkhtmltopdf be installed in order to work
import platform
import shutil
import socket
NAME_DICTIONARY = {
'Fresh Food': 'fresh-food',
'Freezer Meats': 'freezer-meats',
'Freezer Bonus': 'freezer-bonus',
'Fridge': 'fridge',
'Canned Vegetables': 'canned-veg',
'Broth': 'broth',
'Canned Soup': 'canned-soup',
'Canned Meat': 'canned-meat',
'Beans & Lentils': 'beans',
'Juice': 'juice',
'Shelf-stable Milk': 'up-milk',
'Snacks': 'snacks',
'Pantry': 'pantry',
'Rice': 'rice',
'Canned Fruit': 'canned-fruit',
'Pantry 2': 'pantry-2',
'Breakfast': 'breakfast',
'Peanut Butter & Jelly': 'pbj',
'Canned Tomatoes': 'canned-tom',
'Bonus Items': 'bonus',
'Bonus Items 2': 'bonus-2',
'Personal Hygiene Items': 'hygiene',
'Paper Goods': 'paper',
'Snack Bags for Kids': 'snack_bags',
'Diapers & Pull-ups': 'diapers',
'Formula': 'formula',
'Baby Food': 'baby-food',
'Coffee/Tea/Cocoa': 'coffee',
'Vegetable Oil': 'oil'
}
if not os.path.isfile(os.path.expanduser('~/Desktop/DriveThruGroceryList.xlsx')):
if __name__ == '__main__':
shutil.copy(os.path.dirname(__file__) + '/static/DriveThruGroceryList.xlsx',
os.path.expanduser('~/Desktop/DriveThruGroceryList.xlsx'))
else:
        # The original importlib.resources import path never existed, so this
        # always fell through to setuptools' pkg_resources; import it directly.
        from pkg_resources import resource_filename
spreadsheet = resource_filename(__package__, 'static/DriveThruGroceryList.xlsx')
shutil.copy(spreadsheet, os.path.expanduser('~/Desktop/DriveThruGroceryList.xlsx'))
DriveThruGroceryList = read_excel(os.path.expanduser('~/Desktop/DriveThruGroceryList.xlsx'), engine='openpyxl')
app = Flask(__name__, static_url_path='/static')
def print_html(html, name):
""" Convert HTML markup to PDF and then send the PDF to the default printer.
Windows is unique in that it has no support on it's own for printing PDF
files, so users of Windows must install PDFtoPrinter from this URL:
http://www.columbia.edu/~em36/PDFtoPrinter.exe
"""
packing_list_path = os.path.join(os.path.expanduser('~'), 'Desktop', 'PackingLists')
if not os.path.isdir(packing_list_path):
try:
os.makedirs(packing_list_path, 0o777)
except Exception:
print('Failed to create directory {}; could not create PDF'.format(packing_list_path))
return
pdf_path = os.path.join(packing_list_path, '{0} {1}.pdf'.format(datetime.now().strftime('%Y-%m-%d'), name))
pdfkit.from_string(html, pdf_path, options={'page-size': 'Letter',
'zoom': '1.22',
'margin-bottom': '0',
'margin-left': '5',
'margin-right': '2'})
operating_system = platform.system()
    if operating_system in ['Darwin', 'Linux']:  # send to the printer on Mac or Linux
os.system('lp "{}"'.format(pdf_path))
# os.system('cp "{}" ~/Desktop/packing_list.pdf && open ~/Desktop/packing_list.pdf'.format(pdf_path))
elif operating_system == 'Windows':
os.system('PDFtoPrinter.exe "{}"'.format(pdf_path))
def my_ip_address():
""" Discover the current IP address (other than localhost) of this machine. """
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
except Exception:
ip = '127.0.0.1'
finally:
s.close()
return ip
@app.template_filter('shortname')
def shortname(section):
""" This is a Jinja2 filter that returns a short name for each grocery
list section (the section names must not change). """
return NAME_DICTIONARY[section]
@app.template_filter('simplify')
def simplify(stringlist):
""" This is a Jinja2 filter that takes a list of strings and concatenates them. """
return ', '.join(stringlist)
@app.route('/')
def form():
""" Get the grocery list form. """
return render_template('order_form.html', grocery_options=DriveThruGroceryList)
@app.route('/print', methods=['POST'])
def print_form():
""" Receive the grocery list, prepare a packing list, and print it. """
fam_color = {'1: Yellow': '#ffff00', '2-4: Blue': '#6464ff', '5+: Pink': '#ff69b4'}[request.form['family_size']]
grocery_list = request.form.to_dict(flat=False)
packing_list = render_template('packing_list.html',
grocery_list=grocery_list,
timestamp=datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f%z'),
fam_color=fam_color)
print_html(packing_list, request.form['full_name'])
return "Success"
@app.route('/reprint', methods=['GET', 'POST'])
def reprint_form():
""" (Unimplemented) Reprint a previous list. """
return 'This feature is currently not implemented.'
if __name__ == '__main__':
app.run(host=my_ip_address())
```
|
{
"source": "jdmejiav/Expression-Parser",
"score": 3
}
|
#### File: Expression-Parser/calculadora_python/main.py
```python
import math
import scanner
import parser
vars={}
def main():
while True:
vars["e"]=math.e
vars["pi"]=math.pi
entrada = input(">>> ")
        if entrada == "exit":
            break
else:
_var=""
_cond=""
_exp_1=""
_exp_2=""
_var,_cond,_exp_1,_exp_2=scanner.findTokens(entrada)
if not _var in vars:
for i in vars:
if i in _cond:
_cond=_cond.replace(i,str(vars[i]))
if i in _exp_1:
_exp_1=_exp_1.replace(i,str(vars[i]))
if i in _exp_2:
_exp_2=_exp_2.replace(i,str(vars[i]))
_cond=_cond.replace("log","l")
_exp_1=_exp_1.replace("log","l")
_exp_2=_exp_2.replace("log","l")
_cond=_cond.replace("sin","s")
_exp_1=_exp_1.replace("sin","s")
_exp_2=_exp_2.replace("sin","s")
value = 0.0
try:
cond = parser.evaluarCondiciones(_cond)
if cond:
value=parser.evaluarNumeros(_exp_1)
else:
value=parser.evaluarNumeros(_exp_2)
vars[_var]=value
print(value)
except:
print('Something went wrong')
continue
else:
print(vars[_var])
if __name__=="__main__":
main()
print("build finished")
```
|
{
"source": "jdmejiav/invest-AI",
"score": 4
}
|
#### File: jdmejiav/invest-AI/main.py
```python
from prediction_ability import predict
def stocks():
print("Ingresa el stock al que deseas predecir su comportamiento")
print("Ex:\nBitcoin -> BTC-USD\nApple -> AAPL\nTesla -> TSLA")
stock = input("Ingresa el código: ")
predict(stock)
if __name__ == '__main__':  # fixed: was '__main', so the loop never ran
    while True:
        print("Welcome to the investment agent; enter the number of the activity you want to perform")
        print("(5). Ability to predict stock behavior")
        choose = int(input("Enter the number: "))
if choose==5:
stocks()
```
|
{
"source": "jdmejiav/python-unal-execises",
"score": 3
}
|
#### File: python-unal-execises/U12/EJ5U12.py
```python
def limpiar(p):
p=p.lower()
p=p.strip('-')
p=p.strip('"')
p=p.lstrip('¿')
p=p.lstrip('¡')
p=p.rstrip('?')
p=p.rstrip('!')
p=p.rstrip(',')
p=p.rstrip('.')
p=p.rstrip(';')
p=p.rstrip(':')
return p
archivo = open('discurso.txt')
frecs = {}
maxFrec=0
for renglon in archivo:
    palabras = renglon.split()
    palabrasReng = []
    for palabra in palabras:
palabra = limpiar(palabra)
if palabra not in frecs and len(palabra)>4:
frecs[palabra]=1
palabrasReng.append(palabra)
else:
if len(palabra)>4 and palabra not in palabrasReng:
palabrasReng.append(palabra)
frecs[palabra]+=1
for k in sorted(frecs):
print(k+" "+str(frecs[k]))
```
|
{
"source": "jdmejiav/yabd-storage-system",
"score": 3
}
|
#### File: yabd-storage-system/follower/backup.py
```python
import json
import os
def save(data:dict,name):
with open(f'{name}.json', 'w') as fp:
json.dump(data, fp)
def get(name):
with open(f'{name}.json') as json_file:
data = json.load(json_file)
return data
def file_exists(name):
    # Check the filesystem directly instead of opening (and leaking) a handle
    return os.path.isfile(f'{name}.json')
def delete_f(dir):
if os.path.exists(dir):
os.remove(dir)
return True
else:
return False
```
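A minimal usage sketch for the helpers above (the store name `demo` is illustrative):

```python
# Round-trip a dict through the JSON-backed store defined above.
save({"key": "value"}, "demo")
assert file_exists("demo")
print(get("demo"))     # {'key': 'value'}
delete_f("demo.json")  # note: delete_f takes the full path, extension included
```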
#### File: yabd-storage-system/oldYadb/main.py
```python
import sys
from follower import Follower
from leader import Leader
import asyncio
import threading
def main():
PORT = 63333
HOST = "127.0.0.1"
follower = Follower(HOST,PORT)
follower2 = Follower(HOST,63334)
leader = Leader(HOST,PORT)
leader2 = Leader(HOST,63334)
file = open("ejemplo.txt")
f = str.encode(file.read())
    # Keep a handle on every thread; rebinding a single `th` variable meant
    # only the last thread was ever started.
    threads = [
        threading.Thread(target=leader.get_value, args=(f,)),
        threading.Thread(target=leader2.get_value, args=(f,)),
        threading.Thread(target=follower.handle_connection),
        threading.Thread(target=follower2.handle_connection),
    ]
    for th in threads:
        th.start()
    print("Well, it does work haha")
    for th in threads:
        th.join()
if __name__=='__main__':
main()
```
|
{
"source": "jd/mergify-engine",
"score": 2
}
|
#### File: mergify-engine/mergify_engine/json.py
```python
import enum
import json
_JSON_TYPES = {}
def register_type(enum_cls):
if enum_cls.__name__ in _JSON_TYPES:
raise RuntimeError(f"{enum_cls.__name__} already registered")
else:
_JSON_TYPES[enum_cls.__name__] = enum_cls
class Encoder(json.JSONEncoder):
def default(self, v):
if isinstance(v, enum.Enum):
return {
"__pytype__": "enum",
"class": type(v).__name__,
"name": v.name,
}
else:
return super().default(v)
def decode_enum(v):
if v.get("__pytype__") == "enum":
cls_name = v["class"]
enum_cls = _JSON_TYPES[cls_name]
enum_name = v["name"]
return enum_cls[enum_name]
return v
def dumps(v):
return json.dumps(v, cls=Encoder)
def loads(v):
return json.loads(v, object_hook=decode_enum)
```
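A round-trip sketch for the enum-aware encoder/decoder above (the `Color` enum is illustrative):

```python
import enum

class Color(enum.Enum):
    RED = 1
    BLUE = 2

register_type(Color)                  # make the class known to decode_enum
payload = dumps({"pick": Color.RED})  # -> '{"pick": {"__pytype__": "enum", ...}}'
assert loads(payload)["pick"] is Color.RED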
|
{
"source": "JDMGENJITSU671/Gamertag",
"score": 3
}
|
#### File: JDMGENJITSU671/Gamertag/availability.py
```python
import json
import re
import sys
import time
import requests
from colorama import Fore, init
def main():
startTime = time.time()
# Initialize Colorama.
init(autoreset=True)
print(Fore.CYAN + "Gamertag - Bulk Xbox Live Gamertag availability checker")
print(Fore.CYAN + "https://github.com/EthanC/Gamertag\n")
authorization, reservationID = LoadCredentials()
gamertags = LoadList()
print(f"Checking availability of {'{:,}'.format(len(gamertags))} gamertags...")
gamertags = VerifyGamertags(gamertags)
count = CheckAvailability(authorization, reservationID, gamertags)
if count >= 1:
print(Fore.GREEN + f"Saved {'{:,}'.format(count)} available gamertag(s)")
endTime = int(time.time() - startTime)
print(f"\nCompleted in {'{:,}'.format(endTime)}s")
def LoadCredentials():
"""Return credential values from credentials.json."""
try:
with open("credentials.json", "r") as credentialsFile:
credentials = json.load(credentialsFile)
authorization = credentials["authorization"]
reservationID = credentials["reservationID"]
return authorization, reservationID
except Exception as e:
print(Fore.RED + f"Failed to load credentials. {e}.")
def LoadList():
"""Return gamertags from list.txt."""
try:
with open("list.txt", "r") as listFile:
gamertagList = listFile.readlines()
gamertags = [gamertag.strip() for gamertag in gamertagList]
return gamertags
except Exception as e:
print(Fore.RED + f"Failed to load gamertag list. {e}.")
def VerifyGamertags(gamertags):
    """Return a list of gamertags which meet the Xbox Live gamertag specifications."""
    # Build a filtered copy; deleting from the list while iterating over it
    # (as before) skipped the entry that slid into the deleted slot.
    valid_gamertags = []
    for gamertag in gamertags:
        if len(gamertag) > 15:
            print(
                Fore.LIGHTBLACK_EX
                + f"Skipping gamertag {gamertag}, length {len(gamertag)} when maximum 15"
            )
            continue
        if not re.match("^[a-zA-Z0-9 ]+$", gamertag):
            print(
                Fore.LIGHTBLACK_EX
                + f"Skipping gamertag {gamertag}, contains invalid characters"
            )
            continue
        valid_gamertags.append(gamertag)
    return valid_gamertags
def CheckAvailability(authorization, reservationID, gamertags):
"""Return a list of gamertags which are available for purchase on Xbox Live."""
count = 0
for gamertag in gamertags:
headers = {"Authorization": authorization, "Content-Type": "application/json"}
payload = {"gamertag": gamertag, "reservationId": reservationID}
req = requests.post(
"https://user.mgt.xboxlive.com/gamertags/reserve",
headers=headers,
json=payload,
)
# HTTP 409 (Conflict).
if req.status_code == 409:
print(Fore.LIGHTBLACK_EX + f"Gamertag {gamertag} is unavailable")
# HTTP 200 (OK).
if req.status_code == 200:
print(Fore.GREEN + f"Gamertag {gamertag} is available")
SaveAvailable(gamertag)
count += 1
# HTTP 400 (Bad Request).
if req.status_code == 400:
print(
Fore.RED
+ f"Failed to check gamertag {gamertag} availability. HTTP {req.status_code}."
)
print(req.text)
# HTTP 401 (Unauthorized).
if req.status_code == 401:
print(
Fore.RED
+ f"Failed to check gamertag {gamertag} availability, not authorized. HTTP {req.status_code}."
)
print(req.text)
# HTTP 429 (Too Many Requests).
# Allowed 10 requests in 15 seconds OR 50 requests in 300 seconds.
if req.status_code == 429:
res = json.loads(req.text)
currentReq = res["currentRequests"]
maxReq = res["maxRequests"]
period = res["periodInSeconds"]
print(
Fore.RED
+ f"Rate Limited ({currentReq}/{maxReq} {period}s), sleeping for 15 seconds..."
)
time.sleep(15)
req.close()
# Ensure we're avoiding the 10 requests in 15 seconds rate limit.
time.sleep(1.5)
return count
def SaveAvailable(gamertag):
"""Write an available gamertag to the end of available.txt."""
try:
with open("available.txt", "a") as availableFile:
availableFile.write(f"{gamertag}\n")
except Exception as e:
print(Fore.RED + f"Failed to save list of available gamertags. {e}.")
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
sys.exit(0)
```
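The script expects a `credentials.json` beside it containing the two keys read by `LoadCredentials`; a sketch of writing one (the values are placeholders, not working tokens):

```python
import json

# Placeholder values; real ones come from an authenticated Xbox Live session.
credentials = {
    "authorization": "XBL3.0 x=<userhash>;<token>",
    "reservationID": "<reservation-guid>",
}
with open("credentials.json", "w") as credentialsFile:
    json.dump(credentials, credentialsFile, indent=4)
```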
|
{
"source": "jdmichaud/webseed",
"score": 3
}
|
#### File: webseed/backend/demorouter.py
```python
import sys
import ssl
import json
import time
import signal
import logging
import traceback
from optparse import OptionParser
from threading import Thread
from SimpleWebSocketServer import WebSocket, SimpleWebSocketServer, SimpleSSLWebSocketServer
import SimpleHTTPServer
import SocketServer
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
class ServerThread(Thread):
def __init__(self, serveFunction, name=''):
Thread.__init__(self)
self.serveFunction = serveFunction
self.name = name
def run(self):
logging.info('starting %s server' % self.name)
self.serveFunction()
class SimpleEmitter(WebSocket):
def handleMessage(self):
if self.data is None:
self.data = ''
try:
print "*** incoming message ***"
print self.data
print "*** end of incoming message ***"
except Exception as e:
print traceback.format_exc()
def handleConnected(self):
logging.info("WebSocket: %s connected" % self.address)
        self.sendMessage(json.dumps({"message": "newpage", "template": "index.html"}))
def handleClose(self):
logging.info("WebSocket: %s closed" % self.address)
class AWConnectionServer(SocketServer.BaseRequestHandler):
    # Note: TCPServer instantiates the handler class itself, so the previous
    # custom __init__ signature broke construction; reference the module-level
    # wsServer instead of storing it on the handler.
    def handle(self):
        # self.request is the TCP socket connected to the client
        self.data = self.request.recv(1024).strip()
        print "{} wrote:".format(self.client_address[0])
        print self.data
        # Forward the received data to every connected WebSocket client
        for client in wsServer.connections.values():
            client.sendMessage(self.data)
if __name__ == "__main__":
# Start the WebSocket server for full link communication with the tablet
cls = SimpleEmitter
wsServer = SimpleWebSocketServer("0.0.0.0", 9003, cls)
# Start WebServer for static content
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("0.0.0.0", 8000), Handler)
# SIGINT (Ctrl+C) handler
def close_sig_handler(signal, frame):
try:
logging.info("closing WebSocket server...")
wsServer.close()
except Exception as e:
logging.error('Exception while closing WebSocket server')
logging.info(e)
try:
logging.info("closing socket server ...")
awserver.shutdown()
except Exception as e:
logging.error('Exception while closing socket server')
logging.info(e)
try:
logging.info("closing HTTP server...")
# httpd.shutdown()
except Exception as e:
logging.error('Exception while closing HTTP server')
logging.info(e)
logging.info("Exiting...")
sys.exit()
signal.signal(signal.SIGINT, close_sig_handler)
# Start socket server for communication with AW
awserver = SocketServer.TCPServer(("0.0.0.0", 6666), AWConnectionServer)
# Start server
ServerThread(awserver.serve_forever, "socket").start()
ServerThread(wsServer.serveforever, "WebSocket").start()
# Wait for the HTTP server
logging.info('starting HTTP server')
httpd.serve_forever()
```
|
{
"source": "jdmillard/rocket-altitude",
"score": 3
}
|
#### File: jdmillard/rocket-altitude/plotter.py
```python
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
import time
class livePlotter:
"""
Class for plotting methods.
"""
def __init__(self, rocket, final_time, plot_real_time):
# store some inputs
self.plot_real_time = plot_real_time
self.tf = final_time
''' setup real time plot using pyqtgraph '''
self.app = QtGui.QApplication([])
# create the widget ("Graphics Window" allows stacked plots)
self.win = pg.GraphicsWindow(title="Live Plotting")
self.win.resize(1500,1000) # set window size
self.win.move(50,50) # set window monitor position
self.win.setWindowTitle('Altitude Controller Truth')
# enable antialiasing for prettier plots
pg.setConfigOptions(antialias=True)
# set some pen types
pen_green = pg.mkPen(color=(50, 255, 50, 255), width=2)
pen_green2 = pg.mkPen(color=(50, 255, 50, 255), width=1)
pen_blue = pg.mkPen(color=(50, 50, 255, 255), width=2, symbol='t')
pen_blue2 = pg.mkPen(color=(50, 50, 255, 255), width=1)
# FIRST SUBPLOT OBJECT
self.p1 = self.win.addPlot(title="Altitude vs. Time")
self.p1.setXRange(0,final_time,padding=0)
self.p1.setYRange(rocket.h*0.9,rocket.h_f*1.1,padding=0)
self.p1.setLabel('left', "Altitude (m)")
self.p1.setLabel('bottom', "Time (s)") # , units='s'
self.p1.showGrid(x=True, y=True)
self.meas1 = self.p1.plot(pen=pen_blue, name='Curve 1')
# SECOND SUBPLOT OBJECT
self.p2 = self.win.addPlot(title="Velocity vs. Time")
self.p2.setXRange(0,final_time,padding=0)
self.p2.setYRange(0,rocket.hd_0*1.1,padding=0)
self.p2.setLabel('left', "h_dot (m/s)")
self.p2.setLabel('bottom', "Time (s)")
self.p2.showGrid(x=True, y=True)
self.meas2 = self.p2.plot(pen=pen_blue, name='Curve 2')
# THIRD SUBPLOT OBJECT
self.p3 = self.win.addPlot(title="h_dot vs. h")
self.p3.setXRange(rocket.h*0.9,rocket.h_f*1.1,padding=0)
self.p3.setYRange(0,rocket.hd_0*1.1,padding=0)
self.p3.setLabel('left', "h_dot (m/s)")
self.p3.setLabel('bottom', "h (m)")
self.p3.showGrid(x=True, y=True)
self.p3.addLegend(offset=[-10,10])
self.meas3 = self.p3.plot(pen=pen_blue, name='Simulated Trajectory')
self.t_ref = self.p3.plot(pen=pen_green2, name='Reference Trajectory')
self.t_ref.setData(rocket.h_ref, rocket.hd_ref)
self.win.nextRow()
# FOURTH SUBPLOT OBJECT
self.p4 = self.win.addPlot(title="Theta Control Input")
self.p4.setXRange(0,final_time,padding=0)
self.p4.setYRange(0,rocket.th_max*1.1,padding=0)
self.p4.setLabel('left', "theta (deg)")
self.p4.setLabel('bottom', "time (s)")
self.p4.showGrid(x=True, y=True)
self.p4.addLegend(offset=[-10,10])
self.meas4 = self.p4.plot(pen=pen_blue, name='Current Theta')
self.meas4a = self.p4.plot(pen=pen_green2, name='Desired Theta')
# FIFTH SUBPLOT OBJECT
self.p5 = self.win.addPlot(title="Error vs. Time")
#self.p5.setLogMode(False,True)
self.p5.setXRange(0,final_time,padding=0)
#self.p5.setYRange( , ,padding=0)
self.p5.setLabel('left', "Velocity Error (m/s)")
self.p5.setLabel('bottom', "Time (s)")
self.p5.showGrid(x=True, y=True)
self.meas5 = self.p5.plot(pen=pen_green, name='Curve 6')
# SIXTH SUBPLOT OBJECT
self.p6 = self.win.addPlot(title="Error vs. Height")
self.p6.setXRange(rocket.h*0.9,rocket.h_f*1.1,padding=0)
#self.p6.setYRange(rocket.h*0.9,rocket.h_f*1.1,padding=0)
self.p6.setLabel('left', "Velocity Error (m/s)")
self.p6.setLabel('bottom', "h (m)")
self.p6.showGrid(x=True, y=True)
self.meas6 = self.p6.plot(pen=pen_green, name='Curve 6')
# show the plot by calling an update
# it is needed twice (to force display on first iteration) - not sure why
# either method below works, but the app handle method is better practice
self.app.processEvents() #pg.QtGui.QApplication.processEvents()
self.app.processEvents() #pg.QtGui.QApplication.processEvents()
# start timer
self.time0 = time.time()
# method for updating data
def updateItems(self, rocket, sim_time, current_time):
# override the waiting constraint
if self.plot_real_time:
actual_time = current_time - self.time0
else:
actual_time = sim_time
if self.plot_real_time or rocket.hd <= 0 or sim_time==self.tf:
# plot no faster than actual time
# NOTE: simulation can get slower than real time
if actual_time < sim_time:
# pause to wait for actual time to catch up
time.sleep(sim_time-actual_time)
# get time and h for the rocket
x = rocket.t_all[0:rocket.i]
y = rocket.h_all[0:rocket.i]
self.meas1.setData(x,y)
# get time and h_dot for the rocket
#x = rocket.t_all[0:rocket.i] # x is already this
y = rocket.hd_all[0:rocket.i]
self.meas2.setData(x,y)
# get h and h_dot for the rocket
x = rocket.h_all[0:rocket.i]
#y = rocket.hd_all[0:rocket.i] # y is already this
self.meas3.setData(x,y)
# get time and theta for the air brake
x = rocket.t_all[0:rocket.i]
y = rocket.th_all[0:rocket.i]
self.meas4.setData(x,y)
# get time and theta_cmd for the air brake
#x = rocket.t_all[0:rocket.i]
y = rocket.th_cmd_all[0:rocket.i]
self.meas4a.setData(x,y)
# get time and e_hd for the rocket
#x = rocket.t_all[0:rocket.i]
y = rocket.e_hd[0:rocket.i]
self.meas5.setData(x,y)
# get h and e_hd for the rocket
x = rocket.h_all[0:rocket.i]
#y = rocket.e_hd[0:rocket.i]
self.meas6.setData(x,y)
# update the plotted data
self.app.processEvents() #pg.QtGui.QApplication.processEvents()
# hold plot when rocket reaches maximum height
if rocket.hd <= 0 or sim_time==self.tf:
print("simulation finished")
print("rocket altitude:", rocket.h, "m")
print("simulation time:", sim_time, "s")
#print("real time: ", current_time - self.time0, " s")
while 1:
self.app.processEvents() #pg.QtGui.QApplication.processEvents()
self.app.exec_() # hold final plot
#time.sleep(5)
# method for generating 2d ellipse for a given covariance
def generateEllipse(self, P):
# fill in ellipse generation here
return 3
```
|
{
"source": "jdmoldenhauer/python-server-sdk",
"score": 2
}
|
#### File: python-server-sdk/testing/test_config.py
```python
from ldclient.config import Config
def test_copy_config():
old_sdk_key = "OLD_SDK_KEY"
new_sdk_key = "NEW_SDK_KEY"
old_config = Config(sdk_key=old_sdk_key, stream=False)
assert old_config.sdk_key is old_sdk_key
assert old_config.stream is False
new_config = old_config.copy_with_new_sdk_key(new_sdk_key)
assert new_config.sdk_key is new_sdk_key
assert new_config.stream is False
def test_can_set_valid_poll_interval():
config = Config(sdk_key = "SDK_KEY", poll_interval = 31)
assert config.poll_interval == 31
def test_minimum_poll_interval_is_enforced():
config = Config(sdk_key = "SDK_KEY", poll_interval = 29)
assert config.poll_interval == 30
def test_can_set_valid_diagnostic_interval():
config = Config(sdk_key = "SDK_KEY", diagnostic_recording_interval=61)
assert config.diagnostic_recording_interval == 61
def test_minimum_diagnostic_interval_is_enforced():
config = Config(sdk_key = "SDK_KEY", diagnostic_recording_interval=59)
assert config.diagnostic_recording_interval == 60
```
|
{
"source": "jdmonaco/grid-remapping-model",
"score": 2
}
|
#### File: src/analysis/compare.py
```python
import numpy
from scipy.stats import pearsonr
# Package imports
from .map_funcs import remap_quiver_plot
from ..tools.setops import intersection, difference, union, symmetric_difference
def compare_AB(A, B, sparsity=0.614):
"""
Perform several analyses on spatial maps A and B, returning a dict of the
results for visualization and analysis.
Arguments:
A,B -- PlaceMap subclass instances of spatial maps to be compared
sparsity -- expected spatial map sparsity for computing turnover
"""
results = {}
# Get data for active units
udata_A = results['udata_A'] = A.get_unit_data()
udata_B = results['udata_B'] = B.get_unit_data()
# Indices of units active in both rooms
AB_active = intersection(udata_A['unit'], udata_B['unit'])
results['num_active'] = num_active = AB_active.shape[0]
results['frac_active'] = num_active / float(A.num_maps)
results['A_active'] = A.num_active
results['B_active'] = B.num_active
# Allocate paired distance arrays
num_pairs = num_active*(num_active-1)/2
D_A = results['D_A'] = numpy.empty(num_pairs, 'd')
D_B = results['D_B'] = numpy.empty(num_pairs, 'd')
R_A = results['R_A'] = numpy.empty(num_pairs, 'd')
R_B = results['R_B'] = numpy.empty(num_pairs, 'd')
# Compute pair-wise positional and rate distances in both rooms
ix = 0
for i in xrange(num_active):
x_iA, y_iA, r_iA = A.maxima[AB_active[i]]
x_iB, y_iB, r_iB = B.maxima[AB_active[i]]
for j in xrange(i+1, num_active):
x_jA, y_jA, r_jA = A.maxima[AB_active[j]]
x_jB, y_jB, r_jB = B.maxima[AB_active[j]]
D_A[ix] = numpy.sqrt((x_iA-x_jA)**2 + (y_iA-y_jA)**2)
D_B[ix] = numpy.sqrt((x_iB-x_jB)**2 + (y_iB-y_jB)**2)
R_A[ix] = (r_iA - r_jA) / (r_iA + r_jA)
R_B[ix] = (r_iB - r_jB) / (r_iB + r_jB)
ix += 1
# Distribution of remapped distances for active units
D_AB = results['D_AB'] = numpy.empty(num_active, 'd')
D_AB[:] = numpy.sqrt(
((A.maxima[AB_active] - B.maxima[AB_active])**2).sum(axis=1))
# Store peak locations for active units in both maps
results['A_xy'] = A.maxima[AB_active, :2].T
results['B_xy'] = B.maxima[AB_active, :2].T
# Distribution of active unit rate remapping strength: (max-min)/max
R_AB = results['R_AB'] = numpy.empty(num_active, 'd')
peak_rates = numpy.c_[A.maxima[AB_active, 2], B.maxima[AB_active, 2]]
r_max, r_min = peak_rates.max(axis=1), peak_rates.min(axis=1)
R_AB[:] = (r_max - r_min) / r_max
# Active environment counts
counts = results['env_counts'] = numpy.zeros((2, 3), 'i')
counts[0] = 0, 1, 2
counts[1,0] = difference(numpy.arange(A.num_maps),
union(udata_A['unit'], udata_B['unit'])).shape[0]
counts[1,1] = symmetric_difference(udata_A['unit'],
udata_B['unit']).shape[0]
counts[1,2] = num_active
# Compute independent "turnover" as 1-RMSD from expected random turnover
E_rand = numpy.array([sparsity**2, 2*sparsity*(1-sparsity), (1-sparsity)**2])
E0 = numpy.array([sparsity, 0, 1-sparsity])
RMSD = lambda cbar: numpy.sqrt(((cbar - E_rand)**2).mean())
results['turnover'] = 1 - RMSD(counts[1]/float(A.num_maps)) / RMSD(E0)
# Population ratemap correlation coefficient
results['ratecorr'] = pearsonr(A.Map.flatten(), B.Map.flatten())[0]
# Active pair-wise distance correlation: positional remapping strength
results['remapping'] = 1.0 - pearsonr(D_A, D_B)[0]
# Active unit peak-rate correlation: rate remapping strength
results['rate_remapping'] = 1.0 - pearsonr(R_A, R_B)[0]
return results
def compare_AB_figure(r, f=None):
"""
Visualize some A-B spatial map comparison data in a figure
"""
from matplotlib.pyplot import figure, draw
figsize = (13, 10)
if f is None:
f = figure(figsize=figsize)
else:
f.clf()
f.set_size_inches(figsize)
    # Plot the inter-environment paired distance scatter plot
remap_plot = f.add_subplot(321)
remap_plot.plot(r['D_A'], r['D_B'], 'b.', ms=1)
remap_plot.plot([0, numpy.sqrt(2)*100], [0, numpy.sqrt(2)*100], 'k:')
remap_plot.axis([0, numpy.sqrt(2)*100, 0, numpy.sqrt(2)*100])
remap_plot.set_xlabel('D(A) (cm)', size='smaller')
remap_plot.set_ylabel('D(B) (cm)', size='smaller')
remap_plot.text(3, numpy.sqrt(2)*90, '1 - r = %.2f'%r['remapping'])
    # Plot the inter-environment paired rate difference scatter plot
rate_plot = f.add_subplot(323)
rate_plot.plot(r['R_A'], r['R_B'], 'b.', ms=1)
rate_plot.axis('tight')
rate_plot.set_xlabel('R(A)', size='smaller')
rate_plot.set_ylabel('R(B)', size='smaller')
rate_plot.text(0.05, 0.9, '1 - r = %.2f'%r['rate_remapping'],
transform=rate_plot.transAxes)
# Plot a histogram of inter-environment remapping distances
dist_hist = f.add_subplot(3, 4, 9)
dist_hist.hist(r['D_AB'], bins=15, histtype='step',
edgecolor='g', lw=2)
dist_hist.set_xlabel('Remapped Distance', size='smaller')
dist_hist.set_ylabel('Count', size='smaller')
v = dist_hist.axis()
dist_hist.set_ylim(ymax=v[3]+3)
rate_hist = f.add_subplot(3, 4, 10)
rate_hist.hist(r['R_AB'], bins=15, histtype='step',
edgecolor='r', lw=2)
rate_hist.set_xlabel('Rate Remapping', size='smaller')
# Bar chart of environment counts (# envs where ith cell active)
# env_plot = f.add_subplot(427)
# env_plot.plot(r['env_counts'][0], r['env_counts'][1], 'kd', ms=12, mew=3,
# mec='k', mfc='w')
# env_plot.set_xticks(r['env_counts'][0])
# env_plot.set_xticklabels(['None', 'Single', 'Both'])
# env_plot.set_xlabel('Environmental Activity', size='smaller')
# env_plot.set_ylabel('# Cells', size='smaller')
# env_plot.axis([-0.5, 2.5, 0, 1.1*max(r['env_counts'][1])])
# env_plot.grid(True)
# Remapping quiver plot on the right column
quiver_plot = f.add_subplot(122)
remap_quiver_plot(r, ax=quiver_plot, rate_colors=True, border_style=False)
quiver_plot.set_xlabel('X (cm)', size='smaller')
quiver_plot.set_ylabel('Y (cm)', size='smaller')
quiver_plot.set_title('Positional/Rate Remapping Vectors')
draw()
return f
```
#### File: src/analysis/map_funcs.py
```python
import scipy.fftpack, numpy
from scipy.stats import pearsonr
def get_tuned_weights(pmap, W, EC, alpha=0.5, grow_synapses=False):
"""
Perform afferent tuning on the weight matrix and return new weights
Required parameters:
pmap -- a PlaceMap object resulting from the spatial map simulation
    W -- the afferent weight matrix used in the simulation
EC -- the GridCollection instance used as input in the simulation
Keyword arguments:
    alpha -- 0.0 to 1.0 value of how much tuning to apply (default 0.5)
"""
norm = numpy.sqrt((W[0]**2).sum(axis=0))
W0 = W / norm
W1 = numpy.empty((pmap.num_maps, EC.num_maps), 'd')
for i in xrange(pmap.num_maps):
W1[i] = numpy.tanh(3*(pmap.maxima[i,2]-0.5)) * \
EC.map_value(pmap.maxima[i,0], pmap.maxima[i,1])
if not grow_synapses:
W1[i] *= W0[i] > 0.0
W1[i] /= numpy.sqrt((W1[i]**2).sum(axis=0)) # normalize
W2 = (1.0-alpha)*W0 + alpha*W1 # mixed old and tuned matrices
for i in xrange(pmap.num_maps):
        W2[i] *= norm / numpy.sqrt((W2[i]**2).sum(axis=0)) # heterosynaptic LTD
return W2
def remap_quiver_plot(cmp_AB, ax=None, rate_colors=False,
border_style=True, arrow_width=None, **kwextra):
"""
Draw a remapping quiver plot for spatial map comparison data
Requires a compare_AB dictionary as first argument.
Keyword arguments:
ax -- if specified, quiver plot is drawn to the given axes, otherwise
a new figure and axes are created
rate_colors -- whether to color the arrows based on rate remapping
border_style -- if *rate_colors* is True, whether to use a black-bordered
arrow or not (if so, the Reds colormap is used; otherwise, a RKB
diffmap is used)
Additional keywords are passed to the quiver call.
"""
from matplotlib.pyplot import figure, axes, draw
if ax is None:
f = figure()
ax = axes()
# Set vector components for drawing arrows
X, Y = cmp_AB['A_xy']
U, V = cmp_AB['B_xy'] - cmp_AB['A_xy']
args = (X, Y, U, V)
# Calculate rate remapping vector for colors: (max-min)/max
if rate_colors:
C = cmp_AB['R_AB']
args += (C,)
# Set keyword arguments to format the quiver field
if arrow_width is None:
set_width = 0.5 # set width here
else:
set_width = arrow_width
kwargs = { 'units':'x', # scale based on data range
'scale':1, # data per arrow unit
'width':set_width, # arrow units
'headwidth':4, # width units
'headlength':5, # width units
'headaxislength':4, # width units
'minshaft':1, # headlength units, scaling threshold
'minlength':2.5/set_width } # width units, display threshold
if rate_colors:
color_lims = numpy.array([0.0, 1.0])
if border_style:
from matplotlib import cm
kwargs.update({
'cmap':cm.Reds, # colormap for arrows
'clim':color_lims, # colors on a (0,1) scale
'edgecolor':'k', # arrow outline color
'lw':0.5 }) # arrow outline line-width
else:
from ..tools.colormaps import diffmap
kwargs.update({
'headwidth':4.0, # scale up head with no borders
'headlength':5.0, #
'headaxislength':3.8, #
'cmap':diffmap(use_black=True),
'clim':color_lims, # colors on a (0,1) scale
'lw':0.0 }) # arrow outline line-width
kwargs.update(kwextra)
# Execute the quiver command and draw the plot
ax.cla()
q = ax.quiver(*args, **kwargs)
ax.axis('image')
ax.axis([0, 100, 0, 100])
draw()
return q
def scatter_linreg_plot(x, y, ax=None, label='data', d_fmt='b.', l_fmt='k-',
d_kw={}, l_kw={}):
"""Draw a scatter plot with linear regression fit line
Keyword arguments:
ax -- if specified, scatter plot is drawn to the given axes, otherwise
a new figure and axes are created
label -- label for this scatter data if a legend is created
d_fmt/l_fmt -- format specifier for data and line, respectively
d_kw/l_kw -- additional keyword dictionaries for the plot calls
Prints Pearson r and corresponding p-value to console.
Returns the Pearson r coefficient.
"""
assert len(x) == len(y), 'scatter data must be same length'
from matplotlib.pyplot import figure, axes, draw
from scipy.stats import linregress
if ax is None:
f = figure()
ax = axes()
# Draw the scatter data
ax.plot(x, y, d_fmt, label=label, **d_kw)
# Get the linear regression
m, b, r, p, sem = linregress(x, y)
print '(r = %.4f, p = %.4e)' % (r, p)
x0 = numpy.array([x.min(), x.max()], 'd')
y0 = m * x0 + b
# Plot the regression line
ax.plot(x0, y0, l_fmt, zorder=-1, label='_nolegend_', **l_kw)
draw()
return r
def spatial_corr(*args):
"""2D spatial autocorrelation of rank-3 population arrays
Pass in a single z-stack [(num_maps, H, W)-shaped rank-3] array to compute
and return its spatial autocorrelogram.
Pass in two z-stack maps (e.g., A and B) to compute the cross-correlogram
of A with respect to B.
"""
# Handle input arguments
if len(args) == 0 or len(args) > 2:
raise ValueError, 'requires one or two arguments'
if len(args) == 1:
A = B = args[0]
else:
A, B = args
assert A.shape == B.shape, 'shape mismatch between input arrays'
assert A.ndim == 3, 'input arrays must be rank-3'
# Map and correlogram dimensions
num_maps, H, W = A.shape
corr_shape = 2*H-1, 2*W-1
    # Correlation theorem: multiply the FFT of A by the FFT of a flipped B,
    # sum over units, and invert; flipping B yields correlation, not convolution
    A_ = scipy.fftpack.fft2(A, shape=corr_shape)
    B_ = scipy.fftpack.fft2(B[:, ::-1, ::-1], shape=corr_shape)
    AB_conv = (A_ * B_).sum(axis=0)
    return numpy.real(scipy.fftpack.ifft2(AB_conv)) / num_maps
def linearize_spatial_corr(Mcorr):
"""Perform a radial collapse of a 2D spatial autocorrelation to get a
linear autocorrelation
NOTE: This should not be used for cross-correlations!
Mcorr should be a 199x199 autocorrelogram of a 100x100 map.
Returns 2-row autocorrelation (lag, corr) array.
"""
assert type(Mcorr) is numpy.ndarray, 'bad type for matrix argument'
assert Mcorr.shape == (199,199), 'invalid shape for autocorrelation matrix'
# Scan the correlogram and compute the radius from the midpoint
n = numpy.zeros(101, 'h')
c = numpy.zeros(101, 'd')
mid_x, mid_y = 99.5, 99.5
for i in xrange(199):
for j in xrange(199):
d = numpy.sqrt((mid_y - i)**2 + (mid_x - j)**2)
if d > 100:
d = 100
n[int(d)] += 1
c[int(d)] += Mcorr[i, j]
c /= n # get the sample means
# Create the return array: reflect 0->Max correlations to -Max->0
Lcorr = numpy.zeros((2,201), 'd')
Lcorr[0] = numpy.arange(-100, 101)
Lcorr[1] = numpy.r_[c[::-1], c[1:]]
return Lcorr
def peak_vs_neighbors(pmap, k=4, median_dist=True, use_primary=False):
"""Compute scatter data for looking at the relationship between field peak
rates and a measure of nearest neighbor distance.
A PlaceMap (or subclass) instance must be passed in.
Keyword arguments:
k -- number of nearest neighbors to factor into the measure
median_dist -- use the median neighbor distance (if False, the maximum
distance of the k-neighbors is used)
use_primary -- only use primary place fields (most active field per unit)
Returns 2-row concatenation of peaks and neighbor-distance arrays.
"""
# Get field centroids and peak rates from the spatial map
if use_primary:
udata = pmap.get_unit_data()
x, y, peaks = udata['max_x'], udata['max_y'], udata['max_r']
nfields = len(udata)
else:
fdata = pmap.get_field_data()
x, y, peaks = fdata['x'], fdata['y'], fdata['peak']
nfields = len(fdata)
# Main loop through place fields
neighbor_dists = numpy.empty(nfields, 'd')
for f in xrange(nfields):
d = numpy.sqrt((x - x[f])**2 + (y - y[f])**2)
nearest_k = numpy.argsort(d)[1:k+1]
if median_dist:
neighbor_dists[f] = numpy.median(d[nearest_k])
else:
neighbor_dists[f] = d[nearest_k[-1]]
return numpy.c_[peaks, neighbor_dists].T
def peaks_vs_area(pmap):
"""Get scatter data for field peak rates vs field area in cm^2
A PlaceMap (or subclass) instance must be passed in.
Returns 2-row (peak, area) array.
"""
fdata = pmap.get_field_data()
return numpy.c_[fdata['peak'], fdata['area']].T
def secondary_field_data(pmap):
"""Get scatter data for normalized secondary peak vs. distance from primary
A PlaceMap (or subclass) instance must be passed in.
Returns 2-row (primary normed rate, primary distance) array.
"""
# Get place field data from spatial map
fdata = pmap.get_field_data()
units = numpy.unique(fdata['unit'])
# Find dual fields and store data
norm_peak = []
primary_dist = []
for u in units:
ix = (fdata['unit'] == u).nonzero()[0]
if len(ix) <= 1:
continue
fields = fdata[ix]
sort_ix = numpy.argsort(fields['peak'])
P = fields[sort_ix[-1]]
for S in fields[sort_ix[:-1]]:
norm_peak.append(S['peak']/P['peak'])
primary_dist.append(
numpy.sqrt((P['x']-S['x'])**2 + (P['y']-S['y'])**2))
# Return array data
return numpy.c_[numpy.array(norm_peak), numpy.array(primary_dist)].T
def input_vs_output_norms(EC, R):
"""Get scatter data for input and output population vector norms
Arguments:
EC -- the GridCollection cortical object used in the simulation
R -- the PlaceMap object containing the output spatial map
Returns 2-row (|EC|, |R|) scatter array.
"""
return numpy.c_[numpy.sqrt((EC.Map * EC.Map).sum(axis=0)).flatten(),
numpy.sqrt((R.Map * R.Map).sum(axis=0)).flatten()].T
def haff_vs_r_peaks(ca3, pmap=None, norm=False):  # norm is deprecated (see below)
"""Get input-output per-field scatter data to show effects of competition
Arguments:
ca3 -- PlaceNetwork model instance to run comparison data
pmap -- precomputed PlaceMap for ca3 [optional: if omitted, the spatial
map is computed]
Returns 2-row concatenation of field h_aff^i vs. r_i scatter points.
"""
# Deprecate norm keyword
if norm:
import warnings
warnings.warn('The \'norm\' keyword argument is deprecated.')
# Compute the spatial map if not passed in
if pmap is None:
from ..ratemap import CheckeredRatemap
pmap = CheckeredRatemap(ca3)
pmap.compute_coverage()
# Get field data
fdata = pmap.get_field_data()
x, y, peaks, unit = fdata['x'], fdata['y'], fdata['peak'], fdata['unit']
nfields = len(fdata)
# Main loop through place fields
h_aff = numpy.empty(nfields, 'd')
beta = ca3.gamma * ca3.beta_EC
for f in xrange(nfields):
r_EC = ca3.get_afferent_input(x[f], y[f])
h_aff[f] = beta * numpy.dot(ca3.W[unit[f]], r_EC)
return numpy.c_[h_aff, peaks].T
def field_comparison_matrix(pmap, which='overlap'):
"""Get a matrix of pair-wise comparisons of single-max fields
A PlaceMap (or subclass) instance must be passed in. The units are sorted
by the quadrant into which their respective peaks fall.
Keyword arguments:
which -- what sort of comparison to perform: 'overlap' (default) for
a pixel overlap count, 'sim' for cosine similarity
Returns a NxN matrix where N is the number of active place-units.
"""
if which not in ('overlap', 'sim'):
raise ValueError, 'invalid comparison type specified by which keyword'
# Get indices of active units
udata = pmap.get_unit_data()
x, y, units = udata['max_x'], udata['max_y'], udata['unit']
# Spatial sort of units based on peak location
AND = numpy.logical_and
mid_x, mid_y = 50.0, 50.0
ll = AND(x<mid_x, y<mid_y).nonzero()[0]
lr = AND(x>=mid_x, y<mid_y).nonzero()[0]
ul = AND(x<mid_x, y>=mid_y).nonzero()[0]
ur = AND(x>=mid_x, y>=mid_y).nonzero()[0]
units = units[numpy.r_[ll, lr, ul, ur]]
# Set up the matrix
nfields = pmap.num_active
M = numpy.empty((nfields,)*2, 'd')
# Main loop pair-wise for fields
    if which == 'overlap':
print 'Pixel overlap matrix...'
for i in xrange(nfields):
i_map = pmap.single_maps[units[i]].astype(bool)
for j in xrange(i, nfields):
j_map = pmap.single_maps[units[j]].astype(bool)
M[i, j] = M[j, i] = (i_map * j_map).sum()
    elif which == 'sim':
print 'Field vector cosine matrix...'
for i in xrange(nfields):
i_map = pmap.single_maps[units[i]].flatten()
i_norm = numpy.sqrt(numpy.dot(i_map, i_map))
for j in xrange(i, nfields):
j_map = pmap.single_maps[units[j]].flatten()
j_norm = numpy.sqrt(numpy.dot(j_map, j_map))
M[i, j] = M[j, i] = numpy.dot(i_map, j_map) / (i_norm * j_norm)
return M
def linear_rate_corr_matrix(R, which='corrcoef'):
"""Get a correlation matrix of the population rate vector for a line
scanned through the environment (a diagonal by default)
Arguments:
R -- the 3-index population rate matrix of responses
which -- specify 'corrcoef' for Pearson correlations or 'sim' for cosine
vector similarities
Returns a NxN matrix where N is the number of pixel in a diagonal scan.
"""
if which not in ('corrcoef', 'sim'):
raise ValueError, 'invalid comparison type specified by which keyword'
# Set us up the matrix
npixels = 100
M = numpy.empty((npixels,)*2, 'd')
# Scan the diagonal from (0,0) to (100,100)
    if which == 'corrcoef':
print 'Pearson correlation matrix...'
for i in xrange(npixels):
r_i = R[:,npixels-i-1, i]
for j in xrange(npixels):
r_j = R[:,npixels-j-1, j]
r_corr = pearsonr(r_i, r_j)[0]
if numpy.isnan(r_corr) or r_corr < 0:
M[i, j] = M[j, i] = 0.0
else:
M[i, j] = M[j, i] = r_corr
    elif which == 'sim':
print 'Cosine similarity matrix...'
for i in xrange(npixels):
r_i = R[:,npixels-i-1, i]
r_i_norm = numpy.sqrt(numpy.dot(r_i, r_i))
for j in xrange(npixels):
r_j = R[:,npixels-j-1, j]
r_j_norm = numpy.sqrt(numpy.dot(r_j, r_j))
r_sim = numpy.dot(r_i, r_j) / (r_i_norm * r_j_norm)
if numpy.isnan(r_sim):
M[i, j] = M[j, i] = 0.0
else:
M[i, j] = M[j, i] = r_sim
else:
raise ValueError, 'invalid correlation measure specified: \'%s\''%which
return M
```
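A small sketch exercising `spatial_corr` and `linearize_spatial_corr` on synthetic data, assuming the functions above are importable; the shapes are chosen to satisfy the 100x100 map assumption:

```python
import numpy

# Five synthetic 100x100 maps; the autocorrelogram is then 199x199,
# exactly what linearize_spatial_corr expects.
stack = numpy.random.rand(5, 100, 100)
acorr = spatial_corr(stack)                 # (199, 199) autocorrelogram
lags, corr = linearize_spatial_corr(acorr)  # rows: lag (-100..100), mean correlation
xcorr = spatial_corr(stack, stack[:, ::-1]) # two-argument cross-correlogram variant
```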
#### File: src/analysis/realign.py
```python
from IPython.kernel import client as IPclient
import numpy as N, scipy as S, os, gc
# Package imports
from ..place_network import PlaceNetworkStd
from ..ratemap import CheckeredRatemap
from ..dmec import GridCollection
from ..tools.interp import BilinearInterp2D
from ..tools.string import snake2title
from .sweep import SingleNetworkSweep
from .compare import compare_AB
# Enthought imports
from enthought.traits.api import Enum
from enthought.chaco.api import ArrayPlotData, HPlotContainer, Plot
def run_sample_point(save_file, d_x, d_y):
gc.collect()
# Create the modules index arrays
mods = nmodules
if x_type == 'modules':
mods = int(d_x)
elif y_type == 'modules':
mods = int(d_y)
modules = EC.get_modules(mods, freq_sort=freq_modules)
# Reset grids and activate transforms if necessary
EC.reset()
if 'ellipticity' in (x_type, y_type):
EC.ellipticity = True
if 'zoom' in (x_type, y_type):
EC.zoom = True
# Modulate grid responses according to realignment parameters
for m, m_ix in enumerate(modules):
# Handle x-axis realignment
if x_type == 'shift':
EC.shift(d_x * delta_phi[m], mask=m_ix)
elif x_type == 'rotate':
EC.rotate(d_x * delta_psi[m], mask=m_ix)
elif x_type == 'ellipticity':
EC.ell_mag[m_ix] = d_x * ell_mags[m]
EC.ell_angle[m_ix] = d_x * ell_angles[m]
elif x_type == 'zoom':
EC.zoom_scale[m_ix] = 1 + d_x * (zoom_scales[m] - 1)
# Handle y-axis realignment
if y_type == 'shift':
EC.shift(d_y * delta_phi[m], mask=m_ix)
elif y_type == 'rotate':
EC.rotate(d_y * delta_psi[m], mask=m_ix)
elif y_type == 'ellipticity':
EC.ell_mag[m_ix] = d_y * ell_mags[m]
EC.ell_angle[m_ix] = d_y * ell_angles[m]
elif y_type == 'zoom':
EC.zoom_scale[m_ix] = 1 + d_y * (zoom_scales[m] - 1)
# Simulate and save the realigned spatial map
model = PlaceNetworkStd(EC=EC, W=W, **pdict)
model.advance()
B = CheckeredRatemap(model)
B.compute_coverage()
B.tofile(save_file)
return
class RealignmentSweep(SingleNetworkSweep):
"""
Analyze a 2D random sample of single-trial network simulations across
realignment magnitudes or variances in A-B environment comparisons.
See core.analysis.AbstractAnalysis documentation and collect_data method
signature and docstring for usage.
"""
label = 'Realign Sweep'
display_data = Enum('remapping', 'rate_remapping', 'turnover', 'sparsity',
'stage_coverage', 'stage_repr', 'peak_rate', 'max_rate', 'num_fields',
'coverage', 'area', 'diameter', 'peak', 'average')
map_data = Enum('remapping', 'rate_remapping', 'turnover', 'sparsity',
'stage_coverage', 'stage_repr', 'peak_rate', 'none')
def collect_data(self, x_type='shift', y_type='rotate', x_density=10, y_density=10,
nmodules=1, freq_modules=False, x_max=None, y_max=None, **kwargs):
"""
Store placemap data from a randomly sampled 2D region of parameter space
for realignment magnitudes or variances (spatial phase vs. orientation).
The same network is used for the simulation at each point, and each sample
is compared to a reference (A) spatial map.
Keyword arguments:
x_type -- realignment type along x axis; must be one of 'shift', 'rotate',
'ellipticity', 'zoom', or 'modules' (default 'shift')
        y_type -- realignment type along y axis (default 'rotate')
x_density -- number of x_type samples along the defined x_bounds (10)
y_density -- number of y_type samples along the defined y_bounds (10)
nmodules -- number of independent alignment modules; used as max number
of modules if x_type or y_type is set to 'modules'
freq_modules -- whether modules are spatial frequency partitions
x_max -- set upper bound for extent of x_type realignment along x axis;
(shift should be a 2-tuple value)
y_max -- set upper bound for extent of y_type realignment along y axis
"""
# Parse the realignment types
realignment_types = ('shift', 'rotate', 'ellipticity', 'zoom', 'modules')
if x_type not in realignment_types:
raise ValueError, 'invalid realignment type specification (x_type)'
if y_type not in realignment_types:
raise ValueError, 'invalid realignment type specification (y_type)'
# Split cortical population into modules
self.results['nmodules'] = nmodules = int(nmodules)
self.results['freq_modules'] = freq_modules
self.results['x_type'] = x_type
self.results['y_type'] = y_type
# Make data directory
map_dir = os.path.join(self.datadir, 'data')
if not os.path.exists(map_dir):
os.makedirs(map_dir)
# Set default model parameters
pdict = dict( refresh_weights=False,
refresh_phase=False,
refresh_orientation=False
)
pdict.update(kwargs)
# Simulate reference spatial map for environment A
self.out('Simulating reference spatial map...')
EC = GridCollection()
model = PlaceNetworkStd(EC=EC, **pdict)
model.advance()
A = CheckeredRatemap(model)
A.compute_coverage()
A.tofile(os.path.join(map_dir, 'map_A'))
# Setup namespace on ipengine instances
self.out('Setting up ipengines for task-farming...')
mec = self.get_multiengine_client()
tc = self.get_task_client()
mec.clear_queue()
mec.reset()
mec.execute('import gc')
mec.execute('from grid_remap.place_network import PlaceNetworkStd')
mec.execute('from grid_remap.dmec import GridCollection')
mec.execute('from grid_remap.ratemap import CheckeredRatemap')
# Send some network weights, grid configuration and sweep info
self.out('Pushing network weights and grid configuration...')
W = model.W
mec.push(dict( W=model.W,
pdict=pdict,
spacing=EC.spacing,
phi=EC._phi,
psi=EC._psi,
nmodules=nmodules,
freq_modules=freq_modules,
x_type=x_type,
y_type=y_type
))
mec.execute('EC = GridCollection(spacing=spacing, _phi=phi, _psi=psi)')
# Set up modular realignment parameters, pushing data out to engines
self.results['bounds'] = bounds = N.array([[0, 1]]*2, 'd')
density = [x_density, y_density]
r_max = (x_max, y_max)
r_type = (x_type, y_type)
for i in 0, 1:
if r_type[i] == 'shift':
if nmodules == 1 and r_max[i] is not None:
delta_phi = N.array([r_max[i]], 'd')
elif nmodules > 1 and r_max[i] is not None:
delta_phi = N.array(r_max[i], 'd')
else:
grid_scale = None
if freq_modules and r_type[1-i] == 'modules':
grid_scale = 60.0 # cf. lab notebook @ p.147
delta_phi = \
N.array([GridCollection.get_delta_phi(scale=grid_scale)
for m in xrange(nmodules)])
mec.push(dict(delta_phi=delta_phi))
self.results[r_type[i] + '_params'] = delta_phi
self.out('Pushed shift parameters:\n%s'%str(delta_phi))
elif r_type[i] == 'rotate':
if nmodules == 1 and r_max[i] is not None:
delta_psi = N.array([r_max[i]], 'd')
elif nmodules > 1 and r_max[i] is not None:
delta_psi = N.array(r_max[i], 'd')
else:
delta_psi = N.array([GridCollection.get_delta_psi()
for m in xrange(nmodules)])
mec.push(dict(delta_psi=delta_psi))
self.results[r_type[i] + '_params'] = delta_psi
self.out('Pushed rotate parameters:\n%s'%str(delta_psi))
elif r_type[i] == 'ellipticity':
if nmodules == 1 and r_max[i] is not None:
ell_mags = N.array([r_max[i]], 'd')
ell_angles = N.array([0.0])
else:
ell_mags = N.array([GridCollection.get_ellipticity()
for m in xrange(nmodules)])
ell_angles = N.array([GridCollection.get_elliptic_angle()
for m in xrange(nmodules)])
mec.push(dict(ell_mags=ell_mags, ell_angles=ell_angles))
self.results[r_type[i] + '_params'] = \
N.c_[ell_mags, ell_angles]
self.out('Pushed ellipticity parameters:\n' +
'Flattening: %s\nAngles: %s'%(str(ell_mags),
str(ell_angles)))
elif r_type[i] == 'zoom':
if nmodules == 1 and r_max[i] is not None:
zoom_scales = N.array([r_max[i]], 'd')
else:
zoom_scales = N.array([GridCollection.get_zoom_scale()
for m in xrange(nmodules)])
mec.push(dict(zoom_scales=zoom_scales))
self.results[r_type[i] + '_params'] = zoom_scales
self.out('Pushed zoom parameters:\n%s'%str(zoom_scales))
elif r_type[i] == 'modules':
density[i] = nmodules
bounds[i] = 1, nmodules
self.out('Setting up modularity sweep for %d modules'%nmodules)
# Build the sample grid according to specifications
pts_x = N.linspace(bounds[0,0], bounds[0,1], density[0])
pts_y = N.linspace(bounds[1,0], bounds[1,1], density[1])
x_grid, y_grid = N.meshgrid(pts_x, pts_y)
pts = N.c_[x_grid.flatten(), y_grid.flatten()]
self.results['samples'] = pts
# Initialize stage map sample data arrays
nsamples = density[0] * density[1]
self.results['remapping_samples'] = remapping = N.empty(nsamples, 'd')
self.results['rate_remapping_samples'] = rate_remapping = N.empty(nsamples, 'd')
self.results['turnover_samples'] = turnover = N.empty(nsamples, 'd')
self.results['sparsity_samples'] = sparsity = N.empty(nsamples, 'd')
self.results['stage_coverage_samples'] = stage_coverage = N.empty(nsamples, 'd')
self.results['stage_repr_samples'] = stage_repr = N.empty(nsamples, 'd')
self.results['peak_rate_samples'] = peak_rate = N.empty(nsamples, 'd')
self.results['max_rate_samples'] = max_rate = N.zeros(nsamples, 'd')
self.results['num_fields_samples'] = num_fields = N.zeros(nsamples, 'd')
self.results['coverage_samples'] = coverage = N.zeros(nsamples, 'd')
self.results['area_samples'] = area = N.zeros(nsamples, 'd')
self.results['diameter_samples'] = diameter = N.zeros(nsamples, 'd')
self.results['peak_samples'] = peak = N.zeros(nsamples, 'd')
self.results['average_samples'] = average = N.zeros(nsamples, 'd')
# Method for creating interpolated maps of collated data
def interpolate_data(z, pixels=256):
"""Interpolate value z across sample points with *density* points
"""
M = N.empty((pixels,)*2, 'd')
f = BilinearInterp2D(x=pts_x, y=pts_y, z=z)
x_range = N.linspace(bounds[0,0], bounds[0,1], num=pixels)
y_range = N.linspace(bounds[1,1], bounds[1,0], num=pixels)
for j, x in enumerate(x_range):
for i, y in enumerate(y_range):
M[i,j] = f(x, y)
return M
# Execute data collection process for each sample point
tasks = []
for i, p in enumerate(pts):
self.out('Submitting: d_%s = %.2f, d_%s = %.2f'%
(x_type, p[0], y_type, p[1]))
save_file = os.path.join(map_dir, 'map_%03d.tar.gz'%i)
tasks.append(
tc.run(
IPclient.MapTask(run_sample_point,
args=(save_file, float(p[0]), float(p[1])))))
tc.barrier(tasks)
# Collate data return from task farming
for i in xrange(nsamples):
self.out('Loading data from map %d for analysis...'%i)
B = CheckeredRatemap.fromfile(os.path.join(map_dir, 'map_%03d.tar.gz'%i))
# Get field and unit data record arrays
fdata = B.get_field_data()
udata = B.get_unit_data()
# Collate the stage map data
sparsity[i] = B.sparsity
stage_coverage[i] = B.stage_coverage
stage_repr[i] = B.stage_repr
peak_rate[i] = B.peak_rate
# Collate the per-unit data
if udata.shape[0] != 0:
max_rate[i] = udata['max_r'].mean()
num_fields[i] = udata['num_fields'].mean()
coverage[i] = udata['coverage'].mean()
# Collate the per-field data
if fdata.shape[0] != 0:
area[i] = fdata['area'].mean()
diameter[i] = fdata['diameter'].mean()
peak[i] = fdata['peak'].mean()
average[i] = fdata['average'].mean()
# Compute remapping strength from map A
cmp_AB = compare_AB(A, B)
remapping[i] = cmp_AB['remapping']
rate_remapping[i] = cmp_AB['rate_remapping']
turnover[i] = cmp_AB['turnover']
# Create interpolated maps for the collated data
def dot():
self.out.printf('.', color='purple')
self.out('Creating interpolated parameter maps for collected data'); dot()
self.results['remapping'] = interpolate_data(remapping); dot()
self.results['rate_remapping'] = interpolate_data(rate_remapping); dot()
self.results['turnover'] = interpolate_data(turnover); dot()
self.results['sparsity'] = interpolate_data(sparsity); dot()
self.results['stage_coverage'] = interpolate_data(stage_coverage); dot()
self.results['stage_repr'] = interpolate_data(stage_repr); dot()
self.results['peak_rate'] = interpolate_data(peak_rate); dot()
self.results['max_rate'] = interpolate_data(max_rate); dot()
self.results['num_fields'] = interpolate_data(num_fields); dot()
self.results['coverage'] = interpolate_data(coverage); dot()
self.results['area'] = interpolate_data(area); dot()
self.results['diameter'] = interpolate_data(diameter); dot()
self.results['peak'] = interpolate_data(peak); dot()
self.results['average'] = interpolate_data(average); dot()
self.out.printf('\n')
# Good-bye!
self.out('All done!')
def create_plots(self):
"""Create a simple 2D image plot of the parameter sweep"""
# Figure is horizontal container for main plot + colorbar
self.figure = \
container = HPlotContainer(fill_padding=True, padding=25,
bgcolor='linen')
# Data and bounds for main plot
raw_data = self.results[self.display_data]
data = ArrayPlotData(image=self.get_rgba_data(raw_data), raw=raw_data,
x=self.results['samples'][:,0], y=self.results['samples'][:,1])
        x_range = tuple(self.results['bounds'][0])
        y_range = tuple(self.results['bounds'][1])
bounds = dict(xbounds=x_range, ybounds=y_range)
# Create main plot
p = Plot(data)
p.img_plot('image', name='sweep', origin='top left', **bounds)
p.contour_plot('raw', name='contour', type='line', origin='top left', **bounds)
p.plot(('x', 'y'), name='samples', type='scatter', marker='circle',
color=(0.5, 0.6, 0.7, 0.4), marker_size=4)
# Tweak main plot
p.title = snake2title(self.display_data)
p.x_axis.orientation = 'bottom'
        p.x_axis.title = snake2title(self.results['x_type'])
        p.y_axis.title = snake2title(self.results['y_type'])
p.plots['samples'][0].visible = self.show_sample_points
# Add main plot and colorbar to figure
container.add(p)
container.add(
self.get_colorbar_plot(bounds=(raw_data.min(), raw_data.max())))
# Set radio buttons
self.unit_data = self.field_data = 'none'
# Convenience function to reorganize results data
def get_module_columns(res, module_dim='y', which='remapping'):
"""Get matrix of columns of line data from results samples to plot
Arguments:
res -- results dict from a completed RealignmentSweep analysis object
module_dim -- set to 'x' or 'y' to specify modularity axis
which -- which data to retrieve ('remapping', 'turnover', etc.)
Returns modules array, sweep (realignment) array, and column data matrix.
"""
pts, data = res['samples'], res[which+'_samples']
# Get the module and sweep information
mod_dim = int(module_dim == 'y')
modules = N.unique(pts[:,mod_dim]).astype('i')
sweep = pts[pts[:,mod_dim] == modules[0], 1-mod_dim]
# Fill the column matrix
    # One row per sweep sample, one column per module (matches the docstring)
    lines = N.empty((len(sweep), len(modules)), 'd')
    for m, module in enumerate(modules):
        pts_ix = (pts[:, mod_dim] == module).nonzero()[0]
        lines[:, m] = data[pts_ix]
return modules, sweep, lines
```
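A sketch of plotting the per-module sweep curves returned by `get_module_columns`; it assumes `res` is the results dict of a completed RealignmentSweep run:

```python
import matplotlib.pyplot as plt

# One remapping curve per module count, plotted against the realignment sweep.
modules, sweep, lines = get_module_columns(res, module_dim='y', which='remapping')
for m, module in enumerate(modules):
    plt.plot(sweep, lines[:, m], label='%d modules' % module)
plt.xlabel('Realignment magnitude')
plt.ylabel('Remapping strength')
plt.legend()
plt.show()
```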
#### File: src/analysis/sweep.py
```python
from IPython.kernel import client as IPclient
import numpy as N, scipy as S, os
# Package imports
from .. import PlaceNetworkStd, CheckeredRatemap, GridCollection
from ..core.analysis import AbstractAnalysis
from ..tools.interp import BilinearInterp2D
from ..tools.cmap_ui import ColormapControl
from ..tools.string import snake2title
# Enthought imports
from enthought.traits.api import Enum, Bool, Button
from enthought.traits.ui.api import View, Group, Item, Include
from enthought.chaco.api import ArrayPlotData, HPlotContainer, Plot
from enthought.enable.component_editor import ComponentEditor
def run_sample_point(**kwargs):
gc.collect()
# Handle file save
do_save = False
if 'save_file' in kwargs:
do_save = True
save_file = kwargs['save_file']
del kwargs['save_file']
# Check for pre-existing data to load
if do_save and os.path.exists(save_file):
        print 'Loading found data:\n%s' % save_file  # runs on an ipengine; no self here
pmap = CheckeredRatemap.fromfile(save_file)
else:
# Run the simulation and save the results
model = PlaceNetworkStd(W=W, EC=EC, **kwargs)
model.advance()
pmap = CheckeredRatemap(model)
pmap.compute_coverage()
if do_save:
pmap.tofile(save_file)
# Get field and unit data record arrays
fdata = pmap.get_field_data()
udata = pmap.get_unit_data()
# Collate the place map sample data
sample = {}
sample['sparsity'] = pmap.sparsity
sample['stage_coverage'] = pmap.stage_coverage
sample['stage_repr'] = pmap.stage_repr
sample['peak_rate'] = pmap.peak_rate
# Collate the per-unit data
if udata.shape[0] != 0:
sample['max_rate'] = udata['max_r'].mean()
sample['num_fields'] = udata['num_fields'].mean()
sample['coverage'] = udata['coverage'].mean()
else:
sample['max_rate'] = sample['num_fields'] = \
sample['coverage'] = 0.0
# Collate the per-field data
if fdata.shape[0] != 0:
sample['area'] = fdata['area'].mean()
sample['diameter'] = fdata['diameter'].mean()
sample['peak'] = fdata['peak'].mean()
sample['average'] = fdata['average'].mean()
else:
sample['area'] = sample['diameter'] = sample['peak'] = \
sample['average'] = 0.0
return sample
class SingleNetworkSweep(AbstractAnalysis, ColormapControl):
"""
Analyze a 2D random sample of single-trial network simulations across
parameter space.
See core.analysis.AbstractAnalysis documentation and collect_data method
signature and docstring for usage.
"""
label = 'Single Sweep'
save_current_plot = Button
show_sample_points = Bool(True)
#
# These traits must be kept up-to-date with the data made available in
# the field and unit record arrays of PlaceMap:
#
# display_data -- the actual data to display in the figure plot
# map_data -- the subset of per-map data
# unit_data -- the subset of unit-averaged data
# field_data -- the subset of field-averaged data
#
display_data = Enum('sparsity', 'stage_coverage', 'stage_repr',
'peak_rate', 'max_rate', 'num_fields', 'coverage', 'area',
'diameter', 'peak', 'average')
map_data = Enum('sparsity', 'stage_coverage', 'stage_repr',
'peak_rate', 'none')
unit_data = Enum('max_rate', 'num_fields', 'coverage', 'none')
field_data = Enum('area', 'diameter', 'peak', 'average', 'none')
traits_view = \
View(
Group(
Item('figure', label='Data Map', height=450,
editor=ComponentEditor()),
Group(
Group(
Item('map_data', style='custom'),
Item('unit_data', style='custom'),
Item('field_data', style='custom'),
label='Data to Display',
show_border=True),
Group(
Include('colormap_group'),
Group(
Item('show_sample_points'),
label='Samples',
show_border=True),
Item('save_current_plot', show_label=False),
show_border=False),
show_border=False,
orientation='horizontal'),
layout='split',
orientation='vertical',
show_border=False),
title='Single Network Sweep',
kind='live',
resizable=True,
width=0.6,
height=0.8,
buttons=['Cancel', 'OK'])
def collect_data(self, x_density=10, x_bounds=(0.5,8), x_param='J0',
y_density=10, y_bounds=(0,2.5), y_param='phi_lambda', save_maps=True,
**kwargs):
"""Store placemap data from a grid-sampled 2D region of parameter space
The same network and inputs are used for the simulation at each point.
Keyword arguments:
x_param -- string name of the PlaceNetwork parameter to sweep along the x-axis
y_param -- ibid for the y-axis
x_bounds -- (min, max) bounds on sampling the parameter specified by x_param
y_bounds -- ibid for y_param
x_density/y_density -- number of grid samples along each axis
save_maps -- whether to save each computed ratemap to disk
"""
# Store bounds and sweep parameters
self.results['x_bounds'] = N.array(x_bounds)
self.results['y_bounds'] = N.array(y_bounds)
self.results['x_param'] = x_param
self.results['y_param'] = y_param
# Get ipcontroller clients
mec = self.get_multiengine_client()
tc = self.get_task_client()
# Setup namespace on ipengine instances
self.out('Setting up ipengines for task-farming...')
mec.clear_queue()
mec.reset()
mec.execute('import gc, os')
mec.execute('from grid_remap.place_network import PlaceNetworkStd')
mec.execute('from grid_remap.dmec import GridCollection')
mec.execute('from grid_remap.ratemap import CheckeredRatemap')
# Set default model parameters
pdict = dict( growl=False,
refresh_weights=False,
refresh_orientation=False,
refresh_phase=False
)
# Update defaults with keyword arguments
pdict.update(kwargs)
# Validate the sweep parameter names
all_params = PlaceNetworkStd().traits(user=True).keys()
if x_param not in all_params:
raise KeyError, 'x_param (%s) is not a PlaceNetwork parameter'%x_param
if y_param not in all_params:
raise KeyError, 'y_param (%s) is not a PlaceNetwork parameter'%y_param
# Send some network weights and a grid cell object
self.out('Pushing network weights and grid configuration...')
EC = GridCollection()
mec.push(dict(W=PlaceNetworkStd(EC=EC, **pdict).W,
spacing=EC.spacing, phi=EC._phi, psi=EC._psi))
self.out('...and reconstructing grid collection...')
mec.execute(
'EC = GridCollection(spacing=spacing, _phi=phi, _psi=psi)')
# Build the sample grid according to specifications
pts_x = N.linspace(x_bounds[0], x_bounds[1], x_density)
pts_y = N.linspace(y_bounds[0], y_bounds[1], y_density)
x_grid, y_grid = N.meshgrid(pts_x, pts_y)
pts = N.c_[x_grid.flatten(), y_grid.flatten()]
self.results['samples'] = pts
def interpolate_data(z, density=256):
"""Interpolate value z across sample points with *density* points
"""
M = N.empty((density, density), 'd')
x_range = N.linspace(x_bounds[0], x_bounds[1], num=density)
y_range = N.linspace(y_bounds[1], y_bounds[0], num=density)
f = BilinearInterp2D(x=pts_x, y=pts_y, z=z)
for j, x in enumerate(x_range):
for i, y in enumerate(y_range):
M[i,j] = f(x, y)
return M
# Execute data collection process for each sample point
self.out('Initiating task farming...')
save_dir = os.path.join(self.datadir, 'data')
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
tasks = []
for i, p in enumerate(pts):
self.out('Point %d: %s = %.4f, %s = %.4f'%(i, x_param, p[0],
y_param, p[1]))
pdict[x_param], pdict[y_param] = p
if save_maps:
pdict['save_file'] = \
os.path.join(save_dir, 'point_%03d.tar.gz'%i)
tasks.append(
tc.run(
IPclient.MapTask(run_sample_point, kwargs=pdict)))
tc.barrier(tasks)
# Collate sample data returned from map task
samples = [tc.get_task_result(t_id) for t_id in tasks]
# Populate result arrays for interpolation
sparsity = N.array([pt['sparsity'] for pt in samples])
stage_coverage = N.array([pt['stage_coverage'] for pt in samples])
stage_repr = N.array([pt['stage_repr'] for pt in samples])
peak_rate = N.array([pt['peak_rate'] for pt in samples])
max_rate = N.array([pt['max_rate'] for pt in samples])
num_fields = N.array([pt['num_fields'] for pt in samples])
coverage = N.array([pt['coverage'] for pt in samples])
area = N.array([pt['area'] for pt in samples])
diameter = N.array([pt['diameter'] for pt in samples])
peak = N.array([pt['peak'] for pt in samples])
average = N.array([pt['average'] for pt in samples])
# Create interpolated maps for the collated data
def dot():
self.out.printf('.', color='purple')
self.out('Creating interpolated parameter maps for collected data'); dot()
self.results['sparsity'] = interpolate_data(sparsity); dot()
self.results['stage_coverage'] = interpolate_data(stage_coverage); dot()
self.results['stage_repr'] = interpolate_data(stage_repr); dot()
self.results['peak_rate'] = interpolate_data(peak_rate); dot()
self.results['max_rate'] = interpolate_data(max_rate); dot()
self.results['num_fields'] = interpolate_data(num_fields); dot()
self.results['coverage'] = interpolate_data(coverage); dot()
self.results['area'] = interpolate_data(area); dot()
self.results['diameter'] = interpolate_data(diameter); dot()
self.results['peak'] = interpolate_data(peak); dot()
self.results['average'] = interpolate_data(average); dot()
self.out.printf('\n')
# Good-bye!
self.out('All done!')
def create_plots(self):
"""Create a simple 2D image plot of the parameter sweep
"""
# Figure is horizontal container for main plot + colorbar
self.figure = \
container = HPlotContainer(fill_padding=True, padding=25,
bgcolor='linen')
# Convert old data sets to the new generalized style
if 'J0_bounds' in self.results:
self.results['x_bounds'] = self.results['J0_bounds']
self.results['x_param'] = 'J0'
if 'lambda_bounds' in self.results:
self.results['y_bounds'] = self.results['lambda_bounds']
self.results['y_param'] = 'phi_lambda'
# Data and bounds for main plot
raw_data = self.results[self.display_data]
data = ArrayPlotData(image=self.get_rgba_data(raw_data), raw=raw_data,
x=self.results['samples'][:,0], y=self.results['samples'][:,1])
x_range = tuple(self.results['x_bounds'])
y_range = tuple(self.results['y_bounds'])
bounds = dict(xbounds=x_range, ybounds=y_range)
# Create main plot
p = Plot(data)
p.img_plot('image', name='sweep', origin='top left', **bounds)
p.contour_plot('raw', name='contour', type='line', origin='top left', **bounds)
p.plot(('x', 'y'), name='samples', type='scatter', marker='circle',
color=(0.5, 0.6, 0.7, 0.4), marker_size=2)
# Tweak main plot
p.title = snake2title(self.display_data)
p.x_axis.orientation = 'bottom'
p.x_axis.title = snake2title(self.results['x_param'])
p.y_axis.title = snake2title(self.results['y_param'])
p.plots['samples'][0].visible = self.show_sample_points
# Add main plot and colorbar to figure
container.add(p)
container.add(
self.get_colorbar_plot(bounds=(raw_data.min(), raw_data.max())))
# Set radio buttons
self.unit_data = self.field_data = 'none'
# Traits notifications for the interactive GUI
def _cmap_notify_changed(self):
"""Respond to changes to the colormap specification by updating
"""
self._update_figure_plot()
def _save_current_plot_fired(self):
self.save_plots(fmt='png')
def _show_sample_points_changed(self, new):
self.figure.components[0].plots['samples'][0].visible = new
self.figure.request_redraw()
def _update_figure_plot(self):
if self.figure is None:
return
# Update data for the main plot
raw_data = self.results[self.display_data]
main_plot = self.figure.components[0]
main_plot.data.set_data('image', self.get_rgba_data(raw_data))
main_plot.data.set_data('raw', raw_data)
main_plot.title = snake2title(self.display_data)
# Remove old colorbar and add new one
del self.figure.components[1]
self.figure.add(
self.get_colorbar_plot(bounds=(raw_data.min(), raw_data.max())))
self.figure.request_redraw()
def _display_data_changed(self, old, new):
if new in self.results:
self._update_figure_plot()
else:
self.display_data = old
self.out('This analysis does not contain \'%s\' data'%new,
error=True)
def _map_data_changed(self, new):
if new != 'none':
self.unit_data = self.field_data = 'none'
self.display_data = new
def _unit_data_changed(self, new):
if new != 'none':
self.map_data = self.field_data = 'none'
self.display_data = new
def _field_data_changed(self, new):
if new != 'none':
self.unit_data = self.map_data = 'none'
self.display_data = new
```
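For reference, a self-contained sketch of the grid-sample-and-interpolate pattern that `interpolate_data` implements above, using plain numpy bilinear interpolation in place of the package's `BilinearInterp2D` (the function name and details here are illustrative, not the package API):
```python
import numpy as np

def bilinear_resample(xs, ys, Z, density=256):
    """Bilinearly resample samples Z (len(ys) x len(xs)) onto a dense grid."""
    xq = np.linspace(xs[0], xs[-1], density)
    yq = np.linspace(ys[0], ys[-1], density)
    ix = np.clip(np.searchsorted(xs, xq) - 1, 0, len(xs) - 2)
    iy = np.clip(np.searchsorted(ys, yq) - 1, 0, len(ys) - 2)
    tx = (xq - xs[ix]) / (xs[ix+1] - xs[ix])    # fractional x offsets
    ty = (yq - ys[iy]) / (ys[iy+1] - ys[iy])    # fractional y offsets
    TX, TY = np.meshgrid(tx, ty)
    IX, IY = np.meshgrid(ix, iy)
    return ((1-TY)*(1-TX)*Z[IY, IX]  + (1-TY)*TX*Z[IY, IX+1] +
            TY*(1-TX)*Z[IY+1, IX]    + TY*TX*Z[IY+1, IX+1])

# e.g. 10x10 sweep samples -> 256x256 map, as in interpolate_data above
xs, ys = np.linspace(0.5, 8, 10), np.linspace(0, 2.5, 10)
Z = np.random.rand(10, 10)
M = bilinear_resample(xs, ys, Z)
```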
#### File: grid-remapping-model/src/dmec.py
```python
import numpy as np
from scipy import pi, rand, sqrt, sin, cos
# Package imports
from .tools.filters import halfwave
from .tools.array_container import TraitedArrayContainer
from .tools.radians import xy_to_rad, xy_to_rad_vec, shortcut
# Traits imports
from enthought.traits.api import Float, Int, Tuple, Array, List, false
# Constant values
GRID_SPACING_RANGE = (30.0, 90.0)
ENVIRONMENT_SIZE = (100.0, 100.0)
class GridCollection(TraitedArrayContainer):
"""
Procedural model of a collection of grid cell spatial response maps
"""
num_maps = Int(1000)
spacing_bounds = Tuple(GRID_SPACING_RANGE)
mid = Tuple((ENVIRONMENT_SIZE[0] / 2.0, ENVIRONMENT_SIZE[1] / 2.0))
peak_rate = Float(1)
spacing = Array
k = Array
ellipticity = false
ell_mag = Array
ell_angle = Array
zoom = false
zoom_scale = Array
_ellipticity = false(desc='cache')
_ell_mag = Array(desc='cache')
_ell_angle = Array(desc='cache')
_zoom = false(desc='cache')
_zoom_scale = Array(desc='cache')
_phi = Array
_psi = Array
_phi0 = Array(desc='cache')
_psi0 = Array(desc='cache')
_phi_radius = Array
_thetas = List([0.0, 2*pi/3, 4*pi/3])
_norm = Float
def __init__(self, **traits):
TraitedArrayContainer.__init__(self, **traits)
self.store()
def map_value(self, x, y):
"""Get population rate vector of this grid collection at position (x,y)
"""
x, y = self.map_transforms(x, y)
return self._norm * self.__g(
reduce(np.add,
[cos(
(sin(t-self._psi)*(x-self._phi[:,0]-self.mid[0]) +
cos(t-self._psi)*(y-self._phi[:,1]-self.mid[1]))/self.k
)
for t in self._thetas]))
def __g(self, x):
"""Monotonic gain function for grid responses
"""
return halfwave(np.exp(0.25*x) - 0.75)
# Ellipticity and zoom (scaling) transforms
def map_transforms(self, x, y):
if self.ellipticity:
# Get polar coordinates from midpoint
dx = x - self.mid[0]
dy = y - self.mid[1]
r = sqrt(dx**2 + dy**2)
theta = xy_to_rad_vec(dx, dy)
# Rotational coordinate transform, back to Cartesian
theta_prime = theta - self.ell_angle
dx_prime = r*cos(theta_prime)
dy_prime = r*sin(theta_prime)
# Do the elliptical transform, back to polar
dx_ell = dx_prime / (1+self.ell_mag)
dy_ell = dy_prime * (1+self.ell_mag)
r_ell = sqrt(dx_ell**2 + dy_ell**2)
theta_ell = xy_to_rad_vec(dx_ell, dy_ell) + self.ell_angle
# Revert to absolute Cartesian coordinate frame
x = self.mid[0] + r_ell*cos(theta_ell)
y = self.mid[1] + r_ell*sin(theta_ell)
if self.zoom:
# Get polar coordinates from midpoint
dx = x - self.mid[0]
dy = y - self.mid[1]
# Compute scaled radius and center-angles
r_zoom = sqrt(dx**2 + dy**2) / self.zoom_scale
theta = xy_to_rad_vec(dx, dy)
# Project back to absolute Cartesian coordinates
x = self.mid[0] + r_zoom*cos(theta)
y = self.mid[1] + r_zoom*sin(theta)
return x, y
# Traits default values
def _spacing_default(self):
return self.spacing_bounds[0] + \
(self.spacing_bounds[1] - self.spacing_bounds[0]) * \
rand(self.num_maps)
def _k_default(self):
return (sqrt(3)/(4*pi)) * self.spacing
def _ell_mag_default(self):
return np.zeros(self.num_maps, 'd')
def _ell_angle_default(self):
return np.zeros(self.num_maps, 'd')
def _zoom_scale_default(self):
return np.ones(self.num_maps, 'd')
def __psi_default(self):
return self.new_orientations()
def __phi_default(self):
return self.new_spatial_phases()
def __norm_default(self):
return self.peak_rate / self.__g(3)
def __phi_radius_default(self):
return (self.spacing/2) / cos(pi/6)
# Rotate/shift remapping methods
def shift(self, shift, mask=None):
"""Shift the grids
The phase shift value can be a 2-element array to be applied to all
grid phases (subject to the binary/index *mask* array) or a *phi*-shaped
array specifying per-grid phase shifts.
The phases are wrapped on the half-spacing circle.
"""
# Add the delta shift value to grid phases
shift = np.squeeze(np.array(shift))
try:
if mask is not None:
self._phi[mask] += shift
else:
self._phi += shift
except ValueError:
raise ValueError, 'mask and shift arrays must match'
# Wrap the phase values on the half-spacing circle
hex_angles = np.arange(0, 2*pi, pi/3)
for i in xrange(self.num_maps):
vertices = hex_angles + self._psi[i]
while sqrt((self._phi[i]**2).sum()) > self._phi_radius[i]:
orig = xy_to_rad(self._phi[i,0], self._phi[i,1]) - pi
proj = vertices[np.argmin([shortcut(v, orig) for v in vertices])]
self._phi[i,0] += self.spacing[i] * np.cos(proj)
self._phi[i,1] += self.spacing[i] * np.sin(proj)
def rotate(self, angle, mask=None):
"""Rotate the grids (arena centered)
Grids to be rotated can be optionally specified by bool/index array
*mask*, otherwise population is rotated. Specified *angle* can be a
scalar value to be applied to the population or a population- or
mask-sized array depending on whether *mask* is specified.
"""
rot2D = lambda psi: [[cos(psi), sin(psi)], [-sin(psi), cos(psi)]]
if mask is not None and type(mask) is np.ndarray:
if mask.dtype.kind == 'b':
mask = mask.nonzero()[0]
if type(angle) is np.ndarray and angle.size == mask.size:
for i,ix in enumerate(mask):
self._phi[ix] = np.dot(self._phi[ix], rot2D(angle[i]))
elif type(angle) in (int, float, np.float64):
angle = float(angle)
self._phi[mask] = np.dot(self._phi[mask], rot2D(angle))
else:
raise TypeError, 'angle must be mask-sized array or float'
self._psi[mask] = np.fmod(self._psi[mask]+angle, 2*pi)
elif mask is None:
if type(angle) is np.ndarray and angle.size == self.num_maps:
for i in xrange(self.num_maps):
self._phi[i] = np.dot(self._phi[i], rot2D(angle[i]))
elif type(angle) in (int, float, np.float64):
angle = float(angle)
self._phi = np.dot(self._phi, rot2D(angle))
else:
raise TypeError, 'angle must be num_maps array or float'
self._psi = np.fmod(self._psi+angle, 2*pi)
else:
raise TypeError, 'mask must be bool/index array'
# Store/reset alignment
def store(self):
"""Save the current grid configuration to be restored later
"""
self._phi0 = self._phi.copy()
self._psi0 = self._psi.copy()
self._ellipticity = self.ellipticity
self._ell_mag = self.ell_mag.copy()
self._ell_angle = self.ell_angle.copy()
self._zoom = self.zoom
self._zoom_scale = self.zoom_scale.copy()
def reset(self):
"""Reset the grid configuration to the stored configuration
"""
self._phi[:] = self._phi0
self._psi[:] = self._psi0
self.ellipticity = self._ellipticity
self.ell_mag[:] = self._ell_mag
self.ell_angle[:] = self._ell_angle
self.zoom = self._zoom
self.zoom_scale[:] = self._zoom_scale
# Convenience methods
def randomize_phase(self):
"""Randomize grid spatial phases noncoherently
"""
self._phi = self.new_spatial_phases()
def randomize_orientation(self):
"""Set grid orientations coherently to a random value
"""
self._psi = self.new_orientations()
def new_orientations(self):
"""Get a new coherent array of grid orientations
"""
return (pi/3) * rand() + np.zeros(self.num_maps)
def new_spatial_phases(self):
"""Get x,y array of random spatial phases on the half-spacing circle
"""
p0 = 2*rand(self.num_maps, 2) - 1
for m in xrange(self.num_maps):
while (p0[m]**2).sum() > 1:
p0[m] = 2*rand(2) - 1
return p0 * self._phi_radius[:,np.newaxis]
def get_modules(self, nmodules, freq_sort=False):
"""Get a list of index arrays for a modular partition of the grids
Arguments:
nmodules -- the number of equal-sized modular partitions
freq_sort -- whether to partition based on spatial frequency
"""
if freq_sort:
grid_ix = np.argsort(self.spacing)
else:
grid_ix = np.arange(self.num_maps)
return np.array_split(grid_ix, nmodules)
def get_z_stack(self, size=ENVIRONMENT_SIZE):
"""Get a z-stack matrix of the population responses
Convenience method to get a matrix array with the spatial responses
of each grid-unit in this GridCollection object. Pixels get value from
the middle of the area represented by the pixel, and the origin is the
lower left corner of the individual spatial maps (index (size[0]-1,0)).
Keyword arguments:
size -- (H,W)-tuple specifying the area in cm-pixels
"""
M = np.squeeze(np.empty((self.num_maps, size[0], size[1]), 'd'))
for i in xrange(int(size[0])):
for j in xrange(int(size[1])):
M[...,i,j] = self.map_value(j+0.5, size[0]-i-0.5)
return M
# Realignment helper functions
@classmethod
def get_delta_phi(cls, scale=None):
"""Generate a random spatial phase displacement
Keyword arguments:
scale -- set grid scale that determines range of possible phase shifts
"""
if scale is None:
scale = max(GRID_SPACING_RANGE)
outer_bound = 0.5 * scale
lower_bound = 0.2 * outer_bound
# Generate and return random displacement
r = (outer_bound - lower_bound) * rand() + lower_bound
theta = 2 * pi * rand()
return r * np.array([cos(theta), sin(theta)])
@classmethod
def get_delta_psi(cls):
"""Generate a random orientation realignment (-30 to +30 degrees)
"""
return (pi/6) * (2 * rand() - 1)
@classmethod
def get_ellipticity(cls, ecc_range=(0.0, 0.2)):
"""Generate a random magnitude for the ellipticity transform
"""
return (ecc_range[1] - ecc_range[0]) * rand() + ecc_range[0]
@classmethod
def get_elliptic_angle(cls):
"""Generate a random angle for the semimajor axis of ellipticity
"""
return pi * (rand() - 0.5)
@classmethod
def get_zoom_scale(cls, zoom_range=(1.0, 1.2)):
"""Generate a random rescaling factor
"""
return (zoom_range[1] - zoom_range[0]) * rand() + zoom_range[0]
```
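A minimal standalone sketch of the three-cosine grid-rate model that `GridCollection.map_value` evaluates (parameter names here are illustrative; the package adds per-grid phase arrays plus ellipticity and zoom transforms on top of this):
```python
import numpy as np

def grid_rate(x, y, spacing=50.0, phase=(0.0, 0.0), psi=0.0, peak=1.0):
    """Firing rate of one idealized grid cell at position (x, y)."""
    k = (np.sqrt(3) / (4 * np.pi)) * spacing       # spatial scale constant
    thetas = (0.0, 2 * np.pi / 3, 4 * np.pi / 3)   # three plane-wave axes
    g = sum(np.cos((np.sin(t - psi) * (x - phase[0]) +
                    np.cos(t - psi) * (y - phase[1])) / k)
            for t in thetas)
    r = np.exp(0.25 * g) - 0.75                    # monotonic gain
    r = np.clip(r, 0.0, None)                      # half-wave rectification
    return peak * r / (np.exp(0.25 * 3) - 0.75)    # normalize maximum to peak
```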
#### File: grid-remapping-model/src/placemap_viewer.py
```python
import numpy as N, scipy as S
from matplotlib import cm
# Package imports
from .ratemap import PlaceMap
from .tools.images import array_to_rgba
from .tools.stats import integer_hist
from .tools.bash import CPrint
# Traits imports
from enthought.traits.api import HasTraits, Instance, Trait, TraitError, \
Property, Enum, Int, Float, Range, Delegate
from enthought.traits.ui.api import View, Group, Item, Heading
# Chaco imports
from enthought.chaco.api import ArrayPlotData, Plot, BasePlotContainer, VPlotContainer, copper
from enthought.enable.component_editor import ComponentEditor
class PlaceMapViewer(HasTraits):
"""
Chaco viewer for placemap data
Constructor arguments:
pmap -- PlaceMap (or subclass) object to view
Public methods:
view -- Bring up the Chaco View window for looking at data
"""
# Console output
out = Instance(CPrint)
# Reference to PlaceMap object
PMap = Trait(PlaceMap)
# Stage map traits
stage_map = Instance(Plot)
stage_map_type = Enum('representation', 'coverage', 'field_centers')
sparsity = Delegate('PMap')
num_active = Delegate('PMap')
stage_coverage = Delegate('PMap')
stage_repr = Delegate('PMap')
peak_rate = Delegate('PMap')
# Unit map traits
_unit = Int
unit_map = Instance(Plot)
unit_map_type = Enum('ratemap', 'single', 'fields')
num_fields = Int
coverage = Float
avg_area = Float
avg_diameter = Float
max_rate = Float
# Unit data traits
unit_data_plots = Instance(BasePlotContainer)
unit_bins = Range(low=5, high=50, value=20)
# Field data traits
field_data_plots = Instance(BasePlotContainer)
field_bins = Range(low=5, high=50, value=20)
# Chaco view definition
traits_view = \
View(
Group(
Group(
Item('stage_map_type'),
Item('stage_map', editor=ComponentEditor(), show_label=False),
Group(
Item('sparsity', style='readonly'),
Item('num_active', style='readonly'),
Item('stage_coverage', label='Coverage', style='readonly'),
Item('stage_repr', label='Representation', style='readonly'),
Item('peak_rate', style='readonly'),
label='Stage Coding',
show_border=True),
label='Stage Maps',
orientation='v'),
Group(
Item('unit_map_type'),
Item('unit', style='custom'),
Item('unit_map', editor=ComponentEditor(), show_label=False),
Group(
Item('max_rate', style='readonly'),
Item('num_fields', style='readonly'),
Item('coverage', style='readonly'),
Item('avg_area', label='Mean Field Area', style='readonly'),
Item('avg_diameter', label='Mean Field Diameter', style='readonly'),
label='Place Unit',
show_border=True),
label='Unit Maps',
orientation='v'),
Group(
Heading('Distributions of Single-Unit Properties'),
Item('unit_data_plots', editor=ComponentEditor(), show_label=False),
Item('unit_bins', label='Bins'),
label='Unit Data'),
Group(
Heading('Distributions of Single-Field Properties'),
Item('field_data_plots', editor=ComponentEditor(), show_label=False),
Item('field_bins', label='Bins'),
label='Field Data'),
layout='tabbed'),
title='Placemap Viewer',
resizable=True,
height=800,
width=700,
kind='live',
buttons=['Cancel', 'OK'])
def __init__(self, pmap, **traits):
HasTraits.__init__(self, **traits)
try:
self.PMap = pmap
except TraitError:
self.out('PlaceMap subclass instance required', error=True)
return
self.fdata = self.PMap.get_field_data()
self.udata = self.PMap.get_unit_data()
self.add_trait('unit', Range(low=0, high=self.PMap.num_maps-1))
self._update_unit_values()
self.out('Bringing up place-map visualization...')
self.view()
self.out('Done!')
def view(self):
self.configure_traits()
# Plot creation methods
def _stage_map_default(self):
# RGBA maps
rep_map = array_to_rgba(self.PMap.stage_repr_map, cmap=cm.hot)
cov_map = array_to_rgba(self.PMap.stage_coverage_map, cmap=cm.gray)
# Data sources and plot object
data = ArrayPlotData(fields_x=self.fdata['x'], fields_y=self.fdata['y'],
fields_z=self.fdata['peak'], rep=rep_map, cov=cov_map)
p = Plot(data)
# Plot the field centers
p.plot(('fields_x', 'fields_y', 'fields_z'), name='centers', type='cmap_scatter',
marker='dot', marker_size=5, color_mapper=copper, line_width=1, fill_alpha=0.6)
# Plot the representation and coverage maps
p.img_plot('rep', name='rep', xbounds=(0, self.PMap.W), ybounds=(0, self.PMap.H),
origin='top left')
p.img_plot('cov', name='cov', xbounds=(0, self.PMap.W), ybounds=(0, self.PMap.H),
origin='top left')
# Start with only the representation map visible
p.plots['cov'][0].visible = False
p.plots['centers'][0].visible = False
# Plot tweaks
p.aspect_ratio = 1.0
p.y_axis.title = 'Y (cm)'
p.x_axis.title = 'X (cm)'
p.x_axis.orientation = 'bottom'
p.title = 'Stage Maps'
return p
def _unit_map_default(self):
# Set the initial unit map
data = ArrayPlotData(unit_map=self._get_unit_map_data())
p = Plot(data)
# Plot the map
p.img_plot('unit_map', name='unit', xbounds=(0, self.PMap.W), ybounds=(0, self.PMap.H),
origin='top left')
# Plot tweaks
p.aspect_ratio = 1.0
p.y_axis.title = 'Y (cm)'
p.x_axis.title = 'X (cm)'
p.x_axis.orientation = 'bottom'
p.title = 'Single Unit Maps'
return p
def _unit_data_plots_default(self):
# Plot data and vertical container object
data = ArrayPlotData(**self._get_unit_plots_data())
container = VPlotContainer()
# Add individual distribution plots to container
for key in ('avg_diameter', 'avg_area', 'coverage', 'max_r', 'num_fields'):
p = Plot(data)
p.plot((key+'_bins', key), name=key, type='polygon', edge_width=2,
edge_color='mediumblue', face_color='lightsteelblue')
p.x_axis.title = key
p.y_axis.title = 'count'
p.padding = [50, 30, 20, 40]
if key == 'num_fields':
p.x_axis.tick_interval = 1
container.add(p)
return container
def _field_data_plots_default(self):
# Plot data and vertical container object
data = ArrayPlotData(**self._get_field_plots_data())
container = VPlotContainer()
# Add individual distributions plots to container
for key in ('area', 'diameter', 'average', 'peak'):
p = Plot(data)
p.plot((key+'_bins', key), name=key, type='polygon', edge_width=2,
edge_color='red', face_color='salmon')
p.x_axis.title = key
p.y_axis.title = 'count'
p.padding = [50, 30, 20, 40]
container.add(p)
return container
# Plot update methods
def _update_stage_map(self):
"""Handle switching between different stage maps"""
# Update and equalize bounds for all subplots
self.stage_map.plots['rep'][0].bounds = self.stage_map.bounds
self.stage_map.plots['cov'][0].bounds = self.stage_map.bounds
self.stage_map.plots['centers'][0].bounds = self.stage_map.bounds
# Set visibility flags
if self.stage_map_type == 'representation':
self.stage_map.title = 'Relative Representation'
vis_plots = (True, False, False)
elif self.stage_map_type == 'coverage':
self.stage_map.title = 'Total Stage Coverage'
vis_plots = (False, True, False)
elif self.stage_map_type == 'field_centers':
self.stage_map.title = 'Place Field Centroids'
vis_plots = (False, False, True)
# Toggle plot visibility and redraw
self.stage_map.plots['rep'][0].visible, \
self.stage_map.plots['cov'][0].visible, \
self.stage_map.plots['centers'][0].visible = vis_plots
self.stage_map.request_redraw()
def _update_unit_map(self):
"""Update current image source and title; then redraw the plot"""
self.unit_map.data.set_data('unit_map', self._get_unit_map_data())
self.unit_map.title = '%s of Unit %d'%(self.unit_map_type.capitalize(), self.unit)
self.unit_map.request_redraw()
def _update_unit_values(self):
"""Update the scalar readonly values"""
if self._unit == -1:
self.num_fields = 0
self.coverage = self.avg_area = self.avg_diameter = 0.0
self.max_rate = self.PMap.maxima[self.unit, 2]
else:
self.num_fields = int(self.udata[self._unit]['num_fields'])
self.coverage = float(self.udata[self._unit]['coverage'])
self.avg_area = float(self.udata[self._unit]['avg_area'])
self.avg_diameter = float(self.udata[self._unit]['avg_diameter'])
self.max_rate = float(self.udata[self._unit]['max_r'])
def _get_unit_map_data(self):
"""Helper function to get RGBA array for current unit and map type"""
if self.unit_map_type == 'ratemap':
map_data = array_to_rgba(self.PMap.Map[self.unit], cmap=cm.jet,
norm=False, cmax=self.peak_rate)
elif self.unit_map_type == 'single':
map_data = array_to_rgba(self.PMap.single_maps[self.unit], cmap=cm.hot)
elif self.unit_map_type == 'fields':
map_data = array_to_rgba(self.PMap.coverage_maps[self.unit], cmap=cm.gray)
return map_data
def _get_unit_plots_data(self):
"""Helper function for getting unit data distributions"""
# Integer distribution for number of fields
data = {}
data['num_fields_bins'], data['num_fields'] = integer_hist(self.udata['num_fields'])
# Continuous distributions of other unit statistics
for key in ('avg_area', 'avg_diameter', 'coverage', 'max_r'):
keyb = key + '_bins'
data[key], data[keyb] = S.histogram(self.udata[key], bins=self.unit_bins)
data[keyb] += (data[keyb][1] - data[keyb][0]) / 2
data[keyb] = data[keyb][:-1]
# Add 0-value end-points for polygon display
for key in data:
if key[-4:] == 'bins':
data[key] = N.r_[data[key][0], data[key], data[key][-1]]
else:
data[key] = N.r_[0, data[key], 0]
return data
def _get_field_plots_data(self):
"""Helper function for getting field data distributions"""
# Continuous distributions of place field properties
data = {}
for key in ('area', 'diameter', 'average', 'peak'):
keyb = key + '_bins'
data[key], data[keyb] = S.histogram(self.fdata[key], bins=self.field_bins)
data[keyb] += (data[keyb][1] - data[keyb][0]) / 2
data[keyb] = data[keyb][:-1]
# Add 0-value end-points for polygon display
for key in data:
if key[-4:] == 'bins':
data[key] = N.r_[data[key][0], data[key], data[key][-1]]
else:
data[key] = N.r_[0, data[key], 0]
return data
# Map traits notifications
def _unit_bins_changed(self):
"""Update plot data for unit distributions"""
data = self._get_unit_plots_data()
plot_data = self.unit_data_plots.components[0].data
for key in data:
plot_data.set_data(key, data[key])
def _field_bins_changed(self):
data = self._get_field_plots_data()
plot_data = self.field_data_plots.components[0].data
for key in data:
plot_data.set_data(key, data[key])
def _stage_map_type_changed(self):
self._update_stage_map()
def _unit_map_type_changed(self):
self._update_unit_map()
def _unit_changed(self):
"""Update the unit map and scalar values"""
find_unit = (self.udata['unit'] == self.unit).nonzero()[0]
if find_unit.shape[0]:
self._unit = find_unit[0]
else:
self._unit = -1
self._update_unit_map()
self._update_unit_values()
# Output object default
def _out_default(self):
return CPrint(prefix=self.__class__.__name__, color='purple')
```
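The distribution panels above rely on a small histogram-to-polygon transformation (bin edges shifted to centers, then zero-count end points appended) that can be sketched independently:
```python
import numpy as np

vals = np.random.gamma(2.0, 1.0, size=500)
counts, edges = np.histogram(vals, bins=20)
centers = edges[:-1] + np.diff(edges) / 2    # shift edges to bin centers
x = np.r_[centers[0], centers, centers[-1]]  # duplicate the end points...
y = np.r_[0, counts, 0]                      # ...and pin them to zero counts
# (x, y) now trace a closed outline suitable for a filled 'polygon' plot
```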
#### File: src/tools/images.py
```python
import os as _os
import numpy as _N
from sys import platform as _plat
if _plat == "win32":
import Image
else:
from PIL import Image
def image_blast(M, savedir, stem='image', fmt='%s_%03d', rev=False, **kwargs):
"""Save a rank-3 stacked intensity matrix *M* to a set of individual PNG
image files in the directory *savedir*.
If *savedir* does not exist it will be created. Set *stem* to specify
the filename stem used for each output image.
Keyword arguments:
stem -- file name stem to be used for output images
fmt -- a unique_path fmt specification (need an %s followed by a %d)
rev -- indicate use of a reversed fmt specification (%d followed by a %s)
Extra keyword arguments will get passed through to array_to_rgba. See its
doc string for details.
"""
assert M.ndim == 3, 'requires rank-3 array of intensity values'
d = _os.path.realpath(str(savedir))
if not _os.path.exists(d):
_os.makedirs(d)
stem = _os.path.join(d, stem)
N = M.shape[0]
first, middle, last = "", "", ""
for i,m in enumerate(M):
image_fn = unique_path(stem, fmt=fmt, ext="png", reverse_fmt=rev)
if i == 0:
first = image_fn
elif i == N-1:
last = image_fn
array_to_image(m, image_fn, **kwargs)
if N == 2:
middle += '\n'
elif N > 2:
middle += '\n\t...\n'
print first, middle, last
return
def array_to_rgba(mat, cmap=None, norm=True, cmin=0, cmax=1):
"""Intensity matrix (float64) -> RGBA colormapped matrix (uint8)
Keyword arguments:
cmap -- a matplotlib.cm colormap object
norm -- whether the color range is normalized to values in M
If *norm* is set to False:
cmin -- minimum clipping bound of the color range (default 0)
cmax -- maximum clipping bound of the color range (default 1)
"""
if cmap is None:
from matplotlib import cm
cmap = cm.hot
M = mat.copy()
data_min, data_max = M.min(), M.max()
if norm:
cmin, cmax = data_min, data_max
else:
if cmin > data_min:
M[M < cmin] = cmin # clip lower bound
if cmax < data_max:
M[M > cmax] = cmax # clip upper bound
return cmap((M-cmin)/float(cmax-cmin), bytes=True)
def array_to_image(M, filename, **kwargs):
"""Save matrix, autoscaled, to image file (use PIL fmts)
Keyword arguments are passed to array_to_rgba.
"""
if M.ndim != 2:
raise ValueError, 'requires rank-2 matrix'
img = Image.fromarray(array_to_rgba(M, **kwargs), 'RGBA')
img.save(filename)
return
def tile2D(M, mask=None, gridvalue=0.5, shape=None):
"""
Construct a tiled 2D matrix from a 3D matrix
Keyword arguments:
mask -- an (H,W)-shaped binary masking array for each cell
gridvalue -- the intensity value for the grid
shape -- a (rows, columns) tuple specifying the shape of the tiling to use
If shape is specified, rows+columns should equal M.shape[0].
"""
if len(M.shape) != 3:
return
N, H, W = M.shape
if mask is not None and (H,W) != mask.shape:
mask = None
if shape and (type(shape) is type(()) and len(shape) == 2):
rows, cols = shape
else:
rows, cols = tiling_dims(N)
Mtiled = _N.zeros((rows*H, cols*W), 'd')
for i in xrange(N):
r, c = divmod(i, cols)
if mask is None:
Mtiled[r*H:(r+1)*H, c*W:(c+1)*W] = M[i]
else:
Mtiled[r*H:(r+1)*H, c*W:(c+1)*W] = mask * M[i]
Mtiled[H::H,:] = gridvalue
Mtiled[:,W::W] = gridvalue
return Mtiled
def tiling_dims(N):
"""Square-ish (rows, columns) for tiling N things
"""
d = _N.ceil(_N.sqrt(N))
return int(_N.ceil(N / d)), int(d)
```
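Assuming this module is importable (e.g. as `tools.images`; the import path below is illustrative), a typical round trip through `tile2D` and `array_to_rgba` might look like:
```python
import numpy as np
from matplotlib import cm
from tools.images import tile2D, array_to_rgba

stack = np.random.rand(7, 32, 32)            # 7 intensity maps
mosaic = tile2D(stack, gridvalue=0.5)        # ~3x3 tiling with grid lines
rgba = array_to_rgba(mosaic, cmap=cm.gray)   # uint8 RGBA, normalized
```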
#### File: src/tools/setops.py
```python
import numpy as _N
# Generic set-to-array translation function
def _do_set_op(u, v, set_op):
assert type(u) is _N.ndarray and type(v) is _N.ndarray, 'need arrays'
u_func = getattr(set(u), set_op)
return _N.array(list(u_func(set(v))))
# Create set operation functions
def intersection(u, v):
"""Get array intersection of input arrays u and v"""
return _do_set_op(u, v, 'intersection')
def union(u, v):
"""Get array union of input arrays u and v"""
return _do_set_op(u, v, 'union')
def difference(u, v):
"""Get array difference of input arrays u and v"""
return _do_set_op(u, v, 'difference')
def symmetric_difference(u, v):
"""Get array symmetric_difference of input arrays u and v"""
return _do_set_op(u, v, 'symmetric_difference')
# _ops = ('intersection', 'union', 'difference', 'symmetric_difference')
# for _op in _ops:
# # tmp = lambda u, v: _do_set_op(u, v, _op)
# def tmp(u, v): return _do_set_op(u, v, _op)
# tmp.__doc__ = "Get array %s of input arrays u and v"%_op
# exec '%s = tmp'%_op
#
```
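A short usage note: because these helpers round-trip through Python sets, element order in the returned arrays is not guaranteed. For example (assuming the module is importable as `tools.setops`):
```python
import numpy as np
from tools.setops import intersection, difference

u = np.array([1, 2, 3, 4])
v = np.array([3, 4, 5])
intersection(u, v)   # array containing {3, 4}, in arbitrary order
difference(u, v)     # array containing {1, 2}, in arbitrary order
```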
#### File: grid-remapping-model/src/trajectories.py
```python
import numpy as N, scipy as S
from scipy.interpolate import interp1d as _i1d
# Package imports
from .stage import StagingMap
from .core.timeseries import TimeSeries
# Traits imports
from enthought.traits.api import (HasTraits, Trait, Constant, Range, Array,
Float, Int)
class BaseTrajectory(HasTraits):
"""
Superclass for spatiotemporal trajectories. Subclasses should override
the array constructor __full_traj_default() so that it returns the
full trajectory data in an (_time.size, 2) matrix.
Public methods:
advance -- update x and y attributes to next position
Keyword arguments:
dt -- timestep between contiguous spatial samples
T -- total duration for the trajectory
"""
dt = Trait(TimeSeries.__class_traits__['dt'])
T = Trait(TimeSeries.__class_traits__['T'])
Map = Trait(StagingMap)
x = Float
y = Float
_full_traj = Array
_time = Array
_i = Int
def advance(self):
"""Move the current position one step along the trajectory"""
self._i += 1
try:
self.x = self._full_traj[self._i,0]
self.y = self._full_traj[self._i,1]
except IndexError:
pass
def reset(self):
"""Return this trajectory to its initial position"""
self._i = 0
self.x = self._x_default()
self.y = self._y_default()
def _Map_default(self):
return StagingMap(map_type='TrajectoryMap', quiet=True)
def _x_default(self):
return self._full_traj[0,0]
def _y_default(self):
return self._full_traj[0,1]
def __time_default(self):
return N.arange(0.0, self.T + 5*self.dt, self.dt, 'd')
def __full_traj_default(self):
"""Construct an (nTimeSteps, 2) array containing the trajectory"""
return N.zeros((self._time.size, 2), 'd')
class RandomWalk(BaseTrajectory):
"""
A smoothed random-walk trajectory.
Keyword arguments:
v_bar -- mean velocity for the trajectory
step_freq -- average frequency for random turns
"""
v_bar = Float(15.0, unit='cm/s')
step_freq = Float(2.0, unit='Hz')
def __full_traj_default(self):
step = int(1 / (self.step_freq * self.dt))
tstep = N.arange(0, self.T + self.dt*step, self.dt*step)
v_sigma = self.v_bar * self.dt * step
X = N.empty((tstep.shape[0], 2), 'd')
X[0] = self.Map.x0
def random_step(x0, x1):
_angle_ = 2*S.pi * N.random.random_sample()
_x = x0 + (v_sigma * N.cos(_angle_), v_sigma * N.sin(_angle_))
while not self.Map.inbounds(_x[0], _x[1]):
_angle_ = 2*S.pi * N.random.random_sample()
_x = x0 + (v_sigma * N.cos(_angle_), v_sigma * N.sin(_angle_))
x1[:] = _x
for t in xrange(1, tstep.shape[0]):
random_step(X[t-1], X[t])
return N.c_[
_i1d(tstep, X[:,0], kind='cubic', bounds_error=False, fill_value=X[-1,0])(self._time),
_i1d(tstep, X[:,1], kind='cubic', bounds_error=False, fill_value=X[-1,1])(self._time)]
class AbstractImpulseRaster(BaseTrajectory):
"""
Abstract base class provides functionality for creating x,y trajectories
through a StagingMap environment that sequentially 'clamp' on a set of
stage pixels for a predetermined 'dwell-time'.
Subclasses must implement the _get_sample_index method to specify the
subset of pixels to clamp.
"""
dwell = Float(0.2)
sample_freq = Range(low=0.01, high=1.0, value=0.1)
_full_raster = Array
_nsamples = Int
_transit = Float(10**-7)
_nsteps = Int
_req_time = Float
_init_factor = Float(10)
def get_points(self):
"""
Convenience method to return a 2-row matrix containing the raster
points scanned by this trajectory
"""
return self._full_raster[:, self._get_sample_index()]
def _get_sample_index(self):
"""Subclass provided; return column-index array into stage raster"""
raise NotImplementedError
def __full_raster_default(self):
"""
A (2, H*W)-shaped matrix containing full stage raster locations
"""
Xfull = N.empty((2, self._nsteps), 'd')
# X-values
Xfull[0] = N.repeat(self.Map._xrange, self.Map._yrange.shape[0])
# Y-values
_tmp = N.empty(
(self.Map._xrange.shape[0], self.Map._yrange.shape[0]), 'd')
_tmp[:] = self.Map._yrange[N.newaxis]
Xfull[1] = _tmp.flatten()
return Xfull
def __full_traj_default(self):
"""
A (2, ntimesteps)-shaped matrix containing the full temporal trajectory
"""
# Down sample the stage raster according to sample index
X = self._full_raster[:, N.repeat(self._get_sample_index(), 2)]
# Dwell vector and full-series time vector
_init = self._init_factor*self.dwell
_dwell_t = N.linspace(0, self._req_time - _init, self._nsamples)
t = N.repeat(_dwell_t, 2)
t[1::2] += self.dwell - self._transit
# Insert initial dwell time for transients
t[1:] += _init
return N.c_[
_i1d(t, X[0], kind='linear', bounds_error=False,
fill_value=X[0,-1])(self._time),
_i1d(t, X[1], kind='linear', bounds_error=False,
fill_value=X[1,-1])(self._time)]
def _T_default(self):
return self._req_time
def __req_time_default(self):
return (self._init_factor + self._nsamples) * self.dwell
def __nsteps_default(self):
return self.Map._xrange.shape[0] * self.Map._yrange.shape[0]
def __nsamples_default(self):
return int(self._nsteps * self.sample_freq)
class BipartiteRaster(AbstractImpulseRaster):
"""
Raster-scan stage with clamped input impulses on every other pixel.
Specifically, a bisampled, even partitioning into sampled and non-sampled
pixels in a checkered pattern.
Keyword arguments:
dwell -- residence time for each pixel in the scan
"""
sample_freq = Constant(0.5)
def _get_sample_index(self):
"""
Pick out bipartite checkered pattern of pixel samples
"""
odd_cols = self.Map._xrange[1::2]
s_ix = N.arange(0, self._nsteps, 2)
for i in xrange(s_ix.size):
if self._full_raster[0, s_ix[i]] in odd_cols:
s_ix[i] += 1
if s_ix[i] >= self._nsteps:
s_ix[i] = self._nsteps - 2
return s_ix
```
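A self-contained sketch of the `RandomWalk` construction above: coarse random steps drawn at `step_freq`, rejection-sampled to stay in bounds, then cubic-interpolated down to the simulation timestep (the rectangular bounds check below stands in for `Map.inbounds`):
```python
import numpy as np
from scipy.interpolate import interp1d

def random_walk(T=60.0, dt=0.01, v_bar=15.0, step_freq=2.0,
                size=(100.0, 100.0)):
    """Coarse random steps at step_freq, cubic-interpolated to timestep dt."""
    step = int(1 / (step_freq * dt))               # timesteps per coarse step
    t_coarse = np.arange(0, T + dt * step, dt * step)
    sigma = v_bar * dt * step                      # displacement per step
    X = np.empty((t_coarse.size, 2))
    X[0] = (size[0] / 2, size[1] / 2)              # start at arena center
    for i in range(1, t_coarse.size):
        while True:                                # rejection-sample headings
            a = 2 * np.pi * np.random.random_sample()
            nxt = X[i-1] + sigma * np.array([np.cos(a), np.sin(a)])
            if 0 <= nxt[0] <= size[0] and 0 <= nxt[1] <= size[1]:
                break
        X[i] = nxt
    t = np.arange(0.0, T + dt, dt)
    fx = interp1d(t_coarse, X[:,0], kind='cubic', bounds_error=False,
                  fill_value=X[-1,0])
    fy = interp1d(t_coarse, X[:,1], kind='cubic', bounds_error=False,
                  fill_value=X[-1,1])
    return np.c_[fx(t), fy(t)]
```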
|
{
"source": "jdmonaco/neuroswarms",
"score": 2
}
|
#### File: neuroswarms/neuroswarms/matrix.py
```python
__all__ = ('tile_index', 'pairwise_tile_index', 'pairwise_distances',
'distances', 'pairwise_phasediffs', 'pairwise_unit_diffs',
'somatic_motion_update', 'reward_motion_update')
from numpy import (empty, zeros, newaxis as AX, swapaxes, hypot, sin, inf,
broadcast_arrays, broadcast_to)
from .utils.types import *
DEBUGGING = False
def _check_ndim(Mstr, M, ndim):
assert M.ndim == ndim, f'{Mstr}.ndim != {ndim}'
def _check_shape(Mstr, M, shape, axis=None):
if axis is None:
assert M.shape == shape, f'{Mstr}.shape != {shape}'
else:
assert M.shape[axis] == shape, f'{Mstr}.shape[{axis}] != {shape}'
def tile_index(A, B):
"""
Entrywise comparison index of tile index (column) vectors.
"""
AA, BB = broadcast_arrays(A, B)
if DEBUGGING:
shape = (max(A.shape[0], B.shape[0]), 1)
_check_shape('AA', AA, shape)
_check_shape('BB', BB, shape)
return (AA, BB)
def pairwise_tile_index(A, B):
"""
Pairwise comparison index of tile index (column) vectors.
"""
AA, BB = broadcast_arrays(A, B.T)
if DEBUGGING:
shape = (len(A), len(B))
_check_shape('AA', AA, shape)
_check_shape('BB', BB, shape)
return (AA, BB)
def pairwise_phasediffs(A, B):
"""
Compute synchronizing phase differences between phase pairs.
"""
N_A = len(A)
N_B = len(B)
DD_shape = (N_A, N_B)
if DEBUGGING:
_check_ndim('A', A, 2)
_check_ndim('B', B, 2)
_check_shape('A', A, 1, axis=1)
_check_shape('B', B, 1, axis=1)
return B.T - A
def distances(A, B):
"""
Compute distances between points in entrywise order.
"""
AA, BB = broadcast_arrays(A, B)
shape = AA.shape
if DEBUGGING:
_check_ndim('AA', AA, 2)
_check_ndim('BB', BB, 2)
_check_shape('AA', AA, 2, axis=1)
_check_shape('BB', BB, 2, axis=1)
return hypot(AA[:,0] - BB[:,0], AA[:,1] - BB[:,1])[:,AX]
def pairwise_unit_diffs(A, B):
"""
Compute attracting unit-vector differences between pairs of points.
"""
DD = pairwise_position_deltas(A, B)
D_norm = hypot(DD[...,0], DD[...,1])
nz = D_norm.nonzero()
DD[nz] /= D_norm[nz][...,AX]
return DD
def pairwise_distances(A, B):
"""
Compute distances between pairs of points.
"""
DD = pairwise_position_deltas(A, B)
return hypot(DD[...,0], DD[...,1])
def pairwise_position_deltas(A, B):
"""
Compute attracting component deltas between pairs of points.
"""
N_A = len(A)
N_B = len(B)
if DEBUGGING:
_check_ndim('A', A, 2)
_check_ndim('B', B, 2)
_check_shape('A', A, 2, axis=1)
_check_shape('B', B, 2, axis=1)
# Broadcast the first position matrix
AA = empty((N_A,N_B,2), DISTANCE_DTYPE)
AA[:] = A[:,AX,:]
return B[AX,...] - AA
def somatic_motion_update(D_up, D_cur, X, V):
"""
Compute updated positions by averaging pairwise difference vectors for
mutually visible pairs with equal bidirectional adjustments within each
pair. The updated distance matrix does not need to be symmetric; it
represents 'desired' updates based on recurrent learning.
:D_up: R(N,N)-matrix of updated distances
:D_cur: R(N,N)-matrix of current distances
:X: R(N,2)-matrix of current positions
:V: {0,1}(N,N)-matrix of current pairwise agent visibility
:returns: R(N,2)-matrix of updated positions
"""
N = len(X)
D_shape = (N, N)
if DEBUGGING:
_check_ndim('X', X, 2)
_check_shape('X', X, 2, axis=1)
_check_shape('D_up', D_up, D_shape)
_check_shape('D_cur', D_cur, D_shape)
_check_shape('V', V, D_shape)
# Broadcast field position matrix and its transpose
XX = empty((N,N,2))
XX[:] = X[:,AX,:]
XT = swapaxes(XX, 0, 1)
# Find visible & valid values (i.e., corresponding to non-zero weights)
#
# NOTE: The normalizing factor is divided by 2 because the somatic update
# represents one half of the change in distance between a pair of units.
D_inf = D_up == inf
norm = V * ~D_inf
N = norm.sum(axis=1)
valid = N.nonzero()[0]
norm[valid] /= 2*N[valid,AX]
# Zero out the inf elements of the updated distance matrix and corresponding
# elements in the current distance matrix
D_up[D_inf] = D_cur[D_inf] = 0.0
# Construct the agent-agent avoidant unit vectors
DX = XX - XT
DX_norm = hypot(DX[...,0], DX[...,1])
valid = DX_norm.nonzero()
DX[valid] /= DX_norm[valid][:,AX]
return (norm[...,AX]*(D_up - D_cur)[...,AX]*DX).sum(axis=1)
def reward_motion_update(D_up, D_cur, X, R, V):
"""
Compute updated positions by averaging reward-based unit vectors for
adjustments of the point only. The updated distance matrix represents
'desired' updates based on reward learning.
:D_up: R(N,N_R)-matrix of updated distances between points and rewards
:D_cur: R(N,N_R)-matrix of current distances between points and rewards
:X: R(N,2)-matrix of current point positions
:R: R(N_R,2)-matrix of current reward positions
:V: {0,1}(N,N_R)-matrix of current agent-reward visibility
:returns: R(N,2)-matrix of updated positions
"""
N = len(X)
N_R = len(R)
D_shape = (N, N_R)
if DEBUGGING:
_check_ndim('X', X, 2)
_check_ndim('R', R, 2)
_check_shape('X', X, 2, axis=1)
_check_shape('R', R, 2, axis=1)
_check_shape('D_up', D_up, D_shape)
_check_shape('D_cur', D_cur, D_shape)
_check_shape('V', V, D_shape)
# Broadcast field position matrix
XX = empty((N,N_R,2))
XX[:] = X[:,AX,:]
# Find valid values (i.e., corresponding to non-zero weights)
D_inf = D_up == inf
norm = V * ~D_inf
N = norm.sum(axis=1)
valid = N.nonzero()[0]
norm[valid] /= N[valid,AX]
# Zero out the inf elements of the updated distance matrix and corresponding
# elements in the current distance matrix
D_up[D_inf] = D_cur[D_inf] = 0.0
# Construct the agent-reward avoidant unit vectors
DR = XX - R[AX]
DR_norm = hypot(DR[...,0], DR[...,1])
valid = DR_norm.nonzero()
DR[valid] /= DR_norm[valid][:,AX]
return (norm[...,AX]*(D_up - D_cur)[...,AX]*DR).sum(axis=1)
```
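The pairwise helpers above all reduce to one numpy broadcasting idiom; here is a worked micro-example of the `(N, N_B, 2)` delta array and the distances it yields:
```python
import numpy as np

A = np.array([[0.0, 0.0], [3.0, 4.0]])              # N = 2 points
B = np.array([[0.0, 0.0], [6.0, 8.0], [3.0, 0.0]])  # N_B = 3 points
DD = B[np.newaxis, :, :] - A[:, np.newaxis, :]      # (2, 3, 2) deltas
D = np.hypot(DD[..., 0], DD[..., 1])                # (2, 3) distances
# D[0] == [0., 10., 3.] and D[1] == [5., 5., 4.]
```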
#### File: neuroswarms/utils/geometry.py
```python
import os
import json
import time
import queue
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.signal import medfilt2d
from matplotlib.patches import Circle
from .. import MAPDIR, MOVIE_DPI
from .images import uint8color, rgba_to_image, _fill_rgba
from .data import DataStore
from .console import ConsolePrinter
from .svg import load_environment
from .types import *
ALPHA = 10.0
K_H = 20.0
COLORMAP = 'gray_r'
MASK_COLOR = 'cyan'
CUE_COLOR = 'purple'
REWARD_COLOR = 'gold'
def map_index(X):
"""
Return a tuple index for map matrixes based on a set of position points.
"""
return tuple(to_points(X).T)
class EnvironmentGeometry(object):
"""
Import, processing, and data functions on environmental geometry.
"""
def __init__(self, name, mapdir=None, recompute=False, alpha=ALPHA,
k_H=K_H):
"""
Find the named map specification file (.svg) and begin processing.
:name: Name of environment
:mapdir: Path to directory containing map data folders
:recompute: Recompute all geometry regardless of existing data
:alpha: Barrier repulsion spatial constant (in points)
"""
self.out = ConsolePrinter(prefix=f'EnvGeom(\'{name}\')',
prefix_color='green')
self.name = name
self.mapdir = MAPDIR if mapdir is None else mapdir
self.envdir = os.path.join(self.mapdir, name)
self.svgpath = os.path.join(self.mapdir, f'{name}.svg')
self.h5path = os.path.join(self.envdir, 'geometry.h5')
self.datafile = DataStore(self.h5path)
self.infopath = os.path.join(self.envdir, 'info.json')
self.backupdir = os.path.join(self.envdir, 'backups')
self.recompute = recompute
assert os.path.isdir(self.mapdir), f'not a directory: {self.mapdir}'
if os.path.isfile(self.svgpath):
self.out(self.svgpath, prefix='MapFile')
self.alpha = alpha
self.k_H = k_H
self.process()
else:
self.out(f"Missing geometry data or map file for '{name}':" \
f"Please save map file to {self.svgpath}.", error=True)
def __str__(self):
return f'<{self.__class__.__name__}(\'{self.name}\'): ' \
f'{self.shape[0]}x{self.shape[1]}, ' \
f'{self.N_B} barriers, {self.N_C} cues, {self.N_R} rewards, ' \
f'{len(self.H)} visibility tiles>'
def __repr__(self):
return f'{self.__class__.__name__}(\'{self.name}\', ' \
f'alpha={self.alpha}, k_H={self.k_H})'
def process(self):
"""
Load the SVG map file for parsing and processing the environment.
"""
try:
env = load_environment(self.svgpath)
except Exception:
self.out(self.svgpath, prefix='LoadError', error=True)
return
info = self.info = {k:env[k] for k in ('origin','width','height',
'extent','figsize')}
self.origin = info['origin']
self.width = info['width']
self.height = info['height']
self.extent = info['extent']
self.figsize = info['figsize']
self.B = env['barriers']
self.C = env['cues'][:,:2]
self.C_W = env['cues'][:,2]
self.R = env['rewards'][:,:2]
self.R_W = env['rewards'][:,2]
self.S0 = env['spawns']
info['N_B'] = self.N_B = len(self.B)
info['N_C'] = self.N_C = len(self.C)
info['N_R'] = self.N_R = len(self.R)
info['N_0'] = self.N_0 = len(self.S0)
info['shape'] = self.shape = (self.width, self.height)
info['name'] = self.name
info['alpha'] = self.alpha
info['k_H'] = self.k_H
if not os.path.isdir(self.envdir):
os.makedirs(self.envdir)
self._compute_geometry()
try:
with open(self.infopath, 'w') as fd:
json.dump(info, fd, indent=2, separators=(', ', ': '),
sort_keys=True)
except:
self.out(self.infopath, prefix='SaveError', error=True)
else:
self.out(self.infopath, prefix='InfoFile')
def sample_spawn_points(self, N=1):
"""
Randomly sample spawn locations from all possible points.
:N: The number of random samples to draw
:returns: (N, 2)-matrix of random spawn locations
"""
N_X0 = len(self.X0)
if N > N_X0:
rnd = lambda n: np.random.randint(N_X0, size=n)
else:
rnd = lambda n: np.random.permutation(np.arange(N_X0))[:n]
ix = rnd(N)
dmin = self.G_PD[map_index(self.X0[ix])]
while np.any(dmin < self.alpha):
fix = dmin < self.alpha
ix[fix] = rnd(fix.sum())
dmin = self.G_PD[map_index(self.X0[ix])]
return self.X0[ix]
def maps(self):
"""
Return an attribute-key dict of map-like matrix arrays.
"""
maps = {}
for k in self.__dict__.keys():
X = getattr(self, k)
if isinstance(X, np.ndarray) and X.shape[:2] == self.shape:
maps[k] = X
return maps
def save_all_maps(self, **imagefmt):
"""
Save images of all environmental map matrixes.
"""
for name in self.maps().keys():
self.save_map(name, **imagefmt)
def save_map(self, name, **imagefmt):
"""
Save an image (or a set of images, for 3D stacks) of the named map matrix.
"""
M = getattr(self, name)
if M.ndim == 3:
for j in range(M.shape[2]):
self._save_matrix_image(M[...,j], f'{name}_{j:02d}', **imagefmt)
elif M.ndim == 2:
self._save_matrix_image(M, name, **imagefmt)
def plot_all_map_figures(self, **imagefmt):
"""
Plot all environment maps in new figure windows.
"""
for name in self.maps().keys():
self.plot_map_figure(name, **imagefmt)
def plot_map_figure(self, name, **imagefmt):
"""
Plot full-bleed figure window(s) of the named map.
"""
assert name in self.maps().keys(), f'not a map name {name}'
M = getattr(self, name)
if M.ndim == 3:
for j in range(M.shape[2]):
self.figure(mapname=(name, j), **imagefmt)
elif M.ndim == 2:
f, ax = self.figure(mapname=name, **imagefmt)
return f, ax
def plot_tile_map(self, cue_color=CUE_COLOR, reward_color=REWARD_COLOR,
**imagefmt):
"""
Verify tile map organization by plotting with index numbers.
"""
cmap = imagefmt.pop('cmap', 'cubehelix')
f, ax = self.figure(mapname='G_PH', cmap=cmap, **imagefmt)
# Plot index labels at the center of each grid tile
dpi = mpl.rcParams['figure.dpi']
font = dict(fontsize=3.2*(245/dpi), weight='light')
for i, (x,y) in enumerate(self.H):
ax.text(x + 0.5, y + 0.5, str(i), fontdict=font, ha='center',
va='center', color='hotpink', zorder=0)
# Draw circles around tiles for each cue
fmt = dict(fill=False, facecolor=None, alpha=0.9, zorder=10)
[ax.add_artist(Circle(self.H[self.C_H[c]], radius=self.k_H/2,
edgecolor=cue_color, linewidth=0.5+0.5*self.C_W[c], **fmt))
for c in range(self.N_C)]
# Draw circles around tiles for each reward
[ax.add_artist(Circle(self.H[self.R_H[r]], radius=self.k_H/2,
edgecolor=reward_color, linewidth=0.5+0.5*self.R_W[r], **fmt))
for r in range(self.N_R)]
plt.draw()
def plot_visibility(self, which='cue', **imagefmt):
"""
Plot visibility of cues (which='cue') or rewards (which='reward').
"""
if which == 'cue':
P = self.C
N_P = self.N_C
C_HP = self.V_HC
elif which == 'reward':
P = self.R
N_P = self.N_R
C_HP = self.V_HR
else:
self.out('Must be cue or reward: {}', which, error=True)
return
plt.ioff()
f, ax = self.figure(clear=True, tag=f'{which}vis', mapname='G_P')
alpha = 0.5
ms0 = 2
lw = 0.5
cfmt = dict(marker='o', ms=3*ms0, mec='k', mew=lw, alpha=(2+alpha)/3,
zorder=10)
vfmt = dict(ls='-', lw=lw, marker='.', ms=ms0, mec='k', mfc='k',
mew=lw, alpha=alpha, zorder=5)
cols = [mpl.cm.tab10.colors[c%10] for c in range(N_P)]
for c, (cx, cy) in enumerate(P):
Vx, Vy = tuple(map(lambda v: v[np.newaxis,:],
self.H[C_HP[:,c].nonzero()].T))
Cx = np.zeros((1,Vx.size), dtype=POINT_DTYPE) + cx
Cy = np.zeros((1,Vy.size), dtype=POINT_DTYPE) + cy
X = np.vstack((Cx, Vx))
Y = np.vstack((Cy, Vy))
ax.plot([cx], [cy], mfc=cols[c], **cfmt)
ax.plot(X, Y, c=cols[c], **vfmt)
plt.ion()
plt.show()
plt.draw()
savepath = os.path.join(self.envdir, f'G_P-{which}-visibility.png')
plt.savefig(savepath, dpi=mpl.rcParams['savefig.dpi'])
self.out(f'Saved: {savepath}')
return f, ax
def figure(self, clear=True, tag=None, mapname=None, **imagefmt):
"""
Get a figure window and full-bleed axes for plotting maps.
"""
wasinteractive = plt.isinteractive()
if wasinteractive:
plt.ioff()
# Name the figure and retrieve background map if specified
figname = self.name
if tag is not None:
figname += f'+{tag}'
do_mapshow = False
ix = None
if mapname is not None:
if type(mapname) is tuple and len(mapname) == 2:
mapname, ix = mapname
if mapname in self.maps():
figname += f'.{mapname}'
Mmap = getattr(self, mapname)
if Mmap.ndim == 3:
Mmap = Mmap[...,ix]
figname += f'[{ix}]'
do_mapshow = True
else:
self.out(mapname, prefix='InvalidMapName', error=True)
# Get the figure, clear it, and set the correct size
f = plt.figure(num=figname, figsize=self.figsize, dpi=MOVIE_DPI)
if clear:
f.clear()
f.set_size_inches(self.figsize, forward=True)
# Plot the map to full-bleed axes
ax = plt.axes([0,0,1,1])
if do_mapshow:
self.plot(Mmap, ax=ax, clear=clear, **imagefmt)
if wasinteractive:
plt.ion()
plt.show()
plt.draw()
return f, ax
def plot(self, envmap, index=None, ax=None, clear=True, **imagefmt):
"""
Plot an environment map to an axes object.
"""
if ax is None:
ax = plt.gca()
if clear:
ax.clear()
if type(envmap) is str:
M = getattr(self, envmap)
elif isinstance(envmap, np.ndarray):
M = envmap
if M.ndim == 3:
if index is None:
self.out('Dim >2 arrays require index argument', error=True)
return
M = M[...,index]
assert M.shape == self.shape, f'matrix is not a map {M.shape}'
imagefmt.update(asmap=True, forimshow=True)
im = ax.imshow(
self._rgba_matrix_image(M, **imagefmt),
origin='lower', interpolation='nearest',
extent=self.extent, zorder=-100)
ax.axis(self.extent)
ax.set_axis_off()
ax.axis('equal')
return im
def _save_matrix_image(self, M, name, **imagefmt):
"""
Save a matrix image to a pre-determined path based on the name.
"""
if not (M.shape == self.shape or
(M.ndim == 2 and M.shape[0] == M.shape[1])):
return
rgba = self._rgba_matrix_image(M, **imagefmt)
savepath = os.path.join(self.envdir, f'{name}-matrix.png')
self._move_to_backup(savepath)
rgba_to_image(rgba, savepath)
self.out(f'Saved: {savepath}')
def _rgba_matrix_image(self, M, asmap=True, forimshow=False,
mask_color=MASK_COLOR, cmap=COLORMAP, cmin=None, cmax=None):
"""
Convert a matrix to an RGBA color array for image output.
"""
if asmap:
if forimshow:
M = M.T # must use origin='lower'
else:
M = np.flipud(M.T)
mask = None
if np.ma.isMA(M):
mask = M.mask
if np.all(M.mask):
M = np.zeros_like(M.data)
else:
vmin = M.min()
M = M.data.copy()
M[mask] = vmin
if M.dtype is np.dtype(bool):
M = M.astype('f')
if cmin is None:
cmin = M.min()
if cmax is None:
cmax = M.max()
np.clip(M, cmin, cmax, out=M)
cm = plt.get_cmap(cmap)
if cmin == cmax:
rgba = _fill_rgba(M.shape, cm(0.0))
else:
rgba = cm((M - cmin) / (cmax - cmin), bytes=True)
if mask is not None:
rgba[mask] = uint8color(mask_color)
return rgba
def _move_to_backup(self, f):
"""
Move an existing file to the backup directory.
"""
if not os.path.isfile(f):
return
if not os.path.isdir(self.backupdir):
os.makedirs(self.backupdir)
head, ext = os.path.splitext(f)
os.rename(f, os.path.join(self.backupdir, os.path.basename(head) + \
time.strftime('+%Y-%m-%d-%H%M-%S') + ext))
def _compute_geometry(self):
"""
Pipeline script for computing the environmental geometry.
"""
# Flip all y-values to allow a lower-left origin
self.B[:,[1,3]] = self.height - self.B[:,[1,3]]
self.C[:,1] = self.height - self.C[:,1]
self.R[:,1] = self.height - self.R[:,1]
self.S0[:,1] = self.height - self.S0[:,1]
self._rasterize_barriers()
self._create_environment_mask()
self._find_closest_barriers()
self._calculate_cue_reward_distances()
self._mark_spawn_locations()
self._construct_visibility_map()
self._make_visibility_graphs()
self._compute_tile_maps()
def _has_data(self, *names):
"""
Test whether all named objects are stored in the h5 file.
"""
with self.datafile:
for name in names:
if not self.datafile.has_node(f'/{name}'):
return False
return True
def _remove_arrays(self, *names):
"""
Remove array data from the h5 file.
"""
removed = []
with self.datafile:
for name in names:
if self.datafile.has_node(f'/{name}'):
self.datafile.remove_node(f'/{name}')
delattr(self, name)
removed.append(f'{name}')
self.out(f'Removed: {", ".join(removed)}')
def _load_arrays(self, *names):
"""
Read array data from the h5 file into instance attributes.
"""
loaded = []
with self.datafile:
for name in names:
arr = self.datafile.read_array(f'/{name}')
setattr(self, name, arr)
shape = 'x'.join(list(map(str, arr.shape)))
if np.ma.isMA(arr):
loaded.append(f'{name}<{shape}:masked>')
else:
loaded.append(f'{name}<{shape}>')
self.out(", ".join(loaded), prefix='Loaded')
def _store_arrays(self, imagefmt={}, **data):
"""
Save arrays to Array objects in the h5 file.
"""
saved = []
with self.datafile:
for name, arr in data.items():
setattr(self, name, arr)
res = self.datafile.new_array('/', name, arr)
if arr.ndim == 2:
self._save_matrix_image(arr, name, **imagefmt)
elif arr.ndim == 3:
for z in range(arr.shape[2]):
self._save_matrix_image(arr[...,z], f'{name}_{z:02d}',
**imagefmt)
shape = 'x'.join(list(map(str, arr.shape)))
if np.ma.isMA(arr):
saved.append(f'{name}<{shape}:masked>')
else:
saved.append(f'{name}<{shape}>')
self.out(f'Stored: {", ".join(saved)}')
def _meshgrid(self):
"""
Get a pixel-centered coordinate mesh-grid for the environment.
"""
x = 0.5 + np.arange(*self.extent[:2])
y = 0.5 + np.arange(*self.extent[2:])
return np.array(np.meshgrid(x, y, indexing='ij'), dtype=DISTANCE_DTYPE)
def _pipeline(self, *names):
"""
        Load the named arrays into instance attributes, returning True if they
        are all stored in the h5 file and a recompute is not being forced.
"""
if not self.recompute:
if self._has_data(*names):
self._load_arrays(*names)
return True
return False
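    # Each compute step below follows the same load-or-compute pattern: a
    # leading `if self._pipeline(...): return` restores cached arrays from the
    # h5 file and skips recomputation unless self.recompute forces a redo.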
def _rasterize_barriers(self):
"""
Rasterize the environment with barriers.
"""
if self._pipeline('G_B'): return
B = np.zeros(self.shape, BINARY_DTYPE)
for x1, y1, x2, y2 in self.B:
if x1 == x2:
ymin = min(y1,y2)
ymax = max(y1,y2)
B[x1,ymin:ymax+1] = 1
elif y1 == y2:
xmin = min(x1,x2)
xmax = max(x1,x2)
B[xmin:xmax+1,y1] = 1
else:
self.out(f'Non-rectilinear barrier: {(x1,y1,x2,y2)}',
error=True)
self._store_arrays(G_B=B)
def _scale_factor(self, P_exterior):
"""
Calculate a radial, adjusted scale factor for the environment that
loosely represents an inscribed circle if the interior space were
reconfigured as a square.
"""
return (np.sqrt(2)/2)*np.sqrt((~P_exterior).sum()/np.pi)
def _create_environment_mask(self):
"""
Flood fill the interior to create a mask of occupiable points.
"""
if self._pipeline('G_P'):
self.info['G_scale'] = self._scale_factor(self.G_P)
return
P = self.G_B.copy()
target = 0
barrier = 1
repl = 2
# Starting from each of the spawn disc center points, flood-fill the
# barrier image to mark all interiorly occupiable points
for x0, y0 in self.S0[:,:2]:
Q = queue.deque()
Q.append([x0,y0])
while Q:
N = Q.pop()
W = N.copy()
E = N.copy()
y = N[1]
while W[0] > 0 and P[W[0],y] == target:
W[0] -= 1
while E[0] < self.width and P[E[0],y] == target:
E[0] += 1
for x in range(W[0]+1, E[0]):
P[x,y] = repl
if P[x,y+1] == target:
Q.append([x,y+1])
if P[x,y-1] == target:
Q.append([x,y-1])
# Convert values to {0,1} for {valid,masked}
P[P != repl] = 1
P[P == repl] = 0
G_P = P.astype('?')
self.info['G_scale'] = self._scale_factor(G_P)
self._store_arrays(G_P=G_P)
def _find_closest_barriers(self):
"""
Find the closest barriers and store the interior normal vectors.
"""
if self._pipeline('G_PD', 'G_PB', 'G_PN'): return
P = self.G_P.astype('i2')
PD = np.zeros(self.shape, DISTANCE_DTYPE)
PB = np.zeros_like(PD)
PN = np.zeros(self.shape + (2,), DISTANCE_DTYPE)
halfsq = float(np.sqrt(2)/2)
W, H, alpha = self.width, self.height, self.alpha
B = np.hypot(W, H)
U = np.array([[0 , 1] ,
[0 , -1] ,
[1 , 0] ,
[-1 , 0] ,
[halfsq , halfsq] ,
[halfsq , -halfsq] ,
[-halfsq , halfsq] ,
[-halfsq , -halfsq]] , DISTANCE_DTYPE)
w_d = np.empty_like(U)
d = np.empty((U.shape[0],1), DISTANCE_DTYPE)
k = np.empty_like(d)
def min_normal_vec(P0, x, y):
n = s = e = w = ne = se = nw = sw = 1
while (y+n < H) and (P[x,y+n] == P0): n += 1
if y+n >= H: n = B
while (y-s >= 0) and (P[x,y-s] == P0): s += 1
if y-s < 0: s = B
while (x+e < W) and (P[x+e,y] == P0): e += 1
if x+e >= W: e = B
while (x-w >= 0) and (P[x-w,y] == P0): w += 1
if x-w < 0: w = B
while (x+ne < W) and (y+ne < H) and (P[x+ne,y+ne] == P0): ne += 1
if (x+ne >= W) or (y+ne >= H): ne = B
while (x+se < W) and (y-se >= 0) and (P[x+se,y-se] == P0): se += 1
if (x+se >= W) or (y-se < 0): se = B
while (x-nw >= 0) and (y+nw < H) and (P[x-nw,y+nw] == P0): nw += 1
if (x-nw < 0) or (y+nw >= H): nw = B
while (x-sw >= 0) and (y-sw >= 0) and (P[x-sw,y-sw] == P0): sw += 1
if (x-sw < 0) or (y-sw < 0): sw = B
# Save wall distances and compute the interior barrier coefficients
d[:] = np.array([n, s, e, w, ne, se, nw, sw])[:,np.newaxis]
kmax = 1 if P0 else np.exp(-d/alpha).max()
# Inverse-distance weights in the interior and distance weights in
# the exterior
inout = 2*P0 - 1
w_d[:] = d**inout
w_d[np.isclose(w_d, B**inout)] = 0.0
U_avg = np.average(inout*U, weights=w_d, axis=0)
return (d.min(), kmax, U_avg)
self.out('Starting barrier search...')
i = 0
for x in range(W):
for y in range(H):
PD[x,y], PB[x,y], PN[x,y] = min_normal_vec(P[x,y], x, y)
i += 1
if i % 1000 == 0:
self.out.printf('.')
self.out.newline()
# Median-filter the coefficient map and set all exterior points to the
# maximum coefficient (1)
k_alpha = int(alpha)
if k_alpha % 2 == 0: k_alpha += 1
PB = medfilt2d(PB, kernel_size=k_alpha)
PB[self.G_P] = 1
PB -= PB.min()
PB /= PB.max()
self._store_arrays(G_PD=PD, G_PB=PB, G_PN=PN)
def _calculate_cue_reward_distances(self):
"""
Calculate distances between points and cues/rewards.
"""
if self._pipeline('D_PC', 'D_PR'): return
PC = np.zeros(self.shape + (self.N_C,), DISTANCE_DTYPE)
PR = np.zeros(self.shape + (self.N_R,), DISTANCE_DTYPE)
XX, YY = self._meshgrid()
for i, (cx,cy) in enumerate(self.C):
PC[...,i] = np.hypot(XX - cx, YY - cy)
for i, (rx,ry) in enumerate(self.R):
PR[...,i] = np.hypot(XX - rx, YY - ry)
Cmask = np.empty(PC.shape, '?')
Cmask[:] = self.G_P[...,np.newaxis]
PC = np.ma.MaskedArray(data=PC, mask=Cmask)
Rmask = np.empty(PR.shape, '?')
Rmask[:] = self.G_P[...,np.newaxis]
PR = np.ma.MaskedArray(data=PR, mask=Rmask)
self._store_arrays(D_PC=PC, D_PR=PR)
def _mark_spawn_locations(self):
"""
Compute the allowable spawn locations.
"""
if self._pipeline('G_PS', 'X0'): return
PS = np.zeros(self.shape, BINARY_DTYPE)
XX, YY = self._meshgrid()
for i, (xs, ys, radius) in enumerate(self.S0):
D = np.hypot(XX - xs, YY - ys)
PS[D<=radius] = 1
PS = np.ma.MaskedArray(data=PS, mask=self.G_P)
X0 = np.array(PS.nonzero()).T
# Verify that the spawn points match the matrix
P0 = np.zeros_like(PS)
P0[tuple(X0.T)] = 1
assert np.all(P0 == PS), 'spawn point mismatch'
self._store_arrays(G_PS=PS, X0=X0)
def _construct_visibility_map(self):
"""
Construct a coarse hexagonal grid for visibility computations.
"""
if self._pipeline('H', 'G_H'):
self.info['N_H'] = self.N_H = self.H.shape[0]
return
H = []
angles = np.linspace(0, 2*np.pi, 7)[:-1]
Q = queue.deque()
Q.append(self.origin)
while Q:
v = Q.pop()
existing = False
for u in H:
if np.isclose(v[0], u[0]) and np.isclose(v[1], u[1]):
existing = True
break
if existing:
continue
if not (self.extent[0] <= v[0] < self.extent[1]):
continue
if not (self.extent[2] <= v[1] < self.extent[3]):
continue
Q.extend([(v[0] + self.k_H*np.cos(a), v[1] + self.k_H*np.sin(a))
for a in angles])
H.append(v)
self.out.printf('.')
self.out.newline()
# Mask grid points and sort from top-left to bottom-right
Hint = np.round(H).astype(TILE_DTYPE)
Hvalid = Hint[~self.G_P[tuple(Hint.T)]]
H = Hvalid[np.lexsort(tuple(reversed(tuple(Hvalid.T))))]
# Store filtered grid points in an image matrix
G_H = np.zeros(self.shape, BINARY_DTYPE)
G_H[tuple(H.T)] = 1
G_H = np.ma.MaskedArray(data=G_H, mask=self.G_P)
self._store_arrays(H=H, G_H=G_H)
def _make_visibility_graphs(self):
"""
Make several visibility graphs for relating objects and locations.
"""
if self._pipeline('V_HH', 'V_HR', 'V_HC'): return
N_H = len(self.H)
HH = np.zeros((N_H, N_H), BOOL_DTYPE)
HC = np.zeros((N_H, self.N_C), BOOL_DTYPE)
HR = np.zeros((N_H, self.N_R), BOOL_DTYPE)
for i, (x0, y0) in enumerate(self.H):
self.out.printf('.')
for V, S in [(HH, self.H), (HC, self.C), (HR, self.R)]:
for j, (x1, y1) in enumerate(S):
if (x0 == x1) and (y0 == y1):
V[i,j] = True
continue
theta = np.arctan2(float(y1 - y0), float(x1 - x0))
dx, dy = np.cos(theta), np.sin(theta)
xgtr = x1 > x0
ygtr = y1 > y0
xf, yf = float(x0), float(y0)
while True:
xf += dx
yf += dy
xri = int(round(xf))
yri = int(round(yf))
if self.G_P[xri,yri]:
break
xgtr_ = x1 > xri
ygtr_ = y1 > yri
if (xgtr_ != xgtr) or (ygtr_ != ygtr):
V[i,j] = True
break
self.out.newline()
self._store_arrays(V_HH=HH, V_HC=HC, V_HR=HR, imagefmt={'asmap':False})
def _compute_tile_maps(self):
"""
Create maps of points, cues, and rewards to tile index.
"""
if self._pipeline('G_PH', 'C_H', 'R_H'): return
N_H = len(self.H)
CH = np.empty((self.N_C,), TILE_INDEX_DTYPE)
RH = np.empty((self.N_R,), TILE_INDEX_DTYPE)
# Broadcast the point mask between (x,y)-coordinates and tile points
xy_mesh_tile_shape = (2,) + self.shape + (N_H,)
VV = np.empty(xy_mesh_tile_shape, '?')
VV[:] = self.G_P[np.newaxis,...,np.newaxis]
# Broadcast the meshgrid into tile points
XY = np.empty(xy_mesh_tile_shape, DISTANCE_DTYPE)
XY[:] = self._meshgrid()[...,np.newaxis]
XY = np.ma.MaskedArray(data=XY, mask=VV)
        # Broadcast the tile points across the meshgrid
HH = np.empty(xy_mesh_tile_shape, DISTANCE_DTYPE)
HH[:] = self.H.T[:,np.newaxis,np.newaxis,:]
HH = np.ma.MaskedArray(data=HH, mask=VV)
# Find indexes of closest tiles to every point in the meshgrid
D_XH = XY - HH
PH = np.ma.MaskedArray(
data=np.argmin(np.hypot(D_XH[0], D_XH[1]), axis=2).astype(
TILE_INDEX_DTYPE),
mask=self.G_P)
# Directly index the point-tile map for cue/reward tiles
CH[:] = PH[tuple(self.C.T)]
RH[:] = PH[tuple(self.R.T)]
self._store_arrays(G_PH=PH, C_H=CH, R_H=RH,
imagefmt=dict(cmap='cool', mask_color='k'))
```
|
{
"source": "jdmonaco/pouty",
"score": 3
}
|
#### File: pouty/pouty/repo.py
```python
import subprocess
class NotARepoError(Exception):
pass
def git_revision(srcdir, short=False):
"""
Commit hash for the repository.
"""
if srcdir is None:
return None
cmd = ['git', 'rev-parse', '--short', 'HEAD']
if not short:
cmd.remove('--short')
try:
output = subprocess.check_output(cmd, cwd=srcdir)
except subprocess.CalledProcessError:
raise NotARepoError(srcdir)
else:
rev = output.decode().strip()
return rev
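# Minimal usage sketch (not part of the original module): print the HEAD
# commit of the current directory, assuming it is a git checkout; raises
# NotARepoError otherwise.
if __name__ == '__main__':
    print(git_revision('.', short=True))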
```
|
{
"source": "jdmonaco/roto",
"score": 3
}
|
#### File: roto/roto/filters.py
```python
import numpy as np
import scipy.signal
def find_minima(s, wrapped=False):
"""Index array of the local minima of a continuous signal."""
return _extrema(s, lambda x: x == +1, wrapped)
def find_maxima(s, wrapped=False):
"""Index array of the local maxima of a continuous signal."""
return _extrema(s, lambda x: x == -1, wrapped)
def find_peaks(s, wrapped=False):
"""Index array of local extrema of a continuous signal."""
return _extrema(s, lambda x: x != 0, wrapped)
def _extrema(s, which, wrapped):
if wrapped:
s = np.r_[s[-1], s, s[0]]
ex = np.r_[0, np.diff((np.diff(s) >= 0).astype('i')), 0]
if wrapped:
ex = ex[1:-1]
return np.nonzero(which(ex))[0]
def smart_medfilt2d(M, base=20, xwrap=False, ywrap=False):
"""Median filter the given matrix based on its rank size and optionally
wrapping the filter around the x or y dimension
"""
kernel = 2*int(np.sqrt(M.shape[0]*M.shape[1])/base)+1
if kernel <= 1:
return M
if xwrap:
M = np.c_[M[:,-kernel:], M, M[:,:kernel]]
if ywrap:
M = np.r_[M[-kernel:], M, M[:kernel]]
M = scipy.signal.medfilt2d(M, kernel_size=kernel)
if xwrap:
M = M[:,kernel:-kernel]
if ywrap:
M = M[kernel:-kernel]
return M
def filtfilt(b, a, s):
"""Forward-backward filter: linear filtering that preserves phase
Modified from: http://www.scipy.org/Cookbook/FiltFilt
"""
from numpy import r_, flipud, zeros
if type(a) is type(0):
len_a = 1
else:
len_a = len(a)
ntaps = max(len_a, len(b))
wrap = 3 * ntaps
if s.ndim != 1:
raise ValueError("filtfilt: requires a 1D signal vector")
# x must be bigger than edge
if s.size < wrap:
raise ValueError("filtfilt: signal not big enough for filter")
# pad b coefficients if necessary
if len_a > len(b):
b = r_[b, zeros(len_a - len(b))]
elif len_a < len(b):
a = 1
# reflect-wrap the signal for filter stability
s = r_[2*s[0] - s[wrap:0:-1], s, 2*s[-1] - s[-1:-wrap-1:-1]]
# filter forward, filter backward
y = scipy.signal.lfilter(b, a, s, -1)
y = scipy.signal.lfilter(b, a, flipud(y), -1)
return flipud(y[wrap:-wrap])
def quick_boxcar(s, M=4, centered=True):
"""Returns a boxcar-filtered version of the input signal
Keyword arguments:
M -- number of averaged samples (default 4)
    centered -- recenter the filtered signal to reduce lag (default True)
"""
# Sanity check on signal and filter window
length = s.shape[0]
if length <= 2*M:
raise ValueError('signal too short for specified filter window')
# Set up staggered arrays for vectorized average
z = np.empty((M, length+M-1), 'd')
for i in range(M):
z[i] = np.r_[np.zeros(i)+s[0], s, np.zeros(M-i-1)+s[-1]]
# Center the average if specified
start_ix = 0
end_ix = length
if centered:
start_ix += int(M/2)
end_ix += int(M/2)
return z.mean(axis=0)[start_ix:end_ix]
def circular_blur(s, blur_width):
"""Return a wrapped gaussian smoothed (blur_width in degrees) signal for
data binned on a full circle range [0, 2PI/360).
"""
bins = s.shape[0]
width = blur_width / (360.0/bins)
    size = int(np.ceil(8*width))
if size > bins:
size = bins
wrapped = np.r_[s[-size:], s, s[:size]]
G = scipy.signal.gaussian(size, width)
G /= np.trapz(G)
S = scipy.signal.convolve(wrapped, G, mode='same')
return S[size+1:-size+1]
def unwrapped_blur(s, blur_width, bins_per_cycle):
"""Return a gaussian smoothed (blur_width in degrees) signal for
unwrapped angle data across multiple cycles.
"""
width = blur_width / (360.0/bins_per_cycle)
    size = int(np.ceil(8*width))
G = scipy.signal.gaussian(size, width)
G /= np.trapz(G)
S = scipy.signal.convolve(s, G, mode='same')
return S
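# Minimal usage sketch (illustrative, not from the original source): locate
# the maxima of a noisy sinusoid and smooth it with a 5-sample boxcar.
if __name__ == '__main__':
    t = np.linspace(0, 4 * np.pi, 500)
    s = np.sin(t) + 0.05 * np.random.randn(500)
    print('maxima at:', find_maxima(s))
    print('smoothed shape:', quick_boxcar(s, M=5).shape)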
```
#### File: roto/roto/strings.py
```python
import re
def naturalize(s):
"""Normalize to 'natural' naming for identifiers or data storage."""
return camel2snake(s).strip().lower().replace(' ', '_').replace('-', '_'
).replace('.', '_')
def sluggify(s):
"""Normalize to a url-style slug: hyphenated lower-case words."""
return camel2snake(s, sep='-').lower().strip().replace(' ', '-')
def camel2snake(s, sep='_'):
"""Convert a camel-case name to snake case.
Shamelessly stolen from a Stackoverflow answer:
http://stackoverflow.com/a/1176023
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1%s\2' % sep, s)
s2 = re.sub('([a-z0-9])([A-Z])', r'\1%s\2' % sep, s1).lower()
return s2.replace('%s%s' % (sep, sep), sep)
def snake2title(s):
"""Convert 'snake_case' string to 'Title Case' string."""
return ' '.join(s.split('_')).strip().title()
# Unicode decoding/encoding
def to_str(bytes_or_str):
"""Given a string or bytes instance, return a string."""
if isinstance(bytes_or_str, bytes):
value = bytes_or_str.decode('utf-8')
else:
value = bytes_or_str
return value
def to_bytes(bytes_or_str):
"""Given a string or bytes instance, return a bytes object."""
if isinstance(bytes_or_str, str):
value = bytes_or_str.encode('utf-8')
else:
value = bytes_or_str
return value
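# Usage sketch (illustrative): the three naming conventions applied to one
# identifier.
if __name__ == '__main__':
    print(naturalize('MyDataSet v2.1'))  # -> my_data_set_v2_1
    print(sluggify('MyDataSet'))         # -> my-data-set
    print(snake2title('my_data_set'))    # -> My Data Set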
```
#### File: roto/roto/writers.py
```python
import sys
class CSVWriter(object):
"""
Pass in a filename and list(tuple(colname, 's|d|f')) to define columns,
call `get_row` for a row dictionary, fill it up, and then call `write_row`
and close when you're done.
"""
def __init__(self, fn, cols, sep=','):
self._init = dict(s='', d=0, f=0.0)
self._cols = cols
        self._rowstr = sep.join(['%%(%s)%s' % col for col in self._cols]) + '\n'
        self.filename = fn
        self._fd = open(fn, 'w')
        self._fd.write(sep.join([col for col, dtype in self._cols]) + '\n')
sys.stdout.write(f'Opened spreadsheet {fn}.\n')
def get_row(self):
return { col: self._init[dtype] for col, dtype in self._cols }
def write_row(self, record):
self._fd.write(self._rowstr % record)
def close(self):
self._fd.close()
sys.stdout.write(f'Closed spreadsheet {self.filename}.\n')
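# Usage sketch (hypothetical columns; writes demo.csv to the working dir):
if __name__ == '__main__':
    w = CSVWriter('demo.csv', [('name', 's'), ('trial', 'd'), ('score', 'f')])
    row = w.get_row()
    row.update(name='test', trial=1, score=0.5)
    w.write_row(row)
    w.close()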
```
|
{
"source": "jdmonaco/skaggs",
"score": 3
}
|
#### File: skaggs/skaggs/labels.py
```python
from .. import store
def session(session_id):
"""A label describing a session."""
df = store.get()
session = df.root.sessions[session_id]
label = 'Rat {}, Day {} ({})'.format(
session['rat'], session['day'], session['comment'].decode('utf-8'))
return label
def session_id(session_id):
"""A label describing a session and its id."""
return '{} [#{}]'.format(session(session_id), session_id)
def cell(c_id):
"""A label describing a cell."""
return store.get().root.recordings[c_id]['ttc'].decode('utf-8')
def session_cell(c_id):
"""A label describing a session and a cell."""
df = store.get()
cell = df.root.recordings[c_id]
label = '{}, Cell {}'.format(session(cell['session_id']),
cell['ttc'].decode('utf-8'))
return label
def rat_cell_id(c_id):
"""A label describing a rat and cell name/id."""
df = store.get()
s_id = df.root.recordings[c_id]['session_id']
rat = df.root.sessions[s_id]['rat']
return 'Rat {}, {}'.format(rat, cell_id(c_id))
def session_cell_id(c_id):
"""A label describing a session and a cell with its id."""
return '{} [#{}]'.format(session_cell(c_id), c_id)
def cell_id(c_id):
"""A label describing a cell with its id."""
return '{} [#{}]'.format(cell(c_id), c_id)
```
#### File: skaggs/skaggs/parsers.py
```python
import re
import tables as tb
from .. import store
TTC_PATTERN = re.compile(r'tt(\d+)_c(\d+)')
STR_PATTERN = re.compile(r"^b'(.*)'")
class ParserError(Exception):
pass
def raise_or_none(raze, msg):
if raze:
raise ParserError(msg)
return None
def _process_table_row(row, cols):
"""Convert a table row with bytes-strings to a dict with normal strings."""
values = {}
for name, coltype in cols.items():
if coltype == 'string':
try:
val = re.match(STR_PATTERN, str(row[name])).groups()[0]
except AttributeError:
val = str(row[name])
val = val[2:-1] # remove "^b'" and "'$"
else:
val = row[name]
values[name] = val
return values
def parse_session(index, raise_on_fail=False):
"""Get dict of session info for /sessions index or row object."""
return _parse_table_row(store.get().root.sessions, index, raise_on_fail)
def parse_recording(index, raise_on_fail=False):
"""Get dict of cell info for /recordigns index or row object."""
return _parse_table_row(store.get().root.recordings, index, raise_on_fail)
def _parse_table_row(table, index, rof):
if type(index) is tb.tableextension.Row:
row = index
table = row.table
else:
try:
index = int(index)
except (TypeError, ValueError):
return raise_or_none(rof, "bad session index: '%s'" % str(index))
else:
row = table[index]
return _process_table_row(row, table.coltypes)
def parse_ttc(ttc, raise_on_fail=False):
"""Get (tetrode, cluster) integer tuple for any specification."""
if type(ttc) is str:
return parse_ttc_str(ttc, raise_on_fail=raise_on_fail)
try:
tt, cl = int(ttc['tetrode']), int(ttc['cluster'])
except (KeyError, TypeError):
pass
else:
return (tt, cl)
try:
tt, cl = int(ttc['tt']), int(ttc['cl'])
except (KeyError, TypeError):
pass
else:
return (tt, cl)
try:
tt, cl = int(ttc.tetrode), int(ttc.cluster)
except (AttributeError, TypeError):
pass
else:
return (tt, cl)
try:
tt, cl = int(ttc.tt), int(ttc.cl)
except (AttributeError, TypeError):
pass
else:
return (tt, cl)
try:
tt, cl = list(map(int, ttc))
except (ValueError, TypeError):
pass
else:
return (tt, cl)
return raise_or_none(raise_on_fail, "invalid ttc: %s" % str(ttc))
def parse_ttc_str(ttc_str, raise_on_fail=False):
"""Convert ttc string (e.g. 'tt11_c3') -> (11, 3) tuple."""
match = re.match(TTC_PATTERN, ttc_str)
if match:
return tuple(map(int, match.groups()))
return raise_or_none(raise_on_fail,
"could not parse ttc string '{}'".format(ttc_str))
```
#### File: skaggs/skaggs/phasers.py
```python
from functools import reduce
import numpy as np
from pouty import ConsolePrinter
from roto.strings import snake2title
from . import data, labels
# Phaser cell criteria
PHASER_IPHASE_IMIN = 0.1
PHASER_IPHASE_PMAX = 0.02
PHASER_RMIN = 3.5
PHASER_RP_CMIN = 0.2
PHASER_RP_PMAX = 0.02
PHASER_RP_SMIN = np.pi / 4
# Validation
def validate_cells(cell_list,
rmin=PHASER_RMIN,
pmax=PHASER_IPHASE_PMAX,
imin=PHASER_IPHASE_IMIN,
cmin=PHASER_RP_CMIN,
cpmax=PHASER_RP_PMAX,
smin=PHASER_RP_SMIN):
"""List how cells fulfill or fail the phaser criteria."""
P = properties_dataframe()
out = ConsolePrinter(prefix=snake2title(__name__),
prefix_color='green')
for c_id in cell_list:
cell = P.loc[c_id]
tag = labels.session_cell_id(c_id)
if cell.ratemap_max < rmin:
out('{}: Failed: ratemap_max [{:.2f}] < {}',
tag, cell.ratemap_max, rmin)
elif cell.I_phase < imin:
out('{}: Failed: I_phase [{:.4f}] < {}',
tag, cell.I_phase, imin)
elif cell.I_phase_p > pmax:
            out('{}: Failed: I_phase_p [{:.3f}] > {}',
tag, cell.I_phase_p, pmax)
elif np.abs(cell.C_rp_r) < cmin:
out('{}: Failed: |C_rp_r| [{:.4f}] < {}',
tag, cell.C_rp_r, cmin)
elif cell.C_rp_p > cpmax:
            out('{}: Failed: C_rp_p [{:.3f}] > {}',
tag, cell.C_rp_p, cpmax)
elif np.abs(cell.rp_shift) < smin:
out('{}: Failed: |rp_shift| [{:.3f}] < {}',
tag, cell.rp_shift, smin)
else:
out('{}: Phaser!', tag)
# Functions for phaser cell data
def properties_dataframe(
rmin=PHASER_RMIN,
pmax=PHASER_IPHASE_PMAX,
imin=PHASER_IPHASE_IMIN,
cmin=PHASER_RP_CMIN,
cpmax=PHASER_RP_PMAX,
smin=PHASER_RP_SMIN):
"""Cell properties dataframe with subtype/phaser columns based on the
phaser-cell phase-coding criteria.
"""
P = data.properties_dataframe()
P['phaser'] = reduce(np.logical_and, [
P.ratemap_max >= rmin,
P.I_phase >= imin,
P.I_phase_p <= pmax,
np.abs(P.C_rp_r) >= cmin,
P.C_rp_p <= cpmax,
np.abs(P.rp_shift) >= smin
])
P['subtype'] = 'none'
P.loc[P.phaser, 'subtype'] = [
{True: 'positive', False: 'negative'}[sl > 0]
for sl in P.loc[P.phaser, 'C_rp_sl']]
return P
def filtered_dataframe(**kwds):
"""Cell properties dataframe of only phaser cells."""
P = properties_dataframe(**kwds)
P = P.loc[P.phaser]
return P
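# Usage sketch (illustrative; requires the package's data store to be set up):
# >>> P = filtered_dataframe()      # properties of phaser cells only
# >>> P.groupby('subtype').size()   # counts of positive/negative phasers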
```
|
{
"source": "jdmonaco/spikemaps",
"score": 2
}
|
#### File: spikemaps/spikemaps/adaptive.py
```python
from scipy import signal
import numpy as np
from pouty import debug
from roto import circstats, arrays
from roto.decorators import lazyprop, datamemoize
from . import kernels
from ..lib.motion import NORM_POS_EXTENT, NORM_POS_MAX
NPIXELS = 64
NBR_FRAC = 0.04
MINRAD = 8.0
MAXRAD = 30.0
MASK_SMOOTHING = 1/15
CM_SCALE = 0.8
@datamemoize
def smooth_mask2d(x, y, bins=NPIXELS, extent=NORM_POS_EXTENT,
smoothing=MASK_SMOOTHING, scale_max=NORM_POS_MAX):
"""Generate 2D fill mask based on a smoothed histogram."""
debug('generating arena mask')
l, r, b, t = extent
w, h = r - l, t - b
try:
nx, ny = bins
except TypeError:
nx = ny = int(bins)
finally:
n = np.sqrt(nx * ny)
s = smoothing / (np.max((w, h)) / scale_max)
k = max(int(s * n), 3)
    k = (k % 2) and k or k + 1  # force an odd kernel size for medfilt2d
    dx, dy = w / nx, h / ny
    xedges = np.linspace(l - k * dx, r + k * dx, nx + 2 * k + 1)
yedges = np.linspace(b - k * dy, t + k * dy, ny + 2 * k + 1)
H = np.histogram2d(x, y, bins=[xedges, yedges])[0]
H = signal.medfilt2d((H>0).astype('d'), kernel_size=3)
M = np.zeros_like(H, 'i')
r, c = H.shape
for i in range(r):
j = 0
while j < c and H[i,j] == 0.0:
M[i,j] += 1
j += 1
j = c - 1
while j >= 0 and H[i,j] == 0.0:
M[i,j] += 1
j -= 1
for j in range(c):
i = 0
while i < r and H[i,j] == 0.0:
M[i,j] += 1
i += 1
i = r - 1
while i >= 0 and H[i,j] == 0.0:
M[i,j] += 1
i -= 1
Ms = signal.medfilt2d((M<2).astype('d'), kernel_size=k) == 0
return Ms[k:-k,k:-k]
class AbstractAdaptiveMap(object):
def __init__(self, mdata, scale='norm', nfrac=NBR_FRAC, alim=(MINRAD, MAXRAD),
res=NPIXELS, extent=NORM_POS_EXTENT, mask_value=np.nan):
"""Compute spatial maps using adaptive Gaussian kernels.
Arguments:
mdata -- a MotionData object for the trajectory being mapped
Keyword arguments:
scale -- 'norm'|'cm', set to 'cm' if using map for cm-scaled data
nfrac -- fraction of data points that constitute a neighborhood
alim -- adaptive range limits for the kernel radius
res -- pixel resolution of the output maps (in pixel rows)
extent -- scalars (left, right, bottom, top), map data extent
mask_value -- value for setting masked pixels
Returns a callable that produces spatial maps.
"""
if hasattr(mdata, 'motion'):
mdata = mdata.motion # get motion data if session passed in
self.mdata = mdata
assert scale in ('norm', 'cm'), 'scale must be in ("norm", "cm")'
self.scaled = scale == 'cm'
if self.scaled:
if alim == (MINRAD, MAXRAD):
alim = (CM_SCALE * MINRAD, CM_SCALE * MAXRAD)
if extent == NORM_POS_EXTENT:
extent = tuple(map(lambda x: CM_SCALE * x, NORM_POS_EXTENT))
debug('adaptive ratemap scaled to cm')
self.nfrac = nfrac
self.alim = alim
self.res = res
self.extent = extent
self.mask_value = mask_value
self._cache = {}
def _get_dataset(self, X, Y):
pts = np.atleast_2d((X, Y))
if pts.shape[0] == 2:
pts = pts.T
return pts
def _reshape_grid(self, pts):
m = self.arena_mask.flatten()
grid = np.zeros(m.size)
grid[m] = self.mask_value
grid[np.logical_not(m)] = pts
grid = np.reshape(grid, self.pixel_shape)
return grid
@lazyprop
def aspect_ratio(self):
x = self.extent
return (x[1] - x[0]) / (x[3] - x[2])
@lazyprop
def pixel_shape(self):
return int(self.aspect_ratio * self.res), self.res
@lazyprop
def eval_pixels(self):
"""Compute the pixel grid of kernel evaluation points."""
_nx, _ny = self.pixel_shape
_x = np.linspace(self.extent[0], self.extent[1], _nx)
_y = np.linspace(self.extent[2], self.extent[3], _ny)
X, Y = np.meshgrid(_x, _y)
pixels = np.c_[X.T.flatten(), Y.T.flatten()]
test = np.logical_not(self.arena_mask.flatten())
return pixels[test]
@lazyprop
def arena_mask(self):
"""Generate map mask based on whole trajectory."""
if self.scaled:
xdata, ydata = self.mdata.x_cm, self.mdata.y_cm
scale_max = CM_SCALE * NORM_POS_MAX
else:
xdata, ydata = self.mdata.x, self.mdata.y
scale_max = NORM_POS_MAX
return smooth_mask2d(xdata, ydata, bins=self.pixel_shape,
extent=self.extent, scale_max=scale_max)
def knbrs(self, N):
try:
N = N.shape[0]
except AttributeError:
pass
return max(1, int(N * self.nfrac))
def __call__(self, *data):
"""Subclasses must override this to evaluate their kernels."""
raise NotImplementedError
class _SpikeCountMap(AbstractAdaptiveMap):
def __call__(self, xs, ys):
"""Compute the rate map with supplied spike and position data."""
_hash = arrays.datahash(xs, ys)
if _hash in self._cache:
return self._cache[_hash]
debug('running kernel estimation for spikes')
spkdata = self._get_dataset(xs, ys)
k = kernels.AdaptiveGaussianKernel(spkdata,
k_neighbors=self.knbrs(spkdata))
P_spk = k(self.eval_pixels, minrad=self.alim[0], maxrad=self.alim[1])
G_spk = self._reshape_grid(P_spk) * spkdata.shape[0] # scale spike estimate
self._cache[_hash] = G_spk
return G_spk
class _OccupancyMap(AbstractAdaptiveMap):
def __call__(self, xp, yp, Fs=None):
"""Compute the rate map with supplied spike and position data."""
_hash = arrays.datahash(xp, yp)
if _hash in self._cache:
return self._cache[_hash]
if Fs is None:
Fs = self.mdata.Fs
debug('running kernel estimation for occupancy')
posdata = self._get_dataset(xp, yp)
duration = posdata.shape[0] / Fs
k = kernels.AdaptiveGaussianKernel(posdata,
k_neighbors=self.knbrs(posdata))
P_occ = k(self.eval_pixels, minrad=self.alim[0], maxrad=self.alim[1])
G_occ = self._reshape_grid(P_occ) * duration # scale occupancy estimate
self._cache[_hash] = G_occ
return G_occ
class AdaptiveRatemap(object):
"""
Manage spike-count and occupancy estimates to compute firing-rate maps.
"""
def __init__(self, *args, **kwargs):
self._spkmap = _SpikeCountMap(*args, **kwargs)
self._occmap = _OccupancyMap(*args, **kwargs)
self.mask_value = kwargs.get('mask_value', np.nan)
def __call__(self, xs, ys, xp, yp, Fs=None):
if xs.size == 0:
G_spk = np.zeros(self._spkmap.pixel_shape)
else:
G_spk = self._spkmap(xs, ys)
G_occ = self._occmap(xp, yp, Fs=Fs)
G_rate = np.zeros_like(G_spk) + self.mask_value
valid = np.logical_and(np.isfinite(G_spk), np.isfinite(G_occ))
G_rate[valid] = G_spk[valid] / G_occ[valid]
return G_rate
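# Usage sketch (illustrative; `mdata` is a MotionData trajectory object from
# the surrounding package, and the spike/position arrays are hypothetical):
# >>> ratemap = AdaptiveRatemap(mdata, scale='cm')
# >>> R = ratemap(x_spk, y_spk, x_pos, y_pos)  # (nx, ny) firing-rate map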
def phase_vector(weights, values):
"""Multi-output kernel function to compute mean phase vectors."""
return circstats.mean_resultant_vector(values, weights=weights)
class AdaptivePhasemap(AbstractAdaptiveMap):
"""
Manage occupancy and phase estimates for adaptive phase maps.
"""
def __call__(self, xs, ys, phase):
"""Compute the phase mean and spread estimates on bursting data."""
_hash = arrays.datahash(xs, ys, phase)
if _hash in self._cache:
return self._cache[_hash]
if xs.size == 0:
self._cache[_hash] = np.zeros(self.pixel_shape) + self.mask_value
return self._cache[_hash]
debug('running kernel estimation of spike phase')
posdata = self._get_dataset(xs, ys)
k = kernels.AdaptiveGaussianKernel(posdata,
k_neighbors=self.knbrs(posdata),
values=phase)
L_phase = k(self.eval_pixels, minrad=self.alim[0], maxrad=self.alim[1],
kernel_func=phase_vector, n_outputs=2)
G_phase = np.empty((2,) + self.pixel_shape)
G_phase[0] = self._reshape_grid(L_phase[0])
G_phase[1] = self._reshape_grid(L_phase[1])
self._cache[_hash] = G_phase
return G_phase
def weighted_avg(weights, values):
"""Compute a weighted average across neighbor values."""
totw = np.sum(weights, axis=-1)
return (weights * values).sum(axis=-1) / totw
class AdaptiveAveragerMap(AbstractAdaptiveMap):
"""
Compute a local weighted average of values across nearest neighbors.
"""
def __call__(self, xp, yp, values):
_hash = arrays.datahash(xp, yp, values)
if _hash in self._cache:
return self._cache[_hash]
debug('running weighted averager on values')
posdata = self._get_dataset(xp, yp)
k = kernels.AdaptiveGaussianKernel(posdata,
k_neighbors=self.knbrs(posdata),
values=values)
L_avg = k(self.eval_pixels, minrad=self.alim[0], maxrad=self.alim[1],
kernel_func=weighted_avg)
G_avg = self._reshape_grid(L_avg)
self._cache[_hash] = G_avg
return G_avg
```
#### File: spikemaps/spikemaps/decoder.py
```python
import pandas as pd
import numpy as np
import scipy.stats as st
from pouty import debug
from roto.decorators import lazyprop
from roto.radians import cdiff
from ..ana.phaser_model import DEFAULT_F_THETA
from ..lib.motion import CIRCLE_AND_BOX_SIZE as SIZE
ARENA_EXTENT = [0, SIZE, 0, SIZE]
XMIN, XMAX, YMIN, YMAX = ARENA_EXTENT
GNORM = 1 / (np.sqrt(2*np.pi))
THETA_WINDOW = 1 / DEFAULT_F_THETA
class BayesPhaseDecoder(object):
def __init__(self, phasemaps, xmin=XMIN, xmax=XMAX, ymin=YMIN, ymax=YMAX):
self.P = phasemaps
self.P[np.isnan(self.P)] = 0.0
self.N = phasemaps.shape[0]
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.ngrid = phasemaps.shape[1]
self.argmax = None
def _validate_activation(self, H):
H = np.atleast_1d(np.squeeze(H))
assert H.ndim == 1 and H.size == self.N, 'size or dimension mismatch'
assert np.all(np.isfinite(H)), 'invalid activation values'
return H
def decode(self, spike_phase, window=THETA_WINDOW, tau=1, continuity=0.0):
"""Bayesian posterior for decoding windowed spike-phase averages."""
H = self._validate_activation(spike_phase)
L = np.prod(np.exp(np.cos(cdiff(H.reshape(-1,1,1), self.P))), axis=0)
P = L * np.exp(-window * tau)
P /= np.trapz(np.trapz(P, x=self._y_bins), x=self._x_bins)
if continuity > 0 and self.argmax is not None:
dist2 = (self._eval_grid[0]-self.argmax[0])**2 + \
(self._eval_grid[1]-self.argmax[1])**2
prior = (GNORM/continuity) * np.exp(-dist2/(2*continuity)**2)
P *= prior
P /= np.trapz(np.trapz(P, x=self._y_bins), x=self._x_bins)
# Save spatial argmax for continuity constraint
self.argmax = self._argmax_ij(P)
return P
def _argmax_ij(self, P):
"""Find the spatial coordinates (ij-index) for the maximum of a map."""
YY, XX = self._eval_grid
i = P.ravel().argmax()
return YY.ravel()[i], XX.ravel()[i]
@lazyprop
def _eval_grid(self):
"""Mesh grid for Poisson sampling and evaluations."""
return np.meshgrid(self._x_bins, self._y_bins, indexing='ij')
@lazyprop
def _x_bins(self):
aspect = (self.xmax - self.xmin) / (self.ymax - self.ymin)
return np.linspace(self.xmin, self.xmax, int(aspect*self.ngrid))
@lazyprop
def _y_bins(self):
return np.linspace(self.ymin, self.ymax, self.ngrid)
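# Usage sketch (illustrative; `maps` is a hypothetical (N, nx, ny) stack of
# per-cell phase maps and `phases` the windowed spike-phase averages):
# >>> decoder = BayesPhaseDecoder(maps)
# >>> posterior = decoder.decode(phases, continuity=5.0)
# >>> decoder.argmax  # (i, j) spatial argmax of the last posterior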
```
#### File: spikemaps/spikemaps/kernels.py
```python
from numpy import atleast_1d as in1d, atleast_2d as in2d
from sklearn import neighbors
import numpy as np
ROOT_2PI = np.sqrt(2 * np.pi)
KERNEL_RADIUS_RATIO = 0.35
NUM_NEIGHBORS = 10
class AdaptiveGaussianKernel(object):
"""
Nearest neighbors method for estimating density or custom functions.
"""
def __init__(self, dataset, values=None, k_neighbors=NUM_NEIGHBORS):
"""Set up the nearest neighbors model for evaluation.
Arguments:
dataset -- (n,2)-shaped array of spatial points
Keyword arguments:
values -- scalar values for each point in the dataset
k_neighbors -- number of neighbors to consider in the model
"""
dataset = in2d(dataset)
if dataset.shape[0] == 2:
dataset = dataset.T
if values is None:
self.values = np.ones(dataset.shape[0])
else:
values = in1d(values)
if values.ndim != 1:
raise ValueError("values can be at most 1-dimensional")
if values.size != dataset.shape[0]:
raise ValueError("size mismatch with values (%d)" % values.size)
self.values = values
self.model = neighbors.NearestNeighbors(
n_neighbors=k_neighbors,
algorithm='kd_tree')
self.model.fit(dataset)
def _Gk(self, dists, radius, kfrac):
H = kfrac * radius
G = lambda x: np.exp(-x**2 / (2 * H**2)) / (ROOT_2PI * H)
return G(dists) - G(radius)
def evaluate(self, points, minrad=0.0, maxrad=100.0, kernel_func=None,
n_outputs=1, mask_value=np.nan, kernel_ratio=KERNEL_RADIUS_RATIO,
debug=None):
"""Evaluate the nearest-neighbors model at test points.
Arguments:
points -- (n,2)-shaped array of test points
Keyword arguments:
minrad -- minimum allowed kernel radius
maxrad -- maximum allowed kernel radius
kernel_func -- neighbor function that produces `n_outputs` values
n_outputs -- number of outputs generated by the `kernel_func`
kernel_ratio -- Gaussian kernel size as fraction of radius
Returns:
(n,)-shaped array, or tuple of arrays (see Notes)
Notes:
If neither `values` nor `kernel_func` are provided, then this method
computes a probability density estimate of the data points by default.
If `values` was provided for the training set, then a weighted average
of these data point values is calculated instead of the density
estimate.
Providing a kernel function as `kernel_func` can generate multiple
output evaluations organized along the first axis of the evaluated
matrix. The kernel function must have the following form:
```
def foo(weights, values):
...
return output
```
where each argument is a (n,k)-shaped array where `n` is some number of
test points, `k` is the number of nearest neighbors for that test point,
and `output` is a (n_outputs,n)-shaped array (or (n,)-shaped if only one
output). If `values` was not provided, then the argument passed will be
the nearest-neighbor distances.
"""
do_density = False
if kernel_func is None:
kernel_func = lambda w, v: np.sum(w, axis=-1)
do_density = True
n_outputs = 1
points = in2d(points)
if points.shape[0] == 2:
points = points.T
L = np.zeros((n_outputs, points.shape[0])) + mask_value
try:
D, I = self.model.kneighbors(points)
V = self.values[I]
except ValueError:
return L
# Separate adaptive test points from points that hit the bounds
rad = np.sort(D)[:,-1].reshape(-1,1)
ihigh = (rad > maxrad).nonzero()[0]
iadapt = np.logical_and(rad >= minrad, rad <= maxrad).nonzero()[0]
ilow = (rad < minrad).nonzero()[0]
def eval_fixed_radius(index, radius):
dists, inds = self.model.radius_neighbors(points[index],
radius=radius)
for i, (d, ix) in enumerate(zip(dists, inds)):
vals = self.values[ix]
g = self._Gk(d, radius, kernel_ratio)
L[:,index[i]] = kernel_func(g, vals)
if ihigh.size:
eval_fixed_radius(ihigh, maxrad)
if iadapt.size:
vals = V[iadapt]
g = self._Gk(D[iadapt], rad[iadapt], kernel_ratio)
L[:,iadapt] = kernel_func(g, vals)
if ilow.size:
eval_fixed_radius(ilow, minrad)
L = L.squeeze()
if debug is not None:
debug['rad'] = rad
debug['adapt'] = a = rad.copy()
a[ilow] = 0
a[iadapt] = 1
a[ihigh] = 2
if do_density:
if np.isnan(L).all():
return L
return L / L[np.isfinite(L)].sum()
return L
__call__ = evaluate
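if __name__ == '__main__':
    # Minimal self-test sketch (not part of the original module): estimate a
    # normalized density map from 2D Gaussian samples. The seed, grid, and
    # radius limits below are illustrative choices, not values from the source.
    rng = np.random.default_rng(0)
    pts = rng.normal(size=(500, 2))
    kde = AdaptiveGaussianKernel(pts, k_neighbors=25)
    gx, gy = np.meshgrid(np.linspace(-3, 3, 32), np.linspace(-3, 3, 32))
    grid = np.c_[gx.ravel(), gy.ravel()]
    density = kde(grid, minrad=0.1, maxrad=2.0)
    print(density.shape, float(np.nansum(density)))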
```
|
{
"source": "jdmonaco/vmo-feedback-model",
"score": 2
}
|
#### File: vmo-feedback-model/src/session.py
```python
import os
import numpy as np
from glob import glob
from numpy import pi
from scipy.signal import hilbert
# Package imports
from .vmo import VMOModel
from .double_rotation import VMODoubleRotation
from .tools.radians import radian, get_angle_histogram
from .tools.bash import CPrint
from .tools.path import unique_path
from .tools.filters import halfwave, circular_blur
from .tools.array_container import TraitedArrayContainer
# Traits imports
from enthought.traits.api import Trait, Instance, Array, Float, Int, false
class VMOSession(TraitedArrayContainer):
"""
A container for a completed VMOModel simulation object that automatically
computes input signal envelopes, median thresholds, a population matrix
and place cell spatial information for each trial.
"""
out = Instance(CPrint)
params = Trait(dict)
center = Array
dt = Float
trial = Int(1)
num_trials = Int
num_units = Int
angle = Array
alpha = Array
t = Array
x = Array
y = Array
laps = Array
N_laps = Int
E = Array(desc='units x track response matrix')
E_laps = Array(desc='units x track x laps response matrix')
thresh = Float
R = Array
R_laps = Array
I_rate = Array(desc='spatial information')
sortix = Array(desc='active unit index sorted by max responses')
active_units = Array(desc='active unit index')
is_mismatch = false(desc='whether this is a mismatch session')
mismatch = Int
# Firing rate smoothing parameters
default_blur_width = Float(4.3)
bins = Int(360)
# Saving time-series data
I_cache = Array(desc='saved input time-series')
E_cache = Array(desc='saved envelope time-series')
save_I = false(desc='save input time-series')
save_E = false(desc='save envelope time-series')
# Activity threshold for counting a unit as active
min_spike_count = Trait(0.05, Float, desc='min. fraction pop. max')
def __init__(self, model, **kwargs):
super(VMOSession, self).__init__(**kwargs)
try:
if not model.done:
                raise ValueError("model simulation must be completed")
        except AttributeError:
            raise ValueError("argument must be a VMOModel object")
# Get basic data about this model simulation/session
self.trait_set( params=model.parameter_dict(),
num_trials=model.num_trials,
num_units=model.N_outputs,
dt=model.dt,
center=model.center)
if self.num_trials == 1:
pm = model.post_mortem()
else:
pm = model.post_mortem().get_trial_data(self.trial)
if hasattr(model, 'mismatch'):
self.is_mismatch = True
self.mismatch = int((180/pi)*model.mismatch[self.trial-1])
self.trait_set(alpha=pm.alpha, x=pm.x, y=pm.y, t=pm.t)
# Compute envelopes, thresholds, and population responses
self.out('Computing responses for trial %d of %d...'%(self.trial,
self.num_trials))
self._compute_envelopes(pm.I, model.track)
self._set_threshold()
self.compute_circle_responses()
self.out('Done!')
def _compute_envelope_timeseries(self, I_t):
"""Compute raw time-series signal envelopes of oscillatory drive from
synaptic drive matrix (timesteps x units).
"""
        # Compute signal envelope via Hilbert transform along the time axis;
        # transpose to (units x timesteps) as expected downstream
        E = np.abs(hilbert(I_t, axis=0)).T
# Cache the time-series before track binning if specified
if self.save_I:
self.out('Warning: saving input cache')
self.I_cache = I_t
if self.save_E:
self.out('Warning: saving envelopes cache')
self.E_cache = E
return E
def _compute_envelopes(self, I_theta, track):
"""Compute the amplitude envelopes for each of the output units
Required arguments:
I_theta -- synaptic drive time-series matrix for all outputs
track -- CircleTrackData object containing trajectory data
Session and per-lap envelope matrices are computed.
"""
# Compute envelope time-series
E = self._compute_envelope_timeseries(I_theta.T)
# Reduce envelope data to binned track angle histogram
t, alpha = self.t, self.alpha
angle = np.linspace(0, 2*pi, self.bins+1)
self.angle = angle[:-1]
# Get completed laps
lap_times = track.elapsed_time_from_timestamp(track.laps)
lap_ix = (lap_times<=self.t[-1]).nonzero()[0].max()
self.laps = lap_times[:lap_ix]
self.N_laps = lap_ix - 1 # only including *complete* laps, last lap is always incomplete
# Compute track responses: session- and lap-averages
self.E = np.zeros((self.num_units, self.bins), 'd')
self.E_laps = \
np.zeros((self.num_units, self.bins, self.N_laps), 'd')
for b in xrange(self.bins):
ix = np.logical_and(
alpha >= angle[b], alpha < angle[b+1]).nonzero()[0]
if len(ix):
self.E[:,b] = E[:,ix].mean(axis=1)
for lap in xrange(self.N_laps):
ix = reduce(np.logical_and,
[alpha >= angle[b], alpha < angle[b+1],
t >= self.laps[lap], t < self.laps[lap+1]]).nonzero()[0]
if len(ix):
self.E_laps[:,b,lap] = E[:,ix].mean(axis=1)
def _set_threshold(self):
"""Compute median peak inputs as an activity threshold
"""
self.thresh = np.median(self.E.max(axis=1))
def compute_circle_responses(self):
"""Top-level function to recompute the population matrix, information
rates and active place units.
"""
self._compute_population_matrix()
self._compute_spatial_information()
self._set_active_units()
def _compute_population_matrix(self):
"""Compute radial place field ratemaps for each output unit
"""
self.R = halfwave(self.E - self.thresh)
self.R_laps = halfwave(self.E_laps - self.thresh)
def _compute_spatial_information(self):
"""Compute overall spatial information for each output unit
Calculates bits/spike as (Skaggs et al 1993):
I(R|X) = (1/F) * Sum_i[p(x_i)*f(x_i)*log_2(f(x_i)/F)]
"""
self.I_rate = np.empty(self.num_units, 'd')
occ = get_angle_histogram(
self.x-self.center[0], self.y-self.center[1], self.bins)
occ *= self.dt # convert occupancy to seconds
p = occ/occ.sum()
for i in xrange(self.num_units):
f = self.R[i]
F = halfwave(self.E[i]-self.thresh).mean()
I = p*f*np.log2(f/F)/F
I[np.isnan(I)] = 0.0 # handle zero-rate bins
self.I_rate[i] = I.sum()
def _set_active_units(self):
"""Apply minimal firing rate threshold to determine which active units
are active.
"""
self.active_units = (
self.R.max(axis=1) >= self.min_spike_count*self.R.max()
).nonzero()[0]
self.sortix = self.active_units[
np.argsort(np.argmax(self.R[self.active_units], axis=1))]
def get_spatial_information(self, unit=None):
"""Get overall spatial information for the population or a single unit
"""
return np.squeeze(self.I_rate[unit])
def get_population_matrix(self, bins=None, norm=False, clusters=None,
smoothing=True, blur_width=None, inplace=False):
"""Retrieve the population response matrix for this session simulation
Keyword arguments:
bins -- recompute responses for a different number of bins (deprecated)
norm -- whether to integral normalize each unit's response
clusters -- optional index array for row-sorting the response matrix;
if not specified, a peak-location sort of the place-active subset
of the population is used by default
smoothing -- whether to do circular gaussian blur on ratemaps
blur_width -- width of gaussian window to use for smoothing; a value of
None defaults to default_blur_width
Returns (units, bins) matrix of population spatial responses.
"""
self.compute_circle_responses()
if clusters is None:
clusts = self._get_active_units()
elif type(clusters) in (np.ndarray, list):
clusts = np.asarray(clusters)
if inplace:
R = self.R[clusts]
else:
R = self.R[clusts].copy()
if smoothing:
if blur_width is None:
blur_width = self.default_blur_width
for Runit in R:
Runit[:] = circular_blur(Runit, blur_width)
if norm:
Rsum = np.trapz(R, axis=1).reshape(R.shape[0], 1)
Rsum[Rsum==0.0] = 1
R /= Rsum
return R
def get_population_lap_matrix(self, clusters=None, smoothing=True,
blur_width=None, inplace=False, **kwargs):
"""Construct concatentation of per-lap population response matrices
Keyword arguments:
clusters -- optional index array for row-sorting the response matrix;
if not specified, a peak-location sort of the place-active subset
of the population is used by default
smoothing -- whether to do circular gaussian blur on ratemaps
blur_width -- width of gaussian window to use for smoothing; a value of
None defaults to default_blur_width
Returns (N_clusts, bins, N_laps) response matrix.
"""
self.compute_circle_responses()
if clusters is None:
clusts = self._get_active_units()
elif type(clusters) in (np.ndarray, list):
clusts = np.asarray(clusters)
if inplace:
R = self.R_laps[clusts]
else:
R = self.R_laps[clusts].copy()
if smoothing:
if blur_width is None:
blur_width = self.default_blur_width
for Runit in R:
for Rlap in Runit.T:
Rlap[:] = circular_blur(Rlap, blur_width)
return R
def recover_cues(self):
"""Simulate a dummy model with identical cue configuration as was used
to create this session data. A post-mortem object is returned that can
be plotted using (e.g.) rat.oi_funcs.plot_external_cues.
"""
pdict = dict(
N_theta = 1,
N_outputs = 1,
monitoring = False,
N_cues_local = self.params['N_cues_local'],
N_cues_distal = self.params['N_cues_distal'],
local_cue_std = self.params['local_cue_std'],
distal_cue_std = self.params['distal_cue_std'],
refresh_fixed_points = False
)
if self.is_mismatch:
pdict.update(mismatch=[(np.pi/180)*self.mismatch])
klass = VMODoubleRotation
else:
klass = VMOModel
model = klass(**pdict)
model.advance()
return model.post_mortem()
@classmethod
def get_session_list(cls, model, **kwargs):
"""Convenience method to get a list of VMOSession objects for the
trials in a model object.
"""
        if model.num_trials == 1:
            res = VMOSession(model, **kwargs)
        else:
            res = []
for trial in xrange(1, model.num_trials+1):
res.append(VMOSession(model, trial=trial, **kwargs))
return res
@classmethod
def save_session_list(cls, session_list, save_dir):
"""Save all sessions in an experiment to the specified directory
"""
if not os.path.exists(save_dir):
os.makedirs(save_dir)
for session in session_list:
if session.is_mismatch:
if session.mismatch == 0:
fn = 'STD.tar.gz'
else:
fn = 'MIS_%03d.tar.gz'%session.mismatch
session.tofile(os.path.join(save_dir, fn))
else:
fn = unique_path(os.path.join(save_dir, 'session_'),
ext='tar.gz')
session.tofile(fn)
@classmethod
def load_session_list(cls, load_dir):
"""Load all sessions from files found in the specified load directory
"""
files = glob(os.path.join(load_dir, '*.tar.gz'))
files.sort()
return [cls.fromfile(fn) for fn in files]
def _get_active_units(self):
"""Get the list of active place units
"""
return self.sortix
def _out_default(self):
return CPrint(prefix=self.__class__.__name__)
```
|
{
"source": "jdmonin/simple-revert",
"score": 3
}
|
#### File: simple-revert/simple_revert/simple_revert.py
```python
import sys
import logging
from collections import defaultdict
from copy import deepcopy
from .common import (
obj_to_dict,
upload_changes,
api_request,
HTTPError,
RevertError,
changes_to_osc
)
def make_diff(obj, obj_prev):
"""Takes two object dicts and produces a diff."""
diff = [('version', obj['version'])]
if obj_prev is None or obj_prev['deleted']:
if obj['deleted']:
return diff
else:
diff.append(('create', obj))
elif obj['deleted']:
diff.append(('delete', obj_prev))
else:
# Both objects are present, compare them
# Moving nodes back
if 'coords' in obj_prev:
if obj['coords'] != obj_prev['coords']:
diff.append(('move', obj_prev['coords'], obj['coords']))
# Restoring old tags
for k in obj['tags']:
if k in obj_prev['tags'] and obj_prev['tags'][k] != obj['tags'][k]:
diff.append(('tag', k, obj_prev['tags'][k], obj['tags'][k]))
elif k not in obj_prev['tags']:
diff.append(('tag', k, None, obj['tags'][k]))
for k in obj_prev['tags']:
if k not in obj['tags']:
diff.append(('tag', k, obj_prev['tags'][k], None))
# Keeping references for ways and relations
if 'refs' in obj and obj_prev['refs'] != obj['refs']:
diff.append(('refs', obj_prev['refs'], obj['refs']))
return diff
def merge_diffs(diff, diff_newer):
"""Merge two sequential diffs."""
if diff is None:
return diff_newer
result = [diff_newer[0]]
# First, resolve creating and deleting
if len(diff) == 2 and diff[1][0] == 'create':
if (len(diff_newer) == 2 and diff_newer[0][1] == diff[0][1] + 1 and
diff_newer[1][0] == 'delete'):
# A special case: deletion negates creation
return None
# On creation, return the first diff: reverting it means deleting the object. No options
return diff
elif len(diff) == 2 and diff[1][0] == 'delete':
if len(diff_newer) == 2 and diff_newer[1][0] == 'create':
# Deletion and creation basically means changing some fields. Make a proper diff
return make_diff(diff_newer[1][1], diff[1][1])
elif len(diff_newer) == 2 and diff_newer[1][0] == 'delete':
# Two deletions, return the earlier one
return diff
else:
# Undoing deletion will clear any changes from the second diff
return diff
else:
if len(diff_newer) == 2 and diff_newer[1][0] == 'create':
# We assume the second change was a simple undeletion, so we ignore it.
# Not going to delete
return diff
elif len(diff_newer) == 2 and diff_newer[1][0] == 'delete':
# This is a tough one. We need to both restore the deleted object
# and apply a diff on top
result.append(('delete', apply_diff(diff, diff_newer[1][1])))
else:
# O(n^2) complexity, because diffs are usually small
moved = False
tags = set()
for change in diff:
if change[0] == 'version':
pass
elif change[0] == 'move' or change[0] == 'refs':
moved = True
op_newer = None
for k in diff_newer:
if k[0] == change[0]:
op_newer = k
if op_newer is None:
result.append(change)
elif change[2] == op_newer[1]:
result.append((change[0], change[1], op_newer[2]))
else:
result.append(op_newer)
elif change[0] == 'tag':
tags.add(change[1])
op_newer = None
for k in diff_newer:
if k[0] == 'tag' and k[1] == change[1]:
op_newer = k
if op_newer is None:
result.append(change)
elif change[2] == op_newer[3]:
pass # Tag value was reverted
elif change[3] == op_newer[2]:
result.append(('tag', change[1], change[2], op_newer[3]))
else:
result.append(op_newer)
else:
raise Exception('Missing processor for merging {0} operation'.format(change[0]))
# Process changes from diff_newer
for op_newer in diff_newer:
if op_newer[0] == 'move' and not moved:
result.append(op_newer)
elif op_newer[0] == 'tag' and op_newer[1] not in tags:
result.append(op_newer)
if len(result) > 1:
return result
# We didn't come up with any changes, return empty value
return None
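# Illustrative example (hypothetical tag edits): two sequential diffs of the
# same object merge into a single revert step.
# >>> d1 = [('version', 2), ('tag', 'name', 'Old', 'Mid')]
# >>> d2 = [('version', 3), ('tag', 'name', 'Mid', 'New')]
# >>> merge_diffs(d1, d2)
# [('version', 3), ('tag', 'name', 'Old', 'New')]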
def apply_diff(diff, obj):
"""Takes a diff and the last version of the object, and produces an initial object from it."""
for change in diff:
if change[0] == 'version':
dver = change[1]
elif change[0] == 'move':
if 'coords' not in obj:
raise Exception('Move action found for {0} {1}'.format(obj['type'], obj['id']))
# If an object was moved after the last change, keep the coordinates
if dver == obj['version'] or change[2] == obj['coords']:
obj['coords'] = change[1]
elif change[0] == 'tag':
if change[1] in obj['tags']:
if change[3] is None:
pass # Somebody has already restored the tag
elif obj['tags'][change[1]] == change[3]:
if change[2] is None:
del obj['tags'][change[1]]
else:
obj['tags'][change[1]] = change[2]
else:
# If a modified tag was deleted after, do not restore it
if change[3] is None:
obj['tags'][change[1]] = change[2]
elif change[0] == 'refs':
if obj['refs'] != change[2]:
raise Exception('Members for {0} {1} were changed, cannot roll that back'.format(
obj['type'], obj['id']))
else:
obj['refs'] = change[1]
else:
raise Exception('Unknown or unprocessed by apply_diff change type: {0}'.format(
change[0]))
return obj
def print_changesets_for_user(user, limit=15):
"""Prints last 15 changesets for a user."""
try:
root = api_request('changesets', params={'closed': 'true', 'display_name': user})
for changeset in root[:limit]:
created_by = '???'
comment = '<no comment>'
for tag in changeset.findall('tag'):
if tag.get('k') == 'created_by':
created_by = tag.get('v')
elif tag.get('k') == 'comment':
comment = tag.get('v')
logging.info(
'Changeset %s created on %s with %s:\t%s',
changeset.get('id'), changeset.get('created_at'), created_by, comment)
except HTTPError as e:
if e.code == 404:
logging.error('No such user found.')
else:
raise
def print_status(changeset_id, obj_type=None, obj_id=None, count=None, total=None):
if changeset_id == 'flush':
sys.stderr.write('\n')
elif changeset_id is not None:
info_str = '\rDownloading changeset {0}'.format(changeset_id)
if obj_type is None:
sys.stderr.write(info_str)
else:
sys.stderr.write('{0}, historic version of {1} {2} [{3}/{4}]{5}'.format(
info_str, obj_type, obj_id, count, total, ' ' * 15))
else:
info_str = '\rReverting changes'
sys.stderr.write('{0}, downloading {1} {2} [{3}/{4}]{5}'.format(
info_str, obj_type, obj_id, count, total, ' ' * 15))
sys.stderr.flush()
def download_changesets(changeset_ids, print_status):
"""Downloads changesets and all their contents from API,
returns (diffs, changeset_users) tuple."""
ch_users = {}
diffs = defaultdict(dict)
for changeset_id in changeset_ids:
print_status(changeset_id)
root = api_request(
'changeset/{0}/download'.format(changeset_id),
sysexit_message='Failed to download changeset {0}'.format(changeset_id))
# Iterate over each object, download previous version (unless it's creation) and make a diff
count = total = 0
for action in root:
if action.tag != 'create':
total += len(action)
for action in root:
for obj_xml in action:
if action.tag != 'create':
count += 1
if changeset_id not in ch_users:
ch_users[changeset_id] = obj_xml.get('user')
obj = obj_to_dict(obj_xml)
if obj['version'] > 1:
print_status(changeset_id, obj['type'], obj['id'], count, total)
try:
obj_prev = obj_to_dict(api_request('{0}/{1}/{2}'.format(
obj['type'], obj['id'], obj['version'] - 1))[0])
except HTTPError as e:
if e.code != 403:
raise
msg = ('\nCannot revert redactions, see version {0} at ' +
'https://openstreetmap.org/{1}/{2}/history')
raise RevertError(msg.format(obj['version'] - 1, obj['type'], obj['id']))
else:
obj_prev = None
diffs[(obj['type'], obj['id'])][obj['version']] = make_diff(obj, obj_prev)
print_status('flush')
return diffs, ch_users
def revert_changes(diffs, print_status):
"""Actually reverts changes in diffs dict. Returns a changes list for uploading to API."""
# merge versions of same objects in diffs
for k in diffs:
diff = None
for v in sorted(diffs[k].keys()):
diff = merge_diffs(diff, diffs[k][v])
diffs[k] = diff
changes = []
count = 0
for kobj, change in diffs.items():
count += 1
if change is None:
continue
try:
# Download the latest version of an object
print_status(None, kobj[0], kobj[1], count, len(diffs))
obj = obj_to_dict(api_request('{0}s?{0}s={1}'.format(kobj[0], kobj[1]))[0])
# Apply the change
obj_new = None
if len(change) == 2 and change[1][0] == 'create':
if not obj['deleted']:
obj_new = {'type': obj['type'], 'id': obj['id'], 'deleted': True}
elif len(change) == 2 and change[1][0] == 'delete':
# Restore only if the object is still absent
if obj['deleted']:
obj_new = change[1][1]
else:
# Controversial, but I've decided to replace the object
# with the old one in this case
obj_new = change[1][1]
else:
obj_new = apply_diff(change, deepcopy(obj))
if obj_new is not None:
obj_new['version'] = obj['version']
if obj_new != obj:
changes.append(obj_new)
except Exception as e:
raise RevertError('\nFailed to download the latest version of {0} {1}: {2}'.format(
kobj[0], kobj[1], e))
print_status('flush')
return changes
def main():
if len(sys.argv) < 2:
print('This script reverts simple OSM changesets. It will tell you if it fails.')
print('Usage: {0} <changeset_id> [<changeset_id> ...] ["changeset comment"]'.format(
sys.argv[0]))
print('To list recent changesets by a user: {0} <user_name>'.format(sys.argv[0]))
sys.exit(1)
logging.basicConfig(level=logging.INFO, format='%(message)s')
if len(sys.argv) == 2 and not sys.argv[1].isdigit():
print_changesets_for_user(sys.argv[1])
sys.exit(0)
# Last argument might be a changeset comment
ids = sys.argv[1:]
comment = None
if not ids[-1].isdigit():
comment = ids[-1]
ids.pop()
changesets = [int(x) for x in ids]
try:
diffs, ch_users = download_changesets(changesets, print_status)
except RevertError as e:
        sys.stderr.write(str(e) + '\n')
sys.exit(2)
if not diffs:
sys.stderr.write('No changes to revert.\n')
sys.exit(0)
try:
changes = revert_changes(diffs, print_status)
except RevertError as e:
        sys.stderr.write(str(e) + '\n')
sys.exit(3)
if not changes:
sys.stderr.write('No changes to upload.\n')
elif sys.stdout.isatty():
tags = {
'created_by': 'simple_revert.py',
'comment': comment or 'Reverting {0}'.format(', '.join(
['{0} by {1}'.format(str(x), ch_users[x]) for x in changesets]))
}
upload_changes(changes, tags)
else:
print(changes_to_osc(changes))
```
|
{
"source": "jdmonnier/mircx_mystic",
"score": 3
}
|
#### File: mircx_mystic/bin/mircx_polsplit.py
```python
from __future__ import print_function
import argparse
import os
from time import sleep
from astropy.io import fits
from tqdm import tqdm
parser = argparse.ArgumentParser(description='Process MIRC-X raw data files')
parser.add_argument("--no-warn", action="store_true")
parser.add_argument("--crop-bad", action="store_true")
parser.add_argument("files", nargs="+", help="File(s) to process")
args = parser.parse_args()
if not args.no_warn:
print("Warning: Make sure you have plenty of disk space; this is going to hurt.")
print("(Hint: ^C while you still can! Sleeping 10 seconds for your benefit.)")
sleep(10)
for dir in ["pol1", "pol2"]:
try:
os.mkdir(dir)
except FileExistsError:
if os.path.isdir(dir):
print("Warning: directory `" + dir + "` already exists")
else:
raise FileExistsError("Looks like you have a file named `" + dir + "`; please remove it.")
def polstate(file, state):
f = fits.open(file)
f[0].header["POLSTATE"] = state
f[0].header["CONF_NA"] = "H_PRISM50" # TEMPORARY FIX
rows = f[0].header["CROPROWS"].split(",")
if len(rows) != 2:
raise ValueError("There must be exactly 2 detector regions. Is this a polarization data file?")
span = 1 - eval(rows[0]) # 50-50 chance it should be rows[1]
if state == 1:
f[0].data = f[0].data[:,:,:span,:]
elif state == 2:
if args.crop_bad:
f[0].data = f[0].data[:,:,span:-2,:]
else:
f[0].data = f[0].data[:,:,span:,:]
else:
raise ValueError("`state` (2nd arg of fcn `polstate`) must have the value either 1 or 2")
path = "pol" + str(state) + "/" + file
f.writeto(path)
f.close()
os.system("fpack " + path)
os.remove(path)
for file in tqdm(args.files):
fz = file[-3:] == ".fz"
if fz:
os.system("funpack " + file)
file = file[:-3]
polstate(file, 1)
polstate(file, 2)
if fz:
os.remove(file)
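# Example invocation (hypothetical file names):
#   mircx_polsplit.py --crop-bad mircx_0001.fits.fz mircx_0002.fits.fz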
```
#### File: mircx_mystic/bin/mircx_redcal_wrap.py
```python
import argparse, subprocess, os, glob, socket, datetime, sys
from mircx_pipeline import log, lookup, mailfile, headers, files, summarise
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits as pyfits
import smtplib
from contextlib import redirect_stdout
try:
from email.mime.multipart import MIMEMultipart
except ModuleNotFoundError:
from email.MIMEMultipart import MIMEMultipart
try:
from email.mime.text import MIMEText
except ModuleNotFoundError:
from email.MIMEText import MIMEText
try:
from email.mime.base import MIMEBase
except ModuleNotFoundError:
from email.MIMEBase import MIMEBase
from email import encoders
import mirc_bot as slack
class cd:
"""
Context manager for changing the current working directory
"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
#####################################################
# Description of script and parsable options
description = \
"""
description use #1:
Wrapper for mircx_reduce.py, mircx_calibrate.py,
mircx_report.py and mircx_transmission.py.
(calibrator checks can now be conducted using the
wrapper: add option --calib-cal=TRUE. NB: requires
CANDID to be installed)
description use #2:
Wrapper for mircx_reduce.py to explore different
values of ncoherent and their effect on vis SNR
and T3PHI error.
"""
epilog = \
"""
examples use #1:
mircx_redcal_wrap.py --dates=2018Oct29,2018Oct28
--ncoherent=5,10 --ncs=1,1 --nbs=4,4 --snr-threshold=2.0,2.0
NB: length of ncoherent, ncs, nbs, snr-threshold must be
equal.
examples use #2:
mircx_redcal_wrap.py --dates=2018Oct25 --ncoh-plots=TRUE
--email=<EMAIL>
"""
parser = argparse.ArgumentParser(description=description,epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,add_help=True)
TrueFalseDefault = ['TRUE','FALSE','TRUEd']
TrueFalse = ['TRUE','FALSE']
TrueFalseOverwrite = ['TRUE','FALSE','OVERWRITE']
parser.add_argument("--raw-dir",dest="raw_dir",default='/data/CHARADATA/MIRCX',type=str,
help="directory base for the raw data paths [%(default)s]")
parser.add_argument("--red-dir",dest="red_dir",default='/data/MIRCX/reduced',type=str,
help="directory base for the reduced data paths [%(default)s]")
parser.add_argument("--dates",dest="dates",type=str,
help="comma-separated list of observation dates to be reduced [%(default)s]")
preproc = parser.add_argument_group ('(1) preproc',
'\nSet of options used to control the book-keeping'
' as well as the preproc and rts reduction steps.')
preproc.add_argument("--reduce",dest="reduce",default='TRUE',
choices=TrueFalseOverwrite,
help="(re)do the reduction process [%(default)s]")
preproc.add_argument("--ncs",dest="ncs",type=str,default='1d',
help="list of number of frame-offset for cross-spectrum [%(default)s]")
preproc.add_argument("--nbs",dest="nbs",type=str,default='4d',
help="list of number of frame-offset for bi-spectrum [%(default)s]")
preproc.add_argument ("--bbias", dest="bbias",type=str,default='TRUEd',
help="list of bools (compute the BBIAS_COEFF product [%(default)s]?)")
preproc.add_argument("--max-integration-time-preproc", dest="max_integration_time_preproc",
default='30.d',type=str,
                     help='maximum integration time per file, in seconds.\n'
                          'This applies to the PREPROC and RTS steps [%(default)s]')
oifits = parser.add_argument_group ('(2) oifits',
'\nSet of options used to control the oifits\n'
' reduction steps.')
oifits.add_argument("--ncoherent",dest="ncoherent",type=str,default='10d',
help="list of number of frames for coherent integration [%(default)s]")
oifits.add_argument("--snr-threshold",dest="snr_threshold",type=str,default='2.0d',
help="list of SNR threshold for fringe selection [%(default)s]")
oifits.add_argument("--flux-threshold",dest="flux_threshold",type=str,default='10.0d',
help="list of flux threshold for faint signal rejection [%(default)s]")
oifits.add_argument("--max-integration-time-oifits", dest="max_integration_time_oifits",
default='150.d',type=str,
                    help='maximum integration time per file, in seconds.\n'
                         'This applies to the OIFITS steps [%(default)s]')
calib = parser.add_argument_group ('(3) calibrate',
'\nSet of options used to control the calibration steps.')
calib.add_argument("--calibrate",dest="calibrate",default='TRUE',
choices=TrueFalseOverwrite,
help="(re)do the calibration process [%(default)s]")
calib.add_argument("--targ-list",dest="targ_list",default='mircx_targets.list',type=str,
help="local database to query to identify SCI and CAL targets [%(default)s]")
calib.add_argument("--calib-cal",dest="calibCal",default='FALSE',
choices=TrueFalse, help="calibrate the calibrators? [%(default)s]")
summary = parser.add_argument_group ('(4) summary',
'\nSet of options used to control the summary report\n'
'file production and email alerts.')
summary.add_argument("--email",dest="email",type=str,default='',
help='email address to send summary report file TO [%(default)s]')
summary.add_argument("--sender",dest="sender",type=str,default='<EMAIL>',
help='email address to send summary report file FROM [%(default)s]')
compare = parser.add_argument_group ('(5) compare',
'\nOptions used to control the exploration of the impact'
'of varying ncoherent on the vis SNR and T3ERR.')
compare.add_argument("--ncoh-plots", dest="ncoh_plots",default='FALSE',
choices=TrueFalse,
help="use the wrapper to produce plots of ncoherent vs\n"
"vis SNR and T3ERR [%(default)s].")
# Parse arguments:
argopt = parser.parse_args ()
# Verbose:
elog = log.trace('mircx_redcal_wrapper')
# Check length of ncs,nbs,mitp,bbias,snr,mito and dates are equal
dates = argopt.dates.split(',')
ncs = str(argopt.ncs).split(',')
nbs = str(argopt.nbs).split(',')
mitp = str(argopt.max_integration_time_preproc).split(',')
bbias = str(argopt.bbias).split(',')
snr = str(argopt.snr_threshold).split(',')
fth = str(argopt.flux_threshold).split(',')
mito = str(argopt.max_integration_time_oifits).split(',')
# Note: each of these is already a list (str.split always returns a list),
# so no further type coercion is needed here.
if len(ncs) == 1 and 'd' in ncs[0]:
# Account for some being default settings:
ncs = [ncs[0].replace('d','')]*len(dates)
if len(nbs) == 1 and 'd' in nbs[0]:
# Account for some being default settings:
nbs = [nbs[0].replace('d','')]*len(dates)
if len(mitp) == 1 and 'd' in mitp[0]:
# Account for some being default settings:
mitp = [mitp[0].replace('.d','')]*len(dates)
if len(bbias) == 1 and 'd' in bbias[0]:
# Account for some being default settings:
bbias = [bbias[0].replace('d','')]*len(dates)
if len(snr) == 1 and 'd' in snr[0]:
# Account for some being default settings:
snr = [snr[0].replace('d','')]*len(dates)
if len(fth) == 1 and 'd' in fth[0]:
# Account for some being default settings:
fth = [fth[0].replace('d','')]*len(dates)
if len(mito) == 1 and 'd' in mito[0]:
# Account for some being default settings:
mito = [mito[0].replace('.d','')]*len(dates)
if len(ncs) == len(nbs) == len(mitp) == len(bbias) == len(snr) == len(fth) == len(mito) == len(dates):
log.info('Length of reduction options checked: ok')
else:
log.error('Error in setup: length of options is not equal!')
sys.exit()
# Force choices of nbs and ncs when bbias=TRUE:
for bb in range(0, len(bbias)):
if bbias[bb] == 'TRUE':
log.info('bbias instance set to true so setting corresponding ncs=1 and nbs=0')
ncs[bb] = 1
nbs[bb] = 0
elif bbias[bb] != 'FALSE':
log.error('Option '+str(bbias[bb])+' not a valid input for bbias')
sys.exit()
# check argopt.ncoherent:
ncoh = str(argopt.ncoherent).split(',')
if argopt.ncoh_plots == 'FALSE':
if len(ncoh) == 1 and 'd' in ncoh[0]:
ncoh = [ncoh[0].replace('d','')]*len(dates)
elif len(ncoh) != len(dates):
log.error("Error: length of --ncoherent doesn't match length of --dates!")
sys.exit()
else:
if len(ncoh) == 1 and 'd' in ncoh[0]:
ncoh = range(2,16)
# remove '/' from end of the reduction and raw base directories
if argopt.raw_dir[-1] == '/':
rawBase = argopt.raw_dir[:-1]
else:
rawBase = argopt.raw_dir
if argopt.red_dir[-1] == '/':
redBase = argopt.red_dir[:-1]
else:
redBase = argopt.red_dir
# Ensure emailing will work:
try:
pw = os.environ['MAILLOGIN']
except KeyError:
log.error('Password for '+argopt.sender+' not found!')
log.info('The password for the email account parsed to --sender')
log.info(' needs to be saved to environment variable $MAILLOGIN.')
sys.exit()
# Ensure that the pipeline can be found
try:
ext = os.environ['MIRCX_PIPELINE']
except KeyError:
log.error('Environment variable $MIRCX_PIPELINE not found')
log.info('Please rectify this before continuing')
sys.exit()
if not os.path.isfile(os.environ['MIRCX_PIPELINE']+'/mircx_pipeline/'+argopt.targ_list):
log.error(os.environ['MIRCX_PIPELINE']+'/mircx_pipeline/'+argopt.targ_list+' not found!')
log.info('Please rectify this before continuing')
sys.exit()
else:
localDB = os.environ['MIRCX_PIPELINE']+'/mircx_pipeline/'+argopt.targ_list
# ^-- this is the local target history database
rawBase0 = rawBase
for d in range(0, len(dates)):
    # special setting for execution on protostar @ exeter:
    if socket.gethostname() in ['protostar','mircx','yso']:
        # recompute from the original base each iteration so the YYYYMmm
        # prefix is not appended cumulatively across dates
        rawBase = rawBase0+'/'+dates[d][0:7]
# 1. Make directory dates_nbsncsbbiasmitp in argopt.red-dir
if bbias[d] == 'TRUE':
bbstr = 'T'
else:
bbstr = 'F'
suf1 = '_nbs'+str(nbs[d])+'ncs'+str(ncs[d])+'bbias'+bbstr+'mitp'+mitp[d]
redDir = redBase+'/'+dates[d]+suf1
files.ensure_dir(redDir)
# 2. run reduce.py with --oifits=FALSE
opt1 = '--ncs='+str(ncs[d])+' --nbs='+str(nbs[d])+' --bbias='+str(bbias[d])
opt2 = ' --max-integration-time-preproc='+str(mitp[d])
opts = opt1+opt2
rawDir = rawBase+'/'+dates[d]
with cd(redDir):
com = "mircx_reduce.py "+opts+" --raw-dir="+rawDir
ma = " --preproc-dir="+redDir+"/preproc --rts-dir="+redDir+"/rts"
nd = " --oifits=FALSE --reduce="+argopt.reduce
pipe = "> nohup_preproc_rts.out"
with open("nohup_preproc_rts.out", 'w') as output:
output.write('\n')
log.info('Execute nohup '+com+ma+nd+' '+pipe)
subprocess.call('nohup '+com+ma+nd+' '+pipe+' &', shell=True)
nf = open('nohup_preproc_rts.out', 'r')
ll = 0
while True:
nf.seek(ll,0)
last_line = nf.read()
ll = nf.tell()
if last_line:
print(last_line.strip())
if 'Total memory:' in last_line:
break
# 3. Make directory snrfthmito in argopt.red-dir/dates_nbsncsbbiasmitp
suf2 = 'snr'+str(snr[d]).replace('.','p')+'fth'+str(fth[d]).replace('.','p')+'mito'+str(mito[d])
files.ensure_dir(redDir+'/'+suf2)
oiDir = redDir+'/'+suf2+"/oifits_nc"+str(ncoh[d])
# 4: identify calibrators
targs = lookup.targList(dates[d],rawBase,redDir) # produces target summary file if directory is new
calInfo, scical = lookup.queryLocal(targs, localDB)
if argopt.ncoh_plots == 'FALSE':
# --------------------------------------------------------------
# 5. Run reduce.py with --rts=FALSE and --preproc=FALSE
# assuming different ncoherent are for different argopt.dates
# --------------------------------------------------------------
opt3 = ' --max-integration-time-oifits='+str(mito[d])+' --snr-threshold='+str(snr[d])+' --flux-threshold='+str(fth[d])
opts2 = opt1+' --ncoherent='+str(ncoh[d])+opt3
with cd(redDir+'/'+suf2):
com = "mircx_reduce.py "+opts2+" --raw-dir="+rawDir+" --preproc=FALSE"
ma = " --preproc-dir="+redDir+"/preproc --rts=FALSE --rts-dir="+redDir+"/rts"
nd = " --oifits-dir="+oiDir+" --rm-preproc=TRUE --rm-rts=TRUE --reduce="+argopt.reduce
pipe = "> nohup_oifits.out"
with open("nohup_oifits.out", 'w') as output:
output.write('\n')
log.info('Execute nohup '+com+ma+nd+' '+pipe)
subprocess.call('nohup '+com+ma+nd+' '+pipe+' &', shell=True)
nf = open('nohup_oifits.out', 'r')
ll = 0
while True:
nf.seek(ll,0)
last_line = nf.read()
ll = nf.tell()
if last_line:
print(last_line.strip())
if 'Total memory:' in last_line:
break
# 6. Check that the oifits step successfully created .fits files in oiDir:
if os.path.isdir(oiDir):
if len(glob.glob(oiDir+'/*.fits')) > 0:
redF = False # reduction did not fail
# a: run report.py script
with cd(oiDir):
command = "mircx_report.py --oifits-dir="+oiDir
pipe = " > nohup_report.out"
with open('nohup_report.out', 'w') as output:
output.write('\n')
log.info('Execute nohup '+command+' '+pipe)
subprocess.call("nohup "+command+' '+pipe+' &', shell=True)
nf = open('nohup_report.out', 'r')
ll = 0
while True:
nf.seek(ll,0)
last_line = nf.read()
ll = nf.tell()
if last_line:
print(last_line.strip())
if 'Total memory:' in last_line:
break
# b: run mircx_transmission.py
today = datetime.datetime.strptime(dates[d], '%Y%b%d')
nextDay = today + datetime.timedelta(days=1)
nD = nextDay.strftime('%Y%b%d')
with cd(redDir):
com = "mircx_transmission.py --dir="+redBase+" --num-nights=14"
ma = " --targ-list="+argopt.targ_list
nd = " --oifits-dir="+suf2+"/oifits_nc"+str(ncoh[d])
pipe = "> nohup_transmission.out"
with open('nohup_transmission.out', 'w') as output:
output.write('\n')
log.info('Execute nohup '+com+ma+nd+' '+pipe)
subprocess.call("nohup "+com+ma+nd+' '+pipe+' &', shell=True)
nf = open('nohup_transmission.out', 'r')
ll = 0
while True:
nf.seek(ll,0)
last_line = nf.read()
ll = nf.tell()
if last_line:
print(last_line.strip())
if 'Total memory:' in last_line:
break
# d: run calibrate.py
if argopt.calibrate != 'FALSE':
with cd(oiDir):
com = "mircx_calibrate.py --oifits-calibrated="+argopt.calibrate
ma = " --calibrators="+calInfo[:-1]+" --oifits-dir="+oiDir
nd = " --oifits-calibrated-dir="+oiDir+'/calibrated'
pipe = "> nohup_calibrate.out"
with open('nohup_calibrate.out', 'w') as output:
output.write('\n')
log.info('Execute nohup '+com+ma+nd+' '+pipe)
subprocess.call("nohup "+com+ma+nd+" "+pipe+" &", shell=True)
nf = open('nohup_calibrate.out', 'r')
ll = 0
while True:
nf.seek(ll,0)
last_line = nf.read()
ll = nf.tell()
if last_line:
print(last_line.strip())
if 'Total memory:' in last_line:
break
else:
redF = True
else:
redF = True
# 7. Check that the calibration step was successful:
if os.path.isdir(oiDir+'/calibrated'):
if len(glob.glob(oiDir+'/calibrated/*.fits')) > 0:
calF = False
# make summary uv coverage plots for the calibrated files:
summarise.plotUV(oiDir+'/calibrated')
else:
calF = True
else:
calF = True
# 8. Write summary and report files
log.info('Read headers from raw data directory')
rawhdrs = headers.loaddir(rawBase+'/'+dates[d]) ############ !!!!!!!
log.info('Create report summary files')
outfiles = summarise.texSumTitle(oiDir, rawhdrs, redF, calF)
#summarise.texSumTables(oiDir,targs,calInfo,scical,redF,rawhdrs,outfiles)
summarise.texTargTable(targs,calInfo,redF,outfiles)
# !!!! This is where the calibrating calibrators table can go
# 9. NEW: calibrate the calibrators!
if os.path.isdir(oiDir) and argopt.calibCal == 'TRUE':
log.info('Calibrating calibrators!')
import shutil
from mircx_pipeline import inspect as inspect
calibrators = calInfo[:-1].split(',')[::3]
calDir = oiDir+'/calibCAL'
with cd(oiDir):
# 1. copy all calibrator .fits files to a new temporary directory
files.ensure_dir(calDir)
            hdrs = headers.loaddir(oiDir)
            calFits = []
            for h in hdrs:
                if 'groundoifits.fits' not in h['ORIGNAME']:
                    if h['OBJECT'] in calibrators:
                        calFits.append(h['ORIGNAME']) # ORIGNAME gives the full path to the file
                    #else:
                    #    print(h['OBJECT'])
            del hdrs
for item in calFits:
shutil.copy2(item, calDir+'/')
for outfile in outfiles:
with open(outfile, 'a') as outtex:
outtex.write('\\subsection*{Calibrator test:')
outtex.write(' goodness of fit of UDD model with added companion in CANDID}\n')
outtex.write('{\\fontsize{7pt}{7pt}\n \\selectfont\n')
outtex.write('\\begin{longtable}{p{.20\\textwidth} | p{.09\\textwidth} | ')
outtex.write('p{.14\\textwidth} | p{.06\\textwidth} | p{.08\\textwidth}')
outtex.write(' | p{.08\\textwidth} | p{.06\\textwidth}}\n \\hline\n')
outtex.write(' Cal ID & UDD (mas) & UDD fit & nsigma & sep (mas) & PA (deg) & $\Delta$Mag \\\\ \n')
outtex.write(' \\hline\n')
for cal in calibrators:
# B. trim calInfo string to isolate cal of interest:
ind = calInfo[:-1].split(',').index(cal)
otherCals = ','.join(calInfo[:-1].split(',')[:ind]+calInfo[:-1].split(',')[ind+3:])
with cd(calDir):
# C. run calibration step for selected cal
com = "mircx_calibrate.py --oifits-calibrated=TRUE --oifits-dir="+calDir
ma = " --calibrators="+otherCals+" --use-detmode=FALSE"
nd = " --oifits-calibrated-dir="+calDir+'/calibrated_'+cal
pipe = "> nohup_inspect_"+str(cal)+".out"
with open('nohup_inspect_'+str(cal)+'.out', 'w') as output:
output.write('\n')
subprocess.call("nohup "+com+ma+nd+" "+pipe+" &", shell=True)
nf = open('nohup_inspect_'+str(cal)+'.out', 'r')
ll = 0
while True:
nf.seek(ll,0)
last_line = nf.read()
ll = nf.tell()
if last_line:
print(last_line.strip())
if 'Total memory:' in last_line:
break
# D. Inspect the calibrator:
log.setFile(oiDir+'/candid_'+cal+'.log') # Attempt to make script save the output written to screen by candid
fs = glob.glob(calDir+'/calibrated_'+cal+'/*.fits')
UDD = calInfo[:-1].split(',')[ind+1] # 0.272748
with open(oiDir+'/candid_'+cal+'.log', 'a') as f_stdout:
with redirect_stdout(f_stdout):
try:
status = inspect.calTest(fs, UDD=UDD, obj=cal, outDir=oiDir, uset3amp=False, fixUDD=False, detLim=True)
except ValueError:
status = ['failed', 0]
if 'failed' in status[0]:
log.error('Calibrating '+cal+' '+status[0]+'!')
log.closeFile() # candid output finished so stop writing screen output to this log
# E. Append summary report with fit info
for outfile in outfiles:
with open(outfile, 'a') as outtex:
fudd = float(UDD)
bf = status[1]
outtex.write(' '+cal.replace('_', ' ')+' & ')
try:
outtex.write(str("%.3f"%bf['best']['diam*'])+'$\\pm$'+str("%.3f"%bf['uncer']['diam*']))
except:
outtex.write(str("%.3f"%fudd))
try:
outtex.write(' & '+status[0]+bf['reliability'])
except:
outtex.write(' & '+status[0])
try:
nsig = str("%.1f"%bf['nsigma'])
except TypeError:
nsig = '--'
except KeyError:
nsig = '--'
try:
bf_r = str("%.2f"%np.sqrt(bf['best']['x']**2 + bf['best']['y']**2))
bf_er = str("%.2f"%np.sqrt( ((bf['uncer']['x']*bf['best']['x'])**2 + (bf['uncer']['y']*bf['best']['y'])**2) / (bf['best']['x']**2 + bf['best']['y']**2) ))
bf_p = str("%.1f"%np.degrees(np.arctan2(bf['best']['x'],bf['best']['y'])))
bf_ep = str("%.1f"%np.degrees(np.sqrt( ((bf['uncer']['y']*bf['best']['x'])**2 + (bf['uncer']['x']*bf['best']['y'])**2) / (bf['best']['x']**2 + bf['best']['y']**2)**2 )))
                        except TypeError:
                            # also default the errors, otherwise the write below
                            # raises NameError when no companion fit is available
                            bf_r, bf_er = '--', '--'
                            bf_p, bf_ep = '--', '--'
try:
bf_f = str("%.2f"%(-2.5*np.log10(bf['best']['f']/100.)))
except TypeError:
bf_f = '--'
outtex.write(' & '+nsig+' & '+bf_r+'$\\pm$'+bf_er+' & '+bf_p+'$\\pm$'+bf_ep+' & '+bf_f)
outtex.write(' \\\\ \n')
                try:
                    del status
                except NameError:
                    pass # status was never set for this calibrator
for outfile in outfiles:
with open(outfile, 'a') as outtex:
outtex.write(' \\hline\n\\end{longtable}\n\n')
outtex.write('CANDID plots are located in the following ')
outtex.write('folder on '+socket.gethostname()+':\n\n')
outtex.write(oiDir.replace('_','\\_')+'\n')
outtex.write('and are included in the longform version of this report\n\n')
# F. delete the temporary directory
shutil.rmtree(calDir+'/')
summarise.texReducTable(oiDir,redF,outfiles)
log.info('Cleanup memory')
del rawhdrs
summarise.texReportPlts(oiDir,outfiles,dates[d])
summarise.texSumUV(oiDir,calF,outfiles)
summarise.texSumPlots(oiDir,redF,calF,outfiles,calInfo[:-1].split(',')[::3])
with cd(redDir):
subprocess.call('pdflatex '+outfiles[1], shell=True)
subprocess.call('pdflatex '+outfiles[0] , shell=True)
log.info('Write and compile summary report')
# 10. Email summary file to argopt.email
if '@' in argopt.email:
mailfile.sendSummary(argopt.email,argopt.sender,outfiles[1].replace('.tex','.pdf'),rawDir)
else:
log.info('Exploring impact of ncoherent on SNR and T3PHI')
log.info('Values parsed to --ncoherent to be used for all --dates')
# -------------------------------------------------------------------------------
# 5. Run reduce.py with --rts=FALSE and --preproc=FALSE for each argopt.ncoherent
# -------------------------------------------------------------------------------
opt3 = ' --max-integration-time-oifits='+str(mito[d])+' --snr-threshold='+str(snr[d])+' --flux-threshold='+str(fth[d])
for nc in ncoh:
oiDir = redDir+'/'+suf2+"/oifits_nc"+str(nc)
if not os.path.isdir(oiDir):
opts2 = opt1+' --ncoherent='+str(nc)+opt3
log.info('Run oifits step for ncoherent='+str(nc))
with cd(redDir):
com = "mircx_reduce.py "+opts2+" --raw-dir="+rawDir+" --preproc=FALSE"
ma = " --preproc-dir="+redDir+"/preproc --rts=FALSE --rts-dir="+redDir+"/rts"
nd = " --oifits-dir="+oiDir+" --reduce="+argopt.reduce
pipe = "> nohup_oifits.out"
with open("nohup_oifits.out", 'w') as output:
output.write('\n')
log.info('Execute nohup '+com+ma+nd+' '+pipe)
subprocess.call('nohup '+com+ma+nd+' '+pipe+' &', shell=True)
nf = open('nohup_oifits.out', 'r')
ll = 0
while True:
nf.seek(ll,0)
last_line = nf.read()
ll = nf.tell()
if last_line:
print(last_line.strip())
if 'Total memory:' in last_line:
break
else:
log.info(oiDir+' already exists')
log.info('Skipped ncoherent='+str(nc))
# 6. Produce the plot of ncoherent vs SNR and ncoherent vs T3PHI:
snr_keys = ['SNR01 MEAN', 'SNR02 MEAN', 'SNR03 MEAN', 'SNR04 MEAN', 'SNR05 MEAN',
'SNR12 MEAN', 'SNR13 MEAN', 'SNR14 MEAN', 'SNR15 MEAN','SNR23 MEAN',
'SNR24 MEAN', 'SNR25 MEAN', 'SNR34 MEAN', 'SNR35 MEAN', 'SNR45 MEAN']
T3err_keys = ['T3PHI012 ERR', 'T3PHI013 ERR', 'T3PHI014 ERR', 'T3PHI015 ERR',
'T3PHI023 ERR', 'T3PHI024 ERR', 'T3PHI025 ERR', 'T3PHI034 ERR',
'T3PHI035 ERR','T3PHI045 ERR', 'T3PHI123 ERR', 'T3PHI124 ERR',
'T3PHI125 ERR', 'T3PHI134 ERR', 'T3PHI135 ERR', 'T3PHI145 ERR',
'T3PHI234 ERR', 'T3PHI235 ERR', 'T3PHI245 ERR', 'T3PHI345 ERR']
nc_values = [float(n) for n in ncoh]
snr_data = []
T3err_data = []
for nc in ncoh:
fs = glob.glob(redDir+'/'+suf2+'/oifits_nc'+str(nc)+'/*_oifits.fits')[::2]
log.info(redDir+'/'+suf2+'/oifits_nc'+str(nc)+" # files = "+str(len(fs)))
hdrs = [];
for f in fs:
hdulist = pyfits.open(f)
hdrs.append(hdulist[0].header)
hdulist.close()
snr_data.append(np.array([[ h.get('HIERARCH MIRC QC '+k, 0.) for k in snr_keys] for h in hdrs]))
T3err_data.append(np.array([[ h.get('HIERARCH MIRC QC '+k, 0.) for k in T3err_keys] for h in hdrs]))
snr_data = np.asarray(snr_data)
T3err_data = np.asarray(T3err_data)
files.ensure_dir(redDir+'/'+suf2+'/PNG/')
# SNR vs Ncoherent:
for nf in range(0, snr_data.shape[1]): # number of files
fig,ax = plt.subplots(5,3,figsize=(10,12)) # 15 SNR for each file
ax = ax.flatten()
for i in range(0, snr_data.shape[2]):
ax[i].plot(nc_values, snr_data[:,nf,i], '-o')
ax[i].set_ylabel('SNR')
ax[i].set_xlabel('Ncoherent')
fig.savefig(redDir+'/'+suf2+'/PNG/snr_vs_ncoh'+str(nf)+'.png', dpi=300,bbox_inches='tight')
log.info('Created file: '+redDir+'/'+suf2+'/PNG/snr_vs_ncoh'+str(nf)+'.png')
plt.close()
# T3err vs Ncoherent:
for nf in range(0, snr_data.shape[1]):
fig,ax = plt.subplots(5,4,figsize=(10,12)) # 20 T3 for each file
ax = ax.flatten()
for i in range(0, T3err_data.shape[2]):
ax[i].plot(nc_values, T3err_data[:,nf,i], '-o')
ax[i].set_ylabel('T3 Err')
ax[i].set_xlabel('Ncoherent')
fig.savefig(redDir+'/'+suf2+'/PNG/t3err_vs_ncoh_oifits'+str(nf)+'.png', dpi=300,bbox_inches='tight')
log.info('Created file: '+redDir+'/'+suf2+'/PNG/t3err_vs_ncoh_oifits'+str(nf)+'.png')
plt.close()
# 7. email user when this procedure finishes and prompt them to run the calibrate
# section of the script with the best value of ncoherent.
line1 = 'ncoherent vs SNR and T3PHI plots for '+argopt.dates+' located in '+redDir+'/'+suf2+'/PNG/ \n\n'
line2 = 'To calibrate the data with the best ncoherent value (X), use:\n\n'
line3 = 'mircx_redcal_wrap.py --reduce=FALSE --dates='+dates[d]+' '+opt1+opt3+' --ncoherent=X\n\n'
if '@' in argopt.email:
msg = MIMEMultipart()
msg['From'] = argopt.sender
msg['To'] = argopt.email
msg['Subject'] = 'Finished: MIRC-X redcal ncoherent vs SNR and T3PHI plots for '+argopt.dates
body = line1+line2+line3
msg.attach(MIMEText(body, 'plain'))
try:
mailfile.send_email(msg, argopt.sender, argopt.email)
log.info('Emailed note to:')
log.info(argopt.email)
except smtplib.SMTPAuthenticationError:
log.error('Failed to send note to '+argopt.email)
log.error('Check with Narsi Anugu for permissions')
sys.exit()
else:
log.info(line1)
log.info(line2)
log.info(line3)
################
# Check the disk usage and post to Slack if exceeds 90%
def fmtbytes(nbytes):
if nbytes > 1e14:
out = str(int(nbytes/1e12)) + "T"
elif nbytes > 1e13:
out = " " + str(int(nbytes/1e12)) + "T"
elif nbytes > 1e12:
out = str(round(nbytes/1e12, 1)) + "T"
elif nbytes > 1e11:
out = str(int(nbytes/1e9)) + "G"
elif nbytes > 1e10:
out = " " + str(int(nbytes/1e9)) + "G"
elif nbytes > 1e9:
out = str(round(nbytes/1e9, 1)) + "G"
elif nbytes > 1e8:
out = str(int(nbytes/1e6)) + "M"
elif nbytes > 1e7:
out = " " + str(int(nbytes/1e6)) + "M"
elif nbytes > 1e6:
out = str(round(nbytes/1e6, 1)) + "M"
elif nbytes > 1e5:
out = str(int(nbytes/1e3)) + "k"
elif nbytes > 1e4:
out = " " + str(int(nbytes/1e3)) + "k"
else:
out = str(round(nbytes/1e3, 1)) + "k"
return out
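# Illustrative examples (assuming the thresholds above):
#   fmtbytes(1.5e12) -> '1.5T'
#   fmtbytes(2.3e10) -> ' 23G'
#   fmtbytes(4.2e5)  -> '420k'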
if socket.gethostname() == 'mircx':
for i in range(1,7):
drive = "/data"+str(i)
statvfs = os.statvfs(drive)
used = 1 - (statvfs.f_bavail/statvfs.f_blocks)
free = fmtbytes(statvfs.f_bavail * statvfs.f_frsize)
if used > 0.9:
percentage = "{:.1f}".format(100*used)
warn = "*Warning:* `" + drive + "` is " + percentage + "%"+ " full! (" + free + " free space remaining)"
slack.post("data_pipeline", warn)
```
#### File: mircx_mystic/bin/mircx_transmission.py
```python
import mircx_pipeline as mrx
import argparse, glob, os, sys
import datetime as dattime
from datetime import datetime
import numpy as np
from astropy.io import fits as pyfits
import matplotlib.pyplot as plt
from dateutil.parser import parse
from mircx_pipeline import log, headers, plot, files
from mircx_pipeline.headers import HMQ
# Describe the script
description = \
"""
description:
Plot a report of the transmission across multiple
nights of observations.
"""
epilog = \
"""
examples:
python mircx_transmission.py --num-nights=10
or
python mircx_transmission.py --date-from=2018Oct25 --date-to=2018Oct29
"""
TrueFalse = ['TRUE','FALSE']
parser = argparse.ArgumentParser(description=description, epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=True)
parser.add_argument("--dir", dest="dir",default='/data/MIRCX/reduced',type=str,
help="main trunk of reduction directory [%(default)s]")
parser.add_argument("--num-nights",dest="num_of_nights",default=0,type=int,
help="Number of nights to be included in plot [50]")
parser.add_argument("--date-from",dest="night_from",default='',type=str,
help="Earliest date to be included in plot (YYYYMmmDD)")
parser.add_argument("--date-to",dest="night_to",default='',type=str,
help="Latest date to be included in plot (YYYYMmmDD)")
parser.add_argument("--targ-list",dest="targ_list",default='mircx_targets.list',type=str,
help="local database with SCI and CAL IDs [%(default)s]")
parser.add_argument("--only-reference", dest="only_reference",default='FALSE',
choices=TrueFalse,
help="Use only REFERENCE (calibrator) stars [%(default)s]")
parser.add_argument("--oifits-dir",dest="oifits_dir",default='.',type=str,
help="directory of products [%(default)s]")
# Parse arguments:
argopt = parser.parse_args()
# Verbose:
elog = log.trace('mircx_transmission')
o1 = ' --num-nights='+str(argopt.num_of_nights)+' --date-from='+argopt.night_from
o2 = ' --date-to='+argopt.night_to+' --targ-list='+argopt.targ_list
o3 = ' --only-reference='+str(argopt.only_reference)+' --oifits-dir='+argopt.oifits_dir
log.info('Run mircx_transmission.py --dir='+argopt.dir+o1+o2+o3)
# Check how many nights are to be plotted:
now = datetime.now()
if argopt.num_of_nights != 0:
nNight = argopt.num_of_nights
else:
if argopt.night_from == '':
nNight = 14 # default to plotting the 14 most recent nights of data
else:
fNight = argopt.night_from
try:
fN = datetime.strptime(fNight,'%Y%b%d')
except ValueError:
log.error('Argument "date-from" does not match format "%Y%b%d"')
sys.exit()
if argopt.night_to == '':
lNight = now.strftime('%Y%b%d') # takes current date in YYYMmmDD format
else:
lNight = argopt.night_to
# Get the list of observation dates from directory trunk:
if argopt.dir == './':
sDir = ''
else:
sDir = argopt.dir
if sDir[-1] != '/':
sDir = sDir+'/'
def is_date(string, fuzzy=False):
"""
Return whether the string can be interpreted as a date.
param string: str, string to check for date
param fuzzy: bool, ignore unknown tokens in string if True
"""
try:
parse(string, fuzzy=fuzzy)
return True
except ValueError:
return False
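# Example: is_date('2018Oct25') is True, while is_date('preproc') is False.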
# Check the directory structure read in directory names from trunk
if is_date(sDir.split('/')[-2]):
# we're in a /data/reduced/YYYYMmm/YYYYMmmDD format directory tree
dirList = glob.glob('/'.join(sDir.split('/')[:-2])+'/*/*')
else:
# we're in a /data/reduced/YYYYMmmDD format directory tree
dirList = glob.glob(sDir+'*')
dL = list(set([d.split('_')[0].split('/')[-1] for d in dirList])) # remove duplicate dates
dL1 = []
for d in dL:
    try:
        dL1.append(datetime.strptime(d,'%Y%b%d')) # for sorting, translate these into datetime format
    except ValueError:
        # ensure other things in the directory are skipped over but keep a note of what they are
        log.info('Skipped file in directory: '+d)
except ValueError:
# ensure other things in the directory are skipped over but keep a note of what they are
log.info('Skipped file in directory: '+d)
dL2 = [dL1[i] for i in np.argsort(dL1)] # sort the dates (earliest first)
dateList = [d.strftime('%Y%b%d') for d in dL2] # convert these back into their original format
try:
if len(dateList) > nNight:
log.info('Number of observation dates exceeds '+str(nNight))
dL3 = dateList[len(dateList)-nNight:]
dateList = dL3
log.info('Cropped earlier dates from dateList')
else:
log.info('Number of observation dates is less than '+str(nNight))
log.info('All observation nights in current directory will be plotted')
except NameError:
# catch instances where fNight and lNight are used to limit date range rather than nNight
# Check that lNight is in the dateList:
while lNight not in dateList and lNight != now.strftime('%Y%b%d'):
# increase the day by one until the next obs date or current date is reached:
today = datetime.strptime(lNight, '%Y%b%d')
nextDay = today + dattime.timedelta(days=1)
nD = nextDay.strftime('%Y%b%d')
lNight = nD
    if lNight not in dateList:
        dL3 = dateList[dateList.index(fNight):]
    else:
        dL3 = dateList[dateList.index(fNight):dateList.index(lNight)+1]
dateList = dL3
log.info('Removed dates earlier than '+fNight+' from dateList')
if lNight != now.strftime('%Y%b%d'):
log.info('Removed dates later than '+lNight+' from dateList')
# Locate calibrator names
if not os.path.isfile(os.environ['MIRCX_PIPELINE']+'/mircx_pipeline/'+argopt.targ_list):
log.error(os.environ['MIRCX_PIPELINE']+'/mircx_pipeline/'+argopt.targ_list+' not found!')
log.info('Please rectify this before continuing')
sys.exit()
else:
localDB = os.environ['MIRCX_PIPELINE']+'/mircx_pipeline/'+argopt.targ_list
# ^-- this is the local target history database
calL = []
with open(localDB) as dbfile: # renamed from 'input' to avoid shadowing the builtin
    head = dbfile.readline()
    for line in dbfile:
try:
if line.split(',')[5] == 'CAL':
calL.append(line.split(',')[0])
except IndexError:
log.info('Final line in localDB file is blank: please fix')
# Load astroquery
try:
from astroquery.vizier import Vizier;
log.info ('Load astroquery.vizier');
from astroquery.simbad import Simbad;
log.info ('Load astroquery.simbad');
except:
log.warning ('Cannot load astroquery.vizier, try:');
log.warning ('sudo conda install -c astropy astroquery');
# ----------------------------
# Date non-specific values for calculating the transmission:
# ----------------------------
# Zero point of 2MASS:H from Cohen et al. (2003, AJ 126, 1090):
Hzp = 9.464537e6 # [photons/millisec/m2/microns]
# internal transmission * quantum efficiency from Cyprien [dimensionless]:
iTQE = 0.5
# collecting area of 1 telescope (assuming circular aperture) [m2]:
telArea = np.pi * 0.5*0.5
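# Worked example (hypothetical star, taking EXPOSURE in ms and BANDWID in
# microns as the zero point's units suggest): for Hmag = 5.0, expT = 2.5 ms
# and bWid = 0.05 microns,
#   fH = Hzp * 10**(-5.0/2.5) ~ 9.46e4 photons/ms/m2/micron
#   fExpect = fH * 2.5 * 0.05 * telArea * iTQE ~ 4.6e3 photons
# The gain-corrected BANDFLUX measured below is compared against this to
# express the transmission as a percentage.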
# ----------------------------
# Set up the plot window:
# ----------------------------
fig,axes = plt.subplots(7,1,sharex=True,figsize=(9,16))
plot.compact(axes)
# ----------------------------
# For each date being plotted...
# ----------------------------
calCol = ['darkred', 'palegreen']
calColI = 0
count = 0
cObj = ''
tLoc = [] # array for x-axis tick locations to mark the dates on the plot
oiDir = argopt.oifits_dir
dNames = []
for d in dateList:
# Find an oifits directory for this date:
oiDirs = []
for dd in dirList:
if d in dd and 'ncoh' not in dd and '.png' not in dd and 'bracket' not in dd:
oiDirs.append(dd)
if d == '2018Oct25':
oiDirs = ['2018Oct25_nbs0ncs1bbiasTmitp30']
log.info('Found the following data directories for '+d)
log.info(oiDirs)
oi,i = 0,0
if oiDirs == []:
oi += 1 # ensures that the user doesn't get stuck in the while loop
while oi == 0 and i < len(oiDirs):
if os.path.isdir(oiDirs[i]+'/'+oiDir):
hdrs = mrx.headers.loaddir(oiDirs[i]+'/'+oiDir)
if hdrs != []:
# once hdrs are found and read in, break the while loop
oi += 1
else:
# if an oifits directory does not exist in that directory,
# check another directory for the same obs date
i += 1
else:
i += 1
try:
# sort the headers by time:
ids = np.argsort([h['MJD-OBS'] for h in hdrs])
hdrs = [hdrs[i] for i in ids]
log.info('Sorted headers by observation date')
# Keep only the calibrator stars?:
if argopt.only_reference == 'TRUE':
hdrs = [h for h in hdrs if h['OBJECT'].replace('_',' ') in calL]
log.info('Cropped SCI targets from header list')
# Check if transmission information has already been saved to the header:
        bandF = np.array([])
        for b in range(6):
            # accumulate the values across all six bands (appending to bandF,
            # not to an undefined variable, which always raised NameError)
            bandF = np.append(bandF, headers.getval(hdrs,HMQ+'TRANS%i'%b,default='no'))
if 'no' in bandF:
log.info('Calculate transmission information')
# Read in the data:
objList = list(set([h['OBJECT'] for h in hdrs]))
objList[:] = [x for x in objList if x not in ['NOSTAR', '', 'STS']]
# ^--- removes NOSTAR and blank object name instances from object list
objCat = dict()
exclude = ['NOSTAR', '', 'STS']
for obj in objList:
try:
cat = Vizier.query_object(obj, catalog='JSDC')[0]
# ^-- IndexError raised if object not found
log.info('Find JSDC for '+obj+':')
ind = list(cat['Name']).index(obj.replace('_', ' '))
# ^-- ValueError raised if object name in JSDC is not what we use
log.info(' diam = %.3f mas'%cat['UDDH'][ind])
log.info(' Hmag = %.3f mas'%cat['Hmag'][ind])
objCat[obj] = cat[ind]
del ind
except IndexError:
log.info('Cannot find JSDC for '+obj)
exclude.append(obj)
except ValueError:
ind = -999
# (sometimes we get here when JSDC finds neighbouring stars but not our target)
# (other times we get here if the object name in JSDC is an alias)
alt_ids = Simbad.query_objectids(obj)
for a_id in list(cat['Name']):
if a_id in list(alt_ids['ID']):
ind = list(cat['Name']).index(a_id)
elif a_id in list([a.replace(' ', '') for a in alt_ids['ID']]):
ind = list(cat['Name']).index(a_id)
if ind != -999:
log.info(' diam = %.3f mas'%cat['UDDH'][ind])
log.info(' Hmag = %.3f mas'%cat['Hmag'][ind])
objCat[obj] = cat[ind]
else:
log.info('Cannot find JSDC for '+obj)
exclude.append(obj)
del ind
kl = 0 # dummy variable used to ensure that info message is only printed to log once per date
log.info('Extract camera settings from headers')
log.info('Calculate transmission on each beam')
for h in hdrs:
if h['OBJECT'] not in exclude:
expT = h['EXPOSURE']
bWid = abs(h['BANDWID'])
gain = 0.5 * h['GAIN']
try:
Hmag = float(objCat[h['OBJECT']]['Hmag']) # raises NameError if nothing was returned from JSDC
fH = Hzp * 10**(-Hmag/2.5)
fExpect = fH * expT * bWid * telArea * iTQE
for b in range(6):
fMeas = h[HMQ+'BANDFLUX%i MEAN'%b] / gain # raises KeyError if reduction was done before this keyword was introduced
if fMeas < 0.:
h[HMQ+'TRANS%i'%b] = -1.0
else:
h[HMQ+'TRANS%i'%b] = 100. * (fMeas / fExpect)
except NameError:
# if info for the object was NOT returned from JSDC:
for b in range(6):
h[HMQ+'TRANS%i'%b] = -1.0
except KeyError:
# if info was returned but the reduction is old or object name not in JSDC:
for b in range(6):
h[HMQ+'TRANS%i'%b] = -1.0
if kl == 0:
log.info('QC parameter BANDFLUX missing from header.')
log.info('Re-running the reduction is recommended.')
kl += 1
else:
for b in range(6):
h[HMQ+'TRANS%i'%b] = -1.0
# assign colours to data based on SCI or CAL ID and add data to plot:
countmin = count
for h in hdrs:
objname = headers.getval([h],'OBJECT')[0]
if objname not in exclude:
r0 = headers.getval([h],'R0')[0]
if objname.replace('_', ' ') in calL and objname == cObj:
# cal is the same as previous so colour must be maintained
col = calCol[calColI]
mkr = 'o'
elif objname.replace('_', ' ') in calL and objname != cObj:
# cal is different to previous so colour must be changed
                    # alternate between the two calibrator colours
                    calColI = (calColI + 1) % len(calCol)
col = calCol[calColI]
mkr = 'o'
cObj = objname
else:
# target is sci, not cal
col = 'k'
mkr = '+'
# plot the seeing data:
axes.flatten()[0].plot(count,r0,marker=mkr,color=col,ls='None',ms=5)
# plot the transmission data:
for b in range(6):
transm = headers.getval([h], HMQ+'TRANS%i'%b)
if transm > 0:
axes.flatten()[b+1].plot(count, transm, marker=mkr, color=col, ls='None', ms=5)
try:
if transm > transmax:
transmax = max(transm)
except NameError:
transmax = max(transm)
count += 1
del col, mkr, transm, objname
elif objname != 'NOSTAR' and objname != '' and objname != 'STS':
# plot the seeing data:
axes.flatten()[0].plot(count,headers.getval([h],'R0')[0],marker='+',color='k',ls='None',ms=5)
# don't bother plotting the transmission data cos the values are just '-1'
count += 1
countmax = count
# add vertical line to plot:
for b in range(7):
axes.flatten()[b].plot([count,count],[-0.1,18],ls='-.',color='k')
count += 1
tLoc.append(int(np.ceil((countmax-countmin)/2))+countmin)
del countmin, countmax
del hdrs, oiDirs
dNames.append(d)
except NameError:
log.info('No calibrated data found for '+d+'...skipped date')
# -------------------------
# edit the tick parameters and locations:
# -------------------------
for b in range(1, 7):
axes.flatten()[b].set_ylim([-0.1, transmax])
axes.flatten()[0].set_title('Mean seeing [10m average]')
axes.flatten()[1].set_title('Transmission [$\%$ of expected $F_\star$]')
axes.flatten()[5].set_xticks(tLoc)
axes.flatten()[5].set_xticklabels(dNames,rotation=70, fontsize=12)
# -------------------------
# save the figure:
# -------------------------
plt.tight_layout()
#plt.show()
if dateList[0] != dateList[-1]:
files.write(fig,sDir+'overview_transmission_'+dNames[0]+'_'+dNames[-1]+'.png')
else:
files.write(fig,sDir+'transmission_'+dNames[0]+'.png')
```
#### File: jdmonnier/mircx_mystic/catalog.py
```python
from astropy.io import fits as pyfits;
import os;
import numpy as np;
import matplotlib.pyplot as plt;
import matplotlib.colors as mcolors;
from . import log, files, headers, signal;
# Try import astroquery
try:
from astroquery.vizier import Vizier;
except:
print ('WARNING: cannot import astroquery.vizier');
print ('WARNING: some functionalities will crash');
# Columns of our generic catalog
columns = [('NAME','20A',''),
('ISCAL','I',''),
('RAJ2000','20A','hms'),
('DEJ2000','20A','dms'),
('_r','E','arcm'),
('SpType','20A',''),
('Vmag','E','mag'),
('Hmag','E','mag'),
('MODEL','20A',''),
('PARAM1','E',''),
('e_PARAM1','E',''),
('PARAM2','E',''),
('e_PARAM2','E',''),
('PARAM3','E',''),
('e_PARAM3','E',''),
('PARAM4','E',''),
('e_PARAM4','E',''),
('PARAM5','E',''),
('e_PARAM5','E','')];
def model (u, v, lbd, mjd, data):
'''
Models for calibration stars. u, v and lbd are in [m]
mjd is in Modified Julian Day.
u, v and lbd should be conformable.
data should accept the following calls and return valid data:
data['MODEL'], data['PARAM1'], data['e_PARAM1']...
The function returns the a tupple with
the complex vis and its error.
'''
name = data['MODEL'];
if name == 'UDD':
spf = np.sqrt (u**2 + v**2) / lbd * 4.84813681109536e-09;
diam = data['PARAM1'];
ediam = data['e_PARAM1'];
vis = signal.airy (diam * spf);
evis = np.abs (signal.airy ((diam-ediam) * spf) - signal.airy ((diam+ediam) * spf));
elif name == 'LDD':
log.warning ('LDD model is crap !!!');
vis = u + v;
evis = vis * 0.0;
else:
raise ValueError ('Model name is unknown');
return vis, evis;
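# Example usage (hypothetical values): visibility and error of a
# 0.5 +/- 0.05 mas uniform disk on a 100 m baseline in H band:
#   vis, evis = model (100.0, 0.0, 1.65e-6, 58000.0,
#                      {'MODEL':'UDD', 'PARAM1':0.5, 'e_PARAM1':0.05});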
def create_from_jsdc (filename, hdrs, overwrite=True):
'''
Create a new catalog file for stars in hdrs
by querying the JSDC.
The hdrs argument can be a list of star name,
or a list of headers loaded by the function
headers.loaddir ();
The function write the catalog as a FITS file
called "filename.fits". It erase any file existing
with the same name.
'''
if overwrite == False:
ValueError ('overwrite=False mode is not supported yet')
# Import and init astroquery
Vizier.columns = ['+_r','*'];
# List of object
objlist = list(set([h if type(h) is str else h['OBJECT'] for h in hdrs]));
# If file exist
if os.path.exists (filename+'.fits'):
log.info ('Load existing file');
hdulist = pyfits.open (filename+'.fits');
hdu0 = hdulist[0].copy();
hdu1 = hdulist[1].copy();
hdulist.close ();
# Create catalog file
else:
hdu0 = pyfits.PrimaryHDU ([]);
hdu0.header['FILETYPE'] = 'CATALOG';
# Create FITS binary table, empty except the names
bincols = [pyfits.Column (name='NAME', format='20A', array=objlist)];
hdu1 = pyfits.BinTableHDU.from_columns (bincols);
hdu1.header['EXTNAME'] = 'CATALOG';
# Add missing columns if any
for c in columns[1:]:
if c[0] not in hdu1.columns.names:
hdu1.columns.add_col (pyfits.Column (name=c[0], format=c[1], unit=c[2]));
# Loop on object in the list
for i,obj in enumerate (objlist):
try:
cat = Vizier.query_object (obj, catalog='JSDC')[0][0];
log.info ('Find JSDC for '+obj);
log.info ("diam = %.3f mas"%cat['UDDH']);
log.info ("Hmag = %.3f mas"%cat['Hmag']);
# Set all info available in catalog
for c in columns:
if c[0] in cat.colnames:
hdu1.data[i][c[0]] = cat[c[0]];
            # Set UDD model for this star only (index row i, not the whole column)
            hdu1.data['MODEL'][i] = 'UDD';
            hdu1.data['PARAM1'][i] = cat['UDDH'];
            hdu1.data['e_PARAM1'][i] = cat['e_LDD'];
            # Check if confident to be a calibrator
            if cat['UDDH'] > 0 and cat['UDDH'] < 1.0 and cat['e_LDD'] < 0.3 and cat['_r'] < 1./10:
                log.info (obj+' declared as calibrator');
                hdu1.data['ISCAL'][i] = 1;
except:
log.info ('Cannot find JSDC for '+obj);
# Remove file if existing
if os.path.exists (filename):
os.remove (filename);
# Write file
hdulist = pyfits.HDUList ([hdu0,hdu1]);
files.write (hdulist, filename+'.fits');
```
#### File: mircx_mystic/devel/opd.py
```python
import numpy as np;
import matplotlib.pyplot as plt;
from scipy.optimize import fsolve;
def dist (p1,p2):
d = 0.0
for i in np.arange(len(p1)): d += (p1[i] - p2[i])**2;
return np.sqrt (d);
def myFunction (y):
'''
x is from fold to instrument
y is from prism to opd-machine
z is from table to top
'''
# h1,h2 = 6.0, 6.0; # Jacob
h1,h2 = 4.25, 6.0; # Michigan
# h1,h2 = 5.25, 7.55; # CHARA
# positions of prism
x1 = 3.125 - np.arange (6) * 1.25; # ok
# y1 = np.array([0., 2.5, -3.0619, 3.0619, -2.5,0.]); # ok
y1 = np.array([0., 2.34, -3.059, 3.06, -2.502, 0.]); # ok
z1 = np.ones (6) * h1;
# positions of opd-machine
x2 = x1;
y2 = y;
z2 = np.ones (6) * h2;
# position of fold
x3 = x2;
y3 = -np.arange (6) * 3.0;
z3 = z2;
# position of exits
x4 = 3.125;
y4 = y3;
z4 = z3;
# current OPL
dd = dist ((x1,y1,z1),(x2,y2,z2));
dd += dist ((x2,y2,z2),(x3,y3,z3));
dd += dist ((x3,y3,z3),(x4,y4,z4));
# requested OPL (match Jacob)
req = 33.902 + np.arange (6) * 11.2;
# residuals
return dd - req;
# Solve
y2guess = 20 + np.ones(6) * 3;
y2sol = fsolve (myFunction, y2guess); # solved y-positions of the opd-machines
print (y2sol);
print (y2sol - 15);
```
#### File: jdmonnier/mircx_mystic/headers.py
```python
import pdb
from pydoc import pathdirs
from syslog import LOG_WARNING
import numpy as np;
import pandas as pd;
import sys
from astropy.io import fits as pyfits;
from astropy.time import Time;
from astropy.io import ascii;
from astropy.table import Table;
import os, glob, pickle, datetime, re, csv, gc;
from . import log
counters={'gpstime':0, 'etalon':0, 'sts':0}
# Global shortcut
# removing HIERARCH for 2.0
HM = 'MIRC ';
HMQ = 'MIRC QC ';
HMP = 'MIRC PRO ';
HMW = 'MIRC QC WIN ';
HC = 'CHARA ';
def str2bool (s):
if s == True or s == 'TRUE': return True;
if s == False or s == 'FALSE': return False;
raise ValueError('Invalid boolean string');
def getval (hdrs, key, default=np.nan):
'''
Return a numpy array with the values in header
'''
return np.array ([h.get(key,default) for h in hdrs]);
def summary (hdr):
'''
Return a short string to
summarize the header
'''
value = 'G%i-L%i-R%i %.4f %s'%(hdr.get('GAIN',0),hdr.get('NLOOPS',0),hdr.get('NREADS',0),
hdr.get('MJD-OBS',0.0),hdr.get('OBJECT','unknown'));
if 'MIRC PRO NCOHER' in hdr:
value += ' NCOHER=%i'%(hdr.get('MIRC PRO NCOHER',0));
return value;
def setup (hdr, params):
'''
Return the setup as string
'''
value = ' / '.join([str(hdr.get(p,'--')) for p in params]);
return value;
def get_beam (hdr):
'''
Return the i of BEAMi
'''
n = hdr if type(hdr) is str else hdr['FILETYPE'];
for i in range(1,7):
if 'beam%i'%i in n: return i;
if 'BEAM%i'%i in n: return i;
return None;
def clean_date_obs (hdr):
'''
Clean DATE-OBS keyword to always match
ISO format YYYY-MM-DD
'''
if 'DATE-OBS' not in hdr:
return;
if hdr['DATE-OBS'][4] == '/':
# Reformat DATE-OBS YYYY/MM/DD -> YYYY-MM-DD
hdr['DATE-OBS'] = hdr['DATE-OBS'][0:4] + '-' + \
hdr['DATE-OBS'][5:7] + '-' + \
hdr['DATE-OBS'][8:10];
elif hdr['DATE-OBS'][2] == '/':
# Reformat DATE-OBS MM/DD/YYYY -> YYYY-MM-DD
hdr['DATE-OBS'] = hdr['DATE-OBS'][6:10] + '-' + \
hdr['DATE-OBS'][0:2] + '-' + \
hdr['DATE-OBS'][3:5];
def get_mjd (hdr, origin=['linux','gps','mjd'], check=2.0,Warning=True):
'''
Return the MJD-OBS as computed either by Linux time
TIME_S + 1e-9 * TIME_US (note than TIME_US is actually
nanosec) or by GPS time DATE-OBS + UTC-OBS, or by an
existing keyword 'MJD-OBS'.
'''
# Check input
if type(origin) is not list: origin = [origin];
# Read header silently
try:
mjdu = Time (hdr['DATE-OBS'] + 'T'+ hdr['UTC-OBS'], format='isot', scale='utc').mjd;
except:
mjdu = 0.0;
try:
mjdl = Time (hdr['TIME_S']+hdr['TIME_US']*1e-9,format='unix').mjd;
except:
mjdl = 0.0;
try:
mjd = hdr['MJD-OBS'];
except:
mjd = 0.0;
# Check the difference in [s]
delta = np.abs (mjdu-mjdl) * 24 * 3600;
if (delta > check) & (Warning == True):
log.warning ('IN %s :\n UTC-OBS and TIME are different by %.1f s!! '%(hdr['ORIGNAME'],delta));
# Return the requested one
for o in origin: # if origin in array, then returns result in priority order.
if o == 'linux' and mjdl != 0.0:
return mjdl, (delta > check);
if o == 'gps' and mjdu != 0.0:
return mjdu, (delta > check);
if o == 'mjd' and mjd != 0.0:
return mjd, (delta > check);
return 0.0, None;
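# Example: get_mjd (hdr, origin=['gps','linux']) prefers the DATE-OBS/UTC-OBS
# (GPS) stamp and falls back to the Linux TIME_S/TIME_US stamp; the second
# element of the returned tuple flags a disagreement larger than 'check' [s].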
def loaddir (dirs, uselog=True):
'''
Load the headers of all files mircx*.fit* from
the input list of directory
'''
elog = log.trace ('loaddir');
# Ensure this is a list
if type(dirs) == str:
dirs = [dirs];
# Load all dirs
hdrs = [];
for dir in dirs:
if os.path.isdir (dir) is False:
log.info ('Skip directory (does not exist): '+dir);
continue;
log.info ('Load directory: '+dir);
files = glob.glob (dir+'/mircx*.fits');
files += glob.glob (dir+'/mystic*.fits');
files += glob.glob (dir+'/mircx*.fits.fz');
files += glob.glob (dir+'/mystic*.fits.fz');
files = [ x for x in files if "fibexpmap" not in x ] # remove non-data files.
# Check if any
if len(files) == 0:
log.warning ('No mircx or mystic data files in this directory');
continue;
# Sort them alphabetically
files = sorted (files);
# Load headers
hdrs_here = load (files);
# Append headers in case of multiple directories -- not used...
hdrs.extend (hdrs_here);
return hdrs;
def load (files):
'''
Load the headers of all input files. The following keywords
are added to each header: MJD-OBS, MJD-LOAD and ORIGNAME.
The output is a list of FITS headers.
'''
hdrs = []
# Loop on files
log.info("Number of files to read: %i "%len(files))
log.info("First File: %s "%files[0])
log.info("Last File : %s"%files[-1])
for fn,f in enumerate (files):
try:
# Read compressed file
if f[-7:] == 'fits.fz':
#hdr = pyfits.getheader(f, 1);
hdulist=pyfits.open(f,memmap=False)
hdr=hdulist[1].header.copy()
del hdulist[1].header
hdulist.close()
del hdulist
fnum=int(f[-13:-8]) # might not always be true.
# Read normal file
else:
#hdr = pyfits.getheader(f, 0);
hdulist=pyfits.open(f,memmap=False)
hdr=hdulist[0].header.copy()
del hdulist[0].header # save a little memory along the way.
hdulist.close()
del hdulist
fnum=int(f[-10:-5])
# Add file name
hdr['ORIGNAME'] = f;
hdr['FILENUM'] = fnum;
# Test if FRAME_RATE is in header
if 'MIRC FRAME_RATE' not in hdr and 'EXPOSURE' in hdr:
log.warning ('Assume FRAME_RATE is 1/EXPOSURE');
hdr['MIRC FRAME_RATE'] = 1e3/hdr['EXPOSURE'];
# Test if NBIN is in header
if 'NBIN' not in hdr:
hdr['NBIN'] = 1;
# Check change of card
if 'ENDFR' in hdr:
log.warning ('Old data with ENDFR');
hdr.rename_keyword ('ENDFR','LASTFR');
# Check NBIN
if 'NBIN' not in hdr and hdr['FILETYPE'] != 'FLAT_MAP':
log.warning ('Old data with no NBIN (set to one)');
hdr['NBIN'] = 1;
# Reformat DATE-OBS
clean_date_obs (hdr);
if 'DPOL_ROW' in hdr:
if hdr['DPOL_ROW'] !=0:
# check if config ends with _WOLL and hdr['CONF_NA']
conf_na = hdr['CONF_NA'].strip()
if conf_na[-5:] != '_WOLL': hdr['CONF_NA']=conf_na+'_WOLL'
# Compute MJD from information in header
mjd, temp_flag = get_mjd (hdr, Warning = (counters["gpstime"] == 0));
#mjd, temp_flag = get_mjd (hdr, Warning = True);
if (counters["gpstime"] == 0) & temp_flag:
log.warning("Additional time discrepancy warnings suppressed.")
if (temp_flag):
counters["gpstime"]+=1
# Set in header
hdr['MJD-OBS'] = (mjd, '[mjd] Observing time');
# Add the loading time
hdr['MJD-LOAD'] = (Time.now().mjd, '[mjd] Last loading time (UTC)');
# Check if STS data
if hdr.get ('MIRC STS_IR_FOLD','OUT') == 'IN':
#log.info ('Set OBJECT = STS because STS_IR_FOLD is IN');
hdr['OBJECT'] = 'STS';
counters["sts"] +=1
# Check if ETALON
if hdr.get ('MIRC ARMADA','OUT') == 'IN':
counters["etalon"] +=1
#if hdr['OBJECT'][-1]=='E':
# #log.info ('ETALON is IN for OBJECT');
#else:
# #log.info ('Set OBJECT = OBJECT_E because ETALON is IN');
if hdr['OBJECT'][-1] != 'E':
hdr['OBJECT'] += '_E'; # JDM slightly preferes ETALON_OBJECT... but we will keep
# Append
hdrs.append (hdr);
except (KeyboardInterrupt, SystemExit):
raise;
except Exception as exc:
log.warning ('Cannot get header of '+f+' ('+str(exc)+')');
#progress bar
if fn == len(files)//4:
log.info("PROGRESS 25% Done")
if fn == len(files)//2:
log.info("PROGRESS 50% Done")
if fn == len(files)*3//4:
log.info("PROGRESS 75% Done")
gc.collect()
log.info('Number of files with time discrepancy: %i '%counters['gpstime'])
log.info('Number of files with STS: %i '%counters['sts'])
log.info('Number of files with Etalon: %i '%counters['etalon'])
log.info ('%i headers loaded'%len(hdrs));
return hdrs;
def frame_mjd (hdr):
'''
Compute MJD time for each frame from STARTFR to LASTFR.
Assumig STARTFR has the MJD-OBS and the time between
frame is given by HIERARCH MIRC FRAME_RATE.
'''
# Check consistency
if hdr['LASTFR'] < hdr['STARTFR']:
raise ValueError ('LASTFR is smaller than STARTFR');
# Number of frame since start
nframe = hdr['LASTFR'] - hdr['STARTFR'] + 1;
# If binning
nbin = hdr.get ('NBIN',1);
if nbin > 1:
log.info ('Data are binned by %i'%nbin);
# Build counter
counter = np.arange (0, nframe, nbin);
# Time step between frames in [d]
# with new headers, the HIERRACH is removed from dictionary.
delta = 1./hdr['MIRC FRAME_RATE'] / 24/3600;
# Compute assuming MJD-OBS is time of first frame
mjd = hdr['MJD-OBS'] + delta * counter;
return mjd;
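# Example (hypothetical header): STARTFR=1, LASTFR=100, NBIN=1 and
# MIRC FRAME_RATE=350 Hz give 100 timestamps spaced 1/350 s apart,
# starting at MJD-OBS.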
def match (h1,h2,keys,delta):
'''
    Return True if all keys are the same in header h1
    and header h2, and if the time difference is less
    than delta (s). The keys shall be a list of strings.
'''
# Check all keywords are the same
answer = True;
for k in keys:
answer *= (h1.get(k,None) == h2.get(k,None));
# Check time is close-by
answer *= (np.abs(h1.get('MJD-OBS',0.0) - h2.get('MJD-OBS',0.0))*24.*3600 < delta);
# Ensure binary output
return True if answer else False;
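# Example: match (h1, h2, ['GAIN','CONF_NA'], 30.0) is True only when both
# headers share GAIN and CONF_NA and their MJD-OBS differ by less than 30 s.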
def group (hdrs, mtype, delta=300.0, Delta=300.0, continuous=True, keys=[], logLevel=1):
'''
Group the input headers into list of compatible files.
A new group is started if:
- a file of different type is interleaved,
- the detector or instrument setup is different,
- the time distance between consecutive is larger than delta.
- the total integration is larger than Delta
The output is a list of list.
'''
elog = log.trace ('group_headers');
groups = [[]];
mjd = -10e9;
# Key used to define setup
keys = ['FILETYPE'] + keys;
# Define the regular expression to match file type
regex = re.compile ('^'+mtype+'$');
# Sort by time
hdrs = sorted (hdrs,key=lambda h: h['MJD-OBS']);
# Assume hdrs is sorted
for h in hdrs:
fileinfo = h['ORIGNAME'] + ' (' +h['FILETYPE']+')';
# if different type, start new group and continue
if bool (re.match (regex, h['FILETYPE'])) is False:
if groups[-1] != [] and str2bool (continuous):
groups.append([]);
continue;
# If no previous
if groups[-1] == []:
if logLevel > 4: log.info('New group %s'%fileinfo);
groups[-1].append(h);
continue;
# If no match with last, we start new group
if match (h,groups[-1][-1],keys,delta) is False:
if logLevel > 4: log.info('New group (gap) %s'%fileinfo);
groups.append([h]);
continue;
# If no match with first, we start new group
if match (h,groups[-1][0],keys,Delta) is False:
if logLevel > 4: log.info('New group (integration) %s'%fileinfo);
groups.append([h]);
continue;
# Else, add to current group
if logLevel > 9: log.info('Add file %s'%fileinfo);
groups[-1].append(h);
# Clean from void groups
groups = [g for g in groups if g != []];
# For the BACKGROUND, remove the first file if there is more than 3 files
# because it is often contaminated with light (slow shutter)
# This needs to be more robust for all kinds of shutters. will be done later.
#if mtype == 'BACKGROUND':
# for i in range(np.shape(groups)[0]):
# if np.shape(groups[i])[0] > 3:
# groups[i] = groups[i][1:];
# if logLevel > 4: log.info ('Ignore the first BACKGROUND files (more than 3)');
return groups;
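# Example: group (hdrs, 'DATA', delta=300, Delta=1800, keys=['GAIN']) splits
# the DATA headers into lists sharing the same GAIN, with less than 300 s
# between consecutive files and less than 1800 s from first to last.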
def assoc (h, allh, tag, keys=[], which='closest', required=0, quality=None):
'''
Search for headers with tag and matching criteria
'''
# Keep only the requested tag
atag = [a for a in allh if a['FILETYPE']==tag]
# Keep only the requested criteria
out = [];
for a in atag:
tmp = True;
for k in keys:
tmp *= (h.get(k,None) == a.get(k,None));
if tmp:
out.append(a);
# Keep only the requested quality
l1 = len (out);
if quality is not None:
out = [o for o in out if o.get (HMQ+'QUALITY', 0.0) > quality];
# Check closest
if len (out) > required and which=='closest':
if required < 2:
time_diffs = np.array([o['MJD-OBS'] - h['MJD-OBS'] for o in out])
out = [out[np.abs(time_diffs).argmin()]]
else:
raise NotImplementedError('Not supported yet');
# Check best quality
if len (out) > required and which=='best':
if required < 2:
quality = np.array([o[HMQ+'QUALITY'] for o in out]);
out = [out[np.argmax (quality)]];
else:
raise NotImplementedError('Not supported yet');
# Check required
if len (out) < required:
log.warning ('Cannot find %i %s (%i rejected for quality)'%(required,tag,l1-len(out)))
elif required > 0:
log.info ('Find %i %s (%s ...)'%(len(out),tag,out[0]['ORIGNAME']));
return out
def assoc_flat (h, allh):
'''
Return the best FLAT for a given file. Note that the flat header is return
as a list of one to match the output of 'assoc' function.
'''
# Associate best FLAT based in gain
flats = [a for a in allh if a['FILETYPE']=='FLAT_MAP'];
# Check
if len (flats) < 1:
log.warning ('Cannot find FLAT');
return [];
# Get closest gain
m = np.argmin ([np.abs (h['GAIN'] - f['GAIN']) for f in flats]);
flat = flats[m];
# Return
log.info ('Find 1 FLAT (%s)'%os.path.basename(flat['ORIGNAME']));
return [flat];
def clean_option (opt):
'''
Check options
'''
if opt == 'FALSE': return False;
if opt == 'TRUE': return True;
def check_input (hdrs, required=1, maximum=100000):
'''
Check the input when provided as hdrs
'''
# Ensure a list
if type (hdrs) is not list:
hdrs = [hdrs];
# Check inputs are headers
hdrs = [h for h in hdrs if type(h) is pyfits.header.Header or \
type(h) is pyfits.hdu.compressed.CompImageHeader];
if len(hdrs) < required:
raise ValueError ('Missing mandatory input');
if len(hdrs) > maximum:
raise ValueError ('Too many input');
def rep_nan (val,*rep):
''' Replace nan by value'''
rep = 0.0 if not rep else rep[0];
return val if np.isfinite (val) else rep;
def parse_argopt_catalog (input):
'''
Parse the syntax 'NAME1,d1,e1,NAME2,d2,e2,...'
and return an astropy Table with column NAME,
ISCAL, MODEL_NAME, PARAM1 and PARAM2.
'''
if input == 'name1,diam,err,name2,diam,err':
raise (ValueError('No calibrators specified'));
# Catalog is a list
if input[-5:] == '.list':
log.info ('Calibrators given as list');
catalog = ascii.read (input);
return catalog;
# Check it is a multiple of 3
values = input.split(',');
    if len (values) % 3 != 0:
raise (ValueError('Wrong syntax for calibrators'));
# Parse each star
names = np.array (values[0::3]);
diam = np.array (values[1::3]).astype(float);
ediam = np.array (values[2::3]).astype(float);
# Create catalog
catalog = Table ();
catalog['NAME'] = names;
catalog['ISCAL'] = 'CAL';
catalog['MODEL_NAME'] = 'UD_H';
catalog['PARAM1'] = diam;
catalog['PARAM2'] = ediam;
return catalog;
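# Example (hypothetical star names): two uniform-disk calibrators of
# 0.5 +/- 0.05 mas and 0.8 +/- 0.1 mas:
# catalog = parse_argopt_catalog ('HD1234,0.5,0.05,HD5678,0.8,0.1');
# catalog['NAME'] -> ['HD1234', 'HD5678'] ; catalog['PARAM1'] -> [0.5, 0.8]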
def update_diam_from_jmmc (catalog):
'''
For all stars with diam=0 and err=0 in the catalog, we try
to get the information from the JMMC SearchCal.
FIXME: this is not working anymore, need to deal with the new
format for catalog based on astropy Table.
'''
# Init
searchCal = 'http://apps.jmmc.fr/~sclws/getstar/sclwsGetStarProxy.php';
voTableToTsv = os.path.dirname (log.__file__) + '/sclguiVOTableToTSV.xsl';
# Loop on stars in catalog, query for the one
# with err = 0 and diam = 0
for c in catalog:
if c[1] == 0 and c[2] == 0:
try:
# Call online SearchCal
log.info ('Query JMMC SearchCal for star '+c[0]);
os.system ('wget '+searchCal+'?star='+c[0]+' -O mircx_searchcal.vot -o mircx_searchcal.log');
# Not found
if 'has not been found' in open('mircx_searchcal.vot').read():
log.warning (c[0]+' has not been found');
continue;
# Convert and parse
os.system ('xsltproc '+voTableToTsv+' mircx_searchcal.vot > mircx_searchcal.tsv');
answer = [l for l in csv.reader(open('mircx_searchcal.tsv'),delimiter='\t') if l[0][0] != '#'];
c[1] = float (answer[1][answer[0].index('UD_H')]);
c[2] = float (answer[1][answer[0].index('e_LDD')]);
log.info ('%s found %.4f +- %.4f mas'%(c[0],c[1],c[2]));
except:
log.error ('Cannot reach JMMC SearchCal or parse answer');
def get_sci_cal (hdrs, catalog):
'''
Spread the headers from SCI and CAL according to the
entries defined in catalog. Catalog should be an astropy
Table with the columns NAME, ISCAL, PARAM1 and PARAM2.
'''
# Check format of catalog
try:
t = catalog['NAME'];
t = catalog['ISCAL'];
t = catalog['PARAM1'];
t = catalog['PARAM2'];
except:
log.error ('Calibrators not specified correctly');
raise ValueError ('Calibrators not specified correctly');
# Check if enough
if len (catalog) == 0:
log.error ('No valid calibrators');
raise ValueError ('No valid calibrators');
# Get values
name,iscal = catalog['NAME'], catalog['ISCAL'];
# Loop on input headers
scis, cals = [], [];
for h in hdrs:
if h['FILETYPE'] != 'OIFITS':
continue;
# Find where in catalog
idx = np.where (name == h['OBJECT'])[0];
if len(idx) > 0 and iscal[idx[0]] == 'CAL':
idx = idx[0];
log.info ('%s (%s) -> OIFITS_CAL (%s, %f,%f)'%(h['ORIGNAME'],h['OBJECT'], \
catalog[idx]['MODEL_NAME'],catalog[idx]['PARAM1'],catalog[idx]['PARAM2']));
h['FILETYPE'] += '_CAL';
h[HMP+'CALIB MODEL_NAME'] = (catalog[idx]['MODEL_NAME']);
h[HMP+'CALIB PARAM1'] = (catalog[idx]['PARAM1']);
h[HMP+'CALIB PARAM2'] = (catalog[idx]['PARAM2']);
cals.append (h);
else:
log.info ('%s (%s) -> OIFITS_SCI'%(h['ORIGNAME'],h['OBJECT']));
h['FILETYPE'] += '_SCI';
scis.append (h);
return scis,cals;
def p2h (phdrs): # convert pandas DataFrame to our standard header list of dictionaries
hdr0=[]
allh=phdrs.transpose().to_dict()
keylist=list(allh.keys())
for key in keylist:
temp=allh[key]
hdr0.append(temp)
return hdr0;
```
#### File: jdmonnier/mircx_mystic/log.py
```python
from timeit import default_timer as timer
import time, sys, os, logging, psutil;
import traceback;
# Load colors
try:
import colorama as col
except:
RED = '';
MAGENTA = '';
RESET = '';
BLUE = '';
GREEN = '';
else:
RED = col.Fore.RED;
MAGENTA = col.Fore.MAGENTA;
RESET = col.Fore.RESET;
BLUE = col.Fore.BLUE;
GREEN = col.Fore.GREEN;
# Create the logger
logger = logging.getLogger ('mircx_pipeline');
logger.setLevel (logging.INFO);
# Create the handler for stream
logStream = logging.StreamHandler();
logger.addHandler (logStream);
# Set the formater for this handler
formatter = logging.Formatter (
"[%(color)s%(levelname)-7.7s"+RESET+"] %(asctime)s.%(msecs)03d [%(memory)s]: %(message)s",
datefmt='%Y-%m-%dT%H:%M:%S');
logStream.setFormatter (formatter);
logStream.setLevel (logging.INFO);
def setFile (filename):
'''
Set a log file. The file is ensured
to be group-writable.
'''
for h in logger.handlers:
if type(h) == logging.FileHandler:
logger.removeHandler (h);
# Create logfile and set permission
info ('Set logFile: '+filename);
open (filename, 'w').close();
os.chmod (filename,0o666);
# Set this file as log (mode 'append')
# since file already exists
logfile = logging.FileHandler (filename, mode='a');
logfile.setLevel (logging.INFO);
formatter = logging.Formatter ("[%(levelname)-7.7s] "
"%(asctime)s.%(msecs)03d [%(memory)s]: %(message)s",
datefmt='%Y-%m-%dT%H:%M:%S');
logfile.setFormatter (formatter);
logger.addHandler (logfile);
def closeFile ():
'''
Stop logging in files
'''
for h in logger.handlers:
if type(h) == logging.FileHandler:
logger.removeHandler (h);
def memory ():
'''
Get memory usage of the process, as a
human-readable string
'''
value = psutil.Process(os.getpid()).memory_info().rss;
if value >= 1e8: return '%5.2fG'%(value/1e9);
if value >= 1e5: return '%5.2fM'%(value/1e6);
return '%5.2fk'%(value/1e3);
# Logging functions
def info(msg):
mem = memory ();
logger.info (msg, extra={'color':BLUE,'memory':mem});
def warning(msg):
mem = memory ();
logger.warning (msg, extra={'color':MAGENTA,'memory':mem});
def check(flag,msg):
mem = memory ();
if flag:
logger.warning (msg, extra={'color':MAGENTA,'memory':mem});
else:
logger.info (msg, extra={'color':BLUE,'memory':mem});
def error(msg):
mem = memory ();
logger.error (traceback.format_exc(), extra={'color':RED,'memory':mem});
logger.error (msg, extra={'color':RED,'memory':mem});
def debug(msg):
mem = memory ();
logger.debug (msg, extra={'color':RED,'memory':mem});
# Trace class (measure time until killed)
class trace:
def __init__(self, funcname,color=True):
self.color = GREEN if color else BLUE;
self.funcname = funcname;
mem = memory ();
logger.info('Start '+funcname,extra={'color':self.color,'memory':mem});
self.stime = timer();
def __del__(self):
if self.stime is not None and self.funcname is not None:
mem = memory ();
msg = 'End '+self.funcname+' in %.2fs'%(timer()-self.stime);
logger.info (msg,extra={'color':self.color,'memory':mem});
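# Minimal usage sketch (assuming this module is imported as 'log', as is
# done elsewhere in the package):
# log.setFile ('night.log');
# t = log.trace ('my_reduction'); # logs "Start my_reduction"
# log.info ('Doing work');
# del t; # logs "End my_reduction in x.xxs"
# log.closeFile ();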
```
#### File: jdmonnier/mircx_mystic/signal.py
```python
import numpy as np;
import matplotlib.pyplot as plt;
import scipy;
from scipy.ndimage import gaussian_filter, uniform_filter, median_filter;
from scipy.special import gammainc, gamma;
from scipy.interpolate import interp1d
from . import log, files, headers, setup, oifits;
def airy (x):
''' Airy function, with its first zero at x = 1.22'''
return 2.*scipy.special.jn (1,np.pi*x) / (np.pi*x);
def gaussian_filter_cpx (input,sigma,**kwargs):
''' Gaussian filter of a complex array '''
return gaussian_filter (input.real,sigma,**kwargs) + \
gaussian_filter (input.imag,sigma,**kwargs) * 1.j;
def uniform_filter_cpx (input,sigma,**kwargs):
''' Uniform filter of a complex array '''
return uniform_filter (input.real,sigma,**kwargs) + \
uniform_filter (input.imag,sigma,**kwargs) * 1.j;
def getwidth (curve, threshold=None):
'''
Compute the width of curve around its maximum,
given a threshold. Return the tuple (center, half-width).
'''
if threshold is None:
threshold = 0.5*np.max (curve);
# Find rising point
f = np.argmax (curve > threshold) - 1;
if f == -1:
log.warning ('Width detected outside the spectrum');
first = 0;
else:
first = f + (threshold - curve[f]) / (curve[f+1] - curve[f]);
# Find lowering point
l = len(curve) - np.argmax (curve[::-1] > threshold) - 1;
if l == len(curve)-1:
log.warning ('Width detected outside the spectrum');
last = l;
else:
last = l + (threshold - curve[l]) / (curve[l+1] - curve[l]);
return 0.5*(last+first), 0.5*(last-first);
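# Example: for a symmetric triangular curve the half-maximum crossings
# interpolate to indices 1.0 and 3.0, so the center is 2.0 and the
# half-width is 1.0:
# getwidth (np.array ([0., 0.5, 1.0, 0.5, 0.])) # -> (2.0, 1.0)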
def bootstrap_matrix (snr, gd):
'''
Compute the best SNR and GD of each baseline when considering
also the bootstrapping capability of the array.
snr and gd shall be of shape (...,nb)
Return (snr_b, gd_b) of same size, but including bootstrap.
'''
log.info ('Bootstrap baselines with linear matrix');
# Use a power to implement a soft min/max of the SNR
power = 4.0;
# Reshape
shape = snr.shape;
snr = snr.reshape ((-1,shape[-1]));
gd = gd.reshape ((-1,shape[-1]));
ns,nb = gd.shape;
# Ensure no zero and no nan
snr[~np.isfinite (snr)] = 0.0;
snr = np.maximum (snr,1e-1);
snr = np.minimum (snr,1e3);
log.info ('Compute OPD_TO_OPD');
# The OPL_TO_OPD matrix
OPL_TO_OPD = setup.beam_to_base;
# OPD_TO_OPL = (OPL_TO_OPD^T.snr.OPL_TO_OPD)^-1 . OPL_TO_OPD^T.W_OPD
# o is output OPL
JtW = np.einsum ('tb,sb->stb',OPL_TO_OPD.T,snr**power);
JtWJ = np.einsum ('stb,bo->sto',JtW,OPL_TO_OPD);
JtWJ_inv = np.array([ np.linalg.pinv (JtWJ[s]) for s in range(ns)]);# 'sot'
OPD_TO_OPL = np.einsum ('sot,stb->sob', JtWJ_inv, JtW);
# OPD_TO_OPD = OPL_TO_OPD.OPD_TO_OPL (m is output OPD)
OPD_TO_OPD = np.einsum ('mo,sob->smb', OPL_TO_OPD, OPD_TO_OPL);
log.info ('Compute gd_b and snr_b');
# GDm = OPD_TO_OPD . GD
gd_b = np.einsum ('smb,sb->sm',OPD_TO_OPD,gd);
# Cm = OPD_TO_OPD . C_OPD . OPD_TO_OPD^T
OPD_TO_OPD_W = np.einsum ('smb,sb->smb',OPD_TO_OPD,snr**-power);
cov_b = np.einsum ('smb,snb->smn',OPD_TO_OPD_W, OPD_TO_OPD);
# Reform SNR from covariance
snr_b = np.diagonal (cov_b, axis1=1, axis2=2)**-(1./power);
snr_b[snr_b < 1e-2] = 0.0;
# Reshape
snr = snr.reshape (shape);
gd = gd.reshape (shape);
snr_b = snr_b.reshape (shape);
gd_b = gd_b.reshape (shape);
return (snr_b,gd_b);
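# Usage sketch (assuming the standard 6-telescope, 15-baseline setup used
# throughout this pipeline); shapes are preserved, values are bootstrapped:
# snr = np.random.uniform (1., 100., (10, 15));
# gd = np.random.uniform (-5., 5., (10, 15));
# snr_b, gd_b = bootstrap_matrix (snr, gd);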
def bootstrap_triangles (snr,gd):
'''
Compute the best SNR and GD of each baseline when considering
also the bootstrapping capability of the array.
snr and gd shall be of shape (...,nb)
Return (snr_b, gd_b) of same size, but including bootstrap.
'''
log.info ('Bootstrap baselines with triangles');
# Reshape
shape = snr.shape;
snr = snr.reshape ((-1,shape[-1]));
gd = gd.reshape ((-1,shape[-1]));
ns,nb = gd.shape;
# Ensure no zero and no nan
snr[~np.isfinite (snr)] = 0.0;
snr = np.maximum (snr,1e-1);
snr = np.minimum (snr,1e3);
# Create output
gd_b = gd.copy ();
snr_b = snr.copy ();
# Sign of baseline in triangles
sign = np.array ([1.0,1.0,-1.0]);
# Loop several times over the triplets to also
# get the baselines tracked by quadruplets.
for i in range (7):
for tri in setup.triplet_base ():
for s in range (ns):
i0,i1,i2 = np.argsort (snr_b[s,tri]);
# Set SNR as the worst of the two best
snr_b[s,tri[i0]] = snr_b[s,tri[i1]];
# Set the GD as the sum of the two best
mgd = gd_b[s,tri[i1]] * sign[i1] + gd_b[s,tri[i2]] * sign[i2];
gd_b[s,tri[i0]] = - mgd * sign[i0];
# Reshape
snr = snr.reshape (shape);
gd = gd.reshape (shape);
snr_b = snr_b.reshape (shape);
gd_b = gd_b.reshape (shape);
return (snr_b,gd_b);
def bootstrap_triangles_jdm (snr,gd):
'''
MIRC/JDM Method: Compute the best SNR and GD of each baseline when considering
also the boostraping capability of the array.
snr and gd shall be of shape (...,nb)
Return (snr_b, gd_b) of same size, but including bootstrap.
'''
log.info ('Bootstrap baselines with triangles using MIRC/JDM method');
w=snr.copy()
opd0=gd.copy()
ns,nf,ny,nb=snr.shape
a=np.zeros((ns,nf,ny,5,5))
b=np.zeros((ns,nf,ny,5))
gd_jdm = np.zeros((ns,nf,ny,15))
# Reshape
shape = snr.shape;
snr = snr.reshape ((-1,shape[-1]));
gd = gd.reshape ((-1,shape[-1]));
ns,nb = gd.shape;
# Ensure no zero and no nan
snr[~np.isfinite (snr)] = 0.0;
snr = np.maximum (snr,1e-1);
snr = np.minimum (snr,1e3);
# Create output
gd_b = gd.copy ();
snr_b = snr.copy ();
# Sign of baseline in triangles
sign = np.array ([1.0,1.0,-1.0]);
# Loop several times over the triplets to also
# get the baselines tracked by quadruplets.
for i in range (7):
for tri in setup.triplet_base ():
for s in range (ns):
i0,i1,i2 = np.argsort (snr_b[s,tri]);
# Set SNR as the worst of the two best
snr_b[s,tri[i0]] = snr_b[s,tri[i1]];
# Set the GD as the sum of the two best
mgd = gd_b[s,tri[i1]] * sign[i1] + gd_b[s,tri[i2]] * sign[i2];
gd_b[s,tri[i0]] = - mgd * sign[i0];
# Reshape
snr = snr.reshape (shape);
gd = gd.reshape (shape);
snr_b = snr_b.reshape (shape);
gd_b = gd_b.reshape (shape);
OPD=opd0.copy()
OPD=np.where(w <=1., 0.0, OPD)
w=np.where(w <=1., .01, w)
#inzero=np.argwhere(w <= 100.)
#OPD[inzero]=0.0
#w[inzero]=.01
opd12=OPD[:,:,:,0];
opd13=OPD[:,:,:,1];
opd14=OPD[:,:,:,2];
opd15=OPD[:,:,:,3];
opd16=OPD[:,:,:,4];
opd23=OPD[:,:,:,5];
opd24=OPD[:,:,:,6];
opd25=OPD[:,:,:,7];
opd26=OPD[:,:,:,8];
opd34=OPD[:,:,:,9];
opd35=OPD[:,:,:,10];
opd36=OPD[:,:,:,11];
opd45=OPD[:,:,:,12];
opd46=OPD[:,:,:,13];
opd56=OPD[:,:,:,14];
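# Small, distinct offsets on each weight below (presumably tie-breakers so
# the 5x5 linear solve stays non-degenerate when several baselines sit at
# the same floor weight):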
w12=w[:,:,:,0]+0.001;
w13=w[:,:,:,1]+0.002;
w14=w[:,:,:,2]+0.005;
w15=w[:,:,:,3]+0.007;
w16=w[:,:,:,4]+0.003;
w23=w[:,:,:,5]+0.004;
w24=w[:,:,:,6]+0.008;
w25=w[:,:,:,7]+0.009;
w26=w[:,:,:,8]+0.002;
w34=w[:,:,:,9]+0.003;
w35=w[:,:,:,10]+0.006;
w36=w[:,:,:,11]+0.008;
w45=w[:,:,:,12]+0.009;
w46=w[:,:,:,13]+0.004;
w56=w[:,:,:,14]+0.005;
a[:,:,:,0,0] = w12+w23+w24+w25+w26;
a[:,:,:,1,1] = w13+w23+w34+w35+w36;
a[:,:,:,2,2] = w14+w24+w34+w45+w46;
a[:,:,:,3,3] = w15+w25+w35+w45+w56;
a[:,:,:,4,4] = w16+w26+w36+w46+w56;
a[:,:,:,0,1] = -w23;
a[:,:,:,0,2] = -w24;
a[:,:,:,0,3] = -w25;
a[:,:,:,0,4] = -w26;
a[:,:,:,1,0] = -w23;
a[:,:,:,1,2] = -w34;
a[:,:,:,1,3] = -w35;
a[:,:,:,1,4] = -w36;
a[:,:,:,2,0] = -w24;
a[:,:,:,2,1] = -w34;
a[:,:,:,2,3] = -w45;
a[:,:,:,2,4] = -w46;
a[:,:,:,3,0] = -w25;
a[:,:,:,3,1] = -w35;
a[:,:,:,3,2] = -w45;
a[:,:,:,3,4] = -w56;
a[:,:,:,4,0] = -w26;
a[:,:,:,4,1] = -w36;
a[:,:,:,4,2] = -w46;
a[:,:,:,4,3] = -w56;
b[:,:,:,0] = w12*opd12 - w23*opd23 - w24*opd24 - w25*opd25 - w26*opd26;
b[:,:,:,1] = w13*opd13 + w23*opd23 - w34*opd34 - w35*opd35 - w36*opd36;
b[:,:,:,2] = w14*opd14 + w24*opd24 + w34*opd34 - w45*opd45 - w46*opd46;
b[:,:,:,3] = w15*opd15 + w25*opd25 + w35*opd35 + w45*opd45 - w56*opd56;
b[:,:,:,4] = w16*opd16 + w26*opd26 + w36*opd36 + w46*opd46 + w56*opd56;
#invert!
result=np.linalg.solve(a, b)
gd_jdm[:,:,:,0]=result[:,:,:,0]
gd_jdm[:,:,:,1]=result[:,:,:,1]
gd_jdm[:,:,:,2]=result[:,:,:,2]
gd_jdm[:,:,:,3]=result[:,:,:,3]
gd_jdm[:,:,:,4]=result[:,:,:,4]
gd_jdm[:,:,:,5]=result[:,:,:,1]-result[:,:,:,0]
gd_jdm[:,:,:,6]=result[:,:,:,2]-result[:,:,:,0]
gd_jdm[:,:,:,7]=result[:,:,:,3]-result[:,:,:,0]
gd_jdm[:,:,:,8]=result[:,:,:,4]-result[:,:,:,0]
gd_jdm[:,:,:,9]=result[:,:,:,2]-result[:,:,:,1]
gd_jdm[:,:,:,10]=result[:,:,:,3]-result[:,:,:,1]
gd_jdm[:,:,:,11]=result[:,:,:,4]-result[:,:,:,1]
gd_jdm[:,:,:,12]=result[:,:,:,3]-result[:,:,:,2]
gd_jdm[:,:,:,13]=result[:,:,:,4]-result[:,:,:,2]
gd_jdm[:,:,:,14]=result[:,:,:,4]-result[:,:,:,3]
return (snr_b,gd_jdm,result);
def gd_tracker(opds_trial,input_snr,gd_key):
'''
Used for fitting a self-consistent set of opds. Input 5 telescope delays
and compare to the SNR vectors in OPD space.
Return a global metric based on logs of the SNRs with thresholds.
'''
#log.info ('Bootstrap baselines with triangles using MIRC/JDM method');
# probably replace as matrix in future for vectorizing.
gd_jdm,snr_jdm = get_gds(opds_trial,input_snr,gd_key)
#fit_metric = np.sum(np.log10(snr_jdm))
fit_metric = np.sum(snr_jdm)
return (-fit_metric);
def get_gds(topds,input_snr,gd_key):
'''
Used for fitting a self-consistent set of opds. Input 5 telescope delays
and compare to the SNR vectors in OPD space.
Return the gds and snrs for a self-consistent set of delays.
'''
nscan,nb=input_snr.shape
gd_jdm=np.zeros(nb)
snr_jdm=np.zeros(nb)
gd_jdm[0]=topds[0]
gd_jdm[1]=topds[1]
gd_jdm[2]=topds[2]
gd_jdm[3]=topds[3]
gd_jdm[4]=topds[4]
gd_jdm[5]=topds[1]-topds[0]
gd_jdm[6]=topds[2]-topds[0]
gd_jdm[7]=topds[3]-topds[0]
gd_jdm[8]=topds[4]-topds[0]
gd_jdm[9]=topds[2]-topds[1]
gd_jdm[10]=topds[3]-topds[1]
gd_jdm[11]=topds[4]-topds[1]
gd_jdm[12]=topds[3]-topds[2]
gd_jdm[13]=topds[4]-topds[2]
gd_jdm[14]=topds[4]-topds[3]
# interpolate into the snr.
for i in range(nb):
#snr_func=interp1d(gd_key,input_snr[:,i],kind='cubic',bounds_error=False,fill_value=(input_snr[:,i]).min(),assume_sorted=True)
snr_func=interp1d(gd_key,input_snr[:,i],kind='cubic',bounds_error=False,fill_value=1.,assume_sorted=True)
snr_jdm[i]=snr_func(gd_jdm[i])
return(gd_jdm,snr_jdm)
def get_gd_gravity(topds, bestsnr_snrs,bestsnr_indices,softlength=2.,nscan=None):
'''
Used for fitting a self-consistent set of opds. Input 5 telescope delays
and compare to the SNR vectors in OPD space.
Return the gds and snrs for a self-consistent set of delays.
topds = (nramps,nframes, ntels=5)
bestsnr_snrs = (nramps, nframes, npeaks, nbaselines )
bestsnr_indices = (nramps, nframes, npeaks, nbaselines ) ; integers
'''
nr,nf,npeak,nt=topds.shape
nr,nf,npeak,nb=bestsnr_snrs.shape
OPL_TO_OPD = setup.beam_to_base;
temp = setup.base_beam ()
#photo_power = photo[:,:,:,setup.base_beam ()];
#totflux = np.nansum(photo,axis=(1,3))
#bp=np.nanmean(bias_power,axis=2)
topds1= topds[:,:,:,setup.base_beam ()]
gd_jdm= topds1[:,:,:,:,1] - topds1[:,:,:,:,0]
# If gd_jdm > nscan/2 then wraparound; but does the sign work in the force equation? Will have to check.
##if nscan != None:
# gd_jdm= np.where( gd_jdm >nscan/2, gd_jdm-nscan ,gd_jdm)
# gd_jdm= np.where( gd_jdm < -nscan/2, nscan + gd_jdm, gd_jdm)
# Alternatively, instead of adding in a discontinuity, we could copy the force centers +/- nscan and apply
# a global down-weight.
if nscan is not None:
bestsnr_snrs=np.concatenate((bestsnr_snrs,bestsnr_snrs,bestsnr_snrs),axis=2)
bestsnr_indices=np.concatenate((bestsnr_indices,bestsnr_indices+nscan,bestsnr_indices-nscan),axis=2)
bestsnr_snrs = bestsnr_snrs*np.exp(-.5*((bestsnr_indices/(nscan/2.))**2))
snr_wt = np.log10(np.maximum(bestsnr_snrs,1.0))
#snr_wt = np.sqrt(bestsnr_snrs)
gd_forces=np.empty( (nr,nf,1,0))
gd_pot =np.empty( (nr,nf,1,0))
gd_offsets =gd_jdm-bestsnr_indices
for i_b in range(nt):
factor0=OPL_TO_OPD[:,i_b][None,None,None,:]
F0 = np.sum(factor0*snr_wt *np.sign(gd_offsets)*softlength**2/ (gd_offsets**2+softlength**2) ,axis=(2,3))
gd_forces =np.append(gd_forces,F0[:,:,None,None],axis=3)
F1 = np.sum(-2*np.abs(factor0)*snr_wt *softlength/ np.sqrt(gd_offsets**2+softlength**2) ,axis=(2,3)) # approximate!
gd_pot = np.append(gd_pot,F1[:,:,None,None],axis=3)
return(gd_forces,gd_pot,gd_jdm )
def topd_to_gds(topds):
'''
Used for fitting a self-consistent set of opds. Input the telescope delays
and compare to the SNR vectors in OPD space.
Return the gds for a self-consistent set of delays.
topds = (nramps,nframes, ntels = 6)
bestsnr_snrs = (nramps, nframes, npeaks, nbaselines )
bestsnr_indices = (nramps, nframes, npeaks, nbaselines ) ; integers
'''
#photo_power = photo[:,:,:,setup.base_beam ()];
#totflux = np.nansum(photo,axis=(1,3))
#bp=np.nanmean(bias_power,axis=2)
topds1= topds[:,:,:,setup.base_beam ()]
gd_jdm= topds1[:,:,:,:,0] - topds1[:,:,:,:,1]
return(gd_jdm)
def psd_projection (scale, freq, freq0, delta0, data):
'''
Project the PSD into a scaled theoretical model,
Return the merit function 1. - D.M / sqrt(D.D*M.M)
'''
# Scale the input frequencies
freq_s = freq * scale;
# Compute the model of PSD
model = np.sum (np.exp (- (freq_s[:,None] - freq0[None,:])**2 / delta0**2), axis=-1);
if data is None:
return model;
# Return the merit function from the normalised projection
weight = np.sqrt (np.sum (model * model) * np.sum (data * data));
return 1. - np.sum (model*data) / weight;
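# Usage sketch: fit the spectral scaling by minimising the projection merit
# function; scipy is already imported by this module and
# scipy.optimize.minimize_scalar is a standard call:
# from scipy.optimize import minimize_scalar;
# res = minimize_scalar (psd_projection, args=(freq, freq0, delta0, data), bounds=(0.8, 1.2), method='bounded');
# best_scale = res.x;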
def decoherence_free (x, vis2, cohtime, expo):
'''
Decoherence loss due to phase jitter, from Monnier equation:
vis2*2.*cohtime/(expo*x) * ( igamma(1./expo,(x/cohtime)^(expo))*gamma(1./expo) -
(cohtime/x)*gamma(2./expo)*igamma(2./expo,(x/cohtime)^(expo)) )
vis2 is the coherence without jitter, cohtime is the coherence time, expo is the exponent
of the turbulent jitter (5/3 for Kolmogorov)
'''
xc = x/cohtime;
xce = (xc)**expo;
y = gammainc (1./expo, xce) * gamma (1./expo) - gamma (2./expo) / xc * gammainc (2./expo, xce);
y *= 2. * vis2 / expo / xc;
return y;
def decoherence (x, vis2, cohtime):
'''
decoherence function with a fixed exponent
'''
expo = 1.5;
xc = x/cohtime;
xce = (xc)**expo;
y = gammainc (1./expo, xce) * gamma (1./expo) - gamma (2./expo) / xc * gammainc (2./expo, xce);
y *= 2. * vis2 / expo / xc;
return y;
```
|
{
"source": "jdmoorman/clapsolver",
"score": 2
}
|
#### File: clapsolver/benchmarks/bench_clap.py
```python
import argparse
import numpy as np
import pyperf
from utils import (
geometric_matrix,
machol_wien_matrix,
randint_matrix,
random_machol_wien_matrix,
uniform_matrix,
)
def get_solvers():
from laptools.clap import costs
from laptools.clap_naive import costs as costs_naive
return {"naive": costs_naive, "dynamic": costs}
def time_func(n_inner_loops, solver, shape, type):
# Note: If no matrix type is indicated, then the matrix is uniformly random
if type == "uniform":
cost_matrix = uniform_matrix(shape)
elif type == "randint":
cost_matrix = randint_matrix(shape)
elif type == "geometric":
cost_matrix = geometric_matrix(shape)
elif type == "MW":
cost_matrix = machol_wien_matrix(shape)
elif type == "random_MW":
cost_matrix = random_machol_wien_matrix(shape)
else:
cost_matrix = np.random.random(shape)
t0 = pyperf.perf_counter()
for i in range(n_inner_loops):
solver(cost_matrix)
return pyperf.perf_counter() - t0
def get_bench_name(size, type, solver_name):
return "{}x{}-{}-{}".format(size[0], size[1], type, solver_name)
def parse_args(benchopts):
parser = argparse.ArgumentParser()
parser.add_argument(
"--min-row-size-pow",
type=int,
metavar="POW",
default=1,
help="Smallest number of rows is 2^POW.",
)
parser.add_argument(
"--min-col-size-pow",
type=int,
metavar="POW",
default=1,
help="Smallest number of cols is 2^POW.",
)
parser.add_argument(
"--max-row-size-pow",
type=int,
metavar="POW",
default=2,
help="Largest number of rows is 2^POW.",
)
parser.add_argument(
"--max-col-size-pow",
type=int,
metavar="POW",
default=2,
help="Largest number of cols is 2^POW.",
)
parser.add_argument(
"--matrix-type",
type=str,
metavar="X",
default="uniform",
help="The matrix is of type X.",
)
return parser.parse_args(benchopts)
def add_cmdline_args(cmd, args):
cmd.append("--")
cmd.extend(args.benchopts)
def main():
runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
runner.argparser.add_argument("benchopts", nargs="*")
args = parse_args(runner.parse_args().benchopts)
solvers = get_solvers()
sizes = [
(n_rows, n_cols)
for n_rows in 2 ** np.arange(args.min_row_size_pow, args.max_row_size_pow + 1)
for n_cols in 2 ** np.arange(args.min_col_size_pow, args.max_col_size_pow + 1)
]
type = args.matrix_type
for size in sizes:
for solver_name, solver_func in solvers.items():
bench_name = get_bench_name(size, type, solver_name)
runner.bench_time_func(bench_name, time_func, solver_func, size, type)
if __name__ == "__main__":
main()
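# Usage sketch (pyperf forwards everything after "--" to parse_args above;
# -o is pyperf's own output option):
# python bench_clap.py -o bench.json -- --min-row-size-pow 2 --max-row-size-pow 6 --matrix-type geometric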
```
#### File: jdmoorman/clapsolver/setup.py
```python
import platform
import sys
import setuptools
from setuptools import Extension, find_packages, setup
from setuptools.command.build_ext import build_ext
with open("README.md") as readme_file:
readme = readme_file.read()
test_requirements = [
"codecov",
"pytest",
"pytest-cov",
]
docs_requirements = [
"sphinx==1.8.5",
]
setup_requirements = [
"numpy",
"pytest-runner",
"pybind11>=2.5.0",
]
perf_requirements = [
"pyperf",
"matplotlib",
"numpy",
"scipy",
"munkres",
"lap",
"lapsolver",
"lapjv",
]
dev_requirements = [
*test_requirements,
*docs_requirements,
*setup_requirements,
*perf_requirements,
"pre-commit",
"bumpversion>=0.5.3",
"ipython>=7.5.0",
"tox>=3.5.2",
"twine>=1.13.0",
"wheel>=0.33.1",
]
requirements = [
"numpy",
"scipy",
"numba",
]
extra_requirements = {
"test": test_requirements,
"docs": docs_requirements,
"setup": setup_requirements,
"dev": dev_requirements,
"perf": perf_requirements,
"all": [
*requirements,
*test_requirements,
*docs_requirements,
*setup_requirements,
*dev_requirements,
*perf_requirements,
],
}
class get_pybind_include(object):
"""Helper class to determine the pybind11 include path
The purpose of this class is to postpone importing pybind11
until it is actually installed, so that the ``get_include()``
method can be invoked. """
def __str__(self):
import pybind11
return pybind11.get_include()
class get_numpy_include(object):
"""Same as ``get_pybind_include``, but for ``numpy``"""
def __str__(self):
import numpy
return numpy.get_include()
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
import os
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".cpp", delete=False) as f:
f.write("int main (int argc, char **argv) { return 0; }")
fname = f.name
try:
compiler.compile([fname], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
finally:
try:
os.remove(fname)
except OSError:
pass
return True
def cpp_flag(compiler):
"""Return the -std=c++[11/14/17] compiler flag.
The newest available standard is preferred over c++11.
"""
flags = ["-std=c++17", "-std=c++14", "-std=c++11"]
for flag in flags:
if has_flag(compiler, flag):
return flag
raise RuntimeError("Unsupported compiler -- at least C++11 support " "is needed!")
class BuildExt(build_ext):
"""A custom build extension for adding compiler-specific options."""
c_opts = {
"msvc": ["/EHsc", "/std:c++latest", "/arch:AVX2"],
"unix": ["-march=native", "-ftree-vectorize"],
}
l_opts = {
"msvc": [],
"unix": [],
}
if sys.platform == "darwin":
darwin_opts = ["-stdlib=libc++", "-mmacosx-version-min=10.7"]
c_opts["unix"] += darwin_opts
l_opts["unix"] += darwin_opts
def build_extensions(self):
ct = self.compiler.compiler_type
opts = self.c_opts.get(ct, [])
link_opts = self.l_opts.get(ct, [])
if ct == "unix":
opts.append(cpp_flag(self.compiler))
for ext in self.extensions:
ext.define_macros = [
("VERSION_INFO", '"{}"'.format(self.distribution.get_version()))
]
ext.extra_compile_args = opts
ext.extra_link_args = link_opts
build_ext.build_extensions(self)
setup(
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research ",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
],
cmdclass={"build_ext": BuildExt},
description="Fast constrained linear assignment problem (CLAP) solvers",
ext_modules=[
Extension(
"_augment",
sorted(
["src/cpp/_augment.cpp"]
), # Sort input source files to ensure bit-for-bit reproducible builds
include_dirs=[get_pybind_include()], # Path to pybind11 headers
language="c++",
),
Extension(
"py_lapjv",
sources=["src/cpp/py_lapjv.cpp"],
include_dirs=[get_numpy_include(), "src/cpp"],
language="c++",
),
],
extras_require=extra_requirements,
install_requires=requirements,
license="MIT License",
long_description=readme,
long_description_content_type="text/markdown",
include_package_data=True,
keywords="laptools",
name="laptools",
packages=find_packages(where="src"),
package_dir={"": "src"},
python_requires=">=3.6",
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/jdmoorman/laptools",
# Do not edit this string manually, always use bumpversion
# Details in CONTRIBUTING.rst
version="0.2.6",
zip_safe=False,
)
```
#### File: src/laptools/clap.py
```python
import numpy as np
from py_lapjv import lapjv
from . import lap
from ._util import one_hot
def costs(cost_matrix):
"""Solve a constrained linear sum assignment problem for each entry.
The output of this function is equivalent to, but significantly more
efficient than,
>>> def costs(cost_matrix):
... total_costs = np.empty_like(cost_matrix)
... num_rows, num_cols = cost_matrix.shape
... for i in range(num_rows):
... for j in range(num_cols):
... total_costs[i, j] = clap.cost(i, j, cost_matrix)
... return total_costs
Parameters
----------
cost_matrix : 2darray
A matrix of costs.
Returns
-------
2darray
A matrix of total constrained lsap costs. The i, j entry of the matrix
corresponds to the total lsap cost under the constraint that row i is
assigned to column j.
"""
cost_matrix = np.array(cost_matrix, dtype=np.double)
n_rows, n_cols = cost_matrix.shape
if n_rows > n_cols:
return costs(cost_matrix.T).T
# Find the best lsap assignment from rows to columns without constraints.
# Since there are at least as many columns as rows, row_idxs should
# be identical to np.arange(n_rows). We depend on this.
row_idxs = np.arange(n_rows)
try:
col4row, row4col, v = lapjv(cost_matrix)
except ValueError as e:
if str(e) == "cost matrix is infeasible":
return np.full((n_rows, n_cols), np.inf)
else:
raise e
# Column vector of costs of each assignment in the lsap solution.
lsap_costs = cost_matrix[row_idxs, col4row]
lsap_total_cost = lsap_costs.sum()
# Find the two minimum-cost columns for each row
best_col_idxs = np.argmin(cost_matrix, axis=1)
best_col_vals = cost_matrix[row_idxs, best_col_idxs]
cost_matrix[row_idxs, best_col_idxs] = np.inf
second_best_col_idxs = np.argmin(cost_matrix, axis=1)
second_best_col_vals = cost_matrix[row_idxs, second_best_col_idxs]
cost_matrix[row_idxs, second_best_col_idxs] = np.inf
third_best_col_idxs = np.argmin(cost_matrix, axis=1)
cost_matrix[row_idxs, best_col_idxs] = best_col_vals
cost_matrix[row_idxs, second_best_col_idxs] = second_best_col_vals
# When a row has its column stolen by a constraint, these are the columns
# that might come into play when we are forced to resolve the assignment.
if n_rows < n_cols:
# unused = col_idxs[~np.isin(col_idxs, col4row)]
# unused = np.setdiff1d(np.arange(n_cols), col4row, assume_unique=True)
# first_unused = np.argmin(cost_matrix[:, unused], axis=1)
# potential_cols = np.union1d(col4row, unused[first_unused])
used_cost_matrix = cost_matrix[:, col4row]
cost_matrix[:, col4row] = np.inf
first_unused = np.argmin(cost_matrix, axis=1)
potential_cols = np.union1d(col4row, first_unused)
cost_matrix[:, col4row] = used_cost_matrix
else:
potential_cols = np.arange(n_cols)
# When we add the constraint assigning row i to column j, col4row[i]
# is freed up. If col4row[i] cannot improve on the cost of one of the
# other row assignments, it does not need to be reassigned to another row.
# If additionally column j is not in col4row, it is not taken away
# from any of the other row assignments. In this situation, the resulting
# total assignment costs are:
total_costs = lsap_total_cost - lsap_costs[:, None] + cost_matrix
for i, freed_j in enumerate(col4row):
# When row i is constrained to another column, can column j be
# reassigned to improve the assignment cost of one of the other rows?
# To deal with that, we solve the lsap with row i omitted. For the
# majority of constraints on row i's assignment, this will not conflict
# with the constraint. When it does conflict, we fix the issue later.
sub_ind = ~one_hot(i, n_rows)
freed_col_costs = cost_matrix[:, freed_j]
# If the freed up column does not contribute to lowering the costs of any
# other rows, simply use the current assignments.
if np.any(freed_col_costs < lsap_costs):
# Need to solve the subproblem in which one row is removed
new_row4col, new_col4row, new_v = lap.solve_lsap_with_removed_row(
cost_matrix, i, row4col, col4row, v, modify_val=False
)
sub_total_cost = cost_matrix[sub_ind, new_col4row[sub_ind]].sum()
# This calculation will end up being wrong for the columns in
# lsap_col_idxs[sub_col_ind]. This is because the constraint in
# row i in these columns will conflict with the sub assignment.
# These miscalculations are corrected later.
total_costs[i, :] = cost_matrix[i, :] + sub_total_cost
else:
new_row4col, new_col4row, new_v = (
row4col,
col4row.copy(),
v,
)
new_col4row[i] = -1 # Row i is having a constraint applied.
# new_col4row now contains the optimal assignment columns ignoring row i.
# TODO: np.setdiff1d is very expensive.
# new_col4row[i] = np.setdiff1d(col4row, new_col4row, assume_unique=True)[0]
new_col4row[i] = set(col4row).difference(set(new_col4row)).pop()
total_costs[i, new_col4row[i]] = cost_matrix[row_idxs, new_col4row].sum()
# A flag that indicates if solve_lsap_with_removed_col has been called.
flag_removed_col = False
for other_i, stolen_j in enumerate(new_col4row):
if other_i == i:
continue
# if not np.isfinite(cost_matrix[i, stolen_j]):
# total_costs[i, stolen_j] = cost_matrix[i, stolen_j]
# continue
# Row i steals column stolen_j from other_i because of constraint.
new_col4row[i] = stolen_j
# Row other_i must find a new column. What is its next best option?
best_j, second_best_j, third_best_j = (
best_col_idxs[other_i],
second_best_col_idxs[other_i],
third_best_col_idxs[other_i],
)
# Note: Problem might occur if we have two j's that are both next
# best, but one is not in new_col4row and the other is in new_col4row.
# In this case, choosing the one not in new_col4row does not necessarily
# give us the optimal assignment.
# TODO: make the following if-else prettier.
if (
best_j != stolen_j
and best_j not in new_col4row
and (
cost_matrix[other_i, best_j] != cost_matrix[other_i, second_best_j]
or second_best_j not in new_col4row
)
):
new_col4row[other_i] = best_j
total_costs[i, stolen_j] = cost_matrix[row_idxs, new_col4row].sum()
elif second_best_j not in new_col4row and (
cost_matrix[other_i, second_best_j]
!= cost_matrix[other_i, third_best_j]
or third_best_j not in new_col4row
):
new_col4row[other_i] = second_best_j
total_costs[i, stolen_j] = cost_matrix[row_idxs, new_col4row].sum()
else:
# If this is the first time solve_lsap_with_removed_col is called
# we initialize a bunch of variables
if not flag_removed_col:
# Otherwise, solve the lsap with stolen_j removed
sub_sub_cost_matrix = cost_matrix[sub_ind, :][:, potential_cols]
sub_j = list(potential_cols).index(stolen_j)
sub_new_col4row = new_col4row[sub_ind]
sub_new_col4row = np.where(
sub_new_col4row.reshape(sub_new_col4row.size, 1)
== potential_cols
)[1]
# When we solve the lsap with row i removed, we update row4col accordingly.
sub_row4col = new_row4col.copy()
sub_row4col[sub_row4col == i] = -1
sub_row4col[sub_row4col > i] -= 1
sub_sub_row4col = sub_row4col[potential_cols]
sub_new_v = new_v[potential_cols]
flag_removed_col = True
else:
sub_j = list(potential_cols).index(stolen_j)
try:
_, new_new_col4row, _ = lap.solve_lsap_with_removed_col(
sub_sub_cost_matrix,
sub_j,
sub_sub_row4col,
sub_new_col4row,
sub_new_v, # dual variable associated with cols
modify_val=False,
)
total_costs[i, stolen_j] = (
cost_matrix[i, stolen_j]
+ sub_sub_cost_matrix[
np.arange(n_rows - 1), new_new_col4row
].sum()
)
except ValueError:
total_costs[i, stolen_j] = np.inf
# Give other_i its column back in preparation for the next round.
new_col4row[other_i] = stolen_j
new_col4row[i] = -1
# For those constraints which are compatible with the unconstrained lsap:
total_costs[row_idxs, col4row] = lsap_total_cost
return total_costs
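# Example: constraining row 0 to column 0 forces row 1 onto column 1,
# so entry (0, 0) of the output is 1 + 8 = 9; the other entries follow
# the same logic:
# costs(np.array([[1., 2.], [4., 8.]]))
# -> array([[9., 6.], [6., 9.]])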
```
|
{
"source": "jdmoorman/kaczmarz-algorithms",
"score": 3
}
|
#### File: src/kaczmarz/_variants.py
```python
from collections import deque
import numpy as np
from scipy import sparse
import kaczmarz
from ._utils import scale_cols, scale_rows, square
class Cyclic(kaczmarz.Base):
"""Cycle through the equations of the system in order, repeatedly.
References
----------
1. <NAME>.
"Angenäherte Auflösung von Systemen linearer Gleichungen."
*Bulletin International de l’Académie Polonaise
des Sciences et des Lettres.
Classe des Sciences Mathématiques et Naturelles.
Série A, Sciences Mathématiques*, 35, 335–357, 1937
"""
def __init__(self, *base_args, order=None, **base_kwargs):
super().__init__(*base_args, **base_kwargs)
self._row_index = -1
if order is None:
order = range(self._n_rows)
self._order = order
def _select_row_index(self, xk):
self._row_index = (1 + self._row_index) % self._n_rows
return self._order[self._row_index]
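# Usage sketch (mirroring the public API exercised in the tests, i.e. the
# classmethods inherited from kaczmarz.Base):
# A = np.eye(3)
# b = np.ones(3)
# x = Cyclic.solve(A, b)  # converges to array([1., 1., 1.])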
class MaxDistanceLookahead(kaczmarz.Base):
"""Choose equations which lead to the most progress after a 2 step lookahead."""
def __init__(self, *base_args, **base_kwargs):
super().__init__(*base_args, **base_kwargs)
self._next_i = None
self._gramian = self._A @ self._A.T
self._gramian2 = square(self._gramian)
def _select_row_index(self, xk):
if self._next_i is not None:
temp = self._next_i
self._next_i = None
return temp
residual = self._b - self._A @ xk
residual_2 = np.square(residual)
cost_mat = np.array(
residual_2[:, None]
+ residual_2[None, :]
- 2 * scale_rows(scale_cols(self._gramian, residual), residual)
+ scale_rows(self._gramian2, residual_2)
)
best_cost = np.max(cost_mat)
sort_idxs = np.argsort(residual_2)[::-1]
best_i = sort_idxs[np.any(cost_mat[sort_idxs, :] == best_cost, axis=1)][0]
self._next_i = np.argwhere(cost_mat[best_i] == best_cost)[0][0]
return best_i
class MaxDistance(kaczmarz.Base):
"""Choose equations which leads to the most progress.
This selection strategy is also known as `Motzkin's method`.
References
----------
1. <NAME> and <NAME>.
"The relaxation method for linear inequalities."
*Canadian Journal of Mathematics*, 6:393–404, 1954.
"""
def _select_row_index(self, xk):
# TODO: use auxiliary update for the residual.
residual = self._b - self._A @ xk
return np.argmax(np.abs(residual))
class Random(kaczmarz.Base):
"""Sample equations according to a `fixed` probability distribution.
Parameters
----------
p : (m,) array_like, optional
Sampling probability for each equation. Uniform by default.
"""
def __init__(self, *base_args, p=None, **base_kwargs):
super().__init__(*base_args, **base_kwargs)
self._p = p # p=None corresponds to uniform.
def _select_row_index(self, xk):
return np.random.choice(self._n_rows, p=self._p)
class SVRandom(Random):
"""Sample equations with probability proportional to the squared row norms.
References
----------
1. <NAME> and <NAME>,
"A Randomized Kaczmarz Algorithm with Exponential Convergence."
Journal of Fourier Analysis and Applications 15, 262 2009.
"""
def __init__(self, *base_args, **base_kwargs):
super().__init__(*base_args, **base_kwargs)
squared_row_norms = self._row_norms ** 2
self._p = squared_row_norms / squared_row_norms.sum()
class UniformRandom(Random):
"""Sample equations uniformly at random."""
# Nothing to do since uniform sampling is the default behavior of Random.
class Quantile(Random):
"""Reject equations whose normalized residual is above a quantile.
This algorithm is intended for use in solving corrupted systems of equations.
That is, systems where a subset of the equations are consistent,
while a minority of the equations are not.
Such systems are almost always overdetermined.
Parameters
----------
quantile : float, optional
Quantile of normalized residual above which to reject.
References
----------
1. There will be a reference soon. Keep an eye out for that.
"""
def __init__(self, *args, quantile=1.0, **kwargs):
super().__init__(*args, **kwargs)
self._quantile = quantile
def _distance(self, xk, ik):
return np.abs(self._b[ik] - self._A[ik] @ xk)
def _threshold_distances(self, xk):
return np.abs(self._b - self._A @ xk)
def _threshold(self, xk):
distances = self._threshold_distances(xk)
return np.quantile(distances, self._quantile)
def _select_row_index(self, xk):
ik = super()._select_row_index(xk)
distance = self._distance(xk, ik)
threshold = self._threshold(xk)
if distance < threshold or np.isclose(distance, threshold):
return ik
return -1 # No projection please
class SampledQuantile(Quantile):
"""Reject equations whose normalized residual is above a quantile of a random subset of residual entries.
Parameters
----------
n_samples: int, optional
Number of normalized residual samples used to compute the threshold quantile.
References
----------
1. There will be a reference soon. Keep an eye out for that.
"""
def __init__(self, *args, n_samples=None, **kwargs):
super().__init__(*args, **kwargs)
if n_samples is None:
n_samples = self._n_rows
self._n_samples = n_samples
def _threshold_distances(self, xk):
idxs = np.random.choice(self._n_rows, self._n_samples, replace=False)
return np.abs(self._b[idxs] - self._A[idxs] @ xk)
class WindowedQuantile(Quantile):
"""Reject equations whose normalized residual is above a quantile of the most recent normalized residual values.
Parameters
----------
window_size : int, optional
Number of recent normalized residual values used to compute the threshold quantile.
Note
----
``WindowedQuantile`` also accepts the parameters of ``Quantile``.
References
----------
1. There will be a reference soon. Keep an eye out for that.
"""
def __init__(self, *args, window_size=None, **kwargs):
super().__init__(*args, **kwargs)
if window_size is None:
window_size = self._n_rows
self._window = deque([], maxlen=window_size)
def _distance(self, xk, ik):
distance = super()._distance(xk, ik)
self._window.append(distance)
return distance
def _threshold_distances(self, xk):
return self._window
class RandomOrthoGraph(kaczmarz.Base):
"""Try to only sample equations which are not already satisfied.
Use the orthogonality graph defined in [1] to decide which rows should
be considered "selectable" at each iteration.
Parameters
----------
p : (m,) array_like, optional
Sampling probability for each equation. Uniform by default.
These probabilities will be re-normalized based on the selectable rows
at each iteration.
References
----------
1. Nutini, Julie, et al.
"Convergence rates for greedy Kaczmarz algorithms,
and faster randomized Kaczmarz rules using the orthogonality graph."
arXiv preprint arXiv:1612.07838 2016.
"""
def __init__(self, *args, p=None, **kwargs):
super().__init__(*args, **kwargs)
self._gramian = self._A @ self._A.T
# Map each row index i to indexes of rows that are NOT orthogonal to it.
self._i_to_neighbors = {}
for i in range(self._n_rows):
self._i_to_neighbors[i] = self._gramian[[i], :].nonzero()[1]
if p is None:
p = np.ones((self._n_rows,))
self._p = p
self._selectable = self._A @ self._x0 - self._b != 0
def _update_selectable(self, ik):
self._selectable[self._i_to_neighbors[ik]] = True
self._selectable[ik] = False
def _select_row_index(self, xk):
p = self._p.copy()
p[~self._selectable] = 0
p /= p.sum()
ik = np.random.choice(self._n_rows, p=p)
self._update_selectable(ik)
return ik
@property
def selectable(self):
"""(s,) array(bool): Selectable rows at the current iteration."""
return self._selectable.copy()
class ParallelOrthoUpdate(RandomOrthoGraph):
"""Perform multiple updates in parallel, using only rows which are mutually orthogonal
Parameters
----------
q : int, optional
Maximum number of updates to do in parallel.
"""
def __init__(self, *args, q=None, **kwargs):
super().__init__(*args, **kwargs)
if q is None:
q = self._n_rows
self._q = q
def _update_iterate(self, xk, tauk):
"""Do a sum of the usual updates."""
# TODO: We should implement averaged kaczmarz as a mixin or something.
xkp1 = xk
for i in tauk:
xkp1 = super()._update_iterate(xkp1, i)
return xkp1
def _select_row_index(self, xk):
"""Select a group of mutually orthogonal rows to project onto."""
curr_selectable = self._selectable.copy() # Equations that are not satisfied.
tauk = []
curr_p = self._p.copy()
while len(tauk) != self._q and np.any(curr_selectable):
curr_p[~curr_selectable] = 0 # Don't want to sample unselectable entries
curr_p /= curr_p.sum() # Renormalize probabilities
i = np.random.choice(self._n_rows, p=curr_p)
tauk.append(i)
# Remove rows from selectable set that are not orthogonal to i
curr_selectable[self._i_to_neighbors[i]] = False
for i in tauk:
self._update_selectable(i)
return tauk
```
#### File: kaczmarz-algorithms/tests/test_abc.py
```python
import numpy as np
import pytest
import kaczmarz
@pytest.fixture()
def DummyStrategy():
class _DummyStrategy(kaczmarz.Base):
def _select_row_index(self, xk):
return 0
return _DummyStrategy
@pytest.fixture()
def NonStrategy():
class _NonStrategy(kaczmarz.Base):
pass
return _NonStrategy
def terminates_after_n_iterations(iterates, n):
iterator = iter(iterates)
for _ in range(n + 1):
next(iterator)
with pytest.raises(StopIteration):
next(iterator)
def test_undefined_abstract_method(eye23, ones2, DummyStrategy, NonStrategy):
"""Forgetting to implement the abstract method ``select_row_index`` should result in a TypeError on instantiation."""
with pytest.raises(TypeError):
NonStrategy()
DummyStrategy(eye23, ones2)
@pytest.mark.timeout(1)
def test_inconsistent_system_terminates(eye23, ones2, DummyStrategy, NonStrategy):
"""Make sure inconsistent systems do not run forever."""
A = np.array([[1], [2]])
b = np.array([1, 1])
DummyStrategy.solve(A, b)
def test_single_row_matrix(DummyStrategy, allclose):
A = np.array([[0, 0, 1, 1]])
b = np.array([1])
iterator = iter(DummyStrategy.iterates(A, b))
next(iterator)
x_exact = next(iterator)
assert allclose([0, 0, 0.5, 0.5], x_exact)
with pytest.raises(StopIteration):
next(iterator)
def test_iterate_shape(eye23, ones2, DummyStrategy):
"""Row selected at each iteration should be accessable through the .ik attribute."""
x0 = np.array([0, 0, 0])
iterator = iter(DummyStrategy(eye23, ones2, x0))
assert x0.shape == next(iterator).shape
assert x0.shape == next(iterator).shape
x0 = np.array([[0], [0], [0]])
iterator = iter(DummyStrategy(eye23, ones2, x0))
assert x0.shape == next(iterator).shape
assert x0.shape == next(iterator).shape
iterator = iter(DummyStrategy(eye23, ones2.reshape(-1)))
assert (3,) == next(iterator).shape
assert (3,) == next(iterator).shape
iterator = iter(DummyStrategy(eye23, ones2.reshape(-1, 1)))
assert (3, 1) == next(iterator).shape
assert (3, 1) == next(iterator).shape
def test_initial_guess(eye23, ones2, DummyStrategy):
# Does the default initial iterate have the right shape?
iterates = DummyStrategy.iterates(eye23, ones2)
assert (3,) == next(iter(iterates)).shape
# Does the supplied initial iterate get used correctly?
x0 = np.array([1, 2, 3])
iterates = DummyStrategy.iterates(eye23, ones2, x0)
assert list(x0) == list(next(iter(iterates)))
def test_ik(eye23, ones2, zeros3, DummyStrategy):
"""Row selected at each iteration should be accessable through the .ik attribute."""
iterates = DummyStrategy(eye23, ones2, zeros3)
iterator = iter(iterates)
next(iterator)
assert -1 == iterates.ik
next(iterator)
assert 0 == iterates.ik
next(iterator)
assert 0 == iterates.ik
def test_maxiter(eye23, ones2, zeros3, DummyStrategy):
"""Passing ``maxiter=n`` should cause the algorithm to terminate after n iterations."""
# [0, 0, 0] is not the exact solution.
args = [eye23, ones2, zeros3]
iterates = DummyStrategy.iterates(*args, maxiter=0)
terminates_after_n_iterations(iterates, 0)
iterates = DummyStrategy.iterates(*args, maxiter=1)
terminates_after_n_iterations(iterates, 1)
for maxiter in range(1, 5):
iterates = DummyStrategy.iterates(*args, maxiter=maxiter, tol=None)
terminates_after_n_iterations(iterates, maxiter)
with pytest.raises(ValueError):
iterates = DummyStrategy.iterates(*args, maxiter=None, tol=None)
def test_solve(eye23, ones2, zeros3, DummyStrategy):
# [0, 0, 0] is not the exact solution.
x = DummyStrategy.solve(eye23, ones2, zeros3, maxiter=0)
assert [0, 0, 0] == list(x)
x = DummyStrategy.solve(eye23, ones2, zeros3, maxiter=1)
assert [1, 0, 0] == list(x)
def test_tolerance(eye23, ones2, DummyStrategy):
x_exact = np.array([1, 1, 0])
# If we start at the answer, we're done.
iterates = DummyStrategy.iterates(eye23, ones2, x_exact)
terminates_after_n_iterations(iterates, 0)
# Initial residual has norm 1.
x0 = np.array([1, 0, 0])
iterates = DummyStrategy.iterates(eye23, ones2, x0, tol=1.01)
terminates_after_n_iterations(iterates, 0)
def test_callback(eye23, ones2, zeros3, DummyStrategy):
"""Callback function should be called after each iteration."""
actual_iterates = []
def callback(xk):
actual_iterates.append(list(xk))
iterator = iter(DummyStrategy.iterates(eye23, ones2, zeros3, callback=callback))
next(iterator)
assert actual_iterates == [[0, 0, 0]]
next(iterator)
assert actual_iterates == [[0, 0, 0], [1, 0, 0]]
def test_sparse(speye23, ones2, zeros3, DummyStrategy):
iterator = iter(DummyStrategy.iterates(speye23, ones2, zeros3))
assert [0, 0, 0] == list(next(iterator))
assert [1, 0, 0] == list(next(iterator))
def test_array_like(eye23, ones2, zeros3, DummyStrategy):
iterator = iter(
DummyStrategy.iterates(eye23.tolist(), ones2.tolist(), zeros3.tolist())
)
assert [0, 0, 0] == list(next(iterator))
assert [1, 0, 0] == list(next(iterator))
def test_iterates_are_copies(speye23, ones2, zeros3, DummyStrategy):
"""Check that modifying the iterate inplace does not affect the underlying iteration."""
iterator = iter(DummyStrategy.iterates(speye23, ones2, zeros3))
xk = next(iterator)
assert [0, 0, 0] == list(xk)
xk[:] = np.inf
xk = next(iterator)
assert [1, 0, 0] == list(xk)
xk[:] = np.inf
xk = next(iterator)
assert [1, 0, 0] == list(xk)
```
#### File: kaczmarz-algorithms/tests/test_parallel_ortho.py
```python
import numpy as np
import pytest
import kaczmarz
def test_selectable_set(eye33, ones3):
x0 = np.zeros(3)
solver = kaczmarz.ParallelOrthoUpdate(eye33, ones3, x0, q=1)
# Check that only one row is selected
assert solver.ik == -1
next(solver)
assert solver.ik == -1
next(solver)
assert 1 == len(solver.ik)
next(solver)
assert 1 == len(solver.ik)
next(solver)
assert 1 == len(solver.ik)
with pytest.raises(StopIteration):
next(solver)
```
|
{
"source": "jdmoorman/Multi-Channel-Subgraph-Matching",
"score": 2
}
|
#### File: Multi-Channel-Subgraph-Matching/experiments/run_erdos_renyi.py
```python
import uclasm
from timeit import default_timer
from time import sleep
from matplotlib import pyplot as plt
import numpy as np
import scipy as sp
from scipy.sparse import csr_matrix
from multiprocessing import Process, Queue, cpu_count
np.random.seed(0)
timeout = 10000
def process_fn(tmplt, world, result_queue=None, label=None, count_isomorphisms=False):
result = {}
result["label"] = label # For identifying results afterwards
start_time = default_timer()
tmplt, world, candidates = uclasm.run_filters(tmplt, world, candidates=tmplt.is_cand, filters=uclasm.cheap_filters, verbose=False)
filter_time = default_timer()-start_time
# print("Time taken for filters: {}".format(filter_time))
# filter_times.append(filter_time)
result["filter_time"] = filter_time
# start_time = default_timer()
# from filters.validation_filter import validation_filter
# validation_filter(tmplt, world, candidates=candidates, in_signal_only=False,
# verbose=False)
# print("Time taken for validation: {}".format(default_timer()-start_time))
# validation_times += [default_timer()-start_time]
# # tmplt.candidate_sets = {x: set(world.nodes[candidates[idx,:]]) for idx, x in enumerate(tmplt.nodes)}
if count_isomorphisms:
# # print("Starting isomorphism count")
start_time = default_timer()
count, n_iterations = uclasm.count_isomorphisms(tmplt, world, candidates=candidates, verbose=False, count_iterations=True)
# print("Counted {} isomorphisms in {} seconds".format(count, default_timer()-start_time))
iso_count_time = default_timer() - start_time
# iso_counts += [count]
# iso_count_times += [default_timer()-start_time]
result["n_isomorphisms"] = count
result["iso_count_time"] = iso_count_time
result["has_iso"] = count > 0
else:
start_time = default_timer()
from uclasm.counting.has_isomorphism import has_isomorphism
has_iso, n_iterations = has_isomorphism(tmplt, world, candidates=candidates, verbose=False, count_iterations=True)
# if has_iso:
# print("Has isomorphism")
# else:
# print("No isomorphism")
iso_check_time = default_timer() - start_time
# print("Isomorphism checked in {} seconds".format(iso_check_time))
# iso_check_times.append(iso_check_time)
result["iso_check_time"] = iso_check_time
result["has_iso"] = has_iso
result["n_iterations"] = n_iterations
if result_queue is not None:
result_queue.put(result)
else:
return result
def run_trial(n_tmplt_nodes, n_world_nodes, n_layers, tmplt_prob, world_prob, results, use_timeout=True, count_isomorphisms=False):
run_process = None
try:
if use_timeout:
result_queue = Queue()
run_process = create_process(n_tmplt_nodes, n_world_nodes, n_layers, tmplt_prob, world_prob, result_queue, count_isomorphisms=count_isomorphisms)
run_process.start()
start_time = default_timer()
while run_process.is_alive() and default_timer() - start_time < timeout:
sleep(0.5)
if run_process.is_alive():
print("Timeout exceeded, killing process")
run_process.terminate()
else:
result = result_queue.get()
result['n_tmplt_nodes'] = n_tmplt_nodes
result['n_world_nodes'] = n_world_nodes
result['tmplt_prob'] = tmplt_prob
result['world_prob'] = world_prob
result['n_layers'] = n_layers
if use_timeout:
result['timeout'] = timeout
# print(result)
results.append(result)
else:
tmplt, world = make_graphs(n_tmplt_nodes, n_world_nodes, n_layers, tmplt_prob, world_prob)
result = process_fn(tmplt, world, label=(tmplt_prob, world_prob), count_isomorphisms=count_isomorphisms)
result['n_tmplt_nodes'] = n_tmplt_nodes
result['n_world_nodes'] = n_world_nodes
result['tmplt_prob'] = tmplt_prob
result['world_prob'] = world_prob
result['n_layers'] = n_layers
if use_timeout:
result['timeout'] = timeout
# print(result)
results.append(result)
except KeyboardInterrupt:
print("Interrupting process")
if run_process is not None and run_process.is_alive():
run_process.terminate()
raise KeyboardInterrupt
def create_process(n_tmplt_nodes, n_world_nodes, n_layers, tmplt_prob, world_prob, result_queue, count_isomorphisms=False):
tmplt, world = make_graphs(n_tmplt_nodes, n_world_nodes, n_layers, tmplt_prob, world_prob)
run_process = Process(target=process_fn, args=(tmplt, world), kwargs={"result_queue": result_queue, "label": (tmplt_prob, world_prob), "count_isomorphisms": count_isomorphisms})
return run_process
def make_graphs(n_tmplt_nodes, n_world_nodes, n_layers, tmplt_prob, world_prob):
tmplt_nodes = [x for x in range(n_tmplt_nodes)]
world_nodes = [x for x in range(n_world_nodes)]
tmplt_shape = (n_tmplt_nodes, n_tmplt_nodes)
world_shape = (n_world_nodes, n_world_nodes)
tmplt_adj_mats = [csr_matrix(np.random.choice([0, 1], tmplt_shape, p=[1-tmplt_prob, tmplt_prob])) for i in range(n_layers)]
world_adj_mats = [csr_matrix(np.random.choice([0, 1], world_shape, p=[1-world_prob, world_prob])) for i in range(n_layers)]
channels = [str(x) for x in range(n_layers)]
tmplt = uclasm.Graph(np.array(tmplt_nodes), channels, tmplt_adj_mats)
world = uclasm.Graph(np.array(world_nodes), channels, world_adj_mats)
tmplt.is_cand = np.ones((tmplt.n_nodes,world.n_nodes), dtype=np.bool)
tmplt.candidate_sets = {x: set(world.nodes) for x in tmplt.nodes}
return tmplt, world
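# Usage sketch: build one random instance and run a single filter +
# isomorphism-check pass directly, without the multiprocessing wrapper:
# tmplt, world = make_graphs(10, 150, 2, 0.5, 0.9)
# result = process_fn(tmplt, world, label=(0.5, 0.9))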
# n_tmplt_nodes = 10
n_world_nodes_min = 10
n_world_nodes_max = 205
n_world_nodes_inc = 5
n_world_nodes = 150
n_trials = 500
n_cores = int(cpu_count()/2)
count_isomorphisms_list = [True, False]
n_tmplt_nodes = 10
n_layers_list = [1, 2, 3]
tmplt_prob = 0.5
layer_probs = True
for n_layers in n_layers_list:
world_prob = 1 - (1 - (1 - tmplt_prob + tmplt_prob**2)**(1.0/n_layers)) / tmplt_prob
results = []
import tqdm
for count_isomorphisms in count_isomorphisms_list:
for n_world_nodes in tqdm.tqdm(range(n_world_nodes_min,
n_world_nodes_max,
n_world_nodes_inc), ascii=True):
n_trials_remaining = n_trials
while n_trials_remaining > 0:
process_list = []
result_queue = Queue()
n_processes = n_cores if n_cores < n_trials_remaining else n_trials_remaining
for i in range(n_processes):
# print("Creating process {}".format(i))
# run_trial(n_tmplt_nodes, n_world_nodes, n_layers, tmplt_prob, world_prob, results, use_timeout=True)
new_process = create_process(n_tmplt_nodes, n_world_nodes, n_layers, tmplt_prob, world_prob, result_queue, count_isomorphisms=count_isomorphisms)
process_list.append(new_process)
new_process.start()
start_time = default_timer()
n_finished = n_processes
while default_timer() - start_time < timeout:
any_alive = False
for process in process_list:
if process.is_alive():
any_alive = True
if not any_alive:
break
sleep(0.5)
for process in process_list:
if process.is_alive():
process.terminate()
n_finished -= 1
if n_finished != n_processes:
print("Finished {} processes out of {}".format(n_finished, n_processes))
for i in range(n_finished):
result = result_queue.get()
result['n_tmplt_nodes'] = n_tmplt_nodes
result['n_world_nodes'] = n_world_nodes
result['tmplt_prob'] = tmplt_prob
result['world_prob'] = world_prob
result['n_layers'] = n_layers
results.append(result)
n_trials_remaining -= n_processes
np.save("erdos_renyi_results_{}_trials_{}_layers{}{}_timeout_{}_vary_world_size".format(n_trials, n_layers, "_count_iso" if count_isomorphisms else "", "_layerprobs" if layer_probs else "", timeout), results)
```
#### File: Multi-Channel-Subgraph-Matching/experiments/run_sudoku.py
```python
import uclasm
from timeit import default_timer
from matplotlib import pyplot as plt
import numpy as np
import scipy as sp
import csv
def display_sudoku(tmplt, show_cands=False):
if not show_cands:
# Easier to visualize result board
print("-"*13)
for i in range(9):
row = "|"
for j in range(9):
square = chr(65+j)+str(i+1)
if stype == "9x9x3":
square += "R"
cands = tmplt.candidate_sets[square]
digit = -1
for cand in cands:
if digit == -1:
digit = cand[0]
elif cand[0] != digit:
digit = "X"
row += str(digit)
if j%3 == 2:
row += "|"
print(row)
if i%3 == 2:
print("-"*13)
else:
# Candidate format
print("-"*37)
for i in range(9):
rows = ["|", "|", "|"]
for j in range(9):
square = chr(65+j)+str(i+1)
if stype == "9x9x3":
square += "R"
cands = tmplt.candidate_sets[square]
digit = -1
possible = []
for cand in cands:
if digit == -1:
digit = int(cand[0])
possible += [digit]
elif int(cand[0]) != digit:
if int(cand[0]) not in possible:
possible += [int(cand[0])]
digit = "X"
# Print possibilities in a nice grid format
for k in range(9):
if k+1 not in possible:
rows[k//3] += " "
else:
rows[k//3] += str(k+1)
for m in range(3):
rows[m] += "|"
for row in rows:
print(row)
print("-"*37)
def display_sudoku2(tmplt, show_cands=False):
fig, ax = plt.subplots(figsize=(9,9))
cur_axes = plt.gca()
fig.patch.set_visible(False)
cur_axes.axes.get_xaxis().set_visible(False)
cur_axes.axes.get_yaxis().set_visible(False)
ax.axis("off")
for i in range(10):
plt.plot([0,9],[i,i],'k',linewidth=(5 if i%3 == 0 else 2))
plt.plot([i,i],[0,9],'k',linewidth=(5 if i%3 == 0 else 2))
if i == 9:
continue
for j in range(9):
square = chr(65+j)+str(i+1)
if stype == "9x9x3":
square += "R"
cands = tmplt.candidate_sets[square]
digit = -1
possible = []
for cand in cands:
if digit == -1:
digit = int(cand[0])
possible += [digit]
elif int(cand[0]) != digit:
if int(cand[0]) not in possible:
possible += [int(cand[0])]
digit = "X"
if len(possible) == 1:
# Plot a large number
plt.text(i+0.5, j+0.46, str(possible[0]), fontsize=32, ha='center', va='center')
elif len(possible) > 0 and show_cands:
for i2 in range(3):
for j2 in range(3):
digit = i2+j2*3+1
if digit in possible:
plt.text((i+i2/3.0)+1/6.0, (j+1-j2/3.0)-0.04-1/6.0, str(digit), fontsize=12, ha='center', va='center',weight='bold')
plt.savefig("sudoku_picture{}.png".format("_cands" if show_cands else ""))
for stype in ['9x9','9x9x3','9x81']:
# stype = "9x9" # 9x9, 9x81, 9x9x3
if stype == "9x9":
channels = range(1)
elif stype == "9x81":
channels = range(3)
elif stype == "9x9x3":
channels = range(2)
start_time = default_timer()
size = 9 # Note: doesn't work with sizes other than 9x9, square link logic would have to be generalized as well as node labels
if stype == "9x9":
tmplt_adj_mats = [np.zeros((size*size,size*size), dtype=np.int8)]
world_nodes = [str(i*10 + j) for i in range(1,size+1) for j in range(1,size+1)] # First digit actual digit, second digit is square it is in
world_adj_mats = [np.ones((size*size,size*size), dtype=np.int8)] # Initialize to fully linked
# Remove links between same digit
for i in range(size):
world_adj_mats[0][i*size:i*size+size, i*size:i*size+size] = 0
tmplt_nodes = [chr(i)+str(j) for i in range(65,65+size) for j in range(1,size+1)] # Chessboard layout: letters are rows, numbers columns
# Add links between rows
link_mat = np.ones((size,size), dtype=np.int8) - np.eye((size), dtype=np.int8)
for i in range(size):
tmplt_adj_mats[0][i*size:i*size+size, i*size:i*size+size] = link_mat
# Add links between columns
for i in range(size):
tmplt_adj_mats[0][i:i+size*(size-1)+1:size, i:i+size*(size-1)+1:size] = link_mat
# Add links between same square
for i in range(3):
for j in range(3):
row_idxs = [i*3+j*27+x for x in [0,1,2,9,10,11,18,19,20]] # i*3+j*27 = coordinate of top left corner of square
tmplt_adj_mats[0][np.ix_(row_idxs, row_idxs)] = link_mat
world_adj_mats[0] = sp.sparse.csr_matrix(world_adj_mats[0])
tmplt_adj_mats[0] = sp.sparse.csr_matrix(tmplt_adj_mats[0])
elif stype == "9x81":
# 3 channels: row links, column links, square links
tmplt_adj_mats = [np.zeros((size*size,size*size), dtype=np.int8) for i in range(3)]
# Nodes in world graph: one node per digit per space
# 3 character name: first character actual digit, 2-3rd are chessboard coordinate of space
world_nodes = [str(k)+chr(i)+str(j) for k in range(1,size+1) for i in range(65,65+size) for j in range(1,size+1)]
world_adj_mats = [np.zeros((len(world_nodes),len(world_nodes)), dtype=np.int8) for i in range(3)]
# Add links between rows
link_mat = np.ones((size*size,size*size), dtype=np.int8)
for i in range(size):
link_mat[i*size:i*size+size,i*size:i*size+size] = 0 # Remove same digit links
for i in range(size):
link_mat[i::size, i::size] = 0 # Remove same space links
# Add links between rows
for i in range(size):
row_idxs = [i*size+j+k*size*size for j in range(size) for k in range(size)]
world_adj_mats[0][np.ix_(row_idxs, row_idxs)] = link_mat
# Add links between columns
for i in range(size):
world_adj_mats[1][i::size, i::size] = link_mat
# Add links between same square
for i in range(3):
for j in range(3):
square_idxs = [i*3+j*27+x+y*size*size for x in [0,1,2,9,10,11,18,19,20] for y in range(size)] # i*3+j*27 = coordinate of top left corner of square
world_adj_mats[2][np.ix_(square_idxs, square_idxs)] = link_mat
tmplt_nodes = [chr(i)+str(j) for i in range(65,65+size) for j in range(1,size+1)] # Chessboard layout: letters are rows, numbers columns
# Add links between rows
link_mat = np.ones((size,size), dtype=np.int8) - np.eye((size), dtype=np.int8)
for i in range(size):
tmplt_adj_mats[0][i*size:i*size+size, i*size:i*size+size] = link_mat
# Add links between columns
for i in range(size):
tmplt_adj_mats[1][i:i+size*(size-1)+1:size, i:i+size*(size-1)+1:size] = link_mat
# Add links between same square
for i in range(3):
for j in range(3):
square_idxs = [i*3+j*27+x for x in [0,1,2,9,10,11,18,19,20]] # i*3+j*27 = coordinate of top left corner of square
tmplt_adj_mats[2][np.ix_(square_idxs, square_idxs)] = link_mat
for i in range(3):
world_adj_mats[i] = sp.sparse.csr_matrix(world_adj_mats[i])
tmplt_adj_mats[i] = sp.sparse.csr_matrix(tmplt_adj_mats[i])
elif stype == "9x9x3":
# 2 channels: adjacency links and same space links
# Each type of link(row, column, square) has its own 9x9
# Template is 3 copies of the squares, linked by same-space
# First is row links, then col links, then square links
tmplt_adj_mats = [np.zeros((size*size*3,size*size*3), dtype=np.int8) for i in range(2)]
squares = [chr(i)+str(j) for i in range(65,65+size) for j in range(1,size+1)] # Chessboard layout: letters are rows, numbers columns
tmplt_nodes = [x+y for y in ["R","C","S"] for x in squares]
# Add links between rows
link_mat = np.ones((size,size), dtype=np.int8) - np.eye((size), dtype=np.int8)
for i in range(size):
tmplt_adj_mats[0][i*size:i*size+size, i*size:i*size+size] = link_mat
# Add links between columns
for i in range(size):
tmplt_adj_mats[0][size*size+i:size*size+i+size*(size-1)+1:size, size*size+i:size*size+i+size*(size-1)+1:size] = link_mat
# Add links between same square
for i in range(3):
for j in range(3):
row_idxs = [2*size*size+i*3+j*27+x for x in [0,1,2,9,10,11,18,19,20]] # i*3+j*27 = coordinate of top left corner of square
tmplt_adj_mats[0][np.ix_(row_idxs, row_idxs)] = link_mat
# Add same space links
# Link from row to square and column to square
link_mat2 = np.zeros((3,3))
link_mat2[0,2] = 1
link_mat2[1,2] = 1
for i in range(size*size):
tmplt_adj_mats[1][i::size*size, i::size*size] = link_mat2
# Nodes in world graph: 3 nodes per digit per row/column/square
# 3 character name: First digit actual digit, second digit is row/column/square it is in, third is R/C/S
digits = [i*10 + j for i in range(1,size+1) for j in range(1,size+1)]
world_nodes = [str(x)+y for y in ["R","C","S"] for x in digits]
world_adj_mats = [np.zeros((len(world_nodes),len(world_nodes)), dtype=np.int8) for i in range(2)]
# Initialize full links in row, column, square
for i in range(3):
world_adj_mats[0][i*size*size:(i+1)*size*size,i*size*size:(i+1)*size*size] = np.ones((size*size, size*size))
# Remove links between same digit
for j in range(size):
world_adj_mats[0][i*size*size+j*size:i*size*size+j*size+size, i*size*size+j*size:i*size*size+j*size+size] = 0
# Initialize same space links
# Add links between same digit, from row to square and column to square
# Only add a link if the row-square or column-square combo is legal
link_row_square = np.zeros((2*size, 2*size))
link_col_square = np.zeros((2*size, 2*size))
for i in range(3):
for j in range(3):
link_row_square[i*3:i*3+3, size+i*3+j] = 1
link_col_square[j*3:j*3+3, size+i*3+j] = 1
for i in range(size):
digit_idxs = [i*size+j for j in range(size)]
rs_idxs = digit_idxs + [x+2*size*size for x in digit_idxs]
cs_idxs = [x+size*size for x in digit_idxs] + [x+2*size*size for x in digit_idxs]
world_adj_mats[1][np.ix_(rs_idxs, rs_idxs)] = link_row_square
world_adj_mats[1][np.ix_(cs_idxs, cs_idxs)] = link_col_square
for i in range(2):
world_adj_mats[i] = sp.sparse.csr_matrix(world_adj_mats[i])
tmplt_adj_mats[i] = sp.sparse.csr_matrix(tmplt_adj_mats[i])
# initial candidate set for template nodes is the full set of world nodes
tmplt = uclasm.Graph(np.array(tmplt_nodes), channels, tmplt_adj_mats)
world = uclasm.Graph(np.array(world_nodes), channels, world_adj_mats)
tmplt.is_cand = np.ones((tmplt.n_nodes,world.n_nodes), dtype=np.bool)
tmplt.candidate_sets = {x: set(world.nodes) for x in tmplt.nodes}
def update_node_candidates(tmplt, world, tmplt_node, cands):
cand_row = np.zeros(world.n_nodes, dtype=np.bool)
for cand in cands:
cand_row[world.node_idxs[cand]] = True
tmplt.is_cand[tmplt.node_idxs[tmplt_node]] &= cand_row
tmplt.labels = np.array(['__'] * tmplt.n_nodes)
world.labels = np.array(['__'] * world.n_nodes)
if stype == "9x9":
# Second digit is square
# Restrict candidates to only allow a particular end digit in the square
for i in range(3):
for j in range(3):
row_idxs = [i*3+j*27+x for x in [0,1,2,9,10,11,18,19,20]] # i*3+j*27 = coordinate of top left corner of square
# for idx in row_idxs:
# cands = tmplt.candidate_sets[tmplt_nodes[idx]]
# new_cands = {cand for cand in cands if cand[-1] == str(i*3+j+1)}
# update_node_candidates(tmplt, world, tmplt_nodes[idx], new_cands)
label = str(i) + str(j)
tmplt.labels[row_idxs] = [label]*len(row_idxs)
cand_idxs = [idx for idx, cand in enumerate(world.nodes) if cand[-1] == str(i*3+j+1)]
world.labels[cand_idxs] = [label]*len(cand_idxs)
elif stype == "9x81":
# Restrict candidates to match the spaces
for idx, tmplt_node in enumerate(tmplt_nodes):
# cands = tmplt.candidate_sets[tmplt_node]
# new_cands = {cand for cand in cands if cand[1:] == tmplt_node}
# update_node_candidates(tmplt, world, tmplt_node, new_cands)
label = str(tmplt_node)
tmplt.labels[idx] = label
new_cands = [idx for idx, cand in enumerate(world_nodes) if cand[1:] == tmplt_node]
world.labels[new_cands] = label
elif stype == "9x9x3":
# Restrict candidates to match R/C/S
# for i in range(size):
# for j in range(size):
# space = chr(65+i)+str(j+1)
# row_cands = set([str(k)+str(i+1)+"R" for k in range(1,size+1)])
# update_node_candidates(tmplt, world, space+"R", row_cands)
# col_cands = set([str(k)+str(j+1)+"C" for k in range(1,size+1)])
# update_node_candidates(tmplt, world, space+"C", col_cands)
# square = i//3 * 3 + j//3 + 1
# square_cands = set([str(k)+str(square)+"S" for k in range(1,size+1)])
# update_node_candidates(tmplt, world, space+"S", square_cands)
for i in range(size):
# Generate row labels
row_label = str(i+1) + "R"
row_tmplt = [chr(65+i)+str(j+1)+"R" for j in range(size)]
row_cands = [str(k)+str(i+1)+"R" for k in range(1,size+1)]
tmplt.labels[[tmplt.node_idxs[node] for node in row_tmplt]] = row_label
world.labels[[world.node_idxs[node] for node in row_cands]] = row_label
# Generate column labels
col_label = str(i+1) + "C"
col_tmplt = [chr(65+j)+str(i+1)+"C" for j in range(size)]
col_cands = [str(k)+str(i+1)+"C" for k in range(1,size+1)]
tmplt.labels[[tmplt.node_idxs[node] for node in col_tmplt]] = col_label
world.labels[[world.node_idxs[node] for node in col_cands]] = col_label
# Generate square labels
square_label = str(i+1) + "S"
square_cands = [str(k)+str(i+1)+"S" for k in range(1,size+1)]
square_tmplt = [chr(65+x+(i//3)*3)+str(y+1+(i%3)*3)+"S" for x in range(3) for y in range(3)]
tmplt.labels[[tmplt.node_idxs[node] for node in square_tmplt]] = square_label
world.labels[[world.node_idxs[node] for node in square_cands]] = square_label
tmplt_orig = tmplt
world_orig = world
for dataset in ['easy50','top95','hardest']:
# Read in puzzles from Project Euler
total_start_time = default_timer()
filter_times = []
validation_times = []
iso_count_times = []
iso_counts = []
with open("sudoku-{}.txt".format(dataset), encoding="utf-8") as fin:
# Format: 81 numbers, separated by newline
for puzzle in fin:
changed_nodes = np.zeros(tmplt.n_nodes, dtype=np.bool)
start_time = default_timer()
tmplt = tmplt_orig.copy()
tmplt.is_cand = tmplt_orig.is_cand.copy()
world = world_orig.copy()
puzzle = puzzle.replace('\ufeff', '')
puzzle = puzzle.replace('\n', '')
for idx, char in enumerate(puzzle):
row = idx // 9 + 1 # One indexed
idx2 = idx % 9
if char in [str(x) for x in range(1,10)]: # Check nonzero
digit = int(char)
letter = chr(65+idx2)
if stype == "9x9":
update_node_candidates(tmplt, world,letter+str(row), set(world_nodes[(digit-1)*size:(digit-1)*size+size]))
changed_nodes[tmplt.node_idxs[letter+str(row)]] = True
elif stype == "9x81":
update_node_candidates(tmplt, world,letter+str(row), set([char+letter+str(row)]))
changed_nodes[tmplt.node_idxs[letter+str(row)]] = True
elif stype == "9x9x3":
for k in range(3):
link_types = ["R","C","S"]
update_node_candidates(tmplt, world,letter+str(row)+link_types[k], set(world_nodes[k*size*size+(digit-1)*size:k*size*size+(digit-1)*size+size]))
changed_nodes[tmplt.node_idxs[letter+str(row)+link_types[k]]] = True
# # Read in a Sudoku puzzle to solve
# with open("sudoku_puzzle2.txt") as fin:
# changed_nodes = np.zeros(tmplt.n_nodes, dtype=np.bool)
# row = 1
# for line in fin: # Each line has 9 characters. Characters not in {1,9} are considered blanks
# for idx2, char in enumerate(line):
# if char in [str(x) for x in range(1,10)]:
# digit = int(char)
# letter = chr(65+idx2)
# if stype == "9x9":
# update_node_candidates(tmplt, world,letter+str(row), set(world_nodes[(digit-1)*size:(digit-1)*size+size]))
# changed_nodes[tmplt.node_idxs[letter+str(row)]] = True
# elif stype == "9x81":
# update_node_candidates(tmplt, world,letter+str(row), set([char+letter+str(row)]))
# changed_nodes[tmplt.node_idxs[letter+str(row)]] = True
# elif stype == "9x9x3":
# for k in range(3):
# link_types = ["R","C","S"]
# update_node_candidates(tmplt, world,letter+str(row)+link_types[k], set(world_nodes[k*size*size+(digit-1)*size:k*size*size+(digit-1)*size+size]))
# changed_nodes[tmplt.node_idxs[letter+str(row)+link_types[k]]] = True
# row += 1
# tmplt.summarize_candidate_sets()
# print("Time to create world and template: {}".format(default_timer()-start_time))
# tmplt.candidate_sets = {x: set(world.nodes[tmplt.is_cand[idx,:]]) for idx, x in enumerate(tmplt.nodes)}
# display_sudoku2(tmplt, show_cands=False)
start_time = default_timer()
tmplt, world, candidates = uclasm.run_filters(tmplt, world, candidates=tmplt.is_cand,
init_changed_cands=changed_nodes, filters=uclasm.all_filters, verbose=False)
print("Time taken for filters: {}".format(default_timer()-start_time))
filter_times += [default_timer()-start_time]
start_time = default_timer()
from filters.validation_filter import validation_filter
validation_filter(tmplt, world, candidates=candidates, in_signal_only=False,
verbose=False)
print("Time taken for validation: {}".format(default_timer()-start_time))
validation_times += [default_timer()-start_time]
# # tmplt.candidate_sets = {x: set(world.nodes[candidates[idx,:]]) for idx, x in enumerate(tmplt.nodes)}
# print("Starting isomorphism count")
start_time = default_timer()
count = uclasm.count_isomorphisms(tmplt, world, candidates=candidates, verbose=False)
print("Counted {} isomorphisms in {} seconds".format(count, default_timer()-start_time))
            count = 1  # NOTE: this overwrites the counted value; presumably each well-posed puzzle has a unique solution
iso_counts += [count]
iso_count_times += [default_timer()-start_time]
print("Dataset:", dataset)
print("Representation:", stype)
print("Total time for {} puzzles: {}".format(len(filter_times),default_timer()-total_start_time))
print("Time spent filtering: {}".format(sum(filter_times)))
print("Time spent counting isomorphisms: {}".format(sum(iso_count_times)))
total_times = np.array(filter_times)+np.array(iso_count_times)
np.save('sudoku_times_{}_{}_validation.npy'.format(dataset,stype), total_times)
np.save('sudoku_filter_times_{}_{}_validation.npy'.format(dataset,stype), filter_times)
np.save('sudoku_validation_times_{}_{}_validation.npy'.format(dataset,stype), validation_times)
np.save('sudoku_iso_count_times_{}_{}_validation.npy'.format(dataset,stype), iso_count_times)
np.save('sudoku_iso_counts_{}_{}_validation.npy'.format(dataset,stype), iso_counts)
```
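For reference, the reader above expects each puzzle as one 81-character, row-major line (the parser treats any character outside 1-9 as a blank and maps index `idx` to row `idx // 9 + 1` and column letter `chr(65 + idx % 9)`). A small sketch of producing such a line (the helper is illustrative, not part of the repo):

```python
import numpy as np

# Illustrative: serialize a 9x9 grid (0 = blank) into the one-line format
# consumed by the loop over "sudoku-{dataset}.txt" above.
grid = np.zeros((9, 9), dtype=int)
grid[0, 0] = 5  # square A1 holds a 5
line = ''.join(str(d) for d in grid.flatten())
assert len(line) == 81
```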
#### File: uclasm/counting/isomorphisms.py
```python
from ..filters import run_filters, cheap_filters, all_filters
from ..utils.misc import invert, values_map_to_same_key, one_hot
from ..utils.graph_ops import get_node_cover
from .alldiffs import count_alldiffs
import numpy as np
from functools import reduce
# TODO: count how many isomorphisms each background node participates in.
# TODO: switch from recursive to iterative implementation for readability
n_iterations = 0
def recursive_isomorphism_counter(tmplt, world, candidates, *,
unspec_cover, verbose, init_changed_cands, count_iterations=False):
global n_iterations
n_iterations += 1
# If the node cover is empty, the unspec nodes are disconnected. Thus, we
# can skip straight to counting solutions to the alldiff constraint problem
if len(unspec_cover) == 0:
# Elimination filter is not needed here and would be a waste of time
tmplt, world, candidates = run_filters(tmplt, world, candidates=candidates, filters=cheap_filters,
verbose=False, init_changed_cands=init_changed_cands)
node_to_cands = {node: world.nodes[candidates[idx]]
for idx, node in enumerate(tmplt.nodes)}
return count_alldiffs(node_to_cands)
tmplt, world, candidates = run_filters(tmplt, world, candidates=candidates, filters=all_filters,
verbose=False, init_changed_cands=init_changed_cands)
# Since the node cover is not empty, we first choose some valid
# assignment of the unspecified nodes one at a time until the remaining
# unspecified nodes are disconnected.
n_isomorphisms = 0
node_idx = unspec_cover[0]
cand_idxs = np.argwhere(candidates[node_idx]).flat
for i, cand_idx in enumerate(cand_idxs):
candidates_copy = candidates.copy()
candidates_copy[node_idx] = one_hot(cand_idx, world.n_nodes)
# recurse to make assignment for the next node in the unspecified cover
n_isomorphisms += recursive_isomorphism_counter(
tmplt, world, candidates_copy, unspec_cover=unspec_cover[1:],
verbose=verbose, init_changed_cands=one_hot(node_idx, tmplt.n_nodes), count_iterations=count_iterations)
# TODO: more useful progress summary
if verbose:
print("depth {}: {} of {}".format(len(unspec_cover), i, len(cand_idxs)), n_isomorphisms)
return n_isomorphisms
def count_isomorphisms(tmplt, world, *, candidates=None, verbose=True, count_iterations=False):
"""
counts the number of ways to assign template nodes to world nodes such that
edges between template nodes also appear between the corresponding world
nodes. Does not factor in the number of ways to assign the edges. Only
counts the number of assignments between nodes.
if the set of unspecified template nodes is too large or too densely
connected, this code may never finish.
"""
global n_iterations
n_iterations = 0
if candidates is None:
        tmplt, world, candidates = run_filters(
            tmplt, world, filters=all_filters, verbose=verbose)
unspec_nodes = np.where(candidates.sum(axis=1) > 1)[0]
tmplt_subgraph = tmplt.subgraph(unspec_nodes)
unspec_cover = get_node_cover(tmplt_subgraph)
unspec_cover_nodes = [tmplt_subgraph.nodes[node_idx] for node_idx in unspec_cover]
unspec_cover_idxes = [tmplt.node_idxs[node] for node in unspec_cover_nodes]
# Send zeros to init_changed_cands since we already just ran the filters
count = recursive_isomorphism_counter(
tmplt, world, candidates, verbose=verbose, unspec_cover=unspec_cover_idxes,
init_changed_cands=np.zeros(tmplt.nodes.shape, dtype=np.bool), count_iterations=count_iterations)
if count_iterations:
return count, n_iterations
else:
return count
def recursive_isomorphism_finder(tmplt, world, candidates, *,
unspec_node_idxs, verbose, init_changed_cands,
found_isomorphisms):
if len(unspec_node_idxs) == 0:
# All nodes have been assigned, add the isomorphism to the list
new_isomorphism = {}
for tmplt_idx, tmplt_node in enumerate(tmplt.nodes):
if verbose:
print(str(tmplt_node)+":", world.nodes[candidates[tmplt_idx]])
new_isomorphism[tmplt_node] = world.nodes[candidates[tmplt_idx]][0]
found_isomorphisms.append(new_isomorphism)
return found_isomorphisms
tmplt, world, candidates = run_filters(tmplt, world, candidates=candidates,
filters=all_filters, verbose=False,
init_changed_cands=init_changed_cands)
node_idx = unspec_node_idxs[0]
cand_idxs = np.argwhere(candidates[node_idx]).flat
for i, cand_idx in enumerate(cand_idxs):
candidates_copy = candidates.copy()
candidates_copy[node_idx] = one_hot(cand_idx, world.n_nodes)
# recurse to make assignment for the next node in the unspecified cover
recursive_isomorphism_finder(
tmplt, world, candidates_copy,
unspec_node_idxs=unspec_node_idxs[1:],
verbose=verbose,
init_changed_cands=one_hot(node_idx, tmplt.n_nodes),
found_isomorphisms=found_isomorphisms)
return found_isomorphisms
def find_isomorphisms(tmplt, world, *, candidates=None, verbose=True):
""" Returns a list of isomorphisms as dictionaries mapping template nodes to
world nodes. Note: this is much slower than counting, and should only be
done for small numbers of isomorphisms and fully filtered candidate matrices
"""
if candidates is None:
        tmplt, world, candidates = run_filters(
            tmplt, world, filters=all_filters, verbose=verbose)
unspec_node_idxs = np.where(candidates.sum(axis=1) > 1)[0]
found_isomorphisms = []
return recursive_isomorphism_finder(
tmplt, world, candidates, verbose=verbose,
unspec_node_idxs=unspec_node_idxs,
init_changed_cands=np.zeros(tmplt.nodes.shape, dtype=np.bool),
found_isomorphisms=found_isomorphisms)
def print_isomorphisms(tmplt, world, *, candidates=None, verbose=True):
""" Prints the list of isomorphisms """
print(find_isomorphisms(tmplt, world, candidates=candidates,
verbose=verbose))
```
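For orientation, a minimal end-to-end sketch of the counting entry point, built the same way the experiment scripts above build their graphs (the explicit `labels` arrays mirror what `run_sudoku.py` sets before filtering; the example graphs are ours):

```python
import numpy as np
import uclasm
from scipy.sparse import csr_matrix

# Template: a triangle. World: the complete graph K4. One channel, "0".
tmplt_adj = [csr_matrix(np.ones((3, 3), dtype=int) - np.eye(3, dtype=int))]
world_adj = [csr_matrix(np.ones((4, 4), dtype=int) - np.eye(4, dtype=int))]
tmplt = uclasm.Graph(np.array(['a', 'b', 'c']), ['0'], tmplt_adj)
world = uclasm.Graph(np.array(['w0', 'w1', 'w2', 'w3']), ['0'], world_adj)

# Uniform labels, as run_sudoku.py sets before filtering.
tmplt.labels = np.array(['__'] * tmplt.n_nodes)
world.labels = np.array(['__'] * world.n_nodes)

# Filter the candidate matrix, then count node assignments.
tmplt, world, candidates = uclasm.run_filters(
    tmplt, world, candidates=np.ones((3, 4), dtype=bool),
    filters=uclasm.all_filters, verbose=False)
print(uclasm.count_isomorphisms(tmplt, world, candidates=candidates,
                                verbose=False))  # 4*3*2 = 24 for K3 in K4
```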
#### File: uclasm/filters/label_filter.py
```python
def label_filter(tmplt, world, candidates, *, verbose=False, **kwargs):
candidates[:,:] &= tmplt.labels.reshape(-1,1) == world.labels.reshape(1,-1)
return tmplt, world, candidates
```
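The one-liner above is a numpy broadcasting trick: comparing a column of template labels against a row of world labels yields an `(n_tmplt, n_world)` boolean compatibility matrix, which is ANDed into the candidate matrix. A standalone illustration with made-up labels:

```python
import numpy as np

tmplt_labels = np.array(['1R', '1R', '2C'])
world_labels = np.array(['1R', '2C', '1R', '3S'])

# (3,1) == (1,4) broadcasts to a (3,4) boolean compatibility matrix.
compatible = tmplt_labels.reshape(-1, 1) == world_labels.reshape(1, -1)
candidates = np.ones((3, 4), dtype=bool)
candidates &= compatible
print(candidates.astype(int))
# [[1 0 1 0]
#  [1 0 1 0]
#  [0 1 0 0]]
```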
#### File: uclasm/filters/validation_filter.py
```python
import uclasm
import numpy as np
from functools import reduce
# TODO: switch to keyword arguments throughout
def validate_alldiff_solns(tmplt, world, candidates, marked,
in_signal_only, node_to_marked_col_idx):
""" Check that there exists a solution to the alldiff problem """
# Map from tmplt index to possible candidate indices
var_to_vals = {
tmplt_idx: [
node_to_marked_col_idx[world.nodes[cand_idx]]
for cand_idx in range(world.n_nodes)
if candidates[tmplt_idx, cand_idx]
]
for tmplt_idx in range(tmplt.n_nodes)
}
# if a var has only one possible val, track it then throw it out.
matched_pairs = [(var, list(vals)[0]) for var, vals in var_to_vals.items()
if len(vals) == 1] # TODO: better variable name
var_to_vals = {var: vals for var, vals in var_to_vals.items()
if len(vals) > 1}
unspec_vars = list(var_to_vals.keys())
# which vars is each val a cand for?
val_to_vars = uclasm.invert(var_to_vals)
# gather sets of vals which have the same set of possible vars.
vars_to_vals = uclasm.values_map_to_same_key(val_to_vars)
vars_to_val_counts = {vars: len(vals)
for vars, vals in vars_to_vals.items()}
# each var can belong to multiple sets of vars which key vars_to_val_counts
# so here we find out which sets of vars each var belongs to
var_to_vars_list = {
var: [vars for vars in vars_to_val_counts.keys() if var in vars]
for var in var_to_vals}
def recursive_validate(var_to_vars_list, vars_to_vals, vars_to_val_counts):
if len(var_to_vars_list) == 0:
return True
# Retrieve an arbitrary unspecified variable
var, vars_list = var_to_vars_list.popitem()
found = False
# Iterate through possible assignments of that variable
for vars in vars_list:
# How many ways are there to assign the variable in this way?
n_vals = vars_to_val_counts[vars]
if n_vals == 0:
continue
vars_to_val_counts[vars] -= 1
if recursive_validate(var_to_vars_list, vars_to_vals, vars_to_val_counts):
found = True
# Unmark all nodes found
marked[np.ix_(list(vars), list(vars_to_vals[vars]))] = False
# put the count back so we don't mess up the recursion
vars_to_val_counts[vars] += 1
# put the list back so we don't mess up the recursion
var_to_vars_list[var] = vars_list
return found
if recursive_validate(var_to_vars_list, vars_to_vals, vars_to_val_counts):
# Unmark all pairs that were matched at the beginning
for matched_pair in matched_pairs:
if in_signal_only:
# Unmark all pairs corresponding to the found candidate
marked[:, matched_pair[1]] = False
else:
marked[matched_pair] = False
return True
return False
# TODO: switch to keyword arguments throughout
def validate_isomorphisms(tmplt, world, candidates, unspec_cover, marked,
in_signal_only, node_to_marked_col_idx):
""" Validate that at least one isomorphism exists and unmark it """
if len(unspec_cover) == 0:
return validate_alldiff_solns(tmplt, world, candidates, marked,
in_signal_only, node_to_marked_col_idx)
unspec_idx = unspec_cover[0]
unspec_cands = np.argwhere(candidates[unspec_idx]).flat
# TODO: is this actually an effective heuristic? Compare with random order
# Order unspec_cands to have marked nodes first
unspec_cands = sorted(unspec_cands,
key=lambda cand_idx: marked[unspec_idx, cand_idx],
reverse=True)
for cand_idx in unspec_cands:
# Make a copy to avoid messing up candidate sets during recursion
candidates_copy = candidates.copy()
candidates_copy[unspec_idx, :] = uclasm.one_hot(cand_idx, world.n_nodes)
# rerun filters after picking an assignment for the next unspec node
_, new_world, new_candidates = uclasm.run_filters(
tmplt, world, candidates=candidates_copy,
filters=uclasm.cheap_filters,
init_changed_cands=uclasm.one_hot(unspec_idx, tmplt.n_nodes))
# if any node has no cands due to the current assignment, skip
if not new_candidates.any(axis=1).all():
continue
if validate_isomorphisms(tmplt, new_world, new_candidates,
unspec_cover[1:], marked, in_signal_only,
node_to_marked_col_idx):
marked_col_idx = node_to_marked_col_idx[world.nodes[cand_idx]]
if in_signal_only:
# Unmark all pairs for the found candidate
marked[:, marked_col_idx] = False
else:
# Unmark the found pair
marked[unspec_idx, marked_col_idx] = False
return True
return False
def validation_filter(tmplt, world, *, candidates=None, in_signal_only=False,
verbose=False, **kwargs):
"""
This filter finds the minimum candidate set for each template node by
identifying one isomorphism for each candidate-template node pair
in_signal_only: Rather than checking pairs, if this option is True, only
check that each candidate participates in at least one signal, ignoring
which template node it corresponds to
"""
if candidates is None:
tmplt, world, candidates = uclasm.run_filters(
tmplt, world, filters=uclasm.all_filters,
candidates=np.ones((tmplt.n_nodes, world.n_nodes), dtype=np.bool),
**kwargs)
# Start by marking every current candidate-template node pair to be checked
# A zero entry here means that we have already checked whether or not the
# candidate corresponds to the template node in any signals.
marked = candidates.copy()
node_to_marked_col_idx = {node: idx for idx, node in enumerate(world.nodes)}
while marked.any():
if verbose:
print(marked.sum(), "marks remaining")
candidates_copy = candidates.copy()
# TODO: only recompute unspec_cover when necessary or not at all
# Get node cover for unspecified nodes
cand_counts = candidates.sum(axis=1)
unspec_subgraph = tmplt.subgraph(cand_counts > 1)
unspec_cover = uclasm.get_node_cover(unspec_subgraph)
unspec_cover = np.array([tmplt.node_idxs[unspec_subgraph.nodes[idx]]
for idx in unspec_cover], dtype=np.int)
# Find a marked template node idx and a cand to pair together
# Pick any pair with a mark
marked_tmplt_idx, marked_cand_idx = np.argwhere(marked)[0]
# unspecified template nodes which have any marks
marked_unspecs = marked[unspec_cover].any(axis=1)
# If there is a node in the unspec cover with a mark, prioritize it
if marked_unspecs.any():
# Pick the first node in the unspec cover that has a mark
marked_tmplt_idx = unspec_cover[marked_unspecs][0]
# Set a candidate for the marked template node as the marked cand
marked_cand_idx = np.argwhere(marked[marked_tmplt_idx])[0,0]
candidates_copy[marked_tmplt_idx, :] = uclasm.one_hot(marked_cand_idx,
world.n_nodes)
# TODO: pass arguments as keywords to avoid bugs when changes are made
if not validate_isomorphisms(tmplt, world, candidates_copy,
unspec_cover, marked, in_signal_only,
node_to_marked_col_idx):
# No valid isomorphisms: remove from is_cand
candidates[marked_tmplt_idx, marked_cand_idx] = False
# Unmark the pair that was checked
marked[marked_tmplt_idx, marked_cand_idx] = False
# TODO: run cheap filters to propagate change of candidates
# TODO: reduce world to cands
elif in_signal_only:
# Unmark all pairs for the candidate that was found
marked[:, marked_cand_idx] = False
else:
# Unmark the pair that was found
marked[marked_tmplt_idx, marked_cand_idx] = False
return tmplt, world, candidates
```
|
{
"source": "jdmoravec/nova",
"score": 2
}
|
#### File: functional/api_sample_tests/api_sample_base.py
```python
import os
import testscenarios
import nova.conf
from nova.tests import fixtures
from nova.tests.functional import api_paste_fixture
from nova.tests.functional import api_samples_test_base
CONF = nova.conf.CONF
# API samples heavily uses testscenarios. This allows us to use the
# same tests, with slight variations in configuration to ensure our
# various ways of calling the API are compatible. Testscenarios works
# through the class level ``scenarios`` variable. It is an array of
# tuples where the first value in each tuple is an arbitrary name for
# the scenario (should be unique), and the second item is a dictionary
# of attributes to change in the class for the test.
#
# By default we're running scenarios for 3 situations
#
# - Hitting the default /v2 endpoint with the v2.1 Compatibility stack
#
# - Hitting the default /v2.1 endpoint
#
# - Hitting the default /v2.1 endpoint without a project id in the URLs
#
# Things we need to set:
#
# - api_major_version - what version of the API we should be hitting
#
# - microversion - what API microversion should be used
#
# - _additional_fixtures - any additional fixtures need
#
# NOTE(sdague): if you want to build a test that only tests specific
# microversions, then replace the ``scenarios`` class variable in that
# test class with something like:
#
# [("v2_11", {'api_major_version': 'v2.1', 'microversion': '2.11'})]
class ApiSampleTestBaseV21(testscenarios.WithScenarios,
api_samples_test_base.ApiSampleTestBase):
SUPPORTS_CELLS = False
api_major_version = 'v2'
# any additional fixtures needed for this scenario
_additional_fixtures = []
sample_dir = None
# Include the project ID in request URLs by default. This is overridden
# for certain `scenarios` and by certain subclasses.
# Note that API sample tests also use this in substitutions to validate
# that URLs in responses (e.g. location of a server just created) are
# correctly constructed.
_use_project_id = True
# Availability zones for the API samples tests. Can be overridden by
# sub-classes. If set, the AvailabilityZoneFilter is not used.
availability_zones = ['us-west']
scenarios = [
# test v2 with the v2.1 compatibility stack
('v2', {
'api_major_version': 'v2'}),
# test v2.1 base microversion
('v2_1', {
'api_major_version': 'v2.1'}),
# test v2.18 code without project id
('v2_1_noproject_id', {
'api_major_version': 'v2.1',
'_use_project_id': False,
'_additional_fixtures': [
api_paste_fixture.ApiPasteNoProjectId]})
]
def setUp(self):
self.flags(use_ipv6=False)
self.flags(glance_link_prefix=self._get_glance_host(),
compute_link_prefix=self._get_host(),
group='api')
# load any additional fixtures specified by the scenario
for fix in self._additional_fixtures:
self.useFixture(fix())
if not self.SUPPORTS_CELLS:
# NOTE(danms): Disable base automatic DB (and cells) config
self.USES_DB = False
self.USES_DB_SELF = True
# super class call is delayed here so that we have the right
# paste and conf before loading all the services, as we can't
# change these later.
super(ApiSampleTestBaseV21, self).setUp()
if not self.SUPPORTS_CELLS:
self.useFixture(fixtures.Database())
self.useFixture(fixtures.Database(database='api'))
self.useFixture(fixtures.DefaultFlavorsFixture())
self.useFixture(fixtures.SingleCellSimple())
super(ApiSampleTestBaseV21, self)._setup_services()
self.useFixture(fixtures.SpawnIsSynchronousFixture())
# this is used to generate sample docs
self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None
if self.availability_zones:
self.useFixture(
fixtures.AvailabilityZoneFixture(self.availability_zones))
def _setup_services(self):
pass
def _setup_scheduler_service(self):
"""Overrides _IntegratedTestBase._setup_scheduler_service to filter
out the AvailabilityZoneFilter prior to starting the scheduler.
"""
if self.availability_zones:
# The test is using fake zones so disable the
# AvailabilityZoneFilter which is otherwise enabled by default.
enabled_filters = CONF.filter_scheduler.enabled_filters
if 'AvailabilityZoneFilter' in enabled_filters:
enabled_filters.remove('AvailabilityZoneFilter')
self.flags(enabled_filters=enabled_filters,
group='filter_scheduler')
return super(ApiSampleTestBaseV21, self)._setup_scheduler_service()
```
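Following the NOTE(sdague) guidance in the comment block above, a subclass that should only exercise specific microversions replaces the class-level `scenarios`; a hypothetical sketch:

```python
# Hypothetical subclass; the class name and sample_dir are illustrative.
# Assumes the base class above is importable from
# nova.tests.functional.api_sample_tests.api_sample_base.
class VersionsOnlySampleJsonTest(ApiSampleTestBaseV21):
    sample_dir = 'versions'
    # Only exercise the v2.1 endpoint at microversion 2.11, replacing the
    # default three scenarios defined on the base class.
    scenarios = [
        ('v2_11', {'api_major_version': 'v2.1', 'microversion': '2.11'}),
    ]
```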
#### File: tests/functional/test_json_filter.py
```python
from oslo_serialization import jsonutils
from nova import conf
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
CONF = conf.CONF
class JsonFilterTestCase(integrated_helpers.ProviderUsageBaseTestCase):
"""Functional tests for the JsonFilter scheduler filter."""
microversion = '2.1'
compute_driver = 'fake.SmallFakeDriver'
def setUp(self):
# Need to enable the JsonFilter before starting the scheduler service
# in the parent class.
enabled_filters = CONF.filter_scheduler.enabled_filters
if 'JsonFilter' not in enabled_filters:
enabled_filters.append('JsonFilter')
self.flags(enabled_filters=enabled_filters,
group='filter_scheduler')
# Use our custom weigher defined above to make sure that we have
# a predictable scheduling sort order during server create.
self.useFixture(nova_fixtures.HostNameWeigherFixture())
super(JsonFilterTestCase, self).setUp()
# Now create two compute services which will have unique host and
# node names.
self._start_compute('host1')
self._start_compute('host2')
def test_filter_on_hypervisor_hostname(self):
"""Tests a commonly used scenario for people trying to build a
baremetal server on a specific ironic node. Note that although
an ironic deployment would normally have a 1:M host:node topology
        the test is set up with a 1:1 host:node but we can still test using
that by filtering on hypervisor_hostname. Also note that an admin
could force a server to build on a specific host by passing
availability_zone=<zone>::<nodename> but that means no filters get run
which might be undesirable.
"""
# Create a server passing the hypervisor_hostname query scheduler hint
# for host2 to make sure the filter works. If not, because of the
# custom HostNameWeigher, host1 would be chosen.
query = jsonutils.dumps(['=', '$hypervisor_hostname', 'host2'])
server = self._build_minimal_create_server_request(
'test_filter_on_hypervisor_hostname')
request = {'server': server, 'os:scheduler_hints': {'query': query}}
server = self.api.post_server(request)
server = self._wait_for_state_change(server, 'ACTIVE')
        # Since we requested host2, the server should land there despite
        # host1 being weighted higher.
self.assertEqual(
'host2', server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
```
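The scheduler hint above is a JSON-encoded prefix expression. JsonFilter queries can also compose comparisons with boolean operators; a hedged sketch of a compound hint (the operator spellings and the `$free_ram_mb` host attribute are assumptions to verify against the JsonFilter documentation):

```python
from oslo_serialization import jsonutils

# Prefix notation: each clause is [op, arg, ...]; '$'-prefixed names
# refer to host state attributes.
query = jsonutils.dumps(
    ['and',
     ['>=', '$free_ram_mb', 1024],
     ['=', '$hypervisor_hostname', 'host2']])
# Passed the same way as in the test above:
# request = {'server': server, 'os:scheduler_hints': {'query': query}}
```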
#### File: tests/unit/policy_fixture.py
```python
import os
import fixtures
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
import nova.conf
from nova.conf import paths
from nova import policies
import nova.policy
from nova.tests.unit import fake_policy
CONF = nova.conf.CONF
class RealPolicyFixture(fixtures.Fixture):
"""Load the live policy for tests.
A base policy fixture that starts with the assumption that you'd
like to load and enforce the shipped default policy in tests.
Provides interfaces to tinker with both the contents and location
of the policy file before loading to allow overrides. To do this
implement ``_prepare_policy`` in the subclass, and adjust the
``policy_file`` accordingly.
"""
def _prepare_policy(self):
"""Allow changing of the policy before we get started"""
pass
def setUp(self):
super(RealPolicyFixture, self).setUp()
# policy_file can be overridden by subclasses
self.policy_file = paths.state_path_def('etc/nova/policy.json')
self._prepare_policy()
CONF.set_override('policy_file', self.policy_file, group='oslo_policy')
nova.policy.reset()
nova.policy.init()
# NOTE(gmann): Logging all the deprecation warning for every unit
# test will overflow the log files and leads to error. Suppress
# the deprecation warning for tests only.
nova.policy._ENFORCER.suppress_deprecation_warnings = True
self.addCleanup(nova.policy.reset)
def set_rules(self, rules, overwrite=True):
policy = nova.policy._ENFORCER
policy.set_rules(oslo_policy.Rules.from_dict(rules),
overwrite=overwrite)
def add_missing_default_rules(self, rules):
"""Adds default rules and their values to the given rules dict.
        The given rules dict may have an incomplete set of policy rules.
This method will add the default policy rules and their values to
the dict. It will not override the existing rules.
"""
for rule in policies.list_rules():
# NOTE(lbragstad): Only write the rule if it isn't already in the
# rule set and if it isn't deprecated. Otherwise we're just going
# to spam test runs with deprecate policy warnings.
if rule.name not in rules and not rule.deprecated_for_removal:
rules[rule.name] = rule.check_str
class PolicyFixture(RealPolicyFixture):
"""Load a fake policy from nova.tests.unit.fake_policy
This overrides the policy with a completely fake and synthetic
policy file.
NOTE(sdague): the use of this is deprecated, and we should unwind
the tests so that they can function with the real policy. This is
mostly legacy because our default test instances and default test
contexts don't match up. It appears that in many cases fake_policy
was just modified to whatever makes tests pass, which makes it
dangerous to be used in tree. Long term a NullPolicy fixture might
be better in those cases.
"""
def _prepare_policy(self):
self.policy_dir = self.useFixture(fixtures.TempDir())
self.policy_file = os.path.join(self.policy_dir.path,
'policy.json')
# load the fake_policy data and add the missing default rules.
policy_rules = jsonutils.loads(fake_policy.policy_data)
self.add_missing_default_rules(policy_rules)
with open(self.policy_file, 'w') as f:
jsonutils.dump(policy_rules, f)
CONF.set_override('policy_dirs', [], group='oslo_policy')
class RoleBasedPolicyFixture(RealPolicyFixture):
"""Load a modified policy which allows all actions only by a single role.
This fixture can be used for testing role based permissions as it
provides a version of the policy which stomps over all previous
declaration and makes every action only available to a single
role.
"""
def __init__(self, role="admin", *args, **kwargs):
super(RoleBasedPolicyFixture, self).__init__(*args, **kwargs)
self.role = role
def _prepare_policy(self):
# Convert all actions to require the specified role
policy = {}
for rule in policies.list_rules():
policy[rule.name] = 'role:%s' % self.role
self.policy_dir = self.useFixture(fixtures.TempDir())
self.policy_file = os.path.join(self.policy_dir.path, 'policy.json')
with open(self.policy_file, 'w') as f:
jsonutils.dump(policy, f)
class OverridePolicyFixture(RealPolicyFixture):
"""Load the set of requested rules into policy file
    This overrides the policy with only the requested rules, written into
    the policy file, so the defaults are not used. This fixture verifies the
    use case where an operator has overridden the policy rules in the policy
    file. One example is when policy rules are deprecated. In that case tests
    can use this fixture to verify that, if deprecated rules are overridden,
    nova enforces the overridden rules and not only the defaults.
    As per the oslo.policy deprecation feature, if a deprecated rule is
    overridden in the policy file, the overridden check is used to verify
    the policy.
Example of usage:
self.deprecated_policy = "os_compute_api:os-services"
# set check_str as different than defaults to verify the
# rule overridden case.
override_rules = {self.deprecated_policy: 'is_admin:True'}
# NOTE(gmann): Only override the deprecated rule in policy file so that
# we can verify if overridden checks are considered by oslo.policy.
# Oslo.policy will consider the overridden rules if:
# 1. overridden checks are different than defaults
# 2. new rules for deprecated rules are not present in policy file
self.policy = self.useFixture(policy_fixture.OverridePolicyFixture(
rules_in_file=override_rules))
"""
def __init__(self, rules_in_file, *args, **kwargs):
self.rules_in_file = rules_in_file
super(OverridePolicyFixture, self).__init__(*args, **kwargs)
def _prepare_policy(self):
self.policy_dir = self.useFixture(fixtures.TempDir())
self.policy_file = os.path.join(self.policy_dir.path,
'policy.json')
with open(self.policy_file, 'w') as f:
jsonutils.dump(self.rules_in_file, f)
CONF.set_override('policy_dirs', [], group='oslo_policy')
```
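A sketch of how a unit test might opt into one of these fixtures and then tighten a single rule through `set_rules` (the test class and the rule name are illustrative):

```python
# Hypothetical usage; the imports and the example rule are illustrative.
from nova import test
from nova.tests.unit import policy_fixture

class AdminOnlyServicesTest(test.NoDBTestCase):
    def setUp(self):
        super(AdminOnlyServicesTest, self).setUp()
        # Enforce the real shipped policy, then override one rule in place.
        self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
        self.policy.set_rules({'os_compute_api:os-services': 'role:admin'},
                              overwrite=False)
```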
|
{
"source": "jdmueller/ArmoniaSaleor",
"score": 2
}
|
#### File: ArmoniaSaleor/custompages/views.py
```python
import datetime
from django.shortcuts import get_object_or_404, redirect
from django.template.loader import get_template
from django.http import Http404
from django.template.response import TemplateResponse
from django.template import TemplateDoesNotExist
from django.core.mail import send_mail
from .forms import ContactForm
def about(request):
return TemplateResponse(request, "custompages/about.html", {})
def technology(request):
return TemplateResponse(request, "custompages/technology.html", {})
def contact(request):
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
subject = 'Contact Request: ' + form.cleaned_data['name']
name = form.cleaned_data['name']
email = form.cleaned_data['email']
sender = '<EMAIL>'
phone = form.cleaned_data['phone']
message = ('Client Name: ' + name + '\n\n Phone: ' + phone +
'\n\n Email: ' + email + '\n\n Subject: ' + subject +
'\n\nMessage:\n ' + form.cleaned_data['message'] +
'\n\nMessage sent from contact page')
recipients = ['<EMAIL>']
send_mail(subject, message, sender, recipients)
submit_time = datetime.datetime.now()
message = form.cleaned_data['message']
return redirect("/thank-you/")
else:
form = ContactForm()
context = {
'form': form,
}
return TemplateResponse(request, "custompages/contact.html", context)
def site_demos(request):
return TemplateResponse(request, "custompages/site_demos.html", {})
def thank_you(request):
return TemplateResponse(request, "custompages/thank_you.html", {})
def money_back_guarantee(request):
return TemplateResponse(request, "custompages/money_back_guarantee.html", {})
def pyrealtor_detail(request, slug):
template = "custompages/pyrealtor/" + slug + ".html"
context = {
'slug': slug,
}
print(template)
try:
get_template(template)
return TemplateResponse(request, template, context)
except TemplateDoesNotExist:
raise Http404
def pyrealtor_details(request):
return TemplateResponse(request, "custompages/pyrealtor.html", {'fluid': True, 'fontawesome': True, 'hidenav': True})
```
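The contact view reads `name`, `email`, `phone`, and `message` from `cleaned_data`, so the imported `ContactForm` must declare at least those fields. A minimal reconstruction consistent with that usage (the real form in `custompages/forms.py` may differ in widgets and validation):

```python
from django import forms

class ContactForm(forms.Form):
    # Hypothetical field definitions inferred from the view above.
    name = forms.CharField(max_length=100)
    email = forms.EmailField()
    phone = forms.CharField(max_length=30)
    message = forms.CharField(widget=forms.Textarea)
```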
|
{
"source": "jdmulligan/STAT",
"score": 2
}
|
#### File: jdmulligan/STAT/merge_results.py
```python
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import scipy
import statistics
import os
import sys
import pickle
import argparse
from src.design import Design
from src import emulator, mcmc, init
import run_analysis_base
################################################################
class MergeResults(run_analysis_base.RunAnalysisBase):
#---------------------------------------------------------------
# Constructor
#---------------------------------------------------------------
def __init__(self, config_file, model, output_dir, **kwargs):
# Initialize base class
super(MergeResults, self).__init__(config_file, model, output_dir, **kwargs)
self.output_dir_holdout = os.path.join(self.output_dir_base, '{}/holdout'.format(model))
self.plot_dir = os.path.join(self.output_dir_base, model)
#---------------------------------------------------------------
# Run analysis
#---------------------------------------------------------------
def run_analysis(self):
# Initialize data and model from files
self.initialize()
# Initialize pickled config settings
init.Init(self.workdir).Initialize(self)
# Emulator validation: Store lists of true RAA, emulator RAA at each holdout point
SystemCount = len(self.AllData["systems"])
true_raa_aggregated = [[] for i in range(SystemCount)]
emulator_raa_mean_aggregated = [[] for i in range(SystemCount)]
emulator_raa_stdev_aggregated = [[] for i in range(SystemCount)]
# Store a list of the chi2 of the holdout residual
self.avg_residuals = []
# Store list of closure test result
T_qhat_closure_result_list = []
T_qhat_closure_result_list2 = []
T_qhat_closure_truth_list = []
E_qhat_closure_result_list = []
E_qhat_closure_result_list2 = []
E_qhat_closure_truth_list = []
theta_closure_list = []
theta_closure_result_dict = {}
theta_closure_result2_dict = {}
for name in self.Names:
theta_closure_result_dict[name] = []
theta_closure_result2_dict[name] = []
n_design_points = len(next(os.walk(self.output_dir_holdout))[1])
print('iterating through {} results'.format(n_design_points))
for i in range(0, n_design_points):
# Load pkl file of results
result_path = os.path.join(self.output_dir_holdout, '{}/result.pkl'.format(i))
if not os.path.exists(result_path):
print('Warning: {} does not exist'.format(result_path))
continue
with open(result_path, 'rb') as f:
result_dict = pickle.load(f)
# Holdout test
true_raa = result_dict['true_raa']
emulator_raa_mean = result_dict['emulator_raa_mean']
emulator_raa_stdev = result_dict['emulator_raa_stdev']
[true_raa_aggregated[i].append(true_raa[i]) for i in range(SystemCount)]
[emulator_raa_mean_aggregated[i].append(emulator_raa_mean[i]) for i in range(SystemCount)]
[emulator_raa_stdev_aggregated[i].append(emulator_raa_stdev[i]) for i in range(SystemCount)]
# Closure test
# qhat vs T
T_array = result_dict['T_array']
T_qhat_truth = result_dict['T_qhat_truth']
T_qhat_mean = result_dict['T_qhat_mean']
T_qhat_closure = result_dict['T_qhat_closure']
T_qhat_closure2 = result_dict['T_qhat_closure2']
T_qhat_closure_result_list.append(T_qhat_closure)
T_qhat_closure_result_list2.append(T_qhat_closure2)
T_qhat_closure_truth_list.append(T_qhat_truth)
# qhat vs E
E_array = result_dict['E_array']
E_qhat_truth = result_dict['E_qhat_truth']
E_qhat_mean = result_dict['E_qhat_mean']
E_qhat_closure = result_dict['E_qhat_closure']
E_qhat_closure2 = result_dict['E_qhat_closure2']
E_qhat_closure_result_list.append(E_qhat_closure)
E_qhat_closure_result_list2.append(E_qhat_closure2)
E_qhat_closure_truth_list.append(E_qhat_truth)
# ABCD closure
theta = result_dict['theta']
theta_closure_list.append(theta)
for name in self.Names:
theta_closure_result_dict[name].append(result_dict['{}_closure'.format(name)])
theta_closure_result2_dict[name].append(result_dict['{}_closure2'.format(name)])
# Plot summary of holdout tests
#self.plot_avg_residuals()
self.plot_emulator_validation(true_raa_aggregated, emulator_raa_mean_aggregated, emulator_raa_stdev_aggregated)
self.plot_emulator_uncertainty_validation(true_raa_aggregated, emulator_raa_mean_aggregated, emulator_raa_stdev_aggregated)
# Plot summary of qhat closure tests
self.plot_closure_summary_qhat(T_array, T_qhat_closure_result_list,
T_qhat_closure_truth_list, type='T', CR='90')
self.plot_closure_summary_qhat(T_array, T_qhat_closure_result_list2,
T_qhat_closure_truth_list, type='T', CR='60')
self.plot_closure_summary_qhat(E_array, E_qhat_closure_result_list,
E_qhat_closure_truth_list, type='E', CR='90')
self.plot_closure_summary_qhat(E_array, E_qhat_closure_result_list2,
E_qhat_closure_truth_list, type='E', CR='60')
# Print theta closure summary
for i,name in enumerate(self.Names):
self.plot_closure_summary_theta(i, name, theta_closure_list, theta_closure_result_dict, CR='90')
self.plot_closure_summary_theta(i, name, theta_closure_list, theta_closure_result2_dict, CR='60')
#---------------------------------------------------------------
# Plot summary of closure tests
#
# theta_closure_list is a list (per design point) of theta values
#
# theta_closure_result_dict is a dictionary (per ABCD) of lists (per design point)
# [{A: [True, True, ...]}, {B: [True, False, ...]}, ... ]
#
#---------------------------------------------------------------
def plot_closure_summary_theta(self, i, name, theta_closure_list, theta_closure_result_dict, CR='90'):
theta_i_list = [theta[i] for theta in theta_closure_list]
qhat_list = [self.qhat(T=0.3, E=100, parameters=theta) for theta in theta_closure_list]
success_list = theta_closure_result_dict[name]
# Construct 2D histogram of qhat vs theta[i],
# where amplitude is fraction of successful closure tests
theta_i_range = self.ranges_transformed[i]
xbins = np.linspace(theta_i_range[0], theta_i_range[1], num=8)
    xwidth = (theta_i_range[1]-theta_i_range[0])/(7*2)  # half bin width, used to center text labels
ybins = [0, 0.5, 1, 2, 3, 4, 5, 6, 8, 10, 15]
ybins_center = [(ybins[i+1]+ybins[i])/2 for i in range(len(ybins)-1)]
x = np.array(theta_i_list)
y = np.array(qhat_list)
z = np.array(success_list)
# Histogram of fraction of successes
self.N_per_bin = 1
H, xedges, yedges, binnumber= scipy.stats.binned_statistic_2d(x, y, z, statistic=np.mean,
bins=[xbins, ybins])
XX, YY = np.meshgrid(xedges, yedges)
fig = plt.figure(figsize = (11,9))
ax1=plt.subplot(111)
plot1 = ax1.pcolormesh(XX, YY, H.T)
fig.colorbar(plot1, ax=ax1)
# Histogram of efficiency uncertainty
Herr, xedges, yedges, binnumber= scipy.stats.binned_statistic_2d(x, y, z,
statistic=self.efficiency_uncertainty_bayesian,
bins=[xbins, ybins])
plt.xlabel(name, size=14)
plt.ylabel(r'$\left< \hat{q}/T^3 \right>_{T=300\;\rm{MeV}, E=100\;\rm{GeV}}$', size=14)
plt.title('Fraction of closure tests contained in {}% CR'.format(CR), size=14)
mean = np.mean(z)
self.N_per_bin = 1
unc = self.efficiency_uncertainty_bayesian(z)
ax1.legend(title='mean: {:0.2f}{}{:0.2f}'.format(mean, r'$\pm$', unc),
title_fontsize=14, loc='upper right')
for i in range(len(xbins)-1):
for j in range(len(ybins)-1):
zval = H[i][j]
zerr = Herr[i][j]
if np.isnan(zval) or np.isnan(zerr):
continue
ax1.text(xbins[i]+xwidth, ybins_center[j], '{:0.2f}{}{:0.2f}'.format(zval, r'$\pm$',zerr),
size=8, ha='center', va='center',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='0.3'))
# Save
plt.savefig('{}/Closure_Summary2D_{}_{}.pdf'.format(self.plot_dir, name, CR), dpi = 192)
plt.close('all')
#---------------------------------------------------------------
# Plot summary of closure tests
#
# qhat_closure_result_list is a list (per design point) of lists (of T values)
# [ [True, True, ...], [True, False, ...], ... ] where each sublist is a given design point
# qhat_closure_truth_list is a list (per design point) of lists (of T values)
# [ [qhat_T1, qhat_T2, ...], [qhat_T1, qhat_T2, ...], ... ] where each sublist is a given design point
#---------------------------------------------------------------
def plot_closure_summary_qhat(self, x_array, qhat_closure_result_list,
qhat_closure_truth_list, type='T', CR='90'):
# Construct 2D histogram of <qhat of design point> vs T,
# where amplitude is fraction of successful closure tests
# For each T and design point, compute <qhat of design point>,
# T, and the fraction of successful closure tests
x_list = []
qhat_mean_list = []
success_list = []
for i,x in enumerate(x_array):
for j,design in enumerate(qhat_closure_result_list):
qhat_mean = statistics.mean(qhat_closure_truth_list[j])
success = qhat_closure_result_list[j][i]
x_list.append(x)
qhat_mean_list.append(qhat_mean)
success_list.append(success)
# Now draw the mean success rate in 2D
    if type == 'T':
xbins = np.linspace(0.15, 0.5, num=8)
xwidth = 0.025
self.N_per_bin = 50/7 # We have multiple T points per bin
    if type == 'E':
xbins = np.linspace(20, 200, num=10)
xwidth = 10
self.N_per_bin = 50/9 # We have multiple E points per bin
ybins = [0, 0.5, 1, 2, 3, 4, 5, 6, 8, 10, 15]
ybins_center = [(ybins[i+1]+ybins[i])/2 for i in range(len(ybins)-1)]
x = np.array(x_list)
y = np.array(qhat_mean_list)
z = np.array(success_list)
# Histogram of fraction of successes
H, xedges, yedges, binnumber= scipy.stats.binned_statistic_2d(x, y, z, statistic=np.mean,
bins=[xbins, ybins])
H = np.ma.masked_invalid(H) # masking where there was no data
XX, YY = np.meshgrid(xedges, yedges)
fig = plt.figure(figsize = (11,9))
ax1=plt.subplot(111)
plot1 = ax1.pcolormesh(XX, YY, H.T)
fig.colorbar(plot1, ax=ax1)
# Histogram of binomial uncertainty
Herr, xedges, yedges, binnumber= scipy.stats.binned_statistic_2d(x, y, z,
statistic=self.efficiency_uncertainty_bayesian,
bins=[xbins, ybins])
Herr = np.ma.masked_invalid(Herr)
plt.xlabel('{} (GeV)'.format(type), size=14)
    if type == 'T':
      plt.ylabel(r'$\left< \hat{q}/T^3 \right>_{E=100\;\rm{GeV}}$', size=14)
    if type == 'E':
      plt.ylabel(r'$\left< \hat{q}/T^3 \right>_{T=300\;\rm{MeV}}$', size=14)
plt.title('Fraction of closure tests contained in {}% CR'.format(CR), size=14)
mean = np.mean(z)
self.N_per_bin = 50 # Here, we take just one point per curve
unc = self.efficiency_uncertainty_bayesian(z)
ax1.legend(title='mean: {:0.2f}{}{:0.2f}'.format(mean, r'$\pm$', unc),
title_fontsize=14, loc='upper right')
for i in range(len(xbins)-1):
for j in range(len(ybins)-1):
zval = H[i][j]
zerr = Herr[i][j]
if np.isnan(zval) or np.isnan(zerr):
continue
ax1.text(xbins[i]+xwidth, ybins_center[j], '{:0.2f}{}{:0.2f}'.format(zval, r'$\pm$',zerr),
size=8, ha='center', va='center',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='0.3'))
# Save
plt.savefig('{}/Closure_Summary2D_{}_{}.pdf'.format(self.plot_dir, type, CR), dpi = 192)
plt.close('all')
#---------------------------------------------------------------
# Compute binomial uncertainty from a list of True/False values
# [True, True, False, True, ...]
#---------------------------------------------------------------
def efficiency_uncertainty_binomial(self, success_list):
length = len(success_list)
sum = np.sum(success_list)
mean = 1.*sum/length
# We have multiple T points per bin, which would underestimate the uncertainty
# since neighboring points are highly correlated
real_length = length / self.N_per_bin
variance = real_length*mean*(1-mean)
sigma = np.sqrt(variance)
return sigma/real_length
#---------------------------------------------------------------
# Compute bayesian uncertainty on efficiency from a list of True/False values
# [True, True, False, True, ...]
# http://phys.kent.edu/~smargeti/STAR/D0/Ullrich-Errors.pdf
#---------------------------------------------------------------
def efficiency_uncertainty_bayesian(self, success_list):
length = len(success_list)
sum = np.sum(success_list)
mean = 1.*sum/length
# We have multiple T points per bin, which would underestimate the uncertainty
# since neighboring points are highly correlated
real_length = length / self.N_per_bin
k = mean*real_length
n = real_length
variance = (k+1)*(k+2)/((n+2)*(n+3)) - (k+1)*(k+1)/((n+2)*(n+2))
return np.sqrt(variance)
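  # For reference: with a uniform Beta(1,1) prior, the posterior for an
  # efficiency after k successes in n trials is Beta(k+1, n-k+1), and the
  # method above computes its variance as E[p^2] - E[p]^2:
  #   Var = (k+1)(k+2)/((n+2)(n+3)) - [(k+1)/(n+2)]^2
  # Worked example (illustrative numbers): k=5, n=10 gives
  #   Var = 42/156 - 0.25 ~= 0.0192, i.e. sigma ~= 0.139.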
#---------------------------------------------------------------
# Plot emulator validation
#
# true_raa and emulator_raa are lists (per system) of lists (per design point) of lists
# e.g. true_raa[i] = [[RAA_0, RAA_1,...], [RAA_0, RAA_1, ...], ...]
#
#---------------------------------------------------------------
def plot_emulator_validation(self, true_raa, emulator_raa_mean, emulator_raa_stdev):
# Loop through emulators
for cent in range(0,2):
# Construct a figure with two plots
plt.figure(1, figsize=(10, 6))
ax_scatter = plt.axes([0.1, 0.13, 0.6, 0.8]) # [left, bottom, width, height]
ax_residual = plt.axes([0.81, 0.13, 0.15, 0.8])
markers = ['o', 's', 'D']
SystemCount = len(self.AllData["systems"])
for i in range(SystemCount):
system = self.AllData['systems'][i]
if 'AuAu' in system:
if cent == 0:
system_label = 'Au-Au \;200\; GeV, 0-10\%'
if cent == 1:
system_label = 'Au-Au \;200\; GeV, 40-50\%'
else:
if '2760' in system:
if cent == 0:
system_label = 'Pb-Pb \;2.76\; TeV, 0-5\%'
if cent == 1:
system_label = 'Pb-Pb \;2.76\; TeV, 30-40\%'
elif '5020' in system:
if cent == 0:
system_label = 'Pb-Pb \;5.02\; TeV, 0-10\%'
if cent == 1:
system_label = 'Pb-Pb \;5.02\; TeV, 30-50\%'
#color = sns.color_palette('colorblind')[i]
color = self.colors[i]
# Optionally: Remove outlier points from emulator validation plot
remove_outliers = False
if remove_outliers:
if self.model == 'LBT':
remove = [79, 124, 135]
if self.model == 'MATTER':
remove = [59, 60, 61, 62]
if self.model == 'MATTER+LBT1':
remove = [0, 2, 5, 12, 17, 28, 31, 34, 37, 46, 50, 56, 63, 65, 69]
if self.model == 'MATTER+LBT2':
remove = [2, 3, 14, 19, 20, 21, 27, 28, 33, 56]
for index in sorted(remove, reverse=True):
del true_raa[i][index]
del emulator_raa_mean[i][index]
del emulator_raa_stdev[i][index]
true_raa_flat_i = [item for sublist in true_raa[i] for item in sublist[cent]]
emulator_raa_mean_flat_i = [item for sublist in emulator_raa_mean[i] for item in sublist[cent]]
emulator_raa_stdev_flat_i = [item for sublist in emulator_raa_stdev[i] for item in sublist[cent]]
# Get RAA points
true_raa_i = np.array(true_raa_flat_i)
emulator_raa_mean_i = np.array(emulator_raa_mean_flat_i)
emulator_raa_stdev_i = np.array(emulator_raa_stdev_flat_i)
normalized_residual_i = np.divide(true_raa_i-emulator_raa_mean_i, emulator_raa_stdev_i)
# Draw scatter plot
ax_scatter.scatter(true_raa_i, emulator_raa_mean_i, s=5, marker=markers[i],
color=color, alpha=0.7, label=r'$\rm{{{}}}$'.format(system_label), linewidth=0)
#ax_scatter.set_ylim([0, 1.19])
#ax_scatter.set_xlim([0, 1.19])
ax_scatter.set_xlabel(r'$R_{\rm{AA}}^{\rm{true}}$', fontsize=20)
ax_scatter.set_ylabel(r'$R_{\rm{AA}}^{\rm{emulator}}$', fontsize=20)
ax_scatter.legend(title=self.model, title_fontsize=16,
loc='upper left', fontsize=14, markerscale=5)
plt.setp(ax_scatter.get_xticklabels(), fontsize=14)
plt.setp(ax_scatter.get_yticklabels(), fontsize=14)
# Draw line with slope 1
ax_scatter.plot([0,1], [0,1], sns.xkcd_rgb['almost black'], alpha=0.3,
linewidth=3, linestyle='--')
# Print mean value of emulator uncertainty
stdev_mean_relative = np.divide(emulator_raa_stdev_i, true_raa_i)
stdev_mean = np.mean(stdev_mean_relative)
text = r'$\left< \sigma_{{\rm{{emulator}}}}^{{\rm{{{}}}}} \right> = {:0.1f}\%$'.format(system_label, 100*stdev_mean)
ax_scatter.text(0.4, 0.17-0.09*i, text, fontsize=16)
# Draw normalization residuals
                max_residual = 3  # avoid shadowing the builtin max
                bins = np.linspace(-max_residual, max_residual, 30)
x = (bins[1:] + bins[:-1])/2
h = ax_residual.hist(normalized_residual_i, color=color, histtype='step',
orientation='horizontal', linewidth=3, alpha=0.8, density=True, bins=bins)
ax_residual.scatter(h[0], x, color=color, s=10, marker=markers[i])
ax_residual.set_ylabel(r'$\left(R_{\rm{AA}}^{\rm{true}} - R_{\rm{AA}}^{\rm{emulator}}\right) / \sigma_{\rm{emulator}}$',
fontsize=20)
plt.setp(ax_residual.get_xticklabels(), fontsize=14)
plt.setp(ax_residual.get_yticklabels(), fontsize=14)
# Print out indices of points that deviate significantly
if remove_outliers:
stdev = np.std(normalized_residual_i)
for j,true_sublist in enumerate(true_raa[i]):
emulator_sublist = emulator_raa_mean[i][j]
for k,true_raa_value in enumerate(true_sublist):
emulator_raa_value = emulator_sublist[k]
normalized_residual = (true_raa_value-emulator_raa_value)/true_raa_value
if np.abs(normalized_residual) > 3*stdev:
print('Index {} has poor emulator validation...'.format(j))
plt.savefig('{}/EmulatorValidation_{}.pdf'.format(self.plot_dir, cent))
plt.close('all')
#---------------------------------------------------------------
# Plot emulator uncertainty validation
#
# true_raa and emulator_raa are lists (per system) of lists (per design point) of lists
# e.g. true_raa[i] = [[RAA_0, RAA_1,...], [RAA_0, RAA_1, ...], ...]
#
#---------------------------------------------------------------
def plot_emulator_uncertainty_validation(self, true_raa, emulator_raa_mean, emulator_raa_stdev):
# Loop through emulators
for cent in range(0,2):
# Construct a figure with two plots
plt.figure(1, figsize=(10, 6))
ax_scatter = plt.axes([0.1, 0.13, 0.6, 0.8]) # [left, bottom, width, height]
ax_residual = plt.axes([0.81, 0.13, 0.15, 0.8])
SystemCount = len(self.AllData["systems"])
for i in range(SystemCount):
system = self.AllData['systems'][i]
if 'AuAu' in system:
if cent == 0:
system_label = 'Au-Au \;200\; GeV, 0-10\%'
if cent == 1:
system_label = 'Au-Au \;200\; GeV, 40-50\%'
else:
if '2760' in system:
if cent == 0:
system_label = 'Pb-Pb \;2.76\; TeV, 0-5\%'
if cent == 1:
system_label = 'Pb-Pb \;2.76\; TeV, 30-40\%'
elif '5020' in system:
if cent == 0:
system_label = 'Pb-Pb \;5.02\; TeV, 0-10\%'
if cent == 1:
system_label = 'Pb-Pb \;5.02\; TeV, 30-50\%'
#color = sns.color_palette('colorblind')[i]
color = self.colors[i]
# Optionally: Remove outlier points from emulator validation plot
remove_outliers = False
if remove_outliers:
if self.model == 'LBT':
remove = [79, 124, 135]
if self.model == 'MATTER':
remove = [59, 60, 61, 62]
if self.model == 'MATTER+LBT1':
remove = [0, 2, 5, 12, 17, 28, 31, 34, 37, 46, 50, 56, 63, 65, 69]
if self.model == 'MATTER+LBT2':
remove = [2, 3, 14, 19, 20, 21, 27, 28, 33, 56]
for index in sorted(remove, reverse=True):
del true_raa[i][index]
del emulator_raa_mean[i][index]
del emulator_raa_stdev[i][index]
true_raa_flat_i = [item for sublist in true_raa[i] for item in sublist[cent]]
emulator_raa_mean_flat_i = [item for sublist in emulator_raa_mean[i] for item in sublist[cent]]
emulator_raa_stdev_flat_i = [item for sublist in emulator_raa_stdev[i] for item in sublist[cent]]
# Get RAA points
true_raa_i = np.array(true_raa_flat_i)
emulator_raa_mean_i = np.array(emulator_raa_mean_flat_i)
emulator_raa_stdev_i = np.array(emulator_raa_stdev_flat_i)
normalized_residual_i = np.divide(true_raa_i-emulator_raa_mean_i, emulator_raa_stdev_i)
# Draw scatter plot
ax_scatter.scatter(true_raa_i, emulator_raa_stdev_i, s=5,
color=color, alpha=0.7, label=r'$\rm{{{}}}$'.format(system_label), linewidth=0)
#ax_scatter.set_ylim([0, 1.19])
#ax_scatter.set_xlim([0, 1.19])
ax_scatter.set_xlabel(r'$R_{\rm{AA}}^{\rm{true}}$', fontsize=18)
ax_scatter.set_ylabel(r'$\sigma_{\rm{emulator}}$', fontsize=18)
ax_scatter.legend(title=self.model, title_fontsize=16,
loc='upper left', fontsize=14, markerscale=5)
# Draw normalization residuals
                max_residual = 3  # avoid shadowing the builtin max
                bins = np.linspace(-max_residual, max_residual, 30)
ax_residual.hist(normalized_residual_i, color=color, histtype='step',
orientation='horizontal', linewidth=3, alpha=0.8, density=True, bins=bins)
ax_residual.set_ylabel(r'$\left(R_{\rm{AA}}^{\rm{true}} - R_{\rm{AA}}^{\rm{emulator}}\right) / \sigma_{\rm{emulator}}$',
fontsize=16)
# Print out indices of points that deviate significantly
if remove_outliers:
stdev = np.std(normalized_residual_i)
for j,true_sublist in enumerate(true_raa[i]):
emulator_sublist = emulator_raa_mean[i][j]
for k,true_raa_value in enumerate(true_sublist):
emulator_raa_value = emulator_sublist[k]
normalized_residual = (true_raa_value-emulator_raa_value)/true_raa_value
if np.abs(normalized_residual) > 3*stdev:
print('Index {} has poor emulator validation...'.format(j))
plt.savefig('{}/EmulatorUncertaintyValidation_{}.pdf'.format(self.plot_dir, cent))
plt.close('all')
##################################################################
if __name__ == '__main__':
# Define arguments
parser = argparse.ArgumentParser(description='Jetscape STAT analysis')
parser.add_argument('-o', '--outputdir', action='store',
type=str, metavar='outputdir',
default='./STATGallery')
parser.add_argument('-c', '--configFile', action='store',
type=str, metavar='configFile',
default='analysis_config.yaml',
help='Path of config file')
parser.add_argument('-m', '--model', action='store',
type=str, metavar='model',
default='LBT',
help='model')
# Parse the arguments
args = parser.parse_args()
print('')
print('Configuring MergeResults...')
# If invalid configFile is given, exit
if not os.path.exists(args.configFile):
print('File \"{0}\" does not exist! Exiting!'.format(args.configFile))
sys.exit(0)
analysis = MergeResults(config_file = args.configFile, model=args.model,
output_dir=args.outputdir)
analysis.run_model()
```
#### File: jdmulligan/STAT/run_analysis.py
```python
import matplotlib
matplotlib.use('Agg')
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import scipy
import pymc3
import os
import sys
import pickle
import argparse
from src.design import Design
from src import emulator, mcmc, init
import run_analysis_base
################################################################
class RunAnalysis(run_analysis_base.RunAnalysisBase):
#---------------------------------------------------------------
# Constructor
#---------------------------------------------------------------
def __init__(self, config_file, model, output_dir, exclude_index, **kwargs):
# Initialize base class
super(RunAnalysis, self).__init__(config_file, model, output_dir, exclude_index, **kwargs)
# Write dictionary of results to pickle
self.output_dict = {}
#---------------------------------------------------------------
# Run analysis
#---------------------------------------------------------------
def run_analysis(self):
# Initialize data and model from files
self.initialize()
# Initialize pickled config settings
init.Init(self.workdir).Initialize(self)
# If exclude_index < 0, perform standard analysis
if self.exclude_index < 0:
self.run_single_analysis()
# Otherwise, hold out a specific training point from the emulator training
else:
# Check if exclude_index exists
n_design_points = len(self.AllData['design'])
if self.exclude_index > n_design_points-1:
print('Design point {} does not exist for {}, which has {} design points'.format(self.exclude_index, self.model, n_design_points))
os.system('rm -r {}'.format(self.workdir))
return
# For each emulator:
# Store lists of true RAA, emulator RAA at each holdout point
# (For each system+centrality -- over all pt)
self.SystemCount = len(self.AllData["systems"])
self.true_raa = [[[] for _ in range(0, 2)] for _ in range(self.SystemCount)]
self.emulator_raa_mean = [[[] for _ in range(0, 2)] for _ in range(self.SystemCount)]
self.emulator_raa_stdev = [[[] for _ in range(0, 2)] for _ in range(self.SystemCount)]
# Initialize data structures, with the updated holdout information
print('Running holdout test {} / {}'.format(self.exclude_index, n_design_points))
self.initialize(exclude_index = self.exclude_index)
# Transform holdout coordinates
self.holdout_design = self.AllData['holdout_design']
if self.model in ['MATTER+LBT1', 'MATTER+LBT2']:
holdout_design_temp = np.copy(self.holdout_design)
holdout_design_temp[0] = self.holdout_design[0] * self.holdout_design[1]
holdout_design_temp[1] = self.holdout_design[0] - self.holdout_design[0] * self.holdout_design[1]
self.holdout_design = holdout_design_temp
self.output_dict['theta'] = self.holdout_design
print('theta: {}'.format(self.AllData['holdout_design']))
print('theta_transformed: {}'.format(self.holdout_design))
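        # Hedged worked example: if holdout_design = [A+C, A/(A+C), ...] = [2.0, 0.25, ...],
        # then theta_transformed = [A, C, ...] = [2.0*0.25, 2.0 - 2.0*0.25, ...] = [0.5, 1.5, ...]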
if len(self.AllData['design']) != n_design_points - 1:
sys.exit('Only {} design points remain, but there should be {}!'.format(
len(self.AllData['design']), n_design_points - 1))
# Perform analysis (with holdout and closure tests)
self.run_single_analysis(holdout_test=True, closure_test=True)
plt.close('all')
#---------------------------------------------------------------
# Run analysis
#---------------------------------------------------------------
def run_single_analysis(self, holdout_test = False, closure_test = False):
# Create output dir
self.plot_dir = os.path.join(self.workdir, 'plots')
if not os.path.exists(self.plot_dir):
os.makedirs(self.plot_dir)
# Re-train emulator, if requested
if self.retrain_emulator:
# Clean cache for emulator
for system in self.AllData["systems"]:
if os.path.exists(os.path.join(self.cache_dir, '{}.pkl'.format(system))):
os.remove(os.path.join(self.cache_dir, '{}.pkl'.format(system)))
print('removed {}'.format('{}/{}.pkl'.format(self.cache_dir, system)))
# Re-train emulator
os.system('python -m src.emulator --retrain --npc {} --nrestarts {} --alpha {} -o {}'.format(self.n_pc, self.n_restarts, self.alpha, self.workdir))
# Load trained emulator
self.EmulatorAuAu200 = emulator.Emulator.from_cache('AuAu200', self.workdir)
self.EmulatorPbPb2760 = emulator.Emulator.from_cache('PbPb2760', self.workdir)
self.EmulatorPbPb5020 = emulator.Emulator.from_cache('PbPb5020', self.workdir)
# Construct plots characterizing the emulator
self.plot_design(holdout_test = holdout_test)
self.plot_RAA(self.AllData["design"], 'Design')
if holdout_test:
self.plot_emulator_RAA_residuals(holdout_test = True)
if not closure_test:
return
else:
self.plot_emulator_RAA_residuals()
# Run MCMC
if self.rerun_mcmc:
if os.path.exists(os.path.join(self.cache_dir, 'mcmc_chain.hdf')):
print('removed mcmc_chain.hdf')
os.remove(os.path.join(self.cache_dir, 'mcmc_chain.hdf'))
os.system('python -m src.mcmc --nwalkers {} --nburnsteps {} -o {} {} '.format(self.n_walkers, self.n_burn_steps, self.workdir, self.n_steps))
# Load MCMC chain
self.chain = mcmc.Chain(self.workdir)
self.MCMCSamples = self.chain.load()
# Plot dependence of MC sampling on number of steps
self.plot_MCMC_samples()
# Transform coordinates
if self.model in ['MATTER+LBT1', 'MATTER+LBT2']:
self.TransformedSamples = np.copy(self.MCMCSamples)
self.TransformedSamples[:,0] = self.MCMCSamples[:,0] * self.MCMCSamples[:,1]
self.TransformedSamples[:,1] = self.MCMCSamples[:,0] - self.MCMCSamples[:,0] * self.MCMCSamples[:,1]
else:
self.TransformedSamples = np.copy(self.MCMCSamples)
# Plot posterior distributions of parameters
self.plot_correlation(suffix = '', holdout_test = holdout_test)
if self.model in ['MATTER+LBT1', 'MATTER+LBT2']:
self.plot_correlation(suffix = '_Transformed', holdout_test = holdout_test)
# Plot RAA for samples of the posterior parameter space
sample_points = self.MCMCSamples[ np.random.choice(range(len(self.MCMCSamples)), 100), :]
self.plot_RAA(sample_points, 'Posterior')
if not holdout_test and not closure_test:
self.plot_qhat(E=100.)
self.plot_qhat(T=0.3)
plt.close('all')
# Write result to pkl
if holdout_test:
self.output_dict['true_raa'] = self.true_raa
self.output_dict['emulator_raa_mean'] = self.emulator_raa_mean
self.output_dict['emulator_raa_stdev'] = self.emulator_raa_stdev
# Plot qhat/T^3 for the holdout point
if closure_test:
self.plot_closure_test_qhat(E=100.)
self.plot_closure_test_qhat(T=0.3)
# Write result to pkl
with open(os.path.join(self.workdir, 'result.pkl'), 'wb') as f:
pickle.dump(self.output_dict, f)
plt.close('all')
#---------------------------------------------------------------
    # Plot mean and credible interval of the qhat/T^3 posterior vs. T (fixed E) or E (fixed T)
#---------------------------------------------------------------
def plot_qhat(self, E=None, T=None):
# Plot 90% credible interval of qhat solution
# --> Construct distribution of qhat by sampling each ABCD point
if E:
xlabel = 'T (GeV)'
x_array = np.linspace(0.16, 0.5)
qhat_posteriors = [[self.qhat(T=T, E=E, parameters=parameters)
for parameters in self.TransformedSamples]
for T in x_array]
if T:
xlabel = 'E (GeV)'
x_array = np.linspace(5, 200)
qhat_posteriors = [[self.qhat(T=T, E=E, parameters=parameters)
for parameters in self.TransformedSamples]
for E in x_array]
# Get list of mean qhat values for each T or E
qhat_mean = [np.mean(qhat_values) for qhat_values in qhat_posteriors]
plt.plot(x_array, qhat_mean, sns.xkcd_rgb['denim blue'],
linewidth=2., linestyle='--', label='Mean')
plt.xlabel(xlabel)
plt.ylabel(r'$\hat{q}/T^3$')
ymin = 0
ymax = 2*max(qhat_mean)
axes = plt.gca()
axes.set_ylim([ymin, ymax])
# Get credible interval for each T or E
# Specifically: highest posterior density interval (HPDI) via pymc3
h = [pymc3.stats.hpd(np.array(qhat_values), self.confidence[0]) for qhat_values in qhat_posteriors]
credible_low = [i[0] for i in h]
credible_up = [i[1] for i in h]
plt.fill_between(x_array, credible_low, credible_up, color=sns.xkcd_rgb['light blue'],
label='{}% Credible Interval'.format(int(self.confidence[0]*100)))
# Draw legend
first_legend = plt.legend(title=self.model, title_fontsize=15,
loc='upper right', fontsize=12)
ax = plt.gca().add_artist(first_legend)
if E:
label = 'T'
if T:
label = 'E'
plt.savefig('{}/qhat_{}.pdf'.format(self.plot_dir, label), dpi = 192)
plt.close('all')
#---------------------------------------------------------------
# Plot qhat/T^3 for the holdout point
#---------------------------------------------------------------
def plot_closure_test_qhat(self, E=None, T=None):
# Plot 90% credible interval of qhat solution
# --> Construct distribution of qhat by sampling each ABCD point
# Plot 1D closure tests for qhat vs. T, for fixed E
if E:
xlabel = 'T (GeV)'
x_array = np.linspace(0.16, 0.5)
# Plot truth value
qhat_truth = [self.qhat(T=T, E=E, parameters=self.holdout_design) for T in x_array]
plt.plot(x_array, qhat_truth, sns.xkcd_rgb['pale red'],
linewidth=2., label='Truth')
# Plot 90% credible interval of qhat solution
# --> Construct distribution of qhat by sampling each ABCD point
qhat_posteriors = [[self.qhat(T=T, E=E, parameters=parameters)
for parameters in self.TransformedSamples]
for T in x_array]
# Plot 1D closure tests for qhat vs. E, for fixed T
if T:
xlabel = 'E (GeV)'
x_array = np.linspace(5, 200)
# Plot truth value
qhat_truth = [self.qhat(T=T, E=E, parameters=self.holdout_design) for E in x_array]
plt.plot(x_array, qhat_truth, sns.xkcd_rgb['pale red'],
linewidth=2., label='Truth')
# Plot 90% credible interval of qhat solution
# --> Construct distribution of qhat by sampling each ABCD point
qhat_posteriors = [[self.qhat(T=T, E=E, parameters=parameters)
for parameters in self.TransformedSamples]
for E in x_array]
plt.xlabel(xlabel)
plt.ylabel(r'$\hat{q}/T^3$')
ymin = 0
ymax = 2*max(qhat_truth)
axes = plt.gca()
axes.set_ylim([ymin, ymax])
# Get list of mean qhat values for each T
qhat_mean = [np.mean(qhat_values) for qhat_values in qhat_posteriors]
plt.plot(x_array, qhat_mean, sns.xkcd_rgb['denim blue'],
linewidth=2., linestyle='--', label='Extracted mean')
# Get credible interval for each T
# Specifically: highest posterior density interval (HPDI) via pymc3
h = [pymc3.stats.hpd(np.array(qhat_values), self.confidence[0]) for qhat_values in qhat_posteriors]
credible_low = [i[0] for i in h]
credible_up = [i[1] for i in h]
plt.fill_between(x_array, credible_low, credible_up, color=sns.xkcd_rgb['light blue'],
label='{}% Credible Interval'.format(int(self.confidence[0]*100)))
# Store also 60% CR
h2 = [pymc3.stats.hpd(np.array(qhat_values), self.confidence[1]) for qhat_values in qhat_posteriors]
credible_low2 = [i[0] for i in h2]
credible_up2 = [i[1] for i in h2]
# Store whether truth value is contained within credible region
qhat_closure = [((qhat_truth[i] < credible_up[i]) and (qhat_truth[i] > credible_low[i])) for i,_ in enumerate(x_array)]
qhat_closure2 = [((qhat_truth[i] < credible_up2[i]) and (qhat_truth[i] > credible_low2[i])) for i,_ in enumerate(x_array)]
# Draw legend
first_legend = plt.legend(title=self.model, title_fontsize=15,
loc='upper right', fontsize=12)
ax = plt.gca().add_artist(first_legend)
if E:
label = 'T'
if T:
label = 'E'
plt.savefig('{}/Closure_{}.pdf'.format(self.plot_dir, label), dpi = 192)
plt.close('all')
# Plot distribution of posterior qhat values for a given T
plt.hist(qhat_posteriors[0], bins=50,
histtype='step', color='green')
plt.savefig('{}/ClosureDist.pdf'.format(self.plot_dir), dpi = 192)
plt.close('all')
# Write result to pkl
verbose = True
if E:
self.output_dict['T_array'] = x_array
self.output_dict['T_qhat_truth'] = qhat_truth # Truth
self.output_dict['T_qhat_mean'] = qhat_mean # Extracted mean
self.output_dict['T_qhat_closure'] = qhat_closure # Extracted posteriors
self.output_dict['T_qhat_closure2'] = qhat_closure2 # Extracted posteriors
if verbose:
self.output_dict['T_credible_up'] = credible_up # Extracted posteriors
self.output_dict['T_credible_low'] = credible_low # Extracted posteriors
self.output_dict['T_credible_up2'] = credible_up2 # Extracted posteriors
self.output_dict['T_credible_low2'] = credible_low2 # Extracted posteriors
if T:
self.output_dict['E_array'] = x_array
self.output_dict['E_qhat_truth'] = qhat_truth # Truth
self.output_dict['E_qhat_mean'] = qhat_mean # Extracted mean
self.output_dict['E_qhat_closure'] = qhat_closure # Extracted posteriors
self.output_dict['E_qhat_closure2'] = qhat_closure2 # Extracted posteriors
if verbose:
self.output_dict['E_credible_up'] = credible_up # Extracted posteriors
self.output_dict['E_credible_low'] = credible_low # Extracted posteriors
self.output_dict['E_credible_up2'] = credible_up2 # Extracted posteriors
self.output_dict['E_credible_low2'] = credible_low2 # Extracted posteriors
#---------------------------------------------------------------
# Plot design points
#---------------------------------------------------------------
def plot_design(self, holdout_test = False):
        # Transform {A+C, A/(A+C), B, D, Q} to {A, B, C, D, Q}
design_points = self.AllData['design']
if self.model in ['MATTER+LBT1', 'MATTER+LBT2']:
transformed_design_points = np.copy(design_points)
transformed_design_points[:,0] = design_points[:,0] * design_points[:,1]
transformed_design_points[:,1] = design_points[:,0] - design_points[:,0] * design_points[:,1]
else:
transformed_design_points = np.copy(design_points)
# Plot A vs. C example
i = 2
j = 0
plt.locator_params(nbins=8)
plt.scatter(transformed_design_points[:, j], transformed_design_points[:, i],
c=sns.xkcd_rgb['denim blue'], alpha=0.5)
plt.title('Design Points of Inputs {},{}'.format(self.Names[i], self.Names[j]), fontsize=16, weight='bold')
plt.xlabel(self.Names[j], fontsize=20)
plt.ylabel(self.Names[i], fontsize=20, rotation=0, labelpad=15)
plt.savefig('{}/DesignPoints_AC.pdf'.format(self.plot_dir), dpi = 192)
plt.close('all')
# Plot grid of 2D projections
NDimension = len(self.AllData["labels"])
figure, axes = plt.subplots(figsize = (3 * NDimension, 3 * NDimension), ncols = NDimension, nrows = NDimension)
for i, row in enumerate(axes):
for j, ax in enumerate(row):
if i==NDimension-1 or (j==0 and i>3) or (j==NDimension-1 and i>3):
ax.set_xlabel(self.Names[j], fontsize=20)
if j==0:
ax.set_ylabel(self.Names[i], fontsize=20)
if i==j:
ax.hist(transformed_design_points[:,i], bins=10,
range=self.Ranges_transformed[:,i], histtype='step', color=sns.xkcd_rgb['denim blue'])
ax.set_xlim(*self.Ranges_transformed[:,j])
if i>j:
ax.scatter(transformed_design_points[:, j], transformed_design_points[:, i],
c=sns.xkcd_rgb['denim blue'], alpha=0.5)
ax.set_xlim(*self.Ranges_transformed[:,j])
ax.set_ylim(*self.Ranges_transformed[:,i])
if holdout_test:
ax.plot(self.holdout_design[j], self.holdout_design[i], 'ro')
if i<j:
ax.axis('off')
plt.savefig('{}/DesignPoints.pdf'.format(self.plot_dir), dpi = 192)
plt.close('all')
#---------------------------------------------------------------
# Plot RAA of the model at a set of points in the parameter space
#---------------------------------------------------------------
def plot_RAA(self, points, name):
TempPrediction = {"AuAu200": self.EmulatorAuAu200.predict(points),
"PbPb2760": self.EmulatorPbPb2760.predict(points),
"PbPb5020": self.EmulatorPbPb5020.predict(points)}
SystemCount = len(self.AllData["systems"])
figure, axes = plt.subplots(figsize = (15, 5 * SystemCount), ncols = 2, nrows = SystemCount)
for s1 in range(0, SystemCount): # Collision system
for s2 in range(0, 2): # Centrality range
axes[s1][s2].set_xlabel(r"$p_{T}$")
axes[s1][s2].set_ylabel(r"$R_{AA}$")
# Plot data points
S1 = self.AllData["systems"][s1]
O = self.AllData["observables"][0][0]
S2 = self.AllData["observables"][0][1][s2]
DX = self.AllData["data"][S1][O][S2]['x']
DY = self.AllData["data"][S1][O][S2]['y']
DE = np.sqrt(self.AllData["data"][S1][O][S2]['yerr']['stat'][:,0]**2 + self.AllData["data"][S1][O][S2]['yerr']['sys'][:,0]**2)
# Plot emulator predictions at design points
for i, y in enumerate(TempPrediction[S1][O][S2]):
axes[s1][s2].plot(DX, y, 'b-', alpha=0.1, label="Posterior" if i==0 else '')
axes[s1][s2].errorbar(DX, DY, yerr = DE, fmt='ro', label="Measurements")
figure.savefig('{}/RAA_{}.pdf'.format(self.plot_dir, name), dpi = 192)
plt.close('all')
#---------------------------------------------------------------
# Plot residuals of RAA between the emulator and the true model values, at the design points
#---------------------------------------------------------------
def plot_emulator_RAA_residuals(self, holdout_test = False):
# Get training points
if holdout_test:
Examples = [self.AllData['holdout_design']]
else:
Examples = self.AllData['design']
Examples = np.array(Examples, copy=False, ndmin=2)
# Get emulator predictions at training points
TempPrediction = {"AuAu200": self.EmulatorAuAu200.predict(Examples, return_cov=True),
"PbPb2760": self.EmulatorPbPb2760.predict(Examples, return_cov=True),
"PbPb5020": self.EmulatorPbPb5020.predict(Examples, return_cov=True)}
SystemCount = len(self.AllData["systems"])
figure, axes = plt.subplots(figsize = (15, 5 * SystemCount), ncols = 2, nrows = SystemCount)
# Loop through system and centrality range
for s1 in range(0, SystemCount): # Collision system
for s2 in range(0, 2): # Centrality range
axes[s1][s2].set_xlabel(r"$p_{T}$")
axes[s1][s2].set_ylabel(r"$(R_{AA}^{emulator} - R_{AA}^{model}) / R_{AA}^{model}$")
# Get keys for given system, centrality
S1 = self.AllData["systems"][s1]
O = self.AllData["observables"][0][0]
S2 = self.AllData["observables"][0][1][s2]
# Get MC values at training points
if holdout_test:
model_x = self.AllData['holdout_model'][S1][O][S2]['x'] # pt-bin values
model_y = self.AllData['holdout_model'][S1][O][S2]['Y'] # 1d array of model Y-values at holdout point
else:
model_x = self.AllData['model'][S1][O][S2]['x'] # pt-bin values
model_y = self.AllData['model'][S1][O][S2]['Y'] # 2d array of model Y-values at each training point
# Get emulator predictions at training points
mean_prediction, cov_prediction = TempPrediction[S1]
# Get interpolation uncertainty
cov = cov_prediction[(O,S2),(O,S2)][0]
variance = np.diagonal(cov)
stdev_prediction = np.sqrt(variance)
# Plot difference between model and emulator
emulator_y = mean_prediction[O][S2] # 2d array of emulator Y-values at each training point
for i, y in enumerate(emulator_y):
if holdout_test:
model_y_1d = model_y
[self.true_raa[s1][s2].append(raa) for raa in model_y_1d]
[self.emulator_raa_mean[s1][s2].append(raa) for raa in emulator_y[i]]
[self.emulator_raa_stdev[s1][s2].append(stdev) for stdev in stdev_prediction]
else:
model_y_1d = model_y[i]
deltaRAA = (emulator_y[i] - model_y_1d) / model_y_1d
if holdout_test:
deltaRAA_stdev = stdev_prediction[i] / model_y_1d
axes[s1][s2].plot(model_x, deltaRAA, 'b-', alpha=0.1, label="Posterior" if i==0 else '')
if holdout_test:
axes[s1][s2].fill_between(model_x, -deltaRAA_stdev, deltaRAA_stdev,
lw=0, color=sns.xkcd_rgb['light blue'], alpha=.3, zorder=20)
figure.savefig('{}/RAA_Residuals_Design.pdf'.format(self.plot_dir), dpi = 192)
plt.close('all')
#---------------------------------------------------------------
# Plot residuals of each PC
#---------------------------------------------------------------
def plot_PC_residuals(self):
for system in self.AllData["systems"]:
# Get emulators for a given system (one per PC) from cache
gps = emulator.emulators[system].gps
nrows = len(gps)
ncols = gps[0].X_train_.shape[1]
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(ncols*4., nrows*4.) )
ymax = np.ceil(max(np.fabs(g.y_train_).max() for g in gps))
ylim = (-ymax, ymax)
design = Design(system, self.workdir)
test_points = [r*design.min + (1 - r)*design.max for r in [.2, .5, .8]]
# Loop through emulators (one per PC)
for ny, (gp, row) in enumerate(zip(gps, axes)):
# Get list of training y-values
y = gp.y_train_
# Loop through training parameters {A+C,A/(A+C),B,D,Q}
for nx, (x, label, xlim, ax) in enumerate(zip(gp.X_train_.T, design.labels, design.range, row)):
# Plot training points
ax.plot(x, y, 'o', ms=3., color='.75', zorder=10)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xlabel(label)
ax.set_ylabel('PC {}'.format(ny))
                # Plot emulator prediction (and stdev) for three different test points
x = np.linspace(xlim[0], xlim[1], 100)
X = np.empty((x.size, ncols))
for k, test_point in enumerate(test_points):
X[:] = test_point
X[:, nx] = x
mean, std = gp.predict(X, return_std=True)
color = plt.cm.tab10(k)
ax.plot(x, mean, lw=.2, color=color, zorder=30)
ax.fill_between(x, mean - std, mean + std, lw=0, color=color, alpha=.3, zorder=20)
plt.savefig('{}/EmulatorPCs_{}.pdf'.format(self.plot_dir, system), dpi = 192)
plt.close('all')
#---------------------------------------------------------------
# Check that burn-in is sufficient
#---------------------------------------------------------------
def plot_MCMC_samples(self):
with self.chain.dataset() as d:
W = d.shape[0] # number of walkers
S = d.shape[1] # number of steps
            N = d.shape[2] # number of parameters
T = int(S / 200) # "thinning"
A = 20 / W
figure, axes = plt.subplots(figsize = (15, 2 * N), ncols = 1, nrows = N)
for i, ax in enumerate(axes):
for j in range(0, W):
ax.plot(range(0, S, T), d[j, ::T, i], alpha = A)
plt.savefig('{}/MCMCSamples.pdf'.format(self.plot_dir), dpi = 192)
plt.close('all')
#---------------------------------------------------------------
# Plot posterior parameter distributions, either in transformed
# or non-transformed coordinates
#---------------------------------------------------------------
def plot_correlation(self, suffix = '', holdout_test = False):
if 'Transformed' in suffix:
Names = self.Names
samples = self.TransformedSamples
color = 'blue'
colormap = 'Blues'
ranges = self.Ranges_transformed
if holdout_test:
holdout_design = self.holdout_design
else:
Names = self.Names_untransformed
samples = self.MCMCSamples
color = 'green'
colormap = 'Greens'
ranges = self.Ranges
if holdout_test:
holdout_design = self.AllData['holdout_design']
NDimension = len(self.AllData["labels"])
figure, axes = plt.subplots(figsize = (3 * NDimension, 3 * NDimension), ncols = NDimension, nrows = NDimension)
for i, row in enumerate(axes):
for j, ax in enumerate(row):
if i==j:
# Draw 1D projection
ax.hist(samples[:,i], bins=50,
range=ranges[:,i], histtype='step', color=color)
ax.set_xlabel(Names[i])
ax.set_xlim(*ranges[:,j])
ymax = ax.get_ylim()[1]
# If holdout test, draw the highest posterior density interval (HPDI)
if holdout_test:
credible_interval = pymc3.stats.hpd(np.array(samples[:,i]), self.confidence[0])
ax.fill_between(credible_interval, [ymax,ymax], color=sns.xkcd_rgb['almost black'], alpha=0.1)
if self.model in ['LBT', 'MATTER'] or 'Transformed' in suffix:
# Store whether truth value is contained within credible region
theta_truth = holdout_design[i]
theta_closure = (theta_truth < credible_interval[1]) and (theta_truth > credible_interval[0])
credible_interval2 = pymc3.stats.hpd(np.array(samples[:,i]), self.confidence[1])
theta_closure2 = (theta_truth < credible_interval2[1]) and (theta_truth > credible_interval2[0])
name = self.Names[i]
self.output_dict['{}_closure'.format(name)] = theta_closure
self.output_dict['{}_closure2'.format(name)] = theta_closure2
# Draw 2D correlations
if i>j:
ax.hist2d(samples[:, j], samples[:, i],
bins=50, range=[ranges[:,j], ranges[:,i]],
cmap=colormap)
ax.set_xlabel(Names[j])
ax.set_ylabel(Names[i])
ax.set_xlim(*ranges[:,j])
ax.set_ylim(*ranges[:,i])
if holdout_test:
ax.plot(holdout_design[j], holdout_design[i], 'ro')
if i<j:
ax.axis('off')
plt.savefig('{}/Posterior_Correlations{}.pdf'.format(self.plot_dir, suffix), dpi = 192)
plt.close('all')
#---------------------------------------------------------------
def plot_avg_residuals(self):
design_points = self.AllData['design']
if self.model in ['MATTER+LBT1', 'MATTER+LBT2']:
transformed_design_points = np.copy(design_points)
transformed_design_points[:,0] = design_points[:,0] * design_points[:,1]
transformed_design_points[:,1] = design_points[:,0] - design_points[:,0] * design_points[:,1]
else:
transformed_design_points = np.copy(design_points)
if len(self.avg_residuals) < len(self.AllData['design']):
transformed_design_points = transformed_design_points[0:self.n_max_holdout_tests]
NDimension = len(self.AllData["labels"])
figure, axes = plt.subplots(figsize = (3 * NDimension, 3 * NDimension), ncols = NDimension, nrows = NDimension)
for i, row in enumerate(axes):
for j, ax in enumerate(row):
if i==j:
ax.hist(transformed_design_points[:,i], bins=50, weights=self.avg_residuals,
range=self.Ranges_transformed[:,i], histtype='step', color='blue')
ax.set_xlabel(self.Names[i])
ax.set_xlim(*self.Ranges_transformed[:,j])
if i>j:
ax.hist2d(transformed_design_points[:, j], transformed_design_points[:, i], weights=self.avg_residuals,
bins=50, range=[self.Ranges_transformed[:,j], self.Ranges_transformed[:,i]],
cmap='Blues')
ax.set_xlabel(self.Names[j])
ax.set_ylabel(self.Names[i])
ax.set_xlim(*self.Ranges_transformed[:,j])
ax.set_ylim(*self.Ranges_transformed[:,i])
if i<j:
ax.axis('off')
plt.savefig('{}/Average_Residuals.pdf'.format(self.plot_dir), dpi = 192)
plt.close('all')
##################################################################
if __name__ == '__main__':
# Define arguments
parser = argparse.ArgumentParser(description='Jetscape STAT analysis')
parser.add_argument('-c', '--configFile', action='store',
type=str, metavar='configFile',
default='analysis_config.yaml',
help='Path of config file')
parser.add_argument('-m', '--model', action='store',
type=str, metavar='model',
default='LBT',
help='model')
parser.add_argument('-o', '--outputdir', action='store',
type=str, metavar='outputdir',
default='./STATGallery')
parser.add_argument('-i', '--excludeIndex', action='store',
type=int, metavar='excludeIndex',
default=-1,
help='Index of design point to exclude from emulator')
# Parse the arguments
args = parser.parse_args()
print('')
print('Configuring RunAnalysis...')
# If invalid configFile is given, exit
if not os.path.exists(args.configFile):
print('File \"{0}\" does not exist! Exiting!'.format(args.configFile))
sys.exit(0)
analysis = RunAnalysis(config_file=args.configFile,
model=args.model,
output_dir=args.outputdir,
exclude_index=args.excludeIndex)
analysis.run_model()
```
|
{
"source": "JDMusc/Online-Bullying-Image-Classifcation",
"score": 3
}
|
#### File: JDMusc/Online-Bullying-Image-Classifcation/controlDataHelpers.py
```python
import os
import shutil
from toolz import pipe as p
import numpy as np
import pandas as pd
def makeControlDir(dest_dir, keep_actions = None, drop_actions = None, n_total_images = 200, replace=True):
dest_dir_exists = os.path.exists(dest_dir)
if dest_dir_exists and replace:
shutil.rmtree(dest_dir)
os.mkdir(dest_dir)
elif not dest_dir_exists:
os.mkdir(dest_dir)
action_counts = _loadActionCounts(keep_actions, drop_actions, n_total_images)
src_dir = 'stanford_40/JPEGImages'
for c in action_counts.index:
num_c = action_counts.loc[c, 'number_of_images']
class_fs = np.random.choice(
[f for f in os.listdir(src_dir) if c in f], num_c, replace = False)
for f in class_fs:
shutil.copy(os.path.join(src_dir, f), os.path.join(dest_dir, f))
def _loadActionCounts(keep_actions = None, drop_actions = None, n_total_images=200):
if keep_actions is not None and drop_actions is not None:
        raise ValueError('can only choose keep actions or drop actions')
f = "stanford_40/ImageSplits/actions.txt"
action_counts = pd.read_csv(f, delim_whitespace=True, index_col = 0)
actions = p(action_counts.index, set)
if keep_actions is None and drop_actions is not None:
drop_actions = drop_actions
elif keep_actions is None and drop_actions is None:
drop_actions = []
else:
keep_actions = [keep_actions] if type(keep_actions) is str else keep_actions
drop_actions = actions - set(keep_actions)
action_counts = action_counts.drop(drop_actions)
action_counts['ratio'] = action_counts.number_of_images/sum(action_counts.number_of_images)
action_counts['number_of_images_orig'] = action_counts.number_of_images
action_counts['number_of_images'] = round(action_counts.ratio * n_total_images).astype(int)
return action_counts
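# Hedged usage sketch (the action names below are illustrative, not checked
# against the Stanford 40 label set):
#   makeControlDir('control_200', keep_actions=['riding_a_bike', 'running'],
#                  n_total_images=200)
# copies ~200 images into control_200/, split across the kept actions in
# proportion to their original class frequencies.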
```
#### File: JDMusc/Online-Bullying-Image-Classifcation/facelocationsParser.py
```python
import os
from PIL import Image
from toolz import pipe as p
from xml.dom import minidom
import xml.etree.ElementTree as ET
import pandas as pd
root = 'C:/Users/anamu/OneDrive/Desktop/Biomedical Data Science and Informatics/2019 Spring/CPSC 8810 Deep Learning/Project/Labelled Image/'
def writeLocationsFiles(locations_dir, dest_dir):
os.makedirs(dest_dir)
location_fs = [os.path.join(locations_dir, f)
for f in os.listdir(locations_dir)
if f.endswith('.csv')]
for f in location_fs:
(clas, base_img_f) = parseLocationsFileName(f)
clas_dir = os.path.join(dest_dir, clas)
if not os.path.exists(clas_dir):
os.makedirs(clas_dir)
try:
dest_f = p(base_img_f,
lambda _: os.path.splitext(_)[0],
lambda _: _ + '.xml',
lambda _: os.path.join(clas_dir, _))
writeLocationsFile(f, dest_f)
except Exception:
Warning('file ' + f + ' not parsed')
def writeLocationsFile(locations_f, dest_f):
xmlstr = p(locations_f,
toXml,
toXmlString)
with open(dest_f, "w") as f:
f.write(xmlstr)
def toXmlString(xml):
return p(xml,
ET.tostring,
minidom.parseString,
lambda _: _.toprettyxml(),
lambda _: _.replace('<?xml version="1.0" ?>\n', ''))
def toXml(locations_f):
(clas, img_f_name) = parseLocationsFileName(locations_f)
ann = createHeader(clas, img_f_name)
size = createSizeTag(clas, img_f_name)
ann.append(size)
locations = pd.read_csv(locations_f)
n_boxes = locations.shape[0]
    for row_ix in range(0, n_boxes):
        arr = locations.iloc[row_ix, 0:4].values.astype(int)  # .values replaces the deprecated .get_values()
        obj = createObjectTag(arr, clas)
        ann.append(obj)
return ann
def createHeader(clas, img_f_name):
xml_root = ET.Element('annotation')
folder = ET.SubElement(xml_root, 'folder')
folder.text = clas
filename = ET.SubElement(xml_root, 'filename')
filename.text = os.path.basename(img_f_name)
path = ET.SubElement(xml_root, 'path')
path.text = os.path.join(root, clas, img_f_name)
source = ET.SubElement(xml_root, 'source')
database = ET.SubElement(source, 'database')
database.text = 'Unknown'
segmented = ET.SubElement(xml_root, 'segmented')
    segmented.text = '0'  # ElementTree .text must be a string, not an int
return xml_root
def createSizeTag(clas, img_f_name):
full_img_f = os.path.join('image_data', clas, img_f_name)
img = Image.open(full_img_f)
size = ET.Element('size')
width = ET.SubElement(size, 'width')
width.text = str(img.width)
height = ET.SubElement(size, 'height')
height.text = str(img.height)
depth = ET.SubElement(size, 'depth')
depth.text = str(img.layers)
return size
def createObjectTag(arr, c):
if len(arr) == 0:
return None
    obj = ET.Element('object')  # renamed to avoid shadowing the builtin object
    name = ET.SubElement(obj, 'name')
    if c == 'laughing':
        name.text = 'bully'
    else:
        name.text = 'victim'
    pose = ET.SubElement(obj, 'pose')
    pose.text = 'Unspecified'
    truncated = ET.SubElement(obj, 'truncated')
    truncated.text = "0"
    difficult = ET.SubElement(obj, 'difficult')
    difficult.text = "0"
    bndbox = createBoundingBoxTag(arr)
    obj.append(bndbox)
    return obj
def createBoundingBoxTag(arr):
bndbox = ET.Element('bndbox')
def addElement(name, i):
tag = ET.SubElement(bndbox, name)
tag.text = str(arr[i])
addElement('xmin', 0)
addElement('ymin', 1)
addElement('xmax', 2)
addElement('ymax', 3)
return bndbox
def parseLocationsFileName(locations_f):
base_f = os.path.basename(locations_f)
(clas, img_f_name) = base_f.split('_')
img_f_name = img_f_name.replace('.csv','')
return (clas, img_f_name)
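# Hedged usage sketch: given CSVs named '<class>_<image>.csv' (one bounding
# box per row) in a source directory, this writes one Pascal-VOC-style XML
# per image under dest_dir/<class>/:
#   writeLocationsFiles('face_locations', 'annotations')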
```
#### File: JDMusc/Online-Bullying-Image-Classifcation/localResnet.py
```python
from toolz import pipe as p
from torch import nn
N_IMAGE_CHANNELS = 3
def makeConv2d(in_channels, out_channels, kernel_size=3, stride=1,
padding = 1, bias = False):
conv = nn.Conv2d(in_channels, out_channels,
kernel_size = kernel_size,
stride = stride,
padding = padding, bias = bias)
nn.init.kaiming_normal_(conv.weight, mode='fan_out',
nonlinearity='relu')
return conv
def makeBn2(num_channels):
bn = nn.BatchNorm2d(num_channels)
nn.init.constant_(bn.weight, 1)
nn.init.constant_(bn.bias, 0)
return bn
def preResLayer(out_channels = 64):
return nn.Sequential(
makeConv2d(N_IMAGE_CHANNELS, out_channels, kernel_size=7,
stride=2, padding=3),
makeBn2(out_channels),
nn.ReLU(inplace = True),
nn.MaxPool2d(kernel_size = 3, stride=2, padding=1)
)
def postResLayer(in_channels, num_classes, dropout_p = None):
blocks = [
nn.AdaptiveAvgPool2d( (1,1) ),
Lambda(flatten)]
if dropout_p is not None:
blocks.append(nn.Dropout(p=dropout_p))
blocks.append(nn.Linear(in_channels, num_classes))
    # return the assembled blocks so that dropout_p is actually applied
    return nn.Sequential(*blocks)
#from PyTorch Website
class Lambda(nn.Module):
def __init__(self, func):
super(Lambda, self).__init__()
self.func = func
def forward(self, x):
return self.func(x)
def flatten(x):
return p(x,
lambda _: _.size(0),
lambda _: x.view(_, -1)
)
class ResNet(nn.Module):
def __init__(self, block_sizes, num_classes, in_channels = 64, p = None):
super(ResNet, self).__init__()
self.preres = preResLayer(out_channels = in_channels)
blocks = []
blocks.append(makeBlock(in_channels, in_channels, block_sizes[0], stride=1))
for i in range(1, len(block_sizes)):
out_channels = in_channels * 2
blocks.append(makeBlock(in_channels, out_channels, block_sizes[i]))
in_channels = out_channels
self.blocks = nn.Sequential(*blocks)
self.postres = postResLayer(out_channels, num_classes, dropout_p = p)
def forward(self, x):
return p(x,
self.preres,
self.blocks,
self.postres
)
#unlike PyTorch, Block is defined as an array of layers
#ResNet paper defines layers as PyTorch defines blocks
def makeBlock(in_channels, out_channels, num_layers, stride=2):
def makeLayer(i):
in_chan = in_channels if i == 0 else out_channels
stri = stride if i == 0 else 1
return ResLayer(in_chan, out_channels, stride=stri)
return nn.Sequential(*[makeLayer(i) for i in range(0, num_layers)])
class ResLayer(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(ResLayer, self).__init__()
self.conv1 = makeConv2d(in_channels, out_channels,
stride = stride)
self.bn1 = makeBn2(out_channels)
self.relu = nn.ReLU(inplace = True)
self.conv2 = makeConv2d(out_channels, out_channels)
self.bn2 = makeBn2(out_channels)
self.resizeInput = self.resizeInputGen(in_channels, out_channels, stride)
self.stride = stride
def resizeInputGen(self, in_channels, out_channels, stride):
resizeInput = lambda _: _
if in_channels != out_channels or stride != 1:
resizeInput = nn.Sequential(
makeConv2d(
in_channels, out_channels, kernel_size = 1, stride = stride, padding=0),
makeBn2(out_channels)
)
return resizeInput
def forward(self, x):
def addInput(processed_x):
return processed_x + self.resizeInput(x)
return p(x,
self.conv1,
self.bn1,
self.relu,
self.conv2,
self.bn2,
addInput,
self.relu
)
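# Hedged usage sketch: block_sizes=[2, 2, 2, 2] mirrors a ResNet-18-style
# layout; the 9-class output and dropout p=0.2 are illustrative assumptions.
if __name__ == '__main__':
    import torch
    net = ResNet([2, 2, 2, 2], num_classes=9, p=0.2)
    out = net(torch.randn(1, 3, 224, 224))
    print(out.shape)  # torch.Size([1, 9])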
```
#### File: JDMusc/Online-Bullying-Image-Classifcation/presentUtils.py
```python
import pandas as pd
import analyzeModel
def makePredsPerformanceTable(preds_f, phase = None):
preds = pd.read_csv(preds_f)
perf = analyzeModel.performanceMetrics(preds)
if phase is not None:
perf = analyzeModel.performanceMetricsWithPhase(preds)
perf = perf[phase]
for k in perf.keys():
perf[k].pop('class_counts', None)
return pd.DataFrame(perf)
```
#### File: JDMusc/Online-Bullying-Image-Classifcation/scrapDataHelpers.py
```python
import os
import shutil
import numpy as np
from toolz import pipe as p
def makeScrapData(classes, dest_dir = None, n_train = 30, n_val = None, src_dir = 'image_data'):
if dest_dir is None:
dest_dir = 'scrap_data' + str(n_train)
fs = {c: [os.path.join(src_dir, c, f) for f in p(os.path.join(src_dir, c), os.listdir)]
for c in classes}
by_phase = 'train' in os.listdir(src_dir) and 'test' in os.listdir(src_dir)
class_percents = classPercentages(src_dir, classes = classes, by_phase= by_phase)['percent']
train_counts = {c: int(class_percents[c]/100 * n_train) for c in classes}
train_fs = {c: np.random.choice(fs[c], train_counts[c], replace = False) for c in classes}
val_candidates = lambda c: list(set(fs[c]) - set(train_fs[c]))
val_fs = {c: val_candidates(c) for c in classes}
if n_val is not None:
val_counts = {c: int(class_percents[c]/100 * n_val) for c in classes}
val_fs = {c: np.random.choice(val_candidates(c), val_counts[c], replace = False)
for c in classes}
if os.path.exists(dest_dir):
shutil.rmtree(dest_dir)
os.mkdir(dest_dir)
joinDirGen = lambda d: lambda f: os.path.join(d, f)
joinScrapDir = joinDirGen(dest_dir)
train_val_fs = dict(train=train_fs, val=val_fs)
for tv in ('train', 'val'):
p(tv, joinScrapDir, os.mkdir)
for c in classes:
p(c, joinDirGen(tv), joinScrapDir, os.mkdir)
tv_fs = train_val_fs[tv][c]
for f in tv_fs:
dest = p(f,
os.path.basename,
joinDirGen(c),
joinDirGen(tv),
joinScrapDir)
shutil.copyfile(f, dest)
def classPercentages(data_dir, by_phase = True, classes = None):
if not by_phase:
classes = os.listdir(data_dir) if classes is None else classes
class_counts = {c: p(os.path.join(data_dir, c), os.listdir, len) for c in classes}
n_total = sum(class_counts.values())
class_percents = {c: count/n_total * 100 for (c, count) in class_counts.items()}
return dict(percent = class_percents, count = class_counts)
xs = ('train', 'val')
if classes is None:
train_dir = os.path.join(data_dir, 'train')
classes = os.listdir(train_dir)
folders = {(x,c):os.path.join(data_dir, x, c) for x in xs
for c in classes}
train_val_counts = {x:sum(
[p(folders[x, c], os.listdir, len) for c in classes])
for x in xs}
class_counts = {(x, c): p(folders[x, c], os.listdir, len)
for c in classes for x in xs}
class_percents = {xc: count/train_val_counts[xc[0]]
for (xc, count) in class_counts.items()}
return dict(percent = class_percents, count = class_counts)
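# Hedged usage sketch (class names are illustrative):
#   makeScrapData(['laughing', 'isolation'], n_train=30, n_val=10)
# builds scrap_data30/train/<class>/ and scrap_data30/val/<class>/ with
# class-stratified samples drawn without replacement from image_data/<class>/.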
```
#### File: JDMusc/Online-Bullying-Image-Classifcation/vggTransfer.py
```python
import torch
import torch.nn as nn
from torchvision import models
def loadVgg(n_classes = 9, device = "cuda"):
device = torch.device(device)
vgg = models.vgg19(pretrained=True).to(device)
for param in vgg.parameters():
param.requires_grad = False
n_inputs = 4096
vgg.classifier[6] = nn.Sequential(
nn.Linear(n_inputs, 256),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(256, 100),
nn.ReLU(),
nn.Dropout(.2),
nn.Linear(100, n_classes),
nn.LogSoftmax(dim = 1))
return vgg.to(device)
def viewBackPropParams(vgg):
for (i, (n, param)) in enumerate(vgg.named_parameters()):
if param.requires_grad:
print(str(i) + ': ' + n)
def setParamGrad(vgg, param_ix, requires_grad):
list(vgg.parameters())[param_ix].requires_grad = requires_grad
def unfreezeParam(vgg, param_ix):
setParamGrad(vgg, param_ix, True)
def freezeParam(vgg, param_ix):
setParamGrad(vgg, param_ix, False)
def unfreezeParams(vgg, param_ixs):
for (i, p) in enumerate(vgg.parameters()):
if i in param_ixs:
p.requires_grad = True
def paramName(vgg, param_ix):
return [n for (n, _) in vgg.named_parameters()][param_ix]
def paramNames(vgg, param_ixs):
return [(i, paramName(vgg, i)) for i in param_ixs]
def paramIndex(vgg, param_name):
return [i for (i, (n, _)) in enumerate(vgg.named_parameters()) if n == param_name][0]
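# Hedged usage sketch: after loadVgg(), only the replacement classifier head
# requires gradients; viewBackPropParams() confirms which parameters train.
if __name__ == '__main__':
    vgg = loadVgg(n_classes=9, device='cpu')
    viewBackPropParams(vgg)  # prints only the new classifier-head parameters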
```
|
{
"source": "jdmuss/advent_of_code",
"score": 2
}
|
#### File: advent_of_code/2018/day_6.py
```python
import os
from numpy import where, zeros
#from itertools import repeat
test = [(1, 1), (1, 6), (8, 3), (3, 4), (5, 5), (8, 9)]
#xt, yt = zip(*test)
res = {i:[] for i in test}
for x1, y1 in test:
idx = test.index((x1,y1))
for x2, y2 in (test[:idx]+test[(idx+1):]):
res[(x1, y1)].append((abs(x2-x1)+1 + abs(y2-y1)+1))
#res[(x1, y1)] = sum(res[(x1, y1)])
workPath = os.path.expanduser("~/Documents/Code/Advent_of_code")
os.chdir(workPath)
with open(os.path.join(workPath, "day6_input.txt"), "r") as inFile:
lines = [s.strip() for s in inFile]
x, y = zip(*[(int(c), int(r)) for c, r in [line.split(', ') for line in lines] ])
coords = list(zip(x, y))
minX = min(x)
maxX = max(x) + 1
minY = min(y)
maxY = max(y) + 1
left, right = -1, +1
up, down = -1, +1
neighbors = [(0, 1), (1, 0), (1, 2), (2, 1)]
field = zeros((maxY, maxX), dtype=int)
field[:minY, :] = -1
field[:, :minX] = -1
for i, (c, r) in enumerate(coords):
field[r, c] = i + 1
stop = False
while not stop:
fill = where(field == 0)
if len(fill[0]) == 0:
stop = True
else:
field_copy = field.copy()
for r, c in zip(*fill):
g = where(field[(r-1):(r+2), (c-1):(c+2)] > 0)
fill_coords = [(i,j) for i, j in zip(g[0], g[1]) if (i,j) in neighbors]
vals = set([field[r+fill_row-1, c+fill_col-1] for fill_row, fill_col in [fc for fc in fill_coords]])
if len(vals) == 1:
field_copy[r, c] = list(vals)[0]
elif len(fill_coords) > 1:
field_copy[r, c] = -1
field = field_copy
field
# answer = 3401
# ignore (46, 188), (352, 115), (251, 67), (346, 348)
good_vals = [i+1 for i, c in enumerate(coords) if c not in [(46, 188), (352, 115), (251, 67), (346, 348)]]
bad_vals = set(list(field[:,minX]) + list(field[:,maxX-1]) + list(field[minY, :]) + list(field[maxY-1, :]))
good_vals = [v+1 for v in range(len(x)) if v not in bad_vals]
max([len(where(field==val)[0]) for val in good_vals])
#for r, c in zip(*where(field == 0)):
# Part 2:
from numpy import zeros_like
from itertools import product
def get_dist(pt1, pt2):
return abs(pt1[0]-pt2[0]) + abs(pt1[1]-pt2[1])
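# e.g. get_dist((0, 0), (3, 4)) == 7 (Manhattan/taxicab distance)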
field = zeros((maxY, maxX), dtype=int)
for i, (c, r) in enumerate(coords):
field[r, c] = i + 1
field = field[minY:, :]
field = field[:, minX:]
new_coords = where(field > 0)
new_coords = list(zip(new_coords[0], new_coords[1]))
results = zeros_like(field)
h, w = results.shape
for r, c in product(range(h),range(w)):
results[r, c] = sum([get_dist((r,c), nc) for nc in new_coords])
t = where(results<10000)
len(t[0]) # = 49327
# A nice picture for good measure
import matplotlib.pyplot as plt
norm_color = results.copy()
norm_color[t] = 500
plt.imshow(norm_color)
plt.colorbar()
plt.show()
"""
aaaaa.cccc
aAaaa.cccc
aaaddecccc
aadddeccCc
..dDdeeccc
bb.deEeecc
bBb.eeee..
bbb.eeefff
bbb.eeffff
bbb.ffffFf
"""
from collections import defaultdict
#def d((x1,y1), (x2,y2)):
def d(pt1, pt2):
return abs(pt1[0]-pt2[0]) + abs(pt1[1]-pt2[1])
def closest(x,y):
ds = [(d(p, (x,y)), p) for p in coords]
ds.sort()
if ds[0][0] < ds[1][0]:
return ds[0][1]
else:
return (-1,-1)
def score_around(W):
score = defaultdict(int)
for x in range(minX-W, maxX+W):
for y in range(minY-W, maxY+W):
score[closest(x,y)] += 1
return score
S2 = score_around(400)
S3 = score_around(600)
best = [(S2[k] if S2[k]==S3[k] else 0, k) for k in S2.keys()]
best.sort()
for area, p in best:
print(area, p)
[517, 539, 655, 663, 698, 734, 808, 845, 856, 902, 1202, 1204, 1299, 1439, 1546, 1711, 1719, 1907, 1965, 2034, 2048,
2063, 2226, 2281, 2979, 3351, 3511, 3599, 4149, 5036]
# Another version of #1:
import numpy as np
from scipy.spatial import distance
# read the data using scipy
points = np.loadtxt('input.txt', delimiter=', ')
# build a grid of the appropriate size - note the -1 and +2 to ensure all points
# are within the grid
xmin, ymin = points.min(axis=0) - 1
xmax, ymax = points.max(axis=0) + 2
# and use mesgrid to build the target coordinates
xgrid, ygrid = np.meshgrid(np.arange(xmin, xmax), np.arange(xmin, xmax))
targets = np.dstack([xgrid, ygrid]).reshape(-1, 2)
# happily scipy.spatial.distance has cityblock (or manhatten) distance out
# of the box
cityblock = distance.cdist(points, targets, metric='cityblock')
# the resulting array is an input points x target points array
# so get the index of the maximum along axis 0 to tie each target coordinate
# to closest ID
closest_origin = np.argmin(cityblock, axis=0)
# we need to filter out points with competing closest IDs though
min_distances = np.min(cityblock, axis=0)
competing_locations_filter = (cityblock == min_distances).sum(axis=0) > 1
# note, integers in numpy don't support NaN, so make the ID higher than
# the possible point ID
closest_origin[competing_locations_filter] = len(points) + 1
# and those points around the edge of the region for "infinite" regions
closest_origin = closest_origin.reshape(xgrid.shape)
infinite_ids = np.unique(np.vstack([
closest_origin[0],
closest_origin[-1],
closest_origin[:, 0],
closest_origin[:, -1]
]))
closest_origin[np.isin(closest_origin, infinite_ids)] = len(points) + 1
# and because we know the id of the "null" data is guaranteed to be last
# in the array (it's highest) we can index it out before getting the max
# region size
print(np.max(np.bincount(closest_origin.ravel())[:-1]))
# finally, make a pretty picture for good measure
import matplotlib.pyplot as plt
plt.imshow(np.where(closest_origin > len(points), np.NaN, closest_origin))
plt.colorbar()
plt.show()
```
#### File: advent_of_code/2020/day_11.py
```python
import os
from itertools import product
import re
from numpy import append, array, bincount, diff, ma, sort #cumsum, nditer, roll, setdiff1d, where
from numpy import product as np_prod
seating_re = re.compile('[L\.]')
workPath = os.path.expanduser("~/Documents/Code/Advent_of_code/2020")
os.chdir(workPath)
#with open("day-11_data.txt", "r") as in_file:
with open("test_data.txt", "r") as in_file:
data = array([list(row.strip()) for row in in_file])
empty_seats = ma.masked_where(data == 'L', data).mask
floor = ma.masked_where(data == '.', data).mask
occupied_seats = ma.masked_where(data == '#', data).mask
occupied = array([[False, False, False], [False, True, False], [False, False, False]])
# Part 1:
# NOTE: the jolt logic below expects `data` to be a 1-D array of adapter
# ratings (the day 10 input), not the seat grid loaded above.
sorted_adapters = sort(data)
sorted_adapters = append(append(array([0]), sorted_adapters), sorted_adapters[-1]+3)
jolts = diff(sorted_adapters)
distribution = {k:v for k, v in zip(range(max(set(jolts))+4), bincount(jolts))}
print(f"The product of the counts of 1- and 3-jolt differences is {distribution[1]*distribution[3]}")
# Part 2:
def possible_permutations(n, m):
perms = (i for i in product(list(range(m + 1)), repeat=n) if sum(i) == n)
return set(tuple(n for n in sublist if n != 0) for sublist in perms)
max_step = 3
reps = re.findall('1{2,}', ''.join([str(i) for i in jolts]))
rep_lens = [len(i) for i in reps]
perm_dict = {s:len(possible_permutations(s, max_step)) for s in range(2, max(rep_lens) + 1)}
counts = np_prod([perm_dict[possibilities] for possibilities in rep_lens])
print(f"There are {counts} possible permutations of the adapters")
```
#### File: advent_of_code/2020/day_3.py
```python
import os
from numpy import product
from onr_py.utils import path_expander
os.chdir(path_expander('~/Documents/Code/Advent_of_code/2020'))
with open('day_3_data.txt', 'r') as in_file:
data = [row.strip() for row in in_file.readlines()]
# Part 1:
right = 3
down = 1
wrap = len(data[0])
def ride(right, down, map):
trees = 0
right_idx = 0
for row_idx in range(1, len(map), down):
right_idx = (right_idx + right) % wrap
if map[row_idx][right_idx] == '#': trees += 1
return trees
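# Hedged check (ride reads the module-level `wrap`): for a 2-column map
# ['..', '.#'], wrap == 2 and ride(1, 1, ...) inspects map[1][1] == '#',
# returning 1 tree.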
print(f"Part 1:the tobbogan hit {ride(right, down, data)} trees")
# Part 2: do this again, but for five different slopes
slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
trees = []
for i, (right, down) in enumerate(slopes):
trees.append(ride(right, down, data))
print(f"Part 2:the answer is {product(trees)}")
```
|
{
"source": "jdm/web-platform-tests",
"score": 2
}
|
#### File: wptserve/wptserve/config.py
```python
import logging
import os
from collections import defaultdict, Mapping
from six import iteritems, itervalues
from .sslutils import environments
from .utils import get_port
_renamed_props = {
"host": "browser_host",
"bind_hostname": "bind_address",
"external_host": "server_host",
"host_ip": "server_host",
}
def _merge_dict(base_dict, override_dict):
rv = base_dict.copy()
for key, value in iteritems(base_dict):
if key in override_dict:
if isinstance(value, dict):
rv[key] = _merge_dict(value, override_dict[key])
else:
rv[key] = override_dict[key]
return rv
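# e.g. _merge_dict({"a": 1, "b": {"c": 2}}, {"b": {"c": 3}, "x": 9})
# returns {"a": 1, "b": {"c": 3}}: nested keys are overridden recursively,
# while keys absent from base_dict (like "x") are ignored rather than added.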
class Config(Mapping):
"""wptserve config
Inherits from Mapping for backwards compatibility with the old dict-based config"""
_default = {
"browser_host": "localhost",
"alternate_hosts": {},
"doc_root": os.path.dirname("__file__"),
"server_host": None,
"ports": {"http": [8000]},
"check_subdomains": True,
"log_level": "debug",
"bind_address": True,
"ssl": {
"type": "none",
"encrypt_after_connect": False,
"openssl": {
"openssl_binary": "openssl",
"base_path": "_certs",
"force_regenerate": False,
"base_conf_path": None
},
"pregenerated": {
"host_key_path": None,
"host_cert_path": None,
},
},
"aliases": []
}
def __init__(self,
logger=None,
subdomains=set(),
not_subdomains=set(),
**kwargs):
self.log_level = kwargs.get("log_level", "DEBUG")
if logger is None:
self._logger_name = "web-platform-tests"
else:
level_name = logging.getLevelName(logger.level)
if level_name != "NOTSET":
self.log_level = level_name
self._logger_name = logger.name
for k, v in iteritems(self._default):
setattr(self, k, kwargs.pop(k, v))
self.subdomains = subdomains
self.not_subdomains = not_subdomains
for k, new_k in iteritems(_renamed_props):
if k in kwargs:
self.logger.warning(
"%s in config is deprecated; use %s instead" % (
k,
new_k
)
)
setattr(self, new_k, kwargs.pop(k))
self.override_ssl_env = kwargs.pop("override_ssl_env", None)
if kwargs:
raise TypeError("__init__() got unexpected keyword arguments %r" % (tuple(kwargs),))
def __getitem__(self, k):
try:
return getattr(self, k)
except AttributeError:
raise KeyError(k)
def __iter__(self):
return iter([x for x in dir(self) if not x.startswith("_")])
def __len__(self):
return len([x for x in dir(self) if not x.startswith("_")])
def update(self, override):
"""Load an overrides dict to override config values"""
override = override.copy()
for k in self._default:
if k in override:
self._set_override(k, override.pop(k))
for k, new_k in iteritems(_renamed_props):
if k in override:
self.logger.warning(
"%s in config is deprecated; use %s instead" % (
k,
new_k
)
)
self._set_override(new_k, override.pop(k))
if override:
k = next(iter(override))
raise KeyError("unknown config override '%s'" % k)
def _set_override(self, k, v):
old_v = getattr(self, k)
if isinstance(old_v, dict):
setattr(self, k, _merge_dict(old_v, v))
else:
setattr(self, k, v)
@property
def ports(self):
# To make this method thread-safe, we write to a temporary dict first,
# and change self._computed_ports to the new dict at last atomically.
new_ports = defaultdict(list)
try:
old_ports = self._computed_ports
except AttributeError:
old_ports = {}
for scheme, ports in iteritems(self._ports):
for i, port in enumerate(ports):
if scheme in ["wss", "https"] and not self.ssl_env.ssl_enabled:
port = None
if port == "auto":
try:
port = old_ports[scheme][i]
except (KeyError, IndexError):
port = get_port()
new_ports[scheme].append(port)
self._computed_ports = new_ports
return self._computed_ports
@ports.setter
def ports(self, v):
self._ports = v
@property
def doc_root(self):
return self._doc_root
@doc_root.setter
def doc_root(self, v):
self._doc_root = v
@property
def server_host(self):
return self._server_host if self._server_host is not None else self.browser_host
@server_host.setter
def server_host(self, v):
self._server_host = v
@property
def domains(self):
hosts = self.alternate_hosts.copy()
assert "" not in hosts
hosts[""] = self.browser_host
rv = {}
for name, host in iteritems(hosts):
rv[name] = {subdomain: (subdomain.encode("idna").decode("ascii") + u"." + host)
for subdomain in self.subdomains}
rv[name][""] = host
return rv
@property
def not_domains(self):
hosts = self.alternate_hosts.copy()
assert "" not in hosts
hosts[""] = self.browser_host
rv = {}
for name, host in iteritems(hosts):
rv[name] = {subdomain: (subdomain.encode("idna").decode("ascii") + u"." + host)
for subdomain in self.not_subdomains}
return rv
@property
def all_domains(self):
rv = self.domains.copy()
nd = self.not_domains
for host in rv:
rv[host].update(nd[host])
return rv
@property
def domains_set(self):
return {domain
for per_host_domains in itervalues(self.domains)
for domain in itervalues(per_host_domains)}
@property
def not_domains_set(self):
return {domain
for per_host_domains in itervalues(self.not_domains)
for domain in itervalues(per_host_domains)}
@property
def all_domains_set(self):
return self.domains_set | self.not_domains_set
@property
def paths(self):
return {"doc_root": self.doc_root}
@property
def ssl_env(self):
try:
if self.override_ssl_env is not None:
return self.override_ssl_env
except AttributeError:
pass
implementation_type = self.ssl["type"]
try:
cls = environments[implementation_type]
except KeyError:
raise ValueError("%s is not a vaid ssl type." % implementation_type)
kwargs = self.ssl.get(implementation_type, {}).copy()
return cls(self.logger, **kwargs)
@property
def ssl_config(self):
key_path, cert_path = self.ssl_env.host_cert_path(self.domains_set)
return {"key_path": key_path,
"cert_path": cert_path,
"encrypt_after_connect": self.ssl["encrypt_after_connect"]}
@property
def log_level(self):
return getattr(logging, self._log_level)
@log_level.setter
def log_level(self, value):
self._log_level = value.upper()
@property
def logger(self):
logger = logging.getLogger(self._logger_name)
logger.setLevel(self.log_level)
return logger
def as_dict(self):
rv = {
"domains": list(self.domains),
"sundomains": list(self.subdomains),
}
        for item in self._default:
rv[item] = getattr(self, item)
return rv
```
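A minimal usage sketch for the `Config` class above; the host name and port values are made up, and it assumes the surrounding wptserve imports resolve:
```python
config = Config(browser_host="web-platform.test",
                ports={"http": ["auto"]})
# The Mapping interface is preserved for backwards compatibility:
assert config["browser_host"] == "web-platform.test"
# update() merges nested dicts such as "ssl" instead of replacing them,
# so untouched sub-keys keep their defaults:
config.update({"ssl": {"type": "openssl"}})
assert config.ssl["type"] == "openssl"
assert config.ssl["openssl"]["openssl_binary"] == "openssl"
```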
#### File: tests/delete_session/delete.py
```python
import pytest
from webdriver import error
from tests.support.asserts import assert_success
from tests.support.inline import inline
def delete_session(session):
return session.transport.send("DELETE", "session/{session_id}".format(**vars(session)))
def test_null_response_value(session):
response = delete_session(session)
value = assert_success(response)
assert value is None
# Need an explicit call to session.end() to notify the test harness
# that a new session needs to be created for subsequent tests.
session.end()
def test_dismissed_beforeunload_prompt(session):
session.url = inline("""
<input type="text">
<script>
window.addEventListener("beforeunload", function (event) {
event.preventDefault();
});
</script>
""")
session.find.css("input", all=False).send_keys("foo")
response = delete_session(session)
assert_success(response)
# A beforeunload prompt has to be automatically dismissed, and the session deleted
with pytest.raises(error.InvalidSessionIdException):
session.alert.text
# Need an explicit call to session.end() to notify the test harness
# that a new session needs to be created for subsequent tests.
session.end()
```
#### File: tests/is_element_selected/user_prompts.py
```python
import pytest
from tests.support.asserts import assert_error, assert_dialog_handled, assert_success
from tests.support.inline import inline
def is_element_selected(session, element_id):
return session.transport.send(
"GET", "session/{session_id}/element/{element_id}/selected".format(
session_id=session.session_id,
element_id=element_id))
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_handle_prompt_accept(session, create_dialog, dialog_type):
session.url = inline("<input id=foo>")
element = session.find.css("#foo", all=False)
create_dialog(dialog_type, text="dialog")
response = is_element_selected(session, element.id)
assert_success(response, False)
assert_dialog_handled(session, expected_text="dialog")
def test_handle_prompt_accept_and_notify():
"""TODO"""
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_handle_prompt_dismiss(session, create_dialog, dialog_type):
session.url = inline("<input id=foo>")
element = session.find.css("#foo", all=False)
create_dialog(dialog_type, text="dialog")
response = is_element_selected(session, element.id)
assert_success(response, False)
assert_dialog_handled(session, expected_text="dialog")
def test_handle_prompt_dismiss_and_notify():
"""TODO"""
def test_handle_prompt_ignore():
"""TODO"""
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_handle_prompt_default(session, create_dialog, dialog_type):
session.url = inline("<input id=foo>")
element = session.find.css("#foo", all=False)
create_dialog(dialog_type, text="dialog")
response = is_element_selected(session, element.id)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, expected_text="dialog")
```
#### File: tests/set_window_rect/set.py
```python
import pytest
from tests.support.asserts import assert_error, assert_success
def set_window_rect(session, rect):
return session.transport.send(
"POST", "session/{session_id}/window/rect".format(**vars(session)),
rect)
def is_fullscreen(session):
# At the time of writing, WebKit does not conform to the Fullscreen API specification.
# Remove the prefixed fallback when https://bugs.webkit.org/show_bug.cgi?id=158125 is fixed.
return session.execute_script("return !!(window.fullScreen || document.webkitIsFullScreen)")
# 10.7.2 Set Window Rect
def test_current_top_level_browsing_context_no_longer_open(session, create_window):
"""
1. If the current top-level browsing context is no longer open,
return error with error code no such window.
"""
session.window_handle = create_window()
session.close()
response = set_window_rect(session, {})
assert_error(response, "no such window")
@pytest.mark.parametrize("rect", [
{"width": "a"},
{"height": "b"},
{"width": "a", "height": "b"},
{"x": "a"},
{"y": "b"},
{"x": "a", "y": "b"},
{"width": "a", "height": "b", "x": "a", "y": "b"},
{"width": True},
{"height": False},
{"width": True, "height": False},
{"x": True},
{"y": False},
{"x": True, "y": False},
{"width": True, "height": False, "x": True, "y": False},
{"width": []},
{"height": []},
{"width": [], "height": []},
{"x": []},
{"y": []},
{"x": [], "y": []},
{"width": [], "height": [], "x": [], "y": []},
{"height": {}},
{"width": {}},
{"height": {}, "width": {}},
{"x": {}},
{"y": {}},
{"x": {}, "y": {}},
{"width": {}, "height": {}, "x": {}, "y": {}},
])
def test_invalid_types(session, rect):
"""
8. If width or height is neither null nor a Number from 0 to 2^31 -
1, return error with error code invalid argument.
9. If x or y is neither null nor a Number from -(2^31) to 2^31 - 1,
return error with error code invalid argument.
"""
response = set_window_rect(session, rect)
assert_error(response, "invalid argument")
@pytest.mark.parametrize("rect", [
{"width": -1},
{"height": -2},
{"width": -1, "height": -2},
])
def test_out_of_bounds(session, rect):
"""
8. If width or height is neither null nor a Number from 0 to 2^31 -
1, return error with error code invalid argument.
9. If x or y is neither null nor a Number from -(2^31) to 2^31 - 1,
return error with error code invalid argument.
"""
response = set_window_rect(session, rect)
assert_error(response, "invalid argument")
def test_width_height_floats(session):
"""
8. If width or height is neither null nor a Number from 0 to 2^31 -
1, return error with error code invalid argument.
"""
response = set_window_rect(session, {"width": 500.5, "height": 420})
value = assert_success(response)
assert value["width"] == 500
assert value["height"] == 420
response = set_window_rect(session, {"width": 500, "height": 450.5})
value = assert_success(response)
assert value["width"] == 500
assert value["height"] == 450
def test_x_y_floats(session):
"""
9. If x or y is neither null nor a Number from -(2^31) to 2^31 - 1,
return error with error code invalid argument.
"""
response = set_window_rect(session, {"x": 0.5, "y": 420})
value = assert_success(response)
assert value["x"] == 0
assert value["y"] == 420
response = set_window_rect(session, {"x": 100, "y": 450.5})
value = assert_success(response)
assert value["x"] == 100
assert value["y"] == 450
@pytest.mark.parametrize("rect", [
{},
{"width": None},
{"height": None},
{"width": None, "height": None},
{"x": None},
{"y": None},
{"x": None, "y": None},
{"width": None, "x": None},
{"width": None, "y": None},
{"height": None, "x": None},
{"height": None, "Y": None},
{"width": None, "height": None, "x": None, "y": None},
{"width": 200},
{"height": 200},
{"x": 200},
{"y": 200},
{"width": 200, "x": 200},
{"height": 200, "x": 200},
{"width": 200, "y": 200},
{"height": 200, "y": 200},
])
def test_no_change(session, rect):
"""
13. If width and height are not null:
[...]
14. If x and y are not null:
[...]
15. Return success with the JSON serialization of the current
top-level browsing context's window rect.
"""
original = session.window.rect
response = set_window_rect(session, rect)
assert_success(response, original)
def test_fully_exit_fullscreen(session):
"""
10. Fully exit fullscreen.
[...]
To fully exit fullscreen a document document, run these steps:
1. If document's fullscreen element is null, terminate these steps.
2. Unfullscreen elements whose fullscreen flag is set, within
document's top layer, except for document's fullscreen element.
3. Exit fullscreen document.
"""
session.window.fullscreen()
assert is_fullscreen(session) is True
response = set_window_rect(session, {"width": 400, "height": 400})
value = assert_success(response)
assert value["width"] == 400
assert value["height"] == 400
assert is_fullscreen(session) is False
def test_restore_from_minimized(session):
"""
12. If the visibility state of the top-level browsing context's
active document is hidden, restore the window.
[...]
To restore the window, given an operating system level window with
an associated top-level browsing context, run implementation-specific
steps to restore or unhide the window to the visible screen. Do not
return from this operation until the visibility state of the top-level
browsing context's active document has reached the visible state,
or until the operation times out.
"""
session.window.minimize()
assert session.execute_script("return document.hidden") is True
response = set_window_rect(session, {"width": 450, "height": 450})
value = assert_success(response)
assert value["width"] == 450
assert value["height"] == 450
assert session.execute_script("return document.hidden") is False
def test_restore_from_maximized(session):
"""
12. If the visibility state of the top-level browsing context's
active document is hidden, restore the window.
[...]
To restore the window, given an operating system level window with
an associated top-level browsing context, run implementation-specific
steps to restore or unhide the window to the visible screen. Do not
return from this operation until the visibility state of the top-level
browsing context's active document has reached the visible state,
or until the operation times out.
"""
original_size = session.window.size
session.window.maximize()
assert session.window.size != original_size
response = set_window_rect(session, {"width": 400, "height": 400})
value = assert_success(response)
assert value["width"] == 400
assert value["height"] == 400
def test_height_width(session):
original = session.window.rect
max = session.execute_script("""
return {
width: window.screen.availWidth,
height: window.screen.availHeight,
}""")
# step 12
response = set_window_rect(session, {"width": max["width"] - 100,
"height": max["height"] - 100})
# step 14
assert_success(response, {"x": original["x"],
"y": original["y"],
"width": max["width"] - 100,
"height": max["height"] - 100})
def test_height_width_larger_than_max(session):
max = session.execute_script("""
return {
width: window.screen.availWidth,
height: window.screen.availHeight,
}""")
# step 12
response = set_window_rect(session, {"width": max["width"] + 100,
"height": max["height"] + 100})
# step 14
rect = assert_success(response)
assert rect["width"] >= max["width"]
assert rect["height"] >= max["height"]
def test_height_width_as_current(session):
original = session.window.rect
# step 12
response = set_window_rect(session, {"width": original["width"],
"height": original["height"]})
# step 14
assert_success(response, {"x": original["x"],
"y": original["y"],
"width": original["width"],
"height": original["height"]})
def test_x_y(session):
original = session.window.rect
# step 13
response = set_window_rect(session, {"x": original["x"] + 10,
"y": original["y"] + 10})
# step 14
assert_success(response, {"x": original["x"] + 10,
"y": original["y"] + 10,
"width": original["width"],
"height": original["height"]})
def test_negative_x_y(session):
original = session.window.rect
# step 13
response = set_window_rect(session, {"x": - 8, "y": - 8})
# step 14
os = session.capabilities["platformName"]
# certain WMs prohibit windows from being moved off-screen
if os == "linux":
rect = assert_success(response)
assert rect["x"] <= 0
assert rect["y"] <= 0
assert rect["width"] == original["width"]
assert rect["height"] == original["height"]
# On macOS, windows can only be moved off the screen on the
# horizontal axis. The system menu bar also blocks windows from
# being moved to (0,0).
elif os == "mac":
assert_success(response, {"x": -8,
"y": 23,
"width": original["width"],
"height": original["height"]})
# It turns out that Windows is the only platform on which the
# window can be reliably positioned off-screen.
elif os == "windows":
assert_success(response, {"x": -8,
"y": -8,
"width": original["width"],
"height": original["height"]})
def test_move_to_same_position(session):
original_position = session.window.position
position = session.window.position = original_position
assert position == original_position
def test_move_to_same_x(session):
original_x = session.window.position[0]
position = session.window.position = (original_x, 345)
assert position == (original_x, 345)
def test_move_to_same_y(session):
original_y = session.window.position[1]
position = session.window.position = (456, original_y)
assert position == (456, original_y)
def test_resize_to_same_size(session):
original_size = session.window.size
size = session.window.size = original_size
assert size == original_size
def test_resize_to_same_width(session):
original_width = session.window.size[0]
size = session.window.size = (original_width, 345)
assert size == (original_width, 345)
def test_resize_to_same_height(session):
original_height = session.window.size[1]
size = session.window.size = (456, original_height)
assert size == (456, original_height)
"""
TODO(ato):
Disable test because the while statements are wrong.
To fix this properly we need to write an explicit wait utility.
def test_resize_by_script(session):
# setting the window size by JS is asynchronous
# so we poll waiting for the results
size0 = session.window.size
session.execute_script("window.resizeTo(700, 800)")
size1 = session.window.size
while size0 == size1:
size1 = session.window.size
assert size1 == (700, 800)
session.execute_script("window.resizeTo(800, 900)")
size2 = session.window.size
while size1 == size2:
size2 = session.window.size
assert size2 == (800, 900)
assert size2 == {"width": 200, "height": 100}
"""
def test_payload(session):
# step 14
response = set_window_rect(session, {"x": 400, "y": 400})
assert response.status == 200
assert isinstance(response.body["value"], dict)
value = response.body["value"]
assert "width" in value
assert "height" in value
assert "x" in value
assert "y" in value
assert isinstance(value["width"], int)
assert isinstance(value["height"], int)
assert isinstance(value["x"], int)
assert isinstance(value["y"], int)
```
|
{
"source": "jdnascim/mo434-ODIR-5K",
"score": 3
}
|
#### File: exps/2-final/confusion_matrix_best_model.py
```python
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import EfficientNetB3
from tensorflow.keras.layers import Flatten, Dense, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay
import numpy as np
import matplotlib.pyplot as plt
import itertools
def plot_confusion_matrix(cm, class_names):
"""
Returns a matplotlib figure containing the plotted confusion matrix.
Args:
cm (array, shape = [n, n]): a confusion matrix of integer classes
class_names (array, shape = [n]): String names of the integer classes
"""
figure = plt.figure(figsize=(8, 8))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title("Confusion matrix")
plt.colorbar()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45)
plt.yticks(tick_marks, class_names)
# Normalize the confusion matrix.
cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)
# Use white text if squares are dark; otherwise black.
threshold = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
color = "white" if cm[i, j] > threshold else "black"
plt.text(j, i, cm[i, j], horizontalalignment="center", color=color)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return figure
BATCH_SIZE = 16
QTDE_TEST = 348
base_model = EfficientNetB3(weights='imagenet')
out = base_model.get_layer('top_dropout').output
out = Dense(8, activation='softmax', name='predictions')(out)
model = Model(base_model.input, out)
# We compile the model
model.compile(optimizer=Adam(lr=0.001), loss='categorical_crossentropy',
metrics=['AUC'])
model.load_weights("ft_efficientnetb3_top_dropout_lr-4_best_model.h5")
datagen = ImageDataGenerator()
# shuffle=False keeps the prediction order aligned with flow.classes below
flow = datagen.flow_from_directory("/work/ocular-dataset/ODIR-5K-Flow/fake-test",
                                   shuffle=False)
# Confusion Matrix and Classification Report
Y_pred = model.predict_generator(flow, QTDE_TEST // BATCH_SIZE + 1)
y_pred = np.argmax(Y_pred, axis=1)
print('Confusion Matrix')
cm = confusion_matrix(flow.classes, y_pred)
print(cm)
print('Classification Report')
target_names = ['N', 'D', 'G', 'C', "A", "H", "M", "O"]
print(classification_report(flow.classes, y_pred, target_names=target_names))
#abc = plot_confusion_matrix(cm, target_names)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=target_names)
abc = disp.plot().figure_
abc.savefig("confusion_matrix.png")
```
#### File: mo434-ODIR-5K/exps/ODIR_evaluation.py
```python
from sklearn import metrics
import numpy as np
import sys
import xlrd
import csv
# read the ground truth from xlsx file and output case id and eight labels
def importGT(filepath):
data = xlrd.open_workbook(filepath)
table = data.sheets()[0]
data = [ [int(table.row_values(i,0,1)[0])] + table.row_values(i,-8) for i in range(1,table.nrows)]
return np.array(data)
# read the submitted predictions in csv format and output case id and eight labels
def importPR(gt_data,filepath):
with open(filepath,'r') as f:
reader = csv.reader(f)
header = next(reader)
pr_data = [ [int(row[0])] + list(map(float, row[1:])) for row in reader]
pr_data = np.array(pr_data)
# Sort columns if they are not in predefined order
order = ['ID','N', 'D', 'G', 'C', 'A', 'H', 'M', 'O']
order_index = [0, 1, 2, 3, 4, 5, 6, 7, 8]
order_dict = {item: ind for ind, item in enumerate(order)}
sort_index = [order_dict[item] for ind, item in enumerate(header) if item in order_dict]
wrong_col_order = 0
    if sort_index != order_index:
wrong_col_order = 1
pr_data[:,order_index] = pr_data[:,sort_index]
# Sort rows if they are not in predefined order
wrong_row_order = 0
order_dict = {item: ind for ind, item in enumerate(gt_data[:,0])}
order_index = [ v for v in order_dict.values() ]
sort_index = [order_dict[item] for ind, item in enumerate(pr_data[:,0]) if item in order_dict]
    if sort_index != order_index:
wrong_row_order = 1
pr_data[order_index,:] = pr_data[sort_index,:]
# If have missing results
missing_results = 0
    if gt_data.shape != pr_data.shape:
missing_results = 1
return pr_data,wrong_col_order,wrong_row_order,missing_results
# calculate kappa, F-1 score and AUC value
def ODIR_Metrics(gt_data, pr_data):
th = 0.5
gt = gt_data.flatten()
pr = pr_data.flatten()
kappa = metrics.cohen_kappa_score(gt, pr>th)
f1 = metrics.f1_score(gt, pr>th, average='micro')
auc = metrics.roc_auc_score(gt, pr)
final_score = (kappa+f1+auc)/3.0
return kappa, f1, auc, final_score
def ODIR_Evaluation(GT_filepath, PR_filepath):
gt_data = importGT(GT_filepath)
pr_data, wrong_col_order, wrong_row_order, missing_results = importPR(gt_data,PR_filepath)
if wrong_col_order:
print(sys.argv[0], "\n Error: Submission with disordered columns.")
sys.exit(-1)
if wrong_row_order:
print(sys.argv[0], "\n Error: Submission with disordered rows.")
sys.exit(-1)
if missing_results:
print(sys.argv[0], "\n Error: Incomplete submission with missing data.")
sys.exit(-1)
kappa, f1, auc, final_score = ODIR_Metrics(gt_data[:,1:], pr_data[:,1:])
print("kappa score:", kappa, " f-1 score:", f1, " AUC vlaue:", auc, " Final Score:", final_score)
```
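For intuition, the final score is just the mean of kappa, F-1 and AUC over the flattened one-hot matrices. A toy reproduction with made-up ground truth and predictions:
```python
import numpy as np
from sklearn import metrics

gt = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]])
pr = np.array([[0.9, 0.1, 0.2], [0.2, 0.8, 0.1],
               [0.1, 0.3, 0.7], [0.4, 0.6, 0.2]])  # last row is wrong
th = 0.5
binarized = (pr.flatten() > th).astype(int)
kappa = metrics.cohen_kappa_score(gt.flatten(), binarized)
f1 = metrics.f1_score(gt.flatten(), binarized, average='micro')
auc = metrics.roc_auc_score(gt.flatten(), pr.flatten())
print("final score:", (kappa + f1 + auc) / 3.0)
```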
|
{
"source": "jdnc/astroquery",
"score": 2
}
|
#### File: astroquery/irsa_dust/core.py
```python
import types
import time
import warnings
import io
import sys
import urllib
import urllib2
import StringIO
import string
import re
from xml.etree.ElementTree import ElementTree
from astropy.table import Table, Column
import astropy.units as u
from astropy.io import fits
from . import utils
__all__ = ["DustResults", "SingleDustResult", "query"]
DUST_SERVICE_URL = "http://irsa.ipac.caltech.edu/cgi-bin/DUST/nph-dust"
EXT_DESC = "E(B-V) Reddening"
EM_DESC = "100 Micron Emission"
TEMP_DESC = "Dust Temperature"
INPUT = "input"
OBJ_NAME = "objname"
REG_SIZE = "regSize"
DESC = "desc"
IMAGE_URL = "imageUrl"
STATISTICS = "statistics"
REF_PIXEL_VALUE = "refPixelValue"
REF_COORDINATE = "refCoordinate"
MEAN_VALUE = "meanValue"
STD = "std"
MAX_VALUE = "maxValue"
MIN_VALUE = "minValue"
DATA_IMAGE = "./data/image"
DATA_TABLE = "./data/table"
DELAY = 1
def query(location, reg_size=None, delay=DELAY, verbose=False, url=DUST_SERVICE_URL):
"""
Queries the IRSA Galactic Dust Reddening and Extinction service
and returns the results.
Parameters
----------
location : str
Can be either the name of an object or a coordinate string
        If a name, must be resolvable by NED, SIMBAD, 2MASS, or SWAS.
Examples of acceptable coordinate strings, can be found here:
http://irsa.ipac.caltech.edu/applications/DUST/docs/coordinate.html
reg_size : Number
(optional) the size of the region to include in the dust query, in degrees
Defaults to 5 degrees.
delay : int
(optional) wait time between queries in seconds. Default is 1 second. Included in case
rapid fire queries are considered obnoxious behavior by the server.
url : str
(optional) when specified, overrides the default IRSA dust service url,
sending queries to the given url instead - should only be necessary for testing
Returns
-------
result : `astroquery.irsa_dust.DustResults`
object containing the results of the query
Examples
--------
Query a single object by object name and output results as an astropy Table:
>>> dust_result = query('m81')
>>> table = dust_result.table()
>>> table.pprint()
Query multiple objects with a single command:
>>> dust_result = query(['m101', 'm33', 'm15'])
>>> table = dust_result.table()
>>> table.pprint()
Query a single object by coordinates, then get extinction detail table
and FITS emission image:
>>> dust_result = query('266.12 -61.89 equ j2000')
>>> detail_table = dust_result.ext_detail_table()
>>> emission_image = dust_result.image('emission')
>>> emission_image.writeto("image1.fits")
"""
if not isinstance(location, types.ListType):
location = [location]
# Query each location, one by one.
result_set = []
index = 1
for loc in location:
options = {"locstr" : loc}
if reg_size != None:
options["regSize"] = reg_size
# Do the query
try:
if verbose:
log_str = ("Executing query " + str(index) + " of " + str(len(location))
+ ", location: " + loc)
print(log_str)
response = utils.query(options, url, debug=True)
xml_tree = utils.xml(response)
except Exception as ex:
warnings.warn("Query for location " + loc + " resulted in an error.\n" + str(ex))
continue
# Parse the results
#try:
result = SingleDustResult(xml_tree, loc)
result_set.append(result)
if verbose:
print("Success.")
#except Exception as ex:
# warnings.warn("Could not parse results of query for location " + loc + ".\n" + str(ex))
# continue
# Wait a little while before querying again, to give the server a break
if delay != None and index < len(location):
time.sleep(delay)
index += 1
if len(result_set) == 0:
msg = """Query or queries did not return any parseable results. Cannot instantiate DustResult."""
raise ValueError(msg)
dust_results = DustResults(result_set)
return dust_results
class DustResults(object):
"""
    Represents the response(s) to one or more dust queries. It's essentially a wrapper around a list
of SingleDustResult objects.
"""
def __init__(self, result_set):
"""
Parameters
----------
result_set : list[SingleDustResult]
a list of one or more SingleDustResult objects
"""
if len(result_set) == 0:
raise ValueError("Cannot instantiate DustResult with empty result set.")
self._result_set = result_set
def table(self, section=None):
"""
Create and return an astropy Table representing the query response(s).
When `section` is missing or `all`, returns the full table. When a
section is specified (`location`, `extinction`, `emission`, or `temperature`),
only that portion of the table is returned.
Parameters
----------
section : str
When missing or `all`, returns the full table. When the name
of a section is given, only that portion of the table is returned.
The following values are accepted::
'all'
'location', 'loc', 'l',
'reddening', 'red', 'r',
'emission', 'em', 'e',
'temperature', 'temp', 't'
Returns
-------
table : `astropy.table.Table`
Either the full table or a table containing one of the four
sections of the table, depending on what the section
parameter was.
"""
# Use the first result to create the table.
# Use values from the other results to create additional rows.
table = self._result_set[0].table(section=section)
for result in self._result_set[1:]:
values = result.values(section=section)
table.add_row(vals=values)
return table
def ext_detail_table(self, row=1):
"""
        Fetch the extinction detail table specified in the given query result.
Parameters
----------
row : int
the index of the dust result within the list of results
The list of results has the same order as the list of locations specified
in the query.
Returns
-------
the extinction detail table, in `astropy.io.ascii.Ipac` format
"""
if row < 1 or row > len(self._result_set):
raise IndexError("Row " + str(row) + " is out of bounds for this table of length "
+ str(len(self._result_set)) + ".")
return self._result_set[row-1].ext_detail_table()
def image(self, section, row=1):
"""
        Return the image associated with the given section and row.
Parameters
----------
section : str
the name or abbreviation for the section (extinction, emission, temperature)
row : int
the index of the dust result within the list of results
The list of results has the same order as the list of locations specified
in the query.
"""
if row < 1 or row > len(self._result_set):
raise IndexError("Row " + str(row) + " is out of bounds for this table of length "
+ str(len(self._result_set)) + ".")
return self._result_set[row-1].image(section)
def query_loc(self, row=1):
"""
Return the query location.
Parameters
----------
row : int
the index of the dust result within the list of results
The list of results has the same order as the list of locations specified
in the query.
"""
if row < 1 or row > len(self._result_set):
raise IndexError("Row " + str(row) + " is out of bounds for this table of length "
+ str(len(self._result_set)) + ".")
location = self._result_set[row-1].query_loc
return location
@property
def result_set(self):
"""
Returns
-------
result_set : list[SingleDustResult]
the list of SingleDustResult objects underlying this DustResults object
"""
return self._result_set
def append(self, dust_results2):
"""
Append the results from the given DustResults object to this DustResults object.
Parameters
----------
dust_results2 : `astroquery.irsa_dust.DustResults`
the results to append
"""
#self._result_set.extend(dust_results2.result_set)
result_set2_copy = []
for result in dust_results2.result_set:
single_result_copy = SingleDustResult(result.xml, result.query_loc)
result_set2_copy.append(single_result_copy)
self._result_set.extend(result_set2_copy)
def __str__(self):
"""Return a string representation of this DustResult."""
string = ""
for result in self._result_set:
string += result.__str__()
return string
class SingleDustResult(object):
"""
Represents the response to a dust query for a single object or location.
Provides methods to return the response as an astropy Table, and to retrieve
FITS images listed as urls in the initial response. It can also retrieve
a detailed extinction table linked to in the initial response. Not intended
to be instantiated by the end user.
"""
def __init__(self, xml_tree, query_loc):
"""
Parameters
----------
        xml_tree : `xml.etree.ElementTree`
the xml tree representing the response to the query
query_loc : str
the location string specified in the query
"""
self._xml = xml_tree
self._query_loc = query_loc
self._location_section = LocationSection(xml_tree)
ext_node = utils.find_result_node(EXT_DESC, xml_tree)
self._ext_section = ExtinctionSection(ext_node)
em_node = utils.find_result_node(EM_DESC, xml_tree)
self._em_section = EmissionSection(em_node)
temp_node = utils.find_result_node(TEMP_DESC, xml_tree)
self._temp_section = TemperatureSection(temp_node)
self._result_sections = [self._location_section, self._ext_section,
self._em_section, self._temp_section]
@property
def query_loc(self):
"""Return the location string used in the query."""
return self._query_loc
@property
def xml(self):
"""Return the raw xml underlying this SingleDustResult."""
return self._xml
def table(self, section=None):
"""
Create and return an astropy Table representing the query response.
Parameters
----------
section : str
(optional) the name of the section to include in the table.
If not provided, the entire table will be returned.
"""
code = self._section_code(section)
if code == "all":
return self._table_all()
else:
return self._table(code)
def values(self, section=None):
"""
Return the data values contained in the query response,
i.e. the list of values corresponding to a row in the result table.
Parameters
----------
section : str
the name of the section to include in the response
If no section is given, all sections will be included.
"""
code = self._section_code(section)
sections = self._sections(code)
values = []
for sec in sections:
values.extend(sec.values())
return values
def _section_code(self, section):
"""
Return a one-letter code identifying the section.
Parameters
----------
section : str
the name or abbreviated name of the section
Returns
-------
str: a one-letter code identifying the section.
"""
if section == None:
return "all"
else:
if section in ["location", "loc", "l"]:
return "l"
elif section in ["reddening", "red", "r"]:
return "r"
elif section in ["emission", "em", "e"]:
return "e"
elif section in ["temperature", "temp", "t"]:
return "t"
else:
msg = """section must be one of the following:
'all',
'location', 'loc', 'l',
'reddening', 'red', 'r',
'emission', 'em', 'e',
'temperature', 'temp', 't'."""
raise ValueError(msg)
def _sections(self, code):
"""
Parameters
----------
code : str
the one-letter code name of the section
Returns
-------
The section corresponding to the code, or a list containing all sections if
no section is provided.
"""
if code == 'l':
return [self._location_section]
elif code == 'r':
return [self._ext_section]
elif code == 'e':
return [self._em_section]
elif code == 't':
return [self._temp_section]
return [self._location_section, self._ext_section, self._em_section, self._temp_section]
def _table_all(self):
"""
Create and return the full table containing all four sections:
location, extinction, emission, and temperature.
Returns
-------
table : `astropy.table.Table`
table containing the data from the query response
"""
columns = (self._location_section.columns + self._ext_section.columns
+ self._em_section.columns + self._temp_section.columns)
table = Table(data=columns)
values = self.values()
table.add_row(vals=values)
return table
def _table(self, section):
"""
Create and return a smaller table containing only one section
of the overall DustResult table.
Parameters
----------
section : str
a string indicating the section to be returned
"""
# Get the specified section
section_object = self._sections(section)[0]
# Create the table
columns = section_object.columns
table = Table(data=columns)
# Populate the table
values = section_object.values()
table.add_row(vals=values)
return table
def ext_detail_table(self):
"""
Get the additional, detailed table of extinction data for various filters.
There is a url for this table given in the initial response to the query.
Returns
-------
table : `astropy.io.ascii.Ipac`
detailed table of extinction data by filter
"""
table_url = self._ext_section.table_url
response = utils.ext_detail_table(table_url)
if sys.version_info > (3, 0):
read_response = response.read().decode("utf-8")
else:
read_response = response.read()
table = Table.read(read_response, format="ipac")
return table
def image(self, section):
"""
Get the FITS image associated with the given section.
The extinction, emission, and temperature sections each provide
a url to a FITS image.
Parameters
----------
section : str
the name of the section
Returns
-------
image : `astropy.io.fits.hdu.HDUList`
the HDUList representing the image data
"""
# Get the url of the image for the given section
image_url = None
if section in ["reddening", "red", "r"]:
image_url = self._ext_section.image_url
elif section in ["emission", "em", "e"]:
image_url = self._em_section.image_url
elif section in ["temperature", "temp", "t"]:
image_url = self._temp_section.image_url
if image_url == None:
msg = """section must be one of the following values:
'reddening', 'red', 'r',
'emission', 'em', 'e',
'temperature', 'temp', 't'"""
raise ValueError(msg)
response = utils.image(image_url)
S = io.BytesIO(response)
image = fits.open(S)
return image
def __str__(self):
"""Return a string representation of the table."""
string = "[DustResult: \n\t"
for section in self._result_sections:
if len(string) > 15:
string += ",\n\t"
string += section.__str__()
string += "]"
return string
class BaseDustNode(object):
"""
A node in the result xml that has been enhanced to return values and Columns
appropriate to its type (String, Number, or Coord).
"""
def __init__(self, xml_node):
"""
Parameters
----------
xml_node : `xml.etree.ElementTree`
the xml node that provides the raw data for this DustNode
"""
self._name = xml_node.tag
def set_value(self, node_text):
"""Override in subclasses."""
pass
@property
def name(self):
"""Return the xml node name."""
return self._name
@property
def value(self):
"""Return the value extracted from the node."""
return self._value
@property
def columns(self):
"""Return the column or columns associated with this item in the astropy Table."""
return self._columns
def __str__(self):
"""Return a string representation of this item."""
col_str = "[Column: "
for column in self._columns:
for format_str in column.pformat(show_units=True):
col_str += format_str
string = "name: " + self._name + ", " + col_str + "]"
return string
class StringNode(BaseDustNode):
"""
A node that contains text.
"""
def __init__(self, xml_node, col_name, length):
"""
Parameters
----------
xml_node : `xml.etree.ElementTree`
the xml node that provides the raw data for this DustNode
col_name : str
the name of the column associated with this item
length : int
the size of the column associated with this item
"""
BaseDustNode.__init__(self, xml_node)
self._value = xml_node.text.strip()
self._length = length
self._columns = [Column(name=col_name, dtype="S" + str(length))]
def __str__(self):
"""Return a string representation of this item."""
base_string = BaseDustNode.__str__(self)
string = ("[StringNode: " + base_string
+ ", value: " + self._value + "]")
return string
class NumberNode(BaseDustNode):
"""
A node that contains a number. Outputs a single column containing the number.
"""
def __init__(self, xml_node, col_name, units=None):
"""
Parameters
----------
xml_node : `xml.etree.ElementTree`
the xml node that provides the raw data for this DustNode
col_name : str
the name of the column associated with this item
units : `astropy.units.Unit`
the units associated with this item
"""
BaseDustNode.__init__(self, xml_node)
self._value = utils.parse_number(xml_node.text)
self._columns = [Column(name=col_name, units=units)]
def __str__(self):
"""Return a string representation of the item."""
base_string = BaseDustNode.__str__(self)
string = ("[NumberNode: " + base_string
+ ", value: " + str(self._value) + "]")
return string
class CoordNode(BaseDustNode):
"""
A node that contains RA, Dec coordinates. Outputs three values / columns: RA, Dec
and a coordinate system description string.
"""
def __init__(self, xml_node, col_names):
"""
Parameters
----------
xml_node : `xml.etree.ElementTree`
the xml node that provides the raw data for this DustNode
col_names : str
the names of the columns associated with this item
"""
BaseDustNode.__init__(self, xml_node)
self._value = utils.parse_coords(xml_node.text)
units = u.deg
self._columns = [Column(name=col_names[0], units=units),
Column(name=col_names[1], units=units),
Column(name=col_names[2], dtype="S25")]
def __str__(self):
"""Return a string representation of the item."""
base_string = BaseDustNode.__str__(self)
values_str = ("values: " + str(self._value[0]) + ", " + str(self._value[1])
+ ", " + str(self._value[2]))
string = ("[CoordNode: " + base_string + ", " + values_str + "]")
return string
class BaseResultSection(object):
"""
Represents a group of related nodes/columns in a DustResults object.
A DustResults table contains four main sections:
1-location
2-extinction
3-emission
4-temperature
In addition, the extinction, emission, and temperature sections
each contain a nested statistics subsection.
"""
def node_dict(self, names, xml_root):
"""
Find all the nodes with the given names under the given root,
and put them in a dictionary.
Parameters
----------
names : list[str]
the names of the nodes to find
xml_root : `xml.etree.ElementTree`
the root of the xml tree to search
Returns
-------
nodes : dictionary
a dictionary of xml nodes, where the keys are the node names
"""
nodes = {}
for name in names:
found_node = xml_root.find(name)
if found_node == None:
raise ValueError("Could not find node '" + name + "'")
nodes[name] = found_node
return nodes
def create_columns(self):
"""Build the columns associated with this section."""
columns = []
for dust_node in self._dust_nodes:
if isinstance(dust_node._columns, types.ListType):
columns.extend(dust_node._columns)
else:
columns.append(dust_node._columns)
self._columns = columns
@property
def columns(self):
"""Return the list of columns associated with this section."""
return self._columns
def values(self):
"""Return the list of data values associated with this section,
i.e. the data corresponding to a single row in the results table."""
values = []
for dust_node in self._dust_nodes:
if isinstance(dust_node._value, types.ListType):
values.extend(dust_node._value)
else:
values.append(dust_node._value)
return values
def __str__(self):
"""Return a string representation of the section."""
string = "\n\t\t"
for dust_node in self._dust_nodes:
if len(string) > 6:
string += ",\n\t\t"
string += dust_node.__str__()
return string
class LocationSection(BaseResultSection):
"""
The location section of the DustResults object.
"""
def __init__(self, xml_root):
"""
Parameters
----------
xml_root : `xml.etree.ElementTree`
the xml tree where the data for this section resides
"""
location_node = xml_root.find(INPUT)
names = [OBJ_NAME, REG_SIZE]
xml_nodes = self.node_dict(names, location_node)
# Create the section's DustNodes
self._dust_nodes = [CoordNode(xml_nodes[OBJ_NAME], col_names=["RA", "Dec", "coord sys"]),
NumberNode(xml_nodes[REG_SIZE], REG_SIZE, u.deg)]
self.create_columns()
def __str__(self):
"""Return a string representation of the section."""
base_string = BaseResultSection.__str__(self)
string = "[LocationSection: " + base_string + "]"
return string
class StatsSection(BaseResultSection):
"""
The statistics subsection of one of an extinction, emission, or temperature
section.
"""
def __init__(self, xml_root, col_prefix):
"""
Parameters
----------
xml_root : `xml.etree.ElementTree`
The xml tree containing the data for this section
col_prefix : str
the prefix to use in column names for this section
"""
names = [REF_PIXEL_VALUE, REF_COORDINATE, MEAN_VALUE, STD, MAX_VALUE, MIN_VALUE]
xml_nodes = self.node_dict(names, xml_root)
# Create the DustNodes
self._dust_nodes = [NumberNode(xml_nodes[REF_PIXEL_VALUE], col_prefix + " ref"),
CoordNode(xml_nodes[REF_COORDINATE], col_names=[col_prefix + " ref RA",
col_prefix + " ref Dec", col_prefix + " ref coord sys"]),
NumberNode(xml_nodes[MEAN_VALUE], col_prefix + " mean"),
NumberNode(xml_nodes[STD], col_prefix + " std"),
NumberNode(xml_nodes[MAX_VALUE], col_prefix + " max"),
NumberNode(xml_nodes[MIN_VALUE], col_prefix + " min")]
self._units = utils.parse_units(xml_nodes[REF_PIXEL_VALUE].text)
self.create_columns()
@property
def units(self):
"""Return the units associated with this section."""
return self._units
@property
def dust_nodes(self):
"""Return the list of DustNodes in this section."""
return self._dust_nodes
def __str__(self):
"""Return a string representation of the section."""
base_string = "\n\t\t\t\t"
for dust_node in self._dust_nodes:
if len(base_string) > 6:
base_string += ",\n\t\t\t\t"
base_string += dust_node.__str__()
string = "\n\t\t\t[StatisticsSection: " + base_string + "]"
return string
class ExtinctionSection(BaseResultSection):
"""
The extinction (reddening) section in a DustResults object.
"""
def __init__(self, xml_root):
"""
Parameters
----------
xml_root : `xml.etree.ElementTree`
The xml tree containing the data for this section
"""
# Find the section's xml nodes
names = [DESC, DATA_IMAGE, DATA_TABLE, STATISTICS]
xml_nodes = self.node_dict(names, xml_root)
# Build the DustNodes
self._dust_nodes = [StringNode(xml_nodes[DESC], "ext desc", 100),
StringNode(xml_nodes[DATA_IMAGE], "ext image", 255),
StringNode(xml_nodes[DATA_TABLE], "ext table", 255)]
# Create statistics subsection
self._stats = StatsSection(xml_nodes[STATISTICS], "ext")
self.create_columns()
def create_columns(self):
"""Build the columns associated with this section."""
BaseResultSection.create_columns(self)
self._columns.extend(self._stats.columns)
@property
def table_url(self):
"""Return the url where the extinction detail table can be found."""
table_url = self._dust_nodes[2]._value
return table_url
@property
def image_url(self):
"""Return the url of the FITS image associated with this section."""
return self._dust_nodes[1]._value
def values(self):
"""Return the data values associated with this section,
i.e. the list of values corresponding to a single row in the results table."""
ext_values = BaseResultSection.values(self)
ext_values.extend(self._stats.values())
return ext_values
def __str__(self):
"""Return a string representation of the section."""
base_string = BaseResultSection.__str__(self)
string = "[ExtinctionSection: " + base_string + self._stats.__str__() + "]"
return string
class EmissionSection(BaseResultSection):
"""
The emission section in a DustResults object.
"""
def __init__(self, xml_root):
"""
Parameters
----------
xml_root : `xml.etree.ElementTree`
The xml tree containing the data for this section
"""
names = [DESC, DATA_IMAGE, STATISTICS]
xml_nodes = self.node_dict(names, xml_root)
# Create the DustNodes
self._dust_nodes = [StringNode(xml_nodes[DESC], "em desc", 100),
StringNode(xml_nodes[DATA_IMAGE], "em image", 255)]
# Create the statistics subsection
self._stats = StatsSection(xml_nodes[STATISTICS], "em")
self.create_columns()
def create_columns(self):
"""Build the columns associated with this section."""
BaseResultSection.create_columns(self)
self._columns.extend(self._stats.columns)
def values(self):
"""Return the data values associated with this section,
i.e. the list of values corresponding to a single row in the results table."""
values = BaseResultSection.values(self)
values.extend(self._stats.values())
return values
@property
def image_url(self):
"""Return the url of the FITS image associated with this section."""
return self._dust_nodes[1]._value
def __str__(self):
"""Return a string representation of the section."""
base_string = BaseResultSection.__str__(self)
string = "[EmissionSection: " + base_string + self._stats.__str__() + "]"
return string
class TemperatureSection(BaseResultSection):
"""
The temperature section in a DustResults object.
"""
def __init__(self, xml_root):
"""
Parameters
----------
xml_root : `xml.etree.ElementTree`
The xml tree containing the data for this section
"""
names = [DESC, DATA_IMAGE, STATISTICS]
xml_nodes = self.node_dict(names, xml_root)
# Create the DustNodes
self._dust_nodes = [StringNode(xml_nodes[DESC], "temp desc", 100),
StringNode(xml_nodes[DATA_IMAGE], "temp image", 255)]
# Create the statistics subsection
self._stats = StatsSection(xml_nodes[STATISTICS], "temp")
self.create_columns()
def create_columns(self):
"""Build the columns associated with this section."""
BaseResultSection.create_columns(self)
self._columns.extend(self._stats.columns)
def values(self):
"""Return the data values associated with this section,
i.e. the list of values corresponding to a single row in the results table."""
values = BaseResultSection.values(self)
values.extend(self._stats.values())
return values
@property
def image_url(self):
"""Return the url of the FITS image associated with this section."""
return self._dust_nodes[1]._value
def __str__(self):
"""Return a string representation of the section."""
base_string = BaseResultSection.__str__(self)
string = "[TemperatureSection: " + base_string + self._stats.__str__() + "]"
return string
```
#### File: irsa/tests/test_irsa.py
```python
from ... import irsa
import numpy as np
import distutils.version as dv
import pytest
# this is just wrong. give up.
# @pytest.mark.skipif(dv.StrictVersion(np.__version__) <= dv.StrictVersion("1.4.1"))
# def test_trivial():
# """ just make sure it doesn't raise anything
# takes about 3-5 seconds"""
# tbl = astroquery.irsa.query_gator_box('pt_src_cat','83.808 -5.391',300)
#
# assert len(tbl) == 100 # at least, that's what I got...
# return tbl
```
#### File: astroquery/simbad/queries.py
```python
import urllib
import urllib2
from .parameters import ValidatedAttribute
from . import parameters
from .result import SimbadResult
from .simbad_votable import VoTableDef
__all__ = ['QueryId',
'QueryAroundId',
'QueryCat',
'QueryCoord',
'QueryBibobj',
'QueryMulti',
]
class _Query(object):
def execute(self, votabledef=None, limit=None, pedantic=False, mirror='strasbourg'):
""" Execute the query, returning a :class:`SimbadResult` object.
Parameters
----------
votabledef: string or :class:`VoTableDef`, optional
Definition object for the output.
limit: int, optional
Limits the number of rows returned. None sets the limit to
SIMBAD's server maximum.
pedantic: bool, optional
The value to pass to the votable parser for the *pedantic*
            parameter.
"""
return execute_query(self, votabledef=votabledef, limit=limit,
pedantic=pedantic, mirror=mirror)
@ValidatedAttribute('wildcard', parameters._ScriptParameterWildcard)
class QueryId(_Query):
""" Query by identifier.
Parameters
----------
identifier: string
The identifier to query for.
wildcard: bool, optional
If True, specifies that `identifier` should be understood as an
expression with wildcards.
"""
__command = 'query id '
def __init__(self, identifier, wildcard=None):
self.identifier = identifier
self.wildcard = wildcard
def __str__(self):
return self.__command + (self.wildcard and 'wildcard ' or '') + \
str(self.identifier) + '\n'
def __repr__(self):
return '{%s(identifier=%s, wildcard=%s)}' % (self.__class__.__name__,
repr(self.identifier), repr(self.wildcard.value))
#class QueryBasic(_Query):
# """ Basic Query
#
# Parameters
# ----------
# anything : string
# The identifier, coordinate, or bibcode to search for
# """
#
# __command = 'query basic '
#
# def __init__(self, qstring):
# self.Ident = qstring
#
# def __str__(self):
# return self.__command + str(self.Ident) + '\n'
#
# def __repr__(self):
# return '{%s(Ident=%s)}' % (self.__class__.__name__,
# repr(self.Ident))
@ValidatedAttribute('radius', parameters._ScriptParameterRadius)
class QueryAroundId(_Query):
""" Query around identifier.
Parameters
----------
identifier: string
The identifier around wich to query.
radius: string, optional
The value of the cone search radius. The value must be suffixed by
'd' (degrees), 'm' (arcminutes) or 's' (arcseconds).
If set to None the default value will be used.
"""
__command = 'query around '
def __init__(self, identifier, radius=None):
self.identifier = identifier
self.radius = radius
def __str__(self):
s = self.__command + str(self.identifier)
if self.radius:
s += ' radius=%s' % self.radius
return s + '\n'
def __repr__(self):
return '{%s(identifier=%s, radius=%s)}' % (self.__class__.__name__,
repr(self.identifier), repr(self.radius.value))
class QueryCat(_Query):
""" Query for a whole catalog.
Parameters
----------
catalog: string
The catalog identifier, for example 'm', 'ngc'.
"""
__command = 'query cat '
def __init__(self, catalog):
self.catalog = catalog
def __str__(self):
return self.__command + str(self.catalog) + '\n'
def __repr__(self):
return '{%s(catalog=%s)}' % (self.__class__.__name__,
repr(self.catalog))
@ValidatedAttribute('radius', parameters._ScriptParameterRadius)
@ValidatedAttribute('frame', parameters._ScriptParameterFrame)
@ValidatedAttribute('equinox', parameters._ScriptParameterEquinox)
@ValidatedAttribute('epoch', parameters._ScriptParameterEpoch)
class QueryCoord(_Query):
""" Query by coordinates.
Parameters
----------
ra: string
Right ascension, for example '+12 30'.
dec: string
Declination, for example '-20 17'.
radius: string, optional
The value of the cone search radius. The value must be suffixed by
'd' (degrees), 'm' (arcminutes) or 's' (arcseconds).
If set to None the default value will be used.
frame: string, optional
Frame of input coordinates.
    equinox: string, optional
Equinox of input coordinates.
epoch: string, optional
Epoch of input coordinates.
"""
__command = 'query coo '
def __init__(self, ra, dec, radius=None, frame=None, equinox=None,
epoch=None):
self.ra = ra
self.dec = dec
self.radius = radius
self.frame = frame
self.equinox = equinox
self.epoch = epoch
def __str__(self):
s = self.__command + str(self.ra) + ' ' + str(self.dec)
for item in ('radius', 'frame', 'equinox', 'epoch'):
if getattr(self, item):
s += ' %s=%s' % (item, str(getattr(self, item)))
return s + '\n'
def __repr__(self):
return '{%s(ra=%s, dec=%s, radius=%s, frame=%s, equinox=%s, ' \
'epoch=%s)}' % \
(self.__class__.__name__, repr(self.ra), repr(self.dec),
repr(self.radius), repr(self.frame), repr(self.equinox),
repr(self.epoch))
class QueryBibobj(_Query):
""" Query by bibcode objects. Used to fetch objects contained in the
given article.
Parameters
----------
bibcode: string
The bibcode of the article.
"""
__command = 'query bibobj '
def __init__(self, bibcode):
self.bibcode = bibcode
def __str__(self):
return self.__command + str(self.bibcode) + '\n'
def __repr__(self):
return '{%s(bibcode=%s)}' % (self.__class__.__name__,
repr(self.bibcode))
@ValidatedAttribute('radius', parameters._ScriptParameterRadius)
@ValidatedAttribute('frame', parameters._ScriptParameterFrame)
@ValidatedAttribute('epoch', parameters._ScriptParameterEpoch)
@ValidatedAttribute('equinox', parameters._ScriptParameterEquinox)
class QueryMulti(_Query):
__command_ids = ('radius', 'frame', 'epoch', 'equinox')
def __init__(self, queries=None, radius=None, frame=None, epoch=None,
equinox=None):
""" A type of Query used to aggregate the values of multiple simple
queries into a single result.
Parameters
----------
queries: iterable of Query objects
The list of Query objects to aggregate results for.
radius: string, optional
The value of the cone search radius. The value must be suffixed by
'd' (degrees), 'm' (arcminutes) or 's' (arcseconds).
If set to None the default value will be used.
frame: string, optional
Frame of input coordinates.
        equinox: string, optional
Equinox of input coordinates.
epoch: string, optional
Epoch of input coordinates.
        .. note:: Each of the *radius*, *frame*, *equinox* and *epoch* arguments
acts as a default value for the whole MultiQuery object.
Individual queries may override these.
"""
self.queries = []
self.radius = radius
self.frame = frame
self.epoch = epoch
self.equinox = equinox
        if queries is not None:
            if isinstance(queries, QueryMulti):
                # Flatten another QueryMulti into this one. This check must
                # come first: a QueryMulti is not iterable, so iter() on it
                # would raise TypeError before any later branch ran.
                for query in queries.queries:
                    self.queries.append(query)
            elif isinstance(queries, _Query):
                self.queries.append(queries)
            else:
                for query in queries:
                    if isinstance(query, _Query):
                        self.queries.append(query)
                    else:
                        raise ValueError("Queries must be simbad.Query instances")
@property
def __commands(self):
""" The list of commands which are not None for this script.
"""
return tuple([x for x in self.__command_ids if getattr(self, x)])
@property
def _header(self):
s = ''
for comm in self.__commands:
s += 'set %s %s\n' % (comm, str(getattr(self, comm)))
return s
@property
def __queries_string(self):
s = ''
for query in self.queries:
s += str(query)
return s
def __str__(self):
return self._header + self.__queries_string
def __repr__(self):
return repr(self.queries)
def execute_query(query, votabledef, limit, pedantic, mirror='strasbourg'):
limit2 = parameters._ScriptParameterRowLimit(limit)
if votabledef is None:
# votabledef is None, use the module level default one
from . import votabledef as vodefault
if isinstance(vodefault, VoTableDef):
votabledef = vodefault
else:
votabledef = VoTableDef(vodefault)
elif not isinstance(votabledef, VoTableDef):
votabledef = VoTableDef(votabledef)
# Create the 'script' string
script = ''
if limit is not None:
script += 'set limit %s\n' % str(limit2)
if isinstance(query, QueryMulti):
script += query._header
script += votabledef.def_str
script += votabledef.open_str
script += str(query)
script += votabledef.close_str
script = urllib.quote(script)
from . import mirrors
req_str = mirrors[mirror] + script
response = urllib2.urlopen(req_str)
result = b''.join(response.readlines())
result = result.decode('utf-8')
if not result:
raise TypeError
return SimbadResult(result, pedantic=pedantic)
```
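A minimal usage sketch of the script-building path above, assuming the module-level names the test file below imports (`simbad.QueryId`, `simbad.QueryMulti`) and that `execute_query` is importable from the same module; the radius and limit values are illustrative.
```python
# Hedged sketch: aggregate two id queries and run them through execute_query.
from astroquery import simbad
from astroquery.simbad import execute_query  # assumed import path

multi = simbad.QueryMulti(
    [simbad.QueryId('m31'), simbad.QueryId('m51')],
    radius='2m')  # default cone radius applied to the whole script
result = execute_query(multi, votabledef=None, limit=10, pedantic=False)
print(result.table)
```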
#### File: simbad/tests/test_simbad.py
```python
from ... import simbad
import sys
is_python3 = (sys.version_info >= (3,))
def test_simbad():
r = simbad.QueryAroundId('m31', radius='0.5s').execute()
    print(r.table)
if is_python3:
m31 = b"M 31"
else:
m31 = "M 31"
assert m31 in r.table["MAIN_ID"]
def test_multi():
result = simbad.QueryMulti(
[simbad.QueryId('m31'),
simbad.QueryId('m51')])
table = result.execute().table
if is_python3:
m31 = b"M 31"
m51 = b"M 51"
else:
m31 = "M 31"
m51 = "M 51"
assert m31 in table["MAIN_ID"]
assert m51 in table["MAIN_ID"]
if __name__ == "__main__":
test_simbad()
test_multi()
```
#### File: astroquery/utils/progressbar.py
```python
import urllib2
import gzip
import sys
import StringIO
from astropy.io import fits
__all__ = ['chunk_report','chunk_read']
def chunk_report(bytes_so_far, chunk_size, total_size):
if total_size > 0:
percent = float(bytes_so_far) / total_size
percent = round(percent*100, 2)
sys.stdout.write(u"Downloaded %12.2g of %12.2g Mb (%6.2f%%)\r" %
(bytes_so_far / 1024.**2, total_size / 1024.**2, percent))
else:
sys.stdout.write(u"Downloaded %10.2g Mb\r" %
(bytes_so_far / 1024.**2))
def chunk_read(response, chunk_size=1024, report_hook=None):
content_length = response.info().get('Content-Length')
if content_length is None:
total_size = 0
else:
total_size = content_length.strip()
total_size = int(total_size)
bytes_so_far = 0
result_string = b""
#sys.stdout.write("Beginning download.\n")
while 1:
chunk = response.read(chunk_size)
result_string += chunk
bytes_so_far += len(chunk)
if not chunk:
if report_hook:
sys.stdout.write('\n')
break
if report_hook:
report_hook(bytes_so_far, chunk_size, total_size)
return result_string
def retrieve(url, outfile, opener=None, overwrite=False):
"""
"retrieve" (i.e., download to file) a URL.
"""
if opener is None:
opener = urllib2.build_opener()
page = opener.open(url)
results = chunk_read(page, report_hook=chunk_report)
S = StringIO.StringIO(results)
try:
fitsfile = fits.open(S,ignore_missing_end=True)
except IOError:
S.seek(0)
G = gzip.GzipFile(fileobj=S)
fitsfile = fits.open(G,ignore_missing_end=True)
fitsfile.writeto(outfile, clobber=overwrite)
```
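A minimal usage sketch of the module above (Python 2, matching its urllib2/StringIO imports); the URL and output filename are placeholders.
```python
# Hedged sketch: download a FITS file with console progress reporting.
# retrieve() streams the response through chunk_read() and reports progress
# via the chunk_report() hook defined above.
from astroquery.utils.progressbar import retrieve

retrieve("http://data.example.org/cutout.fits", "cutout.fits", overwrite=True)
```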
|
{
"source": "jdnc/SimpleCV",
"score": 4
}
|
#### File: SimpleCV/Features/Detection.py
```python
from SimpleCV.base import *
from SimpleCV.ImageClass import *
from SimpleCV.Color import *
from SimpleCV.Features.Features import Feature, FeatureSet
class Corner(Feature):
"""
**SUMMARY**
The Corner feature is a point returned by the FindCorners function
Corners are used in machine vision as a very computationally efficient way
to find unique features in an image. These corners can be used in
conjunction with many other algorithms.
**SEE ALSO**
:py:meth:`findCorners`
"""
def __init__(self, i, at_x, at_y):
points = [(at_x-1,at_y-1),(at_x-1,at_y+1),(at_x+1,at_y+1),(at_x+1,at_y-1)]
super(Corner, self).__init__(i, at_x, at_y,points)
#can we look at the eigenbuffer and find direction?
def draw(self, color = (255, 0, 0),width=1):
"""
**SUMMARY**
Draw a small circle around the corner. Color tuple is single parameter, default is Red.
**PARAMETERS**
* *color* - An RGB color triplet.
* *width* - if width is less than zero we draw the feature filled in, otherwise we draw the
contour using the specified width.
**RETURNS**
Nothing - this is an inplace operation that modifies the source images drawing layer.
"""
self.image.drawCircle((self.x, self.y), 4, color,width)
######################################################################
class Line(Feature):
"""
**SUMMARY**
The Line class is returned by the findLines function, but can also be initialized with any two points.
>>> l = Line(Image, (point1, point2))
Where point1 and point2 are (x,y) coordinate tuples.
>>> l.points
Returns a tuple of the two points
"""
#TODO - A nice feature would be to calculate the endpoints of the line.
def __init__(self, i, line):
self.image = i
self.vector = None
self.end_points = copy(line)
#print self.end_points[1][1], self.end_points[0][1], self.end_points[1][0], self.end_points[0][0]
if self.end_points[1][0] - self.end_points[0][0] == 0:
self.slope = float("inf")
else:
self.slope = float(self.end_points[1][1] - self.end_points[0][1])/float(self.end_points[1][0] - self.end_points[0][0])
#coordinate of the line object is the midpoint
at_x = (line[0][0] + line[1][0]) / 2
at_y = (line[0][1] + line[1][1]) / 2
xmin = int(np.min([line[0][0],line[1][0]]))
xmax = int(np.max([line[0][0],line[1][0]]))
        ymin = int(np.min([line[0][1],line[1][1]]))
        ymax = int(np.max([line[0][1],line[1][1]]))
points = [(xmin,ymin),(xmin,ymax),(xmax,ymax),(xmax,ymin)]
super(Line, self).__init__(i, at_x, at_y,points)
def draw(self, color = (0, 0, 255),width=1):
"""
        **SUMMARY**
        Draw the line. Default color is blue.
**PARAMETERS**
* *color* - An RGB color triplet.
* *width* - Draw the line using the specified width.
**RETURNS**
Nothing - this is an inplace operation that modifies the source images drawing layer.
"""
self.image.drawLine(self.end_points[0], self.end_points[1], color,width)
def length(self):
"""
**SUMMARY**
This method returns the length of the line.
**RETURNS**
A floating point length value.
**EXAMPLE**
>>> img = Image("OWS.jpg")
        >>> lines = img.findLines()
>>> for l in lines:
>>> if l.length() > 100:
>>> print "OH MY! - WHAT A BIG LINE YOU HAVE!"
>>> print "---I bet you say that to all the lines."
"""
return float(spsd.euclidean(self.end_points[0], self.end_points[1]))
def crop(self):
"""
**SUMMARY**
This function crops the source image to the location of the feature and returns
a new SimpleCV image.
**RETURNS**
A SimpleCV image that is cropped to the feature position and size.
**EXAMPLE**
>>> img = Image("../sampleimages/EdgeTest2.png")
>>> l = img.findLines()
>>> myLine = l[0].crop()
"""
tl = self.topLeftCorner()
return self.image.crop(tl[0],tl[1],self.width(),self.height())
def meanColor(self):
"""
**SUMMARY**
Returns the mean color of pixels under the line. Note that when the line falls "between" pixels, each pixels color contributes to the weighted average.
**RETURNS**
Returns an RGB triplet corresponding to the mean color of the feature.
**EXAMPLE**
>>> img = Image("lenna")
>>> l = img.findLines()
>>> c = l[0].meanColor()
"""
(pt1, pt2) = self.end_points
#we're going to walk the line, and take the mean color from all the px
#points -- there's probably a much more optimal way to do this
(maxx,minx,maxy,miny) = self.extents()
d_x = maxx - minx
d_y = maxy - miny
#orient the line so it is going in the positive direction
#if it's a straight one, we can just get mean color on the slice
if (d_x == 0.0):
return self.image[pt1[0]:pt1[0] + 1, miny:maxy].meanColor()
if (d_y == 0.0):
return self.image[minx:maxx, pt1[1]:pt1[1] + 1].meanColor()
error = 0.0
d_err = d_y / d_x #this is how much our "error" will increase in every step
px = []
weights = []
if (d_err < 1):
y = miny
#iterate over X
for x in range(minx, maxx):
#this is the pixel we would draw on, check the color at that px
#weight is reduced from 1.0 by the abs amount of error
px.append(self.image[x, y])
weights.append(1.0 - abs(error))
#if we have error in either direction, we're going to use the px
#above or below
if (error > 0): #
px.append(self.image[x, y+1])
weights.append(error)
if (error < 0):
px.append(self.image[x, y-1])
weights.append(abs(error))
error = error + d_err
if (error >= 0.5):
y = y + 1
error = error - 1.0
else:
            #this is a "steep" line, so we iterate over Y
            #(mirror of the shallow-slope branch above)
x = minx
for y in range(miny, maxy):
#this is the pixel we would draw on, check the color at that px
#weight is reduced from 1.0 by the abs amount of error
px.append(self.image[x, y])
weights.append(1.0 - abs(error))
#if we have error in either direction, we're going to use the px
#above or below
if (error > 0): #
px.append(self.image[x + 1, y])
weights.append(error)
if (error < 0):
px.append(self.image[x - 1, y])
weights.append(abs(error))
error = error + (1.0 / d_err) #we use the reciprocal of error
if (error >= 0.5):
x = x + 1
error = error - 1.0
#once we have iterated over every pixel in the line, we avg the weights
clr_arr = np.array(px)
weight_arr = np.array(weights)
weighted_clrs = np.transpose(np.transpose(clr_arr) * weight_arr)
#multiply each color tuple by its weight
temp = sum(weighted_clrs) / sum(weight_arr) #return the weighted avg
return (float(temp[0]),float(temp[1]),float(temp[2]))
def findIntersection(self, line):
"""
**SUMMARY**
        Returns the intersection point of two lines.
**RETURNS**
A point tuple.
**EXAMPLE**
>>> img = Image("lenna")
>>> l = img.findLines()
        >>> c = l[0].findIntersection(l[1])
TODO: THIS NEEDS TO RETURN A TUPLE OF FLOATS
"""
if self.slope == float("inf"):
x = self.end_points[0][0]
y = line.slope*(x-line.end_points[1][0])+line.end_points[1][1]
return (x, y)
if line.slope == float("inf"):
x = line.end_points[0][0]
y = self.slope*(x-self.end_points[1][0])+self.end_points[1][1]
return (x, y)
m1 = self.slope
x12, y12 = self.end_points[1]
m2 = line.slope
x22, y22 = line.end_points[1]
x = (m1*x12 - m2*x22 + y22 - y12)/float(m1-m2)
y = (m1*m2*(x12-x22) - m2*y12 + m1*y22)/float(m1-m2)
return (x, y)
def isParallel(self, line):
"""
**SUMMARY**
Checks whether two lines are parallel or not.
**RETURNS**
Bool. True or False
**EXAMPLE**
>>> img = Image("lenna")
>>> l = img.findLines()
>>> c = l[0].isParallel(l[1])
"""
if self.slope == line.slope:
return True
return False
def isPerpendicular(self, line):
"""
**SUMMARY**
Checks whether two lines are perpendicular or not.
**RETURNS**
Bool. True or False
**EXAMPLE**
>>> img = Image("lenna")
>>> l = img.findLines()
>>> c = l[0].isPerpendicular(l[1])
"""
if self.slope == float("inf"):
if line.slope == 0:
return True
return False
if line.slope == float("inf"):
if self.slope == 0:
return True
return False
if self.slope*line.slope == -1:
return True
return False
def imgIntersections(self, img):
"""
**SUMMARY**
Returns a set of pixels where the line intersects with the binary image.
**RETURNS**
list of points.
**EXAMPLE**
>>> img = Image("lenna")
>>> l = img.findLines()
>>> c = l[0].imgIntersections(img.binarize())
"""
pixels = []
if self.slope == float("inf"):
for y in range(self.end_points[0][1], self.end_points[1][1]+1):
pixels.append((self.end_points[0][0], y))
else:
for x in range(self.end_points[0][0], self.end_points[1][0]+1):
pixels.append((x, int(self.end_points[1][1] + self.slope*(x-self.end_points[1][0]))))
for y in range(self.end_points[0][1], self.end_points[1][1]+1):
pixels.append((int(((y-self.end_points[1][1])/self.slope)+self.end_points[1][0]), y))
pixels = list(set(pixels))
matched_pixels=[]
for pixel in pixels:
if img[pixel[0], pixel[1]] == (255.0, 255.0, 255.0):
matched_pixels.append(pixel)
matched_pixels.sort()
return matched_pixels
def angle(self):
"""
**SUMMARY**
        This is the angle of the line, from the leftmost point to the rightmost point.
        Returns the angle (theta) in degrees, with 0 = horizontal, -90 = vertical with positive slope, 90 = vertical with negative slope.
**RETURNS**
An angle value in degrees.
**EXAMPLE**
>>> img = Image("OWS.jpg")
        >>> ls = img.findLines()
>>> for l in ls:
>>> if l.angle() == 0:
>>> print "I AM HORIZONTAL."
"""
#first find the leftmost point
a = 0
b = 1
if (self.end_points[a][0] > self.end_points[b][0]):
b = 0
a = 1
d_x = self.end_points[b][0] - self.end_points[a][0]
d_y = self.end_points[b][1] - self.end_points[a][1]
#our internal standard is degrees
return float(360.00 * (atan2(d_y, d_x)/(2 * np.pi))) #formerly 0 was west
def getVector(self):
# this should be a lazy property
if( self.vector is None):
self.vector = [float(self.end_points[1][0]-self.end_points[0][0]),
float(self.end_points[1][1]-self.end_points[0][1])]
return self.vector
def dot(self,other):
return np.dot(self.getVector(),other.getVector())
def cross(self,other):
return np.cross(self.getVector(),other.getVector())
def extendToImageEdges(self):
x = np.array([self.end_points[1][0],self.end_points[0][0]])
y = np.array([self.end_points[1][1],self.end_points[0][1]])
xmax_idx = np.where(np.max(x)==x)
xmin_idx = np.where(np.min(x)==x)
m = (y[xmin_idx]-y[xmax_idx])/(np.min(x)-np.max(x))
b = self.end_points[0][1]-(m*self.end_points[0][0])
p0 = (0,b)
p1 = (self.image.width,(self.image.width)*m+b)
return Line(self.image,[p0,p1])
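# Worked example of the slope math above (illustrative coordinates, not part
# of the original module): the two diagonals of a 10x10 box cross at their
# midpoint and are perpendicular.
# >>> l1 = Line(img, ((0, 0), (10, 10)))    # slope  1
# >>> l2 = Line(img, ((0, 10), (10, 0)))    # slope -1
# >>> l1.findIntersection(l2)
# (5.0, 5.0)
# >>> l1.isPerpendicular(l2)
# True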
######################################################################
class Barcode(Feature):
"""
**SUMMARY**
    The Barcode feature wraps the object returned by findBarcode(), a zbar symbol.
    * The x,y coordinate is the center of the code.
    * points represents the four boundary points of the feature. Note: for QR codes, these points come from the reference rectangles and are quadrangular rather than rectangular, unlike other datamatrix types.
* data is the parsed data of the code.
**SEE ALSO**
:py:meth:`ImageClass.findBarcodes()`
"""
data = ""
    #given a zbar symbol
def __init__(self, i, zbsymbol):
self.image = i
locs = zbsymbol.location
if len(locs) > 4:
xs = [l[0] for l in locs]
ys = [l[1] for l in locs]
xmax = np.max(xs)
xmin = np.min(xs)
ymax = np.max(ys)
ymin = np.min(ys)
points = ((xmin, ymin),(xmin,ymax),(xmax, ymax),(xmax,ymin))
else:
points = copy(locs) # hopefully this is in tl clockwise order
super(Barcode, self).__init__(i, 0, 0,points)
self.data = zbsymbol.data
self.points = copy(points)
numpoints = len(self.points)
self.x = 0
self.y = 0
for p in self.points:
self.x += p[0]
self.y += p[1]
if (numpoints):
self.x /= numpoints
self.y /= numpoints
def __repr__(self):
return "%s.%s at (%d,%d), read data: %s" % (self.__class__.__module__, self.__class__.__name__, self.x, self.y, self.data)
def draw(self, color = (255, 0, 0),width=1):
"""
**SUMMARY**
Draws the bounding area of the barcode, given by points. Note that for
QR codes, these points are the reference boxes, and so may "stray" into
the actual code.
**PARAMETERS**
* *color* - An RGB color triplet.
* *width* - if width is less than zero we draw the feature filled in, otherwise we draw the
contour using the specified width.
**RETURNS**
Nothing - this is an inplace operation that modifies the source images drawing layer.
"""
self.image.drawLine(self.points[0], self.points[1], color,width)
self.image.drawLine(self.points[1], self.points[2], color,width)
self.image.drawLine(self.points[2], self.points[3], color,width)
self.image.drawLine(self.points[3], self.points[0], color,width)
def length(self):
"""
**SUMMARY**
        Returns the longest side of the quadrangle formed by the boundary points.
**RETURNS**
A floating point length value.
**EXAMPLE**
>>> img = Image("mycode.jpg")
>>> bc = img.findBarcode()
>>> print bc[-1].length()
"""
sqform = spsd.squareform(spsd.pdist(self.points, "euclidean"))
#get pairwise distances for all points
#note that the code is a quadrilateral
return max(sqform[0][1], sqform[1][2], sqform[2][3], sqform[3][0])
def area(self):
"""
**SUMMARY**
        Returns the area defined by the quadrangle formed by the boundary points.
**RETURNS**
An integer area value.
**EXAMPLE**
>>> img = Image("mycode.jpg")
>>> bc = img.findBarcode()
>>> print bc[-1].area()
"""
#calc the length of each side in a square distance matrix
sqform = spsd.squareform(spsd.pdist(self.points, "euclidean"))
#squareform returns a N by N matrix
#boundry line lengths
a = sqform[0][1]
b = sqform[1][2]
c = sqform[2][3]
d = sqform[3][0]
#diagonals
p = sqform[0][2]
q = sqform[1][3]
#perimeter / 2
s = (a + b + c + d)/2.0
#i found the formula to do this on wikihow. Yes, I am that lame.
#http://www.wikihow.com/Find-the-Area-of-a-Quadrilateral
return sqrt((s - a) * (s - b) * (s - c) * (s - d) - (a * c + b * d + p * q) * (a * c + b * d - p * q) / 4)
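    # Sanity check of the quadrilateral-area expression above (Bretschneider's
    # formula): for a unit square a=b=c=d=1, s=2, p=q=sqrt(2), so the result is
    # sqrt(1*1*1*1 - (1+1+2)*(1+1-2)/4) = sqrt(1) = 1, as expected.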
######################################################################
class HaarFeature(Feature):
"""
**SUMMARY**
The HaarFeature is a rectangle returned by the FindHaarFeature() function.
* The x,y coordinates are defined by the center of the bounding rectangle.
    * The classifier property refers to the cascade file used for detection.
* Points are the clockwise points of the bounding rectangle, starting in upper left.
"""
classifier = ""
_width = ""
_height = ""
neighbors = ''
featureName = 'None'
def __init__(self, i, haarobject, haarclassifier = None):
self.image = i
((x, y, width, height), self.neighbors) = haarobject
at_x = x + width/2
at_y = y + height/2 #set location of feature to middle of rectangle
points = ((x, y), (x + width, y), (x + width, y + height), (x, y + height))
#set bounding points of the rectangle
self.classifier = haarclassifier
if( haarclassifier is not None ):
self.featureName = haarclassifier.getName()
super(HaarFeature, self).__init__(i, at_x, at_y, points)
def draw(self, color = (0, 255, 0),width=1):
"""
**SUMMARY**
Draw the bounding rectangle, default color green.
**PARAMETERS**
* *color* - An RGB color triplet.
* *width* - if width is less than zero we draw the feature filled in, otherwise we draw the
contour using the specified width.
**RETURNS**
Nothing - this is an inplace operation that modifies the source images drawing layer.
"""
self.image.drawLine(self.points[0], self.points[1], color,width)
self.image.drawLine(self.points[1], self.points[2], color,width)
self.image.drawLine(self.points[2], self.points[3], color,width)
self.image.drawLine(self.points[3], self.points[0], color,width)
def __getstate__(self):
dict = self.__dict__.copy()
if 'classifier' in dict:
del dict["classifier"]
return dict
def meanColor(self):
"""
**SUMMARY**
Find the mean color of the boundary rectangle.
**RETURNS**
Returns an RGB triplet that corresponds to the mean color of the feature.
**EXAMPLE**
>>> img = Image("lenna")
>>> face = HaarCascade("face.xml")
>>> faces = img.findHaarFeatures(face)
>>> print faces[-1].meanColor()
"""
crop = self.image[self.points[0][0]:self.points[1][0], self.points[0][1]:self.points[2][1]]
return crop.meanColor()
def area(self):
"""
**SUMMARY**
Returns the area of the feature in pixels.
**RETURNS**
The area of the feature in pixels.
**EXAMPLE**
>>> img = Image("lenna")
>>> face = HaarCascade("face.xml")
>>> faces = img.findHaarFeatures(face)
>>> print faces[-1].area()
"""
return self.width() * self.height()
######################################################################
class Chessboard(Feature):
"""
**SUMMARY**
    This class is used for calibration; it uses a chessboard
    to calibrate from pixels to real world measurements.
"""
spCorners = []
dimensions = ()
def __init__(self, i, dim, subpixelCorners):
self.dimensions = dim
self.spCorners = subpixelCorners
at_x = np.average(np.array(self.spCorners)[:, 0])
at_y = np.average(np.array(self.spCorners)[:, 1])
posdiagsorted = sorted(self.spCorners, key = lambda corner: corner[0] + corner[1])
#sort corners along the x + y axis
negdiagsorted = sorted(self.spCorners, key = lambda corner: corner[0] - corner[1])
#sort corners along the x - y axis
points = (posdiagsorted[0], negdiagsorted[-1], posdiagsorted[-1], negdiagsorted[0])
super(Chessboard, self).__init__(i, at_x, at_y, points)
def draw(self, no_needed_color = None):
"""
**SUMMARY**
Draws the chessboard corners. We take a color param, but ignore it.
**PARAMETERS**
* *no_needed_color* - An RGB color triplet that isn't used
**RETURNS**
Nothing - this is an inplace operation that modifies the source images drawing layer.
"""
cv.DrawChessboardCorners(self.image.getBitmap(), self.dimensions, self.spCorners, 1)
def area(self):
"""
**SUMMARY**
Returns the mean of the distance between corner points in the chessboard
Given that the chessboard is of a known size, this can be used as a
proxy for distance from the camera
**RETURNS**
Returns the mean distance between the corners.
**EXAMPLE**
>>> img = Image("corners.jpg")
>>> feats = img.findChessboardCorners()
>>> print feats[-1].area()
"""
        #note, copying this from barcode means we probably need a subclass of
        #feature called "quadrangle"
sqform = spsd.squareform(spsd.pdist(self.points, "euclidean"))
a = sqform[0][1]
b = sqform[1][2]
c = sqform[2][3]
d = sqform[3][0]
p = sqform[0][2]
q = sqform[1][3]
s = (a + b + c + d)/2.0
return 2 * sqrt((s - a) * (s - b) * (s - c) * (s - d) - (a * c + b * d + p * q) * (a * c + b * d - p * q) / 4)
######################################################################
class TemplateMatch(Feature):
"""
**SUMMARY**
This class is used for template (pattern) matching in images.
The template matching cannot handle scale or rotation.
"""
template_image = None
quality = 0
w = 0
h = 0
def __init__(self, image, template, location, quality):
self.template_image = template # -- KAT - TRYING SOMETHING
self.image = image
self.quality = quality
w = template.width
h = template.height
at_x = location[0]
at_y = location[1]
points = [(at_x,at_y),(at_x+w,at_y),(at_x+w,at_y+h),(at_x,at_y+h)]
super(TemplateMatch, self).__init__(image, at_x, at_y, points)
def _templateOverlaps(self,other):
"""
Returns true if this feature overlaps another template feature.
"""
(maxx,minx,maxy,miny) = self.extents()
overlap = False
for p in other.points:
if( p[0] <= maxx and p[0] >= minx and p[1] <= maxy and p[1] >= miny ):
overlap = True
break
return overlap
def consume(self, other):
"""
Given another template feature, make this feature the size of the two features combined.
"""
(maxx,minx,maxy,miny) = self.extents()
(maxx0,minx0,maxy0,miny0) = other.extents()
maxx = max(maxx,maxx0)
minx = min(minx,minx0)
maxy = max(maxy,maxy0)
miny = min(miny,miny0)
self.x = minx
self.y = miny
self.points = [(minx,miny),(minx,maxy),(maxx,maxy),(maxx,miny)]
self._updateExtents()
def rescale(self,w,h):
"""
This method keeps the feature's center the same but sets a new width and height
"""
(maxx,minx,maxy,miny) = self.extents()
xc = minx+((maxx-minx)/2)
yc = miny+((maxy-miny)/2)
x = xc-(w/2)
y = yc-(h/2)
self.x = x
self.y = y
self.points = [(x,y),
(x+w,y),
(x+w,y+h),
(x,y+h)]
self._updateExtents()
def crop(self):
(maxx,minx,maxy,miny) = self.extents()
return self.image.crop(minx,miny,maxx-minx,maxy-miny)
def draw(self, color = Color.GREEN, width = 1):
"""
**SUMMARY**
Draw the bounding rectangle, default color green.
**PARAMETERS**
* *color* - An RGB color triplet.
* *width* - if width is less than zero we draw the feature filled in, otherwise we draw the
contour using the specified width.
**RETURNS**
Nothing - this is an inplace operation that modifies the source images drawing layer.
"""
self.image.dl().rectangle((self.x,self.y), (self.width(), self.height()), color = color, width=width)
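    # Sketch of the overlap/merge workflow above (tm1 and tm2 are hypothetical
    # TemplateMatch hits from the same template search):
    # >>> if tm1._templateOverlaps(tm2):
    # ...     tm1.consume(tm2)    # tm1 now spans both bounding boxes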
######################################################################
class Circle(Feature):
"""
**SUMMARY**
Class for a general circle feature with a center at (x,y) and a radius r
"""
x = 0.00
y = 0.00
r = 0.00
image = "" #parent image
points = []
avgColor = None
def __init__(self, i, at_x, at_y, r):
self.r = r
self.avgColor = None
points = [(at_x-r,at_y-r),(at_x+r,at_y-r),(at_x+r,at_y+r),(at_x-r,at_y+r)]
super(Circle, self).__init__(i, at_x, at_y, points)
segments = 18
rng = range(1,segments+1)
self.mContour = []
for theta in rng:
rp = 2.0*math.pi*float(theta)/float(segments)
x = (r*math.sin(rp))+at_x
y = (r*math.cos(rp))+at_y
self.mContour.append((x,y))
def draw(self, color = Color.GREEN,width=1):
"""
**SUMMARY**
        Draw the circle on the source image's drawing layer.
**PARAMETERS**
* *color* - An RGB color triplet.
* *width* - if width is less than zero we draw the feature filled in, otherwise we draw the
contour using the specified width.
**RETURNS**
Nothing - this is an inplace operation that modifies the source images drawing layer.
"""
self.image.dl().circle((self.x,self.y),self.r,color,width)
def show(self, color = Color.GREEN):
"""
**SUMMARY**
This function will automatically draw the features on the image and show it.
        It is basically a shortcut function for development, equivalent to calling draw() followed by image.show().
**PARAMETERS**
* *color* - the color of the feature as an rgb triplet.
**RETURNS**
Nothing - this is an inplace operation that modifies the source images drawing layer.
**EXAMPLE**
>>> img = Image("logo")
>>> feat = img.findCircle()
>>> feat[0].show()
"""
self.draw(color)
self.image.show()
def distanceFrom(self, point = (-1, -1)):
"""
**SUMMARY**
Given a point (default to center of the image), return the euclidean distance of x,y from this point.
**PARAMETERS**
* *point* - The point, as an (x,y) tuple on the image to measure distance from.
**RETURNS**
The distance as a floating point value in pixels.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findCircle()
>>> blobs[-1].distanceFrom(blobs[-2].coordinates())
"""
if (point[0] == -1 or point[1] == -1):
point = np.array(self.image.size()) / 2
return spsd.euclidean(point, [self.x, self.y])
def meanColor(self):
"""
**SUMMARY**
Returns the average color within the circle.
**RETURNS**
Returns an RGB triplet that corresponds to the mean color of the feature.
**EXAMPLE**
>>> img = Image("lenna")
>>> c = img.findCircle()
>>> c[-1].meanColor()
"""
#generate the mask
if( self.avgColor is None):
mask = self.image.getEmpty(1)
cv.Zero(mask)
cv.Circle(mask,(self.x,self.y),self.r,color=(255,255,255),thickness=-1)
temp = cv.Avg(self.image.getBitmap(),mask)
self.avgColor = (temp[0],temp[1],temp[2])
return self.avgColor
def area(self):
"""
        **SUMMARY**
        Returns the area of the circle in pixels (pi * r**2).
        **RETURNS**
        A floating point area value.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> c = img.findCircle()
        >>> print c[-1].area()
"""
return self.r*self.r*pi
def perimeter(self):
"""
**SUMMARY**
Returns the perimeter of the circle feature in pixels.
"""
return 2*pi*self.r
def width(self):
"""
**SUMMARY**
Returns the width of the feature -- for compliance just r*2
"""
return self.r*2
def height(self):
"""
**SUMMARY**
Returns the height of the feature -- for compliance just r*2
"""
return self.r*2
def radius(self):
"""
**SUMMARY**
Returns the radius of the circle in pixels.
"""
return self.r
def diameter(self):
"""
**SUMMARY**
Returns the diameter of the circle in pixels.
"""
return self.r*2
def crop(self,noMask=False):
"""
**SUMMARY**
        Crop the image region covered by the circle.
**PARAMETERS**
* *noMask* - if noMask=True we return the bounding box image of the circle.
if noMask=False (default) we return the masked circle with the rest of the area set to black
**RETURNS**
The masked circle image.
"""
if( noMask ):
return self.image.crop(self.x, self.y, self.width(), self.height(), centered = True)
else:
mask = self.image.getEmpty(1)
result = self.image.getEmpty()
cv.Zero(mask)
cv.Zero(result)
#if you want to shave a bit of time we go do the crop before the blit
cv.Circle(mask,(self.x,self.y),self.r,color=(255,255,255),thickness=-1)
cv.Copy(self.image.getBitmap(),result,mask)
retVal = Image(result)
retVal = retVal.crop(self.x, self.y, self.width(), self.height(), centered = True)
return retVal
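    # Sketch of the two crop modes above (hypothetical detected circle):
    # >>> c = img.findCircle()[-1]
    # >>> patch = c.crop()             # masked: pixels outside r are black
    # >>> box = c.crop(noMask=True)    # plain axis-aligned bounding box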
##################################################################################
class KeyPoint(Feature):
"""
**SUMMARY**
    This class is a placeholder for SURF/SIFT/ORB/STAR keypoints.
"""
x = 0.00
y = 0.00
r = 0.00
image = "" #parent image
points = []
__avgColor = None
mAngle = 0
mOctave = 0
mResponse = 0.00
mFlavor = ""
mDescriptor = None
mKeyPoint = None
def __init__(self, i, keypoint, descriptor=None, flavor="SURF" ):
#i, point, diameter, descriptor=None,angle=-1, octave=0,response=0.00,flavor="SURF"):
self.mKeyPoint = keypoint
x = keypoint.pt[1] #KAT
y = keypoint.pt[0]
self._r = keypoint.size/2.0
self._avgColor = None
self.image = i
self.mAngle = keypoint.angle
self.mOctave = keypoint.octave
self.mResponse = keypoint.response
self.mFlavor = flavor
self.mDescriptor = descriptor
r = self._r
points = ((x+r,y+r),(x+r,y-r),(x-r,y-r),(x-r,y+r))
super(KeyPoint, self).__init__(i, x, y, points)
segments = 18
rng = range(1,segments+1)
self.points = []
for theta in rng:
rp = 2.0*math.pi*float(theta)/float(segments)
x = (r*math.sin(rp))+self.x
y = (r*math.cos(rp))+self.y
self.points.append((x,y))
def getObject(self):
"""
**SUMMARY**
Returns the raw keypoint object.
"""
return self.mKeyPoint
def descriptor(self):
"""
**SUMMARY**
Returns the raw keypoint descriptor.
"""
return self.mDescriptor
def quality(self):
"""
**SUMMARY**
Returns the quality metric for the keypoint object.
"""
return self.mResponse
def octave(self):
"""
**SUMMARY**
Returns the raw keypoint's octave (if it has one).
"""
return self.mOctave
def flavor(self):
"""
**SUMMARY**
Returns the type of keypoint as a string (e.g. SURF/MSER/ETC)
"""
return self.mFlavor
def angle(self):
"""
**SUMMARY**
Return the angle (theta) in degrees of the feature. The default is 0 (horizontal).
**RETURNS**
An angle value in degrees.
"""
return self.mAngle
def draw(self, color = Color.GREEN, width=1):
"""
**SUMMARY**
Draw a circle around the feature. Color tuple is single parameter, default is Green.
**PARAMETERS**
* *color* - An RGB color triplet.
* *width* - if width is less than zero we draw the feature filled in, otherwise we draw the
contour using the specified width.
**RETURNS**
Nothing - this is an inplace operation that modifies the source images drawing layer.
"""
self.image.dl().circle((self.x,self.y),self._r,color,width)
pt1 = (int(self.x),int(self.y))
pt2 = (int(self.x+(self.radius()*sin(radians(self.angle())))),
int(self.y+(self.radius()*cos(radians(self.angle())))))
self.image.dl().line(pt1,pt2,color,width)
def show(self, color = Color.GREEN):
"""
**SUMMARY**
This function will automatically draw the features on the image and show it.
        It is basically a shortcut function for development and is the same as:
>>> img = Image("logo")
>>> feat = img.findBlobs()
>>> if feat: feat.draw()
>>> img.show()
"""
self.draw(color)
self.image.show()
def distanceFrom(self, point = (-1, -1)):
"""
**SUMMARY**
Given a point (default to center of the image), return the euclidean distance of x,y from this point
"""
if (point[0] == -1 or point[1] == -1):
point = np.array(self.image.size()) / 2
return spsd.euclidean(point, [self.x, self.y])
def meanColor(self):
"""
**SUMMARY**
Return the average color within the feature's radius
**RETURNS**
Returns an RGB triplet that corresponds to the mean color of the feature.
**EXAMPLE**
>>> img = Image("lenna")
>>> kp = img.findKeypoints()
>>> c = kp[0].meanColor()
"""
#generate the mask
if( self._avgColor is None):
mask = self.image.getEmpty(1)
cv.Zero(mask)
cv.Circle(mask,(int(self.x),int(self.y)),int(self._r),color=(255,255,255),thickness=-1)
temp = cv.Avg(self.image.getBitmap(),mask)
self._avgColor = (temp[0],temp[1],temp[2])
return self._avgColor
def colorDistance(self, color = (0, 0, 0)):
"""
Return the euclidean color distance of the color tuple at x,y from a given color (default black)
"""
return spsd.euclidean(np.array(color), np.array(self.meanColor()))
def perimeter(self):
"""
**SUMMARY**
Returns the perimeter of the circle feature in pixels.
"""
return 2*pi*self._r
def width(self):
"""
**SUMMARY**
Returns the width of the feature -- for compliance just r*2
"""
return self._r*2
def height(self):
"""
**SUMMARY**
Returns the height of the feature -- for compliance just r*2
"""
return self._r*2
def radius(self):
"""
**SUMMARY**
Returns the radius of the circle in pixels.
"""
return self._r
def diameter(self):
"""
**SUMMARY**
Returns the diameter of the circle in pixels.
"""
return self._r*2
def crop(self,noMask=False):
"""
**SUMMARY**
        Crop the image region covered by the keypoint's circle.
**PARAMETERS**
* *noMask* - if noMask=True we return the bounding box image of the circle.
if noMask=False (default) we return the masked circle with the rest of the area set to black
**RETURNS**
The masked circle image.
"""
if( noMask ):
return self.image.crop(self.x, self.y, self.width(), self.height(), centered = True)
else:
mask = self.image.getEmpty(1)
result = self.image.getEmpty()
cv.Zero(mask)
cv.Zero(result)
#if you want to shave a bit of time we go do the crop before the blit
cv.Circle(mask,(int(self.x),int(self.y)),int(self._r),color=(255,255,255),thickness=-1)
cv.Copy(self.image.getBitmap(),result,mask)
retVal = Image(result)
retVal = retVal.crop(self.x, self.y, self.width(), self.height(), centered = True)
return retVal
######################################################################
class Motion(Feature):
"""
**SUMMARY**
The motion feature is used to encapsulate optical flow vectors. The feature
holds the length and direction of the vector.
"""
x = 0.0
y = 0.0
image = "" #parent image
points = []
dx = 0.00
dy = 0.00
norm_dy = 0.00
norm_dx = 0.00
window = 7
def __init__(self, i, at_x, at_y,dx,dy,wndw):
"""
i - the source image.
at_x - the sample x pixel position on the image.
at_y - the sample y pixel position on the image.
dx - the x component of the optical flow vector.
dy - the y component of the optical flow vector.
wndw - the size of the sample window (we assume it is square).
"""
self.dx = dx # the direction of the vector
self.dy = dy
self.window = wndw # the size of the sample window
sz = wndw/2
# so we center at the flow vector
points = [(at_x+sz,at_y+sz),(at_x-sz,at_y+sz),(at_x+sz,at_y+sz),(at_x+sz,at_y-sz)]
super(Motion, self).__init__(i, at_x, at_y, points)
def draw(self, color = Color.GREEN, width=1,normalize=True):
"""
**SUMMARY**
Draw the optical flow vector going from the sample point along the length of the motion vector.
**PARAMETERS**
* *color* - An RGB color triplet.
* *width* - if width is less than zero we draw the feature filled in, otherwise we draw the
contour using the specified width.
        * *normalize* - normalize the vector size to the size of the block (i.e. the biggest optical flow
          vector is scaled to the size of the block; all other vectors are scaled relative to
          the longest vector).
**RETURNS**
Nothing - this is an inplace operation that modifies the source images drawing layer.
"""
new_x = 0
new_y = 0
if( normalize ):
win = self.window/2
w = math.sqrt((win*win)*2)
new_x = (self.norm_dx*w) + self.x
new_y = (self.norm_dy*w) + self.y
else:
new_x = self.x + self.dx
new_y = self.y + self.dy
self.image.dl().line((self.x,self.y),(new_x,new_y),color,width)
def normalizeTo(self, max_mag):
"""
**SUMMARY**
        This helper method normalizes the vector given an input magnitude.
This is helpful for keeping the flow vector inside the sample window.
"""
if( max_mag == 0 ):
self.norm_dx = 0
self.norm_dy = 0
return None
mag = self.magnitude()
new_mag = mag/max_mag
unit = self.unitVector()
self.norm_dx = unit[0]*new_mag
self.norm_dy = unit[1]*new_mag
def magnitude(self):
"""
Returns the magnitude of the optical flow vector.
"""
return sqrt((self.dx*self.dx)+(self.dy*self.dy))
def unitVector(self):
"""
Returns the unit vector direction of the flow vector as an (x,y) tuple.
"""
mag = self.magnitude()
if( mag != 0.00 ):
return (float(self.dx)/mag,float(self.dy)/mag)
else:
return (0.00,0.00)
def vector(self):
"""
Returns the raw direction vector as an (x,y) tuple.
"""
return (self.dx,self.dy)
def windowSz(self):
"""
Return the window size that we sampled over.
"""
return self.window
def meanColor(self):
"""
        **SUMMARY**
        Return the mean color of the sample window surrounding the flow vector.
        **RETURNS**
        An RGB triplet corresponding to the mean color of the sample window.
"""
x = int(self.x-(self.window/2))
y = int(self.y-(self.window/2))
return self.image.crop(x,y,int(self.window),int(self.window)).meanColor()
def crop(self):
"""
This function returns the image in the sample window around the flow vector.
Returns Image
"""
x = int(self.x-(self.window/2))
y = int(self.y-(self.window/2))
return self.image.crop(x,y,int(self.window),int(self.window))
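# Typical normalization pass over a set of Motion features (sketch; `flow` is
# a hypothetical FeatureSet of Motion features from an optical-flow routine):
# >>> max_mag = max([m.magnitude() for m in flow])
# >>> for m in flow:
# ...     m.normalizeTo(max_mag)    # scale so each drawn vector fits its window
# ...     m.draw(normalize=True)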
######################################################################
class KeypointMatch(Feature):
"""
This class encapsulates a keypoint match between images of an object.
It is used to record a template of one image as it appears in another image
"""
x = 0.00
y = 0.00
image = "" #parent image
points = []
_minRect = []
_avgColor = None
_homography = []
_template = None
def __init__(self, image,template,minRect,_homography):
self._template = template
self._minRect = minRect
self._homography = _homography
xmax = 0
ymax = 0
xmin = image.width
ymin = image.height
for p in minRect:
if( p[0] > xmax ):
xmax = p[0]
if( p[0] < xmin ):
xmin = p[0]
if( p[1] > ymax ):
ymax = p[1]
if( p[1] < ymin ):
ymin = p[1]
width = (xmax-xmin)
height = (ymax-ymin)
at_x = xmin + (width/2)
at_y = ymin + (height/2)
#self.x = at_x
#self.y = at_y
points = [(xmin,ymin),(xmin,ymax),(xmax,ymax),(xmax,ymin)]
#self._updateExtents()
#self.image = image
#points =
super(KeypointMatch, self).__init__(image, at_x, at_y, points)
def draw(self, color = Color.GREEN,width=1):
"""
        **SUMMARY**
        The default drawing operation is to draw the minimum bounding
        rectangle of the match in an image. Default color is green.
**PARAMETERS**
* *color* - An RGB color triplet.
* *width* - if width is less than zero we draw the feature filled in, otherwise we draw the
contour using the specified width.
**RETURNS**
Nothing - this is an inplace operation that modifies the source images drawing layer.
"""
self.image.dl().line(self._minRect[0],self._minRect[1],color,width)
self.image.dl().line(self._minRect[1],self._minRect[2],color,width)
self.image.dl().line(self._minRect[2],self._minRect[3],color,width)
self.image.dl().line(self._minRect[3],self._minRect[0],color,width)
def drawRect(self, color = Color.GREEN,width=1):
"""
        This method draws the axis-aligned square box of the template
        match. This box holds the minimum bounding rectangle that describes
        the object. If the minimum bounding rectangle is axis-aligned
then the two bounding rectangles will match.
"""
self.image.dl().line(self.points[0],self.points[1],color,width)
self.image.dl().line(self.points[1],self.points[2],color,width)
self.image.dl().line(self.points[2],self.points[3],color,width)
self.image.dl().line(self.points[3],self.points[0],color,width)
def crop(self):
"""
        Returns a cropped image of the feature match. This cropped version is the
        axis-aligned box masked to just include the image data of the minimum bounding
rectangle.
"""
        TL = self.topLeftCorner()
        raw = self.image.crop(TL[0],TL[1],self.width(),self.height()) # crop the min bounding rect
return raw
def meanColor(self):
"""
        **SUMMARY**
        Return the average color within the feature's minimum bounding rectangle.
        **RETURNS**
        An RGB triplet corresponding to the mean color of the feature.
**EXAMPLE**
>>> img = Image("lenna")
>>> kp = img.findKeypoints()
>>> c = kp.meanColor()
"""
if( self._avgColor is None ):
TL = self.topLeftCorner()
            raw = self.image.crop(TL[0],TL[1],self.width(),self.height()) # crop the min bounding rect
mask = Image((self.width(),self.height()))
            mask.dl().polygon(self._minRect,color=Color.WHITE,filled=True)
mask = mask.applyLayers()
retVal = cv.Avg(raw.getBitmap(),mask._getGrayscaleBitmap())
self._avgColor = retVal
else:
retVal = self._avgColor
return retVal
def getMinRect(self):
"""
Returns the minimum bounding rectangle of the feature as a list
of (x,y) tuples.
"""
return self._minRect
def getHomography(self):
"""
        Returns the _homography matrix used to calculate the minimum bounding
rectangle.
"""
return self._homography
######################################################################
"""
Create a shape context descriptor.
"""
class ShapeContextDescriptor(Feature):
x = 0.00
y = 0.00
image = "" #parent image
points = []
_minRect = []
_avgColor = None
_descriptor = None
_sourceBlob = None
def __init__(self, image,point,descriptor,blob):
self._descriptor = descriptor
self._sourceBlob = blob
x = point[0]
y = point[1]
points = [(x-1,y-1),(x+1,y-1),(x+1,y+1),(x-1,y+1)]
super(ShapeContextDescriptor, self).__init__(image, x, y, points)
def draw(self, color = Color.GREEN,width=1):
"""
        **SUMMARY**
        Draw a small circle at the descriptor's location. Default color is green.
**PARAMETERS**
* *color* - An RGB color triplet.
* *width* - if width is less than zero we draw the feature filled in, otherwise we draw the
contour using the specified width.
**RETURNS**
Nothing - this is an inplace operation that modifies the source images drawing layer.
"""
self.image.dl().circle((self.x,self.y),3,color,width)
######################################################################
class ROI(Feature):
"""
    This class creates a region of interest that can be built from one
    or more features, or from no features at all.
"""
x = 0 # the center x coordinate
y = 0 # the center y coordinate
w = 0
h = 0
xtl = 0 # top left x
ytl = 0 # top left y
# we are going to assume x,y,w,h is our canonical form
points = [] # point list for cross compatibility
image = None
subFeatures = []
_meanColor = None
def __init__(self,x,y=None,w=None,h=None,image=None ):
"""
**SUMMARY**
This function can handle just about whatever you throw at it
        and makes it into a feature. Valid input items are tuples and lists
of x,y points, features, featuresets, two x,y points, and a
set of x,y,width,height values.
**PARAMETERS**
* *x* - this can be just about anything, a list or tuple of x points,
a corner of the image, a list of (x,y) points, a Feature, a FeatureSet
* *y* - this is usually a second point or set of y values.
* *w* - a width
* *h* - a height.
**RETURNS**
Nothing.
**EXAMPLE**
>>> img = Image('lenna')
>>> x,y = np.where(img.threshold(230).getGrayNumpy() > 128 )
>>> roi = ROI(zip(x,y),img)
>>> roi = ROI(x,y,img)
"""
#After forgetting to set img=Image I put this catch
# in to save some debugging headache.
if( isinstance(y,Image) ):
self.image = y
y = None
elif( isinstance(w,Image) ):
self.image = w
w = None
elif( isinstance(h,Image) ):
self.image = h
h = None
else:
self.image = image
if( image is None and isinstance(x,(Feature,FeatureSet))):
if( isinstance(x,Feature) ):
self.image = x.image
if( isinstance(x,FeatureSet) and len(x) > 0 ):
self.image = x[0].image
if(isinstance(x,Feature)):
self.subFeatures = FeatureSet([x])
        elif(isinstance(x,(list,tuple)) and len(x) > 0 and isinstance(x[0],Feature)):
self.subFeatures = FeatureSet(x)
result = self._standardize(x,y,w,h)
if result is None:
logger.warning("Could not create an ROI from your data.")
return
self._rebase(result)
def resize(self,w,h=None,percentage=True):
"""
**SUMMARY**
Contract/Expand the roi. By default use a percentage, otherwise use pixels.
This is all done relative to the center of the roi
**PARAMETERS**
        * *w* - the percentage to grow or shrink the region if it is the only parameter; otherwise
          it is the new ROI width.
* *h* - The new roi height in terms of pixels or a percentage.
* *percentage* - If true use percentages (e.g. 2 doubles the size), otherwise
use pixel values.
**RETURNS**
Nothing.
**EXAMPLE**
>>> roi = ROI(10,10,100,100,img)
>>> roi.resize(2)
>>> roi.show()
"""
if(h is None and isinstance(w,(tuple,list))):
h = w[1]
w = w[0]
if(percentage):
if( h is None ):
h = w
nw = self.w * w
nh = self.h * h
nx = self.xtl + ((self.w-nw)/2.0)
ny = self.ytl + ((self.h-nh)/2.0)
self._rebase([nx,ny,nw,nh])
else:
nw = self.w+w
nh = self.h+h
nx = self.xtl + ((self.w-nw)/2.0)
ny = self.ytl + ((self.h-nh)/2.0)
self._rebase([nx,ny,nw,nh])
def overlaps(self,otherROI):
for p in otherROI.points:
if( p[0] <= self.maxX() and p[0] >= self.minX() and
p[1] <= self.maxY() and p[1] >= self.minY() ):
return True
return False
def translate(self,x=0,y=0):
"""
**SUMMARY**
Move the roi.
**PARAMETERS**
* *x* - Move the ROI horizontally.
* *y* - Move the ROI vertically
**RETURNS**
Nothing.
**EXAMPLE**
>>> roi = ROI(10,10,100,100,img)
>>> roi.translate(30,30)
>>> roi.show()
"""
if( x == 0 and y == 0 ):
return
if(y == 0 and isinstance(x,(tuple,list))):
y = x[1]
x = x[0]
if( isinstance(x,(float,int)) and isinstance(y,(float,int))):
self._rebase([self.xtl+x,self.ytl+y,self.w,self.h])
def toXYWH(self):
"""
**SUMMARY**
Get the ROI as a list of the top left corner's x and y position
and the roi's width and height in pixels.
**RETURNS**
A list of the form [x,y,w,h]
**EXAMPLE**
>>> roi = ROI(10,10,100,100,img)
>>> roi.translate(30,30)
>>> print roi.toXYWH()
"""
return [self.xtl,self.ytl,self.w,self.h]
def toTLAndBR(self):
"""
**SUMMARY**
Get the ROI as a list of tuples of the ROI's top left
corner and bottom right corner.
**RETURNS**
A list of the form [(x,y),(x,y)]
**EXAMPLE**
>>> roi = ROI(10,10,100,100,img)
>>> roi.translate(30,30)
>>> print roi.toTLAndBR()
"""
return [(self.xtl,self.ytl),(self.xtl+self.w,self.ytl+self.h)]
def toPoints(self):
"""
**SUMMARY**
Get the ROI as a list of four points that make up the bounding rectangle.
**RETURNS**
A list of the form [(x,y),(x,y),(x,y),(x,y)]
**EXAMPLE**
>>> roi = ROI(10,10,100,100,img)
>>> print roi.toPoints()
"""
tl = (self.xtl,self.ytl)
tr = (self.xtl+self.w,self.ytl)
br = (self.xtl+self.w,self.ytl+self.h)
bl = (self.xtl,self.ytl+self.h)
return [tl,tr,br,bl]
def toUnitXYWH(self):
"""
**SUMMARY**
        Get the ROI as a list; the values are top left x, top left y,
        width, and height. These values are scaled to unit values with
        respect to the source image.
**RETURNS**
A list of the form [x,y,w,h]
**EXAMPLE**
>>> roi = ROI(10,10,100,100,img)
>>> print roi.toUnitXYWH()
"""
if(self.image is None):
return None
srcw = float(self.image.width)
srch = float(self.image.height)
x,y,w,h = self.toXYWH()
nx = 0
ny = 0
if( x != 0 ):
nx = x/srcw
if( y != 0 ):
ny = y/srch
return [nx,ny,w/srcw,h/srch]
def toUnitTLAndBR(self):
"""
**SUMMARY**
Get the ROI as a list of tuples of the ROI's top left
corner and bottom right corner. These coordinates are in unit
length values with respect to the source image.
**RETURNS**
A list of the form [(x,y),(x,y)]
**EXAMPLE**
>>> roi = ROI(10,10,100,100,img)
>>> roi.translate(30,30)
>>> print roi.toUnitTLAndBR()
"""
if(self.image is None):
return None
srcw = float(self.image.width)
srch = float(self.image.height)
x,y,w,h = self.toXYWH()
nx = 0
ny = 0
nw = w/srcw
nh = h/srch
if( x != 0 ):
nx = x/srcw
if( y != 0 ):
ny = y/srch
return [(nx,ny),(nx+nw,ny+nh)]
def toUnitPoints(self):
"""
**SUMMARY**
Get the ROI as a list of four points that make up the bounding rectangle.
        Each point is represented in unit coordinates with respect to the
        source image.
**RETURNS**
A list of the form [(x,y),(x,y),(x,y),(x,y)]
**EXAMPLE**
>>> roi = ROI(10,10,100,100,img)
>>> print roi.toUnitPoints()
"""
if(self.image is None):
return None
srcw = float(self.image.width)
srch = float(self.image.height)
pts = self.toPoints()
retVal = []
for p in pts:
x,y = p
if(x != 0):
x = x/srcw
if(y != 0):
y = y/srch
retVal.append((x,y))
return retVal
def CoordTransformX(self,x,intype="ROI",output="SRC"):
"""
**SUMMARY**
Transform a single or a set of x values from one reference frame to another.
Options are:
SRC - the coordinates of the source image.
ROI - the coordinates of the ROI
ROI_UNIT - unit coordinates in the frame of reference of the ROI
SRC_UNIT - unit coordinates in the frame of reference of source image.
**PARAMETERS**
* *x* - A list of x values or a single x value.
* *intype* - A string indicating the input format of the data.
* *output* - A string indicating the output format of the data.
**RETURNS**
A list of the transformed values.
**EXAMPLE**
>>> img = Image('lenna')
>>> blobs = img.findBlobs()
>>> roi = ROI(blobs[0])
        >>> x = ...  # find some x values in the crop region
        >>> xt = roi.CoordTransformX(x)
        >>> # xt is now in the space of the original image.
"""
if( self.image is None ):
logger.warning("No image to perform that calculation")
return None
if( isinstance(x,(float,int))):
x = [x]
intype = intype.upper()
output = output.upper()
if( intype == output ):
return x
return self._transform(x,self.image.width,self.w,self.xtl,intype,output)
def CoordTransformY(self,y,intype="ROI",output="SRC"):
"""
**SUMMARY**
Transform a single or a set of y values from one reference frame to another.
Options are:
SRC - the coordinates of the source image.
ROI - the coordinates of the ROI
ROI_UNIT - unit coordinates in the frame of reference of the ROI
SRC_UNIT - unit coordinates in the frame of reference of source image.
**PARAMETERS**
* *y* - A list of y values or a single y value.
* *intype* - A string indicating the input format of the data.
* *output* - A string indicating the output format of the data.
**RETURNS**
A list of the transformed values.
**EXAMPLE**
>>> img = Image('lenna')
>>> blobs = img.findBlobs()
>>> roi = ROI(blobs[0])
        >>> y = ...  # find some y values in the crop region
        >>> yt = roi.CoordTransformY(y)
        >>> # yt is now in the space of the original image.
"""
if( self.image is None ):
logger.warning("No image to perform that calculation")
return None
if( isinstance(y,(float,int))):
y = [y]
intype = intype.upper()
output = output.upper()
if( intype == output ):
return y
return self._transform(y,self.image.height,self.h,self.ytl,intype,output)
def CoordTransformPts(self,pts,intype="ROI",output="SRC"):
"""
**SUMMARY**
Transform a set of (x,y) values from one reference frame to another.
Options are:
SRC - the coordinates of the source image.
ROI - the coordinates of the ROI
ROI_UNIT - unit coordinates in the frame of reference of the ROI
SRC_UNIT - unit coordinates in the frame of reference of source image.
**PARAMETERS**
* *pts* - A list of (x,y) values or a single (x,y) value.
* *intype* - A string indicating the input format of the data.
* *output* - A string indicating the output format of the data.
**RETURNS**
A list of the transformed values.
**EXAMPLE**
>>> img = Image('lenna')
>>> blobs = img.findBlobs()
>>> roi = ROI(blobs[0])
        >>> pts = ...  # find some (x,y) values in the crop region
        >>> pts = roi.CoordTransformPts(pts)
        >>> # pts are now in the space of the original image.
"""
if( self.image is None ):
logger.warning("No image to perform that calculation")
return None
if( isinstance(pts,tuple) and len(pts)==2):
pts = [pts]
intype = intype.upper()
output = output.upper()
x = [pt[0] for pt in pts]
y = [pt[1] for pt in pts]
if( intype == output ):
return pts
x = self._transform(x,self.image.width,self.w,self.xtl,intype,output)
y = self._transform(y,self.image.height,self.h,self.ytl,intype,output)
return zip(x,y)
def _transform(self,x,imgsz,roisz,offset,intype,output):
xtemp = []
# we are going to go to src unit coordinates
# and then we'll go back.
if( intype == "SRC" ):
xtemp = [xt/float(imgsz) for xt in x]
elif( intype == "ROI" ):
xtemp = [(xt+offset)/float(imgsz) for xt in x]
elif( intype == "ROI_UNIT"):
xtemp = [((xt*roisz)+offset)/float(imgsz) for xt in x]
elif( intype == "SRC_UNIT"):
xtemp = x
else:
logger.warning("Bad Parameter to CoordTransformX")
return None
retVal = []
if( output == "SRC" ):
retVal = [int(xt*imgsz) for xt in xtemp]
elif( output == "ROI" ):
retVal = [int((xt*imgsz)-offset) for xt in xtemp]
elif( output == "ROI_UNIT"):
retVal = [int(((xt*imgsz)-offset)/float(roisz)) for xt in xtemp]
elif( output == "SRC_UNIT"):
retVal = xtemp
else:
logger.warning("Bad Parameter to CoordTransformX")
return None
return retVal
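    # Worked example of the chain above: for an ROI with xtl=10 and w=50 inside
    # a 200 px wide image, the ROI x-value 20 maps to SRC_UNIT as
    # (20+10)/200 = 0.15, and then to SRC as int(0.15*200) = 30.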
def splitX(self,x,unitVals=False,srcVals=False):
"""
**SUMMARY**
        Split the ROI at an x value.
        x can be a list of sequential x split points, e.g. [0.3, 0.6],
        where we assume the left and right edges are also on the list.
Use units to split as a percentage (e.g. 30% down).
The srcVals means use coordinates of the original image.
**PARAMETERS**
* *x*-The split point. Can be a single point or a list of points. the type is determined by the flags.
* *unitVals* - Use unit vals for the split point. E.g. 0.5 means split at 50% of the ROI.
* *srcVals* - Use x values relative to the source image rather than relative to the ROI.
**RETURNS**
Returns a feature set of ROIs split from the source ROI.
**EXAMPLE**
>>> roi = ROI(0,0,100,100,img)
>>> splits = roi.splitX(50) # create two ROIs
"""
retVal = FeatureSet()
if(unitVals and srcVals):
logger.warning("Not sure how you would like to split the feature")
return None
if(not isinstance(x,(list,tuple))):
x = [x]
if unitVals:
x = self.CoordTransformX(x,intype="ROI_UNIT",output="SRC")
elif not srcVals:
x = self.CoordTransformX(x,intype="ROI",output="SRC")
for xt in x:
if( xt < self.xtl or xt > self.xtl+self.w ):
logger.warning("Invalid split point.")
return None
x.insert(0,self.xtl)
x.append(self.xtl+self.w)
for i in xrange(0,len(x)-1):
xstart = x[i]
xstop = x[i+1]
w = xstop-xstart
retVal.append(ROI(xstart,self.ytl,w,self.h,self.image ))
return retVal
def splitY(self,y,unitVals=False,srcVals=False):
"""
**SUMMARY**
        Split the ROI at a y value.
        y can be a list of sequential y split points, e.g. [0.3, 0.6],
        where we assume the top and bottom edges are also on the list.
Use units to split as a percentage (e.g. 30% down).
The srcVals means use coordinates of the original image.
**PARAMETERS**
* *y*-The split point. Can be a single point or a list of points. the type is determined by the flags.
* *unitVals* - Use unit vals for the split point. E.g. 0.5 means split at 50% of the ROI.
        * *srcVals* - Use y values relative to the source image rather than relative to the ROI.
**RETURNS**
Returns a feature set of ROIs split from the source ROI.
**EXAMPLE**
>>> roi = ROI(0,0,100,100,img)
>>> splits = roi.splitY(50) # create two ROIs
"""
retVal = FeatureSet()
if(unitVals and srcVals):
logger.warning("Not sure how you would like to split the feature")
return None
if(not isinstance(y,(list,tuple))):
y = [y]
if unitVals:
y = self.CoordTransformY(y,intype="ROI_UNIT",output="SRC")
elif not srcVals:
y = self.CoordTransformY(y,intype="ROI",output="SRC")
for yt in y:
if( yt < self.ytl or yt > self.ytl+self.h ):
logger.warning("Invalid split point.")
return None
y.insert(0,self.ytl)
y.append(self.ytl+self.h)
for i in xrange(0,len(y)-1):
ystart = y[i]
ystop = y[i+1]
h = ystop-ystart
retVal.append(ROI(self.xtl,ystart,self.w,h,self.image ))
return retVal
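    # Sketch: carving an ROI into a 2x2 grid with the two split helpers above.
    # >>> roi = ROI(0, 0, 100, 100, img)
    # >>> cells = FeatureSet()
    # >>> for half in roi.splitX(0.5, unitVals=True):
    # ...     cells += half.splitY(0.5, unitVals=True)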
def merge(self, regions):
"""
**SUMMARY**
        Combine another region, or regions, with this ROI. Everything must be
        in the source image coordinates. Regions can be an ROI, a list of ROIs,
        Features, FeatureSets, or anything that can be coerced into a region.
**PARAMETERS**
* *regions* - A region or list of regions. Regions are just about anything that has position.
**RETURNS**
Nothing, but modifies this region.
**EXAMPLE**
        >>> blobs = img.findBlobs()
        >>> roi = ROI(blobs[0])
        >>> print roi.toXYWH()
        >>> roi.merge(blobs[2])
>>> print roi.toXYWH()
"""
result = self._standardize(regions)
if( result is not None ):
xo,yo,wo,ho = result
x = np.min([xo,self.xtl])
y = np.min([yo,self.ytl])
w = np.max([self.xtl+self.w,xo+wo])-x
h = np.max([self.ytl+self.h,yo+ho])-y
if( self.image is not None ):
x = np.clip(x,0,self.image.width)
y = np.clip(y,0,self.image.height)
w = np.clip(w,0,self.image.width-x)
h = np.clip(h,0,self.image.height-y)
self._rebase([x,y,w,h])
if( isinstance(regions,ROI) ):
self.subFeatures += regions
elif( isinstance(regions,Feature) ):
self.subFeatures.append(regions)
elif( isinstance(regions,(list,tuple)) ):
if(isinstance(regions[0],ROI)):
for r in regions:
self.subFeatures += r.subFeatures
elif(isinstance(regions[0],Feature)):
for r in regions:
self.subFeatures.append(r)
def rebase(self, x,y=None,w=None,h=None):
"""
Completely alter roi using whatever source coordinates you wish.
"""
if(isinstance(x,Feature)):
self.subFeatures.append(x)
        elif(isinstance(x,(list,tuple)) and len(x) > 0 and isinstance(x[0],Feature)):
self.subFeatures += list(x)
result = self._standardize(x,y,w,h)
if result is None:
logger.warning("Could not create an ROI from your data.")
return
self._rebase(result)
def draw(self, color = Color.GREEN,width=3):
"""
**SUMMARY**
This method will draw the feature on the source image.
**PARAMETERS**
* *color* - The color as an RGB tuple to render the image.
**RETURNS**
Nothing.
**EXAMPLE**
>>> img = Image("RedDog2.jpg")
>>> blobs = img.findBlobs()
>>> blobs[-1].draw()
>>> img.show()
"""
x,y,w,h = self.toXYWH()
self.image.drawRectangle(x,y,w,h,width=width,color=color)
def show(self, color = Color.GREEN, width=2):
"""
**SUMMARY**
This function will automatically draw the features on the image and show it.
**RETURNS**
Nothing.
**EXAMPLE**
>>> img = Image("logo")
>>> feat = img.findBlobs()
>>> feat[-1].show() #window pops up.
"""
self.draw(color,width)
self.image.show()
def meanColor(self):
"""
**SUMMARY**
Return the average color within the feature as a tuple.
**RETURNS**
An RGB color tuple.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> for b in blobs:
        >>> if (b.meanColor() == Color.WHITE):
>>> print "Found a white thing"
"""
x,y,w,h = self.toXYWH()
return self.image.crop(x,y,w,h).meanColor()
def _rebase(self,roi):
x,y,w,h = roi
self._mMaxX = None
self._mMaxY = None
self._mMinX = None
self._mMinY = None
self._mWidth = None
self._mHeight = None
self.mExtents = None
self.mBoundingBox = None
self.xtl = x
self.ytl = y
self.w = w
self.h = h
self.points = [(x,y),(x+w,y),(x,y+h),(x+w,y+h)]
#WE MAY WANT TO DO A SANITY CHECK HERE
self._updateExtents()
def _standardize(self,x,y=None,w=None,h=None):
if(isinstance(x,np.ndarray)):
x = x.tolist()
if(isinstance(y,np.ndarray)):
y = y.tolist()
# make the common case fast
if( isinstance(x,(int,float)) and isinstance(y,(int,float)) and
isinstance(w,(int,float)) and isinstance(h,(int,float)) ):
if( self.image is not None ):
x = np.clip(x,0,self.image.width)
y = np.clip(y,0,self.image.height)
w = np.clip(w,0,self.image.width-x)
h = np.clip(h,0,self.image.height-y)
return [x,y,w,h]
elif(isinstance(x,ROI)):
x,y,w,h = x.toXYWH()
#If it's a feature extract what we need
elif(isinstance(x,FeatureSet) and len(x) > 0 ):
#double check that everything in the list is a feature
features = [feat for feat in x if isinstance(feat,Feature)]
xmax = np.max([feat.maxX() for feat in features])
xmin = np.min([feat.minX() for feat in features])
ymax = np.max([feat.maxY() for feat in features])
ymin = np.min([feat.minY() for feat in features])
x = xmin
y = ymin
w = xmax-xmin
h = ymax-ymin
elif(isinstance(x, Feature)):
theFeature = x
x = theFeature.points[0][0]
y = theFeature.points[0][1]
w = theFeature.width()
h = theFeature.height()
# [x,y,w,h] (x,y,w,h)
elif(isinstance(x, (tuple,list)) and len(x) == 4 and isinstance(x[0],(int, long, float))
and y == None and w == None and h == None):
x,y,w,h = x
# x of the form [(x,y),(x1,y1),(x2,y2),(x3,y3)]
# x of the form [[x,y],[x1,y1],[x2,y2],[x3,y3]]
# x of the form ([x,y],[x1,y1],[x2,y2],[x3,y3])
# x of the form ((x,y),(x1,y1),(x2,y2),(x3,y3))
elif( isinstance(x, (list,tuple)) and
isinstance(x[0],(list,tuple)) and
(len(x) == 4 and len(x[0]) == 2 ) and
y == None and w == None and h == None):
if (len(x[0])==2 and len(x[1])==2 and len(x[2])==2 and len(x[3])==2):
xmax = np.max([x[0][0],x[1][0],x[2][0],x[3][0]])
ymax = np.max([x[0][1],x[1][1],x[2][1],x[3][1]])
xmin = np.min([x[0][0],x[1][0],x[2][0],x[3][0]])
ymin = np.min([x[0][1],x[1][1],x[2][1],x[3][1]])
x = xmin
y = ymin
w = xmax-xmin
h = ymax-ymin
else:
logger.warning("x should be in the form ((x,y),(x1,y1),(x2,y2),(x3,y3))")
return None
# x,y of the form [x1,x2,x3,x4,x5....] and y similar
elif(isinstance(x, (tuple,list)) and
isinstance(y, (tuple,list)) and
len(x) > 4 and len(y) > 4 ):
if(isinstance(x[0],(int, long, float)) and isinstance(y[0],(int, long, float))):
xmax = np.max(x)
ymax = np.max(y)
xmin = np.min(x)
ymin = np.min(y)
x = xmin
y = ymin
w = xmax-xmin
h = ymax-ymin
else:
logger.warning("x should be in the form x = [1,2,3,4,5] y =[0,2,4,6,8]")
return None
# x of the form [(x,y),(x,y),(x,y),(x,y),(x,y),(x,y)]
elif(isinstance(x, (list,tuple)) and
len(x) > 4 and len(x[0]) == 2 and y == None and w == None and h == None):
if(isinstance(x[0][0],(int, long, float))):
xs = [pt[0] for pt in x]
ys = [pt[1] for pt in x]
xmax = np.max(xs)
ymax = np.max(ys)
xmin = np.min(xs)
ymin = np.min(ys)
x = xmin
y = ymin
w = xmax-xmin
h = ymax-ymin
else:
logger.warning("x should be in the form [(x,y),(x,y),(x,y),(x,y),(x,y),(x,y)]")
return None
# x of the form [(x,y),(x1,y1)]
elif(isinstance(x,(list,tuple)) and len(x) == 2 and isinstance(x[0],(list,tuple)) and isinstance(x[1],(list,tuple)) and y == None and w == None and h == None):
if (len(x[0])==2 and len(x[1])==2):
xt = np.min([x[0][0],x[1][0]])
                yt = np.min([x[0][1],x[1][1]])
w = np.abs(x[0][0]-x[1][0])
h = np.abs(x[0][1]-x[1][1])
x = xt
y = yt
else:
logger.warning("x should be in the form [(x1,y1),(x2,y2)]")
return None
# x and y of the form (x,y),(x1,y2)
elif(isinstance(x, (tuple,list)) and isinstance(y,(tuple,list)) and w == None and h == None):
if (len(x)==2 and len(y)==2):
xt = np.min([x[0],y[0]])
yt = np.min([x[1],y[1]])
w = np.abs(y[0]-x[0])
h = np.abs(y[1]-x[1])
x = xt
y = yt
else:
logger.warning("if x and y are tuple it should be in the form (x1,y1) and (x2,y2)")
return None
        if(y == None or w == None or h == None):
            logger.warning('Not a valid roi')
            return None
elif( w <= 0 or h <= 0 ):
logger.warning("ROI can't have a negative dimension")
return None
if( self.image is not None ):
x = np.clip(x,0,self.image.width)
y = np.clip(y,0,self.image.height)
w = np.clip(w,0,self.image.width-x)
h = np.clip(h,0,self.image.height-y)
return [x,y,w,h]
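        # e.g. (hypothetical values): each of these calls normalizes to [10, 20, 30, 40]:
        #   self._standardize(10, 20, 30, 40)
        #   self._standardize([10, 20, 30, 40])
        #   self._standardize((10, 20), (40, 60))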
def crop(self):
retVal = None
if(self.image is not None):
retVal = self.image.crop(self.xtl,self.ytl,self.w,self.h)
return retVal
```
|
{
"source": "JDNdeveloper/2048_Smart_Players",
"score": 4
}
|
#### File: 2048_Smart_Players/src/Model.py
```python
import copy
import random
DEFAULT_SIZE = 4
class Move:
UP = 1
DOWN = 2
LEFT = 3
RIGHT = 4
class Model(object):
"""Board is 4x4."""
SIZE = DEFAULT_SIZE
"""Fill values, using value repetition as a probability distribution."""
FILL_VALUES = [2] * 9 + [4] * 1
"""All possible moves."""
MOVES = (Move.UP, Move.DOWN, Move.LEFT, Move.RIGHT)
MOVE_NAMES = ('', 'UP', 'DOWN', 'LEFT', 'RIGHT')
def __init__(self):
self.board = None
self.score = None
self.reset()
def getState(self):
"""Retrieves current state.
Returns:
(board, score): Current board and score.
"""
return (self.board, self.score)
@staticmethod
def makeBoardMove(board, move, modifyState=True, returnBoard=False):
"""Executes a move.
Args:
move: The move to execute.
modifyState: If False the board is not updated with the move.
returnBoard: Also returns the new board.
Returns:
moveScore: The points made from the given move.
boardChanged: True if the move would/did make the board change.
(optional) newBoard: Board with the move performed.
"""
boardChanged = False
moveScore = 0
assert move in Model.MOVES
if move == Move.UP:
allRowColPairs = [zip(range(Model.SIZE), [i] * Model.SIZE)
for i in range(Model.SIZE)]
elif move == Move.DOWN:
allRowColPairs = [zip(list(reversed(range(Model.SIZE))), [i] * Model.SIZE)
for i in range(Model.SIZE)]
elif move == Move.LEFT:
allRowColPairs = [zip([i] * Model.SIZE, range(Model.SIZE))
for i in range(Model.SIZE)]
elif move == Move.RIGHT:
allRowColPairs = [zip([i] * Model.SIZE, list(reversed(range(Model.SIZE))))
for i in range(Model.SIZE)]
if returnBoard:
newBoard = copy.deepcopy(board)
for rowColPairs in allRowColPairs:
line = [board[row][col] for (row, col) in rowColPairs]
(newLine, lineScore) = Model._compressLine(line)
if newLine != line:
boardChanged = True
moveScore += lineScore
if modifyState:
for (val, (row, col)) in zip(newLine, rowColPairs):
board[row][col] = val
if returnBoard:
for (val, (row, col)) in zip(newLine, rowColPairs):
newBoard[row][col] = val
if returnBoard:
return (moveScore, boardChanged, newBoard)
return (moveScore, boardChanged)
def makeMove(self, move):
"""Performs move on the game board, performs random fill (if board changed),
updates score."""
(moveScore, boardChanged) = self.makeBoardMove(
self.board, move, modifyState=True, returnBoard=False)
if boardChanged:
# if the move actually changed the game board,
# we do a random fill
self._randomFill()
self.score += moveScore
return (moveScore, boardChanged)
@staticmethod
def getBoardNumTiles(board):
"""Returns number of tiles on the board"""
return sum(1 for row in board for val in row if val is not None)
def numTiles(self):
return self.getBoardNumTiles(self.board)
@staticmethod
def isBoardGameOver(board):
"""True if game is over."""
if len(Model.getBoardOpenPositions(board)) > 0:
return False
for i in range(Model.SIZE):
# check for consecutive numbers in all rows and cols
prevRowVal = None
prevColVal = None
for j in range(Model.SIZE):
# check row
val = board[i][j]
if val == prevRowVal:
return False
else:
prevRowVal = val
# check col
val = board[j][i]
if val == prevColVal:
return False
else:
prevColVal = val
return True
def isGameOver(self):
return self.isBoardGameOver(self.board)
@staticmethod
def getBoardMaxTile(board):
"""Returns value of maximum tile on the board."""
return max(val for row in board for val in row)
def maxTile(self):
return self.getBoardMaxTile(self.board)
def reset(self):
"""Resets the game."""
self.board = [[None] * self.SIZE for _ in range(self.SIZE)]
self.score = 0
# add the initial two tiles
self._randomFill()
self._randomFill()
@staticmethod
def _compressLine(line):
"""Compresses line to the left.
Args:
line: The original line.
Returns:
newLine: The compressed line.
lineScore: The points made from compressing this line.
"""
newLine = []
lineScore = 0
prevVal = None
for val in line:
if val is None:
continue
if val == prevVal:
newVal = 2 * val
newLine[-1] = newVal
lineScore += newVal
prevVal = None
else:
newLine.append(val)
prevVal = val
if len(newLine) < Model.SIZE:
newLine += [None] * (Model.SIZE - len(newLine))
return (newLine, lineScore)
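    # Example (hypothetical): with SIZE=4, _compressLine([2, 2, None, 4])
    # returns ([4, 4, None, None], 4): the leading pair merges for 4 points,
    # the lone 4 shifts left, and the line is padded back to SIZE with None.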
@staticmethod
def getBoardScore(board):
"""Get sum of elements on the board"""
return sum([sum(filter(None, row)) for row in board])
@staticmethod
def getBoardOpenPositions(board):
"""Retrieve open positions.
Returns:
openPositions: List of open (row, col) positions.
"""
return [(row, col) for row in range(Model.SIZE) for col in range(Model.SIZE)
if board[row][col] == None]
@staticmethod
def getBoardSortedValues(board):
"""Return tile values sorted in descending order"""
return list(reversed(sorted(val for row in board for val in row)))
@staticmethod
def getBoardRotated(board):
"""Return board rotated clockwise by 90 degrees"""
return [list(row) for row in zip(*board[::-1])]
@staticmethod
def getBoardMirrored(board):
"""Return mirrored board"""
return [row[::-1] for row in board]
@staticmethod
def doBoardRandomFill(board):
"""Randomly fill an open position on the board.
The fill values and probability distribution is defined above.
"""
openPositions = Model.getBoardOpenPositions(board)
if openPositions:
(row, col) = random.choice(openPositions)
board[row][col] = random.choice(Model.FILL_VALUES)
def _randomFill(self):
self.doBoardRandomFill(self.board)
@staticmethod
def getBoardString(board):
"""Return string representation of the board."""
rowBreak = '--------' * Model.SIZE + '-\n'
s = ''
for row in range(Model.SIZE):
s += rowBreak
for col in range(Model.SIZE):
val = board[row][col]
s += '| %s\t' % str(val if val is not None else '')
s += '|\n'
s += rowBreak
return s
def __str__(self):
return self.getBoardString(self.board)
```
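A minimal driver for the model above (a hypothetical sketch, not part of the repo; assumes `Model.py` is on the import path):
```python
import random

import Model

m = Model.Model()  # fresh 4x4 board seeded with two random tiles
while not m.isGameOver():
    # makeMove returns (moveScore, boardChanged) and only adds a new
    # random tile when the board actually changed
    m.makeMove(random.choice(Model.Model.MOVES))
print(m)  # board rendered via getBoardString
print('final score: %d, max tile: %d' % (m.score, m.maxTile()))
```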
#### File: 2048_Smart_Players/src/ModelTest.py
```python
import copy
import unittest
import Model
class ModelTest(unittest.TestCase):
def setUp(self):
self.m = Model.Model()
def testReset(self):
# fill the board and score with junk
        self.m.board = [range(self.m.SIZE) for _ in range(self.m.SIZE)]
        self.m.score = 1234
# reset
self.m.reset()
# check that there are only two values in the board
# and the score is reset
#
# run enough times to make sure we see all 2 and 4 combos
# and verify we did see all of them by the end and that we
# see more (2,2) than (2,4) than (4,4) (due to the probabilities)
expectedFilledValues = [set([2,2]), set([2,4]), set([4,4])]
occurrences = [0, 0, 0]
for _ in range(5000):
self.m.reset()
filledValues = [self.m.board[row][col]
for row in range(self.m.SIZE)
for col in range(self.m.SIZE)
if self.m.board[row][col] is not None]
self.assertIn(set(filledValues), expectedFilledValues)
occurrences[expectedFilledValues.index(set(filledValues))] += 1
self.assertEquals(len(filledValues), 2)
self.assertEquals(self.m.score, 0)
self.assertGreater(occurrences[0], occurrences[1])
self.assertGreater(occurrences[1], occurrences[2])
self.assertGreater(occurrences[2], 0)
def testMakeMove(self):
## Direction Tests
diagonalBoard = [[2 if row == col else None
for col in range(self.m.SIZE)]
for row in range(self.m.SIZE)]
def verifyBoard(fixedRowColValues=None,
fixedRowValues=None,
fixedColValues=None,
randomFillExpected=1):
"""Verifies board has expected values."""
randomFillFound = 0
for row in range(self.m.SIZE):
for col in range(self.m.SIZE):
val = self.m.board[row][col]
if fixedRowColValues and (row, col) in fixedRowColValues:
self.assertEqual(val, fixedRowColValues[(row, col)])
elif fixedRowValues and row in fixedRowValues:
self.assertEqual(val, fixedRowValues[row])
elif fixedColValues and col in fixedColValues:
self.assertEqual(val, fixedColValues[col])
else:
if val is not None:
randomFillFound += 1
self.assertEquals(randomFillFound, randomFillExpected)
def verifyMove(initialBoard, move, expectedScore,
expectedMoveScore=None, reset=True,
expectedBoardToChange=True):
"""Sets up initial board and verifies move and resulting state."""
if reset:
self.m.reset()
if expectedMoveScore is None:
expectedMoveScore = expectedScore
self.m.board = copy.deepcopy(initialBoard)
(moveScore, boardChanged) = self.m.makeMove(move)
self.assertEqual(moveScore, expectedMoveScore)
self.assertEqual(boardChanged, expectedBoardToChange)
self.assertEqual(self.m.score, expectedScore)
# UP
verifyMove(diagonalBoard, Model.Move.UP, 0)
verifyBoard(fixedRowValues={0: 2})
# DOWN
verifyMove(diagonalBoard, Model.Move.DOWN, 0)
verifyBoard(fixedRowValues={self.m.SIZE - 1: 2})
# LEFT
verifyMove(diagonalBoard, Model.Move.LEFT, 0)
verifyBoard(fixedColValues={0: 2})
# RIGHT
verifyMove(diagonalBoard, Model.Move.RIGHT, 0)
verifyBoard(fixedColValues={self.m.SIZE - 1: 2})
## Merging Tests
# two items
twoMergeBoard = [[2 if row in [0, 1] else None
for col in range(self.m.SIZE)]
for row in range(self.m.SIZE)]
expectedTwoMergeScore = 4 * self.m.SIZE
verifyMove(twoMergeBoard, Model.Move.UP, expectedTwoMergeScore)
verifyBoard(fixedRowValues={0: 4})
# four items
fourMergeBoard = [[2 if row in [0, 1, 2, 3] else None
for col in range(self.m.SIZE)]
for row in range(self.m.SIZE)]
expectedFourMergeScore = 2 * expectedTwoMergeScore
verifyMove(fourMergeBoard, Model.Move.UP, expectedFourMergeScore)
verifyBoard(fixedRowValues={0: 4, 1: 4})
# three items (third item should not merge)
threeMergeBoard = [[2 if row in [0, 1, 2] else None
for col in range(self.m.SIZE)]
for row in range(self.m.SIZE)]
expectedThreeMergeScore = expectedTwoMergeScore
verifyMove(threeMergeBoard, Model.Move.UP, expectedThreeMergeScore)
verifyBoard(fixedRowValues={0: 4, 1: 2})
## Edge Case Tests
# moving UP should not change the board
upFixedBoard = [[2 if row in [0] else None
for col in range(self.m.SIZE)]
for row in range(self.m.SIZE)]
verifyMove(upFixedBoard, Model.Move.UP, 0, expectedBoardToChange=False)
verifyBoard(fixedRowValues={0: 2}, randomFillExpected=0)
# two moves in a row, both with merges
verifyMove(fourMergeBoard, Model.Move.UP, expectedFourMergeScore)
verifyBoard(fixedRowValues={0: 4, 1: 4})
verifyMove(self.m.board, Model.Move.UP, 2 * expectedFourMergeScore,
expectedMoveScore=expectedFourMergeScore, reset=False)
verifyBoard(fixedRowValues={0: 8}, randomFillExpected=2)
# verify score for up/down and left/right are the same
sameScoreBoard = [
[2, None, None, None],
[None, None, None, 2],
[None, None, None, 2],
[None, None, 4, 2],
]
verifyMove(sameScoreBoard, Model.Move.UP, 4)
verifyMove(sameScoreBoard, Model.Move.DOWN, 4)
verifyMove(sameScoreBoard, Model.Move.LEFT, 0)
verifyMove(sameScoreBoard, Model.Move.RIGHT, 0)
# test a 5x5 board
Model.Model.SIZE = 5
fiveBoard = [[2 if row in [0, 1, 2, 3] else None
for col in range(self.m.SIZE)]
for row in range(self.m.SIZE)]
expectedFiveBoardScore = 2 * 4 * self.m.SIZE
verifyMove(fiveBoard, Model.Move.UP, expectedFiveBoardScore)
verifyBoard(fixedRowValues={0: 4, 1: 4})
# set size back to default
Model.Model.SIZE = Model.DEFAULT_SIZE
def testIsGameOver(self):
# verify new board is not game over
self.assertFalse(self.m.isGameOver())
# verify full board with possible up/down compression
# is not game over
self.m.board = [[2 ** (col + 1)
for col in range(self.m.SIZE)]
for row in range(self.m.SIZE)]
self.assertFalse(self.m.isGameOver())
# verify full board with possible left/right compression
# is not game over
self.m.board = [[2 ** (row + 1)
for col in range(self.m.SIZE)]
for row in range(self.m.SIZE)]
self.assertFalse(self.m.isGameOver())
# verify full board with no compression is game over
self.m.board = [[2 ** ((row + col) % 2 + 1)
for col in range(self.m.SIZE)]
for row in range(self.m.SIZE)]
self.assertTrue(self.m.isGameOver())
def testMaxTile(self):
# test empty board
self.m.board = [[None for _ in range(self.m.SIZE)]
for _ in range(self.m.SIZE)]
self.assertEqual(self.m.maxTile(), None)
# test normal board
self.m.board = [
[2, None, None, None],
[None, None, None, 2],
[None, None, None, 2],
[None, None, 4, 2],
]
self.assertEqual(self.m.maxTile(), 4)
# test full board with duplicate max's
self.m.board = [
[2, 8, 16, 256],
[8, 32, 4, 2],
[2, 256, 64, 2],
[16, 8, 4, 2],
]
self.assertEqual(self.m.maxTile(), 256)
def testNumTiles(self):
# test empty board
self.m.board = [[None for _ in range(self.m.SIZE)]
for _ in range(self.m.SIZE)]
self.assertEqual(self.m.numTiles(), 0)
# test board with two tiles
self.m.board = [
[2, None, None, None],
[None, 4, None, None],
[None, None, None, None],
[None, None, None, None],
]
self.assertEqual(self.m.numTiles(), 2)
# test board with five tiles
self.m.board = [
[2, None, None, None],
[None, None, None, 2],
[None, None, None, 2],
[None, None, 4, 2],
]
self.assertEqual(self.m.numTiles(), 5)
# test full board
self.m.board = [
[2, 8, 16, 256],
[8, 32, 4, 2],
[2, 4, 64, 2],
[16, 8, 4, 2],
]
self.assertEqual(self.m.numTiles(), 16)
if __name__ == '__main__':
unittest.main()
```
#### File: 2048_Smart_Players/src/Player.py
```python
import collections
import numpy as np
import random
import time
import Model
class Player(object):
def __init__(self, debug=False):
self.m = Model.Model()
self.debug = debug
def run(self, numIters=1, printStats=False, printAtCheckpoints=False):
"""Runs the game.
Args:
numIters: The number of times to run the game.
printStats: If True, stats for scores are outputted.
Returns:
(scores, maxTiles): Scores and maxTiles lists from all runs.
"""
startTime = time.time()
endTime = time.time()
scores = []
maxTiles = []
numMoves = []
def printAllStats():
print 'Total runs: %d, %.3f seconds' % (len(scores), endTime - startTime)
self._printScoreStats(scores)
self._printMaxTileStats(maxTiles)
self._printMaxTileHistogram(maxTiles)
self._printMoveStats(numMoves)
checkpoint = 1
for i in range(numIters):
if i == checkpoint:
checkpoint *= 10
if printAtCheckpoints:
printAllStats()
print ''
self.m.reset()
count = 0
while not self.m.isGameOver():
board, score = self.m.getState()
move = self.getMove(board, score)
if self.debug:
print "Move Chosen: %s" % self.m.MOVE_NAMES[move]
print self.m.getBoardString(board)
self.m.makeMove(move)
count += 1
endTime = time.time()
scores.append(self.m.score)
maxTiles.append(self.m.maxTile())
numMoves.append(count)
if printStats:
printAllStats()
return (endTime - startTime, scores, maxTiles, numMoves)
def getMove(self, board, score):
"""Get the next move given current board and score."""
raise NotImplementedError
@staticmethod
def _printStats(data, dataName):
"""Outputs stastics about the given data."""
npData = np.array(data)
print (dataName + ' ::: ' + ', '.join([
"Max: %d",
"Min: %d",
"Median: %d",
"Average: %d",
"Stdev: %d",
]) % (max(data),
min(data),
np.median(npData),
npData.mean(),
npData.std(),
))
@staticmethod
def _printScoreStats(scores):
"""Output basic statistics about the scores."""
Player._printStats(scores, 'SCORES')
@staticmethod
def _printMaxTileStats(maxTiles):
"""Output stastics about the max tiles."""
Player._printStats(maxTiles, 'MAX TILES')
@staticmethod
def _printMoveStats(numMoves):
"""Output stastics about the number of moves."""
Player._printStats(numMoves, 'MOVES')
@staticmethod
def _printHistogram(data, dataName):
"""Outputs histogram for the given data."""
histogram = collections.defaultdict(int)
for d in data:
histogram[d] += 1
print (dataName + ' histogram' + ' ::: ' + ', '.join([
'%d: %d' % (value, occurrences)
for value, occurrences in sorted(histogram.iteritems())
]))
@staticmethod
def _printMaxTileHistogram(maxTiles):
"""Outputs histogram for max tiles."""
Player._printHistogram(maxTiles, 'MAX TILES')
class BaselineGreedyPlayer(Player):
def getMove(self, board, score):
"""Player chooses move that yields maximum points for that turn.
If scores are the same (which they always are for up/down and left/right)
it selects in the following order: UP, LEFT, DOWN, RIGHT
Note that in the above ordering moves are only considered if they cause the
board to change.
"""
maxScore = 0
maxMove = None
validMoves = []
for move in [Model.Move.UP, Model.Move.LEFT]:
# choose move that maximizes score and would actually change the board
(moveScore, boardChanged) = self.m.makeBoardMove(board, move,
modifyState=False)
if boardChanged:
validMoves.append(move)
if moveScore > maxScore:
maxMove = move
maxScore = moveScore
if maxMove:
# if one or both of the scores were non-zero, return the max score move
return maxMove
if validMoves:
# if there were no moves with non-zero score, just return a valid one
return validMoves[0]
for move in [Model.Move.DOWN, Model.Move.RIGHT]:
# if up and left were not valid, return the first valid of down and right
(_, boardChanged) = self.m.makeBoardMove(board, move, modifyState=False)
if boardChanged:
return move
class BaselineCornerPlayer(Player):
def getMove(self, board, score):
"""Always returns a playable move in the following order:
UP, LEFT, RIGHT, DOWN.
This approach concentrates the pieces in the corners, and should behave
better than the random player.
"""
for move in [Model.Move.UP, Model.Move.LEFT,
Model.Move.RIGHT, Model.Move.DOWN]:
(_, boardChanged) = self.m.makeBoardMove(board, move, modifyState=False)
if boardChanged:
return move
class BaselineRandomPlayer(Player):
def getMove(self, board, score):
"""Gives a random move."""
return random.choice([
Model.Move.UP,
Model.Move.DOWN,
Model.Move.LEFT,
Model.Move.RIGHT,
])
class InteractivePlayer(Player):
def getMove(self, board, score):
print self.m
print "Score: %d\n" % score
moves = {
'1': Model.Move.UP,
'2': Model.Move.DOWN,
'3': Model.Move.LEFT,
'4': Model.Move.RIGHT,
}
move = None
while move not in moves:
move = raw_input("Enter move: 1=UP, 2=DOWN, 3=LEFT, 4=RIGHT: ")
return moves[move]
```
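The players above share the `run` driver defined on `Player`; a short sketch of driving one (hypothetical, assuming Python 2 as the module's print statements require):
```python
import Player

p = Player.BaselineCornerPlayer()
# run returns (elapsed seconds, scores, maxTiles, numMoves) across iterations
(elapsed, scores, maxTiles, numMoves) = p.run(numIters=5, printStats=True)
```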
|
{
"source": "JDNdeveloper/Interview-Practice-Python",
"score": 4
}
|
#### File: Interview-Practice-Python/src/Heap.py
```python
import math
class Heap:
    class Node:
        def __init__(self, data=None, key=None):
            self._data = data
            self._key = key
def __init__(self):
self._heap = []
self._size = 0
self._isHeap = True
def _boundsCheck(func):
def wrapper(self, *args):
if self._size == 0:
return False
elif len(args) > 0:
pos = args[0]
if pos < 0 or pos >= self._size:
return False
return func(self, *args)
return wrapper
def _confirmIsHeap(func):
def wrapper(self, *args):
if self._isHeap == False:
                self.buildHeap()
return func(self, *args)
return wrapper
@property
def size(self):
return self._size
@property
def isHeap(self):
return self._isHeap
def insert(self, data, key):
self._heap.append(self.Node(data=data, key=key))
self._size += 1
@_boundsCheck
@_confirmIsHeap
def extractMax(self):
pass
self._size -= 1
@_boundsCheck
@_confirmIsHeap
def increaseKey(self, pos, key):
return True
@_boundsCheck
@_confirmIsHeap
    def heapSort(self):
        pass  # sorting logic not yet implemented
        self._isHeap = False
    def buildHeap(self):
        self._isHeap = True
@_boundsCheck
@_confirmIsHeap
def _heapify(self, pos):
pass
@_boundsCheck
def _parent(self, pos):
return int(pos / 2 - 1) if pos % 2 == 0 else int(pos / 2)
@_boundsCheck
def _left(self, pos):
return pos * 2 + 1
@_boundsCheck
def _right(self, pos):
return pos * 2 + 2
if __name__ == "__main__":
def decoratorTest():
heap = Heap()
assert heap.increaseKey(3, 2) == False
assert heap.increaseKey(0, 3) == False
heap.insert(2, 10)
heap.insert(3, 13)
heap.insert(2, 2)
heap.insert(10, 4)
assert heap.increaseKey(3, 10) == True
assert heap.increaseKey(0, 3) == True
assert heap.increaseKey(-1, 32) == False
assert heap.increaseKey(4, 10) == False
def parentTest():
heap = Heap()
heap._size = 11
assert heap._parent(3) == 1
assert heap._parent(4) == 1
assert heap._parent(9) == 4
assert heap._parent(10) == 4
assert heap._left(5) == 11
assert heap._left(7) == 15
assert heap._left(8) == 17
assert heap._right(4) == 10
assert heap._right(6) == 14
assert heap._right(2) == 6
def heapTest():
heap = Heap()
assert heap.size == 0
assert heap.isHeap == True
for data in range(20):
key = int(data / 2 + 5)
heap.insert(data, key)
assert heap.size == 20
assert heap.isHeap == True
assert 19 == heap.extractMax()
assert 18 == heap.extractMax()
assert heap.size == 18
sortedList = list(self._heap)
sortedList.sort()
heap.heapSort()
assert self._heap == sortedList
assert heap.isHeap == False
assert heap[0] == 0
heap._heapify()
assert heap[0] == 17
assert heap.isHeap == True
heap.heapSort()
assert heap.isHeap == False
assert heap.extractMax == 17
assert heap.isHeap == True
decoratorTest()
parentTest()
#heapTest()
```
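`extractMax`, `heapSort`, and `_heapify` above are stubs; below is a minimal sketch (an assumption about intent, not the author's implementation) of the max-heap sift-down those stubs would need, reusing the same 0-based child indexing as `_left`/`_right`:
```python
def sift_down(heap, pos, size, key=lambda node: node._key):
    """Restore the max-heap property for the subtree rooted at pos."""
    while True:
        left, right = pos * 2 + 1, pos * 2 + 2
        largest = pos
        if left < size and key(heap[left]) > key(heap[largest]):
            largest = left
        if right < size and key(heap[right]) > key(heap[largest]):
            largest = right
        if largest == pos:
            return
        heap[pos], heap[largest] = heap[largest], heap[pos]
        pos = largest  # continue sifting down the swapped subtree
```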
|
{
"source": "JDNdeveloper/ProfileTool",
"score": 2
}
|
#### File: ProfileTool/src/profile_tool_test.py
```python
import filecmp
import os
from shutil import copyfile
import profile_tool as pt
orig_profile = 'example_profile'
test_profile = 'test_profile'
def runTest():
copyfile( orig_profile, test_profile )
ptool = pt.profile_tool( test_profile )
# read groups, then write them back, confirm file doesn't
# change
groups = ptool.readGroups()
ptool.writeGroups( groups )
assert filecmp.cmp( orig_profile, test_profile )
# confirm everything is as it should be from the file
groups = ptool.readGroups()
assert groups[ 'default_project' ] == [ ( 'Proj_A.0', ) ]
assert groups[ 'projects' ] == [
( 'Proj_A.3', 'Pack_A/subdir', 'Type_A' ),
( 'Proj_B.4', 'Pack_B', 'Type_B' ),
( 'Proj_C.0', 'Pack_C', 'Type_C' ),
]
os.remove( test_profile )
if __name__ == '__main__':
runTest()
print "TEST PASSED"
```
#### File: src/project_tools/default_pkg.py
```python
import argparse
import os
import sys
from proj_helper import proj_helper
def default_pkg( pkg_name, proj_name, profile='' ):
ph = proj_helper( profile )
ph.read_profile()
full_proj_name = ph.get_full_project_name( proj_name )
projects = ph.projects
old_pkg_name, proj_type = projects[ full_proj_name ]
projects[ full_proj_name ] = ( pkg_name, proj_type )
ph.projects = projects
ph.write_profile()
if __name__ == '__main__':
parser = argparse.ArgumentParser( description='change default package' )
parser.add_argument( 'package', type=str, help='new default package' )
parser.add_argument( '--project', type=str, default=os.environ[ 'CURR_WS' ],
help='project name (default is current project)' )
args = parser.parse_args()
pkg_name = args.package
proj_name = args.project
default_pkg( pkg_name, proj_name )
```
|
{
"source": "jdnemelka/lambdata_26",
"score": 3
}
|
#### File: lambdata_26/lambdata/helper_functions.py
```python
import pandas as pd
import numpy as np
import re
def null_count(df):
return df.isnull().sum()
def train_test_split(*arrays, test_size=None, train_size=None,
                     random_state=None, shuffle=True, stratify=None):
    """Split arrays into random train and test subsets.

    Adapted from sklearn.model_selection.train_test_split; the helpers
    imported below are sklearn internals and may move between versions.
    """
    from itertools import chain
    from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit
    from sklearn.model_selection._split import _validate_shuffle_split
    from sklearn.utils import indexable, _safe_indexing
    from sklearn.utils.validation import _num_samples

    n_arrays = len(arrays)
    if n_arrays == 0:
        raise ValueError("At least one array required as input")
    arrays = indexable(*arrays)
    n_samples = _num_samples(arrays[0])
    n_train, n_test = _validate_shuffle_split(n_samples, test_size, train_size,
                                              default_test_size=0.25)
    if shuffle is False:
        if stratify is not None:
            raise ValueError(
                "Stratified train/test split is not implemented for "
                "shuffle=False")
        train = np.arange(n_train)
        test = np.arange(n_train, n_train + n_test)
    else:
        if stratify is not None:
            CVClass = StratifiedShuffleSplit
        else:
            CVClass = ShuffleSplit
        cv = CVClass(test_size=n_test,
                     train_size=n_train,
                     random_state=random_state)
        train, test = next(cv.split(X=arrays[0], y=stratify))
    return list(chain.from_iterable((_safe_indexing(a, train),
                                     _safe_indexing(a, test)) for a in arrays))
def addy_split(addy_series):
    """Split a series of 'City, ST 12345' style addresses into columns."""
    #Create Dataframe
    df = pd.DataFrame()
    #Create City Column (text up to the comma; comma stripped afterwards)
    search = []
    for value in addy_series:
        search.append(re.match(r'[A-Za-z ]+,', value).group())
    df['city'] = search
    df['city'] = df['city'].str.replace(r',', '')
    #Create State Column (first run of two capital letters)
    search = []
    for value in addy_series:
        search.append(re.search(r'[A-Z]{2}', value).group())
    df['State'] = search
    #Create Zip Code Column (five digits)
    search = []
    for value in addy_series:
        search.append(re.search(r'[0-9]{5}', value).group())
    df['Zip Code'] = search
    return df
```
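A quick check of `addy_split` on two hypothetical addresses (the import path below is an assumption about the package layout):
```python
import pandas as pd

from helper_functions import addy_split  # hypothetical import path

addresses = pd.Series([
    'Norman, OK 73069',
    'Salt Lake City, UT 84101',
])
print(addy_split(addresses))
#               city State Zip Code
# 0           Norman    OK    73069
# 1  Salt Lake City    UT    84101
```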
#### File: lambdata_26/lambdata/__init__.py
```python
import pandas as pd
import numpy as np
favorite_dogs = ['corgi', 'windsor', 'grizz', 'cutie_pie']
def null_count(df):
"""Cleans Pandas Dataframes"""
#TODO - Implement such and such
print('import succesful')
```
|
{
"source": "jdngibson/CanFlood",
"score": 2
}
|
#### File: canflood/build/rsamp.py
```python
import logging, configparser, datetime
start = datetime.datetime.now()
#==============================================================================
# imports------------
#==============================================================================
import os
import numpy as np
import pandas as pd
#Qgis imports
from qgis.core import QgsVectorLayer, QgsRasterLayer, QgsFeatureRequest, QgsProject, \
QgsWkbTypes, QgsProcessingFeedback, QgsCoordinateTransform
from qgis.analysis import QgsRasterCalculatorEntry, QgsRasterCalculator
import processing
#==============================================================================
# custom imports
#==============================================================================
from hlpr.exceptions import QError as Error
from hlpr.Q import Qcoms,vlay_get_fdf, vlay_get_fdata, view, vlay_rename_fields
from hlpr.plot import Plotr
#==============================================================================
# functions-------------------
#==============================================================================
class Rsamp(Plotr, Qcoms):
""" sampling hazard rasters from the inventory
METHODS:
run(): main caller for Hazard Sampler 'Sample' button
"""
out_fp = None
names_d = None
rname_l =None
psmp_codes = {
0:'Count',
1: 'Sum',
2: 'Mean',
3: 'Median',
#4: Std. dev.
5: 'Min',
6: 'Max',
# 7: Range
# 8: Minority
# 9: Majority (mode)
# 10: Variety
# 11: Variance
# 12: All
}
dep_rlay_d = dict() #container for depth rasters (for looped runs)
impactfmt_str = '.2f' #formatting impact values on plots
def __init__(self,
fname='expos', #prefix for file name
*args, **kwargs):
"""
Plugin: called by each button push
"""
super().__init__(*args, **kwargs)
self.fname=fname
#flip the codes
self.psmp_codes = dict(zip(self.psmp_codes.values(), self.psmp_codes.keys()))
self.logger.debug('Rsamp.__init__ w/ feedback \'%s\''%type(self.feedback).__name__)
def load_layers(self, #load data to project (for console runs)
rfp_l, finv_fp,
providerLib='ogr'
):
"""
special input loader for StandAlone runs"""
log = self.logger.getChild('load_layers')
#======================================================================
# load rasters
#======================================================================
raster_d = dict()
for fp in rfp_l:
rlayer = self.load_rlay(fp)
#add it in
basefn = os.path.splitext(os.path.split(fp)[1])[0]
raster_d[basefn] = rlayer
#======================================================================
# load finv vector layer
#======================================================================
fp = finv_fp
assert os.path.exists(fp), fp
basefn = os.path.splitext(os.path.split(fp)[1])[0]
vlay_raw = QgsVectorLayer(fp,basefn,providerLib)
# checks
if not isinstance(vlay_raw, QgsVectorLayer):
raise IOError
#check if this is valid
if not vlay_raw.isValid():
            raise Error('loaded vlay \'%s\' is not valid. \n \n did you initialize?'%vlay_raw.name())
#check if it has geometry
if vlay_raw.wkbType() == 100:
raise Error('loaded vlay has NoGeometry')
vlay = vlay_raw
dp = vlay.dataProvider()
log.info('loaded vlay \'%s\' as \'%s\' %s geo with %i feats from file: \n %s'
%(vlay.name(), dp.storageType(), QgsWkbTypes().displayString(vlay.wkbType()), dp.featureCount(), fp))
#======================================================================
# wrap
#======================================================================
return list(raster_d.values()), vlay
def load_rlays(self, #shortcut for loading a set of rasters in a directory
data_dir,
rfn_l=None, #if None, loads all tifs in the directory
aoi_vlay = None,
logger=None,
**kwargs
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log=logger.getChild('load_rlays')
#=======================================================================
# prechecks
#=======================================================================
assert os.path.exists(data_dir)
#=======================================================================
# get filenames
#=======================================================================
#load all in the passed directory
if rfn_l is None:
rfn_l = [e for e in os.listdir(data_dir) if e.endswith('.tif')]
log.info('scanned directory and found %i rasters: %s'%(len(rfn_l), data_dir))
rfp_d = {fn:os.path.join(data_dir, fn) for fn in rfn_l} #get filepaths
#check
for fn, fp in rfp_d.items():
assert os.path.exists(fp), 'bad filepath for \"%s\''%fn
#=======================================================================
# loop and assemble
#=======================================================================
log.info('loading %i rlays'%len(rfp_d))
rlay_d = dict()
for fn, fp in rfp_d.items():
rlay_d[fn] = self.load_rlay(fp, logger=log,aoi_vlay=aoi_vlay, **kwargs)
log.info('loaded %i'%len(rlay_d))
return rlay_d
def run(self,
rlayRaw_l, #set of rasters to sample
finv_raw, #inventory layer
cid = None, #index field name on finv
#exposure value controls
psmp_stat='Max', #for polygon finvs, statistic to sample
#inundation sampling controls
as_inun=False, #whether to sample for inundation (rather than wsl values)
dtm_rlay=None, #dtm raster (for as_inun=True)
dthresh = 0, #fordepth threshold
clip_dtm=False,
fname = None, #prefix for layer name
):
"""
Generate the exposure dataset ('expos') from a set of hazard event rasters
"""
#======================================================================
# defaults
#======================================================================
log = self.logger.getChild('run')
if cid is None: cid = self.cid
if fname is None: fname=self.fname
self.as_inun = as_inun
self.finv_name = finv_raw.name() #for plotters
log.info('executing on %i rasters'%(len(rlayRaw_l)))
#======================================================================
# precheck
#======================================================================
#assert self.crs == self.qproj.crs(), 'crs mismatch!'
#check the finv_raw
assert isinstance(finv_raw, QgsVectorLayer), 'bad type on finv_raw'
"""rasteres are checked below"""
assert finv_raw.crs() == self.qproj.crs(), 'finv_raw crs %s doesnt match projects \'%s\'' \
%(finv_raw.crs().authid(), self.qproj.crs().authid())
assert cid in [field.name() for field in finv_raw.fields()], \
'requested cid field \'%s\' not found on the finv_raw'%cid
#check the rasters
rname_l = []
for rlay in rlayRaw_l:
assert isinstance(rlay, QgsRasterLayer)
assert rlay.crs() == self.qproj.crs(), 'rlay %s crs doesnt match project'%(rlay.name())
rname_l.append(rlay.name())
self.rname_l = rname_l
#======================================================================
# prep the finv for sampling
#======================================================================
self.finv_name = finv_raw.name()
#drop all the fields except the cid
finv = self.deletecolumn(finv_raw, [cid], invert=True)
#fix the geometry
finv = self.fixgeometries(finv, logger=log)
#check field lengths
self.finv_fcnt = len(finv.fields())
assert self.finv_fcnt== 1, 'failed to drop all the fields'
self.gtype = QgsWkbTypes().displayString(finv.wkbType())
if self.gtype.endswith('Z'):
log.warning('passed finv has Z values... these are not supported')
self.feedback.setProgress(20)
#=======================================================================
# prep the raster layers------
#=======================================================================
self.feedback.setProgress(40)
#=======================================================================
#inundation runs--------
#=======================================================================
if as_inun:
#===================================================================
# #prep DTM
#===================================================================
if clip_dtm:
"""makes the raster clipping a bitcleaner
2020-05-06
ran 2 tests, and this INCREASED run times by ~20%
set default to clip_dtm=False
"""
log.info('trimming dtm \'%s\' by finv extents'%(dtm_rlay.name()))
finv_buf = self.polygonfromlayerextent(finv,
round_to=dtm_rlay.rasterUnitsPerPixelX()*3,#buffer by 3x the pixel size
logger=log )
#clip to just the polygons
dtm_rlay1 = self.cliprasterwithpolygon(dtm_rlay,finv_buf, logger=log)
else:
dtm_rlay1 = dtm_rlay
self.feedback.setProgress(60)
#===================================================================
            # sample by geotype
#===================================================================
if 'Polygon' in self.gtype:
res_vlay = self.samp_inun(finv,rlayRaw_l, dtm_rlay1, dthresh)
elif 'Line' in self.gtype:
res_vlay = self.samp_inun_line(finv, rlayRaw_l, dtm_rlay1, dthresh)
else:
raise Error('\'%s\' got unexpected gtype: %s'%(finv.name(), self.gtype))
res_name = '%s_%s_%i_%i_d%.2f'%(
fname, self.tag, len(rlayRaw_l), res_vlay.dataProvider().featureCount(), dthresh)
#=======================================================================
#WSL value sampler------
#=======================================================================
else:
res_vlay = self.samp_vals(finv,rlayRaw_l, psmp_stat)
res_name = '%s_%s_%i_%i'%(fname, self.tag, len(rlayRaw_l), res_vlay.dataProvider().featureCount())
if not 'Point' in self.gtype: res_name = res_name + '_%s'%psmp_stat.lower()
res_vlay.setName(res_name)
#=======================================================================
# wrap
#=======================================================================
"""TODO: harmonize output types for build modules"""
#get dataframe like results
try:
df = vlay_get_fdf(res_vlay, logger=log).set_index(cid, drop=True
).rename(columns=self.names_d)
"""
view(df)
d.keys()
"""
#get sorted index by values
sum_ser = pd.Series({k:cser.dropna().sum() for k, cser in df.items()}).sort_values()
#set this new index
self.res_df = df.loc[:, sum_ser.index]
except Exception as e:
log.warning('failed to convert vlay to dataframe w/ \n %s'%e)
#max out the progress bar
self.feedback.setProgress(90)
log.info('sampling finished')
self.psmp_stat=psmp_stat #set for val_str
return res_vlay
    def runPrep(self, #apply raster preparation handles to a set of rasters
rlayRaw_l,
**kwargs
):
#=======================================================================
# do the prep
#=======================================================================
self.feedback.setProgress(20)
res_l = []
for rlayRaw in rlayRaw_l:
rlay = self.prep(rlayRaw, **kwargs)
res_l.append(rlay)
self.feedback.upd_prog(70/len(rlayRaw_l), method='append')
assert isinstance(rlay, QgsRasterLayer)
self.feedback.setProgress(90)
return res_l
def prep(self, #prepare a raster for sampling
rlayRaw, #set of raw raster to apply prep handles to
allow_download=False,
aoi_vlay=None,
allow_rproj=False,
clip_rlays=False,
scaleFactor=1.00,
logger=None,
):
"""
#=======================================================================
# mstore
#=======================================================================
todo: need to fix this... using the store is currently crashing Qgis
"""
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log = logger.getChild('prep')
log.info('on \'%s\''%rlayRaw.name())
res_d = dict() #reporting container
#start a new store for handling intermediate layers
#mstore = QgsMapLayerStore()
newLayerName='%s_prepd' % rlayRaw.name()
#=======================================================================
# precheck
#=======================================================================
#check the aoi
if clip_rlays: assert isinstance(aoi_vlay, QgsVectorLayer)
if not aoi_vlay is None:
self.check_aoi(aoi_vlay)
#=======================================================================
# dataProvider check/conversion-----
#=======================================================================
if not rlayRaw.providerType() == 'gdal':
msg = 'raster \'%s\' providerType = \'%s\' and allow_download=%s' % (
rlayRaw.name(), rlayRaw.providerType(), allow_download)
#check if we're allowed to fix
if not allow_download:
raise Error(msg)
log.info(msg)
#set extents
if not aoi_vlay is None: #aoi extents in correct CRS
extent = QgsCoordinateTransform(aoi_vlay.crs(), rlayRaw.crs(),
self.qproj.transformContext()
).transformBoundingBox(aoi_vlay.extent())
else:
extent = rlayRaw.extent() #layers extents
#save a local copy
ofp = self.write_rlay(rlayRaw, extent=extent,
newLayerName='%s_gdal' % rlayRaw.name(),
out_dir = os.environ['TEMP'], #will write to the working directory at the end
logger=log)
#load this file
rlayDp = self.load_rlay(ofp, logger=log)
#check
assert rlayDp.bandCount() == rlayRaw.bandCount()
assert rlayDp.providerType() == 'gdal'
res_d['download'] = 'from \'%s\' to \'gdal\''%rlayRaw.providerType()
self.mstore.addMapLayer(rlayRaw)
else:
rlayDp = rlayRaw
log.debug('%s has expected dataProvider \'gdal\''%rlayRaw.name())
#=======================================================================
# re-projection--------
#=======================================================================
if not rlayDp.crs() == self.qproj.crs():
msg = 'raster \'%s\' crs = \'%s\' and allow_rproj=%s' % (
rlayDp.name(), rlayDp.crs(), allow_rproj)
if not allow_rproj:
raise Error(msg)
log.info(msg)
#save a local copy?
newName = '%s_%s' % (rlayDp.name(), self.qproj.crs().authid()[5:])
"""just write at the end
if allow_download:
output = os.path.join(self.out_dir, '%s.tif' % newName)
else:
output = 'TEMPORARY_OUTPUT'"""
output = 'TEMPORARY_OUTPUT'
#change the projection
rlayProj = self.warpreproject(rlayDp, crsOut=self.qproj.crs(),
output=output, layname=newName)
res_d['rproj'] = 'from %s to %s'%(rlayDp.crs().authid(), self.qproj.crs().authid())
self.mstore.addMapLayer(rlayDp)
else:
log.debug('\'%s\' crs matches project crs: %s'%(rlayDp.name(), rlayDp.crs()))
rlayProj = rlayDp
#=======================================================================
# aoi slice----
#=======================================================================
if clip_rlays:
log.debug('trimming raster %s by AOI'%rlayRaw.name())
log.warning('not Tested!')
#clip to just the polygons
rlayTrim = self.cliprasterwithpolygon(rlayProj,aoi_vlay, logger=log)
res_d['clip'] = 'with \'%s\''%aoi_vlay.name()
self.mstore.addMapLayer(rlayProj)
else:
rlayTrim = rlayProj
#===================================================================
# scale
#===================================================================
if not float(scaleFactor) ==float(1.00):
rlayScale = self.raster_mult(rlayTrim, scaleFactor, logger=log)
res_d['scale'] = 'by %.4f'%scaleFactor
self.mstore.addMapLayer(rlayTrim)
else:
rlayScale = rlayTrim
#=======================================================================
# final write
#=======================================================================
resLay1 = rlayScale
write=False
if len(res_d)>0: #only where we did some operations
write=True
"""write it regardless
if len(res_d)==1 and 'download' in res_d.keys():
write=False"""
if write:
resLay1.setName(newLayerName)
ofp = self.write_rlay(resLay1, logger=log)
#mstore.addMapLayer(resLay1)
#use the filestore layer
resLay = self.load_rlay(ofp, logger=log)
"""control canvas loading in the plugin"""
        else:
            log.warning('layer \'%s\' not written to file!'%resLay1.name())
            resLay=resLay1
#=======================================================================
# wrap
#=======================================================================
log.info('finished w/ %i prep operations on \'%s\' \n %s'%(
len(res_d), resLay.name(), res_d))
#clean up the store
#=======================================================================
# _ = mstore.takeMapLayer(rlayRaw) #take out the raw (without deleteing)
# try:
# _ = mstore.takeMapLayer(resLay) #try and pull out the result layer
# except:
# log.warning('failed to remove \'%s\' from store'%resLay.name())
#=======================================================================
"""
for k,v in mstore.mapLayers().items():
print(k,v)
"""
#self.mstore.removeAllMapLayers() #clear all layers
assert isinstance(resLay, QgsRasterLayer)
return resLay
def samp_vals(self, #sample a set of rasters with a vectorlayer
finv, raster_l,psmp_stat):
"""
this is NOT for inundation percent
can handle all 3 geometries"""
log = self.logger.getChild('samp_vals')
#=======================================================================
# build the loop
#=======================================================================
gtype=self.gtype
if 'Polygon' in gtype:
assert psmp_stat in self.psmp_codes, 'unrecognized psmp_stat'
psmp_code = self.psmp_codes[psmp_stat] #sample each raster
algo_nm = 'qgis:zonalstatistics'
elif 'Point' in gtype:
algo_nm = 'qgis:rastersampling'
elif 'Line' in gtype:
algo_nm = 'native:pointsalonglines'
else:
raise Error('unsupported gtype: %s'%gtype)
#=======================================================================
# sample loop
#=======================================================================
names_d = dict()
log.info('sampling %i raster layers w/ algo \'%s\' and gtype: %s'%(
len(raster_l), algo_nm, gtype))
for indxr, rlay in enumerate(raster_l):
log.info('%i/%i sampling \'%s\' on \'%s\''%(
indxr+1, len(raster_l), finv.name(), rlay.name()))
ofnl = [field.name() for field in finv.fields()]
self.mstore.addMapLayer(finv)
#===================================================================
# sample.poly----------
#===================================================================
if 'Polygon' in gtype:
algo_nm = 'native:zonalstatisticsfb'
ins_d = { 'COLUMN_PREFIX':indxr,
'INPUT_RASTER':rlay,
'INPUT':finv,
'RASTER_BAND':1,
'STATISTICS':[psmp_code],#0: pixel counts, 1: sum
'OUTPUT' : 'TEMPORARY_OUTPUT',
}
#execute the algo
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
finv = res_d['OUTPUT']
#=======================================================================
# sample.Line--------------
#=======================================================================
elif 'Line' in gtype:
finv = self.line_sample_stats(finv, rlay,[psmp_stat], logger=log)
#======================================================================
# sample.Points----------------
#======================================================================
elif 'Point' in gtype:
#build the algo params
params_d = { 'COLUMN_PREFIX' : rlay.name(),
'INPUT' : finv,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'RASTERCOPY' : rlay}
#execute the algo
res_d = processing.run(algo_nm, params_d, feedback=self.feedback)
#extract and clean results
finv = res_d['OUTPUT']
else:
raise Error('unexpected geo type: %s'%gtype)
#===================================================================
# sample.wrap
#===================================================================
assert isinstance(finv, QgsVectorLayer)
assert len(finv.fields()) == self.finv_fcnt + indxr +1, \
'bad field length on %i'%indxr
finv.setName('%s_%i'%(self.finv_name, indxr))
#===================================================================
# correct field names
#===================================================================
"""
algos don't assign good field names.
collecting a conversion dictionary then adjusting below
TODO: propagate these field renames to the loaded result layers
"""
            #get/update the field names
nfnl = [field.name() for field in finv.fields()]
new_fn = set(nfnl).difference(ofnl) #new field names not in the old
if len(new_fn) > 1:
raise Error('bad mismatch: %i \n %s'%(len(new_fn), new_fn))
elif len(new_fn) == 1:
names_d[list(new_fn)[0]] = rlay.name()
else:
raise Error('bad fn match')
log.debug('sampled %i values on raster \'%s\''%(
finv.dataProvider().featureCount(), rlay.name()))
self.names_d = names_d #needed by write()
log.debug('finished w/ \n%s'%self.names_d)
return finv
"""
view(finv)
"""
def samp_inun(self, #inundation percent for polygons
finv, raster_l, dtm_rlay, dthresh,
):
#=======================================================================
# defaults
#=======================================================================
log = self.logger.getChild('samp_inun')
gtype=self.gtype
#setup temp dir
import tempfile #todo: move this up top
temp_dir = tempfile.mkdtemp()
#=======================================================================
# precheck
#=======================================================================
dp = finv.dataProvider()
assert isinstance(dtm_rlay, QgsRasterLayer)
assert isinstance(dthresh, float)
assert 'Memory' in dp.storageType() #zonal stats makes direct edits
assert 'Polygon' in gtype
#=======================================================================
# sample loop---------
#=======================================================================
"""
too memory intensive to handle writing of all these.
        an advanced user could retrieve from the working folder if desired
"""
names_d = dict()
parea_d = dict()
for indxr, rlay in enumerate(raster_l):
log = self.logger.getChild('samp_inun.%s'%rlay.name())
ofnl = [field.name() for field in finv.fields()]
#===================================================================
# #get depth raster
#===================================================================
dep_rlay = self._get_depr(dtm_rlay, log, temp_dir, rlay)
#===================================================================
# get threshold
#===================================================================
#reduce to all values above depththreshold
log.info('calculating %.2f threshold raster'%dthresh)
"""
TODO: speed this up somehow... super slow
native calculator?
"""
thr_rlay = self.grastercalculator(
'A*(A>%.2f)'%dthresh, #null if not above minval
{'A':dep_rlay},
logger=log,
layname= '%s_mv'%dep_rlay.name()
)
#===================================================================
# #get cell counts per polygon
#===================================================================
log.info('getting pixel counts on %i polys'%finv.dataProvider().featureCount())
algo_nm = 'native:zonalstatisticsfb'
ins_d = { 'COLUMN_PREFIX':indxr,
'INPUT_RASTER':thr_rlay,
'INPUT':finv,
'RASTER_BAND':1,
'STATISTICS':[0],#0: pixel counts, 1: sum
'OUTPUT' : 'TEMPORARY_OUTPUT',
}
#execute the algo
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
finvw = res_d['OUTPUT']
"""
view(finvw)
view(finv)
"""
#===================================================================
# check/correct field names
#===================================================================
            #get/update the field names
nfnl = [field.name() for field in finvw.fields()]
new_fn = set(nfnl).difference(ofnl) #new field names not in the old
if len(new_fn) > 1:
"""
possible error with algo changes
"""
raise Error('zonalstatistics generated more new fields than expected: %i \n %s'%(
len(new_fn), new_fn))
elif len(new_fn) == 1:
names_d[list(new_fn)[0]] = rlay.name()
else:
raise Error('bad fn match')
#===================================================================
# #clean up the layers
#===================================================================
self.mstore.addMapLayer(finv)
self.mstore.removeMapLayer(finv)
finv = finvw
#===================================================================
# update pixel size
#===================================================================
parea_d[rlay.name()] = rlay.rasterUnitsPerPixelX()*rlay.rasterUnitsPerPixelY()
#=======================================================================
# area calc-----------
#=======================================================================
log = self.logger.getChild('samp_inun')
log.info('calculating areas on %i results fields:\n %s'%(len(names_d), list(names_d.keys())))
#add geometry fields
finv = self.addgeometrycolumns(finv, logger = log)
#get data frame
df_raw = vlay_get_fdf(finv, logger=log)
"""
view(df_raw)
"""
df = df_raw.rename(columns=names_d)
#multiply each column by corresponding raster's cell size
res_df = df.loc[:, names_d.values()].multiply(pd.Series(parea_d)).round(self.prec)
res_df = res_df.rename(columns={coln:'%s_a'%coln for coln in res_df.columns})
#divide by area of each polygon
frac_df = res_df.div(df_raw['area'], axis=0).round(self.prec)
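        # e.g. (hypothetical numbers): 40 wet pixels * 25 m2/pixel = 1000 m2
        # inundated; for a 4000 m2 polygon the raw fraction is 1000/4000 = 0.25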
d = {coln:'%s_pct_raw'%coln for coln in frac_df.columns}
frac_df = frac_df.rename(columns=d)
res_df = res_df.join(frac_df)#add back in results
#adjust for excessive fractions
booldf = frac_df>1
d1 = {coln:'%s_pct'%ename for ename, coln in d.items()}
if booldf.any().any():
log.warning('got %i (of %i) pct values >1.00. setting to 1.0 (bad pixel/polygon ratio?)'%(
booldf.sum().sum(), booldf.size))
fix_df = frac_df.where(~booldf, 1.0)
fix_df = fix_df.rename(columns=d1)
res_df = res_df.join(fix_df)
else:
res_df = res_df.rename(columns=d1)
#add back in all the raw
res_df = res_df.join(df_raw.rename(columns=names_d))
#set the reuslts converter
self.names_d = {coln:ename for coln, ename in dict(zip(d1.values(), names_d.values())).items()}
#=======================================================================
        # write working results
#=======================================================================
ofp = os.path.join(temp_dir, 'RAW_rsamp_SampInun_%s_%.2f.csv'%(self.tag, dthresh))
res_df.to_csv(ofp, index=None)
log.info('wrote working data to \n %s'%ofp)
#slice to results only
res_df = res_df.loc[:,[self.cid]+list(d1.values())]
        log.info('data assembled w/ %s: \n %s'%(str(res_df.shape), res_df.columns.tolist()))
#=======================================================================
# bundle back into vectorlayer
#=======================================================================
geo_d = vlay_get_fdata(finv, geo_obj=True, logger=log)
res_vlay = self.vlay_new_df2(res_df, crs=finv.crs(), geo_d=geo_d, logger=log,
layname='%s_%s_inun'%(self.tag, finv.name()))
        log.info('finished w/ %s'%res_vlay.name())
return res_vlay
def samp_inun_line(self, #inundation percent for Line
finv, raster_l, dtm_rlay, dthresh,
):
""""
couldn't find a great pre-made algo
option 1:
SAGA profile from lines (does not retain line attributes)
join attributes by nearest (to retrieve XID)
option 2:
Generate points (pixel centroids) along line
(does not retain line attributes)
generates points on null pixel values
sample points
join by nearest
option 3:
add geometry attributes
Points along geometry (retains attribute)
sample raster
count those above threshold
divide by total for each line
get % above threshold for each line
get km inundated for each line
"""
#=======================================================================
# defaults
#=======================================================================
log = self.logger.getChild('samp_inun_line')
gtype=self.gtype
#setup temp dir
import tempfile #todo: move this up top
temp_dir = tempfile.mkdtemp()
#=======================================================================
# precheck
#=======================================================================
dp = finv.dataProvider()
assert isinstance(dtm_rlay, QgsRasterLayer)
assert isinstance(dthresh, float), 'expected float for dthresh. got %s'%type(dthresh)
assert 'Memory' in dp.storageType() #zonal stats makes direct edits
assert 'Line' in gtype
#=======================================================================
# sample loop---------
#=======================================================================
"""
too memory intensive to handle writing of all these.
        an advanced user could retrieve from the working folder if desired
"""
names_d = dict()
for indxr, rlay in enumerate(raster_l):
log = self.logger.getChild('samp_inunL.%s'%rlay.name())
ofnl = [field.name() for field in finv.fields()]
#===================================================================
# #get depth raster
#===================================================================
dep_rlay = self._get_depr(dtm_rlay, log, temp_dir, rlay)
#===============================================================
# #convert to points
#===============================================================
params_d = { 'DISTANCE' : dep_rlay.rasterUnitsPerPixelX(),
'END_OFFSET' : 0,
'INPUT' : finv,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'START_OFFSET' : 0 }
res_d = processing.run('native:pointsalonglines', params_d, feedback=self.feedback)
fpts_vlay = res_d['OUTPUT']
#===============================================================
# #sample the raster
#===============================================================
ofnl2 = [field.name() for field in fpts_vlay.fields()]
params_d = { 'COLUMN_PREFIX' : rlay.name(),
'INPUT' : fpts_vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'RASTERCOPY' : dep_rlay}
res_d = processing.run('qgis:rastersampling', params_d, feedback=self.feedback)
fpts_vlay = res_d['OUTPUT']
#get new field name
new_fn = set([field.name() for field in fpts_vlay.fields()]).difference(ofnl2) #new field names not in the old
assert len(new_fn)==1
new_fn = list(new_fn)[0]
#===================================================================
# clean/pull data
#===================================================================
#drop all the other fields
fpts_vlay = self.deletecolumn(fpts_vlay,[new_fn, self.cid], invert=True, logger=log )
#pull data
"""
the builtin statistics algo doesn't do a good job handling nulls
"""
pts_df = vlay_get_fdf(fpts_vlay, logger=log)
#===================================================================
# calc stats
#===================================================================
#set those below threshold to null
boolidx = pts_df[new_fn]<=dthresh
pts_df.loc[boolidx, new_fn] = np.nan
log.debug('set %i (of %i) \'%s\' vals <= %.2f to null'%(
boolidx.sum(), len(boolidx), new_fn, dthresh))
"""
view(pts_df)
(pts_df[self.cid]==4).sum()
"""
#get count of REAL values in each xid group
pts_df['all']=1 #add dummy column for the denominator
sdf = pts_df.groupby(self.cid).count().reset_index(drop=False).rename(
columns={new_fn:'real'})
#get ratio (non-NAN count / all count)
new_fn = rlay.name()
sdf[new_fn] = sdf['real'].divide(sdf['all']).round(self.prec)
assert sdf[new_fn].max() <=1
#===================================================================
# link in result
#===================================================================
#convert df back to a mlay
pstat_vlay = self.vlay_new_df2(sdf.drop(['all', 'real'], axis=1),
layname='%s_stats'%(finv.name()), logger=log)
#join w/ algo
params_d = { 'DISCARD_NONMATCHING' : False,
'FIELD' : self.cid,
'FIELDS_TO_COPY' : [new_fn],
'FIELD_2' : self.cid,
'INPUT' : finv,
'INPUT_2' : pstat_vlay,
'METHOD' : 1, #Take attributes of the first matching feature only (one-to-one)
'OUTPUT' : 'TEMPORARY_OUTPUT',
'PREFIX' : ''}
res_d = processing.run('native:joinattributestable', params_d, feedback=self.feedback)
finv = res_d['OUTPUT']
#===================================================================
# check/correct field names
#===================================================================
"""
algos don't assign good field names.
collecting a conversion dictionary then adjusting below
"""
#get/update the field names
nfnl = [field.name() for field in finv.fields()]
new_fn = set(nfnl).difference(ofnl) #new field names not in the old
if len(new_fn) > 1:
raise Error('unexpected algo behavior... bad new field count: %s'%new_fn)
elif len(new_fn) == 1:
names_d[list(new_fn)[0]] = rlay.name()
log.debug('updated names_d w/ %s'%rlay.name())
else:
raise Error('bad fn match')
#=======================================================================
# wrap-------------
#=======================================================================
self.names_d = dict() #names should be fine
log.debug('finished')
"""
view(finv)
"""
return finv
def _get_depr(self, #get a depth raster, but first check if its already been made
dtm_rlay, log, temp_dir, rlay):
dep_rlay_nm = '%s_%s' % (dtm_rlay.name(), rlay.name())
#pull previously created
if dep_rlay_nm in self.dep_rlay_d:
dep_rlay = self.dep_rlay_d[dep_rlay_nm]
#build fresh
else:
log.info('calculating depth raster \'%s\''%dep_rlay_nm)
#using Qgis raster calculator constructor
dep_rlay = self.raster_subtract(rlay, dtm_rlay, logger=log,
out_dir=os.path.join(temp_dir, 'dep'),
layname=dep_rlay_nm)
#store for next time
self.dep_rlay_d[dep_rlay_nm] = dep_rlay
return dep_rlay
def raster_subtract(self, #performs raster calculator rlayBig - rlaySmall
rlayBig, rlaySmall,
out_dir = None,
layname = None,
logger = None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
log = self.logger.getChild('raster_subtract')
if out_dir is None:
out_dir = os.environ['TEMP']
if layname is None:
layname = '%s_dep'%rlayBig.name()
#=======================================================================
# assemble the entries
#=======================================================================
entries_d = dict()
for tag, rlay in {'Big':rlayBig, 'Small':rlaySmall}.items():
rcentry = QgsRasterCalculatorEntry()
rcentry.raster=rlay
rcentry.ref = '%s@1'%tag
rcentry.bandNumber=1
entries_d[tag] = rcentry
#=======================================================================
# assemble parameters
#=======================================================================
formula = '%s - %s'%(entries_d['Big'].ref, entries_d['Small'].ref)
outputFile = os.path.join(out_dir, '%s.tif'%layname)
outputExtent = rlayBig.extent()
outputFormat = 'GTiff'
nOutputColumns = rlayBig.width()
nOutputRows = rlayBig.height()
rasterEntries =list(entries_d.values())
#=======================================================================
# precheck
#=======================================================================
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if os.path.exists(outputFile):
msg = 'requested outputFile exists: %s'%outputFile
if self.overwrite:
log.warning(msg)
os.remove(outputFile)
else:
raise Error(msg)
assert not os.path.exists(outputFile), 'requested outputFile already exists! \n %s'%outputFile
#=======================================================================
# execute
#=======================================================================
"""throwing depreciation warning"""
rcalc = QgsRasterCalculator(formula, outputFile, outputFormat, outputExtent,
nOutputColumns, nOutputRows, rasterEntries)
result = rcalc.processCalculation(feedback=self.feedback)
#=======================================================================
# check
#=======================================================================
if not result == 0:
raise Error(rcalc.lastError())
assert os.path.exists(outputFile)
log.info('saved result to: \n %s'%outputFile)
#=======================================================================
# retrieve result
#=======================================================================
rlay = QgsRasterLayer(outputFile, layname)
return rlay
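#usage sketch (hypothetical layer variables; assumes self.logger/self.feedback are set up):
#dep_rlay = self.raster_subtract(wsl_rlay, dtm_rlay,
# out_dir=r'C:\temp\dep', layname='wsl_dep1')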
def raster_mult(self, #performs raster calculator rlayRaw * scaleFactor
rlayRaw,
scaleFactor,
out_dir = None,
layname = None,
logger = None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
log = self.logger.getChild('raster_mult')
if out_dir is None:
out_dir = os.environ['TEMP']
if layname is None:
layname = '%s_scaled'%rlayRaw.name()
#=======================================================================
# precheck
#=======================================================================
assert scaleFactor >= 0.01, 'scaleFactor = %.2f is too low'%scaleFactor
assert round(scaleFactor, 4)!=round(1.0, 4), 'scaleFactor = 1.0'
#=======================================================================
# assemble the entries
#=======================================================================
entries_d = dict()
for tag, rlay in {'rlayRaw':rlayRaw}.items():
rcentry = QgsRasterCalculatorEntry()
rcentry.raster=rlay
rcentry.ref = '%s@1'%tag
rcentry.bandNumber=1
entries_d[tag] = rcentry
#=======================================================================
# assemble parameters
#=======================================================================
formula = '%s * %.2f'%(entries_d['rlayRaw'].ref, scaleFactor)
outputFile = os.path.join(out_dir, '%s.tif'%layname)
outputExtent = rlayRaw.extent()
outputFormat = 'GTiff'
nOutputColumns = rlayRaw.width()
nOutputRows = rlayRaw.height()
rasterEntries =list(entries_d.values())
#=======================================================================
# precheck
#=======================================================================
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if os.path.exists(outputFile):
msg = 'requested outputFile exists: %s'%outputFile
if self.overwrite:
log.warning(msg)
os.remove(outputFile)
else:
raise Error(msg)
assert not os.path.exists(outputFile), 'requested outputFile already exists! \n %s'%outputFile
#=======================================================================
# execute
#=======================================================================
"""throwing depreciation warning"""
rcalc = QgsRasterCalculator(formula, outputFile, outputFormat, outputExtent,
nOutputColumns, nOutputRows, rasterEntries)
result = rcalc.processCalculation(feedback=self.feedback)
#=======================================================================
# check
#=======================================================================
if not result == 0:
raise Error(rcalc.lastError())
assert os.path.exists(outputFile)
log.info('saved result to: \n %s'%outputFile)
#=======================================================================
# retrieve result
#=======================================================================
rlay = QgsRasterLayer(outputFile, layname)
return rlay
def line_sample_stats(self, #get raster stats using a line
line_vlay, #line vectorylayer with geometry to sample from
rlay, #raster to sample
sample_stats, #list of stats to sample
logger=None,
):
"""
sampling a raster layer with a line and a statistic
TODO: check if using the following is faster:
Densify by Interval
Drape
Extract Z
"""
if logger is None: logger=self.logger
log=logger.getChild('line_sample_stats')
log.debug('on %s'%(line_vlay.name()))
#drop everything to lower case
sample_stats = [e.lower() for e in sample_stats]
#===============================================================
# #convert to points
#===============================================================
params_d = { 'DISTANCE' : rlay.rasterUnitsPerPixelX(),
'END_OFFSET' : 0,
'INPUT' : line_vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'START_OFFSET' : 0 }
res_d = processing.run('native:pointsalonglines', params_d, feedback=self.feedback)
fpts_vlay = res_d['OUTPUT']
#===============================================================
# #sample the raster
#===============================================================
ofnl2 = [field.name() for field in fpts_vlay.fields()]
params_d = { 'COLUMN_PREFIX' : rlay.name(),
'INPUT' : fpts_vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'RASTERCOPY' : rlay}
res_d = processing.run('qgis:rastersampling', params_d, feedback=self.feedback)
fpts_vlay = res_d['OUTPUT']
"""
view(fpts_vlay)
"""
#get new field name
new_fn = set([field.name() for field in fpts_vlay.fields()]).difference(ofnl2) #new field names not in the old
assert len(new_fn)==1
new_fn = list(new_fn)[0]
#===============================================================
# get stats
#===============================================================
"""note this does not return xid values where everything sampled as null"""
params_d = { 'CATEGORIES_FIELD_NAME' : [self.cid],
'INPUT' : fpts_vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'VALUES_FIELD_NAME' :new_fn}
res_d = processing.run('qgis:statisticsbycategories', params_d, feedback=self.feedback)
stat_tbl = res_d['OUTPUT']
#===============================================================
# join stats back to line_vlay
#===============================================================
#check that the sample stat is in there
s = set(sample_stats).difference([field.name() for field in stat_tbl.fields()])
assert len(s)==0, 'requested sample statistics \'%s\' failed to generate'%s
#run algo
params_d = { 'DISCARD_NONMATCHING' : False,
'FIELD' : self.cid,
'FIELDS_TO_COPY' : sample_stats,
'FIELD_2' : self.cid,
'INPUT' : line_vlay,
'INPUT_2' : stat_tbl,
'METHOD' : 1, #Take attributes of the first matching feature only (one-to-one)
'OUTPUT' : 'TEMPORARY_OUTPUT',
'PREFIX' : ''} #prefix must be a string; passing the layer object here would fail
res_d = processing.run('native:joinattributestable', params_d, feedback=self.feedback)
line_vlay = res_d['OUTPUT']
log.debug('finished on %s w/ %i'%(line_vlay.name(), len(line_vlay)))
return line_vlay
#===========================================================================
# CHECKS--------
#===========================================================================
def check(self):
pass
def dtm_check(self, vlay):
log = self.logger.getChild('dtm_check')
df = vlay_get_fdf(vlay)
boolidx = df.isna()
if boolidx.any().any():
log.error('got %i (of %i) nulls on dtm sampler'%(boolidx.sum().sum(), boolidx.size))
log.info('passed checks')
#===========================================================================
# OUTPUTS--------
#===========================================================================
def write_res(self, #save expos dataset to file
vlay,
out_dir = None, #directory for outputs
names_d = None, #names conversion
rname_l = None,
res_name = None, #prefix for output name
write=True,
):
log = self.logger.getChild('write_res')
#======================================================================
# defaults
#======================================================================
if names_d is None: names_d = self.names_d
if rname_l is None: rname_l = self.rname_l
if out_dir is None: out_dir = self.out_dir
if res_name is None: res_name = vlay.name()
log.debug("on \'%s\'"%res_name)
#======================================================================
# prechecks
#======================================================================
assert os.path.exists(out_dir), 'bad out_dir'
#======================================================================
# get
#======================================================================
#extract data
df = vlay_get_fdf(vlay)
#rename
if len(names_d) > 0:
df = df.rename(columns=names_d)
log.info('renaming columns: \n names_d: %s \n df.cols:%s'%(
names_d, df.columns.tolist()))
#check the raster names
miss_l = set(rname_l).difference(df.columns.to_list())
if len(miss_l)>0:
log.warning('failed to map %i raster layer names onto results: \n %s'%(len(miss_l), miss_l))
df = df.set_index(self.cid, drop=True)
#=======================================================================
# write
#=======================================================================
if not write:
return df
out_fp = self.output_df(df, '%s.csv'%res_name, out_dir = out_dir, write_index=True)
self.out_fp = out_fp
return df
def update_cf(self, cf_fp): #configured control file updater
"""make sure you write the file first"""
return self.set_cf_pars(
{
'dmg_fps':(
{'expos':self.out_fp},
'#\'expos\' file path set from rsamp.py at %s'%(datetime.datetime.now().strftime('%Y-%m-%d %H.%M.%S')),
),
'parameters':(
{'as_inun':str(self.as_inun)},
)
},
cf_fp = cf_fp
)
def upd_cf_dtm(self, cf_fp=None):
if cf_fp is None: cf_fp=self.cf_fp
return self.set_cf_pars(
{
'dmg_fps':(
{'gels':self.out_fp},
'#\'gels\' file path set from rsamp.py at %s'%(datetime.datetime.now().strftime('%Y-%m-%d %H.%M.%S')),
),
'parameters':(
{'felv':'ground'},
)
},
cf_fp = cf_fp
)
#===========================================================================
# PLOTS-----
#===========================================================================
def plot_hist(self, #plot failure histogram of all layers
df=None,**kwargs):
if df is None: df=self.res_df
title = '%s Raster Sample Histogram on %i Events'%(self.tag, len(df.columns))
self._set_valstr(df)
return self.plot_impact_hist(df,
title=title, xlab = 'raster value',
val_str=self.val_str, **kwargs)
def plot_boxes(self, #plot boxplots of results
df=None,
**kwargs):
if df is None:df=self.res_df
title = '%s Raster Sample Boxplots on %i Events'%(self.tag, len(df.columns))
self._set_valstr(df)
return self.plot_impact_boxes(df,
title=title, xlab = 'hazard layer', ylab = 'raster value',
smry_method='mean',
val_str=self.val_str, **kwargs)
def _set_valstr(self, df):
self.val_str= 'finv_fcnt=%i \nfinv_name=\'%s\' \nas_inun=%s \ngtype=%s \ndate=%s'%(
len(df), self.finv_name, self.as_inun, self.gtype, self.today_str)
if not 'Point' in self.gtype:
self.val_str = self.val_str + '\npsmp_stat=%s'%self.psmp_stat
```
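A minimal pandas sketch of the percent-inundated step from samp_inun_line() above, reproduced on toy data outside QGIS (column names are illustrative, not from the source):
```python
import numpy as np
import pandas as pd

dthresh = 0.5  # depth threshold (m), as in samp_inun_line()

# depths sampled at points generated along two line features ('xid' groups)
pts_df = pd.DataFrame({
    'xid': [1, 1, 1, 1, 2, 2],
    'dep': [0.2, 0.8, 1.1, np.nan, 0.1, 0.3],
})

# null-out samples at or below the threshold
pts_df.loc[pts_df['dep'] <= dthresh, 'dep'] = np.nan

# ratio of real (above-threshold) samples to all samples per line
pts_df['all'] = 1  # dummy column for the denominator
sdf = pts_df.groupby('xid').count().rename(columns={'dep': 'real'})
sdf['ratio'] = sdf['real'] / sdf['all']

print(sdf['ratio'])  # xid 1 -> 0.50, xid 2 -> 0.00
```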
#### File: canflood/hlpr/exceptions.py
```python
class QError(Exception): #errors for qgis plugins
def __init__(self, msg):
from qgis.utils import iface
try:
from qgis.core import QgsMessageLog, Qgis, QgsLogger
iface.messageBar().pushMessage("Error", msg, level=Qgis.Critical)
QgsMessageLog.logMessage(msg,'CanFlood', level=Qgis.Critical)
QgsLogger.debug('ERROR_%s'%msg) #also send to file
except Exception:
raise Error(msg)
super().__init__(msg)
class Error(Exception):
def __init__(self, msg):
import logging
mod_logger = logging.getLogger('exceptions') #creates a child logger of the root
mod_logger.error(msg)
super().__init__(msg) #preserve the message on the exception itself
```
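A hedged usage sketch for the classes above, following the alias used elsewhere in this repo (`from hlpr.exceptions import QError as Error`); the validator function is hypothetical:
```python
from hlpr.exceptions import QError as Error  # alias used throughout this repo

def sample_raster(rlay):  # hypothetical caller
    if rlay is None:
        # inside QGIS this pushes to the message bar and the plugin log panel;
        # outside QGIS, QError falls back to the plain logging Error class
        raise Error('expected a raster layer, got None')
```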
#### File: canflood/hlpr/plt_qt.py
```python
import sys, os
from PyQt5 import QtCore, QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvas, NavigationToolbar2QT
from matplotlib.figure import Figure
import matplotlib
class PltWindow(QtWidgets.QMainWindow):
def __init__(self,
figure,
out_dir=None,
):
super().__init__()
#=======================================================================
# defaults
#=======================================================================
if out_dir is None: out_dir = os.getcwd()
#update defaults
if not os.path.exists(out_dir):os.makedirs(out_dir)
matplotlib.rcParams['savefig.directory'] = out_dir
#styleize window
self.setWindowTitle('CanFlood %s'%(figure._suptitle.get_text()[:15]))
#=======================================================================
# setup window
#=======================================================================
#add the main widget
self._main = QtWidgets.QWidget()
self.setCentralWidget(self._main)
#build a layout
layout = QtWidgets.QVBoxLayout(self._main)
#build/add canvas to layout
canvas = FigureCanvas(figure)
layout.addWidget(canvas)
#build/add toolbar
self._toolbar = NavigationToolbar2QT(canvas, self)
self.addToolBar(self._toolbar)
```
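A hedged launch sketch for PltWindow (the figure and output directory are illustrative; note the class reads `figure._suptitle`, so a suptitle must be set before construction):
```python
import sys, os
import matplotlib
matplotlib.use('Qt5Agg')  # force the Qt5 backend before pyplot is imported
import matplotlib.pyplot as plt
from PyQt5 import QtWidgets
from hlpr.plt_qt import PltWindow  # module path per this repo's layout

app = QtWidgets.QApplication(sys.argv)
fig, ax = plt.subplots()
ax.plot([1, 2, 3], [4, 1, 2])
fig.suptitle('demo figure')  # PltWindow uses this for the window title

win = PltWindow(fig, out_dir=os.getcwd())
win.show()
sys.exit(app.exec_())
```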
#### File: CanFlood/canflood/__init__.py
```python
"""
TODO: better dependency check
"""
#==============================================================================
# dependency check
#==============================================================================
# Let users know if they're missing any of our hard dependencies
hard_dependencies = ("pandas",)
missing_dependencies = []
for dependency in hard_dependencies:
try:
__import__(dependency)
except ImportError as e:
missing_dependencies.append("{0}: {1}".format(dependency, str(e)))
if missing_dependencies:
raise ImportError(
"Unable to import required dependencies:\n" + "\n".join(missing_dependencies)
)
del hard_dependencies, dependency, missing_dependencies
#===============================================================================
# add module directory to environment
#===============================================================================
import os, sys
file_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(file_dir)
# noinspection PyPep8Naming
def classFactory(iface): # pylint: disable=invalid-name
"""Load CanFlood_inPrep class from file CanFlood_inPrep.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
#
from .CanFlood import CanFlood
return CanFlood(iface)
```
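For reference, a simplified sketch of how a loader consumes `classFactory()` at plugin load time (the real mechanism lives in `qgis.utils`; `load_plugin` here is hypothetical):
```python
import importlib

def load_plugin(package_name, iface):
    mod = importlib.import_module(package_name)  # runs the dependency check above
    return mod.classFactory(iface)               # QGIS then drives the returned plugin object
```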
#### File: misc/dikes/dcoms.py
```python
import logging, configparser, datetime, shutil
#==============================================================================
# imports------------
#==============================================================================
import os
import numpy as np
import pandas as pd
from pandas import IndexSlice as idx
#Qgis imports
import processing
#==============================================================================
# custom imports
#==============================================================================
from hlpr.exceptions import QError as Error
from hlpr.basic import ComWrkr, view
#from hlpr.basic import get_valid_filename
#==============================================================================
# functions-------------------
#==============================================================================
class Dcoms(ComWrkr):
"""
each time the user performs an action,
a new instance of this should be spawned
this way all the user variables can be freshly pulled
"""
#data labels
wsln = 'wsl'
celn = 'crest_el'
sdistn = 'segTr_dist' #profile distance of transect (along dike)
segln = 'sid_len'
fbn = 'freeboard'
sid = 'sid' #global segment identifier
nullSamp = -999 #value for bad samples
lfxn = 'lenfx_SF'
pfn = 'p_fail'
#program containers
expo_dxcol = None #exposure data
def __init__(self,
dikeID = 'dikeID', #dike identifier field
segID = 'segID', #segment identifier field
cbfn = 'crest_buff', #crest buffer field name
ifidN = 'ifzID', #influence polygon id field name
*args, **kwargs):
super().__init__(*args,**kwargs)
#=======================================================================
# attach
#=======================================================================
self.dikeID, self.segID = dikeID, segID #done during init
self.cbfn = cbfn
self.ifidN = ifidN
#=======================================================================
# checks
#=======================================================================
for coln in [self.dikeID, self.segID, self.segln, self.cbfn, self.celn, self.ifidN]:
assert isinstance(coln, str), 'bad type on %s: %s'%(coln, type(coln))
assert not coln =='', 'got empty string for \'%s\''%coln
self.logger.debug('Dcoms.__init__ w/ feedback \'%s\''%type(self.feedback).__name__)
def load_expo(self, #load the dike segment exposure data
fp=None,
df=None,
prop_colns = None,
logger=None):
"""
TODO: make this more general (for dRes)
"""
if logger is None: logger=self.logger
log = logger.getChild('load_expo')
if df is None:
df = pd.read_csv(fp, header=0, index_col=0)
#=======================================================================
# tags
#=======================================================================
"""duplicated in _get_etags()"""
tag_l = [c for c in df.columns if c.endswith('_dtag')]
assert len(tag_l)>0, 'failed to find any tag columns'
etag_l = self._get_etags(df, prop_colns=prop_colns)
"""
view(df)
view(self.expo_df)
"""
df.loc[:, etag_l] = df.loc[:, etag_l].round(self.prec)
#=======================================================================
# wrap
#=======================================================================
log.info('loaded expos_df w/ %i dtags and %i etags'%(len(tag_l), len(etag_l)))
#collapse all dtags
l1 = [col.unique().tolist() for coln, col in df.loc[:, tag_l].items()]
self.dtag_l = set([item for sublist in l1 for item in sublist])
self.etag_l = etag_l
self.expo_df = df
return self.expo_df
def _get_etags(self, #exposure column names
df,
prop_colns = None,
):
#=======================================================================
# precheck
#=======================================================================
if prop_colns is None:
prop_colns = [self.dikeID, self.segID, self.segln, self.cbfn, self.celn, self.ifidN]
miss_l = set(prop_colns).difference(df.columns)
assert len(miss_l)==0, 'passed data is missing %i required columns. are the dike fields correct? \n %s'%(
len(miss_l), miss_l)
#=======================================================================
# tags
#=======================================================================
tag_l = [c for c in df.columns if c.endswith('_dtag')]
assert len(tag_l)>0, 'failed to find any tag columns'
#=======================================================================
# events
#=======================================================================
l1 = set(prop_colns).union(tag_l) #those we dont want
etag_l = list(set(df.columns).difference(l1))
assert len(etag_l)>0, 'failed to get any eTags'
etag_l.sort()
return etag_l
def load_expo_dx(self, #load the transect exposure data
fp):
log = self.logger.getChild('load_expo_dx')
dxcol_raw = pd.read_csv(fp, header=[0,1], index_col=0)
#=======================================================================
# precheck
#=======================================================================
mdex = dxcol_raw.columns
assert 'common' in mdex.levels[0]
#check l2 headers
miss_l = set([self.wsln, self.celn, self.sdistn, self.fbn]).difference(
mdex.levels[1])
assert len(miss_l)==0, 'missing some l2 colns: %s'%miss_l
#=======================================================================
# extract some summaries
#=======================================================================
"""
view(dxcol_raw)
"""
self.sid_vals = dxcol_raw.loc[:, ('common', self.sid)].unique().tolist()
#=======================================================================
# wrap
#=======================================================================
log.info('loaded expo dxcol w/ %s \n%s'%(str(dxcol_raw.shape), mdex))
self.expo_dxcol = dxcol_raw
return self.expo_dxcol
```
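A toy illustration of the column split performed by `_get_etags()` (field names follow the `Dcoms` defaults above; the event names are made up):
```python
import pandas as pd

prop_colns = ['dikeID', 'segID', 'sid_len', 'crest_buff', 'crest_el', 'ifzID']
df = pd.DataFrame(columns=prop_colns + ['e0100_dtag', 'e0100', 'e0200_dtag', 'e0200'])

tag_l = [c for c in df.columns if c.endswith('_dtag')]                     # dfunc tag columns
etag_l = sorted(set(df.columns).difference(prop_colns).difference(tag_l))  # event columns

print(tag_l)   # ['e0100_dtag', 'e0200_dtag']
print(etag_l)  # ['e0100', 'e0200']
```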
#### File: canflood/misc/force_mon.py
```python
import configparser, os, inspect, logging
#==============================================================================
# custom
#==============================================================================
#standalone runs
if __name__ =="__main__":
from hlpr.logr import basic_logger
mod_logger = basic_logger()
#plugin runs
else:
mod_logger = logging.getLogger('common') #get the root logger
from hlpr.exceptions import QError as Error
from hlpr.basic import *
from model.modcom import Model
class ForceWorker(Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs) #initialize the baseclass
def run(self, df, adf, logger=None):
if logger is None: logger=self.logger
log =logger
aep_ser = adf.iloc[0, adf.columns.isin(df.columns)].astype(int).sort_values().copy()
assert len(aep_ser) == len(df.columns)
res_df = self.force_monot(df, aep_ser = aep_ser, event_probs='ari', logger=log)
return res_df
if __name__ =="__main__":
#==========================================================================
# dev data
#==========================================================================
#==========================================================================
# out_dir = os.path.join(os.getcwd(), 'modcoms')
# cf_fp = r'C:\LS\03_TOOLS\CanFlood\_wdirs\20200304\ICI_rec\CanFlood_scenario1.txt'
# tag='dev'
#==========================================================================
#==========================================================================
# exp_fp = r'C:\LS\03_TOOLS\_git\CanFlood\Test_Data\model\risk1\wex\expos_test.csv'
# aep_fp = r'C:\LS\03_TOOLS\_git\CanFlood\Test_Data\model\risk1\wex\aeps_test.csv'
#==========================================================================
#==========================================================================
# 20200304 data
#==========================================================================
runpars_d = {
'TDDnrp':{
'out_dir':r'C:\LS\03_TOOLS\CanFlood\_wdirs\20200304\TDDnrp\risk1',
'cf_fp': r'C:\LS\03_TOOLS\CanFlood\_wdirs\20200304\TDDnrp\CanFlood_TDDnrp.txt',
},
'TDDres':{
'out_dir':r'C:\LS\03_TOOLS\CanFlood\_wdirs\20200304\TDD_res\risk1',
'cf_fp':r'C:\LS\03_TOOLS\CanFlood\_wdirs\20200304\TDD_res\CanFlood_TDDres.txt',
},
'ICIrec':{
'out_dir':r'C:\LS\03_TOOLS\CanFlood\_wdirs\20200304\ICI_rec\risk1',
'cf_fp':r'C:\LS\03_TOOLS\CanFlood\_wdirs\20200304\ICI_rec\CanFlood_ICIrec.txt',
}
}
cid = 'xid'
#==========================================================================
# exp_fp = r'C:\LS\03_TOOLS\CanFlood\_wdirs\20200304\ICI_rec\expos_scenario1_16_855.csv'
# aep_fp = r'C:\LS\03_TOOLS\CanFlood\_wdirs\20200304\ICI_rec\aeps_16_scenario1.csv'
# exl_fp = r'C:\LS\03_TOOLS\CanFlood\_wdirs\20200304\ICI_rec\exlikes_ICI_rec.csv'
#==========================================================================
for dtag, pars in runpars_d.items():
cf_fp, out_dir = pars['cf_fp'], pars['out_dir']
log = mod_logger.getChild(dtag)
#======================================================================
# load from pars
#======================================================================
cf_pars = configparser.ConfigParser(inline_comment_prefixes='#')
log.info('reading parameters from \n %s'%cf_pars.read(cf_fp))
aep_fp = cf_pars['risk_fps']['aeps']
exl_fp = cf_pars['risk_fps']['exlikes']
exp_fp = cf_pars['dmg_fps']['expos']
#======================================================================
# load common data
#======================================================================
adf = pd.read_csv(aep_fp)
#==========================================================================
# setup
#==========================================================================
wrkr = ForceWorker(cf_fp, out_dir=out_dir, logger=log)
#==========================================================================
# exlikes-------------
#==========================================================================
tag, fp = 'exlikes', exl_fp
# load exposure data
ddf_raw = pd.read_csv(fp).set_index(cid)
# force monotonicity
res_df = wrkr.run(ddf_raw, adf)
#==========================================================================
# output
#==========================================================================
basefn = os.path.splitext(os.path.split(fp)[1])[0]
ofp = os.path.join(out_dir, '%s_forceM.csv'%basefn)
res_df.to_csv(ofp, index=True)
log.info('wrote %s to \n %s'%(str(res_df.shape), ofp))
#==========================================================================
# wsl.fail-------------
#==========================================================================
tag, fp = 'expo', exp_fp
log = mod_logger.getChild(tag)
# load exposure data
ddf_raw = pd.read_csv(fp).set_index(cid)
#==========================================================================
# divide
#==========================================================================
boolcol = ddf_raw.columns.str.contains('fail')
ddf = ddf_raw.loc[:, boolcol]
res_df1 = wrkr.run(ddf, adf)
#==========================================================================
# wsl.good------------
#==========================================================================
ddf = ddf_raw.loc[:, ~boolcol]
res_df2= wrkr.run(ddf, adf)
#==========================================================================
# recombine
#==========================================================================
res_df =res_df1.join(res_df2)
"""
view(res_df)
"""
#==========================================================================
# output
#==========================================================================
basefn = os.path.splitext(os.path.split(fp)[1])[0]
ofp = os.path.join(out_dir, '%s_forceM.csv'%basefn)
res_df.to_csv(ofp, index=True)
log.info('wrote %s to \n %s'%(str(res_df.shape), ofp))
force_open_dir(out_dir)
print('finished')
```
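`ForceWorker.run()` delegates to `Model.force_monot()`, which is not shown here. A minimal sketch of one way to enforce monotonicity, assuming event columns are ordered from frequent to rare (an assumption for illustration, not the CanFlood implementation):
```python
import pandas as pd

# toy exposure table: rows are assets, columns are ARIs (sorted ascending)
df = pd.DataFrame({10: [1.0, 0.5], 50: [0.8, 0.7], 100: [2.0, 0.6]},
                  index=['a1', 'a2'])

# a running maximum left-to-right guarantees rarer events never show less exposure
res_df = df.cummax(axis=1)
print(res_df)  # a1: 1.0, 1.0, 2.0   a2: 0.5, 0.7, 0.7
```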
#### File: canflood/model/risk1.py
```python
import os, logging
import pandas as pd
import numpy as np
#from scipy import interpolate, integrate
#==============================================================================
# custom imports
#==============================================================================
#standalone runs
mod_logger = logging.getLogger('risk1') #get the root logger
from hlpr.exceptions import QError as Error
from model.riskcom import RiskModel
class Risk1(RiskModel):
"""
model for summarizing inundation counts (positive depths)
"""
valid_par='risk1'
#expectations from parameter file
exp_pars_md = {#mandatory: section: {variable: handles}
'parameters' :
{'name':{'type':str}, 'cid':{'type':str},
'event_probs':{'values':('ari', 'aep')},
'felv':{'values':('ground', 'datum')},
'prec':{'type':int},
'ltail':None, 'rtail':None, 'drop_tails':{'type':bool},
'as_inun':{'type':bool},
#'ground_water':{'type':bool}, #NO! risk1 only accepts positive depths
},
'dmg_fps':{
'finv':{'ext':('.csv',)}, #should only need the expos
'expos':{'ext':('.csv',)},
},
'risk_fps':{
'evals':{'ext':('.csv',)}
},
'validation':{
'risk1':{'type':bool}
}
}
exp_pars_op = {#optional expectations
'parameters':{
'impact_units': {'type':str}
},
'dmg_fps':{
'gels':{'ext':('.csv',)},
},
'risk_fps':{
'exlikes':{'ext':('.csv',)}
},
}
#number of groups to expect per prefix
group_cnt = 2
#minimum inventory expectations
finv_exp_d = {
'scale':{'type':np.number},
'elv':{'type':np.number}
}
"""
NOTE: for as_inun=True,
using this flag to skip conversion of exposure to binary
we dont need any elevations (should all be zero)
but allowing the user to NOT pass an elv column would be very difficult
"""
def __init__(self,**kwargs):
super().__init__(**kwargs) #initialize Model
self.dtag_d={**self.dtag_d,**{
'expos':{'index_col':0}
}}
self.logger.debug('finished __init__ on Risk1')
def prep_model(self, #attach and prepare data for model run
):
"""
called by Dialog and standalones
"""
self.set_finv()
self.set_evals()
self.set_expos()
if not self.exlikes == '':
self.set_exlikes()
if self.felv == 'ground':
self.set_gels()
self.add_gels()
self.build_exp_finv() #build the expanded finv
self.build_depths()
self.logger.debug('finished setup_data on Risk1')
return
def run(self,
res_per_asset=False, #whether to generate results per asset
calc_risk=True, #whether to run integration algo
):
"""
main caller for L1 risk model
TODO: clean this up and divide into more functions
extend impact only support to GUI and tests
"""
#======================================================================
# defaults
#======================================================================
log = self.logger.getChild('run')
#ddf_raw, finv, = self.data_d['expos'],self.data_d['finv']
aep_ser = self.data_d['evals']
cid, bid = self.cid, self.bid
bdf ,ddf = self.bdf, self.ddf
#======================================================================
# prechecks
#======================================================================
assert isinstance(res_per_asset, bool)
assert cid in ddf.columns, 'ddf missing %s'%cid
assert bid in ddf.columns, 'ddf missing %s'%bid
assert ddf.index.name == bid, 'ddf bad index'
#identifier for depth columns
#dboolcol = ~ddf.columns.isin([cid, bid])
log.info('running on %i assets and %i events'%(len(bdf), len(ddf.columns)-2))
self.feedback.upd_prog(20, method='raw')
#=======================================================================
# clean exposure
#=======================================================================
boolcol = ddf.columns.isin([bid, cid])
ddf1 = ddf.loc[:, ~boolcol]
if calc_risk:
assert len(ddf1.columns)>3, 'must pass more than 3 exposure columns to calculate ead'
#======================================================================
# convert exposures to binary
#======================================================================
if not self.as_inun: #standard impact/noimpact analysis
#get relvant bids
"""
because there are no curves, Risk1 can only use positive depths.
ground_water flag is ignored
"""
booldf = pd.DataFrame(np.logical_and(
ddf1 > 0,#get bids w/ positive depths
ddf1.notna()) #real depths
)
if not booldf.any().any(): #no positive real depths anywhere
log.warning('got all %i entries as null... no impacts'%(booldf.size))
raise Error('no impacts')
log.info('got %i (of %i) exposures'%(booldf.sum().sum(), booldf.size))
bidf = ddf1.where(booldf, other=0.0)
bidf = bidf.where(~booldf, other=1.0)
#=======================================================================
# leave as percentages
#=======================================================================
else:
bidf = ddf1.copy()
assert bidf.max().max() <=1
#fill nulls with zero
bidf = bidf.fillna(0)
self.feedback.upd_prog(10, method='append')
#======================================================================
# scale
#======================================================================
if 'fscale' in bdf:
log.info('scaling impact values by \'fscale\' column')
bidf = bidf.multiply(bdf.set_index(bid)['fscale'], axis=0).round(self.prec)
#======================================================================
# drop down to worst case
#======================================================================
#reattach indexers
bidf1 = bidf.join(ddf.loc[:, boolcol])
assert not bidf1.isna().any().any()
cdf = bidf1.groupby(cid).max().drop(bid, axis=1)
#======================================================================
# resolve alternate impacts (per event)-----
#======================================================================
#take maximum expected value at each asset
if 'exlikes' in self.data_d:
bres_df = self.ev_multis(cdf, self.data_d['exlikes'], aep_ser, logger=log)
#no duplicates. .just rename by aep
else:
bres_df = cdf.rename(columns = aep_ser.to_dict()).sort_index(axis=1)
assert bres_df.columns.is_unique, 'duplicate aeps require exlikes'
log.info('got damages for %i events and %i assets'%(
len(bres_df), len(bres_df.columns)))
#======================================================================
# checks
#======================================================================
#check the columns
assert np.array_equal(bres_df.columns.values, aep_ser.unique()), 'column name problem'
_ = self.check_monot(bres_df)
self.feedback.upd_prog(10, method='append')
#======================================================================
# get ead per asset------
#======================================================================
if calc_risk:
if res_per_asset:
res_df = self.calc_ead(bres_df)
else:
res_df = None
self.res_df = res_df
self.feedback.upd_prog(10, method='append')
#======================================================================
# totals
#======================================================================
res_ttl = self.calc_ead(bres_df.sum(axis=0).to_frame().T,
drop_tails=False,
).T #1 column df
self.ead_tot = res_ttl.iloc[:,0]['ead'] #set for plot_riskCurve()
self.res_ttl = self._fmt_resTtl(res_ttl)
self._set_valstr()
#=======================================================================
# impacts only----
#=======================================================================
else:
self.res_df = bres_df.rename(
columns={e[1]:e[0] for e in self.eventType_df.drop('noFail', axis=1).values})
self.res_ttl = pd.Series()
log.info('finished on %i assets and %i damage cols'%(len(bres_df), len(bres_df.columns)))
#=======================================================================
# #format total results for writing
#=======================================================================
#=======================================================================
# wrap
#=======================================================================
log.info('finished')
return self.res_ttl, self.res_df
if __name__ =="__main__":
print('???')
```
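The core exposure-to-binary step of `Risk1.run()` on toy data (a standalone sketch; field names are illustrative):
```python
import numpy as np
import pandas as pd

cid, bid = 'xid', 'bid'
ddf = pd.DataFrame({
    bid: [0, 1, 2, 3],
    cid: [1, 1, 2, 2],
    'e1': [0.5, np.nan, -0.1, 0.0],  # sampled depths for one event
}).set_index(bid, drop=False)

ddf1 = ddf.drop([bid, cid], axis=1)
booldf = (ddf1 > 0) & ddf1.notna()   # positive, real depths only

# 1.0 where exposed, 0.0 otherwise
bidf = ddf1.where(booldf, other=0.0).where(~booldf, other=1.0)

# collapse nested assets (bid) to the worst case per asset (cid)
cdf = bidf.join(ddf[[cid]]).groupby(cid).max()
print(cdf)  # xid 1 -> 1.0, xid 2 -> 0.0
```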
#### File: sofda/fdmg/house.py
```python
import logging, os, time, re, math, copy, gc, weakref, random
import pandas as pd
import numpy as np
#===============================================================================
# shortcuts
#===============================================================================
from collections import OrderedDict
from hlpr.exceptions import Error
from weakref import WeakValueDictionary as wdict
from weakref import proxy
from model.sofda.hp.basic import OrderedSet
from model.sofda.hp.pd import view
idx = pd.IndexSlice
#===============================================================================
# IMPORT CUSTOM MODS ---------------------------------------------------------
#===============================================================================
#import hp.plot
import model.sofda.hp.basic as hp_basic
import model.sofda.hp.pd as hp_pd
import model.sofda.hp.oop as hp_oop
import model.sofda.hp.sim as hp_sim
import model.sofda.hp.dyno as hp_dyno
#import model.sofda.hp.data as hp_data
from model.sofda.fdmg.dfunc import Dfunc
import model.sofda.udev.scripts as udev_scripts
# logger setup -----------------------------------------------------------------------
mod_logger = logging.getLogger(__name__)
mod_logger.debug('initialized')
class House(
udev_scripts.House_udev,
#hp.plot.Plot_o,
hp_dyno.Dyno_wrap,
hp_sim.Sim_o,
hp_oop.Parent, #building/asset objects
hp_oop.Child):
#===========================================================================
# program pars
#==========================================================================
geocode_list = ['area', 'per', 'height', 'inta'] #suffixes of geometry attributes to search for (see set_geo)
finish_code_list = ['f', 'u', 't'] #code for finished or unfinished
#===========================================================================
# debugging
#===========================================================================
last_floodo = None
#===========================================================================
# user provided pars
#===========================================================================
dem_el = np.nan
"""changed to acode
hse_type = '' # Class + Type categorizing the house"""
acode_s = ''
acode_c = ''
anchor_el = np.nan # anchor elevation for house relative to datum (generally main floor el)
gis_area = np.nan #footprint area (generally from the binv)
B_f_height = np.nan
bsmt_f = True
area_prot_lvl = 0 #level of area protection
asector =''
f1area =np.nan
f0area = np.nan
f1a_uf =np.nan
f0a_uf =np.nan
#needed for udev
parcel_area = np.nan
#defaults passed from model
"""While the ICS for these are typically uniform and broadcast down by the model,
these need to exist on the House, so we can spatially limit our changes"""
G_anchor_ht = None #default garage anchor height (chosen arbitrarily by IBI (2015))
joist_space = None #space between basement and mainfloor. used to set the
#set of expected attributes (and their types) for validity checking
exp_atts_d = {'parcel_area':float, 'acode_s':str, 'acode_c':str, 'anchor_el':float, 'gis_area':float,
'B_f_height':float, 'dem_el':float, 'asector':str}
#===========================================================================
# calculated pars
#===========================================================================
floodo = None #flood object flooding the house
# #geometry placeholders
#geo_dxcol_blank = None #blank dxcol for houes geometry
geo_dxcol = None
'keeping just this one for reporting and dynp'
boh_max_val = None #basement open height maximum value (cap)
# #anchoring
"""
Im keeping anchor heights separate from geometry attributes as these could still apply
even for static dmg_feats
"""
bsmt_opn_ht = 0.0 #height of lowest basement opening
damp_spill_ht = 0.0
vuln_el = 9999.0 #starter value
# personal property protection
bkflowv_f = False #flag indicating the presence of a backflow valve on this property
sumpump_f = False
genorat_f = False
bsmt_egrd = ''
#statistics
BS_ints = 0.0 #some statistic of the weighted depth/damage of the BS dfunc
max_dmg = 0.0 #max damage possible for this house
dummy_cnt = 0 #number of dummy dfuncs
kid_nm_t = tuple()
beg_hist = ''
#===========================================================================
# data containers
#===========================================================================
dd_df = None #df results of total depth damage
def __init__(self, *vars, **kwargs):
logger = mod_logger.getChild('House')
logger.debug('start _init_')
#=======================================================================
# attach pre init atts
#=======================================================================
#self.model = self.parent.model #pass the Fdmg model down
'put this here just to keep the order nice and avoid the unresolved import error'
self.inherit_parent_ans=set(['mind', 'model'])
#=======================================================================
# #initilzie teh baseclass
#=======================================================================
super(House, self).__init__(*vars, **kwargs)
if self.db_f:
if self.model is None: raise IOError
#=======================================================================
#common setup
#=======================================================================
if self.sib_cnt == 0:
logger.debug("sib_cnt=0. setting atts")
self.kid_class = Dfunc
"""noved this out to set_dfunc_df
self.childmeta_df = self.model.house_childmeta_df #dfunc meta data"""
self.joist_space = self.model.joist_space
self.G_anchor_ht = self.model.G_anchor_ht
#=======================================================================
# unique setup
#=======================================================================
self.bldg_id = int(getattr(self, self.mind ))
self.bsmt_f = hp_basic.str_to_bool(self.bsmt_f, logger=self.logger)
if not 'B' in self.model.place_codes:
raise Error('not sure about this')
self.bsmt_f = False
'these need to be unique. calculated during init_dyno()'
self.post_upd_func_s = set([self.calc_statres_hse])
logger.debug('building the house \n')
self.build_house()
logger.debug('raising my dfuncs \n')
self.raise_dfuncs()
logger.debug('init_dyno \n')
self.init_dyno()
#=======================================================================
# checking
#=======================================================================
if self.db_f: self.check_house()
logger.debug('_init_ finished as %i \n'%self.bldg_id)
return
def check_house(self):
logger = self.logger.getChild('check_house')
#check the proxy objects
if not self.model.__repr__() == self.parent.parent.__repr__():
raise IOError
#=======================================================================
# check attribute validity
#=======================================================================
self.check_atts()
#=======================================================================
# check the basement logic
#=======================================================================
if self.bsmt_f:
if self.B_f_height < self.session.bfh_min:
raise Error('%s basement finish height (%.2f) is lower than the session minimum %.2f)'
%(self.name,self.B_f_height, self.session.bfh_min ))
#=======================================================================
# check your children
#=======================================================================
for name, dfunc in self.kids_d.items():
dfunc.check_dfunc()
return
def build_house(self): #build yourself from the building inventory
"""
#=======================================================================
# CALLS
#=======================================================================
binv.raise_children()
spawn_child()
"""
logger = self.logger.getChild('build_house')
#=======================================================================
# custom loader functions
#=======================================================================
#self.set_binv_legacy_atts() #compile data from legacy (rfda) inventory syntax
logger.debug('set_geo_dxcol \n')
self.set_geo_dxcol() #calculate the geometry (defaults) of each floor
logger.debug('set_hse_anchor \n')
self.set_hse_anchor()
""" a bit redundant, but we need to set the bsmt egrade regardless for reporting consistency
'these should be accessible regardless of dfeats as they only influence the depth calc'"""
self.set_bsmt_egrd()
if self.bsmt_f:
logger.debug('set_bsmt_opn_ht \n')
self.set_bsmt_opn_ht()
logger.debug('set_damp_spill_ht \n')
self.set_damp_spill_ht()
#=======================================================================
# value
#=======================================================================
'need a better way to do this'
"""contents value scaling
self.cont_val = self.value * self.model.cont_val_scale"""
if self.db_f:
if self.gis_area < self.model.gis_area_min:
raise IOError
if self.gis_area > self.model.gis_area_max: raise IOError
logger.debug('finished')
return
def raise_dfuncs(self): #build dictionary with damage functions for each dmg_type
"""
called by spawn_child and passing childmeta_df (from dfunc tab. see above)
this allows each dfunc object to be called from the dictionary by dmg_type
dfunc_df is sent as the childmeta_df (attached during __init__)
#=======================================================================
# INPUTS
#=======================================================================
dfunc_df: df with headers:
these are typically assigned from the 'dfunc' tab on the pars.xls
"""
#=======================================================================
# #defaults
#=======================================================================
logger = self.logger.getChild('raise_dfuncs')
'this is a slice from the dfunc tab made by Fdmg.load_pars_dfunc'
#=======================================================================
# get your dfunc pars
#=======================================================================
dfunc_pars_df = self.get_dfunc_df()
#set this as yoru childmeta
self.childmeta_df = dfunc_pars_df.copy()
logger.debug('from %s'%str(dfunc_pars_df.shape))
#=======================================================================
# prechecks
#=======================================================================
if self.db_f:
if not self.session.state=='init':
raise Error('should only build these once')
if not isinstance(dfunc_pars_df, pd.DataFrame):
raise IOError
if len(dfunc_pars_df) == 0:
raise Error('%s got no dfunc_pars_df!'%self.name)
if not self.kid_class == Dfunc:
raise IOError
if len(self.kids_d) > 0:
raise IOError
#=======================================================================
# clean the dfunc pars
#=======================================================================
"""I think we need placeholder dfuncs incase we rebuild this house with a basement later
#drop basements
if not self.bsmt_f:
dfunc_pars_df = dfunc_pars_df_raw[dfunc_pars_df_raw['place_code']!='B']
else:
dfunc_pars_df = dfunc_pars_df_raw"""
#slice out all the nones
dfunc_pars_df1 = dfunc_pars_df[dfunc_pars_df['acode'] != 'none']
#=======================================================================
# compile for each damage type
#=======================================================================
#shortcut for ALL nones
if len(dfunc_pars_df1) == 0:
logger.debug('no real dfuncs. skipping construction')
self.dfunc_d = dict()
else:
self.dfunc_d = self.raise_children_df(dfunc_pars_df1,
kid_class = self.kid_class,
dup_sibs_f = True)
#=======================================================================
# closeout and wrap up
#=======================================================================
logger.debug('built %i dfunc children: %s'%(len(self.dfunc_d), list(self.dfunc_d.keys())))
#=======================================================================
# post check
#=======================================================================
if self.db_f:
self.check_house()
return
def set_hse_anchor(self):
'pulled this out so updates can be made to dem_el'
if self.is_frozen('anchor_el'): return True
anchor_el = self.dem_el + float(self.ff_height) #height + surface elevation
#set the update
self.handle_upd('anchor_el', anchor_el, proxy(self), call_func = 'set_hse_anchor')
return True
def set_bsmt_opn_ht(self): #set the basement opening height (from the basement floor)
"""
bsmt_opn_ht is used by dfuncs with bsmt_egrd == 'damp' and damp_func_code == 'spill'
for low water floods
"""
#=======================================================================
# shortcuts
#=======================================================================
if not self.bsmt_f:
return True
#=======================================================================
# check dependencies and frozen
#=======================================================================
if not self.session.state=='init':
if self.is_frozen('bsmt_opn_ht'):
return True
dep_l = [([self], ['set_hse_anchor', 'set_geo_dxcol'])]
if self.deps_is_dated(dep_l, method = 'reque', caller = 'set_bsmt_opn_ht'):
return False
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('set_bsmt_opn_ht')
#=======================================================================
# from user provided maximum cap
#=======================================================================
if self.model.bsmt_opn_ht_code.startswith('*max'):
#===================================================================
# prechecks
#===================================================================
if self.db_f:
bfh_chk = float(self.geo_dxcol.loc['height',('B','f')])
if not round(self.B_f_height, 2) == round(bfh_chk, 2):
raise Error('B_f_height mismatch attribute (%.2f) geo_dxcol (%.2f)'
%(self.B_f_height, bfh_chk))
"""lets let the basement be above grade"""
if self.ff_height > (bfh_chk + self.joist_space):
logger.warning('basement is above grade!')
#get the minimum value
if self.boh_max_val is None: #calculate and set
'this means we are non dynamic'
s_raw = self.model.bsmt_opn_ht_code
s = re.sub('\)', '',s_raw[5:])
self.boh_max_val = float(s) #pull the number out of the brackets
max_val = self.boh_max_val
# get the basement anchor el
B_f_height = float(self.geo_dxcol.loc['height',('B','t')]) #pull from frame
bsmt_anchor_el = self.anchor_el - B_f_height - self.joist_space #basement curve
#get the distance to grade
bsmt_to_dem = self.dem_el - bsmt_anchor_el
if bsmt_to_dem <0: #floating basements
bsmt_opn_ht = 0
else:
#take the min of all three
bsmt_opn_ht = min(B_f_height, bsmt_to_dem, max_val)
#===================================================================
# wrap
#===================================================================
if self.db_f:
#check basement anchor elevation logic
if bsmt_anchor_el > self.anchor_el:
raise Error('%s basement anchor el (%.2f) is above the main anchor el (%.2f)'
%(self.name, bsmt_anchor_el, self.anchor_el))
"""letting this happen for now"""
if bsmt_to_dem < 0:
logger.debug('\n dem_el=%.2f, bsmt_anchor_el=%.2f, B_f_heigh=%.2f, anchor_el=%.2f'
%(self.dem_el, bsmt_anchor_el, B_f_height, self.anchor_el))
logger.warning('%s bassement is above grade! bsmt_anchor_el(%.2f) > dem _el (%.2f) '
%(self.name, bsmt_anchor_el, self.dem_el))
#detailed output
boolar = np.array([B_f_height, bsmt_to_dem, max_val, 0]) == bsmt_opn_ht #identify which one you pulled from
selected = np.array(['B_f_height', 'bsmt_to_dem', 'max_val', 'zero'])[boolar]
logger.debug('got bsmt_opn_ht = %.2f from \'%s\''%(bsmt_opn_ht, selected[0]))
else:
logger.debug('got bsmt_opn_ht = %.2f'%bsmt_opn_ht)
#=======================================================================
# from user provided float
#=======================================================================
else:
bsmt_opn_ht = float(self.model.bsmt_opn_ht_code)
#=======================================================================
# post checks
#=======================================================================
if self.db_f:
if not bsmt_opn_ht >= 0:
logger.error('\n dem_el=%.2f, bsmt_anchor_el=%.2f, B_f_heigh=%.2f, anchor_el=%.2f'
%(self.dem_el, bsmt_anchor_el, B_f_height, self.anchor_el))
raise Error('%s got a negative bsmt_opn_ht (%.2f)'%(self.name, bsmt_opn_ht))
#=======================================================================
# wrap up
#=======================================================================
self.handle_upd('bsmt_opn_ht', bsmt_opn_ht, proxy(self), call_func = 'set_bsmt_opn_ht')
return True
def set_damp_spill_ht(self):
damp_spill_ht = self.bsmt_opn_ht / 2.0
self.handle_upd('damp_spill_ht', damp_spill_ht, proxy(self), call_func = 'set_damp_spill_ht')
return True
def set_bsmt_egrd(self): #calculate the basement exposure grade
"""
bkflowv_f sumpump_f genorat_f
There is also a global flag to indicate whether bsmt_egrd should be considered or not
for the implementation of the bsmt_egrd in determining damages, see Dfunc.get_dmg_wsl()
#=======================================================================
# CALLS
#=======================================================================
this is now called during every get_dmgs_wsls()... as gpwr_f is a function of the Flood object
consider only calling w
"""
#=======================================================================
# shortcuts
#=======================================================================
if self.is_frozen('bsmt_egrd'):
return 'frozen'
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('set_bsmt_egrd')
if self.bsmt_f:
#=======================================================================
# from plpms
#=======================================================================
if self.model.bsmt_egrd_code == 'plpm':
#store the plpm status into the cond string
if self.db_f:
cond = 'plpm.'
for tag, flag in {'s':self.sumpump_f, 'g':self.genorat_f, 'b':self.bkflowv_f}.items():
if flag:
cond = '%s%s'%(cond, tag)
else:
cond = 'plpm'
#=======================================================================
# get the grid power state
#=======================================================================
if self.session.state == 'init':
gpwr_f = self.model.gpwr_f
cond = cond + '.init'
else:
gpwr_f = self.floodo.gpwr_f
cond = '%s.%s'%(cond, self.floodo.ari)
#=======================================================================
# grid power is on
#=======================================================================
if gpwr_f:
cond = cond + '.on'
if self.bkflowv_f and self.sumpump_f:
bsmt_egrd = 'dry'
elif self.bkflowv_f or self.sumpump_f:
bsmt_egrd = 'damp'
else:
bsmt_egrd = 'wet'
#=======================================================================
# grid power is off
#=======================================================================
else:
cond = cond + '.off'
if self.bkflowv_f and self.sumpump_f and self.genorat_f:
bsmt_egrd = 'dry'
elif self.bkflowv_f or (self.sumpump_f and self.genorat_f):
bsmt_egrd = 'damp'
else: bsmt_egrd = 'wet'
logger.debug('set bsmt_egrd = %s (from \'%s\') with grid_power_f = %s'%(bsmt_egrd,self.bsmt_egrd, gpwr_f))
#=======================================================================
# ignore bsmt_egrd
#=======================================================================
elif self.model.bsmt_egrd_code == 'none':
cond = 'none'
bsmt_egrd = 'wet'
gpwr_f = self.model.gpwr_f
#=======================================================================
# allow the user to override all
#=======================================================================
elif self.model.bsmt_egrd_code in ['wet', 'damp', 'dry']:
cond = 'global'
bsmt_egrd = self.model.bsmt_egrd_code
gpwr_f = self.model.gpwr_f
else:
raise IOError
else:
gpwr_f = self.model.gpwr_f
cond = 'nobsmt'
bsmt_egrd = 'nobsmt'
#=======================================================================
# wrap up
#=======================================================================
self.bsmt_egrd = bsmt_egrd
self.gpwr_f = gpwr_f #set this
"""report/collect on the flood
self.parent.childmeta_df.loc[self.dfloc,'bsmt_egrd'] = bsmt_egrd"""
return cond
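#added summary (not original code): under 'plpm' the exposure grade reduces to this decision table:
# grid power ON : dry = backflow valve AND sump pump
#                 damp = backflow valve OR sump pump
#                 wet = neither
# grid power OFF: dry = backflow valve AND sump pump AND generator
#                 damp = backflow valve OR (sump pump AND generator)
#                 wet = otherwise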
def set_geo_dxcol(self): #calculate the geometry of each floor based on the geo_build_code
"""
builds a dxcol with all the geometry attributes of this house
called by load_data when self.session.wdfeats_f = True
#=======================================================================
# KEY VARS
#=======================================================================
geo_build_code: code to indicate what geometry to use for the house. see the dfunc tab
'defaults': see House.get_default_geo()
'from_self': expect all geo atts from the binv.
'any': take what you can from the binv, everything else use defaults.
'legacy': use gis area for everything
gbc_override: used to override the geo_build_code
geo_dxcol: house geometry
#=======================================================================
# UPDATES
#=======================================================================
when a specific geometry attribute of the house is updated (i.e. B_f_height)
this dxcol needs to be rebuilt
and all the dfuncs need to run build_dd_ar()
#=======================================================================
# TODO
#=======================================================================
add some isolated updating?
for when we only change one floor
need to add some kwargs to the dynp_handles
"""
logger = self.logger.getChild('set_geo_dxcol')
if self.is_frozen('geo_dxcol', logger=logger):
return True
pars_dxcol = self.session.pars_df_d['hse_geo'] #pull the pars frame
#=======================================================================
# get default geometry for this house
#=======================================================================
self.defa = self.gis_area #default area
if self.defa <=0:
logger.error('got negative area = %.2f'%self.defa)
raise IOError
self.defp = 4*math.sqrt(self.defa)
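#added note: a square footprint of area A has perimeter exactly 4*sqrt(A), so defp assumes a roughly square building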
#=======================================================================
# setup the geo_dxcol
#=======================================================================
dxcol = self.model.geo_dxcol_blank.copy() #get a copy of the blank one
'I need to place the reference here so that geometry attributes have access to each other'
#self.geo_dxcol = dxcol
place_codes = dxcol.columns.get_level_values(0).unique().tolist()
#finish_codes = dxcol.columns.get_level_values(1).unique().tolist()
#geo_codes = dxcol.index
logger.debug("from geo_dxcol_blank %s filling:"%(str(dxcol.shape)))
#=======================================================================
# #loop through each place code and compile the appropriate geometry
#=======================================================================
for place_code in place_codes:
geo_df = dxcol[place_code] #geometry for just this place
pars_df = pars_dxcol[place_code]
#logger.debug('filling geo_df for place_code: \'%s\' '%(place_code))
#===================================================================
# #loop through and build the geometry by each geocode
#===================================================================
for geo_code, row in geo_df.iterrows():
for finish_code, value in row.items():
#===========================================================
# total column
#===========================================================
if finish_code == 't':
uval = dxcol.loc[geo_code, (place_code, 'u')]
fval = dxcol.loc[geo_code, (place_code, 'f')]
if self.db_f:
if np.any(pd.isnull([uval, fval])):
raise IOError
if geo_code == 'height': #for height, take the maximum
att_val = max(uval, fval)
else: #for other geometry, take the total
att_val = uval + fval
#===========================================================
# finish/unfinished
#===========================================================
else:
#get the user passed par for this
gbc = pars_df.loc[geo_code, finish_code]
try:gbc = float(gbc)
except: pass
#===========================================================
# #assemble per the geo_build_code
#===========================================================
#user specified code
if isinstance(gbc, str):
gbc = str(gbc)
if gbc == '*binv':
att_name = place_code +'_'+finish_code+'_'+ geo_code #get the att name for this
att_val = getattr(self, att_name) #get this attribute from self
""""
mostly using this key for the B_f_height
"""
elif gbc == '*geo':
att_val = self.calc_secondary_geo(place_code, finish_code, geo_code, dxcol=dxcol) #calculate the default value
elif gbc.startswith('*tab'):
#get the pars
tabn = re.sub(r'\)',"",gbc[5:]) #remove the end parenthesis
df = self.session.pars_df_d[tabn]
att_name = place_code +'_'+finish_code+'_'+ geo_code #get the att name for this
att_val = self.get_geo_from_other(df, att_name)
else:
att_val = getattr(self, gbc)
#user specified value
elif isinstance(gbc, float): #just use the default value provided in the pars
att_val = gbc
else: raise IOError
logger.debug('set %s.%s.%s = %.2f with gbc \'%s\''%(place_code,finish_code,geo_code, att_val, gbc))
#===========================================================
# value checks
#===========================================================
if self.db_f:
att_name = place_code +'_'+finish_code+'_'+ geo_code
if not 'float' in type(att_val).__name__:
raise Error('got unexpected type for \"%s\': %s'%(att_name, type(att_val)))
if pd.isnull(att_val):
raise IOError
if att_val < 0:
raise IOError
#===========================================================
# set the value
#===========================================================
dxcol.loc[geo_code, (place_code, finish_code)] = att_val
#row[finish_code] = att_val #update the ser
#logger.debug('set \'%s\' as \'%s\''%(att_name, att_val))
#=======================================================================
# rounding
#=======================================================================
dxcol = dxcol.round(decimals=2)
#=======================================================================
# special attribute setting
#=======================================================================
'need this as an attribute for reporting'
B_f_height = float(dxcol.loc['height', ('B', 'f')]) #to set the type
#===============================================================
# POST
#===============================================================
"""todo:
add some checking that we are not changing any geometry attributes with a dynp
that would be overwritten here
"""
#logger.debug('built house_geo_dxcol %s'%str(dxcol.shape))
self.handle_upd('geo_dxcol', dxcol, weakref.proxy(self), call_func = 'set_geo_dxcol')
self.handle_upd('B_f_height', B_f_height, weakref.proxy(self), call_func = 'set_geo_dxcol')
return True
def set_bfh(self):#set the basement finish height into the geo_dxcol
#shortcutting for those without basements
if not self.bsmt_f:
return True
#update the geo_dxcol
return self.update_geo_dxcol(self.B_f_height, 'height', 'B', 'f')
def xxxset_ffh(self): #set the ff_height (from the anchor_el and the dem_el
"""not sure I want to do this, because we generally get the anchor_el from the ff_height"""
self.ff_height = self.anchor_el - self.dem_el
return True
def update_geo_dxcol(self,
nval_raw, #new value
geo_code, place_code, finish_code, #locations
):
log = self.logger.getChild('update_geo_dxcol')
#=======================================================================
# frozen check
#=======================================================================
if self.is_frozen('geo_dxcol', logger=log):
return True
#=======================================================================
# defaults
#=======================================================================
nval = round(nval_raw, 2)
#=======================================================================
# prechecks
#=======================================================================
if finish_code == 't':
raise Error('not implemented')
dxcol = self.geo_dxcol.copy() #get a copy of the original
#=======================================================================
# check if we had a change
#=======================================================================
oldv = float(dxcol.loc[geo_code, (place_code, finish_code)])
if nval == round(oldv, 2):
log.debug('for %s.%s.%s nval= %.2f has no change... skipping'%(geo_code, place_code, finish_code, nval))
return True
#=======================================================================
# #set the new value
#=======================================================================
dxcol.loc[geo_code, (place_code, finish_code)] = nval
if self.db_f:
if not nval == round(float(dxcol.loc[geo_code, (place_code, finish_code)]), 2):
raise Error("value didn't set")
"""
dxcol.loc[geo_code, (place_code, finish_code)] = 99.9
"""
log.debug('for %s.%s.%s set %.2f'%(geo_code, place_code, finish_code, nval))
#=======================================================================
# set the total value
#=======================================================================
dxcol.loc[geo_code, (place_code, 't')] = dxcol.loc[geo_code, idx[[place_code], ['u','f']]].sum()
#=======================================================================
# #handle the update
#=======================================================================
self.handle_upd('geo_dxcol', dxcol, weakref.proxy(self), call_func = 'update_geo_dxcol')
"""
for just hte basement, would be nice to only force updates on those that have changed
"""
#=======================================================================
# post checks
#=======================================================================
if self.db_f:
if not nval == round(float(self.geo_dxcol.loc[geo_code, (place_code, finish_code)]), 2):
raise Error("value didn't set")
return True
def get_dfunc_df(self): #pull your dfunc_df
"""
20190512: added this to provide for dfunc handling on all the different acodes
the dfuncs should use this new
killing dfuncs and spawning new ones?
way more complicated...
this is what we're doing with dfeats
how do we tell the dfuncs about their new pars?
added a loop to the front of build_dfunc()
simulation resetting?
as all these pars are in the dynp_handles (which get loaded into the reset_d automatically),
changes here should be reset
#=======================================================================
# callers
#=======================================================================
dynp_handles (for acode_s and acode_c changes)
"""
log = self.logger.getChild('get_dfunc_df')
df_raw = self.model.dfunc_mstr_df.copy() #pull from the session
"""this is configured by scripts_fdmg.Fdmg.load_pars_dfunc()"""
#get your slice
boolidx = np.logical_or(
df_raw['acode']==self.acode_s, #matching your structural dfuncs
df_raw['acode']==self.acode_c, #matching contents
)
df = df_raw[boolidx].copy() #set this
#=======================================================================
# post checks
#=======================================================================
if self.db_f:
#length check
"""want to allow adding garage curves and removeing some dfuncs"""
if len(df) > 6:
raise Error('%s dfunc_df too long (%i) with acode_s=%s and acode_c=%s'
%(self.name, len(df), self.acode_s, self.acode_c))
return df
def calc_secondary_geo(self, #set the default geometry for this attribute
place_code, finish_code, geo_code,
dxcol = None):
logger = self.logger.getChild('calc_secondary_geo')
#=======================================================================
# get primary geometrty from frame
#=======================================================================
if dxcol is None: dxcol = self.geo_dxcol
area = dxcol.loc['area',(place_code, finish_code)]
height = dxcol.loc['height',(place_code, finish_code)]
#=======================================================================
# calculate the geometries
#=======================================================================
if geo_code == 'inta':
per = dxcol.loc['per',(place_code, finish_code)]
att_value = float(area + height * per)
elif geo_code == 'per':
per = 4*math.sqrt(area)
att_value = float(per)
else: raise IOError
logger.debug(" for \'%s\' found %.2f"%(geo_code, att_value))
#=======================================================================
# post checks
#=======================================================================
if self.db_f:
for v in [area, height, per, att_value]:
if not 'float' in type(v).__name__:
raise IOError
if pd.isnull(v):
raise IOError
if not v >= 0:
raise IOError
return att_value
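#worked example (added, not original code): for a 100 m2 finished space with 2.4 m ceilings,
# per = 4*sqrt(100) = 40 m and inta = 100 + 2.4*40 = 196 m2 (floor plus wall area)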
def xxxrun_bsmt_egrd(self):
logger = self.logger.getChild('run_bsmt_egrd')
def get_geo_from_other(self, #set the garage area
df_raw, attn_search):
"""
we need this here to replicate the scaling done by the legacy curves on the garage dmg_feats
assuming column 1 is the cross reference data
"""
logger = self.logger.getChild('get_geo_from_other')
#=======================================================================
# find the cross reference row
#=======================================================================
cross_attn = df_raw.columns[0]
cross_v = getattr(self, cross_attn) #get our value for this
boolidx = df_raw.iloc[:,0] == cross_v #locate our cross reference
#=======================================================================
# find the search column
#=======================================================================
boolcol = df_raw.columns == attn_search
value_fnd = df_raw.loc[boolidx, boolcol].iloc[0,0] #just take the first
if self.db_f:
if not boolidx.sum() == 1:
raise IOError
if not boolcol.sum() == 1:
raise IOError
return value_fnd
def run_hse(self, wsl, **kwargs):
'TODO: compile the total dfunc and use that instead?'
logger = self.logger.getChild('run_hse')
hse_depth = wsl - self.anchor_el
self.run_cnt += 1
#=======================================================================
# precheck
#=======================================================================
"""todo: check that floods are increasing
if self.db_f:
if self.last_floodo is None:
pass"""
if self.db_f:
#full check
self.check_house()
#make sure you don't have any updates queued
if len(self.upd_cmd_od) > 0:
raise IOError
#=======================================================================
# basement egrade reset check
#=======================================================================
"""because the grid power changes on each flood, we need to re-calc this"""
if self.model.bsmt_egrd_code == 'plpm':
#always calc on the first time
if self.run_cnt ==1:
cond = self.set_bsmt_egrd()
elif not self.bsmt_f:
cond='nobsmt'
#some change! re-run the calc
elif not self.gpwr_f == self.floodo.gpwr_f:
cond = self.set_bsmt_egrd()
else:
cond = 'nochng'
logger.debug('no change in gpwr_f. keeping bsmt egrd = %s'%self.bsmt_egrd)
else:
cond = 'no_plpm'
#===============================================================
# write the beg history
#===============================================================
if not self.model.beg_hist_df is None:
self.model.beg_hist_df.loc[self.dfloc, (self.floodo.ari, 'egrd')] = self.bsmt_egrd
self.model.beg_hist_df.loc[self.dfloc, (self.floodo.ari, 'cond')] = cond
#=======================================================================
# calculate the results
#=======================================================================
#check for tiny depths
if hse_depth < self.model.hse_skip_depth:
logger.debug('depth below hse_obj.vuln_el setting fdmg=0')
dmg_ser = pd.Series(name = self.name, index = list(self.dfunc_d.keys()))
dmg_ser.loc[:] = 0.0
else:
logger.debug('returning get_dmgs_wsls \n')
dmg_ser = self.get_dmgs_wsls(wsl, **kwargs)
#=======================================================================
# wrap up
#=======================================================================
self.floodo = None #clear this
return dmg_ser
def get_dmgs_wsls(self, #get damage at this depth from each Dfunc
wsl,
dmg_rat_f = False, #flag to include damage ratios in the outputs
):
"""
#=======================================================================
# INPUTS
#=======================================================================
res_ser: shortcut so that damages are added to this series
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('get_dmgs_wsls')
id_str = self.get_id()
#=======================================================================
# precheck
#=======================================================================
#=======================================================================
# fast calc
#=======================================================================
if not dmg_rat_f:
dmg_ser = pd.Series(name = self.name, index = list(self.dfunc_d.keys()))
"""
logger.debug('\'%s\' at wsl= %.4f anchor_el = %.4f for %i dfuncs bsmt_egrd \'%s\'\n'
%(id_str, wsl, self.anchor_el, len(dmg_ser), self.bsmt_egrd))"""
for dmg_type, dfunc in self.kids_d.items():
logger.debug('getting damages for \'%s\' \n'%dmg_type)
#get the damge
_, dmg_ser[dmg_type], _ = dfunc.run_dfunc(wsl)
dfunc.get_results() #store these outputs if told
#=======================================================================
# full calc
#=======================================================================
else:
raise IOError #check this
dmg_df = pd.DataFrame(index = list(self.dfunc_d.keys()), columns = ['depth', 'dmg', 'dmg_raw'])
dmg_ser = pd.Series()
logger.debug('\'%s\' at wsl= %.4f anchor_el = %.4f for %i dfuncs bsmt_egrd \'%s\''
%(id_str, wsl, self.anchor_el, len(dmg_df), self.bsmt_egrd))
for indx, row in dmg_df.iterrows():
dfunc = self.kids_d[indx]
row['depth'], row['dmg'], row['dmg_raw'] = dfunc.run_dfunc(wsl)
dfunc.get_results() #store these outputs if told
#enter into series
dmg_ser[indx] = row['dmg']
dmg_ser['%s_rat'%indx] = row['dmg_raw']
#=======================================================================
# post checks
#=======================================================================
#=======================================================================
# wrap up
#=======================================================================
logger.debug('at %s finished with %i dfuncs queried and res_ser: \n %s \n'
%(self.model.tstep_o.name, len(self.kids_d), dmg_ser.values.tolist()))
return dmg_ser
def raise_total_dfunc(self, #compile the total dd_df and raise it as a child
dmg_codes = None, place_codes = None):
""" this is mostly used for debugging and comparing of curves form differnet methods
#=======================================================================
# todo
#=======================================================================
allow totaling by
possible performance improvement;
compile the total for all objects, then have Flood.get_dmg_set only run the totals
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('raise_total_dfunc')
if dmg_codes is None: dmg_codes = self.model.dmg_codes
if place_codes is None: place_codes = self.model.place_codes
tot_name = self.get_tot_name(dmg_codes) #build the name after the defaults are set
#=======================================================================
# get the metadata for the child
#=======================================================================
df_raw = self.session.pars_df_d['dfunc'] #start with the raw tab data
#search by placecode
boolidx1 = df_raw['place_code'] == 'total' #identify the 'total' entries
#search by dmg_code where all strings in the list are a match
boolidx2 = hp_pd.search_str_fr_list(df_raw['dmg_code'], dmg_codes, all_any='any') #find
if boolidx2.sum() <1:
logger.warning('unable to find a match in the dfunc tab for %s. using default'%tot_name)
boolidx2 = pd.Series(True, index = boolidx2.index) #all true
'todo: add some logic for only finding one of the damage codes'
#get this slice
boolidx = np.logical_and(boolidx1, boolidx2)
if not boolidx.sum() == 1:
logger.error('childmeta search boolidx.sum() = %i'%boolidx.sum())
raise IOError
att_ser = df_raw[boolidx].iloc[0]
"need to add the name here as we're not using the childname override"
logger.debug('for place_code: \'total\' and dmg_code: \'%s\' found child meta from dfunc_df'%(dmg_codes))
#=======================================================================
# raise the child
#=======================================================================
#set the name
child = self.spawn_child(att_ser = att_ser, childname = tot_name)
#=======================================================================
# #do custom edits for total
#=======================================================================
child.anchor_el = self.anchor_el
#set the dd_ar
dd_df = self.get_total_dd_df(dmg_codes, place_codes)
depths = dd_df['depth'].values - child.anchor_el #convert back to no datum
child.dd_ar = np.array([depths, dd_df['damage'].values])
#add this to the dictionary
self.kids_d[child.name] = child
logger.debug('copied and edited a child for %s'%child.name)
return child
def get_total_dd_df(self, dmg_codes, place_codes): #get the total dd_df (across all dmg_types)
logger = self.logger.getChild('get_total_dd_df')
#=======================================================================
# compile all the depth_damage entries
#=======================================================================
df_full = pd.DataFrame(columns = ['depth', 'damage_cum', 'source'])
# loop through and fill the df
cnt = 0
for datoname, dato in self.kids_d.items():
if not dato.dmg_code in dmg_codes: continue #skip this one
if not dato.place_code in place_codes: continue
cnt+=1
#===================================================================
# get the adjusted dd
#===================================================================
df_dato = pd.DataFrame() #blank frame
df_dato['depth'] = dato.dd_ar[0]+ dato.anchor_el #adjust the dd to the datum
df_dato['damage_cum'] = dato.dd_ar[1]
"""the native format of the dmg_ar is cumulative damages
to sum these, we need to back compute to incremental
"""
df_dato['damage_inc'] = hp_pd.get_incremental(df_dato['damage_cum'], logger=logger)
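#added example: a cumulative series [0, 10, 15] presumably becomes incremental [0, 10, 5],
#so summing increments at or below a depth recovers the cumulative total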
df_dato['source'] = datoname
#append these to the full
df_full = df_full.append(df_dato, ignore_index=True)
logger.debug('compiled all dd entries %s from %i dfuncs with dmg_codes: %s'
%(str(df_full.shape), cnt, dmg_codes))
df_full = df_full.sort_values('depth').reset_index(drop=True)
#=======================================================================
# harmonize this into a dd_ar
#=======================================================================
#get depths
depths_list = df_full['depth'].sort_values().unique().tolist()
#get starter frame
dd_df = pd.DataFrame(columns = ['depth', 'damage'])
dd_df['depth'] = depths_list #add in the depths
for index, row in dd_df.iterrows(): #sort through and sum by depth
boolidx = df_full['depth'] <= row['depth'] #identify all those entries in the full
row['damage'] = df_full.loc[boolidx, 'damage_inc'].sum() #add these as the sum
dd_df.iloc[index,:] = row #update the master
logger.debug('harmonized and compiled dd_df %s'%str(dd_df.shape))
self.dd_df = dd_df
return dd_df
def get_tot_name(self, dmg_codes): #return the equivalent tot name
'not sure whats going on here'
new_str = 'total_'
for dmg_code in dmg_codes: new_str = new_str + dmg_code
return new_str
def calc_statres_hse(self): #calculate statistics for the house (outside of a run)
"""
#=======================================================================
# CALLS
#=======================================================================
this is always called with mypost_update() executing each command in self.post_upd_func_s()
mypost_update() is called:
init_dyno() #first call before setting the OG values
session.post_update() #called at the end of all the update loops
"""
logger = self.logger.getChild('calc_statres_hse')
if self.acode_s == 'none':
"""
ToDo:
need to fix how we handle null assets:
acode_s='none':
this should be a place holder asset
only parcel attributes are read from the binv (parcel_area, asector)
all output attributes should be NULL
When we transition a 'none' to a real,
we should have some check to make sure we have all the attributes we need?
acode_c='none'
fine... only calc structural damages (empty asset).
"""
raise Error('not sure how this manifests on the outputers')
s = self.session.outpars_d[self.__class__.__name__]
#=======================================================================
# BS_ints
#=======================================================================
if 'BS_ints' in s:
"I don't like this as it requires updating the child as well"
"""rfda curves also have this stat
if self.dfunc_type == 'dfeats':"""
#update the kid
if not self.kids_d['BS'].calc_intg_stat():
raise IOError
self.BS_ints = self.kids_d['BS'].intg_stat
"""this is handled by set_og_vals()
if self.session.state == 'init':
self.reset_d['BS_ints'] = self.BS_ints"""
logger.debug('set BS_ints as %.4f'%self.BS_ints)
if 'vuln_el' in s:
self.set_vuln_el()
if 'max_dmg' in s:
self.max_dmg = self.get_max_dmg()
self.parent.childmeta_df.loc[self.dfloc, 'max_dmg'] = self.max_dmg #set into the binv_df
if 'dummy_cnt' in s:
cnt = 0
for dfunc in self.kids_d.values():
if dfunc.dummy_f:
cnt+=1
self.dummy_cnt = cnt
if 'kid_nm_t' in s:
self.kid_nm_t = tuple([kid.get_tag() for kid in self.kids_d.values()])
if 'max_dmg_nm' in s:
d = dict()
for name, dfunc in self.kids_d.items():
if dfunc.dummy_f:
d[dfunc.get_tag()] = 'dummy'
else:
d[dfunc.get_tag()] = "{:,.1f}".format(max(dfunc.dd_ar[1]))
self.max_dmg_nm = str(d)
if 'beg_hist' in s and (not self.model.beg_hist_df is None):
"""view(self.model.beg_hist_df)"""
self.beg_hist = str(self.model.beg_hist_df.loc[self.dfloc,:].dropna().to_dict())
return True
def set_vuln_el(self): #calculate the minimum vulnerability elevation
"""
#=======================================================================
# CALLS
#=======================================================================
TODO: consider including some logic for bsmt_egrade and spill type
"""
#=======================================================================
# check frozen and dependenceis
#=======================================================================
logger = self.logger.getChild('set_vuln_el')
"""this is a stat, not a dynamic par
if self.is_frozen('vuln_el', logger=logger): return True"""
vuln_el = 99999 #starter value
for dmg_type, dfunc in self.kids_d.items():
if dfunc.dummy_f:
continue #skip these
else:
vuln_el = min(dfunc.anchor_el, vuln_el) #update with new minimum
logger.debug('set vuln_el = %.2f from %i dfuncs'%(vuln_el, len(self.kids_d)))
if vuln_el == 99999:
vuln_el = np.nan
self.vuln_el = vuln_el
return True
def get_max_dmg(self): #calculate the maximum damage for this house
#logger = self.logger.getChild('get_max_dmg')
#=======================================================================
# precheck
#=======================================================================
if self.db_f:
#loop and check dummies
for dmg_type, dfunc in self.kids_d.items():
if not dfunc.dummy_f:
if not len(dfunc.dd_ar)==2:
raise Error('%s.%s is real but got unexpected dd_ar length: %i'
%(self.name, dfunc.name, len(dfunc.dd_ar)))
#=======================================================================
# calcs
#=======================================================================
max_dmg = 0
for dfunc in self.kids_d.values():
if not dfunc.dummy_f:
max_dmg+= dfunc.dd_ar[1].max()
return max_dmg
"""sped this up
ser = pd.Series(index = list(self.kids_d.keys()))
#=======================================================================
# collect from each dfunc
#=======================================================================
for dmg_type, dfunc in self.kids_d.items():
try:
ser[dmg_type] = dfunc.dd_ar[1].max()
except: #should only trip for unreal baseements
ser[dmg_type] = 0.0
if self.db_f:
if self.bsmt_f:
raise Error('failed to get max damage and I have a basement')
return ser.sum()"""
def plot_dd_ars(self, #plot each dfunc on a single axis
datum='house', place_codes = None, dmg_codes = None, plot_tot = False,
annot=True, wtf=None, title=None, legon=False,
ax=None,
transparent = True, #flag to indicate whether the figure should have a transparent background
**kwargs):
"""
#=======================================================================
# INPUTS
#=======================================================================
datum: code to indicate what datum to plot the depth series of each dd_ar
None: raw depths (all start at zero)
real: depths relative to the project datum
house: depths relative to the hse_obj anchor (generally Main = 0)
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('plot_dd_ars')
if wtf is None: wtf = self.session._write_figs
if dmg_codes is None: dmg_codes = self.model.dmg_codes
if place_codes is None: place_codes = self.model.place_codes
if title is None:
title = 'plot_dd_ars on %s for %s and %s'%(self.name, dmg_codes, place_codes)
if plot_tot: title = title + ' and T'
'this should let the first plotter setup the axis '
logger.debug('for \n dmg_codes: %s \n place_codes: %s'%(dmg_codes, place_codes))
#=======================================================================
# plot the dfuncs that fit the criteria
#=======================================================================
dfunc_nl = [] #list of dfunc names fitting criteria
for datoname, dato in self.dfunc_d.items():
if not dato.dmg_code in dmg_codes: continue
if not dato.place_code in place_codes: continue
ax = dato.plot_dd_ar(ax=ax, datum = datum, wtf=False, title = title, **kwargs)
dfunc_nl.append(dato.name)
#=======================================================================
# add the total plot
#=======================================================================
if plot_tot:
#get the dato
tot_name = self.get_tot_name(dmg_codes)
if not tot_name in list(self.kids_d.keys()): #build it
'name searches should still work'
tot_dato = self.raise_total_dfunc(dmg_codes, place_codes)
else:
tot_dato = self.kids_d[tot_name]
#plot the dato
ax = tot_dato.plot_dd_ar(ax=ax, datum = datum, wtf=False, title = title, **kwargs)
#=======================================================================
# add annotation
#=======================================================================
if not annot is None:
if annot:
"""WARNING: not all attributes are generated for the differnt dfunc types
"""
B_f_height = float(self.geo_dxcol.loc['height',('B','f')]) #pull from frame
annot_str = 'acode = %s\n'%self.acode +\
' gis_area = %.2f m2\n'%self.gis_area +\
' anchor_el = %.2f \n'%self.anchor_el +\
' dem_el = %.2f\n'%self.dem_el +\
' B_f_height = %.2f\n'%B_f_height +\
' bsmt_egrd = %s\n'%self.bsmt_egrd +\
' AYOC = %i\n \n'%self.ayoc
#add info for each dfunc
for dname in dfunc_nl:
dfunc = self.dfunc_d[dname]
annot_str = annot_str + annot_builder(dfunc)
else: annot_str = annot
#=======================================================================
# Add text string 'annot' to lower left of plot
#=======================================================================
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
x_text = xmin + (xmax - xmin)*.7 # 7/10 of the way along the x axis
y_text = ymin + (ymax - ymin)*.01 # just above the bottom axis
anno_obj = ax.text(x_text, y_text, annot_str)
#=======================================================================
# save figure
#=======================================================================
if wtf:
"""
self.outpath
"""
fig = ax.figure
flag = hp.plot.save_fig(self, fig, dpi = self.dpi, legon=legon, transparent = transparent)
if not flag: raise IOError
logger.debug('finished as %s'%title)
return ax
def write_all_dd_dfs(self, tailpath = None): #write all the children's dd_dfs
if tailpath is None: tailpath = os.path.join(self.outpath, self.name)
if not os.path.exists(tailpath): os.makedirs(tailpath)
for gid, childo in self.kids_d.items():
if not childo.dfunc_type == 'dfeats': continue #skip this one
filename = os.path.join(tailpath, childo.name + ' dd_df.csv')
childo.recompile_dd_df(outpath = filename)
```
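The `run_hse()` entry point above guards the per-dfunc damage query with a skip-depth check. A minimal, self-contained sketch of that guard (not from the source; `hse_skip_depth` and the toy damage callables are stand-ins for the model's Dfunc objects):
```python
import pandas as pd

def run_hse_sketch(wsl, anchor_el, dfunc_d, hse_skip_depth=0.0):
    """Return a per-dfunc damage Series, zeroed when the house sits above the water."""
    hse_depth = wsl - anchor_el  # depth relative to the house anchor elevation
    if hse_depth < hse_skip_depth:  # too shallow: skip the dfuncs entirely
        return pd.Series(0.0, index=list(dfunc_d.keys()))
    return pd.Series({k: f(wsl) for k, f in dfunc_d.items()})

# usage: two toy linear damage functions anchored at elevation 10.0
print(run_hse_sketch(12.0, 10.0, {'BS': lambda w: 2*(w - 10), 'CS': lambda w: w - 10}))
```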
#### File: sofda/hp/sci.py
```python
import logging, os, sys, imp, time, math, re, copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats
from collections import OrderedDict
from weakref import proxy
#===============================================================================
# import other helpers
#===============================================================================
import hp.plot2
import hp.plot #save_fig() and Plot_o are referenced below
import model.sofda.hp.basic as hp_basic
import model.sofda.hp.np as hp_np
import model.sofda.hp.pd as hp_pd #needed by Boolean_1D.bool_to_num(); path assumed to mirror the other hp imports
import model.sofda.hp.oop as hp_oop
import model.sofda.hp.data as hp_data
mod_logger = logging.getLogger(__name__)
#class Fit_func(hp_data.Data_o): #thin wrapper for regressions
class Data_func(hp_data.Data_wrapper,
hp.plot2.Plotr,
hp_oop.Child): #for analysis by data type
#===========================================================================
# regressions
#===========================================================================
dfunc = None #placeholder for callable function that takes a set of independent values and returns dependent ones
fits_od = OrderedDict() #dictionary of regression fit children (note: class-level attribute, shared across instances unless reassigned)
#===========================================================================
# fit plotting formatters
#===========================================================================
fit_color = 'red'
fit_alpha = 0.6
fit_lw = 3
fit_linestyle = 'solid'
fit_markersize = 0
units = 'none'
#===========================================================================
# object handling overrides
#===========================================================================
def __init__(self, parent = None, session = None, name = 'datafunc'):
self.name = name
self.parent = parent
self.session = session
#initialize the baseclass
self.label = self.name + '(%s)'%self.units
if not parent is None:
self.inherit_logr(parent)
else:
self.logger = mod_logger
def clean_data(self, raw_data):
'placeholder'
return raw_data
def calc_stats(self): #update the stats from the data
data = self.data
self.min = data.min()
self.max = data.max()
self.mean = data.mean()
self.var = data.var()
self.stat_str = 'min: %.2f, max = %.2f, mean = %.2f, var = %.2f'\
%(self.min, self.max, self.mean, self.var)
self.logger.debug(self.stat_str)
def spawn_fit(self, kid_class=None, childname = None, **kwargs):
#=======================================================================
# defaults
#=======================================================================
if kid_class is None: kid_class = hp.plot.Plot_o
if childname is None: childname = '%s %s fit'%(self.name, self.fit_name)
#spawn the child
child = self.spawn_child(childname = childname,
kid_class = kid_class, **kwargs)
#give it the datat function
child.dfunc = self.dfunc
#pass down the correct attributes
child.units = self.units
#give it the formatters
child.color = self.fit_color
child.alpha = self.fit_alpha
child.lineweight = self.fit_lw
child.linestyle = self.fit_linestyle
child.markersize = self.fit_markersize
self.fits_od[child.name] = child
return child
class Continuous_1D(Data_func): #set of 1d continuous data
rv = None #scipy random variable placeholder
fit_name = None #placeholder for the type of fit applied
def clean_data(self, ar_raw): #clean the data
logger = self.logger.getChild('clean_data')
if not hp_np.isar(ar_raw):
try:
ar1 = ar_raw.values
if not hp_np.isar(ar1): raise ValueError
except:
self.logger.error('failed to convert to array')
raise IOError
else: ar1 = copy.deepcopy(ar_raw) #just get a copy
#dimensional check
ar2 = hp_np.make_1D(ar1, logger = self.logger)
ar3 = hp_np.dropna(ar2, logger = self.logger)
ar_clean = ar3
logger.debug('cleaned %s to %s'%(str(ar_raw.shape), str(ar_clean.shape)))
return ar_clean
def fit_norm(self): #fit and freeze a normal distribution to this
logger = self.logger.getChild('fit_norm')
self.fit_name = 'norm'
logger.debug('fitting a normal distribution to data')
#get the normal dist parameters for this data
pars = scipy.stats.norm.fit(self.data)
#=======================================================================
# check the parameters
#=======================================================================
if np.isnan(pars[0]): raise IOError
if not len(pars) == 2: raise IOError #normal distribution should only return 2 pars
#freeze a distribution with these parameters
self.rv = scipy.stats.norm(loc = pars[0], scale = pars[1])
logger.info('froze dist with pars: %s '%str(pars))
self.pars = pars
return
def fit_lognorm(self): #placeholder: lognormal fit not yet implemented
logger = self.logger.getChild('fit_lognorm')
def plot_pdf(self, ax=None, title = None,wtf=None, annot_f=False,
color = 'red', alpha = 0.6, lw = 3, label = None,
outpath = None): #create a plot of the pdf
"""
I've created a separate plot function (from hp.plot) as this is a curve fit to the data... not the data itself
"""
#=======================================================================
# defaults
#=======================================================================
if self.rv is None: raise IOError
if wtf is None: wtf = self.session._write_figs
if label is None: label = self.fit_name + ' pdf'
rv = self.rv
logger = self.logger.getChild('plot_pdf')
logger.debug('plotting with ax = \'%s\''%ax)
#=======================================================================
# setup plot
#=======================================================================
if ax is None:
plt.close()
fig = plt.figure(1)
fig.set_size_inches(self.figsize)
ax = fig.add_subplot(111)
if title is None: title = self.name + ' '+ self.fit_name + ' pdf plot'
ax.set_title(title)
ax.set_ylabel('likelihood')
ax.set_xlabel(self.label)
else:
fig = ax.figure
xmin, xmax = ax.get_xlim()
#=======================================================================
# data setup
#=======================================================================
x = np.linspace(rv.ppf(0.001), rv.ppf(0.999), 200) #dummy x values for plotting
#=======================================================================
# plot
#=======================================================================
pline = ax.plot(x, rv.pdf(x),
lw = lw, alpha = alpha, label = label, color=color)
if annot_f:
max1ind = np.argmax(rv.pdf(x)) #index of the first occurrence of the max value
max_x = x[max1ind]
"""
boolmax = max(rv.pdf(x)) == rv.pdf(x)
boolmax = x[np.argmax(rv.pdf(x))]
try:
max_x = float(x[boolmax])
except:
max_x = 0.00"""
annot = '%s dist \n'%self.rv.dist.name +\
r'$\mu=%.2f,\ \sigma=%.2f$, max=%.2f'%(self.rv.kwds['loc'], self.rv.kwds['scale'], max_x)
#add the shape parameter for 3 par functions
if len(self.rv.args) > 0:
annot = annot + '\n shape = %.2f'%self.rv.args[0]
#=======================================================================
# Add text string 'annot' to lower left of plot
#=======================================================================
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
x_text = xmin + (xmax - xmin)*.5 # midway along the x axis
y_text = ymin + (ymax - ymin)*.5 # midway up the y axis
anno_obj = ax.text(x_text, y_text, annot)
logger.debug('finished')
if wtf:
try:
self.save_fig(fig, outpath=outpath)
except:
logger.warning('failed to save figure')
"""
plt.show()
"""
return ax
def plot_fit(self, bins = None, #plot the fit curve and the data
ax = None, title=None, wtf=None,
**kwargs):
#=======================================================================
# defaults
#=======================================================================
if self.rv is None: raise IOError
if wtf is None: wtf = self.session._write_figs
rv = self.rv
logger = self.logger.getChild('plot_fit')
#=======================================================================
# setup plot
#=======================================================================
if ax is None:
plt.close()
fig = plt.figure(1)
fig.set_size_inches(self._figsize)
ax = fig.add_subplot(111)
if title is None: title = self.name + ' '+ self.fit_name + ' fit plot'
ax.set_title(title)
ax.set_ylabel('likelihood')
ax.set_xlabel(self.label)
else:
fig = ax.figure
xmin, xmax = ax.get_xlim()
#=======================================================================
# setup annotation
#=======================================================================
annot = r'n = %i, $\mu=%.2f,\ \sigma^2=%.2f$'%(len(self.data), self.mean, self.var)
#=======================================================================
# plot the data
#=======================================================================
ax = self.plot_data_hist(normed=True, bins = bins,
ax=ax, title=title, wtf=False, annot = annot,
**kwargs)
ax = self.plot_pdf(ax=ax, title = title, wtf=False)
#=======================================================================
# post formatting
#=======================================================================
if wtf:
flag = hp.plot.save_fig(self, fig, dpi = self.dpi, legon = True)
if not flag: raise IOError
logger.info('finished')
class Boolean_1D(Data_func): #set of 1d boolean data
reg = None #LogisticRegression from sklearn
fit_name = None #placeholder for the type of fit applied
data2_o = None #partenr data to compare against
data_int = None #boolean data converted to integers
logit_solvers = ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga']
def data_setup(self, data2_o): #basic cleaning and data setup
if data2_o is None: data2_o = self.data2_o
if data2_o is None: raise IOError
self.data2_o = data2_o
if self.data_int is None: self.bool_to_num()
#=======================================================================
# combine data into frame
#=======================================================================
df1 = pd.DataFrame(index = self.data_int.index)
df1['bool'] = self.data
df1['int'] = self.data_int.values
df1['data2'] = data2_o.data
self.df_raw = df1 #attach this
#=======================================================================
# clean
#=======================================================================
df2 = df1.dropna(axis='index', how='any')
self.df_clean = df2
#=======================================================================
# data setup
#=======================================================================
self.dep_ar = df2.loc[:,'int'].astype(np.int).values
self.ind_ar = df2.loc[:,'data2'].astype(np.int).values
return
def bool_to_num(self): #convert the boolean data to numeric
if not hp_pd.isser(self.data): raise IOError
self.data = self.data.astype(np.bool) #convert the original data to boolean
self.data_int = self.data.astype(np.int)
def fit_LogisticRegression(self, data2_o=None, target = 'int',
solver = 'newton-cg', verbose=2): #fit the data to a logit model
"""
#=======================================================================
# INPUTS
#=======================================================================
target: what header in the clean_df to use as the target array
"""
#=======================================================================
# set defaults
#=======================================================================
logger = self.logger.getChild('fit_LogisticRegression')
self.data_setup(data2_o)
#if not dep_ar.shape == ind_ar.shape: raise IOError
df = self.df_clean
#=======================================================================
# build the model
#=======================================================================
import sklearn.linear_model
#get the train/target data from the clean frame
train_ar = df['data2'].values.reshape(-1,1)
target_ar = df[target].values.ravel() #sklearn expects a 1-D target
#initialize the model
reg = sklearn.linear_model.LogisticRegression(solver = solver, verbose=verbose)
#fit to the data
reg = reg.fit(train_ar, target_ar)
self.reg = reg
self.fit_name = 'LogisticRegression'
#=======================================================================
# get equilvanet pars
#=======================================================================
'doing this here for plotting annot'
self.loc = self.data2_o.data.min() #this looks good. closest yet
self.scale = 1.0/float(self.reg.coef_) #looks pretty good
#=======================================================================
# create a new child for this
#=======================================================================
child = self.spawn_fit()
child.reg = self.reg #give it the regression
child.data2_o = self.data2_o #give it the indepdendnet dato
#=======================================================================
# wrap up and report
#=======================================================================
logger.info('finished with coef_ = %.2f, intercept_ = %.2f, n_iter_ = %.2f'
%(reg.coef_ , reg.intercept_ , reg.n_iter_ ))
return child
def try_all_logit_solvers(self): #plot results for all solver methods
logger = self.logger.getChild('try_all_logit_solvers')
success = []
for solver in self.logit_solvers:
try:
reg = self.fit_LogisticRegression(solver = solver, verbose=0)
title = self.name + ' logit on \'%s\' with \'%s\''%(self.data2_o.name, solver)
ax = self.plot_fit(title = title) #make the plot
success.append(solver)
except:
logger.error('failed on \'%s\''%solver)
logger.info('finished with %i (of %i) successful solvers: %s'%(len(success), len(self.logit_solvers), success))
def dfunc(self, x):
y = self.reg.predict_proba(x.reshape(-1,1))[:,1]
if not x.shape == y.shape:
raise IOError
return y
def build_scipy_equil(self, type = 'cdf'): #build an equivalent scipy logistic (or sech-squared) continuous random variable.
"""
because we need a more sophisticated linear model to fit to the boolean data
we use sklearn to train the model
However, there doesn't seem to be a good way to incorporate a simply parameterized sklearn model into ABMRI
SOLUTION:
use the coefficients from the sklearn training to parameterize a simple scipy logistic curve
"""
logger = self.logger.getChild('build_scipy_equil')
if self.reg is None: raise IOError
#=======================================================================
# get the equivalent parameters
#=======================================================================
#=======================================================================
# loc = self.data2_o.data.min() #this looks good. closest yet
# scale = 1.0/float(self.reg.coef_) #looks pretty good
#=======================================================================
'pull from the fit_LogisticRegression'
loc, scale = self.loc, self.scale #attach these
#get a frozen func
self.rv = scipy.stats.logistic(loc = loc, scale = scale)
logger.info('parameterized a scipy.stats.logistic with loc = %.2f and scale = %.2f'%(loc, scale))
#=======================================================================
# create a new child for this
#=======================================================================
childname = '%s %s fit'%(self.name, 'scipy.stats.logistic')
child = self.spawn_fit(childname = childname)
#attach the attributes
child.data2_o = self.data2_o #give it the indepdendnet dato
child.rv = self.rv #attach teh frozen curve
#=======================================================================
# child.loc = self.loc
# child.scale = self.scale
#=======================================================================
#=======================================================================
# attach the function
#=======================================================================
def cdf(x):
'no need to pass loc and scale as the curve is frozen'
return self.rv.cdf(x)
def pdf(x):
return self.rv.pdf(x)
if type == 'cdf': child.dfunc = cdf
elif type == 'pdf': child.dfunc = pdf
#=======================================================================
# attach the formatter overrides
#=======================================================================
'most of the formatters are applied during spawn_fit'
child.linestyle = 'dashed'
return child
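#added note (illustrative, not original code): for the frozen scipy curve to reproduce
#sklearn's fitted probabilities exactly, the closed-form mapping (single feature, coef_ > 0) is:
# loc = -float(reg.intercept_) / float(reg.coef_)
# scale = 1.0 / float(reg.coef_)
#since predict_proba(x)[:,1] = 1/(1 + exp(-(coef_*x + intercept_)))
# = scipy.stats.logistic.cdf(x, loc=-intercept_/coef_, scale=1/coef_)
#the loc = data.min() heuristic used above approximates this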
def plot_asnum(self, data2_o=None,
title = None,
ax=None, wtf=None, **kwargs): #plot the raw data converting bools to integers
logger = self.logger.getChild('plot_asnum')
if data2_o is None: data2_o = self.data2_o
if data2_o is None: raise IOError
#=======================================================================
# data setup
#=======================================================================
if self.data_int is None: self.bool_to_num()
dep_ar = self.data_int
#=======================================================================
# formatting
#=======================================================================
if title is None: title ='%s vs %s plot'%(self.name, data2_o.name)
#=======================================================================
# send for plotting
#=======================================================================
ax = self.parent.plot(self, indp_dato = data2_o, dep_ar = dep_ar,
linewidth = 0,
title = title, ax = ax, wtf=wtf, **kwargs)
"""
data2_o.name
plt.show()
"""
return ax
def plot_probs(self, data2_o=None, fit_o = None,
title = None, ax=None, wtf=None, **kwargs): #plot the raw data converting bools to integers
logger = self.logger.getChild('plot_probs')
if data2_o is None: data2_o = self.data2_o
if data2_o is None: raise IOError
#get the fitter child
if fit_o is None:
if not len(self.fits_od) == 1:
logger.warning('expected exactly one fit, found %i. taking the first'%len(self.fits_od))
fit_o = list(self.fits_od.values())[0]
#=======================================================================
# data setup
#=======================================================================
x = np.linspace(data2_o.data.min(), data2_o.data.max(), 100) #dummy x values for plotting
#=======================================================================
# formatting
#=======================================================================
if title is None: title ='%s %s prob plot'%(self.name, self.fit_name)
logger.debug('%s with x: %s'%(title, x.shape))
ax = self.plot(fit_o, indp_dato = data2_o, indp_ar = x,
title = title,
ax = ax, wtf=wtf, **kwargs)
return ax
def plot_fit(self, title = None, wtf=None,ax = None):
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('plot_fit')
if wtf is None: wtf = self.session._write_figs
#=======================================================================
# formatters
#=======================================================================
if title is None: title = 'plot %s fit of \'%s\' to \'%s\''%(self.fit_name, self.name,self.data2_o.name)
annot = 'LogisticRegression coefs: \nn_iter_ = %i, coef_ = %.2e, intercept_ = %.2f, xmin = %.2f \n' \
%(self.reg.n_iter_, self.reg.coef_, self.reg.intercept_, self.data2_o.data.min()) \
+ 'loc = %.2f, scale = %.2f'%(self.loc, self.scale)
#=======================================================================
# plot the raw data
#=======================================================================
ax = self.plot_asnum(ax = None, wtf = False, title = title, annot = annot)
ax = self.plot_probs(ax = ax, wtf = False)
#=======================================================================
# setup the synthetic data
#=======================================================================
plt.legend() #turn the legend on
"""
plt.show()
reg = self.reg
from scipy.stats import logistic
x = np.linspace(self.data2_o.data.min(), self.data2_o.data.max(), 100) #dummy x values for plotting
import math.exp
for value in [reg.n_iter_, reg.coef_, reg.intercept_]:
value = float(value)
#print value
print 1.0/value
print math.exp(value)
print 1.0/math.exp(value)
print math.exp(-value)
print 1.0/math.exp(-value)
print math.exp(1.0/value)
print math.exp(-1.0/value)
try:
1/print math.log(value)
1/print math.log(1/value)
except:
print ('failed on %.2f'%value)
pass
int()
1.0/int(reg.n_iter_)
loc =
scale = float(self.reg.coef_) #way too low
reg.get_params() #nothing useful
scale = -float(self.reg.intercept_) #bad
float(self.reg.intercept_) #bad
ax.plot(x,ylogistic ,'b-', lw=1, alpha=0.6, label='logistic pdf')
self.reg.coef_
self.reg.intercept_
logger.info('finished with coef_ = %.2f, intercept_ = %.2f, n_iter_ = %.2f'
%(reg , , reg.n_iter_ ))
"""
if wtf:
fig = ax.figure
flag = hp.plot.save_fig(self, fig, dpi = self.dpi)
if not flag: raise IOError
return ax
```
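`Continuous_1D.fit_norm()` above relies on scipy's fit-then-freeze idiom. A minimal standalone sketch of that idiom on synthetic data (not part of sofda):
```python
import numpy as np
import scipy.stats

data = np.random.default_rng(0).normal(loc=5.0, scale=2.0, size=500)
loc, scale = scipy.stats.norm.fit(data)      # MLE estimates of the two parameters
rv = scipy.stats.norm(loc=loc, scale=scale)  # freeze a distribution with them
print('fit pars: %.2f, %.2f' % (loc, scale))
print('plot range: %.2f to %.2f' % (rv.ppf(0.001), rv.ppf(0.999)))  # x-range used by plot_pdf
```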
#### File: canflood/results/riskPlot.py
```python
import logging, configparser, datetime
#==============================================================================
# imports------------
#==============================================================================
import os
import numpy as np
import pandas as pd
#==============================================================================
# # custom
#==============================================================================
from hlpr.exceptions import QError as Error
from hlpr.basic import view
from model.riskcom import RiskModel
#from hlpr.plot import Plotr
#==============================================================================
# functions-------------------
#==============================================================================
class RiskPlotr(RiskModel): #expanded plotting for risk models
"""
inherited by
results.compare.Cmpr
results.attribution.Attr
"""
#===========================================================================
# expectations from parameter file
#===========================================================================
exp_pars_md = {
'results_fps':{
'r_ttl':{'ext':('.csv',)},
}
}
exp_pars_op={
'results_fps':{
'r_passet':{'ext':('.csv',)},
}
}
#===========================================================================
# controls
#===========================================================================
#===========================================================================
# defaults
#===========================================================================
def __init__(self,**kwargs):
super().__init__(**kwargs) #initialize the baseclass
self.dtag_d={**self.dtag_d,**{
'r_ttl':{'index_col':None}}}
self.logger.debug('%s.__init__ w/ feedback \'%s\''%(
self.__class__.__name__, type(self.feedback).__name__))
def prep_model(self):
self.set_ttl() #load and prep the total results
#set default plot text
self._set_valstr()
return
def plot_mRiskCurves(self, #single plot w/ risk curves from multiple scenarios
parsG_d, #container of data and plot handles
#{cName:{
#ttl_df:df to plot
#ead_tot:total ead value (for label)
#impStyle_d: kwargs for semilogx
y1lab='AEP', #yaxis label and plot type control
#'impacts': impacts vs. ARI (use self.impact_name)
#'AEP': AEP vs. impacts
impactFmtFunc=None, #tick label format function for impact values
#lambda x:'{:,.0f}'.format(x)
legendTitle=None,
val_str='*no', #text to write on plot. see _get_val_str()
figsize=None, logger=None, plotTag=None,
):
"""
called by
Attr.plot_slice() #a data slice against the total
Cmpr.riskCurves() #a set of totals (scenarios)
"""
#======================================================================
# defaults
#======================================================================
if logger is None: logger=self.logger
log = logger.getChild('multi')
plt, matplotlib = self.plt, self.matplotlib
if figsize is None: figsize=self.figsize
if y1lab =='impacts':
y1lab = self.impact_name
if impactFmtFunc is None:
impactFmtFunc=self.impactFmtFunc
assert callable(impactFmtFunc)
if plotTag is None: plotTag=self.tag
#=======================================================================
# pre-data manip: collect all the impacts ari data into one
#=======================================================================
"""makes it easier for some operations
still plot on each individually"""
first = True
for cName, cPars_d in parsG_d.items():
#check keys
miss_l = set(['ttl_df', 'impStyle_d']).difference(cPars_d.keys())
assert len(miss_l)==0, '\'%s\' missing keys: %s'%(cName, miss_l)
#check data
cdf = cPars_d['ttl_df'].copy()
#check columns
miss_l = set(['aep', 'impacts', 'ari', 'plot']).difference(cdf.columns)
assert len(miss_l)==0, '\'%s\' missing columns: %s'%(cName, miss_l)
#drop to just the data (and rename)
cdf = cdf.loc[cdf['plot'],:].loc[:,('ari','impacts')].rename(columns={'impacts':cName})
#get index columns from first
if first:
all_df = cdf.copy()
first = False
else:
#add data
all_df = all_df.merge(cdf, how='outer', on='ari')
#add back in aep
all_df['aep'] = 1/all_df['ari']
#move these to the index for quicker operations
all_df = all_df.set_index(['aep', 'ari'], drop=True)
#======================================================================
# labels
#======================================================================
if y1lab == 'AEP':
title = '%s AEP-Impacts plot for %i scenarios'%(plotTag, len(parsG_d))
xlab=self.impact_name
elif y1lab == self.impact_name:
title = '%s Impacts-ARI plot for %i scenarios'%(plotTag, len(parsG_d))
xlab='ARI'
else:
raise Error('bad y1lab: %s'%y1lab)
#======================================================================
# figure setup
#======================================================================
"""
plt.show()
"""
plt.close()
fig = plt.figure(figsize=figsize, constrained_layout = True)
#axis setup
ax1 = fig.add_subplot(111)
# axis label setup
fig.suptitle(title)
ax1.set_ylabel(y1lab)
#ax2.set_ylabel(y2lab)
ax1.set_xlabel(xlab)
#======================================================================
# fill the plot----
#======================================================================
first = True
#ead_d=dict()
for cName, cPars_d in parsG_d.items():
#pull values from container
cdf = cPars_d['ttl_df'].copy()
cdf = cdf.loc[cdf['plot'], :] #drop from handles
#hatching
if 'hatch_f' in cPars_d:
hatch_f=cPars_d['hatch_f']
else:
hatch_f=False
#labels
if 'label' in cPars_d:
label = cPars_d['label']
else:
if 'ead_tot' in cPars_d:
label = '\'%s\' annualized = '%cName + impactFmtFunc(float(cPars_d['ead_tot']))
else:
label = cName
#add the line
self._lineToAx(cdf, y1lab, ax1, impStyle_d=cPars_d['impStyle_d'],
hatch_f=hatch_f, lineLabel=label)
#ead_d[label] = float(cPars_d['ead_tot']) #add this for constructing the
#set limits
if y1lab==self.impact_name:
ax1.set_xlim(max(all_df.index.get_level_values('ari')), 1) #ARI x-axis limits
else:
ax1.set_xlim(0, all_df.max().max())
ax1.set_ylim(0, max(all_df.index.get_level_values('aep'))*1.1)
#=======================================================================
# post format
#=======================================================================
#legend
h1, l1 = ax1.get_legend_handles_labels()
#legLab_d = {e:'\'%s\' annualized = '%e + impactFmtFunc(ead_d[e]) for e in l1}
val_str = self._get_val_str(val_str)
#legendTitle = self._get_val_str('*default')
self._postFmt(ax1,
            val_str=val_str, #putting in legend title
legendHandles=(h1, l1),
#xLocScale=0.8, yLocScale=0.1,
legendTitle=legendTitle,
)
#=======================================================================
# val_str = self._get_val_str(val_str, impactFmtFunc)
# self._postFmt(ax1, val_str=val_str)
#=======================================================================
#assign tick formatter functions
if y1lab == 'AEP':
xfmtFunc = impactFmtFunc
yfmtFunc=lambda x:'%.4f'%x
elif y1lab==self.impact_name:
            xfmtFunc = lambda x:'{:,.0f}'.format(x) #thousands separator
yfmtFunc=impactFmtFunc
self._tickSet(ax1, xfmtFunc=xfmtFunc, yfmtFunc=yfmtFunc)
return fig
def plot_stackdRCurves(self, #single plot with stacks of risk components for single scenario
dxind, #mindex(aep, ari), columns: one stack or component
sEAD_ser, #series with EAD data for labels
y1lab='AEP',
#hatch format
h_alpha = 0.9,
figsize=None, impactFmtFunc=None, plotTag=None,
val_str=None,
logger=None,):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log = logger.getChild('plot_stack')
plt, matplotlib = self.plt, self.matplotlib
if figsize is None: figsize=self.figsize
if y1lab =='impacts':
y1lab = self.impact_name
if impactFmtFunc is None:
impactFmtFunc=self.impactFmtFunc
if h_alpha is None: h_alpha=self.h_alpha
if plotTag is None: plotTag=self.plotTag
if val_str is None:
val_str = 'ltail=\'%s\', rtail=\'%s\''%(self.ltail, self.rtail) + \
                '\nevent_rels = \'%s\', prec = %i'%(self.event_rels, self.prec)
#=======================================================================
# prechecks
#=======================================================================
#expectations on stacked data
mindex=dxind.index
assert isinstance(mindex, pd.MultiIndex)
assert np.array_equal(np.array(['aep', 'ari']), mindex.names)
nameRank_d= {lvlName:i for i, lvlName in enumerate(mindex.names)}
if isinstance(sEAD_ser, pd.Series):
miss_l = set(sEAD_ser.index).symmetric_difference(dxind.columns)
assert len(miss_l)==0, 'mismatch on plot group names'
#======================================================================
# labels
#======================================================================
val_str = self._get_val_str(val_str, impactFmtFunc)
if y1lab == 'AEP':
title = '%s %s AEP-Impacts plot for %i stacks'%(self.tag, plotTag, len(dxind.columns))
xlab=self.impact_name
elif y1lab == self.impact_name:
title = '%s %s Impacts-ARI plot for %i stacks'%(self.tag, plotTag, len(dxind.columns))
xlab='ARI'
else:
raise Error('bad y1lab: %s'%y1lab)
#=======================================================================
# data prep
#=======================================================================
dxind = dxind.sort_index(axis=0, level=0)
mindex = dxind.index
#=======================================================================
# figure setup
#=======================================================================
"""
plt.show()
"""
plt.close()
fig = plt.figure(figsize=figsize, constrained_layout = True)
#axis setup
ax1 = fig.add_subplot(111)
#ax2 = ax1.twinx()
# axis label setup
fig.suptitle(title)
ax1.set_ylabel(y1lab)
ax1.set_xlabel(xlab)
#=======================================================================
# plot line
#=======================================================================
if y1lab == 'AEP':
"""I dont see any native support for x axis stacks"""
yar = mindex.levels[nameRank_d['aep']].values
xCum_ar = 0
for colName, cser in dxind.items():
ax1.fill_betweenx(yar, xCum_ar, xCum_ar+cser.values, label=colName,
lw=0, alpha=h_alpha)
xCum_ar +=cser.values
elif y1lab == self.impact_name:
#ARI values (ascending?)
xar = np.sort(mindex.levels[nameRank_d['ari']].values)
#transpose, and ensure sorted
yar = dxind.T.sort_index(axis=1, level='ari', ascending=True).values
#plot the stack
ax1.stackplot(xar, yar, baseline='zero', labels=dxind.columns,
alpha=h_alpha, lw=0)
ax1.set_xscale('log')
#set limits
if y1lab == 'AEP':
ax1.set_xlim(0, max(xCum_ar)) #aep limits
ax1.set_ylim(0, max(yar)*1.1)
elif y1lab == self.impact_name:
ax1.set_xlim(max(xar), 1) #ari limits
#=======================================================================
# post format
#=======================================================================
#legend
h1, l1 = ax1.get_legend_handles_labels()
legLab_d = {e:'\'%s\' annualized = '%e + impactFmtFunc(sEAD_ser[e]) for e in l1}
legendTitle = self._get_val_str('*default')
self._postFmt(ax1,
            val_str=val_str, #putting in legend title
legendHandles=(h1, list(legLab_d.values())),
#xLocScale=0.8, yLocScale=0.1,
legendTitle=legendTitle)
#assign tick formatter functions
if y1lab == 'AEP':
xfmtFunc = impactFmtFunc
yfmtFunc=lambda x:'%.4f'%x
elif y1lab==self.impact_name:
            xfmtFunc = lambda x:'{:,.0f}'.format(x) #thousands separator
yfmtFunc=impactFmtFunc
self._tickSet(ax1, xfmtFunc=xfmtFunc, yfmtFunc=yfmtFunc)
return fig
    def _set_valstr(self):
        """
        similar to what's on modcom.RiskModel
but removing some attributes set during run loops
"""
#plotting string
self.val_str = 'annualized impacts = %s %s \nltail=\'%s\' \nrtail=\'%s\''%(
self.impactFmtFunc(self.ead_tot), self.impact_units, self.ltail, self.rtail) +\
            '\nevent_rels = \'%s\'\nprec = %i\ndate=%s'%(
self.event_rels, self.prec, self.today_str)
```
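The AEP branch of `plot_stackdRCurves` above works around matplotlib's lack of a horizontal stackplot by keeping a running cumulative offset and calling `fill_betweenx` once per component. A minimal standalone sketch of that pattern, on synthetic data rather than CanFlood's actual inputs:
```python
# Horizontal stacking via fill_betweenx with a cumulative x offset,
# mirroring the y1lab == 'AEP' branch above. Data is synthetic.
import numpy as np
import matplotlib.pyplot as plt

aep = np.array([0.5, 0.2, 0.1, 0.02, 0.01])  # y axis (AEP)
components = {
    'structural': np.array([10., 30., 55., 90., 120.]),
    'contents':   np.array([5., 15., 25., 40., 60.]),
}

fig, ax = plt.subplots()
x_cum = np.zeros_like(aep)
for name, impacts in components.items():
    ax.fill_betweenx(aep, x_cum, x_cum + impacts, label=name, lw=0, alpha=0.9)
    x_cum = x_cum + impacts  # advance the stack

ax.set_xlabel('impacts')
ax.set_ylabel('AEP')
ax.legend()
plt.show()
```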
#### File: tools/vfunc_conv/jrc_global.py
```python
import os, datetime
start = datetime.datetime.now()
import pandas as pd
import numpy as np
from pandas import IndexSlice as idx
from hlpr.basic import view, force_open_dir
from vfunc_conv.vcoms import VfConv
#from model.modcom import DFunc
mod_name = 'misc.jrc_global'
today_str = datetime.datetime.today().strftime('%Y%m%d')
class JRConv(VfConv):
def __init__(self,
libName = 'Huzinga_2017',
prec=5, #precision
**kwargs):
self.libName = libName
super().__init__(
prec=prec,
            **kwargs) #initialize the baseclass
def load(self,
fp = r'C:\LS\02_WORK\IBI\202011_CanFlood\04_CALC\vfunc\lib\Huzinga_2017\copy_of_global_flood_depth-damage_functions__30102017.xlsx',
):
#===============================================================================
# inputs
#===============================================================================
dx_raw = pd.read_excel(fp, sheet_name = 'Damage functions', header=[1,2], index_col=[0,1])
#clean it
df = dx_raw.drop('Standard deviation', axis=1, level=0)
dxind = df.droplevel(level=0, axis=1)
dxind.index = dxind.index.set_names(['cat', 'depth_m'])
#get our series
boolcol = dxind.columns.str.contains('North AMERICA')
"""
no Transport or Infrastructure curves for North America
"""
dxind = dxind.loc[:, boolcol]
#handle nulls
dxind = dxind.replace({'-':np.nan}).dropna(axis=0, how='any')
self.dxind = dxind
return self.dxind
def convert(self,
dxind=None,
metac_d = {
'desc':'JRC Global curves',
'location':'USA',
'date':'2010',
'source':'(Huizinga et al. 2017)',
'impact_var':'loss',
'impact_units':'pct',
'exposure_var':'flood depth',
'exposure_units':'m',
'scale_var':'maximum damage (national average)',
'scale_units':'pct',
},
):
#=======================================================================
# defaults
#=======================================================================
if dxind is None: dxind=self.dxind
#=======================================================================
# setup meta
#=======================================================================
crve_d = self.crve_d.copy() #start with a copy
crve_d['file_conversion']='CanFlood.%s_%s'%(mod_name, today_str)
#check keys
miss_l = set(metac_d.keys()).difference(crve_d.keys())
assert len(miss_l)==0, 'requesting new keys: %s'%miss_l
#crve_d = {**metac_d, **crve_d}
crve_d.update(metac_d) #preserves order
"""
crve_d.keys()
"""
#check it
        assert list(crve_d.keys())[-1]=='exposure', 'need last entry to be exposure'
#=======================================================================
# curve loop
#=======================================================================
cLib_d = dict()
#loop and collect
for cval, cdf_raw in dxind.groupby('cat', axis=0, level=0):
#===================================================================
# get the tag
#===================================================================
tag = cval.strip().replace(' ','')
for k,v in self.tag_cln_d.items():
tag = tag.replace(k, v).strip()
#===================================================================
# depth-damage
#===================================================================
            ddf = cdf_raw.droplevel(level=0, axis=0).astype(float).round(self.prec)
dd_d = ddf.iloc[:,0].to_dict()
#===================================================================
# meta
#===================================================================
dcurve_d = crve_d.copy()
dcurve_d['tag'] = tag
#assemble
dcurve_d = {**dcurve_d, **dd_d}
self.check_crvd(dcurve_d)
cLib_d[tag] = dcurve_d
#=======================================================================
# convert and summarize
#=======================================================================
rd = dict()
for k, sd in cLib_d.items():
"""need this to ensure index is formatted for plotters"""
df = pd.Series(sd).to_frame().reset_index(drop=False)
df.columns = range(df.shape[1]) #reset the column names
rd[k] = df
#get the summary tab first
smry_df = self._get_smry(cLib_d.copy())
rd = { **{'_smry':smry_df},
**rd,
}
self.res_d = rd.copy()
return self.res_d
"""
view(dxind)
view(dx_raw)
"""
if __name__=='__main__':
out_dir = r'C:\LS\03_TOOLS\CanFlood\outs\misc\vfunc_conv'
wrkr = JRConv(out_dir=out_dir, figsize = (10,10))
wrkr.load()
cLib_d = wrkr.convert()
wrkr.output(cLib_d)
#===========================================================================
# plots
#===========================================================================
fig = wrkr.plotAll(cLib_d, title=wrkr.libName,lib_as_df=True)
wrkr.output_fig(fig)
#===========================================================================
# wrap
#===========================================================================
force_open_dir(wrkr.out_dir)
tdelta = datetime.datetime.now() - start
print('finished in %s'%tdelta)
```
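`JRConv.load` above relies on a compact pandas idiom: read a two-level column header, drop one level-0 group, collapse the header, then convert '-' placeholders to NaN and drop incomplete rows. A hedged illustration of the same steps on an in-memory frame (the real data comes from the JRC spreadsheet):
```python
# Reproduces the cleanup steps from JRConv.load() on synthetic data.
import numpy as np
import pandas as pd

cols = pd.MultiIndex.from_tuples(
    [('Damage', 'North AMERICA'), ('Standard deviation', 'North AMERICA')])
idx = pd.MultiIndex.from_tuples(
    [('Residential', 0.0), ('Residential', 0.5)], names=['cat', 'depth_m'])
dx_raw = pd.DataFrame([[0.0, 0.1], ['-', 0.2]], index=idx, columns=cols)

df = dx_raw.drop('Standard deviation', axis=1, level=0)  # drop std-dev block
dxind = df.droplevel(level=0, axis=1)                    # collapse header
dxind = dxind.replace({'-': np.nan}).dropna(axis=0, how='any')
print(dxind)
```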
|
{
"source": "jdnixx/Groupmebot",
"score": 3
}
|
#### File: GroupmeClient/ApiWrapper/membersCommands.py
```python
from .command import Command
class Add(Command):
'''
Add a member the group
Params
members: array - objects described below. nickname is required. You must use one of the following identifiers: user_id, phone_number, or email.
object
nickname (string) required
user_id (string)
phone_number (string)
email (string)
guid (string)
'''
def __init__(self, groupmeAccessToken, groupId, **kwargs):
self.args = kwargs
self.groupId = groupId
super(Add, self).__init__(groupmeAccessToken, 'POST')
def createUrl(self):
print(self.groupId)
return self.URL_BASE + '/groups/' + str(self.groupId) + '/members/add' + self.TOKEN_QUERY_STRING
def createLoad(self):
load = {}
members = []
array = []
for key, value in self.args.items():
if key == 'members':
members = value
        # validate each member independently: a nickname plus at least one
        # of user_id / phone_number / email is required
        for member in members:
            hasNickname = False
            hasRequiredFields = False
            if 'nickname' in member:
                hasNickname = True
            if 'user_id' in member:
                hasRequiredFields = True
            if 'phone_number' in member:
                hasRequiredFields = True
            if 'email' in member:
                hasRequiredFields = True
            if hasNickname and hasRequiredFields:
                array.append(member)
load['members'] = array
return load
def makeCall(self):
return super(Add, self).makeCall()
def prepareMemberObject(self, nickname=None, user_id=None, phone_number=None, email=None):
'''A helper method for preparing Member objects which can be passed as array members to the Add command'''
member = {}
if nickname is None:
raise Exception("Nickname is required to create Member object")
else:
member['nickname'] = nickname
if user_id is not None:
member['user_id'] = user_id
if phone_number is not None:
member['phone_number'] = phone_number
if email is not None:
member['email'] = email
return member
class Remove(Command):
'''
    Remove a member from a group
NOTE: Creator cannot be removed
Params
membership_id: string — Please note that this isn't the same as the user ID. In the members key in the group JSON, this is the id value, not the user_id.
'''
def __init__(self, groupmeAccessToken, groupId, membership_id=None, **kwargs):
self.args = kwargs
self.groupId = groupId
self.membership_id = membership_id
super(Remove, self).__init__(groupmeAccessToken, 'POST')
def createUrl(self):
if self.membership_id is None:
raise Exception('membership_id is required')
url = self.URL_BASE + '/groups/' + str(self.groupId) + '/members/' + str(self.membership_id) + '/remove' + self.TOKEN_QUERY_STRING
return url
def makeCall(self):
return super(Remove, self).makeCall()
class Update(Command):
'''
Update YOUR nickname in a group. The nickname must be between 1 and 50 chars
Params
nickname: string - YOUR new nickname
'''
def __init__(self, groupmeAccessToken, groupId, nickname=None, **kwargs):
self.args = kwargs
self.groupId = groupId
self.nickname = nickname
super(Update, self).__init__(groupmeAccessToken, 'POST')
def createUrl(self):
return self.URL_BASE + '/groups/' + str(self.groupId) + '/memberships/update' + self.TOKEN_QUERY_STRING
def createLoad(self):
load = {}
load['membership'] = {'nickname': self.nickname}
return load
def makeCall(self):
super(Update, self).makeCall()
```
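A hedged usage sketch for the `Add` command above: `prepareMemberObject` builds the member dict and `createLoad` keeps only members carrying a nickname plus at least one identifier. The token, group id and member details are placeholders, and this assumes the `Command` base class constructor only stores state (no network call happens before `makeCall`):
```python
# Build and inspect an Add payload without calling the API.
from GroupmeClient.ApiWrapper.membersCommands import Add

add = Add('ACCESS_TOKEN', '12345678')  # placeholder token / group id
member = add.prepareMemberObject(nickname='jd', user_id='100200300')
add.args = {'members': [member]}
print(add.createLoad())  # {'members': [{'nickname': 'jd', 'user_id': '100200300'}]}
```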
#### File: GroupmeClient/Utilities/responseParsing.py
```python
def parse_response_from_json(r):
response = ''
try:
response = r.json()['response']
except Exception as ex:
response = str(ex)
return response
```
#### File: Groupmebot/TradingViewScreenshotTesting/tvchartbot.py
```python
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, ElementClickInterceptedException, StaleElementReferenceException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# from PIL import Image
import time
print('STARTING')
USER = "groupmebot"
PASS = "<PASSWORD>"
testing = False
# testing = True
url = "https://www.tradingview.com/chart/UzJ9PCY8/#"
class TradingViewScraper:
def __init__(self):
print("TradingViewScraper IS INIT'ing")
self.testing = False
self.driver = None
def start(self):
### OPENING A HEADLESS BROWSER ###
chrome_options = webdriver.ChromeOptions()
# chrome_options.add_argument("--headless")
chrome_options.add_argument(f"--window-size=800,640")
chrome_options.add_argument("--hide-scrollbars")
if not self.testing:
chrome_options.binary_location = '/app/.apt/usr/bin/google-chrome'
self.driver = webdriver.Chrome(chrome_options=chrome_options)
else:
self.driver = webdriver.Chrome("/Program Files/chromedriver", chrome_options=chrome_options)
self.driver.get(url)
### LOGGING IN ###
# finds "log in" hyperlink (if currently on error page)
login = self.driver.find_element_by_class_name('js-login-link')
print("Login:")
print(login)
login.click()
# wait for js login prompt
username = WebDriverWait(self.driver, 5, 0.05).until(
EC.presence_of_element_located((By.NAME, 'username')))
print("Username:")
print(username)
# find password
password = self.driver.find_element_by_name('password') # if username box is found, then password is visible too
# put in da details
username.send_keys(USER)
        password.send_keys(PASS)
password.send_keys(Keys.RETURN)
print("Login info entered")
print("Sleeping for 4....")
time.sleep(4)
def max_devices_dialog_check(self):
### CHECK AUTHENTICATION ERROR MESSAGE ###
try:
max_device_dialog = self.driver.find_element_by_class_name('tv-dialog__modal-container')
print(max_device_dialog)
connect = max_device_dialog.find_element_by_css_selector('[data-name=no]')
connect.click()
except StaleElementReferenceException:
print(StaleElementReferenceException)
return self.driver.get_screenshot_as_png()
except NoSuchElementException:
print("No max_devices dialog box found.")
def get_chart_screenshot_binary(self, parsedinput):
self.max_devices_dialog_check()
# first, resolve the user's input
rawsym = parsedinput[0]
propersym = rawsym
# add "USD" for the big blue chips
if rawsym == 'eth' or rawsym == 'btc' or rawsym == 'xbt':
propersym += 'usd'
# if not sym.endswith('btc'): # ltcusdt
# # either: 1. no pair (just "xrp" or "doge")
# # 2. btc pair (xrp/btc) or eth, etc..
# if len(sym) <= 4:
# # ...probably a no pair (eg. "XRP")
# sym_name = sym
# # already set to BTC pair by default
# elif sym.endswith('btc' or 'eth'):
# # ...then we know it's "xxBTC" or "xxETH" at least
# sym_pair = sym[-3:] # last 3 chars
# sym_name = sym[:-3]
# #
# propersym = sym_name + sym_pair
# elif sym is "btc" or sym is "eth":
# symbol input box (top-left)
symbolinput = WebDriverWait(self.driver, 10, 0.05).until(
EC.presence_of_element_located((By.CLASS_NAME, 'input-3lfOzLDc-')))
print("symbolinput:")
print(symbolinput)
self.max_devices_dialog_check()
try:
symbolinput.click()
except ElementClickInterceptedException:
print(ElementClickInterceptedException)
return self.driver.get_screenshot_as_png()
self.max_devices_dialog_check()
symbolinput.click()
# WebDriverWait(self.driver, 10, 0.05).until(
# EC.presence_of_element_located((By.CLASS_NAME, 'isExpanded-1pdStI5Z-')))
symbolinput.send_keys(Keys.CONTROL, 'a', Keys.BACKSPACE)
### EXCHANGE ###
# THIS IS ALSO WHERE THE PROBLEM IS ###########################################################
# if exchange is specified, loop through to find the correct line
if len(parsedinput) > 1: # exchange
exchange = parsedinput[1]
symbolinput.send_keys(exchange, ':')
# finally, enter symbol
symbolinput.send_keys(propersym)
symbolinput.send_keys(Keys.RETURN)
# ### SELECTING THE RIGHT SYMBOL ###
#
# # entire drop-down table of matching symbols
# symboleditpopup = WebDriverWait(self.driver, 10, 0.05).until(
# EC.visibility_of_element_located((By.CLASS_NAME, 'symbol-edit-popup'))
# )
# print(symboleditpopup)
# symboleditpopup.screenshot('symboleditpopup.png')
#
# # the symboleditpopup menu is up, but may be still loading
# # these lines might not be visible yet
#
# table_of_results = symboleditpopup.find_elements_by_css_selector('tr.symbol-edit-popup')
# target = table_of_results[0]
#
# # if exchange is specified, loop through to find the correct line
# if len(parsedinput) > 1: # exchange
# exchange = parsedinput[1]
# for line in table_of_results:
# name_and_exchange = line.get_attribute('data-item-ticker')
# print(name_and_exchange) #
# if "lol" is exchange:
# # target = line
# break
#
# target.click()
# locate the main chart element, for screenshot & Key-sending use
# chart_itself = self.driver.find_element_by_class_name("chart-container")
### CHANGING TIME INTERVAL ###
if len(parsedinput) > 2: # time interval
interval = parsedinput[2]
ActionChains(self.driver).send_keys(',', interval).perform()
else:
ActionChains(self.driver).send_keys(',', '4h').perform()
ActionChains(self.driver).send_keys(Keys.RETURN).perform()
### SCREENSHOT ###
# self.driver.close()
return self.driver.get_screenshot_as_png()
# if testing is True:
# tv = TradingViewScraper()
# tv.testing = True
# tv.start()
# bindata = tv.get_chart_screenshot_binary("ltcusd bitfinex 1d")
#
# churt = tv.driver.find_element_by_class_name("chart-container")
# churt.screenshot('screen_shot_chart.png')
# print("Screenshot of chart saved")
```
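`get_chart_screenshot_binary` above expects `parsedinput` as `[symbol, exchange?, interval?]`. The bot's actual message parsing is not shown in this file; a minimal, assumed helper for a command like `/chart ltcusd bitfinex 1d` might look like this:
```python
# Hypothetical parser producing the [symbol, exchange, interval] list
# consumed by TradingViewScraper.get_chart_screenshot_binary.
def parse_chart_command(text):
    parts = text.lower().split()
    if parts and parts[0].startswith('/'):
        parts = parts[1:]  # drop the "/chart" trigger word
    return parts           # [symbol] / [symbol, exchange] / [symbol, exchange, interval]

assert parse_chart_command('/chart ltcusd bitfinex 1d') == ['ltcusd', 'bitfinex', '1d']
```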
|
{
"source": "jdnrg/trailscraper",
"score": 2
}
|
#### File: trailscraper/trailscraper/cli.py
```python
import json
import logging
import os
import time
import click
import trailscraper
from trailscraper import time_utils, policy_generator
from trailscraper.cloudtrail import load_from_dir, load_from_api, last_event_timestamp_in_dir, filter_records, \
parse_records
from trailscraper.guess import guess_statements
from trailscraper.iam import parse_policy_document
from trailscraper.s3_download import download_cloudtrail_logs
@click.group()
@click.version_option(version=trailscraper.__version__)
@click.option('--verbose', default=False, is_flag=True)
def root_group(verbose):
"""A command-line tool to get valuable information out of AWS CloudTrail."""
logger = logging.getLogger()
if verbose:
logger.setLevel(logging.DEBUG)
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger('s3transfer').setLevel(logging.INFO)
@click.command()
@click.option('--bucket', required=True,
help='The S3 bucket that contains cloud-trail logs')
@click.option('--prefix', default="",
help='Prefix in the S3 bucket (including trailing slash)')
@click.option('--account-id', multiple=True, required=True,
help='ID of the account we want to look at')
@click.option('--region', multiple=True, required=True,
help='Regions we want to look at')
@click.option('--log-dir', default="~/.trailscraper/logs", type=click.Path(),
help='Where to put logfiles')
@click.option('--from', 'from_s', default="one day ago", type=click.STRING,
help='Start date, e.g. "2017-01-01" or "-1days". Defaults to "one day ago".')
@click.option('--to', 'to_s', default="now", type=click.STRING,
help='End date, e.g. "2017-01-01" or "now". Defaults to "now".')
@click.option('--wait', default=False, is_flag=True,
help='Wait until events after the specified timeframe are found.')
@click.option('--profile', default="default", help='Profile name')
# pylint: disable=too-many-arguments
def download(bucket, prefix, account_id, region, log_dir, from_s, to_s, wait, profile):
"""Downloads CloudTrail Logs from S3."""
log_dir = os.path.expanduser(log_dir)
from_date = time_utils.parse_human_readable_time(from_s)
to_date = time_utils.parse_human_readable_time(to_s)
download_cloudtrail_logs(log_dir, bucket, prefix, account_id, region, from_date, to_date, profile)
if wait:
last_timestamp = last_event_timestamp_in_dir(log_dir)
while last_timestamp <= to_date:
click.echo("CloudTrail logs haven't caught up to "+str(to_date)+" yet. "+
"Most recent timestamp: "+str(last_timestamp.astimezone(to_date.tzinfo))+". "+
"Trying again in 60sec.")
time.sleep(60*1)
            download_cloudtrail_logs(log_dir, bucket, prefix, account_id, region, from_date, to_date, profile)
last_timestamp = last_event_timestamp_in_dir(log_dir)
@click.command("select")
@click.option('--log-dir', default="~/.trailscraper/logs", type=click.Path(),
              help='Where to find logfiles')
@click.option('--filter-assumed-role-arn', multiple=True,
help='only consider events from this role (can be used multiple times)')
@click.option('--use-cloudtrail-api', is_flag=True, default=False,
help='Pull Events from CloudtrailAPI instead of log-dir')
@click.option('--from', 'from_s', default="1970-01-01", type=click.STRING,
help='Start date, e.g. "2017-01-01" or "-1days"')
@click.option('--to', 'to_s', default="now", type=click.STRING,
help='End date, e.g. "2017-01-01" or "now"')
def select(log_dir, filter_assumed_role_arn, use_cloudtrail_api, from_s, to_s):
"""Finds all CloudTrail records matching the given filters and prints them."""
log_dir = os.path.expanduser(log_dir)
from_date = time_utils.parse_human_readable_time(from_s)
to_date = time_utils.parse_human_readable_time(to_s)
if use_cloudtrail_api:
records = load_from_api(from_date, to_date)
else:
records = load_from_dir(log_dir, from_date, to_date)
filtered_records = filter_records(records, filter_assumed_role_arn, from_date, to_date)
filtered_records_as_json = [record.raw_source for record in filtered_records]
click.echo(json.dumps({"Records": filtered_records_as_json}))
@click.command("generate")
def generate():
"""Generates a policy that allows the events passed in through STDIN"""
stdin = click.get_text_stream('stdin')
records = parse_records(json.load(stdin)['Records'])
policy = policy_generator.generate_policy(records)
click.echo(policy.to_json())
@click.command("guess")
@click.option("--only", multiple=True,
help='Only guess actions with the given prefix, e.g. Describe (can be passed multiple times)')
def guess(only):
"""Extend a policy passed in through STDIN by guessing related actions"""
stdin = click.get_text_stream('stdin')
policy = parse_policy_document(stdin)
allowed_prefixes = [s.title() for s in only]
policy = guess_statements(policy, allowed_prefixes)
click.echo(policy.to_json())
@click.command("last-event-timestamp")
@click.option('--log-dir', default="~/.trailscraper/logs", type=click.Path(),
              help='Where to find logfiles')
def last_event_timestamp(log_dir):
"""Print the most recent cloudtrail event timestamp"""
log_dir = os.path.expanduser(log_dir)
click.echo(last_event_timestamp_in_dir(log_dir))
root_group.add_command(download)
root_group.add_command(select)
root_group.add_command(generate)
root_group.add_command(guess)
root_group.add_command(last_event_timestamp)
```
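The `--wait` flag in `download` above implements a simple poll-until-caught-up loop: fetch, check the newest observed event timestamp, sleep, repeat. The same pattern in isolation, with stand-ins for `download_cloudtrail_logs` and `last_event_timestamp_in_dir`:
```python
# Generic form of the --wait loop; fetch() and newest_timestamp()
# are placeholders for the real download/inspect functions.
import time

def poll_until_caught_up(fetch, newest_timestamp, to_date, interval_sec=60):
    fetch()
    while newest_timestamp() <= to_date:
        time.sleep(interval_sec)
        fetch()
    return newest_timestamp()
```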
|
{
"source": "jdnumm/artdirector",
"score": 3
}
|
#### File: jdnumm/artdirector/artdirector.py
```python
import argparse
from PIL import (
Image,
ImageFilter,
ImageFile
)
class ArtDirector(object):
def __init__(self):
pass
def load(self, filename):
self.image = Image.open(filename)
return self
def save(self, filename):
self.image.save(filename, optimize=True, quality=85, progressive=False)
return self
def get_pil_image(self):
return self.image
def crop(self, size, focus=None, zoom=0.0, edge=3.0):
src_width, src_height = self.image.size
dst_width, dst_height = size
center_x = src_width/2
center_y = src_height/2
src_ratio = float(src_width) / float(src_height)
ratio = float(dst_width) / float(dst_height)
if ratio < src_ratio:
crop_height = src_height
crop_width = crop_height * ratio
x_offset = float(src_width - crop_width) / 2
y_offset = 0
else:
crop_width = src_width
crop_height = crop_width / ratio
x_offset = 0
y_offset = float(src_height - crop_height) / 2
crop_height = crop_height-crop_height*zoom
crop_width = crop_width-crop_width*zoom
if focus:
focus_x, focus_y = focus
x_end = x_offset+crop_width
# Move crop window to the right
while focus_x >= x_offset+crop_width/edge and x_offset+crop_width < src_width :
x_offset = x_offset+1
# Move crop window to the left
while focus_x <= x_offset+crop_width/edge and x_offset > 0:
x_offset = x_offset-1
# Move crop window down
while focus_y >= y_offset+crop_height/edge and y_offset+crop_height <= src_height :
y_offset = y_offset+1
# Move crop window up
while focus_y <= y_offset+crop_height/edge and y_offset > 0:
y_offset = y_offset-1
self.image = self.image.crop((int(x_offset), int(y_offset), int(x_offset)+int(crop_width), int(y_offset)+int(crop_height)))
        self.image = self.image.resize(size, Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
return self
def filter_blur(self, radius=5):
self.image = self.image.filter(ImageFilter.GaussianBlur(radius=radius))
return self
def filter_bw(self):
self.image = self.image.convert('L')
return self
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('inputfile', metavar='INPUT_FILE', help='Input image')
parser.add_argument('outputfile', metavar='OUTPUT_FILE', help='Output image')
parser.add_argument('--width', dest='width', type=int, default=100, help='Crop width')
parser.add_argument('--height', dest='height', type=int, default=100, help='Crop height')
parser.add_argument('--focus-x', dest='focus_x', default=None, type=int, help='Focal point ')
parser.add_argument('--focus-y', dest='focus_y', default=None, type=int, help='Focal point')
    parser.add_argument('--zoom', dest='zoom', type=float, default=0.0, help='Zoom between 0.0 - 1.0 (default 0.0)')
parser.add_argument('--edge', dest='edge', type=float, default=3.0, help='Edge (size/n) around the focal target area')
return parser.parse_args()
def main():
args = parse_arguments()
ad = ArtDirector()
ad.load(args.inputfile)
if args.focus_x != None and args.focus_y != None:
ad.crop([args.width, args.height], focus=[args.focus_x, args.focus_y], zoom=args.zoom, edge=args.edge)
else:
ad.crop([args.width, args.height], zoom=args.zoom, edge=args.edge)
ad.save(args.outputfile)
if __name__ == '__main__':
main()
```
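Because `load`, `crop`, the filters and `save` all return `self`, the class above supports fluent chaining. A typical call (file paths and focal point are placeholders):
```python
# Chained use of ArtDirector: load, focal-point crop, grayscale, save.
from artdirector import ArtDirector

(ArtDirector()
    .load('input.jpg')
    .crop([400, 300], focus=[120, 80], zoom=0.1)
    .filter_bw()
    .save('output.jpg'))
```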
|
{
"source": "jdo4508/HW-12_Web_Scraping_Challenge",
"score": 3
}
|
#### File: HW-12_Web_Scraping_Challenge/Missions_to_Mars/scrape_mars.py
```python
from bs4 import BeautifulSoup as bs
from splinter import Browser
import pandas as pd
import requests
import time
# Create Dictionary to collect all of the data
mars= {}
# Define Function Scrape
def scrape():
# Define Function for opening browser
executable_path = {"executable_path":"chromedriver.exe"}
browser = Browser("chrome", **executable_path, headless = False)
# # NASA Mars News
#Open browser to NASA Mars News Site
browser.visit('https://mars.nasa.gov/news/')
html = browser.html
soup = bs(html, 'html.parser')
#Search for news titles and paragraph
news_title = soup.find('div', class_='list_text').find('div', class_='content_title').find('a').text
news_p = soup.find('div', class_='article_teaser_body').text
# Add data to dictionary
mars['news_title'] = news_title
mars['news'] = news_p
# # Featured Image
#Open browser to JPL featured image
browser.visit('https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars')
    # Navigate to the full image page
browser.click_link_by_partial_text('FULL IMAGE')
#Navigate with delay for full large image
time.sleep(5)
browser.click_link_by_partial_text('more info')
html = browser.html
soup = bs(html, 'html.parser')
#Search for image source and save as variable
results = soup.find_all('figure', class_='lede')
relative_img_path = results[0].a['href']
featured_img_url = 'https://www.jpl.nasa.gov' + relative_img_path
#Add data to dictionary
mars['featured_image_url'] = featured_img_url
# # Mars Weather
#Specify url
url = 'https://twitter.com/marswxreport?lang=en'
response = requests.get(url)
soup = bs(response.text, 'lxml')
# Find all elements that contain tweets
tweets = soup.find_all('div', class_='js-tweet-text-container')
#Search through tweets
for tweet in tweets:
mars_weather = tweet.find('p').text
#select only weather related tweets that contain the word "pressure"
if 'pressure' in mars_weather:
weather = tweet.find('p')
break
else:
pass
#Add data to dictionary
mars['weather']= weather.text
# # Mars Facts
#Visit the mars facts site and parse
url = "https://space-facts.com/mars/"
tables = pd.read_html(url)
#Find Mars Facts DataFrame and assign comlumns
df = tables[0]
df.columns = ['Description', 'Value']
#Save as html
html_table = df.to_html(table_id="html_tbl_css",justify='left',index=False)
#Add data to dictionary
mars['table']=html_table
# # Mars Hemispheres
#Visit hemispheres website through splinter module
hemispheres_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(hemispheres_url)
html= browser.html
soup = bs(html, 'html.parser')
#Retreive mars hemispheres information
items = soup.find_all('div', class_='item')
#Create empty list for hemisphere urls
hemisphere_image_urls = []
#Store the main_ul
hemispheres_main_url = 'https://astrogeology.usgs.gov'
#Loop through the items previously stored
for i in items:
title = i.find('h3').text
#Store link that leads to full image website
partial_img_url = i.find('a', class_='itemLink product-item')['href']
#Visit the link that contains the full image website
browser.visit(hemispheres_main_url + partial_img_url)
img_html = browser.html
soup = bs(img_html, 'html.parser')
#Retrieve full image source
img_url = hemispheres_main_url + soup.find('img', class_='wide-image')['src']
# Append the retreived information into a list of dictionaries
hemisphere_image_urls.append({"title" : title, "img_url" : img_url})
# Store data in a dictionary
mars['hemisphere_image_urls']= hemisphere_image_urls
    # Quit browser and return data
    browser.quit()
    return mars
```
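A direct invocation sketch for the module above; it needs a working `chromedriver.exe` and network access, and in the assignment this module is normally imported rather than run standalone:
```python
# Run the full scrape and print a few of the collected fields.
from scrape_mars import scrape

data = scrape()
print(data['news_title'])
print(data['weather'])
print(data['hemisphere_image_urls'])
```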
|
{
"source": "jdobber/python-training",
"score": 4
}
|
#### File: Projekte/RPN_Taschenrechner/calc.py
```python
from stack import Stack
def calc(expr):
"""
implements a postfix calculator
see also here:
https://www.geeksforgeeks.org/stack-set-4-evaluation-postfix-expression/
https://de.wikipedia.org/wiki/Umgekehrte_polnische_Notation
"""
s = Stack()
# split the expression
for e in expr.split(" "):
# check for number
try:
num = int(e)
s.push(num)
except:
# it's not a number, so treat it as an operator
op1 = s.pop()
op2 = s.pop()
if e == "*":
s.push(op2 * op1)
elif e == "+":
s.push(op2 + op1)
elif e == "-":
s.push(op2 - op1)
elif e == ":":
s.push(op2 / op1)
else:
print("Unknown operation")
return
return s.pop()
if __name__ == '__main__':
print( calc("4 3 * 2 2 + :") )
print( calc("2 3 1 * + 9 -") )
```
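A quick trace of the second example makes the pop order concrete (`op1` is the top of the stack, so `-` computes `op2 - op1`):
```python
# Worked trace of calc("2 3 1 * + 9 -"):
#   token   stack afterwards
#   "2"     [2]
#   "3"     [2, 3]
#   "1"     [2, 3, 1]
#   "*"     [2, 3]     # 3 * 1 = 3
#   "+"     [5]        # 2 + 3 = 5
#   "9"     [5, 9]
#   "-"     [-4]       # 5 - 9 = -4
from calc import calc

print(calc("2 3 1 * + 9 -"))  # -4
```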
|
{
"source": "jdobes/lunch",
"score": 3
}
|
#### File: api/restaurants/spravnemisto.py
```python
from .utils import fetch_menicka, parse_menicka
NAME = "<NAME>"
URL = "https://www.menicka.cz/5335-spravne-misto.html"
RESTAURANT_ID = "5335"
def parse_menu():
menicka_html = fetch_menicka(RESTAURANT_ID)
return parse_menicka(menicka_html)
```
|
{
"source": "jdob/rules_python",
"score": 2
}
|
#### File: rules_python/python/pip.bzl
```python
load("//python/pip_install:pip_repository.bzl", "pip_repository", _package_annotation = "package_annotation")
load("//python/pip_install:repositories.bzl", "pip_install_dependencies")
load("//python/pip_install:requirements.bzl", _compile_pip_requirements = "compile_pip_requirements")
compile_pip_requirements = _compile_pip_requirements
package_annotation = _package_annotation
def pip_install(requirements = None, name = "pip", **kwargs):
"""Accepts a `requirements.txt` file and installs the dependencies listed within.
Those dependencies become available in a generated `requirements.bzl` file.
This macro wraps the [`pip_repository`](./pip_repository.md) rule that invokes `pip`.
In your WORKSPACE file:
```python
pip_install(
requirements = ":requirements.txt",
)
```
You can then reference installed dependencies from a `BUILD` file with:
```python
load("@pip//:requirements.bzl", "requirement")
py_library(
name = "bar",
...
deps = [
"//my/other:dep",
requirement("requests"),
requirement("numpy"),
],
)
```
> Note that this convenience comes with a cost.
> Analysis of any BUILD file which loads the requirements helper in this way will
> cause an eager-fetch of all the pip dependencies,
> even if no python targets are requested to be built.
> In a multi-language repo, this may cause developers to fetch dependencies they don't need,
> so consider using the long form for dependencies if this happens.
In addition to the `requirement` macro, which is used to access the `py_library`
target generated from a package's wheel, the generated `requirements.bzl` file contains
functionality for exposing [entry points][whl_ep] as `py_binary` targets.
[whl_ep]: https://packaging.python.org/specifications/entry-points/
```python
load("@pip_deps//:requirements.bzl", "entry_point")
alias(
name = "pip-compile",
actual = entry_point(
pkg = "pip-tools",
script = "pip-compile",
),
)
```
Note that for packages whose name and script are the same, only the name of the package
is needed when calling the `entry_point` macro.
```python
load("@pip_deps//:requirements.bzl", "entry_point")
alias(
name = "flake8",
actual = entry_point("flake8"),
)
```
Args:
requirements (Label): A 'requirements.txt' pip requirements file.
name (str, optional): A unique name for the created external repository (default 'pip').
**kwargs (dict): Additional arguments to the [`pip_repository`](./pip_repository.md) repository rule.
"""
# Just in case our dependencies weren't already fetched
pip_install_dependencies()
pip_repository(
name = name,
requirements = requirements,
repo_prefix = "pypi__",
**kwargs
)
def pip_parse(requirements_lock, name = "pip_parsed_deps", **kwargs):
"""Accepts a locked/compiled requirements file and installs the dependencies listed within.
Those dependencies become available in a generated `requirements.bzl` file.
You can instead check this `requirements.bzl` file into your repo, see the "vendoring" section below.
This macro wraps the [`pip_repository`](./pip_repository.md) rule that invokes `pip`, with `incremental` set.
In your WORKSPACE file:
```python
load("@rules_python//python:pip.bzl", "pip_parse")
pip_parse(
name = "pip_deps",
requirements_lock = ":requirements.txt",
)
load("@pip_deps//:requirements.bzl", "install_deps")
install_deps()
```
You can then reference installed dependencies from a `BUILD` file with:
```python
load("@pip_deps//:requirements.bzl", "requirement")
py_library(
name = "bar",
...
deps = [
"//my/other:dep",
requirement("requests"),
requirement("numpy"),
],
)
```
In addition to the `requirement` macro, which is used to access the generated `py_library`
target generated from a package's wheel, The generated `requirements.bzl` file contains
functionality for exposing [entry points][whl_ep] as `py_binary` targets as well.
[whl_ep]: https://packaging.python.org/specifications/entry-points/
```python
load("@pip_deps//:requirements.bzl", "entry_point")
alias(
name = "pip-compile",
actual = entry_point(
pkg = "pip-tools",
script = "pip-compile",
),
)
```
Note that for packages whose name and script are the same, only the name of the package
is needed when calling the `entry_point` macro.
```python
load("@pip_deps//:requirements.bzl", "entry_point")
alias(
name = "flake8",
actual = entry_point("flake8"),
)
```
## Vendoring the requirements.bzl file
In some cases you may not want to generate the requirements.bzl file as a repository rule
while Bazel is fetching dependencies. For example, if you produce a reusable Bazel module
such as a ruleset, you may want to include the requirements.bzl file rather than make your users
install the WORKSPACE setup to generate it.
See https://github.com/bazelbuild/rules_python/issues/608
This is the same workflow as Gazelle, which creates `go_repository` rules with
[`update-repos`](https://github.com/bazelbuild/bazel-gazelle#update-repos)
To do this, use the "write to source file" pattern documented in
https://blog.aspect.dev/bazel-can-write-to-the-source-folder
to put a copy of the generated requirements.bzl into your project.
Then load the requirements.bzl file directly rather than from the generated repository.
See the example in rules_python/examples/pip_parse_vendored.
Args:
requirements_lock (Label): A fully resolved 'requirements.txt' pip requirement file
containing the transitive set of your dependencies. If this file is passed instead
of 'requirements' no resolve will take place and pip_repository will create
individual repositories for each of your dependencies so that wheels are
fetched/built only for the targets specified by 'build/run/test'.
Note that if your lockfile is platform-dependent, you can use the `requirements_[platform]`
attributes.
name (str, optional): The name of the generated repository. The generated repositories
containing each requirement will be of the form <name>_<requirement-name>.
**kwargs (dict): Additional arguments to the [`pip_repository`](./pip_repository.md) repository rule.
"""
# Just in case our dependencies weren't already fetched
pip_install_dependencies()
pip_repository(
name = name,
requirements_lock = requirements_lock,
repo_prefix = "{}_".format(name),
incremental = True,
**kwargs
)
def pip_repositories():
"""
Obsolete macro to pull in dependencies needed to use the pip_import rule.
Deprecated:
the pip_repositories rule is obsolete. It is not used by pip_install.
"""
# buildifier: disable=print
print("DEPRECATED: the pip_repositories rule has been replaced with pip_install, please see rules_python 0.1 release notes")
```
#### File: extract_wheels/lib/wheel.py
```python
import email
import glob
import os
import stat
import zipfile
from typing import Dict, Optional, Set, Tuple
import installer
import pkg_resources
def current_umask() -> int:
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
def set_extracted_file_to_default_mode_plus_executable(path: str) -> None:
"""
Make file present at path have execute for user/group/world
(chmod +x) is no-op on windows per python docs
"""
os.chmod(path, (0o777 & ~current_umask() | 0o111))
class Wheel:
"""Representation of the compressed .whl file"""
def __init__(self, path: str):
self._path = path
@property
def path(self) -> str:
return self._path
@property
def name(self) -> str:
# TODO Also available as installer.sources.WheelSource.distribution
return str(self.metadata['Name'])
@property
def metadata(self) -> email.message.Message:
with installer.sources.WheelFile.open(self.path) as wheel_source:
metadata_contents = wheel_source.read_dist_info("METADATA")
metadata = installer.utils.parse_metadata_file(metadata_contents)
return metadata
@property
def version(self) -> str:
# TODO Also available as installer.sources.WheelSource.version
return str(self.metadata["Version"])
    def entry_points(self) -> Dict[str, Tuple[str, str]]:
"""Returns the entrypoints defined in the current wheel
See https://packaging.python.org/specifications/entry-points/ for more info
Returns:
Dict[str, Tuple[str, str]]: A mapping of the entry point's name to it's module and attribute
"""
with installer.sources.WheelFile.open(self.path) as wheel_source:
if "entry_points.txt" not in wheel_source.dist_info_filenames:
return dict()
entry_points_mapping = dict()
entry_points_contents = wheel_source.read_dist_info("entry_points.txt")
entry_points = installer.utils.parse_entrypoints(entry_points_contents)
for script, module, attribute, script_section in entry_points:
if script_section == "console":
entry_points_mapping[script] = (module, attribute)
return entry_points_mapping
def dependencies(self, extras_requested: Optional[Set[str]] = None) -> Set[str]:
dependency_set = set()
for wheel_req in self.metadata.get_all('Requires-Dist', []):
req = pkg_resources.Requirement(wheel_req) # type: ignore
if req.marker is None or any(
req.marker.evaluate({"extra": extra})
for extra in extras_requested or [""]
):
dependency_set.add(req.name) # type: ignore
return dependency_set
def unzip(self, directory: str) -> None:
with zipfile.ZipFile(self.path, "r") as whl:
whl.extractall(directory)
# The following logic is borrowed from Pip:
# https://github.com/pypa/pip/blob/cc48c07b64f338ac5e347d90f6cb4efc22ed0d0b/src/pip/_internal/utils/unpacking.py#L240
for info in whl.infolist():
name = info.filename
# Do not attempt to modify directories.
if name.endswith("/") or name.endswith("\\"):
continue
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
if mode and stat.S_ISREG(mode) and mode & 0o111:
name = os.path.join(directory, name)
set_extracted_file_to_default_mode_plus_executable(name)
def get_dist_info(wheel_dir: str) -> str:
    """Returns the relative path to the dist-info directory if it exists.
Args:
wheel_dir: The root of the extracted wheel directory.
Returns:
Relative path to the dist-info directory if it exists, else, None.
"""
dist_info_dirs = glob.glob(os.path.join(wheel_dir, "*.dist-info"))
if not dist_info_dirs:
raise ValueError(
"No *.dist-info directory found. %s is not a valid Wheel." % wheel_dir
)
if len(dist_info_dirs) > 1:
raise ValueError(
"Found more than 1 *.dist-info directory. %s is not a valid Wheel."
% wheel_dir
)
return dist_info_dirs[0]
def get_dot_data_directory(wheel_dir: str) -> Optional[str]:
"""Returns the relative path to the data directory if it exists.
See: https://www.python.org/dev/peps/pep-0491/#the-data-directory
Args:
wheel_dir: The root of the extracted wheel directory.
Returns:
Relative path to the data directory if it exists, else, None.
"""
dot_data_dirs = glob.glob(os.path.join(wheel_dir, "*.data"))
if not dot_data_dirs:
return None
if len(dot_data_dirs) > 1:
raise ValueError(
"Found more than 1 *.data directory. %s is not a valid Wheel." % wheel_dir
)
return dot_data_dirs[0]
def parse_wheel_meta_file(wheel_dir: str) -> Dict[str, str]:
"""Parses the given WHEEL file into a dictionary.
Args:
wheel_dir: The file path of the WHEEL metadata file in dist-info.
Returns:
The WHEEL file mapped into a dictionary.
"""
contents = {}
with open(wheel_dir, "r") as wheel_file:
for line in wheel_file:
cleaned = line.strip()
if not cleaned:
continue
try:
key, value = cleaned.split(":", maxsplit=1)
contents[key] = value.strip()
except ValueError:
                raise RuntimeError(
                    "Encountered invalid line in WHEEL file: '%s'" % cleaned
                )
return contents
```
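A small self-check for `parse_wheel_meta_file` above, using a throwaway WHEEL file in a temporary directory (the import path is a guess and depends on how the package is laid out):
```python
import os
import tempfile

from extract_wheels.lib.wheel import parse_wheel_meta_file  # path may differ

WHEEL_TEXT = """\
Wheel-Version: 1.0
Generator: bdist_wheel (0.37.0)
Root-Is-Purelib: true
Tag: py3-none-any
"""

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, 'WHEEL')
    with open(path, 'w') as f:
        f.write(WHEEL_TEXT)
    meta = parse_wheel_meta_file(path)
    assert meta['Wheel-Version'] == '1.0'
    assert meta['Tag'] == 'py3-none-any'
```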
|
{
"source": "jdochoas99/gestao_rh",
"score": 2
}
|
#### File: apps/departamentos/views.py
```python
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from .models import Departamento
# Create your views here.
class DepartamentosList(ListView):
model = Departamento
def get_queryset(self):
empresa_logada = self.request.user.funcionario.empresa
return Departamento.objects.filter(empresa=empresa_logada)
class DepartamentoCreate(CreateView):
model = Departamento
fields = ['nome']
def form_valid(self, form):
departamento = form.save(commit=False)
departamento.empresa = self.request.user.funcionario.empresa
departamento.save()
return super(DepartamentoCreate, self).form_valid(form)
class DepartamentoEdit(UpdateView):
model = Departamento
fields = ['nome']
class DepartamentoDelete(DeleteView):
model = Departamento
success_url = reverse_lazy('list_departamentos')
```
#### File: apps/funcionarios/models.py
```python
from django.db import models
from django.db.models import Sum
from django.contrib.auth.models import User
from django.urls import reverse
from apps.departamentos.models import Departamento
from apps.empresas.models import Empresa
# Create your models here.
class Funcionario(models.Model):
nome = models.CharField(max_length=100)
user = models.OneToOneField(User, on_delete=models.PROTECT)
departamentos = models.ManyToManyField(Departamento)
empresa = models.ForeignKey(Empresa, on_delete=models.PROTECT, null=True, blank=True)
def __str__(self):
return self.nome
def get_absolute_url(self):
return reverse('list_funcionarios')
@property
def total_horas_extra(self):
total = self.registrohoraextra_set.filter(utilizada=False).aggregate(Sum('horas'))['horas__sum']
return total or 0
```
#### File: apps/registro_hora_extra/models.py
```python
from django.db import models
from django.urls import reverse
from apps.funcionarios.models import Funcionario
# Create your models here.
class RegistroHoraExtra(models.Model):
motivo = models.CharField(max_length=100)
funcionario = models.ForeignKey(Funcionario, on_delete=models.PROTECT)
horas = models.DecimalField(max_digits=5, decimal_places=2)
utilizada = models.BooleanField(default=False)
def __str__(self):
return self.motivo
def get_absolute_url(self):
        return reverse('list_hora_extra', args=[self.funcionario.id])
```
#### File: apps/registro_hora_extra/views.py
```python
import json
from django.http import HttpResponse
from django.shortcuts import render
from django.urls import reverse_lazy, reverse
from django.views import View
from django.views.generic import ListView, UpdateView, DeleteView, CreateView
from .models import RegistroHoraExtra
from .forms import RegistroHoraExtraForm
class HoraExtraList(ListView):
model = RegistroHoraExtra
def get_queryset(self):
empresa_logada = self.request.user.funcionario.empresa
return RegistroHoraExtra.objects.filter(funcionario__empresa=empresa_logada)
class HoraExtraDelete(DeleteView):
model = RegistroHoraExtra
success_url = reverse_lazy('list_hora_extra')
class HoraExtraEdit(UpdateView):
model = RegistroHoraExtra
form_class = RegistroHoraExtraForm
def get_success_url(self):
return reverse_lazy('update_funcionario', args=[self.object.funcionario.id])
def get_form_kwargs(self):
kwargs = super(HoraExtraEdit, self).get_form_kwargs()
kwargs.update({'user': self.request.user})
return kwargs
class HoraExtraEditBase(UpdateView):
model = RegistroHoraExtra
form_class = RegistroHoraExtraForm
def get_success_url(self):
return reverse_lazy('update_hora_extra_base', args=[self.object.id])
def get_form_kwargs(self):
kwargs = super(HoraExtraEditBase, self).get_form_kwargs()
kwargs.update({'user': self.request.user})
return kwargs
class HoraExtraNovo(CreateView):
model = RegistroHoraExtra
form_class = RegistroHoraExtraForm
def get_form_kwargs(self):
kwargs = super(HoraExtraNovo, self).get_form_kwargs()
kwargs.update({'user': self.request.user})
return kwargs
def get_success_url(self):
return reverse_lazy('list_hora_extra')
class UtilizouHoraExtra(View):
def post(self, *args, **kwargs):
response = json.dumps({'mensagem': 'Requisição executada'})
registro_hora_extra = RegistroHoraExtra.objects.get(id=kwargs['pk'])
registro_hora_extra.utilizada = True
registro_hora_extra.save()
return HttpResponse(response, content_type='application/json')
class NaoUtilizouHoraExtra(View):
def post(self, *args, **kwargs):
response = json.dumps({'mensagem': 'Requisição executada'})
registro_hora_extra = RegistroHoraExtra.objects.get(id=kwargs['pk'])
registro_hora_extra.utilizada = False
registro_hora_extra.save()
return HttpResponse(response, content_type='application/json')
```
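The views above resolve URL names such as `list_hora_extra` and `update_hora_extra_base` via `reverse_lazy`, so the app's `urls.py` must define them. A plausible wiring inferred from those names (the routes, and any names not referenced above, are guesses):
```python
# Hypothetical urls.py for the registro_hora_extra app.
from django.urls import path
from .views import (HoraExtraList, HoraExtraNovo, HoraExtraEdit,
                    HoraExtraEditBase, HoraExtraDelete,
                    UtilizouHoraExtra, NaoUtilizouHoraExtra)

urlpatterns = [
    path('', HoraExtraList.as_view(), name='list_hora_extra'),
    path('novo/', HoraExtraNovo.as_view(), name='hora_extra_novo'),
    path('editar/<int:pk>/', HoraExtraEdit.as_view(), name='update_hora_extra'),
    path('editar-base/<int:pk>/', HoraExtraEditBase.as_view(), name='update_hora_extra_base'),
    path('excluir/<int:pk>/', HoraExtraDelete.as_view(), name='delete_hora_extra'),
    path('utilizou/<int:pk>/', UtilizouHoraExtra.as_view(), name='utilizou_hora_extra'),
    path('nao-utilizou/<int:pk>/', NaoUtilizouHoraExtra.as_view(), name='nao_utilizou_hora_extra'),
]
```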
|
{
"source": "jdockerty/pyjob",
"score": 3
}
|
#### File: pyjob/pyjob/search_test.py
```python
from pyjob.search import Search
import pytest
search = Search()
def test_api_key_set():
assert search._API_KEY != ""
def test_api_key_error(monkeypatch):
monkeypatch.delenv("REED_API_KEY")
with pytest.raises(SystemExit):
new_search = Search()
def test_default_location_distance():
search.set_location_distance(-50) # Default of 10 should be used
assert search._distance_from_location == 10
def test_set_location():
search.set_location("London")
assert search._location == "London"
def test_keyterms_set():
terms = ['software engineer', 'devops', 'SRE']
search.set_keyterms(terms)
assert search._search_keyterms == terms
def test_keyterms_errors():
terms = ''
with pytest.raises(SystemExit):
search.set_keyterms(terms)
def test_invalid_salary():
min_salary_invalid = -50000
max_salary_invalid = -90000
with pytest.raises(SystemExit):
search.set_salary_range(min=min_salary_invalid, max=0)
with pytest.raises(SystemExit):
search.set_salary_range(min=0, max=max_salary_invalid)
def test_invalid_job_type():
invalid_type = "infinite_salary_type"
with pytest.raises(SystemExit):
search.set_job_type(invalid_type)
def test_successful_job_type():
valid_type = "permanent"
another_valid_type = "contract"
search.set_job_type(valid_type)
assert search._permanent == True
search.set_job_type(another_valid_type)
assert search._contract == True
def test_job_poster():
poster_type = "recruiter"
search.set_posted_by(poster_type)
assert search._recruitment_agency_post == True
def test_invalid_job_poster():
invalid_poster_type = "myself"
with pytest.raises(SystemExit):
search.set_posted_by(invalid_poster_type)
def test_graduate_roles():
search.set_graduate_roles(True)
assert search._graduate_suitable == True
```
|
{
"source": "jdoconnor/quip-api",
"score": 3
}
|
#### File: samples/websocket/main.py
```python
import argparse
import json
import quip
import sys
import time
import websocket
PY3 = sys.version_info > (3,)
if PY3:
import _thread as thread
else:
import thread
HEARTBEAT_INTERVAL = 20
def open_websocket(url):
def on_message(ws, message):
print("message:")
print(json.dumps(json.loads(message), indent=4))
def on_error(ws, error):
print("error:")
print(error)
def on_close(ws):
print("### connection closed ###")
def on_open(ws):
print("### connection established ###")
def run(*args):
while True:
time.sleep(HEARTBEAT_INTERVAL)
ws.send(json.dumps({"type": "heartbeat"}))
thread.start_new_thread(run, ())
# websocket.enableTrace(True)
ws = websocket.WebSocketApp(
url, on_message=on_message, on_error=on_error, on_close=on_close)
ws.on_open = on_open
ws.run_forever()
def main():
    parser = argparse.ArgumentParser(description="Quip websocket sample.")
parser.add_argument("--access_token", required=True,
help="User's access token")
parser.add_argument("--quip_api_base_url", default=None,
help="Alternative base URL for the Quip API. If none is provided, "
"https://platform.quip.com will be used")
args = parser.parse_args()
quip_client = quip.QuipClient(
access_token=args.access_token,
base_url=args.quip_api_base_url or "https://platform.quip.com")
websocket_info = quip_client.new_websocket()
open_websocket(websocket_info["url"])
if __name__ == "__main__":
main()
```
|
{
"source": "jdoda/sdl2hl",
"score": 3
}
|
#### File: sdl2hl/sdl2hl/image.py
```python
from enum import IntEnum
from sdl2._sdl2 import lib
from error import check_int_err, check_ptr_err
import enumtools
from surface import Surface
from renderer import Texture
class ImageInitFlag(IntEnum):
jpg = lib.IMG_INIT_JPG
png = lib.IMG_INIT_PNG
tif = lib.IMG_INIT_TIF
webp = lib.IMG_INIT_WEBP
def init(*flags):
"""Loads dynamic libraries and prepares them for use.
Args:
*flags (Set[ImageInitFlag]): The desired image file formats.
"""
check_int_err(lib.IMG_Init(enumtools.get_mask(flags)))
def quit():
"""Indicate that we are ready to unload the dynamically loaded libraries."""
lib.IMG_Quit()
def load(file):
"""Load an image from a file name in a new surface. Type detected from file name.
    Args:
file: The name of the image file.
Returns:
A new surface.
"""
return Surface._from_ptr(check_ptr_err(lib.IMG_Load(file)))
def load_texture(renderer, file):
"""Load an image directly into a render texture.
Args:
renderer: The renderer to make the texture.
file: The image file to load.
Returns:
        A new texture.
"""
return Texture._from_ptr(check_ptr_err(lib.IMG_LoadTexture(renderer._ptr, file)))
def save(surface, file):
"""Save a png image of the surface.
Args:
surface: The surface to save.
file: The file path to save to.
"""
check_int_err(lib.IMG_SavePNG(surface._ptr, file))
```
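
A minimal usage sketch for this module, assuming the package is importable as `sdl2hl.image` and that a PNG file exists at the hypothetical path below:

```python
import sdl2hl.image as image

# prepare PNG and JPEG decoders, round-trip a file, then unload the libraries
image.init(image.ImageInitFlag.png, image.ImageInitFlag.jpg)
surface = image.load("sprite.png")      # decode into a Surface
image.save(surface, "sprite_copy.png")  # re-encode as PNG
image.quit()
```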
#### File: sdl2hl/sdl2hl/renderer.py
```python
from enum import IntEnum
from sdl2._sdl2 import ffi, lib
from .error import check_int_err, check_ptr_err
from .pixels import PixelFormat
from . import rect
from . import enumtools
class RendererFlags(IntEnum):
"""Flags used when creating a rendering context."""
software = lib.SDL_RENDERER_SOFTWARE #: The renderer is a software fallback.
accelerated = lib.SDL_RENDERER_ACCELERATED #: The renderer uses hardware acceleration.
presentvsync = lib.SDL_RENDERER_PRESENTVSYNC #: Present is synchronized with the refresh rate.
targettexture = lib.SDL_RENDERER_TARGETTEXTURE #: The renderer supports rendering to texture.
class BlendMode(IntEnum):
add = lib.SDL_BLENDMODE_ADD
blend = lib.SDL_BLENDMODE_BLEND
mod = lib.SDL_BLENDMODE_MOD
none = lib.SDL_BLENDMODE_NONE
class Renderer(object):
@staticmethod
def _from_ptr(ptr):
renderer = object.__new__(Renderer)
renderer._ptr = ptr
return renderer
@staticmethod
    def create_software_renderer(surface):
"""Create a 2D software rendering context for a surface.
Args:
surface (Surface): The surface where rendering is done.
Returns:
Renderer: A 2D software rendering context.
Raises:
SDLError: If there was an error creating the renderer.
"""
renderer = object.__new__(Renderer)
        renderer._ptr = check_ptr_err(lib.SDL_CreateSoftwareRenderer(surface._ptr))
return renderer
def __init__(self, window, index=-1, flags=frozenset()):
"""Create a 2D rendering context for a window.
Args:
window (Window): The window where rendering is displayed.
index (int): The index of the rendering driver to initialize, or -1 to initialize the first one supporting
the requested flags.
flags (Set[RendererFlags]): The requested renderer flags.
Raises:
SDLError: If there was an error creating the renderer.
"""
self._ptr = check_ptr_err(lib.SDL_CreateRenderer(window._ptr, index, enumtools.get_mask(flags)))
def __del__(self):
lib.SDL_DestroyRenderer(self._ptr)
def _get_renderer_info(self):
info = ffi.new('SDL_RendererInfo *')
check_int_err(lib.SDL_GetRendererInfo(self._ptr, info))
return info
@property
def name(self):
"""str: The name of the renderer."""
return self._get_renderer_info().name
@property
def flags(self):
"""Set[RendererFlags]: Supported renderer flags."""
return enumtools.get_items(RendererFlags, self._get_renderer_info().flags)
@property
def texture_formats(self):
"""Set[PixelFormat]: The available texture formats."""
info = self._get_renderer_info()
return {PixelFormat(info.texture_formats[i]) for i in range(info.num_texture_formats)}
@property
def max_texture_width(self):
"""int: The maximum texture width."""
return self._get_renderer_info().max_texture_width
@property
def max_texture_height(self):
"""int: The maximum texture height."""
return self._get_renderer_info().max_texture_height
@property
def draw_color(self):
"""Tuple[int, int, int, int]: The color used for drawing operations in (red, green, blue, alpha) format."""
rgba = ffi.new('Uint8[]', 4)
check_int_err(lib.SDL_GetRenderDrawColor(self._ptr, rgba + 0, rgba + 1, rgba + 2, rgba + 3))
return (rgba[0], rgba[1], rgba[2], rgba[3])
@draw_color.setter
def draw_color(self, rgba):
r, g, b, a = rgba
check_int_err(lib.SDL_SetRenderDrawColor(self._ptr, r, g, b, a))
@property
def viewport(self):
"""Rect: The drawing area for rendering on the current target."""
viewport = rect.Rect(0, 0, 0, 0)
check_int_err(lib.SDL_RenderGetViewport(self._ptr, viewport._ptr))
return viewport
@viewport.setter
def viewport(self, viewport):
check_int_err(lib.SDL_RenderSetViewport(self._ptr, viewport._ptr))
@property
def render_target_supported(self):
"""bool: Whether a window supports the use of render targets."""
return bool(lib.SDL_RenderTargetSupported(self._ptr))
@property
def render_target(self):
"""Texture: The current render target, or None if using the default render target."""
render_target = lib.SDL_GetRenderTarget(self._ptr)
if render_target == ffi.NULL:
return None
else:
return Texture._from_ptr(render_target)
@render_target.setter
def render_target(self, texture):
if texture is not None:
p = texture._ptr
else:
p = ffi.NULL
check_int_err(lib.SDL_SetRenderTarget(self._ptr, p))
@property
def blend_mode(self):
"""BlendMode: The blend mode used for drawing operations."""
blend_mode_ptr = ffi.new('int *')
check_int_err(lib.SDL_GetRenderDrawBlendMode(self._ptr, blend_mode_ptr))
return BlendMode(blend_mode_ptr[0])
@blend_mode.setter
def blend_mode(self, blend_mode):
check_int_err(lib.SDL_SetRenderDrawBlendMode(self._ptr, blend_mode))
def clear(self):
"""Clear the current rendering target with the drawing color.
This function clears the entire rendering target, ignoring the viewport.
Raises:
SDLError: If an error is encountered.
"""
check_int_err(lib.SDL_RenderClear(self._ptr))
def draw_line(self, x1, y1, x2, y2):
"""Draw a line on the current rendering target.
Args:
x1 (int): The x coordinate of the start point.
y1 (int): The y coordinate of the start point.
x2 (int): The x coordinate of the end point.
y2 (int): The y coordinate of the end point.
Raises:
SDLError: If an error is encountered.
"""
check_int_err(lib.SDL_RenderDrawLine(self._ptr, x1, y1, x2, y2))
def draw_lines(self, *points):
"""Draw a series of connected lines on the current rendering target.
Args:
*points (Point): The points along the lines.
Raises:
SDLError: If an error is encountered.
"""
point_array = ffi.new('SDL_Point[]', len(points))
for i, p in enumerate(points):
point_array[i] = p._ptr[0]
check_int_err(lib.SDL_RenderDrawLines(self._ptr, point_array, len(points)))
def draw_point(self, x, y):
"""Draw a point on the current rendering target.
Args:
x (int): The x coordinate of the point.
y (int): The y coordinate of the point.
Raises:
SDLError: If an error is encountered.
"""
check_int_err(lib.SDL_RenderDrawPoint(self._ptr, x, y))
def draw_points(self, *points):
"""Draw multiple points on the current rendering target.
Args:
*points (Point): The points to draw.
Raises:
SDLError: If an error is encountered.
"""
point_array = ffi.new('SDL_Point[]', len(points))
for i, p in enumerate(points):
point_array[i] = p._ptr[0]
check_int_err(lib.SDL_RenderDrawPoints(self._ptr, point_array, len(points)))
def draw_rect(self, rect):
"""Draw a rectangle on the current rendering target.
Args:
rect (Rect): The destination rectangle, or None to outline the entire rendering target.
Raises:
SDLError: If an error is encountered.
"""
check_int_err(lib.SDL_RenderDrawRect(self._ptr, rect._ptr))
def draw_rects(self, *rects):
"""Draw some number of rectangles on the current rendering target.
Args:
*rects (Rect): The destination rectangles.
Raises:
SDLError: If an error is encountered.
"""
rect_array = ffi.new('SDL_Rect[]', len(rects))
for i, r in enumerate(rects):
rect_array[i] = r._ptr[0]
check_int_err(lib.SDL_RenderDrawRects(self._ptr, rect_array, len(rects)))
def fill_rect(self, rect):
"""Fill a rectangle on the current rendering target with the drawing color.
Args:
rect (Rect): The destination rectangle, or None to fill the entire rendering target.
Raises:
SDLError: If an error is encountered.
"""
check_int_err(lib.SDL_RenderFillRect(self._ptr, rect._ptr))
def fill_rects(self, *rects):
"""Fill some number of rectangles on the current rendering target with the drawing color.
Args:
*rects (Rect): The destination rectangles.
Raises:
SDLError: If an error is encountered.
"""
rect_array = ffi.new('SDL_Rect[]', len(rects))
for i, r in enumerate(rects):
rect_array[i] = r._ptr[0]
check_int_err(lib.SDL_RenderFillRects(self._ptr, rect_array, len(rects)))
def copy(self, texture, source_rect=None, dest_rect=None, rotation=0, center=None, flip=lib.SDL_FLIP_NONE):
"""Copy a portion of the source texture to the current rendering target, rotating it by angle around the given center.
Args:
texture (Texture): The source texture.
source_rect (Rect): The source rectangle, or None for the entire texture.
dest_rect (Rect): The destination rectangle, or None for the entire rendering target.
rotation (float): An angle in degrees that indicates the rotation that will be applied to dest_rect.
center (Point): The point around which dest_rect will be rotated (if None, rotation will be done around
dest_rect.w/2, dest_rect.h/2).
flip (int): A value stating which flipping actions should be performed on the texture.
Raises:
SDLError: If an error is encountered.
"""
        if source_rect is None:
            source_rect_ptr = ffi.NULL
        else:
            source_rect_ptr = source_rect._ptr
        if dest_rect is None:
            dest_rect_ptr = ffi.NULL
        else:
            dest_rect_ptr = dest_rect._ptr
        if center is None:
            center_ptr = ffi.NULL
        else:
            center_ptr = center._ptr
check_int_err(lib.SDL_RenderCopyEx(self._ptr, texture._ptr, source_rect_ptr, dest_rect_ptr, rotation, center_ptr, flip))
def present(self):
"""Update the screen with rendering performed."""
lib.SDL_RenderPresent(self._ptr)
class TextureAccess(IntEnum):
static = lib.SDL_TEXTUREACCESS_STATIC #: Changes rarely, not lockable.
streaming = lib.SDL_TEXTUREACCESS_STREAMING #: Changes frequently, lockable.
target = lib.SDL_TEXTUREACCESS_TARGET #: Texture can be used as a render target.
class Texture(object):
@staticmethod
def _from_ptr(ptr):
        texture = object.__new__(Texture)
        texture._ptr = ptr
        return texture
@staticmethod
def from_surface(renderer, surface):
"""Create a texture from an existing surface.
Args:
surface (Surface): The surface containing pixel data used to fill the texture.
Returns:
Texture: A texture containing the pixels from surface.
Raises:
SDLError: If an error is encountered.
"""
texture = object.__new__(Texture)
texture._ptr = check_ptr_err(lib.SDL_CreateTextureFromSurface(renderer._ptr, surface._ptr))
return texture
def __init__(self, renderer, fmt, access, w, h):
"""Create a texture for a rendering context.
Args:
renderer (Renderer): The renderer.
fmt (PixelFormat): The format of the texture.
access (TextureAccess): The access value for the texture.
w (int): The width of the texture in pixels.
h (int): The height of the texture in pixels.
Raises:
SDLError: If no rendering context was active, the format was unsupported, or the width or height were out
of range.
"""
self._ptr = check_ptr_err(lib.SDL_CreateTexture(renderer._ptr, fmt, access, w, h))
def __del__(self):
lib.SDL_DestroyTexture(self._ptr)
@property
def format(self):
"""PixelFormat: The raw format of the texture. The actual format may differ, but pixel transfers will use this
format.
"""
fmt = ffi.new('Uint32 *')
check_int_err(lib.SDL_QueryTexture(self._ptr, fmt, ffi.NULL, ffi.NULL, ffi.NULL))
return PixelFormat(fmt[0])
@property
def access(self):
"""TextureAccess: The actual access to the texture."""
access = ffi.new('int *')
check_int_err(lib.SDL_QueryTexture(self._ptr, ffi.NULL, access, ffi.NULL, ffi.NULL))
return TextureAccess(access[0])
@property
def w(self):
"""int: The width of the texture in pixels."""
w = ffi.new('int *')
check_int_err(lib.SDL_QueryTexture(self._ptr, ffi.NULL, ffi.NULL, w, ffi.NULL))
return w[0]
@property
def h(self):
"""int: The height of the texture in pixels."""
h = ffi.new('int *')
check_int_err(lib.SDL_QueryTexture(self._ptr, ffi.NULL, ffi.NULL, ffi.NULL, h))
return h[0]
@property
def color_mod(self):
"""Tuple[int, int, int]: The additional color value used in render copy operations in (red, green, blue)
format.
"""
rgb = ffi.new('Uint8[]', 3)
check_int_err(lib.SDL_GetTextureColorMod(self._ptr, rgb + 0, rgb + 1, rgb + 2))
return (rgb[0], rgb[1], rgb[2])
@color_mod.setter
def color_mod(self, rgb):
r, g, b = rgb
check_int_err(lib.SDL_SetTextureColorMod(self._ptr, r, g, b))
@property
def alpha_mod(self):
"""int: The additional alpha value used in render copy operations."""
a = ffi.new('Uint8 *')
check_int_err(lib.SDL_GetTextureAlphaMod(self._ptr, a))
return a[0]
@alpha_mod.setter
def alpha_mod(self, a):
check_int_err(lib.SDL_SetTextureAlphaMod(self._ptr, a))
@property
def blend_mode(self):
"""BlendMode: The blend mode used for drawing operations."""
blend_mode_ptr = ffi.new('int *')
        check_int_err(lib.SDL_GetTextureBlendMode(self._ptr, blend_mode_ptr))
return BlendMode(blend_mode_ptr[0])
@blend_mode.setter
def blend_mode(self, blend_mode):
check_int_err(lib.SDL_SetTextureBlendMode(self._ptr, blend_mode))
```
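
A hedged sketch of a typical frame rendered with this API; the `window` object is assumed to come from an `sdl2hl` window class that is not part of this file:

```python
from sdl2hl.renderer import Renderer, RendererFlags

renderer = Renderer(window, flags={RendererFlags.accelerated,
                                   RendererFlags.presentvsync})
renderer.draw_color = (0, 0, 0, 255)    # opaque black
renderer.clear()                        # clear the whole render target
renderer.draw_color = (255, 0, 0, 255)  # switch to red
renderer.draw_line(0, 0, 100, 100)      # one diagonal line
renderer.present()                      # flip the back buffer
```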
|
{
"source": "JDoelger/InfluenzaFitnessInference",
"score": 2
}
|
#### File: notebooks/fitnessinference/HA_analysis.py
```python
import numpy as np
import copy
import os
import pickle
import scipy
try:
import simulation as simu
import analysis as ana
except ModuleNotFoundError:
from fitnessinference import simulation as simu
from fitnessinference import analysis as ana
from sklearn.metrics import precision_recall_curve, auc, roc_auc_score, roc_curve
from datetime import date
import matplotlib as mpl
import matplotlib.pyplot as plt
from Bio import SeqIO
from Bio.Seq import Seq
from math import log10, floor
import pandas as pd
def retrieve_seqs(fastafile='HA(H3N2)1968-2020_Accessed210418.fasta'):
"""
extract yearly sequences from fasta file
"""
repo_path = os.getcwd()
fastafilepath = os.path.join(repo_path, 'figures', fastafile)
    # HA (H3N2) protein records from IRD (fludb.org) for 1968-2020,
    # downloaded on 18th Apr. 2021; only date and season in description
    protein_list = list(SeqIO.parse(fastafilepath, 'fasta'))
# protein_BI1619068 = list(SeqIO.parse('BI_16190_68_ProteinFasta.fasta',
# 'fasta')) # HA (H3N2) protein records from IRD (fludb.org) for strain BI/16190/68 (accession: KC296480)
# seq_BI68 = protein_BI1619068[0].seq # reference sequence for strain BI/68
# use only seqs that are complete with no insertions/deletions
complete_list = []
for rec in protein_list:
if len(rec) == 566:
complete_list.append(rec)
# remove all sequences with ambiguous amino acid codes
amb_aa_list = ['B', 'J', 'Z', 'X']
complete_unamb_list = []
for rec in complete_list:
amb_count = 0
for aa in amb_aa_list:
if aa in rec.seq:
amb_count += 1
break
if amb_count == 0:
complete_unamb_list.append(rec)
# divide sequences into years: as list of years, which contain list of sequences
year1 = 1968
yearend = 2020
year_list = list(i for i in range(year1, yearend + 1)) # list of years
yearly = list([] for i in range(0, yearend - year1 + 1)) # list of sequences for each year
for rec in complete_unamb_list:
for year in year_list:
if str(year) in rec.id:
yearly[year_list.index(year)].append(str(rec.seq)) # append only the sequence, not whole record
return year_list, yearly
def add_reference_sequences_from_fasta(fastafile, seq_name, results_directory=('C:/Users/julia/Documents/Resources/InfluenzaFitnessLandscape'
'/NewApproachFromMarch2021/InfluenzaFitnessInference/figures')):
"""
add one reference sequence in dictionary of reference sequences that is saved in the figure directory
"""
# load current seq_refs
results_directory = os.path.normpath(results_directory)
if not os.path.exists(results_directory):
results_directory = os.path.join(os.getcwd(), 'figures')
seq_ref_file = os.path.join(results_directory, 'reference_sequences.data')
if os.path.exists(seq_ref_file):
with open(seq_ref_file, 'rb') as f:
seq_ref_dict = pickle.load(f)
else:
# if no previous reference sequences saved, initialize empty directory
seq_ref_dict = {}
# retrieve sequence from fasta file
fasta_path = os.path.join(results_directory, fastafile)
seq_rec_list = list(SeqIO.parse(fasta_path, 'fasta'))
seq_ref = seq_rec_list[0].seq # choose first entry of sequence list, although each should only have one entry
# add the new reference sequence under its chosen name in the dictionary
seq_ref_dict[seq_name] = seq_ref
# save the dictionary back in the file
with open(seq_ref_file, 'wb') as f:
pickle.dump(seq_ref_dict, f)
def print_seq_refs(results_directory=('C:/Users/julia/Documents/Resources/InfluenzaFitnessLandscape'
'/NewApproachFromMarch2021/InfluenzaFitnessInference/figures')):
"""
print out the names of added reference sequences in the list
"""
results_directory = os.path.normpath(results_directory)
if not os.path.exists(results_directory):
results_directory = os.path.join(os.getcwd(), 'figures')
seq_ref_file = os.path.join(results_directory, 'reference_sequences.data')
if os.path.exists(seq_ref_file):
with open(seq_ref_file, 'rb') as f:
seq_ref_dict = pickle.load(f)
for key in seq_ref_dict.keys():
print(key)
def strain_info(seqs_list):
"""
calculate strains and frequencies from list of seq.s at different time points
seqs_list: list of list of sequences for a number of time points
returns lists of strains and strain frequencies for each time, total count at each time,
strains and frequencies across all time points
"""
total_count_list=[len(seqs) for seqs in seqs_list] # total number of sequences at each time
strains_list=[[] for seqs in seqs_list]
strains_freq_list=[[] for seqs in seqs_list]
strain_All_list=[]
strain_All_freq_list=[]
for y in range(len(seqs_list)): # for each time point
## finding unique seqs in each time point
strains_count=[] # counts for each strain before normalization
for i in range(len(seqs_list[y])):
if seqs_list[y][i] not in strains_list[y]:
strains_list[y].append(seqs_list[y][i])
strains_count.append(1)
else:
strains_count[strains_list[y].index(seqs_list[y][i])]+=1
# rank strains of this year:
merge_list=list(zip(strains_count,strains_list[y]))
merge_list.sort(reverse=True) # sort coarse strain list according to count
strains_count=[y for y,x in merge_list]
strains_list[y]=[x for y,x in merge_list]
strains_freq_list[y]=[c/total_count_list[y] for c in strains_count] # calculate strain frequency from count
## finding unique seqs across time points
for sti in range(len(strains_list[y])): # for each strain at this time
if strains_list[y][sti] not in strain_All_list:
strain_All_list.append(strains_list[y][sti])
strain_All_freq_list.append(strains_freq_list[y][sti]) # unnormalized (adding yearly freq)
else:
strain_All_freq_list[strain_All_list.index(strains_list[y][sti])]+=strains_freq_list[y][sti]
merge_list=list(zip(strain_All_freq_list,strain_All_list))
merge_list.sort(reverse=True) # sort coarse strain list according to count
strain_All_freq_list=[y/len(seqs_list) for y,x in merge_list] # normalized by number of time points
strain_All_list=[x for y,x in merge_list]
return [strains_list, strains_freq_list, total_count_list, strain_All_list,strain_All_freq_list]
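# Toy example (hypothetical input) of what strain_info computes:
# strain_info([['AAB', 'AAB', 'AAA'], ['AAA']]) ranks strains per year by
# count, giving strains_list = [['AAB', 'AAA'], ['AAA']],
# strains_freq_list = [[2/3, 1/3], [1.0]] and total_count_list = [3, 1].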
def exe_plot_strainSuccession_HA():
"""
make and save plot of strain succession since 1968 of HA (H3N2) as collected from
the influenza research database (fludb.org)
Results:
plot file: .pdf
name: HA_strain_succession
Returns:
None
Dependencies:
import os
import pickle
import matplotlib as mpl
import matplotlib.pyplot as plt
from Bio import SeqIO
from Bio.Seq import Seq
other functions in this module
"""
# plot settings
plt_set = ana.set_plot_settings()
fig = plt.figure(figsize=(plt_set['full_page_width'], 3))
ax1 = fig.add_axes(plt_set['plot_dim_2pan'][0])
ax2 = fig.add_axes(plt_set['plot_dim_2pan'][1])
repo_directory = ('C:/Users/julia/Documents/Resources/InfluenzaFitnessLandscape/'
'NewApproachFromMarch2021/InfluenzaFitnessInference')
repo_directory = os.path.normpath(repo_directory)
if not os.path.exists(repo_directory):
repo_directory = os.getcwd()
figure_directory = os.path.join(repo_directory, 'figures')
this_plot_filepath = os.path.join(figure_directory,
'HA_strain_succession' + plt_set['file_extension'])
# retrieve HA protein sequences from fasta file
year_list, yearly = retrieve_seqs()
# divide sequences into strains
[strain_yearly, strain_frequency_yearly, tot_count_yearly,
strain_All, strain_frequency_All] = strain_info(yearly)
strain_All_timeOrdered = [] # all strains ordered in time (first observed with highest frequency listed first)
strain_All_freq_timeOrdered = [] # frequency of all strains ordered in time
# order strains
for y in range(len(strain_yearly)):
for sti in range(len(strain_yearly[y])): # for each strain at this time
if strain_yearly[y][sti] not in strain_All_timeOrdered:
strain_All_timeOrdered.append(strain_yearly[y][sti])
strain_All_freq_timeOrdered.append(strain_frequency_yearly[y][sti]) # unnormalized (adding yearly freq)
else:
strain_All_freq_timeOrdered[strain_All_timeOrdered.index(strain_yearly[y][sti])] += \
strain_frequency_yearly[y][sti]
# assign strain label to each strain in each year
strain_All_freq_yearly = [[0 for i in range(len(strain_All_timeOrdered))] for y in
range(len(strain_yearly))] # frequency of all ever observed strains in each year
strain_index_yearly = [[0 for sti in range(len(strain_yearly[y]))] for y in
range(len(strain_yearly))] # strain labels for strains that are observed in each year
for y in range(len(strain_yearly)):
for sti in range(len(strain_yearly[y])):
label = strain_All_timeOrdered.index(strain_yearly[y][sti]) # strain label
strain_All_freq_yearly[y][label] = strain_frequency_yearly[y][sti] # strain frequency update
strain_index_yearly[y][sti] = label # save strain label
strain_frequency_yearly_transpose = list(map(list, zip(*strain_All_freq_yearly)))
cm = plt.get_cmap('rainbow')
colorlist = [cm(1. * i / (len(strain_frequency_yearly_transpose)))
for i in range(len(strain_frequency_yearly_transpose))]
for sti in range(len(strain_frequency_yearly_transpose)):
ax1.plot(year_list, strain_frequency_yearly_transpose[sti], color=colorlist[sti])
ax1.set_xlabel('year')
ax1.set_ylabel('strain frequency')
ax1.text(plt_set['plotlabel_shift_2pan'], 1, '(a)', transform=ax1.transAxes,
fontsize=plt_set['label_font_size'], va='top', ha='right')
for y in range(len(strain_index_yearly)):
for sti in range(len(strain_index_yearly[y]) - 1, -1, -1):
ax2.plot(y + year_list[0], strain_index_yearly[y][sti], '.',
markersize=plt_set['plot_marker_size_dot'], color='blue')
ax2.plot(y + year_list[0], strain_index_yearly[y][0], '.',
markersize=plt_set['plot_marker_size_dot'], color='red')
ax2.set_xlabel('year')
ax2.set_ylabel('strain label')
ax2.text(plt_set['plotlabel_shift_2pan'], 1, '(b)', transform=ax2.transAxes,
fontsize=plt_set['label_font_size'], va='top', ha='right')
plt.savefig(this_plot_filepath, bbox_inches='tight')
plt.close()
def fitness_host(seq, st_yearly, st_freq_yearly, sigma_h, D0, res_targeted):
"""
calculate the host population-dependent fitness contribution for one sequence
at the current time
Parameters:
seq: numpy.ndarray
sequence
st_yearly: list
list of strains for each time step up to t-1
st_freq_yearly: list
list of strain frequencies for each time step up to t-1
sigma_h: float
coefficient modulating f_host
D0: float
        cross-immunity distance
    res_targeted: list
        indices of the targeted (epitope) residues used in the distance calculation
    Results:
f_host: float
host-dependent fitness for the sequence at the current time
Dependencies:
import numpy as np
"""
seq = np.array(list(seq))[res_targeted]
st_yearly = [np.array([np.array(list(seq))[res_targeted] for seq in st_current]) for st_current in st_yearly]
st_freq_yearly = [np.array(stf_current) for stf_current in st_freq_yearly]
f_host_noSig = 0 # initialize host fitness without sigma_h factor
for t in range(len(st_yearly)): # iterate through all prev. time steps
strains = st_yearly[t]
# create array of same dimension as strain list at t
seq_arr = np.repeat([seq], len(strains), axis=0)
# calculate mutational distances between seq_arr and strains
mut_dist = np.sum(seq_arr != strains, axis=1)
f_host_noSig += -np.dot(st_freq_yearly[t], np.exp(-mut_dist / D0))
f_host = sigma_h * f_host_noSig
return f_host
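# Hedged reading of the computation above, in formula form:
#   f_host(s, t) = -sigma_h * sum_{t' < t} sum_{a in strains(t')}
#                      x_a(t') * exp(-d(s, a) / D0)
# where x_a(t') is the frequency of strain a at time t' and d(s, a) is the
# Hamming distance between s and a restricted to the res_targeted positions.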
def minus_fhost_list(strain_current, st_yearly, st_freq_yearly, sigma_h, D0, res_targeted):
"""
calculate minus the host population-dependent fitness contribution for all strains
at the current time
Parameters:
strain_current: numpy.ndarray
list of current strains (=unique sequences)
st_yearly: list
list of strains for each time step up to t-1
st_freq_yearly: list
list of strain frequencies for each time step up to t-1
sigma_h: float
coefficient modulating f_host
D0: float
        cross-immunity distance
    res_targeted: list
        indices of the targeted (epitope) residues used in the distance calculation
    Returns:
    Mf_host_list: numpy.ndarray
        minus the host-dependent fitness for each strain at the current time
Dependencies:
import numpy as np
"""
Mf_host_list = np.array([-fitness_host(seq, st_yearly, st_freq_yearly, sigma_h, D0, res_targeted)
for seq in strain_current])
return Mf_host_list
def def_res_epitope_list():
"""
stores list of residue indices (in my numbering) for HA epitopes A, B, C, D, E with residue positions taken
and translated from (Suzuki 2006, Mol. Biol. Evol.)
"""
res_epitope_list = [[137, 139, 141, 145, 146, 147, 148, 150, 152, 153, 155, 157, 158, 159, 160, 161, 165, 167, 183],
[143, 144, 170, 171, 172, 173, 174, 175, 178, 179, 180, 201, 202, 203, 204, 205, 207, 208, 209, 211, 212, 213],
[59, 60, 61, 62, 63, 65, 66, 68, 69, 288, 290, 291, 293, 294, 295, 309, 312, 314, 315, 319, 320, 322, 323, 324,
325, 326, 327],
[111, 117, 118, 132, 136, 182, 185, 186, 187, 188, 189, 190, 191, 192, 194, 197, 216, 218, 222, 223, 224, 227, 228,
229, 230, 231, 232, 233, 234, 241, 242, 243, 244, 245, 253, 255, 257, 259, 261, 262, 263],
[72, 74, 77, 78, 82, 90, 93, 95, 96, 97, 98, 101, 102, 103, 106, 107, 109, 124, 275, 276, 277, 280]]
return res_epitope_list
def convert_my_ind_to_Lee_HA_numbering(my_indices):
"""
convert list of indices in my numbering to HA numbering used by Lee et al. (PNAS 2018)
"""
Lee_indices = []
for ind in my_indices:
if ind <= 15:
Lee_ind = ind - 16
else:
Lee_ind = ind - 15
Lee_indices.append(Lee_ind)
return Lee_indices
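# Example: my indices [10, 20] map to Lee numbering [-6, 5] under the rule
# above; the Lee scheme has no position 0, hence the asymmetric offsets.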
def convert_Lee_HA_numbering_to_my_ind(Lee_indices):
"""
convert list of indices in HA numbering used by Lee et al. (PNAS 2018) to my numbering
"""
my_indices = []
for ind in Lee_indices:
if ind < 0:
my_ind = ind + 16
elif ind > 0:
my_ind = ind + 15
else:
print('error: Lee index=0!!')
my_indices.append(my_ind)
return my_indices
def exe_minus_fhost_yearly(sigma_h, D0, results_directory=('C:/Users/julia/Documents/Resources/InfluenzaFitnessLandscape'
'/NewApproachFromMarch2021/InfluenzaFitnessInference/figures')):
"""
calculates -fhost for each strain in each given strain in each year
and saves it in pickled file "HA_MinusFhost_yearly.data"
"""
## define res_targeted as all head epitope residues
# list of residue indices (in my numbering) for epitopes A, B, C, D, E with residue positions taken
# and translated from (Suzuki 2006, Mol. Biol. Evol.):
res_epitope_list = def_res_epitope_list()
res_allepitopes_list = [res for res_list in res_epitope_list for res in res_list]
res_targeted = res_allepitopes_list
# retrieve HA sequences
year_list, yearly = retrieve_seqs()
# divide sequences into strains
[strain_yearly, strain_frequency_yearly, tot_count_yearly,
strain_All, strain_frequency_All] = strain_info(yearly)
# calculate -Fhost for each strain in each year
MinusFhost_yearly = []
for y in range(len(strain_yearly) - 1):
MinusFhost_list = \
minus_fhost_list(strain_yearly[y + 1], strain_yearly[:y + 1], strain_frequency_yearly[:y + 1], sigma_h,
D0, res_targeted)
MinusFhost_yearly.append(MinusFhost_list)
# save minus_fhost_yearly as pickle file in figures folder
results_directory = os.path.normpath(results_directory)
if not os.path.exists(results_directory):
results_directory = os.path.join(os.getcwd(), 'figures')
file_name = 'HA_MinusFhost_yearly' + 'sigma_h_'+ str(sigma_h) + '_D0_' + str(D0) + '.data'
file_path = os.path.join(results_directory, file_name)
with open(file_path, 'wb') as f:
pickle.dump(MinusFhost_yearly, f)
def exe_plot_minus_fhost_yearly(sigma_h, D0,
results_directory=('C:/Users/julia/Documents/Resources/InfluenzaFitnessLandscape'
'/NewApproachFromMarch2021/InfluenzaFitnessInference/figures'),
figure_directory=('C:/Users/julia/Documents/Resources/InfluenzaFitnessLandscape'
'/NewApproachFromMarch2021/InfluenzaFitnessInference/figures')):
# load minus_fhost_yearly from pickle file in figures folder
results_directory = os.path.normpath(results_directory)
if not os.path.exists(results_directory):
results_directory = os.path.join(os.getcwd(), 'figures')
file_name = 'HA_MinusFhost_yearly' + 'sigma_h_' + str(sigma_h) + '_D0_' + str(D0) + '.data'
file_path = os.path.join(results_directory, file_name)
with open(file_path, 'rb') as f:
MinusFhost_yearly = pickle.load(f)
figure_directory = os.path.normpath(figure_directory)
if not os.path.exists(figure_directory):
figure_directory = os.path.join(os.getcwd(), 'figures')
plt_set = ana.set_plot_settings()
fig_name = 'HA_MFhost_dist' + 'sigma_h_' + str(sigma_h) + '_D0_' + str(D0) + plt_set['file_extension']
this_plot_filepath = os.path.join(figure_directory, fig_name)
fig = plt.figure(figsize=(plt_set['full_page_width']/2, 3))
ax1 = fig.add_axes(plt_set['plot_dim_1pan'][0])
# retrieve HA sequences in order to get year_list
year_list, yearly = retrieve_seqs()
for y in range(len(MinusFhost_yearly)):
ax1.plot([year_list[y]] * len(MinusFhost_yearly[y]), MinusFhost_yearly[y] - np.mean(MinusFhost_yearly[y]), '.',
color='black')
ax1.set_xlabel('year')
ax1.set_ylabel('$-F_{host}$ - $<-F_{host}>$')
    plt.savefig(this_plot_filepath, bbox_inches='tight')
    plt.close()
def binary_strains(seq_ref, st_yearly, st_freq_yearly, minus_f_host_yearly, res_targeted):
"""
translate strains into binary representation of head epitope region based on chosen reference sequence
and update the respective response values minus_f_host_yearly for the respective binary strains
"""
## turn list of strings into arrays with sequences reduced to the HA head epitope sites
seq_ref = np.array(list(seq_ref))[res_targeted]
st_yearly = [np.array([np.array(list(seq))[res_targeted] for seq in st_current]) for st_current in st_yearly]
st_freq_yearly = [np.array(stf_current) for stf_current in st_freq_yearly]
## compare each strain in each year to the reference seq and create lists of the sequence reps and frequencies of
# the new binary strains
st_bin_yearly = [] # binary strain list
for t in range(len(st_yearly)): # iterate through all prev. time steps
strains = st_yearly[t]
# create array of same dimension as strain list at t
seq_arr = np.repeat([seq_ref], len(strains), axis=0)
# calculate binary strains based on difference to reference seq
binary_strains = (seq_arr!=strains).astype(int)
st_bin_yearly.append(binary_strains)
# update strain and strain frequency lists as well as minus_f_host_yearly for binary strains
st_bin_yearly_new = [[] for t in range(len(st_yearly))] # new list of binary strains
st_yearly_new = [[] for t in range(len(st_yearly))] # non-redundant lists of nonbin strains
minus_f_host_yearly_new = [[] for t in range(len(minus_f_host_yearly))]
st_bin_freq_yearly = [[] for t in range(len(st_yearly))]
    for t in range(len(st_bin_yearly)):
        for i in range(len(st_bin_yearly[t])):
            bin_strain = st_bin_yearly[t][i].tolist()
            nonbin_strain = st_yearly[t][i].tolist()
            # if current binary strain saved already
            if bin_strain in st_bin_yearly_new[t]:
                # if corresponding non-bin strain not saved yet
                if nonbin_strain not in st_yearly_new[t]:
                    # add new strain to the lists and its frequency to the frequency list
                    st_yearly_new[t].append(nonbin_strain)
                    st_bin_yearly_new[t].append(bin_strain)
                    st_bin_freq_yearly[t].append(st_freq_yearly[t][i])
                    if t != 0:
                        minus_f_host_yearly_new[t-1].append(minus_f_host_yearly[t-1][i])
                # if corresponding non-bin strain already saved
                else:
                    st_index = st_yearly_new[t].index(nonbin_strain)
                    st_bin_freq_yearly[t][st_index] += st_freq_yearly[t][i]
            # if current binary strain not saved already
            else:
                st_yearly_new[t].append(nonbin_strain)
                st_bin_yearly_new[t].append(bin_strain)
                st_bin_freq_yearly[t].append(st_freq_yearly[t][i])
                if t != 0:
                    minus_f_host_yearly_new[t-1].append(minus_f_host_yearly[t-1][i])
return st_bin_yearly_new, st_bin_freq_yearly, minus_f_host_yearly_new
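# In the binary representation built above, site k of a strain is 1 if the
# strain differs from seq_ref at targeted residue k and 0 otherwise, i.e.
# each strain is reduced to its mutation profile relative to the reference.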
def inference_features_Ising_noCouplings(strain_samp_yearly):
"""
calculate the feature matrix for inference (for Ising strains)
Parameters:
strain_samp_yearly: list
list of strains for each inference time step (between inf_start and inf_end)
Returns:
X: numpy.ndarray
feature matrix for inference of {h,f} from -F_host
Dependencies:
import numpy as np
"""
X = []
for t in range(len(strain_samp_yearly)):
strains_next = strain_samp_yearly[t]
# features (for time-dependent coefficient f)
gen_features = [0] * (len(strain_samp_yearly))
gen_features[t] = 1
# sequence features (for h and J)
X_next = []
for strain in strains_next:
# X_sample = strain.tolist()
X_sample = strain
X_sample = np.concatenate((X_sample, gen_features))
X_next.append(X_sample)
if len(X) != 0:
X = np.concatenate((X, X_next), axis=0)
else:
X = copy.deepcopy(X_next)
X = np.array(X)
return X
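# Layout of the feature matrix X built above: one row per (year, strain)
# sample; the leading columns hold the binary sequence states (features for
# the h coefficients) and the trailing len(strain_samp_yearly) columns are
# one-hot year indicators (features for the per-year offsets f).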
def inference_features_Ising_WithCouplings(strain_samp_yearly):
"""
calculate the feature matrix for inference (for Ising strains)
Parameters:
strain_samp_yearly: list
list of strains for each inference time step (between inf_start and inf_end)
Returns:
X: numpy.ndarray
feature matrix for inference of {h,J,f} from -F_host
Dependencies:
import numpy as np
"""
X = []
for t in range(len(strain_samp_yearly)):
strains_next = strain_samp_yearly[t]
# features (for time-dependent coefficient f)
gen_features = [0] * (len(strain_samp_yearly))
gen_features[t] = 1
# sequence features (for h and J)
X_next = []
for strain in strains_next:
# X_sample = strain.tolist()
X_sample = strain
for i in range(len(strain)):
for j in range(i):
X_sample = np.concatenate((X_sample, np.array([strain[i]*strain[j]])))
X_sample = np.concatenate((X_sample, gen_features))
X_next.append(X_sample)
if len(X) != 0:
X = np.concatenate((X, X_next), axis=0)
else:
X = copy.deepcopy(X_next)
X = np.array(X)
return X
def inference_response_FhostPrediction(minus_fhost_yearly):
"""
calculate response function from -F_host
Parameters:
minus_fhost_yearly: list
list of -F_host for each strain at each time step between inf_start and inf_end
Returns:
Y: numpy.ndarray
response function for the inference of intrinsic fitness coeffs
Dependencies:
import numpy as np
"""
Y = []
for t in range(len(minus_fhost_yearly)):
minus_fhosts_next = minus_fhost_yearly[t]
Y_next = minus_fhosts_next
Y = np.concatenate((Y, Y_next))
Y = np.array(Y)
return Y
def infer_ridge_noCouplings(X, Y, lambda_h, lambda_f, inf_start, inf_end):
"""
infer the parameters {h,f} with ridge regression (Gaussian prior for regularized params)
Parameters:
X: numpy.ndarray
feature matrix
Y: numpy.ndarray
response vector
lambda_h, lambda_f: int (or float)
regularization coefficients, if 0 no regularization
inf_start, inf_end: start and end generation for inference
Returns:
M: numpy.ndarray
list of inferred coefficients
M_std: numpy.ndarray
list of standard deviation for inferred coefficients
Dependencies:
import numpy as np
import copy
"""
# number of features
num_param = len(X[0])
num_f = int(inf_end - inf_start - 1)
num_h = int(num_param - num_f)
# regularization matrix
reg_mat = np.zeros((num_param, num_param))
for i in range(num_h):
reg_mat[i, i] = lambda_h
for i in range(num_h, num_param):
reg_mat[i, i] = lambda_f
# standard deviation of features
X_std = np.std(X, axis=0)
std_nonzero = np.where(X_std != 0)[0] # use only features where std is nonzero
param_included = std_nonzero
X_inf = copy.deepcopy(X[:, param_included])
reg_mat_reduced = reg_mat[param_included, :]
reg_mat_reduced = reg_mat_reduced[:, param_included]
# inference by solving X*M = Y for M
XT = np.transpose(X_inf)
XTX = np.matmul(XT, X_inf) # covariance
try:
XTX_reg_inv = np.linalg.inv(XTX + reg_mat_reduced)
XTY = np.matmul(XT, Y)
M_inf = np.matmul(XTX_reg_inv, XTY)
M_full = np.zeros(num_param)
M_full[param_included] = M_inf
# unbiased estimator of variance
sigma_res = np.sqrt(len(Y) / (len(Y) - len(M_inf)) * np.mean([(Y - np.matmul(X_inf, M_inf)) ** 2]))
v_vec = np.diag(XTX_reg_inv)
# use std of prior distribution (if <infinity, else use 0)
# for parameters that are not informed by model
# M_var_inv = copy.deepcopy(np.diag(reg_mat))
M_std = np.zeros(M_full.shape)
for i in range(len(M_std)):
if reg_mat[i, i] != 0:
M_std[i] = np.sqrt(1 / reg_mat[i, i])
# standard deviation of the parameter distribution
# from diagonal of the covariance matrix
M_std[param_included] = np.sqrt(v_vec) * sigma_res
    except np.linalg.LinAlgError:
        print('inference failed: singular regularized covariance matrix')
M_full = np.zeros(num_param)
M_std = np.zeros(num_param)
return M_full, M_std
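# The estimator computed above is the standard ridge-regression closed form
#   M = (X^T X + Lambda)^(-1) X^T Y,
# with Lambda diagonal (lambda_h on sequence features, lambda_f on year
# features); zero-variance columns are excluded before the inversion.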
def infer_ridge_WithCouplings(X, Y, lambda_h, lambda_J, lambda_f, inf_start, inf_end):
"""
infer the parameters {h,J,f} with ridge regression (Gaussian prior for regularized params)
Parameters:
X: numpy.ndarray
feature matrix
Y: numpy.ndarray
response vector
lambda_h, lambda_J, lambda_f: int (or float)
regularization coefficients, if 0 no regularization
inf_start, inf_end: start and end generation for inference
Returns:
M: numpy.ndarray
list of inferred coefficients
M_std: numpy.ndarray
list of standard deviation for inferred coefficients
Dependencies:
import numpy as np
import copy
"""
# number of features
num_param = len(X[0])
num_f = int(inf_end - inf_start - 1)
num_h = int(-1/2 + np.sqrt(1/4 + 2*(num_param - num_f))) # calculate num_h from num_hJ = num_h*(num_h + 1)/2
num_J = num_param - (num_f + num_h)
# regularization matrix
reg_mat = np.zeros((num_param, num_param))
for i in range(num_h):
reg_mat[i, i] = lambda_h
for i in range(num_h, num_h + num_J):
reg_mat[i,i] = lambda_J
for i in range(num_h + num_J, num_param):
reg_mat[i, i] = lambda_f
# standard deviation of features
X_std = np.std(X, axis=0)
std_nonzero = np.where(X_std != 0)[0] # use only features where std is nonzero
param_included = std_nonzero
X_inf = copy.deepcopy(X[:, param_included])
reg_mat_reduced = reg_mat[param_included, :]
reg_mat_reduced = reg_mat_reduced[:, param_included]
# inference by solving X*M = Y for M
XT = np.transpose(X_inf)
XTX = np.matmul(XT, X_inf) # covariance
try:
XTX_reg_inv = np.linalg.inv(XTX + reg_mat_reduced)
XTY = np.matmul(XT, Y)
M_inf = np.matmul(XTX_reg_inv, XTY)
M_full = np.zeros(num_param)
M_full[param_included] = M_inf
# unbiased estimator of variance
sigma_res = np.sqrt(len(Y) / (len(Y) - len(M_inf)) * np.mean([(Y - np.matmul(X_inf, M_inf)) ** 2]))
v_vec = np.diag(XTX_reg_inv)
# use std of prior distribution (if <infinity, else use 0)
# for parameters that are not informed by model
# M_var_inv = copy.deepcopy(np.diag(reg_mat))
M_std = np.zeros(M_full.shape)
for i in range(len(M_std)):
if reg_mat[i, i] != 0:
M_std[i] = np.sqrt(1 / reg_mat[i, i])
# standard deviation of the parameter distribution
# from diagonal of the covariance matrix
M_std[param_included] = np.sqrt(v_vec) * sigma_res
    except np.linalg.LinAlgError:
        print('inference failed: singular regularized covariance matrix')
M_full = np.zeros(num_param)
M_std = np.zeros(num_param)
return M_full, M_std
def exe_inference_noCouplings(seq_ref_name, sigma_h, D0, res_targeted,
lambda_h, lambda_f, inf_start, inf_end,
results_directory=('C:/Users/julia/Documents/Resources/InfluenzaFitnessLandscape'
'/NewApproachFromMarch2021/InfluenzaFitnessInference/figures')
):
"""
infer single-mutation intrinsic fitness coefficients h (without couplings), together with temporal params F*
based on specific reference sequence, from which other strains are mutated within the head epitope regions (given by res_targeted)
"""
## retrieve st_yearly and st_freq_yearly from collected HA strains (before dim reduction)
# retrieve HA protein sequences from fasta file
year_list, yearly = retrieve_seqs()
print('start: ', year_list[inf_start], 'end: ', year_list[inf_end-1])
# divide sequences into strains
[st_yearly, st_freq_yearly, tot_count_yearly,
strain_All, strain_frequency_All] = strain_info(yearly)
# load minus_fhost_yearly from pickle file based on values of sigma_h and D0
results_directory = os.path.normpath(results_directory)
if not os.path.exists(results_directory):
results_directory = os.path.join(os.getcwd(), 'figures')
file_name = 'HA_MinusFhost_yearly' + 'sigma_h_' + str(sigma_h) + '_D0_' + str(D0) + '.data'
file_path = os.path.join(results_directory, file_name)
with open(file_path, 'rb') as f:
minus_f_host_yearly = pickle.load(f)
seq_ref_file = os.path.join(results_directory, 'reference_sequences.data')
with open(seq_ref_file, 'rb') as f:
seq_ref_dict = pickle.load(f)
seq_ref = seq_ref_dict[seq_ref_name]
# calculate binary strain rep. and update minus_f_host_yearly respectively
st_bin_yearly_new, st_bin_freq_yearly, minus_f_host_yearly_new =\
binary_strains(seq_ref, st_yearly, st_freq_yearly, minus_f_host_yearly, res_targeted)
# calculate feature matrix and response vector
strain_samp_yearly = st_bin_yearly_new[inf_start+1:inf_end]
minus_f_host_yearly = minus_f_host_yearly_new[inf_start:inf_end-1]
X = inference_features_Ising_noCouplings(strain_samp_yearly)
Y = inference_response_FhostPrediction(minus_f_host_yearly)
# do inference and extract h and h_std from inference
M, M_std = infer_ridge_noCouplings(X, Y, lambda_h, lambda_f, inf_start, inf_end)
num_h = len(M) - (inf_end - inf_start - 1)
h_inf_list = M[:num_h]
h_inf_std_list = M_std[:num_h]
# print basic results:
print('inferred h: ', h_inf_list)
print('number of sites: ', len(h_inf_list))
# save results from inference and used parameters in dictionary
ana_result_dict = {
'seq_ref_name': seq_ref_name,
'seq_ref': seq_ref,
'st_yearly': st_yearly,
'st_freq_yearly': st_freq_yearly,
'inf_start': inf_start,
'inf_end': inf_end,
'sigma_h': sigma_h,
'D0': D0,
'res_targeted': res_targeted,
'lambda_h': lambda_h,
'lambda_f': lambda_f,
'h_inf_list': h_inf_list,
'h_inf_std_list': h_inf_std_list,
'M': M,
'M_std': M_std
}
result_filename = 'HA_Inference_noCouplings' + 'sigma_h_' + str(sigma_h) + '_D0_' + str(D0) + '.data'
# switch to results folder for specific reference seq
seqref_results_folder = os.path.join(results_directory, seq_ref_name)
if not os.path.exists(seqref_results_folder):
os.mkdir(seqref_results_folder)
result_filepath = os.path.join(seqref_results_folder, result_filename)
with open(result_filepath, 'wb') as f:
pickle.dump(ana_result_dict, f)
def exe_inference_WithCouplings(seq_ref_name, sigma_h, D0, res_targeted,
lambda_h, lambda_J, lambda_f, inf_start, inf_end,
results_directory=('C:/Users/julia/Documents/Resources/InfluenzaFitnessLandscape'
'/NewApproachFromMarch2021/InfluenzaFitnessInference/figures')
):
"""
infer single-mutation intrinsic fitness coefficients h and J, together with temporal params F*
based on specific reference sequence, from which other strains are mutated within the head epitope regions (given by res_targeted)
"""
## retrieve st_yearly and st_freq_yearly from collected HA strains (before dim reduction)
# retrieve HA protein sequences from fasta file
year_list, yearly = retrieve_seqs()
# divide sequences into strains
[st_yearly, st_freq_yearly, tot_count_yearly,
strain_All, strain_frequency_All] = strain_info(yearly)
# load minus_fhost_yearly from pickle file based on values of sigma_h and D0
results_directory = os.path.normpath(results_directory)
if not os.path.exists(results_directory):
results_directory = os.path.join(os.getcwd(), 'figures')
file_name = 'HA_MinusFhost_yearly' + 'sigma_h_' + str(sigma_h) + '_D0_' + str(D0) + '.data'
file_path = os.path.join(results_directory, file_name)
with open(file_path, 'rb') as f:
minus_f_host_yearly = pickle.load(f)
seq_ref_file = os.path.join(results_directory, 'reference_sequences.data')
with open(seq_ref_file, 'rb') as f:
seq_ref_dict = pickle.load(f)
seq_ref = seq_ref_dict[seq_ref_name]
# calculate binary strain rep. and update minus_f_host_yearly respectively
st_bin_yearly_new, st_bin_freq_yearly, minus_f_host_yearly_new =\
binary_strains(seq_ref, st_yearly, st_freq_yearly, minus_f_host_yearly, res_targeted)
# calculate feature matrix and response vector
strain_samp_yearly = st_bin_yearly_new[inf_start+1:inf_end]
minus_f_host_yearly = minus_f_host_yearly_new[inf_start:inf_end-1]
X = inference_features_Ising_WithCouplings(strain_samp_yearly)
Y = inference_response_FhostPrediction(minus_f_host_yearly)
# do inference and extract h and h_std from inference
M, M_std = infer_ridge_WithCouplings(X, Y, lambda_h, lambda_J, lambda_f, inf_start, inf_end)
num_h = int(-1/2 + np.sqrt(1/4 + 2*(len(M) - (inf_end - inf_start - 1)))) # calculate num_h from num_hJ=num_params-num_f
h_inf_list = M[:num_h]
h_inf_std_list = M_std[:num_h]
# print basic results:
print('inferred h: ', h_inf_list)
print('number of sites: ', len(h_inf_list))
# save results from inference and used parameters in dictionary
ana_result_dict = {
'seq_ref_name': seq_ref_name,
'seq_ref': seq_ref,
'st_yearly': st_yearly,
'st_freq_yearly': st_freq_yearly,
'inf_start': inf_start,
'inf_end': inf_end,
'sigma_h': sigma_h,
'D0': D0,
'res_targeted': res_targeted,
'lambda_h': lambda_h,
        'lambda_J': lambda_J,
        'lambda_f': lambda_f,
'h_inf_list': h_inf_list,
'h_inf_std_list': h_inf_std_list,
'M': M,
'M_std': M_std
}
result_filename = 'HA_Inference_WithCouplings' + 'sigma_h_' + str(sigma_h) + '_D0_' + str(D0) + '.data'
# switch to results folder for specific reference seq
seqref_results_folder = os.path.join(results_directory, seq_ref_name)
if not os.path.exists(seqref_results_folder):
os.mkdir(seqref_results_folder)
result_filepath = os.path.join(seqref_results_folder, result_filename)
with open(result_filepath, 'wb') as f:
pickle.dump(ana_result_dict, f)
def round_to_1(x):
"""
round to 1 significant digit
"""
if x == 0:
rounded_x = 0
else:
rounded_x = round(x, -int(floor(log10(abs(x)))))
return rounded_x
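# Examples: round_to_1(0.0234) -> 0.02 and round_to_1(567) -> 600.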
def eval_inference_noCouplings(seq_ref_name, sigma_h, D0,
results_directory=('C:/Users/julia/Documents/Resources/InfluenzaFitnessLandscape'
'/NewApproachFromMarch2021/InfluenzaFitnessInference/figures')):
"""
retrieve inferred fitness parameters for specific reference seq and fitness params
plot inferred param for each Lee HA residue index
"""
results_directory = os.path.normpath(results_directory)
if not os.path.exists(results_directory):
results_directory = os.path.join(os.getcwd(), 'figures')
result_filename = 'HA_Inference_noCouplings' + 'sigma_h_' + str(sigma_h) + '_D0_' + str(D0) + '.data'
seqref_results_folder = os.path.join(results_directory, seq_ref_name)
result_filepath = os.path.join(seqref_results_folder, result_filename)
with open(result_filepath, 'rb') as f:
ana_result_dict = pickle.load(f)
## inferred fitness params
h_inf_list = ana_result_dict['h_inf_list']
h_inf_std_list = ana_result_dict['h_inf_std_list']
print('h_inf_list: ', h_inf_list)
print('h_inf_std_list: ', h_inf_std_list)
## plot inferred params as function of residue numbers in Lee numbering
res_epitope_list = def_res_epitope_list()
res_allepitopes_list = [res for res_list in res_epitope_list for res in res_list]
res_targeted = res_allepitopes_list
Lee_indices = convert_my_ind_to_Lee_HA_numbering(res_targeted)
plt_set = ana.set_plot_settings()
# plot h inferred on y_axis against HA position (Lee numbering)
fig_name = 'hInferred_vs_Lee_HAposition_' + 'sigma_h_' + str(sigma_h) + '_D0_' + str(D0) + plt_set['file_extension']
this_plot_filepath = os.path.join(seqref_results_folder, fig_name)
fig = plt.figure(figsize=(plt_set['full_page_width']*10, 2))
ax1 = fig.add_axes(plt_set['plot_dim_1pan'][0])
# label x-axis with each epitope position and label each point with rounded inferred h value
h_inf_labels = [round_to_1(h) for h in h_inf_list] # round to 1 significant digit
ax1.set_xticks(Lee_indices)
for i, txt in enumerate(h_inf_labels):
ax1.annotate(txt, (Lee_indices[i], h_inf_list[i]))
ax1.errorbar(Lee_indices, h_inf_list, h_inf_std_list, marker='o', linestyle='none', zorder=1)
ax1.set_ylim(-1.5,1.5)
ax1.set_xlabel('HA position (Lee numbering scheme)')
ax1.set_ylabel('inferred $h$')
plt.savefig(this_plot_filepath, bbox_inches='tight')
def comparison_inference_LeeDeepMutScanning(sigma_h, D0, inf_scheme = 'noCouplings'):
"""
plot inferred params, inferred w specific sigma_h and D0
against mutational effects measured by Lee et al.
calculate rank correlations, print them out and save those results in the result dictionary of the inference results
"""
# get aa preference table (from csv file) as pandas dataframe
data_filename = 'github_jbloomlab_Perth2009-DMS-Manuscript_summary_avgprefs.csv'
data_folder = os.path.normpath('C:/Users/julia/Documents/Resources/InfluenzaFitnessLandscape'
'/NewApproachFromMarch2021/InfluenzaFitnessInference/figures/Perth_16_2009_G78D_T212I')
if not os.path.exists(data_folder):
data_folder = os.path.join(os.getcwd(), 'figures', 'Perth_16_2009_G78D_T212I')
data_path = os.path.join(data_folder, data_filename)
data = pd.read_csv(data_path)
# get reference sequence for strain Perth_16_2009_G78D_T212I
strain_name = 'Perth_16_2009_G78D_T212I'
strain_list_folder = os.path.normpath('C:/Users/julia/Documents/Resources/InfluenzaFitnessLandscape'
'/NewApproachFromMarch2021/InfluenzaFitnessInference/figures')
if not os.path.exists(strain_list_folder):
strain_list_folder = os.path.join(os.getcwd(), 'figures')
strain_list_filename = 'reference_sequences.data'
strain_list_filepath = os.path.join(strain_list_folder, strain_list_filename)
with open(strain_list_filepath, 'rb') as f:
seq_ref_dict = pickle.load(f)
seq_ref = seq_ref_dict[strain_name]
# epitope sites (in my numbering) for which I did the inference
res_epitope_list = def_res_epitope_list()
res_allepitopes_list = [res for res_list in res_epitope_list for res in res_list]
## extract preferences and aa_list as list/array (sequence position in array has my numbering)
# list of amino acids
aa_list = list(data.columns)[1:]
# transform preference table into array of shape N_site rows * num_aa=20 cols
aa_pref_arr = data.to_numpy()[:, 1:]
# extract preference array and ref sequence for epitope sites only (for which I did the inference)
aa_pref_epi = aa_pref_arr[res_allepitopes_list, :]
    seq_ref_epi = np.array(list(seq_ref))[res_allepitopes_list]
## calculate measured mutational effects as log(max(p_mut(i))/p_ref(i)) as
## the intrinsic mutational effect for the easiest mutation at site i away from the aa of the reference seq
## or as avg(log(p_mut(i)/p_ref(i))), i.e. the average mutational effect
max_mut_effect_list = []
avg_mut_effect_list = []
for i in range(len(seq_ref_epi)):
aa_ref = seq_ref_epi[i] # reference state
ref_index = aa_list.index(aa_ref) # index for ref state in array
p_ref_list = aa_pref_epi[i, :]
p_ref = p_ref_list[ref_index] # preference for ref state
p_mut_list = np.delete(p_ref_list, ref_index) # preference for mutated states
p_max = np.amax(p_mut_list) # maximum preference to another state
max_mut_effect = np.log(p_max / p_ref)
mut_effects = np.log(p_mut_list / p_ref) # list of log preference ratios
avg_mut_effect = np.mean(mut_effects)
max_mut_effect_list.append(max_mut_effect)
avg_mut_effect_list.append(avg_mut_effect)
## calculate shannon entropy from aa preferences
shannon_e_list = []
for i in range(len(seq_ref_epi)):
p_list = aa_pref_epi[i, :]
shannon_e = -np.sum(np.log(p_list) * p_list)
shannon_e_list.append(shannon_e)
## get the inferred fitness coefficients for this reference sequence
## and the specified coefficients sigma_h, D0
result_filename = 'HA_Inference_' + inf_scheme + 'sigma_h_' + str(sigma_h) + '_D0_' + str(D0) + '.data'
seqref_results_folder = data_folder
result_filepath = os.path.join(seqref_results_folder, result_filename)
with open(result_filepath, 'rb') as f:
ana_result_dict = pickle.load(f)
# inferred fitness params (in same order as mut_effect_list)
h_inf_list = ana_result_dict['h_inf_list']
h_inf_std_list = ana_result_dict['h_inf_std_list']
## calculate the rank correlation between inferred and measured mutational effects and with measured shannon entropy
rhoMaxEffect_pears, prho_MaxEffect_pears = scipy.stats.pearsonr(max_mut_effect_list, h_inf_list)
rhoMaxEffect, prho_MaxEffect = scipy.stats.spearmanr(max_mut_effect_list, h_inf_list)
rhoAvgEffect, prho_AvgEffect = scipy.stats.spearmanr(avg_mut_effect_list, h_inf_list)
rho_shannon, prho_shannon = scipy.stats.spearmanr(shannon_e_list, h_inf_list)
print('rhoMaxEffect=', rhoMaxEffect, 'p=', prho_MaxEffect)
print('rhoMaxEffect_pears=', rhoMaxEffect_pears, 'p=', prho_MaxEffect_pears)
print('rhoAvgEffect=', rhoAvgEffect, 'p=', prho_AvgEffect)
print('rho_shannon=', rho_shannon, 'p=', prho_shannon)
# save comparison measures in result_dict
ana_result_dict['rho_MaxEffect'] = rhoMaxEffect
ana_result_dict['prho_MaxEffect'] = prho_MaxEffect
ana_result_dict['rho_AvgEffect'] = rhoAvgEffect
ana_result_dict['prho_AvgEffect'] = prho_AvgEffect
ana_result_dict['rho_shannon'] = rho_shannon
ana_result_dict['prho_shannon'] = prho_shannon
with open(result_filepath, 'wb') as f:
pickle.dump(ana_result_dict, f)
# plot comparison inferred vs measured coefficients
plt_set = ana.set_plot_settings()
fig_name = 'hInferred_vs_Exp_' + inf_scheme + 'sigma_h_' + str(sigma_h) + '_D0_' + str(D0) + plt_set['file_extension']
this_plot_filepath = os.path.join(data_folder, fig_name)
# fig = plt.figure(figsize=(plt_set['full_page_width'], 3))
fig = plt.figure(figsize=(plt_set['single_pan_width'], 3))
ax1= fig.add_axes(plt_set['plot_dim_1pan'][0])
# ax2 = fig.add_axes(plt_set['plot_dim_3pan'][1])
# ax3 = fig.add_axes(plt_set['plot_dim_3pan'][2])
# inferred vs max mutational effects
ax1.errorbar(max_mut_effect_list, h_inf_list, h_inf_std_list, marker='o', linestyle='none', zorder=1)
ax1.set_xlabel('measured log preference ratios')
ax1.set_ylabel('inferred $h$')
ax1.set_ylim(-1.5, 1.5)
text = '$r_{h}$ = %.2f, p = %.e' % (rhoMaxEffect_pears, prho_MaxEffect_pears)
ax1.text(0.05, 0.95, text, ha='left', va='top', fontsize=12, transform=ax1.transAxes)
# ax1.text(plt_set['plotlabel_shift_3pan'], plt_set['plotlabel_up_3pan'], '(a)', transform=ax1.transAxes,
# fontsize=plt_set['label_font_size'], va='top', ha='right')
# # inferred vs avg. mutational effects
# ax2.errorbar(avg_mut_effect_list, h_inf_list, h_inf_std_list, marker='o', linestyle='none', zorder=1)
# ax2.set_xlabel('measured avg. log aa preference ratios')
# ax2.set_ylabel('inferred $h$')
# ax2.set_ylim(-1.5, 1.5)
# text = '$r_{spearman}$ = %.2f, p = %.e' % (rhoAvgEffect, prho_AvgEffect)
# ax2.text(0.05, 0.95, text, ha='left', va='top', fontsize=12, transform=ax2.transAxes)
# ax2.text(plt_set['plotlabel_shift_3pan'], plt_set['plotlabel_up_3pan'], '(b)', transform=ax2.transAxes,
# fontsize=plt_set['label_font_size'], va='top', ha='right')
#
# ax3.errorbar(shannon_e_list, h_inf_list, h_inf_std_list, marker='o', linestyle='none', zorder=1)
# ax3.set_xlabel('Shannon entropy of measured aa preferences')
# ax3.set_ylabel('inferred $h$')
# ax3.set_ylim(-1.5, 1.5)
# text = '$r_{spearman}$ = %.2f, p = %.e' % (rho_shannon, prho_shannon)
# ax3.text(0.05, 0.95, text, ha='left', va='top', fontsize=12, transform=ax3.transAxes)
# ax3.text(plt_set['plotlabel_shift_3pan'], plt_set['plotlabel_up_3pan'], '(c)', transform=ax3.transAxes,
# fontsize=plt_set['label_font_size'], va='top', ha='right')
plt.savefig(this_plot_filepath, bbox_inches='tight')
plt.close()
def main():
## plot HA strain succession from 1968 to 2020
exe_plot_strainSuccession_HA()
## calculate and save minus_f_host_yearly
sigma_h = 1
D0 = 5
exe_minus_fhost_yearly(sigma_h, D0)
## plot distribution of minus_f_host_yearly
sigma_h = 1
D0 = 5
exe_plot_minus_fhost_yearly(sigma_h, D0)
## add reference sequence to dictionary
# add_reference_sequences_from_fasta('BI_16190_68_ProteinFasta.fasta', 'BI_16190_68')
# add_reference_sequences_from_fasta('Perth_16_2009_ProteinFasta.fasta', 'Perth_16_2009')
# add_reference_sequences_from_fasta('Perth_16_2009_G78D_T212I_ProteinFasta.fasta', 'Perth_16_2009_G78D_T212I')
# print_seq_refs() # print names of added reference sequences
# ## run trial inference on HA data
# seq_ref_name = 'Perth_16_2009_G78D_T212I' # 'BI_16190_68'
# sigma_h = 1
# D0 = 5
# # fixed params:
# lambda_h = 10 ** (-4) # 10**(-4)
# # lambda_J = 1 # only needed for inference with couplings
# lambda_f = 10 ** (-4)
# inf_start = 0
# inf_end = 53 # 53 (53 is length of year_list, 43 is 2010 as last year)
# res_epitope_list = def_res_epitope_list()
# res_allepitopes_list = [res for res_list in res_epitope_list for res in res_list]
# res_targeted = res_allepitopes_list
# # run inference with chosen params:
# exe_inference_noCouplings(seq_ref_name, sigma_h, D0, res_targeted,
# lambda_h, lambda_f, inf_start, inf_end)
# # exe_inference_WithCouplings(seq_ref_name, sigma_h, D0, res_targeted,
# # lambda_h, lambda_J, lambda_f, inf_start, inf_end)
#
# ## evaluate inference: print and plot inferred params
# seq_ref_name = 'Perth_16_2009_G78D_T212I' # 'BI_16190_68'
# sigma_h = 1
# D0 = 5
# eval_inference_noCouplings(seq_ref_name, sigma_h, D0)
#
# # compare inferred fitness coefficients to mutational fitness effects
# # measured by Lee et al. 2018 (PNAS)
# # save comparison figure and print/save rank correlations
# sigma_h = 1
# D0 = 5
# comparison_inference_LeeDeepMutScanning(sigma_h, D0, inf_scheme='noCouplings')
# # comparison_inference_LeeDeepMutScanning(sigma_h, D0, inf_scheme='WithCouplings')
# if this file is run from the console, the function main will be executed
if __name__ == '__main__':
main()
```
#### File: notebooks/fitnessinference/run_multiple_anas.py
```python
import numpy as np
import copy
import os
from pypet import Trajectory, cartesian_product
import pickle
import scipy
try:
import simulation as simu
except ModuleNotFoundError:
from fitnessinference import simulation as simu
from sklearn.metrics import precision_recall_curve, auc, roc_auc_score, roc_curve
import matplotlib as mpl
import matplotlib.pyplot as plt
from Bio import SeqIO
from Bio.Seq import Seq
import logging
from datetime import date
from general.queuing import QsubHeader, SlurmHeader, run_sbatch
import time
# Writes Slurm files to be run on the cluster
class SlurmProtocol(object):
def __init__(self, simulation_time=2000, nodes=1, ppn=1, mem_gb=10):
self.header = SlurmHeader(simulation_name="fluSimulation", simulation_time=simulation_time,
nodes=nodes, ppn=ppn, mem_gb=mem_gb)
def set_python_script(self, q):
pypath = os.path.normpath('C:/Users/julia/Documents/Resources/InfluenzaFitnessLandscape/NewApproachFromMarch2021/'
'InfluenzaFitnessInference/code/notebooks/fitnessinference/analysis.py')
if not os.path.exists(pypath):
pypath = os.path.join(os.getcwd(), 'code', 'notebooks', 'fitnessinference', 'analysis.py')
command_string = 'python ' + pypath + '\n'
q.write(command_string)
def generate_slurm(self):
q = open("sbatch.sh", "w")
self.header.set_header(q)
self.set_python_script(q)
q.close()
def main():
# run analyses on cluster
slurm = SlurmProtocol()
slurm.generate_slurm()
run_sbatch()
# time.sleep(1) # wait for x seconds so that result file gets created before next simu is run
# if this file is run from the console, the function main will be executed
if __name__ == '__main__':
main()
```
|
{
"source": "jdoepfert/google-drive-helpers",
"score": 3
}
|
#### File: google-drive-helpers/gdrive_helpers/gdrive.py
```python
import httplib2
import os
from apiclient.http import MediaIoBaseDownload
from apiclient import discovery
from oauth2client import client
from oauth2client.file import Storage
from oauth2client import tools
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/drive-python-quickstart.json
SCOPES = ['https://www.googleapis.com/auth/drive']
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Drive API Python Quickstart'
def get_credentials(secret_file=CLIENT_SECRET_FILE):
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'drive-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(secret_file, SCOPES)
flow.user_agent = APPLICATION_NAME
# run the OAuth2 flow to actually obtain and store the new credentials
credentials = tools.run_flow(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def extract_id_from_url(url):
return url.split('/')[-1]
def is_folder(item):
return item['mimeType'] == 'application/vnd.google-apps.folder'
def download_folder_contents(folder_id, dest_path, service, http, n=1000):
results = service.files().list(pageSize=n,
q="'{}' in parents"
.format(folder_id)).execute()
if 'nextPageToken' in results.keys():
raise RuntimeError("You missed some files! Implement pagination https://developers.google.com/drive/v3/web/search-parameters")
folder_contents = results['files']
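# a pagination sketch (assumed, not implemented here) would loop on the
# token instead of raising above, e.g.:
#   while 'nextPageToken' in results:
#       results = service.files().list(pageSize=n,
#           q="'{}' in parents".format(folder_id),
#           pageToken=results['nextPageToken']).execute()
#       folder_contents.extend(results['files'])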
files = (i for i in folder_contents if not is_folder(i))
folders = (i for i in folder_contents if is_folder(i))
if not os.path.exists(dest_path):
os.makedirs(dest_path)
for item in files:
print(item)
full_path = os.path.join(dest_path, item['name'].replace('/', '_'))
download_file(item['id'], full_path, service)
for item in folders:
full_path = os.path.join(dest_path, item['name'].replace('/', '_'))
print("found folder {}".format(full_path))
download_folder_contents(item['id'], full_path, service, http)
def download_file(file_id, dest_path, service, overwrite=False):
if not overwrite:
if os.path.exists(dest_path):
return None
request = service.files().get_media(fileId=file_id)
with open(dest_path, "wb+") as fh:
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
print("Download {} {}%."
.format(dest_path, int(status.progress() * 100)))
def main():
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('drive', 'v3', http=http)
url = 'https://drive.google.com/drive/u/1/folders/1wUxW6d9vxSYNFRVtT4ZdNcZxQX-u1jr0'
folder_id = extract_id_from_url(url)
dest_path = './'
download_folder_contents(folder_id, dest_path, service, http)
if __name__ == '__main__':
main()
```
|
{
"source": "jdoepfert/PyRM",
"score": 2
}
|
#### File: PyRM/pyrm/meta_optimizers.py
```python
from pyrm.optimizers import calc_EMSRb
from pyrm.fare_transformation import fare_trafo_decorator
@fare_trafo_decorator
def calc_EMSRb_MR(fares, demands, sigmas=None, cap=None):
return calc_EMSRb(fares, demands, sigmas)
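# illustrative usage (values assumed, not from the original):
#   import numpy as np
#   fares = np.array([100., 80., 50.])   # fare classes, high to low
#   demands = np.array([20., 35., 60.])  # mean demand forecasts
#   levels = calc_EMSRb_MR(fares, demands)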
```
|
{
"source": "jdog4783/audio-reactive-led-strip-1",
"score": 3
}
|
#### File: audio-reactive-led-strip-1/tests/test_opc_server.py
```python
import unittest
from audioled import opc_server
from audioled import opc
import numpy as np
import time
import random
import socket
class Test_OPC_Server(unittest.TestCase):
def test_serverReceives(self):
# create server
server = opc_server.Server('127.0.0.1', 7891)
# start receiving without blocking
server.get_pixels(block=False)
# construct client
client = opc.Client('127.0.0.1:7891',long_connection=True)
# transfer some data
for i in range(2):
pixels_in = np.array([[random.randint(0,255),random.randint(0,255),random.randint(0,255)] for i in range(10)]).T.clip(0,255)
print("Pixels sent: {}".format(pixels_in))
client.put_pixels(pixels_in.T.clip(0, 255).astype(int).tolist())
# give some time for networking
time.sleep(0.1)
# receive again (this will return last_message)
pixels_out = server.get_pixels(block=False)
# assert in and out are equal
print("Pixels received: {}".format(pixels_out))
np.testing.assert_array_equal(pixels_in, pixels_out)
def test_serverClosesSocket(self):
# create server
server = opc_server.Server('127.0.0.1', 7892)
# start receiving
server.get_pixels(block=False)
# construct client
client = opc.Client('127.0.0.1:7892', long_connection=True, verbose=False)
# transfer some data
pixels_in = np.array([[random.randint(0,255),random.randint(0,255),random.randint(0,255)] for i in range(10)]).T.clip(0,255)
client.put_pixels(pixels_in.T.clip(0, 255).astype(int).tolist())
time.sleep(0.1)
# receive again (this will return last_message)
pixels_out = server.get_pixels(block=False)
# assert in and out are equal
print("Pixels received: {}".format(pixels_out))
np.testing.assert_array_equal(pixels_in, pixels_out)
# now close server, we need the socket to be closed as well
server = None
time.sleep(1)
print("Proceeding")
# start new server on the same port
newServer = opc_server.Server('127.0.0.1', 7892)
# start receiving
newServer.get_pixels(block=False)
# transfer some data
pixels_in = np.array([[random.randint(0,255),random.randint(0,255),random.randint(0,255)] for i in range(10)]).T.clip(0,255)
# needs range since client realizes at some point that he's disconnected
for i in range(10):
client.put_pixels(pixels_in.T.clip(0, 255).astype(int).tolist())
time.sleep(0.01)
# receive again (this will return last_message)
pixels_out = newServer.get_pixels(block=False)
# assert in and out are equal
print("Pixels received: {}".format(pixels_out))
np.testing.assert_array_equal(pixels_in, pixels_out)
def test_backgroundThreadExitsIfSocketIsClosed(self):
_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_socket.bind(('127.0.0.1', 7890))
thread = opc_server.ServerThread(_socket, None, verbose=True)
thread.start()
self.assertTrue(thread.isAlive())
time.sleep(1)
_socket.close()
time.sleep(1)
self.assertTrue(not thread.isAlive())
def test_serverErrorHandlingSameSocket(self):
# create servers
serverA = opc_server.Server('127.0.0.1', 7892, verbose=True)
serverB = opc_server.Server('127.0.0.1', 7892, verbose=True)
# create client
client = opc.Client('127.0.0.1:7892', long_connection=True, verbose=False)
# Run for some time...
for i in range(10):
# init serverA thread
print("Activating serverA")
serverA.get_pixels(block=False)
pixels_in = np.array([[random.randint(0,255),random.randint(0,255),random.randint(0,255)] for i in range(10)]).T.clip(0,255)
for j in range(5):
client.put_pixels(pixels_in.T.clip(0, 255).astype(int).tolist())
time.sleep(0.1)
pixels_out = serverA.get_pixels(block=False)
print("Checking output serverA")
np.testing.assert_array_equal(pixels_in, pixels_out)
time.sleep(0.1)
# init serverB thread
print("Activating serverB")
serverB.get_pixels(block=False)
pixels_in = np.array([[random.randint(0,255),random.randint(0,255),random.randint(0,255)] for i in range(10)]).T.clip(0,255)
for j in range(5):
client.put_pixels(pixels_in.T.clip(0, 255).astype(int).tolist())
time.sleep(0.1)
pixels_out = serverB.get_pixels(block=False)
print("Checking output serverB")
np.testing.assert_array_equal(pixels_in, pixels_out)
```
|
{
"source": "jdoherty7/Adaptive_Interpolation",
"score": 3
}
|
#### File: Adaptive_Interpolation/adaptive_interpolation/adapt.py
```python
from __future__ import division
import copy
import numpy as np
import numpy.linalg as la
import scipy.special as spec
import scipy.optimize as optimize
class Tree:
def __init__(self, root=0):
self.root = root
self.size = 0
self.max_level = 0
def visualize(self):
pass
def adapt(self):
pass
class Node:
def __init__(self, parent, left=0, right=0):
self.parent = parent
self.left = left
self.right = right
self.level = -1
self.level = self.get_level()
self.data = 0
if left != 0:
left.parent = self
if right != 0:
right.parent = self
def get_level(self):
if self.level == -1:
if (self.parent == 0):
self.level = 0
else:
self.level = self.parent.level + 1
return self.level
class Interpolant(object):
# defining parameters of an adaptive method
def __init__(self, f, order, error, interpolant_choice,
dtype, guaranteed_accurate=True, optimizations=[]):
dt = int(dtype)
# use recursions till node interval is order*machine precision - some tol const
# max_recur is max number of recursion levels allowed for adaption
# 34 reaches a spacing of 10**-15
if dt <= 32:
self.dtype = np.float32
self.max_recur = 24
elif dt <= 64:
self.dtype = np.float64
self.max_recur = 53 - 10
elif dt <= 80:
self.dtype = np.longdouble
self.max_recur = 64 - 10
else:
raise Exception("Incorrect data type specified")
if "calc intervals" in optimizations:
# 14 to store in int32
# 25 to store in int64
self.max_recur = 14
if interpolant_choice not in ['chebyshev', 'legendre', 'monomial']:
string_err = "{0} is not a valid \
interpolant.\n".format(interpolant_choice)
string_err+= "legendre, chebyshev, and monomial are the choices."
raise ValueError(string_err)
# function passed; must be vectorized
self.function = f
self.lower_bound = 0
self.upper_bound = 0
# max order allowed to create interpolation
self.max_order = order
# string specifying basis choice
self.basis = interpolant_choice
self.tree = Tree(Node(0))
self.tree.size+=1
self.allowed_error = error
self.guaranteed_accurate = guaranteed_accurate
self.leaves = []
# for testing better methods
self.optimizations=optimizations
# function to evaluate Legendre polynomials of a number, x, up to order n
def legendre(self, n, x):
if n == 0:
return np.array([1.], dtype=self.dtype)
elif n == 1:
return np.array([1., x], dtype=self.dtype)
elif n > 1:
L = [self.dtype(1.), self.dtype(x)]
for i in range(2, int(n+1)):
# Bonnet's recurrence: i*P_i(x) = (2i-1)*x*P_{i-1}(x) - (i-1)*P_{i-2}(x)
first_term = self.dtype(2*i-1)*self.dtype(x)*L[i-1]
second_term = self.dtype(i-1)*L[i-2]
L.append((first_term - second_term)*(1./i))
return np.array(L, dtype=self.dtype)
# function to evaluate chebyshev polynomials of a value x up to order n
def chebyshev(self, n, x):
if n == 0:
return np.array([1.], dtype=self.dtype)
elif n == 1:
return np.array([1., x], dtype=self.dtype)
elif n > 1:
C = [self.dtype(1.), self.dtype(x)]
for i in range(2, int(n+1)):
C.append(self.dtype(2)*self.dtype(x)*C[i-1] - C[i-2])
return np.array(C, dtype=self.dtype)
# transformation for orthogonal functions, from [a, b] -> [-1, 1]
def transform(self, x, a, b):
scale = (x - a)/(b - a)
return 2*scale - 1
# given an order and a number, x, the polynomials of order 0 to n
# are returned, evaluated for the given number.
def basis_function(self, x, n, basis, a, b):
xscaled = (2*(x - a)/(b - a)) - 1
if (basis == 'legendre'):
#return spec.eval_legendre(n, x)
return self.legendre(n, xscaled)
elif (basis == 'chebyshev'):
#return spec.eval_chebyt(n, x)
return self.chebyshev(n, xscaled)
else:
#return np.polyval(np.ones(n), x)
return np.array([x**i for i in range(int(n)+1)], dtype=self.dtype)
# given a list of coefficients, evaluate what the interpolant's value
# will be for the given x value(s). Handles both array and scalar x.
# coeff is coefficients of a basis (string) of a given order (integer)
def eval_coeff(self, coeff, x, basis, order, a, b):
my_vals = []
if type(x) == type([]) or type(x) == type(np.array([0])):
for x0 in x:
xs = self.basis_function(x0, order, basis, a, b)
val = np.dot(coeff, xs)
my_vals.append(val)
return np.array(my_vals, dtype=self.dtype)
else:
xs = self.basis_function(x, order, basis, a, b)
return np.dot(coeff, xs)
# gets n chebyshev nodes from a to b
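# these are the Chebyshev extrema points cos((k-1)*pi/(n-1)) on [-1, 1],
# mapped to [a, b] below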
def get_cheb(self, a, b, n):
if n == 1:
return np.array([(a+b)/2.], dtype=self.dtype)
k = np.array(range(1, int(n) + 1)[::-1], dtype=self.dtype)
nodes = np.cos((2.*k - 2.)*np.pi/(2.*int(n-1)))
# change range from -1 to 1 to a to b
return (b-a)*.5*(nodes + 1.) + a
# find interpolated coefficients given a basis for
# evaluation and nodes to evaluate the function at.
def interpolate(self, nodes, basis, a, b):
length = len(nodes)
V = np.empty(shape=(length, length), dtype=self.dtype)
for i in range(length):
V[i, :] = self.basis_function(nodes[i], length-1, basis, a, b)
# try to solve for coefficients, if there is a singular matrix
# or some other error then return None to indicate an error
try: return la.solve(V, self.function(nodes))
except: return None
# finds error using the max val as the max on the entire interval, not the current
# below is the max number of points that can be evaluated exactly
# (self.upper_bound - self.lower_bound)*(2**(self.max_recur+1))
def find_error(self, coeff, a, b, order):
# get number of points for each interval
n = int(min(5e3*(b-a) + 10, 5e3))
lb, ub = self.lower_bound, self.upper_bound
num_nodes = int(5e3*(ub - lb) + 10)
# get full interval and subinterval
full_x = np.linspace(lb, ub, num_nodes, dtype=self.dtype)
x = np.linspace(a, b, n, dtype=self.dtype)
# evaluate absolute infinity norm on subinterval
# and infinity norm of function on full interval
approx = self.eval_coeff(coeff, x, self.basis, order, a, b)
actual = self.function(x)
max_abs_err = la.norm(approx - actual, np.inf)
max_val_full_int = la.norm(self.function(full_x), np.inf)
# calculate relative error on the subinterval
return max_abs_err/max_val_full_int
# adaptive method finding an interpolant for a function
# this uses a specified order and basis function
def adapt(self, a, b, node):
#print(a, b)
self.tree.max_level = max(self.tree.max_level, node.level)
# prevent from refining the interval too greatly
if node.level > self.max_recur:
string_err0 = "Recursed too far. Try changing the order of\n"
string_err0+= "the interpolant used, raise the allowed error,\n"
string_err0+= "or set accurate=False.\n"
if self.guaranteed_accurate:
raise ValueError(string_err0)
else:
return
# get nodes to evaluate interpolant with
nodes = self.get_cheb(a, b, self.max_order+1)
# get coefficients of interpolant defined on the nodes
# guaranteed to never give a singular matrix
coeff = self.interpolate(nodes, self.basis, a, b)
if coeff is None:
string_err1 = "Singular matrix obtained on bounds [{0} {1}]\n".format(a, b)
string_err1+= "If using monomials try using an orthogonal polynomial.\n"
string_err1+= "Otherwise, try a different order interpolant, lower the\n"
string_err1+= "allowed error, or set accurate=False\n"
if self.guaranteed_accurate:
raise ValueError(string_err1)
else:
return
# calculate the maximum relative error on the interval
# using these coefficients
this_error = self.find_error(coeff, a, b, self.max_order)
# append the coefficients and the range they are valid on to this
# array also the basis function and order of in this range
node.data = [(a+b)/2., coeff, [a, b], this_error]
# if error is larger than maximum allowed relative error
# then refine the interval
if this_error > self.allowed_error:
# adapt on the left subinterval then the right subinterval
self.tree.size += 2
node.left = Node(node)
node.right = Node(node)
self.adapt(a, (a+b)/2., node.left)
self.adapt((a+b)/2., b, node.right)
########################################################
# #
# Section Containing Functions for Remez interpolation #
# #
########################################################
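# The remez() routine below alternates two steps until equioscillation:
# (1) solve a linear system for the coefficients plus a leveled error E,
# (2) exchange: move the reference nodes to the extrema of the error
#     found between its roots (update_nodes).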
# find interpolated coefficients given a basis for
# evaluation and nodes to evaluate the function at.
# n is order
def solve_remez_system(self, nodes, order, a, b):
n = int(order)
length = n + 2
V = np.zeros((length, length))
for i in range(length):
V[i, :-1] = self.basis_function(nodes[i], n, self.basis, a, b)
V[i, -1] = (-1)**(i+1)
try: return la.solve(V, self.function(nodes))
except: return None
# update node choices based on places with maximum error near
# the current node choices, leave endpoints as is
# if order 0 is used the nodes are not changed
def update_nodes_incorrect(self, nodes, coeff, n, a, b):
# see FUNCTION APPROXIMATION AND THE REMEZ ALGORITHM to fix this exchange step
# should find roots and then find the max error in between those roots
if nodes.shape[0] > 2:
err = lambda x: np.abs(self.eval_coeff(coeff, x, self.basis, n, a, b)
- self.function(x))
new_nodes = np.zeros(len(nodes), dtype=self.dtype)
new_nodes[0] = nodes[0]
new_nodes[-1] = nodes[-1]
for i in range(1, len(nodes)-1):
c, d = (new_nodes[i-1] + nodes[i])/2, (nodes[i] + nodes[i+1])/2
x = np.linspace(c, d, 1000, dtype=self.dtype)
new_nodes[i] = x[np.argmax(err(x))]
# shouldnt this be: new_nodes = locmax(err(x))
# assert new_nodes.shape[0] == n
# locmax is unclear if there are high frequency terms.
return new_nodes
else:
return nodes
def find_roots(self, err, nodes,c,d,coeff):
roots = np.zeros(len(nodes)-1, dtype=self.dtype)
for i in range(len(roots)):
a, b = nodes[i], nodes[i+1]
if (b - a)/(2) < np.finfo(self.dtype).eps*b:
print(c,d)
roots[i] = (a + b)/2
else:
roots[i] = optimize.brentq(err, a, b)
return roots
# update node choices based on places with maximum error near
# the current node choices, leave endpoints as is
# if order 0 is used the nodes are not changed
def update_nodes(self, nodes, coeff, n, a, b):
# Error of the interpolation
err = lambda x: self.eval_coeff(coeff, x, self.basis, n, a, b) \
- self.function(x)
new_nodes = np.zeros(len(nodes), dtype=self.dtype)
# Roots of the Error function. Should be N+1 by Equioscillation Theorem
roots = self.find_roots(err, nodes, a, b, coeff)
# New nodes are the points that have the maximum absolute value of error
# within the intervals between each of the roots.
for i in range(len(nodes)):
c = a if i == 0 else roots[i-1]
d = b if i == len(roots) else roots[i]
neg_abs = lambda x: -np.abs(err(x))
new_nodes[i] = optimize.fminbound(neg_abs, c, d)
return new_nodes
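# check for the equioscillation property: the error at the reference
# nodes should have (near-)equal magnitude and alternating sign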
def check_eq_alt(self, array, error):
tolerance = 10*np.finfo(self.dtype).eps
equal = (np.max(np.abs(array)) - np.min(np.abs(array))) <= tolerance
last_sign = np.sign(array[0])
alternate = True
for i in range(1,len(array)):
alternate = alternate and (last_sign == -np.sign(array[i]))
last_sign = np.sign(array[i])
return equal and alternate
def remez(self, a, b, n):
remez_nodes = self.get_cheb(a, b, n+2)
#x = np.linspace(a, b, min(5e3, (b-a)/self.allowed_error), dtype=self.dtype)
for _ in range(40):
solution = self.solve_remez_system(remez_nodes, n, a, b)
if solution is None: return solution # singular matrix
coeff = solution[:-1]
error = np.abs(solution[-1])
if "remez incorrect" in self.optimizations:
M = self.update_nodes_incorrect(remez_nodes, coeff, n, a, b)
else:
try:
M = self.update_nodes(remez_nodes, coeff, n, a, b)
except:
break
err = lambda x: self.eval_coeff(coeff, x, self.basis, n,
a, b) - self.function(x)
remez_nodes = M
if self.check_eq_alt(err(remez_nodes), error): break
#print(err(M))
#print(b-a, error, self.check_eq_alt(err(M), error))
#print(la.norm(self.get_cheb(a, b, n+2)-remez_nodes, np.inf)/(b-a))
#print(M)
return coeff, remez_nodes
# adaptive method utilizing the remez algorithm for interpolation
def remez_adapt(self, a, b, node):
#print(a, b, "Remez")
#print((b-a)/(self.max_order+2))
self.tree.max_level = max(self.tree.max_level, node.level)
if node.level > self.max_recur:
string_err0 = "Recursed too far. Try changing the order of\n"
string_err0+= "the interpolant used, raise the allowed error,\n"
string_err0+= "or set accurate=False.\n"
if self.guaranteed_accurate:
raise ValueError(string_err0)
else:
return
# get coeff on interval utilizing the remez algorithm
ret = self.remez(a, b, self.max_order)
if ret is None:
if self.guaranteed_accurate:
string_err1 = "Singular matrix obtained on bounds [{0} {1}]\n".format(a, b)
string_err1+= "If using monomials try using an orthogonal polynomial.\n"
string_err1+= "Otherwise, try a different order interpolant, lower the\n"
string_err1+= "allowed error, or set accurate=False\n"
raise ValueError(string_err1)
else:
return
coeff, M = ret[0], ret[1]
this_error = self.find_error(coeff, a, b, self.max_order)
node.data = [(a+b)/2., coeff, [a, b], this_error]
#print("Error", np.log10(this_error), (b-a)/(self.max_order+2), node.level)
if this_error > self.allowed_error:
# adapt on the left subinterval then the right subinterval
self.tree.size += 2
node.left = Node(node)
node.right = Node(node)
self.remez_adapt(a, (a+b)/2., node.left)
self.remez_adapt((a+b)/2., b, node.right)
# Method to run the adaptive method initially
def run_adapt(self, lower_bound, upper_bound, adapt_type):
if upper_bound <= lower_bound:
raise Exception("Upper bound must be greater than lower bound.")
self.lower_bound = self.dtype(lower_bound)
self.upper_bound = self.dtype(upper_bound)
if adapt_type.lower() == "variable":
self.variable_order_adapt(self.lower_bound, self.upper_bound, self.tree.root)
elif adapt_type.lower() == "remez":
self.remez_adapt(self.lower_bound, self.upper_bound, self.tree.root)
else:
self.adapt(self.lower_bound, self.upper_bound, self.tree.root)
# Estimated Recursion Depth, from Taylor's Remainder Theorem
# assuming f is smooth and continuous and the (n+1)th derivative exists
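# a sketch of the estimate (assumed reasoning, standard Chebyshev bound):
#   |f - p_n| <= max|f^(n+1)| * ((b-a)/2)^(n+1) / (2^n * (n+1)!)
# so halving the interval shrinks the bound by a factor of 2^(n+1), and
# the bound reaches eps after roughly
#   depth ~ (log2|f^(n+1)| - log2(eps))/(n+1) + log2(b-a)
# halvings, which is the `depth` formula computed below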
if 0:
nodes = self.get_cheb(lower_bound, upper_bound, self.max_order+2)
coeff = self.interpolate(nodes, "monomials", lower_bound, upper_bound)
coeff[-1] = coeff[-1]
import scipy.misc as sm
import scipy.special as spec
dfn = abs(sm.factorial(self.max_order+1)*coeff[-1])
print("dfn+1: ", dfn, coeff[-1], np.log2(dfn)/(self.max_order+1))
if 0:
f = lambda x: self.eval_coeff(coeff, x, "monomials", self.max_order+1, lower_bound, upper_bound)
import matplotlib.pyplot as plt
plt.figure()
x = np.linspace(lower_bound, upper_bound, 1000)
dfn = la.norm(spec.jvp(0, x, self.max_order+1), np.inf)
print(dfn)
plt.plot(x, f(x))
plt.plot(x, spec.jvp(0, x, self.max_order+1))
plt.plot(x, self.function(x))
plt.show()
depth = -np.log2(self.allowed_error)/(self.max_order+1)
depth+= np.log2(upper_bound - lower_bound)
depth+= np.log2(dfn)/(self.max_order+1)
print("Estimated Depth: ", depth)
print("Actual Tree Depth: ", self.tree.max_level)
def test_cheb_err():
import numpy.linalg as la
from numpy.polynomial import chebyshev as cheb
import matplotlib.pyplot as plt
x = 0*np.linspace(lower_bound, upper_bound, 5e4)# + np.finfo(np.float64).eps
xs = 2*(x/(upper_bound-lower_bound)) -1- lower_bound
for n in range(3, 20):
nodes = self.get_cheb(lower_bound, upper_bound, n+1)
coeff = self.interpolate(nodes, "chebyshev", lower_bound, upper_bound)
f = lambda x: self.eval_coeff(coeff, x, "chebyshev", n,
lower_bound, upper_bound)
if 0:
plt.figure()
plt.plot(x, cheb.chebval(xs, coeff))
plt.plot(x, f(x), 'g')
plt.plot(x, self.function(x), 'r')
plt.show()
dx = (2**8)*np.finfo(np.float64).eps
print(n, la.norm(f(x) - f(x + dx), np.inf)/dx)
#print(n, la.norm(f(x) - cheb.chebval(xs, coeff), np.inf))
#print(n, la.norm(f(x), np.inf), la.norm(cheb.chebval(xs, coeff), np.inf))
test_cheb_err()
# add a condition to check if tree is good enough already?
optimal = self.tree.size == 2**(self.tree.max_level+1) - 1
if "balance" in self.optimizations and not optimal:
leaves = self.get_leaves(self.tree.root)
#print(leaves, len(leaves))
if "combine" in self.optimizations:
leaves = self.combine_leaves(leaves)
print("Original Height: ", self.tree.max_level)
self.tree = self.create_new_tree(leaves)
print("Balanced Height: ", self.tree.max_level)
print('\n\n\n')
l = self.get_leaves(self.tree.root)
#print(l)
#print(len(l))
"""
import scipy.sparse as sp
N = 2*(self.max_order+1)
bounds = np.arange(-(self.max_order+1)//2,(self.max_order+1)//2)
print(bounds)
diags = []
for i in bounds:
diags.append((self.max_order+1-abs(i))*np.ones(N - abs(i)))
D = sp.diags(diags, bounds)
print(D.todense())
x = np.linspace(lower_bound, upper_bound, N)
depth+= la.norm(D @ self.function(x), np.inf)
"""
# Possible future pruning functions
def get_leaves(self, node, leaves=[]):
left, right = 0, 0
if type(node.left) != int:
left = node.left.data[0]
if type(node.right) != int:
right = node.right.data[0]
print(node.data, left, right)
if node.left == 0 and node.right == 0:
leaves.append(node)
else:
self.get_leaves(node.left, leaves)
self.get_leaves(node.right, leaves)
return leaves
def combine_leaves(self, leaves):
i = 0
while i < len(leaves)-1:
new_node = self.replace(leaves[i], leaves[i+1])
if new_node == False:
i+=1
else:
# found better interpolant
del leaves[i+1]
del leaves[i]
leaves.insert(i, new_node)
return leaves
def replace(self, node1, node2):
a1, b2 = node1.data[2][0], node2.data[2][1]
nodes = self.get_cheb(a1, b2, self.max_order+1)
coeff = self.interpolate(nodes, self.basis, a1, b2)
if coeff is None:
raise ValueError("Singular Matrix while combining leaves?")
error = self.find_error(coeff, a1, b2, self.max_order)
if error < self.allowed_error:
node = Node(0)
node.data = [(a1 + b2)/2., coeff, [a1, b2], error]
return node
return False
def create_new_tree(self, leaves):
level = copy.deepcopy(leaves)
next_level = []
size = len(leaves)
while len(level) > 1:
rev = leaves[-1].get_level() < leaves[0].get_level()
if rev:
level.reverse()
length = len(level)//2
for i in range(length):
# this should set children's parent correctly
left = level.pop(0)
right = level.pop(0)
if rev:
parent = Node(0, right, left)
else:
parent = Node(0, left, right)
parent.data = [(left.data[0] + right.data[0])/2]
next_level.append(parent)
size += length
# add any remaining leaves so they are on the next level
assert len(level) <= 1
for i in range(len(level)):
next_level.append(level.pop(0))
if rev:
next_level.reverse()
level = next_level
next_level = []
################
new_root = level[0]
new_tree = Tree(new_root)
new_tree.size = size
new_tree.max_level = max(leaves[-1].get_level(), leaves[0].get_level())
return new_tree
```
#### File: jdoherty7/Adaptive_Interpolation/run_test.py
```python
from __future__ import absolute_import
import ctypes
import ctypes.util
import os
import time
import numpy as np
import numpy.linalg as la
import scipy.special as spec
import matplotlib as mpl
from tempfile import TemporaryDirectory
#import tempfile
#mpl.use("Agg")
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import adaptive_interpolation.adapt as adapt
import adaptive_interpolation.approximator as app
import adaptive_interpolation.generate as generate
import adaptive_interpolation.adaptive_interpolation as adapt_i
#import loopy as lp
#from loopy.tools import (empty_aligned, address_from_numpy,
# build_ispc_shared_lib, cptr_from_numpy)
def address_from_numpy(obj):
ary_intf = getattr(obj, "__array_interface__", None)
if ary_intf is None:
raise RuntimeError("no array interface")
buf_base, is_read_only = ary_intf["data"]
return buf_base + ary_intf.get("offset", 0)
def cptr_from_numpy(obj):
return ctypes.c_void_p(address_from_numpy(obj))
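# e.g. (illustrative): get_terminal_line("traceroute example.com", 0)
# -> '{"type": "input", "msg": "traceroute example.com"}'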
def build_ispc_shared_lib(
cwd, ispc_sources, cxx_sources,
ispc_options=[], cxx_options=[],
ispc_bin="ispc",
cxx_bin="g++",
quiet=True):
from os.path import join
ispc_source_names = []
for name, contents in ispc_sources:
ispc_source_names.append(name)
with open(join(cwd, name), "w") as srcf:
srcf.write(contents)
cxx_source_names = []
for name, contents in cxx_sources:
cxx_source_names.append(name)
with open(join(cwd, name), "w") as srcf:
srcf.write(contents)
from subprocess import check_call
ispc_cmd = ([ispc_bin,
"--pic",
"-o", "ispc.o"]
+ ispc_options
+ list(ispc_source_names))
if not quiet:
print(" ".join(ispc_cmd))
check_call(ispc_cmd, cwd=cwd)
cxx_cmd = ([
cxx_bin,
"-shared", "-Wl,--export-dynamic",
"-fPIC",
"-oshared.so",
"ispc.o",
]
+ cxx_options
+ list(cxx_source_names))
check_call(cxx_cmd, cwd=cwd)
if not quiet:
print(" ".join(cxx_cmd))
def build_scalar_shared_lib(
cwd, cxx_sources,
cxx_options=[],
cxx_bin="g++",
quiet=True):
from os.path import join
cxx_source_names = []
for name, contents in cxx_sources:
cxx_source_names.append(name)
with open(join(cwd, name), "w") as srcf:
srcf.write(contents)
from subprocess import check_call
cxx_cmd = ([cxx_bin,
"-shared", "-Wl,--export-dynamic",
"-fPIC",
"-oshared.so",
]
+ cxx_options
+ list(cxx_source_names))
check_call(cxx_cmd, cwd=cwd)
if not quiet:
print(" ".join(cxx_cmd))
# bessel function for testing
def f(x, order=0):
return spec.jn(order, x)
def f0(x, v):
if v == 0:
return f(x)
elif v == 1:
return spec.jn(10, x)
elif v== 2:
return spec.hankel1(0, x)
elif v == 3:
return spec.hankel2(0, x)
else:
return spec.airy(x)
def run_data(tree_depth, order, size, n, vec=True):
if vec:
flop = size*(4 + 2 + 2*(order-2))
else:
#flop = size*(5 + 3 + 5*(order-2))
# with fused mult add / sub and if 2*x_scaled is done outside loop
flop = size*(4 + 2 + 2*(order-2))
memop = size*(4*tree_depth + order + 4)*4 # 4 bytes each access (single precision)
return flop, memop
def run(approx, code, size, NRUNS, vec):
if approx.dtype_name == "float":
assert approx.dtype == np.float32
STREAM_DTYPE = np.float32
STREAM_CTYPE = ctypes.c_float
elif approx.dtype_name == "double":
assert approx.dtype == np.float64
STREAM_DTYPE = np.float64
STREAM_CTYPE = ctypes.c_double
if "calc intervals" in approx.optimizations:
INDEX_DTYPE = np.int64
INDEX_CTYPE = ctypes.c_longlong
else:
INDEX_DTYPE = np.int32
INDEX_CTYPE = ctypes.c_int
with open("tests/tasksys.cpp", "r") as ts_file:
tasksys_source = ts_file.read()
with TemporaryDirectory() as tmpdir:
#if 1:
#tmpdir = os.getcwd() + "/gen"
#print(tmpdir)
#print(code)
# the -march g++ cpu flag causes vectorization of scalar code, but this
# is the cpu's own family, so will it be auto-vectorized anyway on dunkel?
# when running the compiler on my own it seems like it isn't..
home = os.path.expanduser("~")
build_ispc_shared_lib(
tmpdir,
[("stream.ispc", code)],
[("tasksys.cpp", tasksys_source)],
cxx_options=[
#"-g", "-O0",
"-fopenmp", "-DISPC_USE_OMP", "-std=c++11"],
ispc_options=([
# -g causes optimizations to be disabled
# -O0 turns off default optimizations (three levels available)
"-g", "-O1", "--no-omit-frame-pointer",
"--arch=x86-64",
#"--opt=force-aligned-memory",
#"--opt=fast-math",
#"--opt=disable-fma",
# turn off error messaging
"--woff",
#"--opt=disable-loop-unroll",
"--cpu=core-avx2",
"--target=avx2-i32x16",
]
#+ (["--opt=disable-loop-unroll"] if "unroll" in approx.optimizations
# or "unroll_order" in approx.optimizations else [])
# this is needed because map is int64 ?
# only need to use if accessing more than 4 GB of information?
+ (["--addressing=32"])
),
ispc_bin= home+"/Desktop/ispc-v1.9.1-linux/ispc",
)
if 1:
#os.system("ls "+tmpdir)
os.system("cd "+tmpdir+" && objdump -S ispc.o > ispc.s")
#os.system("ls "+tmpdir)
with open(tmpdir +"/ispc.s", 'r') as asm:
assembly = asm.readlines()
with open(home+"/the_assembly.txt", 'w') as asm_file:
asm_file.write("\n".join(assembly))
dt = approx.dtype
if "output" in approx.optimizations:
x = np.linspace(approx.lower_bound,
#1.1,
approx.upper_bound,
size,
endpoint=False,
dtype=dt)
if "random" in approx.optimizations:
np.random.shuffle(x)
# make sure that these are already numpy arrays of the correct type..
y = np.zeros(size, dtype=dt)
approx.tree_1d = np.array(approx.tree_1d, dtype=dt)
approx.interval_a = np.array(approx.interval_a, dtype=dt)
approx.interval_b = np.array(approx.interval_b, dtype=dt)
approx.intervals = np.array(approx.intervals, dtype=dt)
approx.coeff = np.array(approx.coeff, dtype=dt)
knl_lib = ctypes.cdll.LoadLibrary(os.path.join(tmpdir, "shared.so"))
g = knl_lib.eval
if 'map' in approx.optimizations:
if "calc intervals" in approx.optimizations:
args = [cptr_from_numpy(approx.coeff),
cptr_from_numpy(approx.cmap)]
else:
args = [#cptr_from_numpy(approx.interval_a),
#cptr_from_numpy(approx.interval_b),
cptr_from_numpy(approx.intervals),
cptr_from_numpy(approx.coeff),
cptr_from_numpy(approx.map)]
else:
# evaluating using BST for interval search
args = [cptr_from_numpy(approx.tree_1d)]
if "output" in approx.optimizations:
args.append(cptr_from_numpy(x))
args.append(cptr_from_numpy(y))
else:
ret = np.zeros((2,))
retc = cptr_from_numpy(ret)
args.append(retc)
# run before instantiating too??
for i in range(2):
g(*args)
def call_kernel():
g(*args)
# clear the kernel
for i in range(30):
call_kernel()
if "graph" in approx.optimizations:
s = 2048
if 0:
plt.figure()
plt.title("Function")
plt.scatter(x[::s], y[::s])
plt.show()
if 0:
plt.figure()
plt.title("Absolute Error")
plt.yscale("log")
plt.plot(x[::s], abs(y[::s] - f(x[::s])))
plt.show()
start_time = time.time()
for _ in range(NRUNS):
call_kernel()
elapsed = time.time() - start_time
# Automatically calculate Memory Bandwidth and GigaFlops.
#FLOPS = (4 + 2 + 2*(approx.max_order-2))
# reduction + scale + first terms + order loop
nbytes = 4 if approx.dtype_name == 'float' else 8
d = 4 if approx.cmap.dtype == np.int32 else 8
if "calc intervals" in approx.optimizations:
# without the interval storage
# flops = map + get_data + transform + indexscale + eval
# flops = 2 + 4 + 5 + 1 + 4*order
# memops = (3 + approx.max_order)*nbytes + d
# below is number that was tested with
#FLOPS = 2 + 2 + 5 + 1 + 4*approx.max_order
# should have been this though..
FLOPS = 2 + 4 + 5 + 1 + 4*approx.max_order
memops = (2 + approx.max_order)
Bytes = approx.coeff.nbytes + approx.cmap.nbytes
#Bytes = (1 + approx.max_order)*nbytes + d
else:
# with the interval storage, was 5 +
FLOPS = 2 + 5 + 1 + 4*approx.max_order
memops = (3 + (1 + approx.max_order))
Bytes = approx.coeff.nbytes + approx.map.nbytes + approx.intervals.nbytes
#Bytes = (2 + (1 + approx.max_order))*nbytes + d
# mem reciprocal throughput of instruction between 7 and 12
print("Average Runtime (ns) per x:", (1e9)*elapsed/NRUNS/size)
# times size*4 because thats the number of bytes in x
# GigaByte is 10^9 Bytes
# calculate the predicted values
mc = 6.5 if approx.dtype_name == "float" else 5.5
fc = .5
freq = 2.2
vw = 8 if approx.dtype_name == "float" else 4 # for double, non-turbo
# runtime is in cycles
predictedruntime = fc*FLOPS+mc*memops
predictedGFLOPS = vw*8.8#vw*FLOPS*freq/predictedruntime
predictedMB = vw*Bytes*freq/predictedruntime
# calculate the actual
avgtime = elapsed/NRUNS
GFLOPS = (FLOPS/avgtime)*(size/(10**9))#(2**30)
MEMBND = (Bytes/avgtime)*(1./(10**9))
peakGF = 8.8*vw
#peakMB = 76.8
peakMB = 10.88
latency = (vw*avgtime/size)*10**9
#print("Flops/Byte: ", (FLOPS/avgtime)/(memops*size))
print(avgtime, predictedruntime)
print()
print("Latency (ns): ", latency)
print("KiloBytes : ", Bytes/(10**3))
print("Pred GFLOPS/s: ", predictedGFLOPS)
print("Pred MB (GB/s): ", predictedMB)
print()
print("GFLOPS/s: ", GFLOPS, " (Max = "+str(peakGF)+") ", GFLOPS/peakGF)
print("MB (GB/s): ", MEMBND, " (Max = "+str(peakMB)+" GB/s) ", MEMBND/peakMB)
#print("Total Use: ", (GFLOPS/peakGF) + (MEMBND/peakMB))
if "output" in approx.optimizations:
s = 2048
z = f(x[::s])
a = la.norm(z-y[::s], np.inf)
r = a/la.norm(z, np.inf)
#if r > approx.allowed_error:
print("Relative Error:", r)
print("Absolute Error:", a)
else:
x = np.linspace(approx.lower_bound, approx.upper_bound, size, endpoint=False)[::vw]
y = f(x)
ysum = np.sum(y)
print(ysum)
print(ret[0])
print(np.abs(ysum - ret[0]))
return GFLOPS, MEMBND, latency
def run_one(approx, size, num_samples, opt=[]):
print()
print(opt)
#print("Vector: ", order, precision)
approx.optimizations = opt
pre_header_code = adapt_i.generate_code(approx, size=size, vector_width=8, cpu=True)
ispc_code = generate.build_code(approx, ispc=True)
# Bytes of floating point type used, not including x and y
#######################################################
f = 4 if approx.dtype_name == "float" else 8
L, s = approx.leaf_index + 1, len(approx.map)
d = 4 if approx.cmap.dtype == np.float32 else 8
if "calc intervals" in approx.optimizations:
STORAGE = (s*(d/f) + approx.max_order*L)*f
else:
STORAGE = (s + (approx.max_order + 2)*L)*f
STORAGE = STORAGE / (2**10) # convert to GB
print("L, Tree Depth, L/Map Size: ", L, approx.num_levels-1, L/s)
if "verbose" in opt:
print("Space Complexity: ", STORAGE, " kB")
print("(Store [a,b] - Calculate [a,b]) = ", s*f*(1 + 2*(L/s) - d/f)/(2**10))
print("L, Map size, L/Map Size: ", L, s, L/s)
print()
print(ispc_code)
#####################################################
#print(ispc_code)
print(approx.lower_bound, approx.upper_bound)
GFLOPS, MEMBND, latency = run(approx, ispc_code, size, num_samples, True)
print()
return GFLOPS, MEMBND, latency
def test(a, b, orders, precisions):
# Function used to obtain results. DONT CHANGE
size, num_samples = 2**27, 1
baseopt = ["arrays", "map", "random"]
opts = [[], ["calc intervals"]]#, ["scalar"], ["scalar", "calc intervals"]]
stable = {}
dtable = {}
for precision in precisions:
stable[precision] = []
dtable[precision] = []
for order in orders:
print(order, precision)
if precision > 1e-7:
name = "./approximations/32o" + str(order) + "-p" + str(precision)
approx = adapt_i.load_from_file(name)
print(name)
for opt in opts:
run_one(approx, size, 1, baseopt+opt+["output"])
c = run_one(approx, size, num_samples, baseopt + opt)
stable[precision].append((order, opt, c))
name = "./approximations/64o" + str(order) + "-p" + str(precision)
approx = adapt_i.load_from_file(name)
print(name)
for opt in opts:
run_one(approx, size, 1, baseopt+opt+["output"])
c = run_one(approx, size, num_samples, baseopt + opt)
dtable[precision].append((order, opt, c))
def save_approximations(a, b, orders, precisions):
# Change dtypes and precisions manually
size, num_samples = 2**12, 2
opt = ["arrays", "map", "random"]
for precision in precisions:
for order in orders:
print(order, precision)
if precision > 1e-7:
try:
name = "./approximations/32o" + str(order) + "-p" + str(precision)
approx = adapt_i.make_interpolant(a, b, f, order,
precision, 'chebyshev',
dtype=32, optimizations=opt)
adapt_i.write_to_file(name, approx)
run_one(approx, size, num_samples, opt)
run_one(approx, size, num_samples, opt + ["scalar"])
run_one(approx, size, num_samples, opt + ["calc intervals"])
run_one(approx, size, num_samples, opt + ["scalar", "calc intervals"])
except:
pass
opt = ["arrays", "map", "calc intervals", "random"]
name = "./approximations/64o" + str(order) + "-p" + str(precision)
approx = adapt_i.make_interpolant(a, b, f, order,
precision, 'chebyshev',
dtype=64, optimizations=opt)
adapt_i.write_to_file(name, approx)
run_one(approx, size, num_samples, opt)
run_one(approx, size, num_samples, opt + ["scalar"])
run_one(approx, size, num_samples, opt + ["calc intervals"])
run_one(approx, size, num_samples, opt + ["scalar", "calc intervals"])
def test_remez_incorrect():
# tests the lookup table size for incorrect remez algorithm and polynomial interpolation
a, b = 0, 20
order, precision = 6, 1e-6
size, num_samples = 2**23, 10 # assumed values; run_one requires both
opt = ["arrays", "map", "calc intervals", "random", "remez incorrect"]
approx = adapt_i.make_interpolant(a, b, f, order,
precision, 'chebyshev',
dtype=32, optimizations=opt)
#adapt_i.write_to_file("./testingclass", approx)
#approx = adapt_i.load_from_file("./testingclass")
run_one(approx, size, num_samples, opt=opt)
opt = ["arrays", "map", "calc intervals", "random"]
approx1 = adapt_i.make_interpolant(a, b, f, order,
precision, 'chebyshev',
dtype=32, optimizations=opt)
run_one(approx1, size, num_samples, opt=opt)
opt = ["arrays", "map", "calc intervals", "random"]
approx2 = adapt_i.make_interpolant(a, b, f, order,
precision, 'chebyshev',
dtype=32, optimizations=opt,
adapt_type="Trivial")
run_one(approx2, size, num_samples, opt=opt)
print("Incorrect Remez, Correct, Polynomial Interpolation")
print(len(approx.map), len(approx1.map), len(approx2.map))
print('{0:.16f}'.format(la.norm(approx.coeff,2)),
'{0:.16f}'.format(la.norm(approx1.coeff,2)),
'{0:.16f}'.format(la.norm(approx2.coeff,2)))
print('{0:.16f}'.format(la.norm(approx.coeff,np.inf)),
'{0:.16f}'.format(la.norm(approx1.coeff,np.inf)),
'{0:.16f}'.format(la.norm(approx2.coeff,np.inf)))
def scalar_test():
# decreasing the size causes the GFLOPS to go down...
# size of 0 takes about 1e-5 seconds to run function.
# with 2**10 and 2**15 size its still about that.
# 2**20 is better but 2**26 guarantees it's good
# takes long enough for the measurement to make sense.
a, b = 1, 21
order, precision = 3, np.finfo(np.float32).eps*10
size, num_samples = 2**23, 50
d = 32
opt = ["arrays", "map", "random"]
approx = adapt_i.make_interpolant(a, b, f, order,
precision,
'chebyshev',
dtype=d,
optimizations=opt)
run_one(approx, size, num_samples, opt=opt + ["calc intervals"])
run_one(approx, size, num_samples, opt=opt)
# scalar does something incorrect? oh.. data race?
run_one(approx, size, num_samples, opt=opt + ["scalar", "calc intervals"])
run_one(approx, size, num_samples, opt=opt + ["scalar"])
# run the main program
if __name__ == "__main__":
#scalar_test()
#get_asm()
#new_test()
#test_remez_incorrect()
# Function used to obtain results. DONT CHANGE
# FAILS in case of Double precision near machine precision.
# but only with calc intervals. Something is wrong with that.
# not sure what it is though.
# really fails by zeros. 1.72, but has too high of error on whole interval
# x_scaled is correct. so maybe its something about the coefficients?
# maybe im using the wrong dtype somewhere?
# its actually not. The scaling/L is imprecise for some reason..
#2/(b-a) is accurate though.. at least I figured it out...
if 0:
order, num_samples = 3, 10
a, b = -3, 23
size = 2**23
precision = 90000*np.finfo(np.float32).eps
opt = ["arrays", "map", "verbose"]
#name = "./approximations/64o" + str(order) + "-p" + str(precision)
#approx = adapt_i.load_from_file(name)
approx = adapt_i.make_interpolant(a, b, f, order,
precision, 'chebyshev',
dtype=64, optimizations=opt)
print(2*approx.D + approx.lgD)
scaling = (approx.upper_bound - approx.lower_bound) / len(approx.map)
c = list(map(lambda x: (int( bin(x)[ :-2*approx.D], 2),
int("0b"+bin(x)[-2*approx.D: -approx.D], 2),
int("0b"+bin(x)[ -approx.D: ], 2),
bin(x),
int("0b"+bin(x)[-2*approx.D: -approx.D], 2)*scaling + approx.lower_bound,
int( bin(x)[ :-2*approx.D], 2)*scaling,
), approx.cmap))
"""
print(approx.lower_bound, approx.upper_bound)
print(" L"," l", "leaf index")
for a in c[:20]:
print(a[0], a[1]*scaling, "\t",a[2],"\t",a[4], "\t", a[5])
print(2*approx.D + approx.lgD)
print(approx.cmap.dtype)
print(len(approx.map))
print((approx.upper_bound - approx.lower_bound)/len(approx.cmap))
print(1/((approx.upper_bound - approx.lower_bound)/len(approx.cmap)))
print(2./((approx.upper_bound - approx.lower_bound)/len(approx.cmap)))
"""
#print(approx.interval_a)
#print(approx.interval_b)
print(precision)
run_one(approx, size, num_samples, opt)
run_one(approx, size, num_samples, opt + ["calc intervals"])
if 1:
a, b = 1, 21
orders = [3]
precisions = [10*np.finfo(np.float32).eps, 100*np.finfo(np.float64).eps]
save_approximations(a, b, orders, precisions)
#test(a, b, orders, precisions)
#save_test()
```
#### File: jdoherty7/Adaptive_Interpolation/stream.py
```python
import os
import numpy as np
import numpy.linalg as la
import ctypes
import ctypes.util
from time import time
from tempfile import TemporaryDirectory
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.use("Agg")
#STREAM is intended to measure the bandwidth from main memory.
#It can, of course, be used to measure cache bandwidth as well, but that is not what I have been
#publishing at the web site. Maybe someday....
#The general rule for STREAM is that each array must be at least 4x
#the size of the sum of all the last-level caches used in the run, or 1 Million elements -- whichever is larger.
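# worked example (using the L3 size assumed in main() below): with a
# 30720 KB L3 and 8-byte doubles, 4 * 30720 * 1024 / 8 is about 15.7M
# elements per array, so the sizes up to 2**25 used here span the cache
# and main-memory regimes.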
def address_from_numpy(obj):
ary_intf = getattr(obj, "__array_interface__", None)
if ary_intf is None:
raise RuntimeError("no array interface")
buf_base, is_read_only = ary_intf["data"]
return buf_base + ary_intf.get("offset", 0)
def cptr_from_numpy(obj):
return ctypes.c_void_p(address_from_numpy(obj))
# https://github.com/hgomersall/pyFFTW/blob/master/pyfftw/utils.pxi#L172
def align(array, dtype, order='C', n=64):
'''empty_aligned(shape, dtype='float64', order='C', n=None)
Function that returns an empty numpy array that is n-byte aligned,
where ``n`` is determined by inspecting the CPU if it is not
provided.
The alignment is given by the final optional argument, ``n``. If
``n`` is not provided then this function will inspect the CPU to
determine alignment. The rest of the arguments are as per
:func:`numpy.empty`.
'''
shape = array.shape
itemsize = np.dtype(dtype).itemsize
# Apparently there is an issue with numpy.prod wrapping around on 32-bits
# on Windows 64-bit. This shouldn't happen, but the following code
# alleviates the problem.
if not isinstance(shape, (int, np.integer)):
array_length = 1
for each_dimension in shape:
array_length *= each_dimension
else:
array_length = shape
base_ary = np.empty(array_length*itemsize+n, dtype=np.int8)
# We now need to know how to offset base_ary
# so it is correctly aligned
_array_aligned_offset = (n-address_from_numpy(base_ary)) % n
new_array = np.frombuffer(
base_ary[_array_aligned_offset:_array_aligned_offset-n].data,
dtype=dtype).reshape(shape, order=order)
np.copyto(new_array, array)
return new_array
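# illustrative check (assumes a 64-byte boundary):
#   a = align(np.ones(1024), np.float32, n=64)
#   assert address_from_numpy(a) % 64 == 0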
def build_ispc_shared_lib(
cwd, ispc_sources, cxx_sources,
ispc_options=[], cxx_options=[],
ispc_bin="ispc",
cxx_bin="g++",
quiet=True):
from os.path import join
ispc_source_names = []
for name, contents in ispc_sources:
ispc_source_names.append(name)
with open(join(cwd, name), "w") as srcf:
srcf.write(contents)
cxx_source_names = []
for name, contents in cxx_sources:
cxx_source_names.append(name)
with open(join(cwd, name), "w") as srcf:
srcf.write(contents)
from subprocess import check_call
ispc_cmd = ([ispc_bin,
"--pic",
"-o", "ispc.o"]
+ ispc_options
+ list(ispc_source_names))
if not quiet:
print(" ".join(ispc_cmd))
check_call(ispc_cmd, cwd=cwd)
cxx_cmd = ([
cxx_bin,
"-shared", "-Wl,--export-dynamic",
"-fPIC",
"-oshared.so",
"ispc.o",
]
+ cxx_options
+ list(cxx_source_names))
check_call(cxx_cmd, cwd=cwd)
if not quiet:
print(" ".join(cxx_cmd))
def make_code(experiment, runs, single):
if experiment == "triad":
ispc_code = """
export void stream(
uniform double *uniform a,
uniform double *uniform b,
uniform double *uniform c,
uniform double scalar,
uniform int32 n){
for (uniform int32 runs=0; runs<%i; runs+=1){
for (uniform int32 i=0; i<n; i+=programCount){
varying int32 is = i + programIndex;
// broadcast sends the value that i has for the program instance
// specified in the second argument to all other program instances
streaming_store(a + i, broadcast(b[i] + scalar * c[i], 0));
//a[is] = b[is] + scalar * c[is];
}
}
}
""" % runs
elif experiment == "copy":
ispc_code = """
export void stream(
uniform double *uniform a,
uniform double *uniform b,
uniform int32 n){
for (uniform int32 runs=0; runs<%i; runs+=1){
for (uniform int32 i=0; i<n; i+=programCount){
varying int32 is = i + programIndex;
streaming_store(a+i, broadcast(b[i], 0));
//a[is] = b[is];
}
}
}
"""% runs
elif experiment == "scale":
ispc_code = """
export void stream(
uniform double *uniform a,
uniform double *uniform b,
uniform double scalar,
uniform int32 n){
for (uniform int32 runs=0; runs<%i; runs+=1){
for (uniform int32 i=0; i<n; i+=programCount){
varying int32 is = i + programIndex;
streaming_store(a+i, broadcast(scalar * b[i], 0));
//a[is] = scalar * b[is];
}
}
}
"""% runs
elif experiment == "sum":
ispc_code = """
export void stream(
uniform double *uniform a,
uniform double *uniform b,
uniform double *uniform c,
uniform int32 n){
for (uniform int32 runs=0; runs<%i; runs+=1){
for (uniform int32 i=0; i<n; i+=programCount){
varying int32 is = i + programIndex;
streaming_store(a+i, broadcast(b[i] + c[i], 0));
//a[is] = b[is] + c[is];
}
}
}
"""% runs
if single==True:
ispc_code = ispc_code.replace("double", "float")
return ispc_code
# notes: core pinning, frequency scaling.
# on a regular store the cache line is first read into cache before it
# can be written back to memory; streaming_store avoids that read by
# writing directly to memory (non-temporal store).
# ispc streaming store patch is what allows it to do this.
# issue port - see the Sandy Bridge architecture article
def main(experiment):
ALIGN_TO = 32
# 22 is the first above the L3, its double the L3 about 50,000 KB
sizes = np.power(2, np.arange(5, 26))
single=True
#ARRAY_SIZE = [size(L1)/3, 3*size(L3)]
"""
L1d cache: 32K – data cache
L1i cache: 32K – instruction cache
L2 cache: 256K
L3 cache: 30720K
cache size: 30720 KB
"""
if single:
STREAM_DTYPE = np.float32
STREAM_CTYPE = ctypes.c_float
INDEX_DTYPE = np.int32
INDEX_CTYPE = ctypes.c_int
else:
STREAM_DTYPE = np.float64
STREAM_CTYPE = ctypes.c_double
INDEX_DTYPE = np.int32
INDEX_CTYPE = ctypes.c_int
KBs = []
Bandwidth = []
for ARRAY_SIZE in sizes:
#NRUNS * ARRAY_SIZE = 10* 2**26
NRUNS = int((50 * 2**26)/ARRAY_SIZE)
print()
print("Task: ", experiment)
with open("tests/tasksys.cpp", "r") as ts_file:
tasksys_source = ts_file.read()
ispc_code = make_code(experiment, NRUNS, single)
with TemporaryDirectory() as tmpdir:
#print(ispc_code)
build_ispc_shared_lib(
tmpdir,
[("stream.ispc", ispc_code)],
[("tasksys.cpp", tasksys_source)],
cxx_options=["-g", "-fopenmp", "-DISPC_USE_OMP"],
ispc_options=([
"-g", "-O1", "--no-omit-frame-pointer",
"--target=avx2-i32x16",
"--opt=force-aligned-memory",
"--opt=disable-loop-unroll",
#"--opt=fast-math",
#"--woff",
#"--opt=disable-fma",
"--addressing=32",
]
),
#ispc_bin= "/home/ubuntu-boot/Desktop/ispc-v1.9.1-linux/ispc",
ispc_bin= "/home/ubuntu-boot/Desktop/ispc-1.9-with-streaming-store/ispc",
quiet=True,
)
knl_lib = ctypes.cdll.LoadLibrary(os.path.join(tmpdir, "shared.so"))
scalar = 4
choice ={ "triad":(1, 3, 0, 7),
"copy": (1, 9,-1,-1),
"scale":(),
"sum": ()
}
a0, b0, c0, scalar = choice[experiment]
a = a0*np.ones(ARRAY_SIZE, dtype=STREAM_DTYPE)
b = b0*np.ones(ARRAY_SIZE, dtype=STREAM_DTYPE)
c = c0*np.ones(ARRAY_SIZE, dtype=STREAM_DTYPE)
a = align(a, dtype=STREAM_DTYPE)#, n=ALIGN_TO)
b = align(b, dtype=STREAM_DTYPE)#, n=ALIGN_TO)
c = align(c, dtype=STREAM_DTYPE)#, n=ALIGN_TO)
g = knl_lib.stream
if experiment == "copy":
x = [cptr_from_numpy(a),
cptr_from_numpy(b),
INDEX_CTYPE(ARRAY_SIZE),]
elif experiment == "triad":
x = [cptr_from_numpy(a),
cptr_from_numpy(b),
cptr_from_numpy(c),
STREAM_CTYPE(scalar),
INDEX_CTYPE(ARRAY_SIZE),]
elif experiment == "scale":
x = [cptr_from_numpy(a),
cptr_from_numpy(b),
STREAM_CTYPE(scalar),
INDEX_CTYPE(ARRAY_SIZE),]
elif experiment == "sum":
x = [cptr_from_numpy(a),
cptr_from_numpy(b),
cptr_from_numpy(c),
INDEX_CTYPE(ARRAY_SIZE),]
for i in range(2):
g(*x)
def call_kernel():
g(*x)
for i in range(4):
call_kernel()
ts = []
start_time = time()
# This will run Nruns # of times
call_kernel()
elapsed = time() - start_time
ts.append(elapsed/NRUNS)
ts = np.array(ts)
#print(ts)
#print("Min Time: ", np.min(ts))
#print("Max Time: ", np.max(ts))
#print("Avg Time: ", np.mean(ts))
by = 3 if experiment in ["triad", "sum"] else 2
# The STREAM BENCHMARK paper considers KB=1024 and GB=2^30
GB = 1e-9*by*a.nbytes
KB = 1e-3*by*a.nbytes
print("KB: ", KB)
KBs.append(KB)
# only care about maximum bandwidth
Bandwidth.append(GB/np.min(ts))
print("Max MB: ", GB/np.min(ts), "GB/s")
#print("Min MB: ", GB/np.max(ts), "GB/s")
#print("Avg MB: ", GB/np.mean(ts), "GB/s")
#print("Max Error")
if experiment == "triad":
error = la.norm(a-b-scalar*c, np.inf)
elif experiment == "copy":
error = la.norm(a-b , np.inf)
elif experiment == "scale":
error = la.norm(a-(b*scalar), np.inf)
else:
error = la.norm(a-b-c , np.inf)
assert error < 1e-1
print()
print("Single=",single)
print(KBs)
print("Bandwidths")
print(Bandwidth)
plt.figure()
plt.title("Memory Bandwidth for '"+experiment+"' Test")
plt.axvline(x=32, color="r", label="End of L1")
plt.axvline(x=256, color="b", label="End of L2")
plt.axvline(x=30720, color="g", label="End of L3")
plt.plot(KBs, Bandwidth, c="k", label=experiment)
plt.xscale("log")
plt.xlabel("Memory Used (KB)")
plt.ylabel("Memory Bandwidth (GB/s)")
plt.legend()
plt.savefig(experiment + str(single) + ".png")
if __name__ == "__main__":
main("triad")
main("copy")
main("scale")
main("sum")
```
#### File: Adaptive_Interpolation/tests/performance_tests.py
```python
from __future__ import absolute_import
from nose.tools import *
import time
import numpy as np
import numpy.linalg as la
import scipy.special as spec
import matplotlib.pyplot as plt
import adaptive_interpolation.adapt as adapt
import adaptive_interpolation.approximator as app
import adaptive_interpolation.generate as generate
import adaptive_interpolation.adaptive_interpolation as adapt_i
# bessel function for testing
def f(x):
return spec.jn(0, x)
# a function for testing
def f1(x0):
xs = []
for x in x0:
if x < 1:
xs.append(1 + x)
elif (1 <= x) and (x < 2.02):
xs.append(1 + x**2)
elif (2.02 <= x) and (x < 3.5):
xs.append(-3*np.log(x))
elif (3.5 <= x) and (x < 4.4):
xs.append(np.exp(np.sqrt(x)))
elif (4.4 <= x) and (x < 7.001):
xs.append(3)
elif (7.001 <= x) and (x < 9.306):
xs.append(np.sqrt(x**4.4) / 100.)
elif (9.306 <= x) and (x <= 11):
xs.append(x - 3)
return np.array(xs)
# plot the absolute errors as well as the actual and approximated functions
def my_plot(x, actual, approximation, abs_errors):
plt.figure()
plt.title('Actual and Approximate values Graphed')
plt.plot(x, actual, 'r')
plt.plot(x, approximation, 'b')
plt.figure()
plt.yscale('log')
plt.title('Absolute Error in Interpolated Values')
plt.plot(x, abs_errors+1e-17, 'gs')
plt.show()
# Given a specific Approximator class, this will test how the
# performance and accuracy varies when the code is varied from branching
# and vectorized to not branching and not vectorized
def test_parallel(approx):
size = int(1e7)
interval = approx.heap[1][3]
x = np.linspace(interval[0], interval[1], size, dtype=np.float64)
nb_nv = adapt_i.generate_code(approx, 0, 0)
nb_v = adapt_i.generate_code(approx, 0, 1)
b_nv = adapt_i.generate_code(approx, 1, 0, size)
b_v = adapt_i.generate_code(approx, 1, 1, size)
# time run_code functions and return times
t00 = time.time()
val_00 = run_code(nb_nv, x, approx=0, vectorized=False)
t00 = time.time() - t00
t01 = time.time()
val_01 = run_code(nb_v, x, approx, vectorized=True)
t01 = time.time() - t01
t10 = time.time()
val_10 = run_code(b_nv, x, approx=0, vectorized=False)
t10 = time.time() - t10
t11 = time.time()
val_11 = run_code(b_v, x, approx, vectorized=True)
t11 = time.time() - t11
# function values are independent of generative method
assert la.norm(val_00 - val_01, np.inf) < 1e-15
assert la.norm(val_00 - val_10, np.inf) < 1e-15
assert la.norm(val_00 - val_11, np.inf) < 1e-15
assert la.norm(val_01 - val_10, np.inf) < 1e-15
assert la.norm(val_01 - val_11, np.inf) < 1e-15
assert la.norm(val_10 - val_11, np.inf) < 1e-15
print("nb_nv\tnb_v\tb_nv\tb_v")
print(t00,'\t', t01, '\t', t10,'\t', t11)
return [t00, t01, t10, t11]
def test_all_parallel_methods():
a, b = 0, 10
est1 = adapt_i.make_interpolant(a, b, f, 3, 1e-9, "monomial")
est2 = adapt_i.make_interpolant(a, b, f, 3, 1e-9, "chebyshev")
est3 = adapt_i.make_interpolant(a, b, f, 3, 1e-9, "legendre")
test_parallel(est1)
test_parallel(est2)
test_parallel(est3)
def test_exact_interpolants():
order1 = lambda x: 3*x + 7
order4 = lambda x: 4.123*x**4 - 5.6*x**3 - x**2 + 4.5
order6 = lambda x: x**6 - 3*x**5 - 2*x**4 + x - 3
order8 = lambda x: x**8 - 42*x**7 + 7.5*x**5 - 4.1234*x**4 - 1.2*x**2
a, b = -10, 10
x = np.linspace(a, b, 100, dtype=np.float64)
est1 = adapt_i.make_interpolant(a,b,order1,1,1e-9, "monomial").evaluate(x)
est4 = adapt_i.make_interpolant(a,b,order4,4,1e-9, "monomial").evaluate(x)
est6 = adapt_i.make_interpolant(a,b,order6,6,1e-9, "monomial").evaluate(x)
est8 = adapt_i.make_interpolant(a,b,order8,8,1e-9, "monomial").evaluate(x)
assert la.norm(est1-order1(x), np.inf)/la.norm(order1(x), np.inf) < 1e-15
assert la.norm(est4-order4(x), np.inf)/la.norm(order4(x), np.inf) < 1e-15
assert la.norm(est6-order6(x), np.inf)/la.norm(order6(x), np.inf) < 1e-15
assert la.norm(est8-order8(x), np.inf)/la.norm(order8(x), np.inf) < 1e-15
# tests that the returned interpolant is below the given error
def test_guaranteed_accuracy():
func1 = lambda x: np.sin(np.sin(x))
func2 = lambda x: np.cos(np.sin(x))
func3 = lambda x: np.sqrt(x)
a, b = -10, 10
x = np.linspace(a, b, 100, dtype=np.float64)
est31 = adapt_i.make_interpolant(a,b,func1,10,1e-3, "monomial").evaluate(x)
est32 = adapt_i.make_interpolant(a,b,func2,10,1e-3, "chebyshev").evaluate(x)
est33 = adapt_i.make_interpolant(a,b,func3,10,1e-3, "legendre").evaluate(x)
est61 = adapt_i.make_interpolant(a,b,func1,10,1e-6, "monomial").evaluate(x)
est62 = adapt_i.make_interpolant(a,b,func2,10,1e-6, "chebyshev").evaluate(x)
est63 = adapt_i.make_interpolant(a,b,func3,10,1e-6, "legendre").evaluate(x)
est91 = adapt_i.make_interpolant(a,b,func1,10,1e-9, "monomial").evaluate(x)
est92 = adapt_i.make_interpolant(a,b,func2,10,1e-9, "chebyshev").evaluate(x)
est93 = adapt_i.make_interpolant(a,b,func3,10,1e-9, "legendre").evaluate(x)
assert la.norm(est31-func1(x), np.inf)/la.norm(func1(x), np.inf) < 1e-3
assert la.norm(est32-func2(x), np.inf)/la.norm(func2(x), np.inf) < 1e-3
assert la.norm(est33-func3(x), np.inf)/la.norm(func3(x), np.inf) < 1e-3
assert la.norm(est61-func1(x), np.inf)/la.norm(func1(x), np.inf) < 1e-6
assert la.norm(est62-func2(x), np.inf)/la.norm(func2(x), np.inf) < 1e-6
assert la.norm(est63-func3(x), np.inf)/la.norm(func3(x), np.inf) < 1e-6
assert la.norm(est91-func1(x), np.inf)/la.norm(func1(x), np.inf) < 1e-9
assert la.norm(est92-func2(x), np.inf)/la.norm(func2(x), np.inf) < 1e-9
assert la.norm(est93-func3(x), np.inf)/la.norm(func3(x), np.inf) < 1e-9
# run the main program
if __name__ == "__main__":
test_exact_interpolants()
test_guaranteed_accuracy()
test_all_parallel_methods()
```
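The accuracy checks above all use the same relative sup-norm metric. As a standalone illustration of that pattern (plain NumPy, independent of the adaptive_interpolation package):
```python
import numpy as np
import numpy.linalg as la

# Relative sup-norm error between an approximation and the true values,
# the same metric used in test_exact_interpolants and test_guaranteed_accuracy.
x = np.linspace(-10, 10, 100)
actual = 3 * x + 7
approx = actual + 1e-12 * np.random.randn(100)  # stand-in for interpolant output
rel_err = la.norm(approx - actual, np.inf) / la.norm(actual, np.inf)
assert rel_err < 1e-9
```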
|
{
"source": "jdoiro3/2D-Projectiles",
"score": 3
}
|
#### File: jdoiro3/2D-Projectiles/game.py
```python
from libs import *
import projectile_classes as p
def main():
win = GraphWin('Projectile', 1000, 600)
win.setCoords(0, 0, 10000, 10000)
lan = p.Launcher(30,500)
lan.draw(win)
power_bar = p.Power_Bar(win, screen_cords=10000, color="red")
num = 0
mouse = None
    while mouse is None:
time.sleep(.01)
if keyboard.is_pressed('w'):
lan.move_up(win)
if keyboard.is_pressed('s'):
lan.move_down(win)
if keyboard.is_pressed('f'):
# limit the amount of shots that are fired
# if the 'F' key is held down.
if num == 0:
lan.launch(win)
num += 1
if num > 10:
num = 0
if keyboard.is_pressed('d'):
lan.increase_power()
power_bar.move_power_level(lan.power, win)
if keyboard.is_pressed('a'):
lan.decrease_power()
power_bar.move_power_level(lan.power, win)
lan.update_projectiles()
mouse = win.checkMouse()
if __name__ == "__main__":
    main()
```
|
{
"source": "jdoiro3/GeoTraceroute",
"score": 3
}
|
#### File: jdoiro3/GeoTraceroute/start_server.py
```python
import aiohttp
from aiohttp import web, WSCloseCode
import asyncio
import webbrowser
import json
import subprocess
import requests
import re
import sys
HOST = "127.0.0.1"
PORT = 8888
IP_PATTERN = re.compile(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})')
def get_terminal_line(line, line_num):
if line_num == 0:
line_type = "input"
else:
line_type = "output"
return json.dumps({"type": line_type, "msg": line})
def get_ip_from_line(line, line_num):
if line_num != 0:
ip = IP_PATTERN.search(line)
if ip:
return ip[0]
return None
else:
return None
async def http_handler(request):
return web.FileResponse('./index.html')
async def traceroute(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
msg = await ws.receive()
if msg.type == aiohttp.WSMsgType.TEXT:
data = json.loads(msg.data)["data"]
host = data["host"]
ip_info_tk = data["token"]
if sys.platform.startswith('linux'):
command = ["traceroute", "-I", "--max-hop=30", host]
elif sys.platform.startswith('win32'):
command = ["tracert", "-h", "30", host]
with subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=1, universal_newlines=True) as process:
for i, line in enumerate(process.stdout):
await ws.send_str(get_terminal_line(line, i))
ip = get_ip_from_line(line, i)
if ip:
ip_geolocation_data = requests.get(f"https://ipinfo.io/{ip}?token={ip_info_tk}").json()
geoloc_msg = json.dumps({"type": "geo", "msg": ip_geolocation_data})
await ws.send_str(geoloc_msg)
elif msg.type == aiohttp.WSMsgType.ERROR:
print('ws connection closed with exception %s' % ws.exception())
def create_runner():
app = web.Application()
app.add_routes([
web.get('/', http_handler),
web.get('/ws', traceroute),
])
app.router.add_static('/css/', path='static/css', name='css')
app.router.add_static('/scripts/', path='static/scripts', name='scripts')
return web.AppRunner(app)
async def start_server(host=HOST, port=PORT):
runner = create_runner()
await runner.setup()
site = web.TCPSite(runner, host, port)
await site.start()
if __name__ == "__main__":
webbrowser.open_new(f"http://localhost:{PORT}")
loop = asyncio.get_event_loop()
loop.run_until_complete(start_server())
loop.run_forever()
```
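The `/ws` handler above expects a single JSON text frame of the form `{"data": {"host": ..., "token": ...}}` before it starts streaming results. A minimal client sketch using aiohttp (host and token values are placeholders; the loop runs until the server closes the connection):
```python
import asyncio
import json

import aiohttp

async def run_trace():
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect("http://127.0.0.1:8888/ws") as ws:
            # First frame: the payload shape the traceroute handler reads.
            await ws.send_str(json.dumps(
                {"data": {"host": "example.com", "token": "YOUR_IPINFO_TOKEN"}}
            ))
            async for msg in ws:
                if msg.type == aiohttp.WSMsgType.TEXT:
                    print(json.loads(msg.data))
                else:
                    break

asyncio.get_event_loop().run_until_complete(run_trace())
```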
|
{
"source": "jdoiwork/TryFastAPI",
"score": 2
}
|
#### File: src/routes/home.py
```python
from fastapi import APIRouter
from loguru import logger
from models import Name
from services import Service
from ioc import Resolver
def create(resolve: Resolver):
router = APIRouter()
@router.get('/{name}')
def show(name: Name, s: Service = resolve(Service)):
logger.info(s.db)
logger.info(s.db.name)
return {
"hello": name,
"db-name": s.db.name
}
return router
```
#### File: src/routes/users.py
```python
from fastapi import APIRouter
from services import UsersService
from ioc import Resolver
def create(resolve: Resolver):
router = APIRouter()
@router.get('/')
def index(s: UsersService = resolve(UsersService)):
return {
"users": s.index(),
}
return router
```
#### File: src/services/db_service.py
```python
class DbService:
def __init__(self) -> None:
self.name = "sqlite"
```
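The `Resolver` imported from `ioc` is not shown in this snippet. A minimal sketch of how it could be implemented on top of FastAPI's `Depends`, assuming a simple type-to-factory registry (the names here are hypothetical, not the repo's actual API):
```python
from typing import Callable, Dict, Type, TypeVar

from fastapi import Depends

T = TypeVar("T")
Resolver = Callable[[type], object]

def make_resolver(registry: Dict[type, Callable[[], object]]) -> Resolver:
    # resolve(Service) returns a Depends(...) default parameter value,
    # so route signatures like `s: Service = resolve(Service)` work.
    def resolve(cls: Type[T]):
        return Depends(registry[cls])
    return resolve
```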
|
{
"source": "jdollarKodi/plugin.video.animepie",
"score": 2
}
|
#### File: resources/lib/plugin.py
```python
import logging
import xbmcaddon
from resources.lib import kodiutils, kodilogging
from resources.lib.routes.routes import generate_all_routes
from resources.lib.router_factory import get_router_instance
ADDON = xbmcaddon.Addon()
logger = logging.getLogger(ADDON.getAddonInfo('id'))
kodilogging.config()
def run():
plugin = get_router_instance()
generate_all_routes(plugin)
plugin.run()
```
#### File: lib/routes/animesearch.py
```python
import requests
import logging
import math
import xbmcaddon
from xbmcgui import ListItem
from xbmcplugin import addDirectoryItem, endOfDirectory
from resources.lib.constants.url import BASE_URL, SEARCH_PATH
from resources.lib.router_factory import get_router_instance
from resources.lib.routes.episodelist import episode_list
ADDON = xbmcaddon.Addon()
logger = logging.getLogger(ADDON.getAddonInfo('id'))
def generate_routes(plugin):
plugin.add_route(anime_search, "/search")
return plugin
def anime_search():
plugin = get_router_instance()
search_value = plugin.args["name"][0] if "name" in plugin.args else ""
page = plugin.args["page"][0] if "page" in plugin.args else "1"
params = {
"name": search_value,
"limit": 10,
"page": int(page)
}
res = requests.get(BASE_URL + SEARCH_PATH, params=params)
json_data = res.json()
for anime in json_data['data']['list']:
li = ListItem(anime["animeName"])
li.setArt({"icon": anime["backgroundSrc"]})
li.setInfo(type="video", infoLabels={"plot": anime["animeSynopsis"]})
addDirectoryItem(
plugin.handle,
plugin.url_for(
episode_list,
id=str(anime["animeID"]),
listId=str(anime["animeListID"]),
episode_count=str(anime["animeEpisode"])
),
li,
True
)
are_pages_remaining = math.ceil(float(json_data["data"]["count"]) / float(params.get("limit"))) > int(page)
if (are_pages_remaining):
        next_page_params = {"name": search_value, "page": str(int(page) + 1)}
addDirectoryItem(
plugin.handle,
plugin.url_for(
anime_search, **next_page_params
),
ListItem('Next Page'),
True
)
endOfDirectory(plugin.handle)
```
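The `are_pages_remaining` check derives the total page count from the API's result count and the request limit; a worked instance of the arithmetic:
```python
import math

count, limit, page = 23, 10, 2
total_pages = math.ceil(float(count) / limit)  # 3 pages of up to 10 results
print(total_pages > page)                      # True: page 3 still remains
```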
#### File: lib/routes/episodelist.py
```python
import logging
import xbmcaddon
from xbmcgui import ListItem
from xbmcplugin import addDirectoryItem, endOfDirectory
from resources.lib.router_factory import get_router_instance
from resources.lib.routes.videosources import video_sources
ADDON = xbmcaddon.Addon()
logger = logging.getLogger(ADDON.getAddonInfo('id'))
def generate_routes(plugin):
plugin.add_route(episode_list, "/episode-list")
return plugin
def episode_list():
plugin = get_router_instance()
anime_id = plugin.args["id"][0]
anime_list_id = plugin.args["listId"][0]
episode_count = plugin.args["episode_count"][0]
episode_str = ADDON.getLocalizedString(32004)
for i in range(int(episode_count)):
episode = str(i + 1)
addDirectoryItem(
plugin.handle,
plugin.url_for(
video_sources,
id=anime_id,
listId=anime_list_id,
episode=episode
),
ListItem(episode_str % episode),
True
)
endOfDirectory(plugin.handle)
```
#### File: lib/routes/playsource.py
```python
import xbmc
import requests
import logging
import xbmcaddon
import resolveurl
from bs4 import BeautifulSoup
from xbmcgui import ListItem
from xbmcplugin import addDirectoryItem, endOfDirectory
from resources.lib.router_factory import get_router_instance
from resources.lib.embed_processors import mp4upload, streamango
from resources.lib.animepie_exception import AnimePieException
ADDON = xbmcaddon.Addon()
logger = logging.getLogger(ADDON.getAddonInfo('id'))
def generate_routes(plugin):
plugin.add_route(play_source, "/video-source/play")
return plugin
def play_source():
plugin = get_router_instance()
website_name = plugin.args["website_name"][0]
source_url = plugin.args["source_url"][0]
logger.debug("Website: " + website_name)
logger.debug("Source URL: " + source_url)
embedded_processors = {
"MP4UPLOAD": mp4upload,
"STREAMANGO": streamango,
}
decrypted_source = None
processor = embedded_processors.get(website_name.split(".")[1].upper(), None)
try:
        if processor:
            res = requests.get(source_url)
            soup = BeautifulSoup(res.text, 'html.parser')
            err, decrypted_source = processor.retrieve_source_url(soup)
            if err:
                raise err
        else:
            # For sources without custom logic, use the resolveurl package
            decrypted_source = resolveurl.resolve(source_url)
logger.debug(decrypted_source)
if not processor and not decrypted_source:
raise AnimePieException(ADDON.getLocalizedString(32001))
elif decrypted_source:
play_item = ListItem(path=decrypted_source)
xbmc.Player().play(decrypted_source, play_item)
except AnimePieException as e:
logger.error(e.args)
xbmc.executebuiltin("Notification(Error," + e.args[0] + ")")
```
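The processor lookup keys on the second label of the source's hostname. Assuming a host of the form `www.<site>.com`, the mapping works like this:
```python
# "www.mp4upload.com".split(".") -> ["www", "mp4upload", "com"],
# so index 1 upper-cased matches the MP4UPLOAD key in embedded_processors.
website_name = "www.mp4upload.com"
print(website_name.split(".")[1].upper())  # MP4UPLOAD
```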
#### File: lib/routes/test_animelist.py
```python
import sys
import os
import json
import unittest
from mock import call, patch, MagicMock, Mock, ANY
# TODO: Check get params of request to ensure those match what is expected
class TestAnimeList(unittest.TestCase):
def setUp(self):
self.dir_path = os.path.dirname(os.path.realpath(__file__))
self.mock_requests = MagicMock()
self.mock_xbmc_plugin = MagicMock()
self.mock_xbmc_gui = MagicMock()
self.mock_route_factory = MagicMock()
modules = {
"requests": self.mock_requests,
"xbmcplugin": self.mock_xbmc_plugin,
"xbmcgui": self.mock_xbmc_gui,
"xbmcadddon": MagicMock(),
"resolveurl": MagicMock(),
"resources.lib.router_factory": self.mock_route_factory
}
self.module_patcher = patch.dict('sys.modules', modules)
self.module_patcher.start()
def tearDown(self):
self.module_patcher.stop()
def test_generate_routes(self):
from resources.lib.routes.animelist import generate_routes, anime_list
mock_plugin = MagicMock()
generate_routes(mock_plugin)
mock_plugin.add_route.assert_has_calls([
call(anime_list, '/anime-list'),
])
def test_get_current_params_returns_values_if_passed_in(self):
from resources.lib.routes.animelist import _get_current_params
expected_year = "2000"
expected_season = "Winter"
expected_genre = "Test,Test2"
expected_page = "Page"
mock_plugin = type('', (), {})
mock_plugin.args = {
"year": [expected_year],
"season": [expected_season],
"genres": [expected_genre],
"page": [expected_page],
}
args = _get_current_params(mock_plugin)
self.assertDictEqual(args, {
"year": expected_year,
"season": expected_season,
"genres": expected_genre,
"page": expected_page
}, "Returned parameter list does not match plugin.arg values")
def test_get_current_params_returns_empty_if_none(self):
from resources.lib.routes.animelist import _get_current_params
mock_plugin = type('', (), {})
mock_plugin.args = {}
args = _get_current_params(mock_plugin)
self.assertDictEqual(args, {}, "Returned parameter list does not match plugin.arg values")
def test_successful_retrieval_page_one_none_page(self):
handle_val = "Random"
mock_url = "Random-url"
mock_plugin = type('', (), {})
mock_plugin.args = {}
mock_plugin.handle = handle_val
mock_plugin.url_for = MagicMock()
fixture_path = self.dir_path + "/fixtures/animeList/list_success.json"
with open(fixture_path, "r") as fixture:
mock_response = fixture.read()
res_mock = MagicMock()
res_mock.json.return_value = json.loads(mock_response)
self.mock_requests.get.return_value = res_mock
from resources.lib.routes.animelist import anime_list
anime_list()
self.mock_xbmc_gui.ListItem.assert_has_calls([
call('Gintama.: Shirogane no Tamashii-hen 2'),
call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
call('Gintama.: Silver Soul Arc - Second Half War'),
call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
call('Gintama.: Shirogane no Tamashii-hen - Kouhan-sen'),
call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
call('Next Page')
])
def test_successful_retrieval_page_one_with_selected(self):
handle_val = "Random"
mock_url = "Random-url"
mock_plugin = type('', (), {})
mock_plugin.args = {
"season": ["Summer"],
"year": ["2018"],
"genres": ["Test1,Test2"],
"page": ["1"]
}
mock_plugin.handle = handle_val
mock_plugin.url_for = Mock(return_value=mock_url)
mock_route_factory = MagicMock()
mock_route_factory.get_router_instance = mock_plugin
sys.modules['resources.lib.router_factory'] = mock_route_factory
fixture_path = self.dir_path + "/fixtures/animeList/list_success.json"
with open(fixture_path, "r") as fixture:
mock_response = fixture.read()
res_mock = MagicMock()
res_mock.json = Mock(return_value=json.loads(mock_response))
self.mock_requests.get = Mock(return_value=res_mock)
from resources.lib.routes.animelist import anime_list
anime_list()
self.mock_requests.get.assert_called_once_with(
'https://api.animepie.to/Anime/AnimeMain/List',
params={
'sort': 1,
'website': '',
'genres': 'Test1,Test2',
'season': 'Summer',
'limit': 15,
'year': 2018,
'sort2': '',
'page': 1
}
)
self.mock_xbmc_gui.ListItem.assert_has_calls([
call('Gintama.: Shirogane no Tamashii-hen 2'),
call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
call('Gintama.: Silver Soul Arc - Second Half War'),
call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
call('Gintama.: Shirogane no Tamashii-hen - Kouhan-sen'),
call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
call('Next Page')
])
# Need to check for order of list items added
self.mock_xbmc_plugin.addDirectoryItem.assert_has_calls([
call(
handle_val,
mock_url,
ANY,
True
),
call(
handle_val,
mock_url,
ANY,
True
),
call(
handle_val,
mock_url,
ANY,
True
),
call(
handle_val,
mock_url,
ANY,
True
),
]
)
def test_successful_retrieval_no_next_on_last_page(self):
handle_val = "Random"
mock_url = "Random-url"
mock_plugin = type('', (), {})
mock_plugin.args = {
"season": ["Summer"],
"year": ["2018"],
"genres": ["Test1,Test2"],
"page": ["8"]
}
mock_plugin.handle = handle_val
mock_plugin.url_for = Mock(return_value=mock_url)
mock_route_factory = MagicMock()
mock_route_factory.get_router_instance = mock_plugin
sys.modules['resources.lib.router_factory'] = mock_route_factory
fixture_path = self.dir_path + "/fixtures/animeList/list_success.json"
with open(fixture_path, "r") as fixture:
mock_response = fixture.read()
res_mock = MagicMock()
res_mock.json = Mock(return_value=json.loads(mock_response))
self.mock_requests.get = Mock(return_value=res_mock)
from resources.lib.routes.animelist import anime_list
anime_list()
expected_list_item_calls = [
call('Gintama.: Shirogane no Tamashii-hen 2'),
call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
call('Gintama.: Silver Soul Arc - Second Half War'),
call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
call('Gintama.: Shirogane no Tamashii-hen - Kouhan-sen'),
call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
]
        self.assertEqual(self.mock_xbmc_gui.ListItem.call_count, 3)
self.mock_xbmc_gui.ListItem.assert_has_calls(expected_list_item_calls)
self.mock_requests.get.assert_called_once_with(
'https://api.animepie.to/Anime/AnimeMain/List',
params={
'sort': 1,
'website': '',
'genres': 'Test1,Test2',
'season': 'Summer',
'limit': 15,
'year': 2018,
'sort2': '',
'page': 8
}
)
# Need to check for order of list items added
expected_calls = [
call(
handle_val,
mock_url,
ANY,
True
),
call(
handle_val,
mock_url,
ANY,
True
),
call(
handle_val,
mock_url,
ANY,
True
),
]
        self.assertEqual(self.mock_xbmc_plugin.addDirectoryItem.call_count, 3)
self.mock_xbmc_plugin.addDirectoryItem.assert_has_calls(expected_calls)
```
|
{
"source": "jdolter/django-tagulous",
"score": 2
}
|
#### File: tests/tagulous_tests_app/cast.py
```python
class OldBase:
def __init__(self, v):
self.v = v
class Target(OldBase):
pass
class NewBase:
pass
```
|
{
"source": "jdomer/qarpo",
"score": 2
}
|
#### File: qarpo/qarpo/control_widgets.py
```python
from IPython.core.display import HTML
import threading
from IPython.display import display, Image
import ipywidgets as widgets
from ipywidgets import Layout
import time
import queue
import subprocess
import datetime
import matplotlib
import matplotlib.pyplot as plt
import os, pwd
import warnings
import json
import random
import io
import urllib, base64
import urllib.parse
from .disclaimer import *
from .demoutils_tabs import Interface
class ControlWidget(Interface):
def __init__(self, item, jobDict, Int_obj, command):
self.jobDict = jobDict
self.Int_obj = Int_obj
self.command = command
if item == "cancel_job":
self.button = self.addCancelButton()
elif item == "telemetry":
self.button = self.addTelemetryButton()
def addCancelButton(self):
#Cancel job button and function on click
cancel_job_button = widgets.Button(description='Cancel job', disabled=False, button_style='info')
def cancelJob(event):
if self.Int_obj.jobStillRunning(self.command):
cmd = 'qdel '+self.jobDict[self.command]['jobid']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
frame_id = self.jobDict[self.command]['box_id']
self.Int_obj.tab.set_title(str(frame_id), f'Done: {self.jobDict[self.command]["jobid"]}')
cancel_job_button.disabled=True
cancel_job_button.on_click(cancelJob)
return cancel_job_button
def addTelemetryButton(self):
telemetry_button = widgets.Button(description='Telemetry', disabled=False, button_style='info')
telemetry_status = widgets.HTML(value = "")
telemetry_box = widgets.VBox([telemetry_button, telemetry_status])
def displayTelemetry(event):
if self.Int_obj.jobStillRunning(self.command):
telemetry_status.value = "Telemetry results are not ready yet"
else:
telemetry_status.value = ""
URL = "https://devcloud.intel.com/edge/metrics/d/"+self.jobDict[self.command]['jobid']
script=f"<script>var win = window.open('{URL}', '_blank');</script>"
display(HTML ('''{}'''.format(script)))
telemetry_button.on_click(displayTelemetry)
return telemetry_box
```
|
{
"source": "jdominiczak/CumulusCI",
"score": 2
}
|
#### File: core/config/BaseGlobalConfig.py
```python
from __future__ import unicode_literals
import os
import warnings
from collections import OrderedDict
from cumulusci.core.utils import ordered_yaml_load, merge_config
from cumulusci.core.config.BaseProjectConfig import BaseProjectConfig
from cumulusci.core.config import BaseTaskFlowConfig
__location__ = os.path.dirname(os.path.realpath(__file__))
class BaseGlobalConfig(BaseTaskFlowConfig):
""" Base class for the global config which contains all configuration not specific to projects """
config_filename = "cumulusci.yml"
project_config_class = BaseProjectConfig
config_local_dir = ".cumulusci"
def __init__(self, config=None):
self.config_global_local = {}
self.config_global = {}
super(BaseGlobalConfig, self).__init__(config)
def get_project_config(self, *args, **kwargs):
""" Returns a ProjectConfig for the given project """
warnings.warn(
"BaseGlobalConfig.get_project_config is pending deprecation",
DeprecationWarning,
)
return self.project_config_class(self, *args, **kwargs)
@property
def config_global_local_path(self):
directory = os.path.join(os.path.expanduser("~"), self.config_local_dir)
if not os.path.exists(directory):
os.makedirs(directory)
config_path = os.path.join(directory, self.config_filename)
if not os.path.isfile(config_path):
return None
return config_path
@property
def config_global_path(self):
return os.path.abspath(
os.path.join(__location__, "..", "..", self.config_filename)
)
def _load_config(self):
""" Loads the local configuration """
# load the global config
with open(self.config_global_path, "r") as f_config:
config = ordered_yaml_load(f_config)
self.config_global = config
# Load the local config
if self.config_global_local_path:
            with open(self.config_global_local_path, "r") as f_local:
                config = ordered_yaml_load(f_local)
            self.config_global_local = config
self.config = merge_config(
OrderedDict(
[
("global_config", self.config_global),
("global_local", self.config_global_local),
]
)
)
```
#### File: cumulusci/core/sfdx.py
```python
import io
import logging
import sarge
import sys
logger = logging.getLogger(__name__)
def sfdx(command, username=None, log_note=None):
"""Call an sfdx command and capture its output.
Be sure to quote user input that is part of the command using `sarge.shell_format`.
Returns a `sarge` Command instance with returncode, stdout, stderr
"""
command = "sfdx {}".format(command)
if username:
command += sarge.shell_format(" -u {0}", username)
if log_note:
logger.info("{} with command: {}".format(log_note, command))
p = sarge.Command(
command,
stdout=sarge.Capture(buffer_size=-1),
stderr=sarge.Capture(buffer_size=-1),
shell=True,
)
p.run()
p.stdout_text = io.TextIOWrapper(p.stdout, encoding=sys.stdout.encoding)
p.stderr_text = io.TextIOWrapper(p.stderr, encoding=sys.stdout.encoding)
return p
```
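A sketch of calling it (the command and username are illustrative; `returncode`, `stdout_text`, and `stderr_text` come from the function above):
```python
p = sfdx("force:org:list --json", username="dev@example.com",
         log_note="Listing orgs")
if p.returncode == 0:
    print(p.stdout_text.read())
else:
    print(p.stderr_text.read())
```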
#### File: cumulusci/tasks/apexdoc.py
```python
from future import standard_library
standard_library.install_aliases()
import os
import tempfile
import urllib.request
from cumulusci.core.exceptions import CumulusCIException
from cumulusci.tasks.command import Command
class GenerateApexDocs(Command):
""" Generate Apex documentation from local code """
apexdoc_repo_url = "https://github.com/SalesforceFoundation/ApexDoc"
jar_file = "apexdoc.jar"
task_options = {
"tag": {
"description": "The tag to use for links back to the repo. If "
+ "not provided, source_url arg to ApexDoc is omitted."
},
"source_directory": {
"description": "The folder location which contains your apex "
+ ".cls classes. default=<RepoRoot>/src/classes/"
},
"out_dir": {
"description": "The folder location where documentation will be "
+ "generated to. Defaults to project config value "
+ "project/apexdoc/dir if present, otherwise uses repo root."
},
"home_page": {
"description": "The full path to an html file that contains the "
+ "contents for the home page's content area. Defaults to project "
+ "config value project/apexdoc/homepage if present, otherwise is "
+ "not used."
},
"banner_page": {
"description": "The full path to an html file that contains the "
+ "content for the banner section of each generated page. "
+ "Defaults to project config value project/apexdoc/banner if "
+ "present, otherwise is not used."
},
"scope": {
"description": "A semicolon separated list of scopes to "
+ "document. Defaults to project config value "
+ "project/apexdoc/scope if present, otherwise allows ApexDoc to "
+ "use its default (global;public;webService)."
},
"version": {
"description": "Version of ApexDoc to use. Defaults to project "
+ "config value project/apexdoc/version."
},
}
def _init_options(self, kwargs):
super(GenerateApexDocs, self)._init_options(kwargs)
self.options["command"] = None
if "source_directory" not in self.options:
self.options["source_directory"] = os.path.join(
self.project_config.repo_root, "src", "classes"
)
if "out_dir" not in self.options:
self.options["out_dir"] = (
self.project_config.project__apexdoc__dir
if self.project_config.project__apexdoc__dir
else self.project_config.repo_root
)
if "tag" not in self.options:
self.options["tag"] = None
if "home_page" not in self.options:
self.options["home_page"] = (
self.project_config.project__apexdoc__homepage
if self.project_config.project__apexdoc__homepage
else None
)
if "banner_page" not in self.options:
self.options["banner_page"] = (
self.project_config.project__apexdoc__banner
if self.project_config.project__apexdoc__banner
else None
)
if "scope" not in self.options:
self.options["scope"] = (
self.project_config.project__apexdoc__scope
if self.project_config.project__apexdoc__scope
else None
)
if "version" not in self.options:
if not self.project_config.project__apexdoc__version:
raise CumulusCIException("ApexDoc version required")
self.options["version"] = self.project_config.project__apexdoc__version
def _init_task(self):
super(GenerateApexDocs, self)._init_task()
self.working_dir = tempfile.mkdtemp()
self.jar_path = os.path.join(self.working_dir, self.jar_file)
if self.options["tag"] and not self.project_config.project__git__repo_url:
raise CumulusCIException("Repo URL not found in cumulusci.yml")
def _run_task(self):
self._get_jar()
cmd = "java -jar {} -s {} -t {}".format(
self.jar_path, self.options["source_directory"], self.options["out_dir"]
)
if self.options["tag"]:
cmd += " -g {}/blob/{}/src/classes/".format(
self.project_config.project__git__repo_url, self.options["tag"]
)
if self.options["home_page"]:
cmd += " -h {}".format(self.options["home_page"])
if self.options["banner_page"]:
cmd += " -a {}".format(self.options["banner_page"])
if self.options["scope"]:
cmd += ' -p "{}"'.format(self.options["scope"])
self.options["command"] = cmd
self._run_command({})
def _get_jar(self):
url = "{}/releases/download/{}/{}".format(
self.apexdoc_repo_url, self.options["version"], self.jar_file
)
urllib.request.urlretrieve(url, self.jar_path)
```
#### File: tasks/release_notes/provider.py
```python
import os
import pytz
import time
from datetime import datetime
from distutils.version import LooseVersion
import github3.exceptions
from cumulusci.core.exceptions import GithubApiError
from cumulusci.core.exceptions import GithubApiNotFoundError
class BaseChangeNotesProvider(object):
def __init__(self, release_notes_generator):
self.release_notes_generator = release_notes_generator
def __call__(self):
""" Subclasses should provide an implementation that returns an
iterable of each change note """
raise NotImplementedError()
class StaticChangeNotesProvider(BaseChangeNotesProvider):
def __init__(self, release_notes_generator, change_notes):
super(StaticChangeNotesProvider, self).__init__(release_notes_generator)
self.change_notes = change_notes
def __call__(self):
for change_note in self.change_notes:
yield change_note
class DirectoryChangeNotesProvider(BaseChangeNotesProvider):
def __init__(self, release_notes_generator, directory):
super(DirectoryChangeNotesProvider, self).__init__(release_notes_generator)
self.directory = directory
def __call__(self):
        for item in sorted(os.listdir(self.directory)):
            with open("{}/{}".format(self.directory, item)) as f:
                yield f.read()
class GithubChangeNotesProvider(BaseChangeNotesProvider):
""" Provides changes notes by finding all merged pull requests to
the default branch between two tags.
Expects the passed release_notes_generator instance to have a github_info
property that contains a dictionary of settings for accessing Github:
- github_repo
- github_owner
- github_username
- github_password
Will optionally use the following if provided by release_notes_generator:
- master_branch: Name of the default branch. Defaults to 'master'
- prefix_prod: Tag prefix for production release tags. Defaults to 'prod/'
"""
def __init__(self, release_notes_generator, current_tag, last_tag=None):
super(GithubChangeNotesProvider, self).__init__(release_notes_generator)
self.current_tag = current_tag
self._last_tag = last_tag
self._start_date = None
self._end_date = None
self.repo = release_notes_generator.get_repo()
self.github_info = release_notes_generator.github_info
def __call__(self):
for pull_request in self._get_pull_requests():
yield pull_request
@property
def last_tag(self):
if not self._last_tag:
self._last_tag = self._get_last_tag()
return self._last_tag
@property
def current_tag_info(self):
if not hasattr(self, "_current_tag_info"):
tag = self._get_tag_info(self.current_tag)
self._current_tag_info = {"tag": tag, "commit": self._get_commit_info(tag)}
return self._current_tag_info
@property
def last_tag_info(self):
if not hasattr(self, "_last_tag_info"):
if self.last_tag:
tag = self._get_tag_info(self.last_tag)
self._last_tag_info = {"tag": tag, "commit": self._get_commit_info(tag)}
else:
self._last_tag_info = None
return self._last_tag_info
def _get_commit_info(self, tag):
return self.repo.git_commit(tag.object.sha)
@property
def start_date(self):
return self._get_commit_date(self.current_tag_info["commit"])
@property
def end_date(self):
if self.last_tag_info:
return self._get_commit_date(self.last_tag_info["commit"])
def _get_commit_date(self, commit):
        t = time.strptime(commit.author["date"], "%Y-%m-%dT%H:%M:%SZ")
        # struct_time index 6 is the weekday, not microseconds, so don't pass it on
        return datetime(t[0], t[1], t[2], t[3], t[4], t[5], tzinfo=pytz.UTC)
def _get_tag_info(self, tag_name):
try:
tag = self.repo.ref("tags/{}".format(tag_name))
except github3.exceptions.NotFoundError:
raise GithubApiNotFoundError("Tag not found: {}".format(tag_name))
if tag.object.type != "tag":
raise GithubApiError(
"Tag {} is lightweight, must be annotated.".format(tag_name)
)
return self.repo.tag(tag.object.sha)
def _get_version_from_tag(self, tag):
if tag.startswith(self.github_info["prefix_prod"]):
return tag.replace(self.github_info["prefix_prod"], "")
elif tag.startswith(self.github_info["prefix_beta"]):
return tag.replace(self.github_info["prefix_beta"], "")
raise ValueError("Could not determine version number from tag {}".format(tag))
def _get_last_tag(self):
""" Gets the last release tag before self.current_tag """
current_version = LooseVersion(
self._get_version_from_tag(self.release_notes_generator.current_tag)
)
versions = []
for tag in self.repo.tags():
if not tag.name.startswith(self.github_info["prefix_prod"]):
continue
version = LooseVersion(self._get_version_from_tag(tag.name))
if version >= current_version:
continue
versions.append(version)
if versions:
versions.sort()
return "{}{}".format(self.github_info["prefix_prod"], versions[-1])
def _get_pull_requests(self):
""" Gets all pull requests from the repo since we can't do a filtered
date merged search """
for pull in self.repo.pull_requests(
state="closed", base=self.github_info["master_branch"], direction="asc"
):
if self._include_pull_request(pull):
yield pull
def _include_pull_request(self, pull_request):
""" Checks if the given pull_request was merged to the default branch
between self.start_date and self.end_date """
merged_date = pull_request.merged_at
if not merged_date:
return False
if self.last_tag:
last_tag_sha = self.last_tag_info["commit"].sha
if pull_request.merge_commit_sha == last_tag_sha:
# Github commit dates can be different from the merged_at date
return False
current_tag_sha = self.current_tag_info["commit"].sha
if pull_request.merge_commit_sha == current_tag_sha:
return True
# include PRs before current tag
if merged_date <= self.start_date:
if self.end_date:
# include PRs after last tag
if (
merged_date > self.end_date
and pull_request.merge_commit_sha != last_tag_sha
):
return True
else:
# no last tag, include all PRs before current tag
return True
return False
```
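`_get_last_tag` compares versions with `LooseVersion` rather than plain strings, which matters once a component passes 9:
```python
from distutils.version import LooseVersion

names = ["1.2", "1.10", "1.3"]
print(max(names))                    # "1.3"  -- lexicographic order, wrong
print(max(names, key=LooseVersion))  # "1.10" -- numeric order, what we want
```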
#### File: release_notes/tests/test_provider.py
```python
from future import standard_library
standard_library.install_aliases()
from datetime import datetime
from datetime import timedelta
import http.client
import mock
import os
import shutil
import tempfile
import unittest
from cumulusci.core.github import get_github_api
import requests
import responses
from cumulusci.core.exceptions import GithubApiError
from cumulusci.core.exceptions import GithubApiNotFoundError
from cumulusci.tasks.release_notes.generator import GithubReleaseNotesGenerator
from cumulusci.tasks.release_notes.provider import BaseChangeNotesProvider
from cumulusci.tasks.release_notes.provider import StaticChangeNotesProvider
from cumulusci.tasks.release_notes.provider import DirectoryChangeNotesProvider
from cumulusci.tasks.release_notes.provider import GithubChangeNotesProvider
from cumulusci.tasks.release_notes.exceptions import LastReleaseTagNotFoundError
from cumulusci.tasks.github.tests.util_github_api import GithubApiTestMixin
from cumulusci.tasks.release_notes.tests.utils import MockUtil
__location__ = os.path.split(os.path.realpath(__file__))[0]
date_format = "%Y-%m-%dT%H:%M:%SZ"
PARSER_CONFIG = [
{
"class_path": "cumulusci.tasks.release_notes.parser.GithubLinesParser",
"title": "Critical Changes",
},
{
"class_path": "cumulusci.tasks.release_notes.parser.GithubLinesParser",
"title": "Changes",
},
{
"class_path": "cumulusci.tasks.release_notes.parser.GithubIssuesParser",
"title": "Issues Closed",
},
]
class TestBaseChangeNotesProvider(unittest.TestCase):
def test_init(self):
provider = BaseChangeNotesProvider("test")
assert provider.release_notes_generator == "test"
def test_call_raises_notimplemented(self):
provider = BaseChangeNotesProvider("test")
self.assertRaises(NotImplementedError, provider.__call__)
class TestStaticChangeNotesProvider(unittest.TestCase):
def test_empty_list(self):
provider = StaticChangeNotesProvider("test", [])
assert list(provider()) == []
def test_single_item_list(self):
provider = StaticChangeNotesProvider("test", ["abc"])
assert list(provider()) == ["abc"]
def test_multi_item_list(self):
provider = StaticChangeNotesProvider("test", ["abc", "d", "e"])
assert list(provider()) == ["abc", "d", "e"]
class TestDirectoryChangeNotesProvider(unittest.TestCase):
def get_empty_dir(self):
tempdir = tempfile.mkdtemp()
return os.path.join(tempdir)
def get_dir_content(self, path):
dir_content = []
for item in sorted(os.listdir(path)):
item_path = "{}/{}".format(path, item)
dir_content.append(open(item_path, "r").read())
return dir_content
def test_empty_directory(self):
directory = self.get_empty_dir()
provider = DirectoryChangeNotesProvider("test", directory)
dir_content = self.get_dir_content(directory)
assert list(provider()) == dir_content
shutil.rmtree(directory)
def test_single_item_directory(self):
directory = "{}/change_notes/single/".format(__location__)
provider = DirectoryChangeNotesProvider("test", directory)
dir_content = self.get_dir_content(directory)
assert list(provider()) == dir_content
def test_multi_item_directory(self):
directory = "{}/change_notes/multi/".format(__location__)
provider = DirectoryChangeNotesProvider("test", directory)
dir_content = self.get_dir_content(directory)
assert list(provider()) == dir_content
class TestGithubChangeNotesProvider(unittest.TestCase, GithubApiTestMixin):
def setUp(self):
# Set up the mock release_tag lookup response
self.repo_api_url = "https://api.github.com/repos/TestOwner/TestRepo"
# Tag that does not exist
self.invalid_tag = "release/1.4"
# The current production release
self.current_tag = "release/1.3"
# The previous beta release
self.beta_tag = "beta/1.3-Beta_1"
# The previous production release with no change notes vs 1.3
self.last_tag = "release/1.2"
# The second previous production release with one change note vs 1.3
self.last2_tag = "release/1.1"
# The third previous production release with three change notes vs 1.3
self.last3_tag = "release/1.0"
self.current_tag_sha = self._random_sha()
self.beta_tag_sha = self._random_sha()
self.current_tag_commit_sha = self._random_sha()
self.current_tag_commit_date = datetime.utcnow()
self.last_tag_sha = self._random_sha()
self.last_tag_commit_sha = self._random_sha()
self.last_tag_commit_date = datetime.utcnow() - timedelta(days=1)
self.last2_tag_sha = self._random_sha()
self.gh = get_github_api("TestUser", "TestPass")
self.init_github()
self.mock_util = MockUtil("TestOwner", "TestRepo")
def _create_generator(self, current_tag, last_tag=None):
generator = GithubReleaseNotesGenerator(
self.gh,
self.github_info.copy(),
PARSER_CONFIG,
current_tag,
last_tag=last_tag,
)
return generator
def _mock_current_tag(self):
api_url = "{}/git/tags/{}".format(self.repo_api_url, self.current_tag_sha)
expected_response = self._get_expected_tag(
self.current_tag,
self.current_tag_commit_sha,
self.current_tag_sha,
self.current_tag_commit_date,
)
responses.add(method=responses.GET, url=api_url, json=expected_response)
return expected_response
def _mock_current_tag_commit(self):
api_url = "{}/git/commits/{}".format(
self.repo_api_url, self.current_tag_commit_sha
)
expected_response = {
"author": {
"name": "<NAME>",
"email": "<EMAIL>",
"date": datetime.strftime(self.current_tag_commit_date, date_format),
},
"committer": None,
"message": "",
"parents": [],
"sha": self.current_tag_commit_sha,
"tree": {"sha": "", "url": ""},
"url": "",
"verification": None,
}
responses.add(method=responses.GET, url=api_url, json=expected_response)
def _mock_current_tag_ref(self):
api_url = "{}/git/refs/tags/{}".format(self.repo_api_url, self.current_tag)
expected_response_current_tag_ref = self._get_expected_tag_ref(
self.current_tag, self.current_tag_sha
)
responses.add(
method=responses.GET, url=api_url, json=expected_response_current_tag_ref
)
def _mock_invalid_tag(self):
api_url = "{}/git/refs/tags/{}".format(self.repo_api_url, self.invalid_tag)
expected_response = {
"message": "Not Found",
"documentation_url": "https://developer.github.com/v3",
}
responses.add(
method=responses.GET,
url=api_url,
json=expected_response,
status=http.client.NOT_FOUND,
)
def _mock_last_tag(self):
api_url = "{}/git/tags/{}".format(self.repo_api_url, self.last_tag_sha)
expected_response = self._get_expected_tag(
self.last_tag,
self.last_tag_commit_sha,
self.last_tag_sha,
self.last_tag_commit_date,
)
responses.add(method=responses.GET, url=api_url, json=expected_response)
return expected_response
def _mock_last_tag_commit(self):
api_url = "{}/git/commits/{}".format(
self.repo_api_url, self.last_tag_commit_sha
)
expected_response = {
"author": {
"name": "<NAME>",
"date": datetime.strftime(self.last_tag_commit_date, date_format),
},
"committer": None,
"message": "",
"parents": [],
"sha": self.last_tag_commit_sha,
"tree": {"sha": "", "url": ""},
"url": "",
"verification": None,
}
responses.add(method=responses.GET, url=api_url, json=expected_response)
def _mock_last_tag_ref(self):
api_url = "{}/git/refs/tags/{}".format(self.repo_api_url, self.last_tag)
expected_response_last_tag_ref = self._get_expected_tag_ref(
self.last_tag, self.last_tag_sha
)
responses.add(
method=responses.GET, url=api_url, json=expected_response_last_tag_ref
)
def _mock_list_pull_requests_one_in_range(self):
api_url = "{}/pulls".format(self.repo_api_url)
expected_response = [
self._get_expected_pull_request(
1, 101, "pull 1", datetime.utcnow() - timedelta(seconds=60)
),
self._get_expected_pull_request(
2, 102, "pull 2", datetime.utcnow() - timedelta(days=4)
),
self._get_expected_pull_request(
3, 103, "pull 3", datetime.utcnow() - timedelta(days=5)
),
]
responses.add(method=responses.GET, url=api_url, json=expected_response)
def _mock_list_pull_requests_multiple_in_range(self):
api_url = "{}/pulls".format(self.repo_api_url)
expected_response = [
self._get_expected_pull_request(
1, 101, "pull 1", datetime.utcnow() - timedelta(seconds=60)
),
self._get_expected_pull_request(
2, 102, "pull 2", datetime.utcnow() - timedelta(seconds=90)
),
self._get_expected_pull_request(
3, 103, "pull 3", datetime.utcnow() - timedelta(seconds=120)
),
self._get_expected_pull_request(
4, 104, "pull 4", datetime.utcnow() - timedelta(days=4)
),
self._get_expected_pull_request(
5, 105, "pull 5", datetime.utcnow() - timedelta(days=5)
),
self._get_expected_pull_request(6, 106, "pull 6", None),
self._get_expected_pull_request(
7,
107,
"pull 7",
datetime.utcnow() - timedelta(seconds=180),
merge_commit_sha=self.last_tag_commit_sha,
),
self._get_expected_pull_request(
8,
108,
"pull 8",
datetime.utcnow(),
merge_commit_sha=self.current_tag_commit_sha,
),
]
responses.add(method=responses.GET, url=api_url, json=expected_response)
def _mock_list_tags_multiple(self):
api_url = "{}/tags".format(self.repo_api_url)
expected_response = [
self._get_expected_repo_tag(self.current_tag, self.current_tag_sha),
self._get_expected_repo_tag(self.beta_tag, self.beta_tag_sha),
self._get_expected_repo_tag(self.last_tag, self.last_tag_sha),
self._get_expected_repo_tag(self.last2_tag, self.last2_tag_sha),
]
responses.add(method=responses.GET, url=api_url, json=expected_response)
def _mock_list_tags_single(self):
api_url = "{}/tags".format(self.repo_api_url)
expected_response = [
self._get_expected_repo_tag(self.current_tag, self.current_tag_sha)
]
responses.add(method=responses.GET, url=api_url, json=expected_response)
@responses.activate
def test_invalid_current_tag(self):
self.mock_util.mock_get_repo()
self._mock_invalid_tag()
generator = self._create_generator(self.invalid_tag)
provider = GithubChangeNotesProvider(generator, self.invalid_tag)
with self.assertRaises(GithubApiNotFoundError):
provider.current_tag_info
@responses.activate
def test_current_tag_is_lightweight(self):
self.mock_util.mock_get_repo()
tag = "release/lightweight"
generator = self._create_generator(tag)
provider = GithubChangeNotesProvider(generator, tag)
api_url = "{}/git/refs/tags/{}".format(self.repo_api_url, tag)
responses.add(
method=responses.GET,
url=api_url,
json={
"object": {"type": "commit", "url": "", "sha": ""},
"url": "",
"ref": "tags/{}".format(tag),
},
)
with self.assertRaises(GithubApiError):
provider.current_tag_info
@responses.activate
def test_current_tag_without_last(self):
self.mock_util.mock_get_repo()
self._mock_current_tag_ref()
expected_current_tag = self._mock_current_tag()
self._mock_current_tag_commit()
self._mock_last_tag_ref()
expected_last_tag = self._mock_last_tag()
self._mock_last_tag_commit()
self._mock_list_tags_multiple()
generator = self._create_generator(self.current_tag)
provider = GithubChangeNotesProvider(generator, self.current_tag)
current_tag = provider.current_tag_info["tag"]
last_tag = provider.last_tag_info["tag"]
self.assertEqual(current_tag.tag, expected_current_tag["tag"])
self.assertEqual(last_tag.tag, expected_last_tag["tag"])
@responses.activate
def test_current_tag_without_last_no_last_found(self):
self.mock_util.mock_get_repo()
self._mock_current_tag_ref()
self._mock_current_tag()
self._mock_current_tag_commit()
self._mock_list_tags_single()
generator = self._create_generator(self.current_tag)
provider = GithubChangeNotesProvider(generator, self.current_tag)
self.assertEqual(provider.last_tag, None)
self.assertEqual(provider.last_tag_info, None)
@responses.activate
def test_no_pull_requests_in_repo(self):
self.mock_util.mock_get_repo()
# Mock the tag calls
self._mock_current_tag_ref()
self._mock_current_tag()
self._mock_current_tag_commit()
self._mock_last_tag_ref()
self._mock_last_tag()
self._mock_last_tag_commit()
# Mock the list all pull requests call
api_url = "{}/pulls".format(self.repo_api_url)
responses.add(
method=responses.GET, url=api_url, json=[], content_type="application/json"
)
generator = self._create_generator(self.current_tag, self.last_tag)
provider = GithubChangeNotesProvider(generator, self.current_tag, self.last_tag)
self.assertEqual(list(provider()), [])
@responses.activate
def test_no_pull_requests_in_range(self):
self.mock_util.mock_get_repo()
# Mock the tag calls
self._mock_current_tag_ref()
self._mock_current_tag()
self._mock_current_tag_commit()
self._mock_last_tag_ref()
self._mock_last_tag()
self._mock_last_tag_commit()
# Mock the list all pull requests call
api_url = "{}/pulls".format(self.repo_api_url)
expected_pull_request_1 = self._get_expected_pull_request(
pull_id=1,
issue_number=101,
body="pull 1",
merged_date=datetime.utcnow() - timedelta(days=2),
)
expected_response_list_pull_requests = [expected_pull_request_1]
responses.add(
method=responses.GET, url=api_url, json=expected_response_list_pull_requests
)
generator = self._create_generator(self.current_tag, self.last_tag)
provider = GithubChangeNotesProvider(generator, self.current_tag, self.last_tag)
self.assertEqual(list(provider()), [])
@responses.activate
def test_one_pull_request_in_range(self):
self.mock_util.mock_get_repo()
# Mock the tag calls
self._mock_current_tag_ref()
self._mock_current_tag()
self._mock_current_tag_commit()
self._mock_last_tag_ref()
self._mock_last_tag()
self._mock_last_tag_commit()
self._mock_list_pull_requests_one_in_range()
generator = self._create_generator(self.current_tag, self.last_tag)
provider = GithubChangeNotesProvider(generator, self.current_tag, self.last_tag)
provider_list = list(provider())
pr_body_list = ["pull 1"]
self.assertEqual(len(provider_list), len(pr_body_list))
for pr, pr_body in zip(provider_list, pr_body_list):
self.assertEqual(pr.body, pr_body)
@responses.activate
def test_multiple_pull_requests_in_range(self):
self.mock_util.mock_get_repo()
# Mock the tag calls
self._mock_current_tag_ref()
self._mock_current_tag()
self._mock_current_tag_commit()
self._mock_last_tag_ref()
self._mock_last_tag()
self._mock_last_tag_commit()
self._mock_list_pull_requests_multiple_in_range()
generator = self._create_generator(self.current_tag, self.last_tag)
provider = GithubChangeNotesProvider(generator, self.current_tag, self.last_tag)
provider_list = list(provider())
        pr_body_list = ["pull 1", "pull 2", "pull 3", "pull 8"]
self.assertEqual(len(provider_list), len(pr_body_list))
for pr, pr_body in zip(provider_list, pr_body_list):
self.assertEqual(pr.body, pr_body)
@responses.activate
def test_pull_requests_with_no_last_tag(self):
self.mock_util.mock_get_repo()
# Mock the tag calls
self._mock_current_tag_ref()
self._mock_current_tag()
self._mock_current_tag_commit()
self._mock_last_tag_ref()
self._mock_last_tag()
self._mock_last_tag_commit()
self._mock_list_pull_requests_multiple_in_range()
generator = self._create_generator(self.current_tag)
provider = GithubChangeNotesProvider(generator, self.current_tag)
provider._get_last_tag = mock.Mock(return_value=None)
provider_list = list(provider())
        pr_body_list = [
"pull 1",
"pull 2",
"pull 3",
"pull 4",
"pull 5",
"pull 7",
"pull 8",
]
self.assertEqual(len(provider_list), len(pr_body_list))
for pr, pr_body in zip(provider_list, pr_body_list):
self.assertEqual(pr.body, pr_body)
@responses.activate
def test_get_version_from_tag(self):
self.mock_util.mock_get_repo()
tag = "beta/1.0-Beta_1"
generator = self._create_generator(tag)
provider = GithubChangeNotesProvider(generator, tag)
self.assertEqual("1.0-Beta_1", provider._get_version_from_tag(tag))
with self.assertRaises(ValueError):
provider._get_version_from_tag("bogus")
```
#### File: release_notes/tests/utils.py
```python
from future import standard_library
standard_library.install_aliases()
import http.client
import responses
from cumulusci.tasks.github.tests.util_github_api import GithubApiTestMixin
class MockUtil(GithubApiTestMixin):
BASE_HTML_URL = "https://github.com"
BASE_API_URL = "https://api.github.com"
def __init__(self, owner, repo):
self.owner = owner
self.repo = repo
self.init_github()
@property
def html_url(self):
return "{}/{}/{}".format(self.BASE_HTML_URL, self.owner, self.repo)
@property
def repo_url(self):
return "{}/repos/{}/{}".format(self.BASE_API_URL, self.owner, self.repo)
def mock_edit_release(self, body=None, draft=True, prerelease=False):
        if body is None:
body = "Test release body"
responses.add(
method=responses.PATCH,
url="{}/releases/1".format(self.repo_url),
json=self._get_expected_release(
"1", body=body, draft=draft, prerelease=prerelease
),
status=http.client.OK,
)
def mock_get_repo(self):
responses.add(
method=responses.GET,
url=self.repo_url,
json=self._get_expected_repo(self.owner, self.repo),
status=http.client.OK,
)
def mock_list_pulls(self):
responses.add(
method=responses.GET,
url="{}/pulls".format(self.repo_url),
json=[{"id": 1, "number": 1}],
status=http.client.OK,
)
def mock_get_release(self, tag, body):
responses.add(
method=responses.GET,
url="{}/releases/tags/{}".format(self.repo_url, tag),
json=self._get_expected_release(
tag, url="{}/releases/1".format(self.repo_url), body=body
),
status=http.client.OK,
)
def mock_post_comment(self, issue_number):
responses.add(
method=responses.POST,
url="{}/issues/{}/comments".format(self.repo_url, issue_number),
status=http.client.OK,
)
def mock_pull_request(self, pr_number, body, title=None):
        if title is None:
title = "Test Pull Request Title"
responses.add(
method=responses.GET,
url="{}/pulls/{}".format(self.repo_url, pr_number),
json=self._get_expected_pull_request(pr_number, pr_number, body=body),
status=http.client.OK,
)
```
#### File: robotframework/tests/TestLibrary.py
```python
class TestLibrary(object):
"""Documentation for the TestLibrary library."""
def library_keyword_one(self):
"""Keyword documentation with *bold* and _italics_"""
return "this is keyword one from TestLibrary.py"
def library_keyword_two(self):
return "this is keyword two from TestLibrary.py"
```
#### File: tasks/salesforce/BaseSalesforceMetadataApiTask.py
```python
from cumulusci.tasks.salesforce import BaseSalesforceTask
class BaseSalesforceMetadataApiTask(BaseSalesforceTask):
api_class = None
name = "BaseSalesforceMetadataApiTask"
def _get_api(self):
return self.api_class(self)
def _run_task(self):
api = self._get_api()
result = None
if api:
result = api()
self.return_values = result
return result
```
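Subclasses only need to point `api_class` at a callable that is constructed with the task and invoked to produce a result. A hypothetical minimal subclass (the API class here is invented to show the contract):
```python
from cumulusci.tasks.salesforce import BaseSalesforceMetadataApiTask

class MyMetadataApi(object):
    # Hypothetical API callable: constructed with the task, invoked to run.
    def __init__(self, task):
        self.task = task
    def __call__(self):
        return "result"

class MyMetadataTask(BaseSalesforceMetadataApiTask):
    api_class = MyMetadataApi
    name = "MyMetadataTask"
```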
#### File: salesforce/tests/test_UninstallLocalNamespacedBundles.py
```python
import mock
import unittest
from cumulusci.tasks.salesforce import UninstallLocalNamespacedBundles
from cumulusci.tests.util import create_project_config
from cumulusci.utils import temporary_dir
from .util import create_task
class TestUninstallLocalNamespacedBundles(unittest.TestCase):
@mock.patch("cumulusci.tasks.metadata.package.PackageXmlGenerator.__call__")
def test_get_destructive_changes(self, PackageXmlGenerator):
with temporary_dir() as path:
project_config = create_project_config()
project_config.config["project"]["package"]["namespace"] = "ns"
task = create_task(
UninstallLocalNamespacedBundles,
{"path": path, "managed": True, "filename_token": "%TOKEN%"},
project_config,
)
PackageXmlGenerator.return_value = "%TOKEN%"
self.assertEqual("ns__", task._get_destructive_changes())
```
#### File: salesforce/tests/test_UninstallPackagedIncremental.py
```python
import io
import mock
import os
import unittest
import zipfile
from cumulusci.tasks.salesforce import UninstallPackagedIncremental
from cumulusci.tests.util import create_project_config
from cumulusci.utils import temporary_dir
from .util import create_task
class TestUninstallPackagedIncremental(unittest.TestCase):
def test_get_destructive_changes(self):
with temporary_dir():
os.mkdir("src")
with open(os.path.join("src", "package.xml"), "w") as f:
f.write(
"""<?xml version="1.0" encoding="UTF-8"?>
<Package xmlns="http://soap.sforce.com/2006/04/metadata">
<types>
<members>Class1</members>
<members>Class2</members>
<name>ApexClass</name>
</types>
<types>
<members>Page1</members>
<name>ApexPage</name>
</types>
<types>
<name>Empty</name>
</types>
<version>43.0</version>
</Package>"""
)
project_config = create_project_config()
project_config.config["project"]["package"]["name"] = "TestPackage"
project_config.config["project"]["package"]["api_version"] = "43.0"
task = create_task(
UninstallPackagedIncremental,
{"ignore": {"ApexClass": ["Ignored"]}},
project_config,
)
zf = zipfile.ZipFile(io.BytesIO(), "w")
zf.writestr(
"package.xml",
"""<?xml version="1.0" encoding="UTF-8"?>
<Package xmlns="http://soap.sforce.com/2006/04/metadata">
<types>
<members>Test__c</members>
<name>CustomObject</name>
</types>
<types>
<members>Class1</members>
<members>Class2</members>
<members>Class3</members>
<members>Ignored</members>
<name>ApexClass</name>
</types>
<types>
<members>Page1</members>
<name>ApexPage</name>
</types>
<types>
<name>Empty</name>
</types>
<version>43.0</version>
</Package>""",
)
task._retrieve_packaged = mock.Mock(return_value=zf)
result = task._get_destructive_changes()
self.assertEqual(
"""<?xml version="1.0" encoding="UTF-8"?>
<Package xmlns="http://soap.sforce.com/2006/04/metadata">
<types>
<members>Class3</members>
<name>ApexClass</name>
</types>
<types>
<members>Test__c</members>
<name>CustomObject</name>
</types>
<version>43.0</version>
</Package>""",
result,
)
```
#### File: tasks/tests/test_pushfails.py
```python
import csv
import mock
import os.path
import unittest
from cumulusci.core.config import (
BaseGlobalConfig,
BaseProjectConfig,
TaskConfig,
OrgConfig,
)
from cumulusci.utils import temporary_dir
from cumulusci.core.keychain import BaseProjectKeychain
from cumulusci.tasks.salesforce.tests.util import create_task
from cumulusci.tasks.push.pushfails import ReportPushFailures
def error_record(gack=False, ErrorTitle="Unexpected Error"):  # type: (bool, str) -> dict
""" a record that looks like the object returned from the sobject api query we use """
return {
"attributes": {"type": "job"},
"SubscriberOrganizationKey": "<KEY>",
"PackagePushErrors": {
"totalSize": 1,
"records": [
{
"attributes": {"type": "error"},
"ErrorDetails": "None to be had",
"ErrorMessage": "There was an error number: 123456-765432 (-4532)"
if gack
else "Who knows?",
"ErrorSeverity": "Severe",
"ErrorTitle": ErrorTitle,
"ErrorType": "Error",
}
],
},
}
class TestPushFailureTask(unittest.TestCase):
    def test_run_task(self):
task = create_task(
ReportPushFailures,
options={"request_id": "123", "ignore_errors": "IgnoreMe"},
)
def _init_class():
task.sf = mock.Mock()
task.sf.query.side_effect = [
{
"done": True,
"totalSize": 2,
"records": [
error_record(ErrorTitle="IgnoreMe"),
error_record(gack=True),
{
"attributes": {"type": "job"},
"SubscriberOrganizationKey": "<KEY>",
},
],
},
{
"done": True,
"totalSize": 1,
"records": [
{
"OrgKey": "00Dxxx000000001",
"OrgName": "Test Org",
"OrgType": "Sandbox",
"OrgStatus": "Demo",
"InstanceName": "CSxx",
}
],
},
]
task._init_class = _init_class
with temporary_dir():
task()
self.assertEqual(2, task.sf.query.call_count)
self.assertTrue(
os.path.isfile(task.result), "the result file does not exist"
)
with open(task.result, "r") as f:
reader = csv.DictReader(f)
rows = list(reader)
self.assertEqual(len(rows), 2)
self.assertEqual(rows[1]["Stacktrace Id"], "-4532")
def test_run_task__no_results(self):
task = create_task(ReportPushFailures, options={"request_id": "123"})
def _init_class():
task.sf = mock.Mock()
task.sf.query.return_value = {"totalSize": 0, "records": [], "done": True}
task._init_class = _init_class
task()
self.assertFalse(os.path.isfile(task.options["result_file"]))
```
|
{
"source": "jdonaghue/ahgl-site",
"score": 2
}
|
#### File: ahgl/profiles/views.py
```python
from warnings import warn
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseForbidden, HttpResponse, Http404, HttpResponseRedirect
from django.views.generic import DetailView, ListView, UpdateView, CreateView, DeleteView
from django import forms
from django.forms import models as model_forms
from django.forms import ModelForm
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from django.template.loader import render_to_string
from django.utils import simplejson as json
from django.template import RequestContext
from django.db.models import Count
from django.template.defaultfilters import slugify
from django.db import IntegrityError
from django.contrib import messages
from idios.views import ProfileDetailView
from idios.utils import get_profile_model
from account.models import EmailAddress
from utils.views import ObjectPermissionsCheckMixin
from .models import Team, TeamMembership, Profile, Caster
from tournaments.models import TournamentRound, Tournament
class TournamentSlugContextView(object):
def get_context_data(self, **kwargs):
context = super(TournamentSlugContextView, self).get_context_data(**kwargs)
context['tournament_slug'] = self.kwargs.get('tournament')
"""try:
context['tournament'] = get_object_or_404(Tournament, slug=context['tournament_slug'])
except Tournament.DoesNotExist:
pass"""
return context
class TeamDetailView(TournamentSlugContextView, DetailView):
def get_context_data(self, **kwargs):
context = super(TeamDetailView, self).get_context_data(**kwargs)
context['is_captain'] = self.request.user.is_authenticated() and any((captain.profile.user_id == self.request.user.id for captain in self.object.captains))
return context
def get_queryset(self):
return Team.objects.filter(tournament=self.kwargs['tournament']).select_related('charity')
class TeamUpdateView(ObjectPermissionsCheckMixin, TournamentSlugContextView, UpdateView):
def get_queryset(self):
return Team.objects.filter(tournament=self.kwargs['tournament']).select_related('charity')
@property
def requested_approval(self):
return self.request.POST.get('submit') == 'approval'
def get_form_class(self):
view = self
class UpdateForm(ModelForm):
def __init__(self, *args, **kwargs):
super(UpdateForm, self).__init__(*args, **kwargs)
if view.requested_approval:
for key, field in self.fields.iteritems():
if key != 'approval':
field.required = True
def clean_approval(self):
value = self.cleaned_data.get('approval')
if view.requested_approval:
if value:
self.instance.status = "W"
else:
raise forms.ValidationError("Approval from your company is required.")
return value
class Meta:
model = Team
exclude = ('slug', 'tournament', 'rank', 'seed', 'members', 'status', 'paid', 'karma',)
return UpdateForm
# Override this so we can save self.object for get_success_url.
def form_valid(self, form):
form.save()
team = self.object = form.instance
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse("edit_team", kwargs=self.kwargs)
def check_permissions(self):
if not self.request.user.is_superuser and not self.object.team_membership.filter(captain=True, profile__user=self.request.user).count():
return HttpResponseForbidden("You are not captain of this team.")
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TeamUpdateView, self).dispatch(*args, **kwargs)
class TeamSignupView(CreateView):
model = Team
def get_form_class(self):
view = self
class TeamSignupForm(ModelForm):
char_name = forms.CharField(max_length=TeamMembership._meta.get_field('char_name').max_length,
required=True, label="Your character name", help_text=u"or Summoner name")
def __init__(self, *args, **kwargs):
super(TeamSignupForm, self).__init__(*args, **kwargs)
# Limit tournament choices to those just in the signup stage.
# via http://stackoverflow.com/q/291945/102704
self.fields['tournament'].queryset = Tournament.objects.filter(status='S')
class Meta:
model = Team
fields = [
'tournament',
'name', # Company name
'char_name',
]
def save(self, *args, **kwargs):
view.slug = self.instance.slug = slugify(self.cleaned_data['name'])
try:
super(TeamSignupForm, self).save(*args, **kwargs)
except IntegrityError:
messages.error(view.request, "Team not created - already exists for this tournament.")
else:
membership = TeamMembership(team=self.instance, profile=view.request.user.get_profile(), char_name=self.cleaned_data['char_name'], active=True, captain=True)
membership.save()
return TeamSignupForm
def get_success_url(self):
return reverse("edit_team", kwargs={"tournament": self.request.POST['tournament'], "slug": self.slug})
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not EmailAddress.objects.filter(user=request.user, verified=True).count():
return HttpResponseForbidden("Email verification required. Go here: http://afterhoursgaming.tv/account/settings/ enter your email and hit save. Then click the link in the email to verify. If you don't get an email, try changing it, saving, and changing it back.")
return super(TeamSignupView, self).dispatch(request, *args, **kwargs)
class TeamAdminView(ListView):
def get_queryset(self):
return TeamMembership.objects.filter(profile__user=self.request.user.id, captain=True)
def get_context_data(self, **kwargs):
# Probably a better way to do this with joins, but I never remember how
# to do that with Django. Sorry.
team_ids = set(m.team_id for m in self.get_queryset()) # Why isn't this already in self.queryset?
teams = Team.objects.filter(id__in=team_ids)
memberships = TeamMembership.objects.filter(team_id__in=team_ids)
return {
'teams': teams,
'memberships': memberships,
}
def get_template_names(self):
return "profiles/team_admin.html"
class TeamListView(TournamentSlugContextView, ListView):
def get_queryset(self):
return Team.objects.filter(tournament=self.kwargs['tournament']).only('name', 'slug', 'photo', 'tournament')
class StandingsView(TournamentSlugContextView, ListView):
def get_context_data(self, **kwargs):
ctx = super(StandingsView, self).get_context_data(**kwargs)
ctx["show_points"] = get_object_or_404(Tournament.objects.only('structure'), pk=self.kwargs['tournament']).structure == "I"
return ctx
def get_queryset(self):
return TournamentRound.objects.filter(tournament=self.kwargs['tournament'], published=True)
def get_template_names(self):
return "profiles/standings.html"
class TeamMembershipCreateView(CreateView):
model = TeamMembership
template_name = "profiles/membership_form.html"
context_object_name = "membership"
def get_form_class(self):
view = self
class MembershipCreateForm(ModelForm):
team = forms.ModelChoiceField(queryset=Team.objects.filter(team_membership__profile__user=view.request.user))
profile = forms.ModelChoiceField(queryset=Profile.objects.filter(slug=self.kwargs['slug']), initial=view.profile, widget=forms.HiddenInput())
class Meta:
model = TeamMembership
fields = ('char_name', 'team', 'profile')
def save(self, *args, **kwargs):
self.cleaned_data['profile'] = view.profile
return super(MembershipCreateForm, self).save(*args, **kwargs)
return MembershipCreateForm
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.profile = get_object_or_404(Profile, slug=kwargs['slug'])
return super(TeamMembershipCreateView, self).dispatch(request, *args, **kwargs)
class TeamMembershipUpdateView(ObjectPermissionsCheckMixin, UpdateView):
template_name = "idios/profile_edit.html"
template_name_ajax = "idios/profile_edit_ajax.html"
template_name_ajax_success = "idios/profile_edit_ajax_success.html"
context_object_name = "profile"
model = TeamMembership
def get_template_names(self):
if self.request.is_ajax():
return [self.template_name_ajax]
else:
return [self.template_name]
def get_context_data(self, **kwargs):
ctx = super(TeamMembershipUpdateView, self).get_context_data(**kwargs)
ctx["profile_form"] = ctx["form"]
return ctx
def get_form_class(self):
exclude = ["team", "profile"]
if not self.captain_user:
exclude += ["captain", "active"]
return model_forms.modelform_factory(TeamMembership, exclude=exclude)
def form_valid(self, form):
self.object = form.save()
if self.request.is_ajax():
data = {
"status": "success",
"location": self.object.get_absolute_url(),
"html": render_to_string(self.template_name_ajax_success),
}
return HttpResponse(json.dumps(data), content_type="application/json")
else:
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
if self.request.is_ajax():
ctx = RequestContext(self.request, self.get_context_data(form=form))
data = {
"status": "failed",
"html": render_to_string(self.template_name_ajax, ctx),
}
return HttpResponse(json.dumps(data), content_type="application/json")
else:
return self.render_to_response(self.get_context_data(form=form))
def check_permissions(self):
self.captain_user = bool(TeamMembership.objects.filter(team=self.object.team, profile__user=self.request.user, captain=True).count())
if self.object.profile.user != self.request.user and not self.captain_user:
return HttpResponseForbidden("This is not your membership to edit.")
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TeamMembershipUpdateView, self).dispatch(*args, **kwargs)
class TeamMembershipDeleteView(ObjectPermissionsCheckMixin, DeleteView):
context_object_name = "profile"
model = TeamMembership
def get_success_url(self):
return reverse("team_page", kwargs={"tournament": self.object.team.tournament.slug, "slug": self.object.team.slug})
def check_permissions(self):
self.captain_user = bool(TeamMembership.objects.filter(team=self.object.team, profile__user=self.request.user, captain=True).count())
if self.object.profile.user != self.request.user and not self.captain_user:
return HttpResponseForbidden("This is not your membership to delete.")
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TeamMembershipDeleteView, self).dispatch(*args, **kwargs)
class TeamMembershipView(TournamentSlugContextView, DetailView):
template_name = "profiles/player_profile.html"
context_object_name = "membership"
def get_queryset(self):
return TeamMembership.get(**self.kwargs)
def get_context_data(self, **kwargs):
ctx = super(TeamMembershipView, self).get_context_data(**kwargs)
ctx['is_me'] = self.request.user.is_authenticated() and self.request.user.id == self.object.profile.user_id
return ctx
def get_object(self, queryset=None):
if queryset is None:
queryset = self.get_queryset()
try:
obj = queryset.get()
except ObjectDoesNotExist:
raise Http404(_(u"No %(verbose_name)s found matching the query") %
{'verbose_name': queryset.model._meta.verbose_name})
return obj
class MVPView(TournamentSlugContextView, ListView):
template_name = "profiles/mvp.html"
context_object_name = "players"
def get_queryset(self):
return TeamMembership.objects.filter(team__tournament=self.kwargs.get('tournament'), game_wins__match__published=True).select_related('team', 'profile').annotate(win_count=Count('game_wins')).order_by('-win_count')
class MyProfileDetailView(ProfileDetailView):
def get_object(self):
queryset = get_profile_model().objects.select_related("user")
slug = self.kwargs.get("slug")
try:
if slug:
profile = get_object_or_404(queryset, slug=slug)
self.page_user = profile.user
return profile
except:
self.kwargs['username'] = slug
return super(MyProfileDetailView, self).get_object()
class CasterListView(ListView):
template_name = "profiles/casters.html"
context_object_name = "casters"
def get_queryset(self):
return Caster.objects.filter(tournament=self.kwargs.get('tournament')).order_by('-active', '?')
```
|
{
"source": "JDonaldM/Matryoshka",
"score": 3
}
|
#### File: Matryoshka/matryoshka/eft_funcs.py
```python
import numpy as np
def multipole(P_n, b, f, stochastic=None, kbins=None, ng=None,
multipole=None):
'''
Calculates the galaxy power spectrum multipole given a P_n matrix
that corresponds to the desired multipole.
Args:
P_n (list) : List of arrays ``[P11, Ploop, Pct]``. The arrays
should have shape ``(3, nk)``, ``(12, nk)``, and ``(6, nk)``
respectively.
b (array) : Array of bias parameters and counter terms.
        f (float) : Growth rate at the same redshift as ``P_n``.
        stochastic (array) : Input stochastic counterterms. Should have
            shape (3,). Default is ``None``, in which case no stochastic
            terms are used.
        kbins (array) : k-bins associated to ``P_n``. Only required if
            ``stochastic`` is not ``None``. Default is ``None``.
        ng (float) : Mean galaxy number density. Only required if ``stochastic``
            is not ``None``. Default is ``None``.
        multipole (int) : Desired multipole. Can either be 0 or 2. Default is
            ``None``. Only required if ``stochastic`` is not ``None``.
    Returns:
        The galaxy multipole.
'''
# The block of code is a slightly modified version of
# the code in cell 21 of the example PyBird notebook
# run_pybird.ipynb
b1, b2, b3, b4, b5, b6, b7 = b
b11 = np.array([ b1**2, 2.*b1*f, f**2 ])
bct = np.array([ 2.*b1*b5, 2.*b1*b6, 2.*b1*b7, 2.*f*b5, 2.*f*b6, 2.*f*b7 ])
bloop = np.array([ 1., b1, b2, b3, b4, b1*b1, b1*b2, b1*b3, b1*b4, b2*b2, b2*b4, b4*b4 ])
lin = np.einsum('b,bx->x', b11, P_n[0])
loop = np.einsum('b,bx->x', bloop, P_n[1])
counterterm = np.einsum('b,bx->x', bct, P_n[2])
if stochastic is not None and multipole==0:
return lin + loop + counterterm + stochastic[0]/ng + (stochastic[1]*kbins**2)/ng
elif stochastic is not None and multipole==2:
return lin + loop + counterterm + (stochastic[2]*kbins**2)/ng
else:
return lin + loop + counterterm
def multipole_vec(P_n, b, f, stochastic=None, kbins=None, ng=None,
multipole=None):
'''
Vectorized version of ``multipole`` that allows for multipoles to be calculated for
multiple cosmologies.
Args:
P_n (list) : List of arrays ``[P11, Ploop, Pct]``. The arrays
should have shape ``(nd, 3, nk)``, ``(nd, 12, nk)``, and ``(nd, 6, nk)``
respectively.
b (array) : Array of bias parameters and counter terms. Should have shape
(nd, 7).
f (float) : Growth rate at the same redshift as ``P_n``. Should have shape
(nd, 1).
stochastic (array) : Input stochastic counterterms. Should have
shape (n, 3). Default is ``None``, in which case no stochastic
terms are used.
kbins (array) : k-bins associated to ``P_n``. Only required if
``stochastic`` is not ``None``. Default is ``None``
ng (float) : Mean galaxy number density. Only required if ``stochastic``
is not ``None``. Default is ``None``.
        multipole (int) : Desired multipole. Can either be 0 or 2. Default is
            ``None``. Only required if ``stochastic`` is not ``None``.
Returns:
The galaxy multipoles.
'''
# The block of code is a slightly modified version of
# the code in cell 21 of the example PyBird notebook
# run_pybird.ipynb
b1, b2, b3, b4, b5, b6, b7 = np.split(b,7,axis=1)
b11 = np.array([ b1**2, 2.*b1*f, f**2 ])[:,:,0].T
bct = np.array([ 2.*b1*b5, 2.*b1*b6, 2.*b1*b7, 2.*f*b5, 2.*f*b6, 2.*f*b7 ])[:,:,0].T
bloop = np.array([ np.ones((b.shape[0],1)), b1, b2, b3, b4, b1*b1, b1*b2, b1*b3, b1*b4, b2*b2, b2*b4, b4*b4 ])[:,:,0].T
lin = np.einsum('nb,nbx->nx', b11, P_n[0])
loop = np.einsum('nb,nbx->nx', bloop, P_n[1])
counterterm = np.einsum('nb,nbx->nx', bct, P_n[2])
if stochastic is not None and multipole==0:
return lin + loop + counterterm + stochastic[:,0].reshape(-1,1)/ng + (stochastic[:,1].reshape(-1,1)*kbins**2)/ng
elif stochastic is not None and multipole==2:
return lin + loop + counterterm + (stochastic[:,2].reshape(-1,1)*kbins**2)/ng
else:
return lin + loop + counterterm
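# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): evaluate a
# multipole from random stand-in P_n arrays with the documented shapes. Real
# inputs would come from PyBird or the matryoshka emulator classes.
if __name__ == "__main__":
    nk = 50
    rng = np.random.default_rng(0)
    P_n = [rng.standard_normal((3, nk)),   # P11
           rng.standard_normal((12, nk)),  # Ploop
           rng.standard_normal((6, nk))]   # Pct
    b = np.ones(7)   # placeholder bias parameters and counterterms b1..b7
    f = 0.75         # placeholder growth rate
    P_ell = multipole(P_n, b, f)
    print(P_ell.shape)  # (nk,) - one value per k-bin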
```
#### File: Matryoshka/matryoshka/emulator.py
```python
from tensorflow.keras.models import load_model
import numpy as np
from .training_funcs import UniformScaler, LogScaler
#from halomod.concentration import Duffy08
#from hmf.halos.mass_definitions import SOMean
from .halo_model_funcs import Duffy08cmz
from . import halo_model_funcs
from . import eft_funcs
from scipy.interpolate import interp1d
import os
import pathlib
# Path to directory containing the NN weights as well as scalers needed to produce
# predictions with the NNs.
cache_path = os.fsdecode(pathlib.Path(os.path.dirname(__file__)
).parent.absolute())+"/matryoshka-data/"
# Define list of redshifts where there are trained NNs
matter_boost_zlist = ['0', '0.5', '1']
galaxy_boost_zlist = ['0.57']
# Define lists of relevant parameters for T(k) for each of the emulator versions.
relevant_transfer = {'class_aemulus':[0, 1, 3, 5, 6],
'QUIP':[0, 1, 2]}
# Define some dictionaries that map which index of X_COSMO matches which parameter
# for the different emulator versions.
parameter_ids = {'class_aemulus':{'Om':0,'Ob':1,'sigma8':2,'h':3,'ns':4,'Neff':5,'w0':6},
'QUIP':{'Om':0,'Ob':1,'h':2,'ns':3,'sigma8':4}}
# Default k values where PyBird makes predictions. Needed by the EFT emulators.
kbird = np.array([0.001, 0.005, 0.0075, 0.01, 0.0125, 0.015, 0.0175, 0.02, 0.025, 0.03,
0.035, 0.04, 0.045, 0.05, 0.055, 0.06, 0.065, 0.07, 0.075, 0.08, 0.085,
0.09, 0.095, 0.1, 0.105, 0.11, 0.115, 0.12, 0.125, 0.13, 0.135, 0.14,
0.145, 0.15, 0.155, 0.16, 0.17, 0.18, 0.19, 0.2, 0.21, 0.22, 0.23, 0.24,
0.25, 0.26, 0.27, 0.28, 0.29, 0.3])
class Transfer:
'''
    Class for the transfer function component emulator.
    On initialisation the weights for the NN ensemble will be loaded,
along with the scalers required to make predictions with the NNs.
Args:
version (str) : String to specify what version of the emulator to
load. Default is 'class_aemulus'.
.. note::
See the `Basic emulator usage <../example_notebooks/transfer_basic.ipynb>`_
example.
'''
def __init__(self, version='class_aemulus'):
self.kbins = np.logspace(-4, 1, 300)
'''The k-bins at which predictions will be made.'''
self.relevant_params = relevant_transfer[version]
models_path = cache_path+version+"/"+"models/transfer/"
# Load the ensemble of NNs that makes up the T(k) emulator.
models = list()
for member in os.listdir(models_path):
model = load_model(models_path+member,
compile=False)
models.append(model)
self.models = models
'''A list containing the individual ensemble members.'''
scalers_path = cache_path+version+"/"+"scalers/transfer/"
xscaler = UniformScaler()
yscaler = LogScaler()
# Load the variables that define the scalers.
xmin_diff = np.load(scalers_path+"xscaler_min_diff.npy")
ymin_diff = np.load(scalers_path+"yscaler_min_diff.npy")
xscaler.min_val = xmin_diff[0, :]
xscaler.diff = xmin_diff[1, :]
yscaler.min_val = ymin_diff[0, :]
yscaler.diff = ymin_diff[1, :]
self.scalers = (xscaler, yscaler)
def emu_predict(self, X, mean_or_full='mean'):
'''
Make predictions with the component emulator.
Args:
X (array) : Array containing the relevant input parameters. If making
a single prediction should have shape (d,), if a batch prediction
should have the shape (N,d).
mean_or_full (str) : Can be either 'mean' or 'full'. Determines if the
ensemble mean prediction should be returned, or the predictions
from each ensemble member (default is 'mean').
Returns:
Array containing the predictions from the component emulator. Array
will have shape (m,n,k). If mean_or_full='mean' will have shape (n,k).
'''
        # If making a prediction on a single parameter set, input array needs to
# be reshaped.
X = np.atleast_2d(X)[:,self.relevant_params]
X_prime = self.scalers[0].transform(X)
if mean_or_full == "mean":
preds = 0
for i in range(len(self.models)):
preds += self.scalers[1].inverse_transform(
self.models[i](X_prime))
return preds/float(len(self.models))
elif mean_or_full == "full":
preds = np.zeros(
(len(self.models), X_prime.shape[0], self.kbins.shape[0]))
for i in range(len(self.models)):
preds[i, :, :] = self.scalers[1].inverse_transform(
self.models[i](X_prime))
return preds
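# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): query the
# transfer function component emulator directly. Assumes the matryoshka-data
# cache is present on disk; the cosmology values below are placeholders.
def _transfer_demo():
    emu = Transfer(version='class_aemulus')
    # Om, Ob, sigma8, h, ns, Neff, w0 (see parameter_ids above).
    X = np.array([0.3, 0.05, 0.8, 0.7, 0.96, 3.046, -1.0])
    T_mean = emu.emu_predict(X, mean_or_full='mean')  # shape (1, 300)
    T_full = emu.emu_predict(X, mean_or_full='full')  # shape (n_members, 1, 300)
    return T_mean, T_full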
class Sigma:
'''
    Class for the mass variance component emulator.
    On initialisation the weights for the NN ensemble will be loaded,
along with the scalers required to make predictions with the NNs.
Args:
version (str) : String to specify what version of the emulator to
load. Default is 'class_aemulus'.
'''
def __init__(self, version='class_aemulus'):
# Assume that all versions use the same mass bins.
# TODO: Make this more general.
self.mbins = np.load(cache_path+"AEMULUS-class_ms-test.npy")
'''The m-bins at which predictions will be made.'''
models_path = cache_path+version+"/"+"models/sigma/"
# Load the ensemble of NNs that makes up the sigma(m) emulator.
models = list()
for member in os.listdir(models_path):
model = load_model(models_path+member,
compile=False)
models.append(model)
self.models = models
'''A list containing the individual ensemble members.'''
scalers_path = cache_path+version+"/"+"scalers/sigma/"
xscaler = UniformScaler()
yscaler = LogScaler()
# Load the variables that define the scalers.
xmin_diff = np.load(scalers_path+"xscaler_min_diff.npy")
ymin_diff = np.load(scalers_path+"yscaler_min_diff.npy")
xscaler.min_val = xmin_diff[0, :]
xscaler.diff = xmin_diff[1, :]
yscaler.min_val = ymin_diff[0, :]
yscaler.diff = ymin_diff[1, :]
self.scalers = (xscaler, yscaler)
def emu_predict(self, X, mean_or_full='mean'):
'''
Make predictions with the component emulator.
Args:
X (array) : Array containing the relevant input parameters. If making
a single prediction should have shape (d,), if a batch prediction
should have the shape (N,d).
mean_or_full : Can be either 'mean' or 'full'. Determines if the
ensemble mean prediction should be returned, or the predictions
from each ensemble member (default is 'mean').
Returns:
Array containing the predictions from the component emulator. Array
will have shape (m,n,k). If mean_or_full='mean' will have shape (n,k).
'''
        # If making a prediction on a single parameter set, input array needs to
# be reshaped.
X = np.atleast_2d(X)
X_prime = self.scalers[0].transform(X)
if mean_or_full == "mean":
preds = 0
for i in range(len(self.models)):
preds += self.scalers[1].inverse_transform(
self.models[i](X_prime))
return preds/float(len(self.models))
elif mean_or_full == "full":
preds = np.zeros(
(len(self.models), X_prime.shape[0], self.mbins.shape[0]))
for i in range(len(self.models)):
preds[i, :, :] = self.scalers[1].inverse_transform(
self.models[i](X_prime))
return preds
class SigmaPrime:
'''
    Class for the mass variance logarithmic derivative component emulator.
    On initialisation the weights for the NN ensemble will be loaded,
along with the scalers required to make predictions with the NNs.
Args:
version (str) : String to specify what version of the emulator to
load. Default is 'class_aemulus'.
'''
def __init__(self, version='class_aemulus'):
# Assume that all versions use the same mass bins.
# TODO: Make this more general.
self.mbins = np.load(cache_path+"AEMULUS-class_ms-test.npy")
'''The m-bins at which predictions will be made.'''
models_path = cache_path+version+"/"+"models/dlns/"
# Load the ensemble of NNs that makes up the dlns(m) emulator.
models = list()
for member in os.listdir(models_path):
model = load_model(models_path+member,
compile=False)
models.append(model)
self.models = models
'''A list containing the individual ensemble members.'''
scalers_path = cache_path+version+"/"+"scalers/dlns/"
xscaler = UniformScaler()
yscaler = UniformScaler()
# Load the variables that define the scalers.
xmin_diff = np.load(scalers_path+"xscaler_min_diff.npy")
ymin_diff = np.load(scalers_path+"yscaler_min_diff.npy")
xscaler.min_val = xmin_diff[0, :]
xscaler.diff = xmin_diff[1, :]
yscaler.min_val = ymin_diff[0, :]
yscaler.diff = ymin_diff[1, :]
self.scalers = (xscaler, yscaler)
def emu_predict(self, X, mean_or_full='mean'):
'''
Make predictions with the component emulator.
Args:
X (array) : Array containing the relevant input parameters. If making
a single prediction should have shape (d,), if a batch prediction
should have the shape (N,d).
mean_or_full : Can be either 'mean' or 'full'. Determines if the
ensemble mean prediction should be returned, or the predictions
from each ensemble member (default is 'mean').
Returns:
Array containing the predictions from the component emulator. Array
will have shape (m,n,k). If mean_or_full='mean' will have shape (n,k).
'''
        # If making a prediction on a single parameter set, input array needs to
# be reshaped.
X = np.atleast_2d(X)
X_prime = self.scalers[0].transform(X)
if mean_or_full == "mean":
preds = 0
for i in range(len(self.models)):
preds += self.scalers[1].inverse_transform(
self.models[i](X_prime))
return preds/float(len(self.models))
elif mean_or_full == "full":
preds = np.zeros(
(len(self.models), X_prime.shape[0], self.mbins.shape[0]))
for i in range(len(self.models)):
preds[i, :, :] = self.scalers[1].inverse_transform(
self.models[i](X_prime))
return preds
class Growth:
'''
    Class for the growth function component emulator.
    On initialisation the weights for the NN ensemble will be loaded,
along with the scalers required to make predictions with the NNs.
Args:
version (str) : String to specify what version of the emulator to
load. Default is 'class_aemulus'.
'''
def __init__(self, version='class_aemulus'):
# Assume that all versions use the same redshift bins.
# TODO: Make this more general.
self.zbins = np.linspace(0, 2, 200)
'''The z-bins at which predictions will be made.'''
self.relevant_params = relevant_transfer[version]
models_path = cache_path+version+"/"+"models/growth/"
# Load the ensemble of NNs that makes up the D(z) emulator.
models = list()
for member in os.listdir(models_path):
model = load_model(models_path+member,
compile=False)
models.append(model)
self.models = models
'''A list containing the individual ensemble members.'''
scalers_path = cache_path+version+"/"+"scalers/growth/"
xscaler = UniformScaler()
yscaler = LogScaler()
# Load the variables that define the scalers.
xmin_diff = np.load(scalers_path+"xscaler_min_diff.npy")
ymin_diff = np.load(scalers_path+"yscaler_min_diff.npy")
xscaler.min_val = xmin_diff[0, :]
xscaler.diff = xmin_diff[1, :]
yscaler.min_val = ymin_diff[0, :]
yscaler.diff = ymin_diff[1, :]
self.scalers = (xscaler, yscaler)
def emu_predict(self, X, mean_or_full='mean'):
'''
Make predictions with the component emulator.
Args:
X (array) : Array containing the relevant input parameters. If making
a single prediction should have shape (d,), if a batch prediction
should have the shape (N,d).
mean_or_full : Can be either 'mean' or 'full'. Determines if the
ensemble mean prediction should be returned, or the predictions
from each ensemble member (default is 'mean').
Returns:
Array containing the predictions from the component emulator. Array
will have shape (m,n,k). If mean_or_full='mean' will have shape (n,k).
'''
        # If making a prediction on a single parameter set, input array needs to
# be reshaped.
X = np.atleast_2d(X)[:,self.relevant_params]
X_prime = self.scalers[0].transform(X)
if mean_or_full == "mean":
preds = 0
for i in range(len(self.models)):
pred = self.scalers[1].inverse_transform(
self.models[i](X_prime))
pred[:, 0] = 1.
preds += pred
return preds/float(len(self.models))
elif mean_or_full == "full":
preds = np.zeros(
(len(self.models), X_prime.shape[0], self.zbins.shape[0]))
for i in range(len(self.models)):
preds[i, :, :] = self.scalers[1].inverse_transform(
self.models[i](X_prime))
preds[i, :, 0] = 1.
return preds
class Boost:
'''
    Class for the nonlinear boost component emulator.
    On initialisation the weights for the NN ensemble will be loaded,
along with the scalers required to make predictions with the NNs.
Args:
redshift_id (int) : Index in matter_boost_zlist or galaxy_boost_zlist
            that corresponds to the desired redshift.
'''
def __init__(self, redshift_id):
        # The scales where the Boost component emulator produces predictions are
# dependent on the simulation suite used to generate the training data.
# Currently based on the Aemulus suite.
# TODO: Make this more generic.
Lbox = 1050
Nmesh = 1024
k_ny = np.pi * Nmesh / Lbox
k_fund = 2*np.pi / Lbox
ksim = np.arange(k_fund, 0.5*k_ny, 2*k_fund)
ksim = (ksim[:-1]+ksim[1:])/2.
self.kbins = ksim
'''The k-bins at which predictions will be made.'''
boost_path = cache_path+"class_aemulus/boost_kwanspace_z{a}/".format(a=galaxy_boost_zlist[redshift_id])
# Load the ensemble of NNs that makes up the B(k) emulator.
models = list()
for member in os.listdir(boost_path+"model"):
model = load_model(boost_path+"model/"+member,
compile=False)
models.append(model)
self.models = models
'''A list containing the individual ensemble members.'''
xscaler = UniformScaler()
yscaler = LogScaler()
# Load the variables that define the scalers.
xmin_diff = np.load(boost_path+"scalers/xscaler_min_diff.npy")
ymin_diff = np.load(boost_path+"scalers/yscaler_min_diff.npy")
xscaler.min_val = xmin_diff[0, :]
xscaler.diff = xmin_diff[1, :]
yscaler.min_val = ymin_diff[0, :]
yscaler.diff = ymin_diff[1, :]
self.scalers = (xscaler, yscaler)
def emu_predict(self, X, mean_or_full='mean'):
'''
Make predictions with the component emulator.
Args:
X (array) : Array containing the relevant input parameters. If making
a single prediction should have shape (d,), if a batch prediction
should have the shape (N,d).
mean_or_full : Can be either 'mean' or 'full'. Determines if the
ensemble mean prediction should be returned, or the predictions
from each ensemble member (default is 'mean').
Returns:
Array containing the predictions from the component emulator. Array
will have shape (m,n,k). If mean_or_full='mean' will have shape (n,k).
'''
        # If making a prediction on a single parameter set, input array needs to
# be reshaped.
X = np.atleast_2d(X)
X_prime = self.scalers[0].transform(X)
if mean_or_full == "mean":
preds = 0
for i in range(len(self.models)):
preds += self.scalers[1].inverse_transform(
self.models[i](X_prime))
return preds/float(len(self.models))
elif mean_or_full == "full":
preds = np.zeros(
(len(self.models), X_prime.shape[0], self.kbins.shape[0]))
for i in range(len(self.models)):
preds[i, :, :] = self.scalers[1].inverse_transform(
self.models[i](X_prime))
return preds
class MatterBoost:
'''
Emulator for predicting the nonlinear boost for the matter power
spectrum in real space. Trained with the QUIJOTE simulations.
Args:
redshift_id (int) : Index in ``matter_boost_zlist``
            that corresponds to the desired redshift.
'''
def __init__(self, redshift_id):
# Currently only trained on Quijote sims so defining the
# kbins based on that.
        # TODO: Make this more general.
k, _ = np.loadtxt(cache_path+'QUIP/Pk_m_z=0.txt',
unpack=True)
ks_good = k < 1.0
self.kbins = k[ks_good]
'''The k-bins at which predictions will be made.'''
self.redshift = float(matter_boost_zlist[redshift_id])
models_path = cache_path+"QUIP/"+"models/"
# Load the ensemble of NNs that makes up the B(k) emulator.
models = list()
for member in os.listdir(models_path+"boost_z{a}".format(a=matter_boost_zlist[redshift_id])):
model = load_model(models_path+"boost_z{a}/".format(a=matter_boost_zlist[redshift_id])+member,
compile=False)
models.append(model)
self.models = models
'''A list containing the individual ensemble members.'''
scalers_path = cache_path+"QUIP/"+"scalers/"
xscaler = UniformScaler()
yscaler = LogScaler()
# Load the variables that define the scalers.
xmin_diff = np.load(scalers_path+"boost_z{a}/xscaler_min_diff.npy".format(a=matter_boost_zlist[redshift_id]))
ymin_diff = np.load(scalers_path+"boost_z{a}/yscaler_min_diff.npy".format(a=matter_boost_zlist[redshift_id]))
xscaler.min_val = xmin_diff[0, :]
xscaler.diff = xmin_diff[1, :]
yscaler.min_val = ymin_diff[0, :]
yscaler.diff = ymin_diff[1, :]
self.scalers = (xscaler, yscaler)
def emu_predict(self, X, mean_or_full='mean'):
'''
Make predictions with the component emulator.
Args:
X (array) : Array containing the relevant input parameters. If making
a single prediction should have shape (d,), if a batch prediction
should have the shape (N,d).
mean_or_full : Can be either 'mean' or 'full'. Determines if the
ensemble mean prediction should be returned, or the predictions
from each ensemble member (default is 'mean').
Returns:
Array containing the predictions from the component emulator. Array
will have shape (m,n,k). If mean_or_full='mean' will have shape (n,k).
'''
        # If making a prediction on a single parameter set, input array needs to
# be reshaped.
X = np.atleast_2d(X)
X_prime = self.scalers[0].transform(X)
if mean_or_full == "mean":
preds = 0
for i in range(len(self.models)):
preds += self.scalers[1].inverse_transform(
self.models[i](X_prime))
return preds/float(len(self.models))
elif mean_or_full == "full":
preds = np.zeros(
(len(self.models), X_prime.shape[0], self.kbins.shape[0]))
for i in range(len(self.models)):
preds[i, :, :] = self.scalers[1].inverse_transform(
self.models[i](X_prime))
return preds
class P11l:
'''
Class for emulator that predicts the P11l contributions to the
P_n matrix.
'''
def __init__(self, multipole, version='EFTv2', redshift=0.51):
if version=='EFTv3':
self.kbins = kbird
else:
self.kbins = kbird[:39]
models_path = cache_path+version+"/z{a}/models/P11{b}/".format(a=redshift,
b=multipole)
        # Unlike many of the other matryoshka component emulators
# the EFT components consist of just one NN.
model = load_model(models_path+"member_0", compile=False)
self.model = model
'''The NN that forms this component emulator'''
xscalers_path = cache_path+version+"/z{a}/scalers/".format(a=redshift)
yscalers_path = cache_path+version+"/z{a}/scalers/P11{b}/".format(a=redshift,
b=multipole)
self.nonzero_cols = np.load(yscalers_path+"nonzero_cols.npy")
'''There can be zeros for all cosmologies at certain k-values.
The emulator does not make predictions here so we need to
know where to put zeros.'''
xscaler = UniformScaler()
yscaler = UniformScaler()
# Load the variables that define the scalers.
xmin_diff = np.load(xscalers_path+"xscaler_min_diff.npy")
ymin_diff = np.load(yscalers_path+"yscaler_min_diff.npy")
xscaler.min_val = xmin_diff[0, :]
xscaler.diff = xmin_diff[1, :]
yscaler.min_val = ymin_diff[0, :]
yscaler.diff = ymin_diff[1, :]
self.scalers = (xscaler, yscaler)
def emu_predict(self, X):
'''
Make predictions with the component emulator.
Args:
X (array) : Array containing the relevant input parameters. If making
a single prediction should have shape (d,), if a batch prediction
should have the shape (N,d).
Returns:
Array containing the predictions from the component emulator
will have shape (n,k).
'''
        # If making a prediction on a single parameter set, input array needs to
# be reshaped.
X = np.atleast_2d(X)
X_prime = self.scalers[0].transform(X)
preds = self.scalers[1].inverse_transform(
self.model(X_prime))
preds_incl_zeros = np.zeros((X.shape[0], 3*len(self.kbins)))
preds_incl_zeros[:,self.nonzero_cols] = preds
return preds_incl_zeros
class Ploopl:
'''
Class for emulator that predicts the Ploopl contributions to the
P_n matrix.
'''
def __init__(self, multipole, version='EFTv2', redshift=0.51):
if version=='EFTv3':
self.kbins = kbird
else:
self.kbins = kbird[:39]
models_path = cache_path+version+"/z{a}/models/Ploop{b}/".format(a=redshift,
b=multipole)
        # Unlike many of the other matryoshka component emulators
# the EFT components consist of just one NN.
model = load_model(models_path+"member_0", compile=False)
self.model = model
'''The NN that forms this component emulator'''
xscalers_path = cache_path+version+"/z{a}/scalers/".format(a=redshift)
yscalers_path = cache_path+version+"/z{a}/scalers/Ploop{b}/".format(a=redshift,
b=multipole)
self.nonzero_cols = np.load(yscalers_path+"nonzero_cols.npy")
'''There can be zeros for all cosmologies at certain k-values.
The emulator does not make predictions here so we need to
know where to put zeros.'''
xscaler = UniformScaler()
yscaler = UniformScaler()
# Load the variables that define the scalers.
xmin_diff = np.load(xscalers_path+"xscaler_min_diff.npy")
ymin_diff = np.load(yscalers_path+"yscaler_min_diff.npy")
xscaler.min_val = xmin_diff[0, :]
xscaler.diff = xmin_diff[1, :]
yscaler.min_val = ymin_diff[0, :]
yscaler.diff = ymin_diff[1, :]
self.scalers = (xscaler, yscaler)
def emu_predict(self, X):
'''
Make predictions with the component emulator.
Args:
X (array) : Array containing the relevant input parameters. If making
a single prediction should have shape (d,), if a batch prediction
should have the shape (N,d).
Returns:
Array containing the predictions from the component emulator
will have shape (n,k).
'''
        # If making a prediction on a single parameter set, input array needs to
# be reshaped.
X = np.atleast_2d(X)
X_prime = self.scalers[0].transform(X)
preds = self.scalers[1].inverse_transform(
self.model(X_prime))
preds_incl_zeros = np.zeros((X.shape[0], 12*len(self.kbins)))
preds_incl_zeros[:,self.nonzero_cols] = preds
return preds_incl_zeros
class Pctl:
'''
Class for emulator that predicts the Pctl contributions to the
P_n matrix.
'''
def __init__(self, multipole, version='EFTv2' , redshift=0.51):
if version=='EFTv3':
self.kbins = kbird
else:
self.kbins = kbird[:39]
models_path = cache_path+version+"/z{a}/models/Pct{b}/".format(a=redshift,
b=multipole)
        # Unlike many of the other matryoshka component emulators
# the EFT components consist of just one NN.
model = load_model(models_path+"member_0", compile=False)
self.model = model
'''The NN that forms this component emulator'''
xscalers_path = cache_path+version+"/z{a}/scalers/".format(a=redshift)
yscalers_path = cache_path+version+"/z{a}/scalers/Pct{b}/".format(a=redshift,
b=multipole)
self.nonzero_cols = np.load(yscalers_path+"nonzero_cols.npy")
'''There can be zeros for all cosmologies at certain k-values.
The emulator does not make predictions here so we need to
know where to put zeros.'''
xscaler = UniformScaler()
yscaler = UniformScaler()
# Load the variables that define the scalers.
xmin_diff = np.load(xscalers_path+"xscaler_min_diff.npy")
ymin_diff = np.load(yscalers_path+"yscaler_min_diff.npy")
xscaler.min_val = xmin_diff[0, :]
xscaler.diff = xmin_diff[1, :]
yscaler.min_val = ymin_diff[0, :]
yscaler.diff = ymin_diff[1, :]
self.scalers = (xscaler, yscaler)
def emu_predict(self, X):
'''
Make predictions with the component emulator.
Args:
X (array) : Array containing the relevant input parameters. If making
a single prediction should have shape (d,), if a batch prediction
should have the shape (N,d).
Returns:
Array containing the predictions from the component emulator
will have shape (n,k).
'''
        # If making a prediction on a single parameter set, input array needs to
# be reshaped.
X = np.atleast_2d(X)
X_prime = self.scalers[0].transform(X)
preds = self.scalers[1].inverse_transform(
self.model(X_prime))
preds_incl_zeros = np.zeros((X.shape[0], 6*len(self.kbins)))
preds_incl_zeros[:,self.nonzero_cols] = preds
return preds_incl_zeros
class EFT:
'''
    Emulator for predicting the power spectrum multipoles that would
    be computed using EFTofLSS.
Args:
multipole (int) : Desired multipole. Can either be 0 or 2.
version (str): Version of ``EFTEMU``. Can be ``EFTv2``, ``EFT-optiresum``,
or ``EFT_lowAs``. Default is ``EFTv2``.
redshift (float) : Desired redshift. Can be 0.38, 0.51, or 0.61.
Default is 0.51.
.. note::
See the `EFTEMU <../example_notebooks/EFTEMU_example.ipynb>`_
example.
'''
def __init__(self, multipole, version='EFTv2', redshift=0.51):
self.P11 = P11l(multipole, version=version, redshift=redshift)
'''The ``P_11`` component emulator.'''
self.Ploop = Ploopl(multipole, version=version, redshift=redshift)
'''The ``P_loop`` component emulator.'''
self.Pct = Pctl(multipole, version=version, redshift=redshift)
'''The ``P_ct`` component emulator.'''
self.multipole = multipole
self.redshift = redshift
self.param_names = ["w_c", "w_b", "h", "As", "ns"]
'''List of the input parameters.'''
def emu_predict(self, X, bias, stochastic=None, km=None,
ng=None, kvals=None):
'''
Make predictions with the emulator.
Args:
X (array) : Input cosmological parameters.
Should have shape (n, 5).
bias (array) : Input bias parameters and counterterms. Should
have shape (n, 7)
stochastic (array) : Input stochastic counterterms. Should have
shape (n, 3). Default is ``None``, in which case no stochastic
terms are used.
km (float) : Controls the bias derivative expansion (see eq. 5
            in arXiv:1909.05271). Default is ``None``, in which case all
counterterm inputs are assumed to be a ratio with km i.e.
``c_i/km**2``.
ng (float) : Mean galaxy number density. Default is ``None``.
Only required if ``stochastic`` is not ``None``.
kvals (array) : Array containing k-values at which to produce predictions.
Needs to be within the k-range that the emulator has been trained to
predict. Default is ``None``, in which case predicts will be made at the
default k-values.
'''
P11_preds = self.P11.emu_predict(X)
Ploop_preds = self.Ploop.emu_predict(X)
Pct_preds = self.Pct.emu_predict(X)
        # If making a prediction on a single parameter set, input array needs to
# be reshaped.
X = np.atleast_2d(X)
bias = np.atleast_2d(bias)
if stochastic is not None:
stochastic = np.atleast_2d(stochastic)
if km is not None:
stochastic[:,1:] = stochastic[:,1:]/km**2
if km is not None:
bias[:,4:] = bias[:,4:]/km**2
f = halo_model_funcs.fN_vec((X[:,0]+X[:,1])/X[:,2]**2, self.redshift)
multipole_array = eft_funcs.multipole_vec([P11_preds.reshape(X.shape[0],3,self.P11.kbins.shape[0]),
Ploop_preds.reshape(X.shape[0],12,self.Ploop.kbins.shape[0]),
Pct_preds.reshape(X.shape[0],6,self.Pct.kbins.shape[0])],
bias, f.reshape(-1,1))
if stochastic is not None:
if self.multipole==0:
multipole_array += stochastic[:,0].reshape(-1,1)/ng
multipole_array += (stochastic[:,1].reshape(-1,1)*self.P11.kbins**2)/ng
elif self.multipole==2:
multipole_array += (stochastic[:,2].reshape(-1,1)*self.P11.kbins**2)/ng
if kvals is not None:
if kvals.max()<self.P11.kbins.max() and kvals.min()>self.P11.kbins.min():
return interp1d(self.P11.kbins, multipole_array)(kvals)
else:
raise ValueError("kvals need to be covered by default eulator range.")
else:
return multipole_array
class QUIP:
'''
Emulator for predicting the real space nonlinear matter power spectrum. Trained
with the QUIJOTE simulations.
Args:
redshift_id (int) : Index in ``matter_boost_zlist``
            that corresponds to the desired redshift.
.. note::
See the `QUIP <../example_notebooks/QUIP.ipynb>`_ example.
'''
def __init__(self, redshift_id):
self.Transfer = Transfer(version='QUIP')
'''The transfer function component emulator.'''
self.MatterBoost = MatterBoost(redshift_id=redshift_id)
'''The nonlinear boost component emulator.'''
self.param_names = ["O_m", "O_b", "h", "ns", "sig8"]
'''List of the input parameters.'''
def emu_predict(self, X, kvals=None, mean_or_full='mean'):
'''
Make predictions with the emulator.
Args:
X (array) : Array containing the relevant input parameters. If making
a single prediction should have shape ``(d,)``, if a batch prediction
should have the shape ``(N,d)``.
kvals (array) : Array containing k-values at which to produce predictions.
Needs to be within the k-range that the emulator has been trained to
            predict. Default is ``None``, in which case predictions will be made at the
default k-values.
mean_or_full : Can be either 'mean' or 'full'. Determines if the
ensemble mean prediction should be returned, or the predictions
from each ensemble member (default is 'mean').
Returns:
Array containing the predictions from the emulator. Array
will have shape ``(m,n,k)``. If ``mean_or_full='mean'`` will have shape ``(n,k)``.
'''
        # If making a prediction on a single parameter set, input array needs to
# be reshaped.
X = np.atleast_2d(X)
transfer_preds = self.Transfer.emu_predict(X, mean_or_full=mean_or_full)
boost_preds = self.MatterBoost.emu_predict(X, mean_or_full=mean_or_full)
linPk0 = halo_model_funcs.power0_v2(self.Transfer.kbins, transfer_preds,
sigma8=X[:, parameter_ids['QUIP']['sigma8']],
ns=X[:, parameter_ids['QUIP']['ns']])
growths = halo_model_funcs.DgN_vec(X[:, parameter_ids['QUIP']['Om']], self.MatterBoost.redshift)
growths /= halo_model_funcs.DgN_vec(X[:, parameter_ids['QUIP']['Om']], 0.)
linPk = interp1d(self.Transfer.kbins, linPk0, kind='cubic')(self.MatterBoost.kbins)\
*(growths**2).reshape(-1,1)
if kvals is not None:
if kvals.max()<self.MatterBoost.kbins.max() and kvals.min()>self.MatterBoost.kbins.min():
return interp1d(self.MatterBoost.kbins, linPk*boost_preds)(kvals)
else:
raise ValueError("kvals need to be covered by default eulator range.")
else:
return linPk*boost_preds
class HaloModel:
'''
Class for the emulated halo model.
    Upon initialisation each of the component emulators will be initialised.
Args:
k (array) : The k-bins over which predictions will be made. Cannot be
outside the ranges used when training the component emulators.
redshift_id (int) : Index in matter_boost_zlist or galaxy_boost_zlist
            that corresponds to the desired redshift. Only needed if nonlinear is True.
Default is None.
redshift (float) : The redshift at which predictions should be made. Can
only be used if nonlinear is False. If nonlinear is True this will be ignored.
nonlinear (bool) : Determines if nonlinear predictions should be made.
            If False, the nonlinear boost component emulator will not be
            initialised.
matter (bool) : If nonlinear=True setting matter=True will use emulated
nonlinear matter power. If matter=False the nonlinear boost will be
applied to the galaxy power spectrum.
version (str) : Version of the emulators to be loaded.
kspace_filt (bool) : If True reduces contribution from P2h on small scales.
Inspired by halomod. See section 2.9.1 of arXiv:2009.14066.
'''
def __init__(self, k, redshift_id=None, redshift=None, nonlinear=True, matter=True,
version='class_aemulus', kspace_filt=False):
        # Initialise the base model components.
self.Transfer = Transfer(version=version)
self.sigma = Sigma(version=version)
self.dlns = SigmaPrime(version=version)
# Load the growth function emulator for non LCDM models.
if version=='class_aemulus':
self.growth = Growth()
# Only load the nonlinear boost component if nonlinear predictions are
# required.
self.nonlinear = nonlinear
if nonlinear and matter:
self.boost = MatterBoost(redshift_id)
self.redshift = float(matter_boost_zlist[redshift_id])
elif nonlinear:
self.boost = Boost(redshift_id)
self.redshift = float(galaxy_boost_zlist[redshift_id])
else:
self.redshift = redshift
# Make sure desired prediction range is covered by the emulators.
if k.min() < self.Transfer.kbins.min() or k.max() > self.Transfer.kbins.max():
print("Input k outside emulator coverage! (LINEAR)")
if nonlinear and k.max() > self.boost.kbins.max():
print("Input k outside emulator coverage! (NONLINEAR)")
if kspace_filt:
self.filter = halo_model_funcs.TopHatrep(None, None)
self.k = k
self.version = version
self.matter = matter
        # Initialise the halomod mass definition and calculate the
        # concentration-mass relation.
#md_mean = SOMean(overdensity=200)
#duffy = Duffy08(mdef=md_mean)
#conc_duffy = duffy.cm(self.sigma.mbins, z=redshift)
conc_duffy = Duffy08cmz(self.sigma.mbins, self.redshift)
self.cm = conc_duffy
def emu_predict(self, X_COSMO, X_HOD, kspace_filt=False, RT=3.0):
'''
Make predictions for the halo model power spectrum with the
        pre-initialised component emulators.
Args:
X_COSMO (array) : Input cosmological parameters.
X_HOD (array) : Input HOD parameters.
Returns:
Array containing the predictions from the halo model power spectrum.
Array will have shape (n,k). If making a prediction for a single set
of input parameters will have shape (1,k).
'''
        # Input must be reshaped if producing a single prediction.
X_COSMO = np.atleast_2d(X_COSMO)
X_HOD = np.atleast_2d(X_HOD)
# Produce predictions from each of the components.
T_preds = self.Transfer.emu_predict(X_COSMO,
mean_or_full="mean")
sigma_preds = self.sigma.emu_predict(X_COSMO,
mean_or_full="mean")
dlns_preds = self.dlns.emu_predict(X_COSMO,
mean_or_full="mean")
if self.version=='class_aemulus':
gf_preds = self.growth.emu_predict(X_COSMO,
mean_or_full="mean")
if self.nonlinear and self.matter:
boost_preds = self.boost.emu_predict(X_COSMO,
mean_or_full="mean")
# Force the nonlinear boost to unity outside the emulation range.
boost_preds = interp1d(self.boost.kbins, boost_preds, bounds_error=False,
fill_value=1.0)(self.k)
elif self.nonlinear:
boost_preds = self.boost.emu_predict(np.hstack([X_HOD, X_COSMO]),
mean_or_full="mean")
# Force the nonlinear boost to unity outside the emulation range.
boost_preds = interp1d(self.boost.kbins, boost_preds, bounds_error=False,
fill_value=1.0)(self.k)
# Calculate the linear matter power spectrum at z=0 from the transfer
# function prediction.
p_ml = halo_model_funcs.power0_v2(self.Transfer.kbins, T_preds, sigma8=X_COSMO[:, parameter_ids[self.version]['sigma8']],
ns=X_COSMO[:, parameter_ids[self.version]['ns']])
# Interpolate the power spectrum to cover the desired k-range.
p_ml = interp1d(self.Transfer.kbins, p_ml)(self.k)
if self.nonlinear and self.matter:
p_ml = p_ml*boost_preds
if kspace_filt:
# Inspired by halomod.
p_ml = p_ml*self.filter.k_space(self.k*RT)
if self.version=='class_aemulus':
# Interpolate the predicted growth function to return D(z) at the
# desired redshift.
D_z = interp1d(self.growth.zbins, gf_preds)(self.redshift)
else:
D_z = np.zeros((p_ml.shape[0],))
for i in range(D_z.shape[0]):
# Assumes Om is in the first column of X_COSMO
D_z[i] = halo_model_funcs.DgN(X_COSMO[i,0],self.redshift)/halo_model_funcs.DgN(X_COSMO[i,0],0.)
# Produce HM galaxy power spectrum predictions using the component
# predictions.
# TODO: I haven't found a nice way of vectorising the halo profile
# calculation. This loop currently dominates the prediction time so
# should be the first step when working on further optimisation.
hm_preds = np.zeros((X_HOD.shape[0], self.k.shape[0]))
n_ts = np.zeros((X_HOD.shape[0]))
for i in range(X_HOD.shape[0]):
# Create mass mask.
tm = self.sigma.mbins >= X_HOD[i, 0] - 5*X_HOD[i, 1]
Nc = halo_model_funcs.cen_Z09(
self.sigma.mbins[tm], X_HOD[i, 0], X_HOD[i, 1])
Ns = halo_model_funcs.sat_Z09(
self.sigma.mbins[tm], X_HOD[i, 2], X_HOD[i, 4], X_HOD[i, 3], X_HOD[i, 0])
Ntot = Nc*(1+Ns)
mean_dens = halo_model_funcs.mean_density0_v2(
h=X_COSMO[i, 3], Om0=X_COSMO[i, 0])
halo_bias = halo_model_funcs.TinkerBias(
np.sqrt(sigma_preds[i, tm]**2*D_z[i]**2))
hmf = halo_model_funcs.hmf(
sigma_preds[i, tm], dlns_preds[i, tm], mean_dens, self.sigma.mbins[tm], D_z[i], self.redshift)
u_m = halo_model_funcs.u(
self.k, self.sigma.mbins[tm], self.cm[tm], mean_dens, 200)
n_t = halo_model_funcs.ngal(self.sigma.mbins[tm].reshape(
1, -1), hmf.reshape(1, -1), Ntot.reshape(1, -1))[0]
n_ts[i] = n_t
P1h_ss = halo_model_funcs.power_1h_ss(
u_m, hmf, self.sigma.mbins[tm], Nc, Ns, n_t)
P1h_cs = halo_model_funcs.power_1h_cs(
u_m, hmf, self.sigma.mbins[tm], Nc, Ns, n_t)
P2h = halo_model_funcs.power_2h(
u_m, hmf, self.sigma.mbins[tm], Ntot, n_t, p_ml[i]*D_z[i]**2, halo_bias)
if self.nonlinear and not self.matter:
# If making nonlinear predictions, combine the base model
# prediction with the boost component prediction.
hm_preds[i, :] = (P2h+P1h_cs+P1h_ss)*boost_preds[i]
else:
hm_preds[i, :] = P2h+P1h_cs+P1h_ss
return hm_preds, n_ts
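# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): predict a
# galaxy power spectrum monopole with the EFT emulator. Assumes the
# matryoshka-data cache (NN weights and scalers) is present on disk; the
# parameter values below are placeholders, not fitted values.
if __name__ == "__main__":
    P0_emu = EFT(multipole=0)  # EFTv2 monopole emulator at z=0.51
    cosmo = np.array([0.12, 0.022, 0.68, 3.0, 0.96])      # w_c, w_b, h, As, ns
    bias = np.array([2.0, 0.5, 0.1, 0.1, 0.0, 0.0, 0.0])  # b1..b7
    P0 = P0_emu.emu_predict(cosmo, bias)
    print(P0.shape)  # (1, nk) at the default kbird k-bins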
```
#### File: Matryoshka/matryoshka/training_funcs.py
```python
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, InputLayer, Dropout
from tensorflow.keras.optimizers import Adam
import os
import pathlib
class UniformScaler:
'''
Class for a simple uniform scaler. Linearly transforms X such that all
samples in X are in the range [0,1].
'''
min_val = 0
diff = 1
def fit(self, X):
'''
Fit the parameters of the transformer based on the training data.
Args:
X (array) : The training data. Must have shape (nsamps, nfeatures).
'''
# Check shape of X.
if len(X.shape) != 2:
raise ValueError("X does not have the correct shape. Must have shape (nsamps, nfeatures)")
# Calculate min. value and largest diff. of all samples of X along the
# 0th axis. Both min_val and diff can be vectors if required.
self.min_val = np.min(X, axis=0)
self.diff = np.max(X, axis=0) - np.min(X, axis=0)
def transform(self, X):
'''
Transform the data.
Args:
X (array) : The data to be transformed.
Returns:
Array containing the transformed data.
'''
x = np.subtract(X, self.min_val)
return np.true_divide(x, self.diff)
def inverse_transform(self, X):
'''
Inverse transform the data.
Args:
X (array) : The data to be transformed.
Returns:
Array containing the inverse transformed data.
'''
x = np.multiply(X, self.diff)
return np.add(x, self.min_val)
class LogScaler:
'''
Class for a log scaler. Linearly transforms logX such that all samples in
logX are in the range [0,1].
'''
min_val = 0
diff = 1
def fit(self, X):
'''
Fit the parameters of the transformer based on the training data.
Args:
X (array) : The training data. Must have shape (nsamps, nfeatures).
'''
# Check shape of X.
if len(X.shape) != 2:
raise ValueError("X does not have the correct shape. Must have shape (nsamps, nfeatures)")
# Make sure there are no negative values or zeros.
if np.any(X<=0.):
raise ValueError("X contains negative values or zeros.")
X = np.log(X)
# Calculate min. value and largest diff. of all samples of X along the
# 0th axis. Both min_val and diff can be vectors if required.
self.min_val = np.min(X, axis=0)
self.diff = np.max(X, axis=0) - np.min(X, axis=0)
def transform(self, X):
'''
Transform the data.
Args:
X (array) : The data to be transformed.
Returns:
Array containing the transformed data.
'''
X = np.log(X)
x = np.subtract(X, self.min_val)
return np.true_divide(x, self.diff)
def inverse_transform(self, X):
'''
Inverse transform the data.
Args:
X (array) : The data to be transformed.
Returns:
Array containing the inverse transformed data.
'''
x = np.multiply(X, self.diff)
return np.exp(np.add(x, self.min_val))
class StandardScaler:
'''
Replacement for sklearn StandardScaler(). Rescales X such that it has zero
mean and unit variance.
'''
mean = 0
scale = 1
def fit(self, X):
'''
Fit the parameters of the transformer based on the training data.
Args:
X (array) : The training data. Must have shape (nsamps, nfeatures).
'''
# Check shape of X.
if len(X.shape) != 2:
raise ValueError("X does not have the correct shape. Must have shape (nsamps, nfeatures).")
        # Calculate the mean and standard deviation of X along the 0th axis.
# Can be vectors if needed.
self.mean = np.mean(X, axis=0)
self.scale = np.std(X, axis=0)
def transform(self, X):
'''
Transform the data.
Args:
X (array) : The data to be transformed.
Returns:
Array containing the transformed data.
'''
x = np.subtract(X, self.mean)
return np.true_divide(x, self.scale)
def inverse_transform(self, X):
'''
Inverse transform the data.
Args:
X (array) : The data to be transformed.
Returns:
Array containing the inverse transformed data.
'''
x = np.multiply(X, self.scale)
return np.add(x, self.mean)
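# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): round-trip
# check for the scalers above on random data. Arrays follow the
# (nsamps, nfeatures) convention documented in fit().
def _scaler_roundtrip_demo():
    X = np.random.uniform(1., 10., size=(100, 3))  # positive, so LogScaler is valid
    for scaler in (UniformScaler(), LogScaler(), StandardScaler()):
        scaler.fit(X)
        X_prime = scaler.transform(X)
        # inverse_transform should recover X up to floating point error.
        assert np.allclose(scaler.inverse_transform(X_prime), X)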
class Resampler:
'''
Class for re-sampling the parameter space covered by a suite of simulations.
The new samples can then be used to generate training data for the base model
    component emulators.
.. note::
        See the `Generating training samples for the base model components
<../example_notebooks/resample_example.ipynb>`_ example.
Args:
simulation_samples (array) : The samples in the parameter space from the
simulation suite. Default is None.
parameter_ranges (array) : Ranges that define the extent of the parameter
space. Should have shape (n, 2), where the first column is the minimum
value for the n parameters, and the second column is the maximum.
Default is None.
        use_latent_space (bool): If True the original simulation samples will be
            transformed into an uncorrelated latent space for re-sampling. Default
is False.
'''
def __init__(self, simulation_samples=None, parameter_ranges=None,
use_latent_space=False):
# Make sure the user has passed either simulation_samples or parameter_ranges.
if (simulation_samples is None) and (parameter_ranges is None):
raise ValueError("Please provide either simulation samples or parameter ranges.")
elif (parameter_ranges is None) and (use_latent_space is False):
self.min = np.min(simulation_samples, axis=0)
self.max = np.max(simulation_samples, axis=0)
self.diff = self.max - self.min
self.use_latent_space = use_latent_space
elif (parameter_ranges is None) and (use_latent_space is True):
self.L = np.linalg.cholesky(np.cov(simulation_samples, rowvar=False))
self.use_latent_space = use_latent_space
self.mean = np.mean(simulation_samples, axis=0)
latent_samples = np.matmul(np.linalg.inv(self.L), (simulation_samples-self.mean).T).T
self.min = latent_samples.min(axis=0)
self.max = latent_samples.max(axis=0)
self.diff = self.max - self.min
elif parameter_ranges is not None:
self.min = parameter_ranges[:,0]
self.max = parameter_ranges[:,1]
self.diff = self.max - self.min
self.use_latent_space = use_latent_space
def new_samples(self, nsamps, LH=True, buffer=None):
'''
Generate new samples from the region covered by the simulations.
Args:
nsamps (int) : The number of new samples to generate.
            LH (bool) : If True will use latin-hypercube sampling. Default
                is True.
            buffer (float) : Fractional amount by which to widen the min/max
                of each parameter before sampling. Default is None, in which
                case no buffer is applied.
Returns:
Array containing the new samples. Has shape (nsamps, d).
'''
if buffer is not None:
self.min = self.min*(1-buffer)
self.max = self.max*(1+buffer)
self.diff = self.max - self.min
if (LH is False) and (self.use_latent_space is False):
return np.random.uniform(self.min, self.max, size=(nsamps,self.min.shape[0]))
# How many dimensions in the sample space.
d = self.min.shape[0]
# Define the bin edges.
low_edge = np.arange(0, nsamps)/nsamps
high_edge = np.arange(1, nsamps+1)/nsamps
# Generate the samples.
latent_samples = np.random.uniform(low_edge, high_edge, (d, nsamps)).T
for i in range(1,d):
np.random.shuffle(latent_samples[:, i:])
samples = np.zeros_like(latent_samples)
for i in range(d):
samples[:,i] = (latent_samples[:,i]*self.diff[i])+(self.min[i])
if self.use_latent_space is False:
return samples
else:
return np.matmul(self.L, samples.T).T+self.mean
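# Usage sketch (not part of the library; `sim_samples` is a hypothetical
# (n, d) array of existing simulation samples):
#     resampler = Resampler(simulation_samples=sim_samples)
#     new_points = resampler.new_samples(nsamps=1000, LH=True)
#     new_points.shape  # -> (1000, d)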
def trainNN(trainX, trainY, validation_data, nodes, learning_rate, batch_size, epochs,
callbacks=None, DR=None, verbose=0):
'''
    A high-level function for quickly training a simple NN based emulator. The
    user-specified NN will be optimised with an Adam optimiser and a mean squared
    error loss function.
Args:
trainX (array) : Array containing the parameters/features of the training set.
Should have shape (n, d).
        trainY (array) : Array containing the target function of the training set.
Should have shape (n, k).
validation_data (tuple) : Tuple of arrays (valX, valY). Where `valX` and `valY`
are the equivalent of `trainX` and `trainY` for the validation data. Can be
None if there is not a validation set.
nodes (array) : Array containing the number of nodes in each hidden layer.
Should have shape (N, ), with N being the desired number of hidden layers.
learning_rate (float) : The learning rate to be used during training.
batch_size (int) : The batch size to be used during training.
epochs (int) : The number of epochs to train the NN.
callbacks (list) : List of `tensorflow` callbacks e.g. EarlyStopping
DR (float) : Float between 0 and 1 that defines the dropout rate. If None
dropout will not be used.
verbose (int) : Defines how much information `tensorflow` prints during training.
0 = silent, 1 = progress bar, 2 = one line per epoch.
Returns:
Trained keras Sequential model.
'''
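    # NOTE: assumes the usual tensorflow.keras imports are in scope earlier in
    # this module, e.g. (a sketch, assuming TF 2.x):
    #     from tensorflow.keras.models import Sequential
    #     from tensorflow.keras.layers import InputLayer, Dense, Dropout
    #     from tensorflow.keras.optimizers import Adam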
# Define the NN as a keras Sequential model
model = Sequential()
# Add the input layer
model.add(InputLayer(input_shape=(trainX.shape[1], )))
# Add the user specified number of hidden layers.
for layer in range(nodes.shape[0]):
model.add(Dense(nodes[layer], activation='relu'))
if DR is not None:
model.add(Dropout(DR))
# Add the output layer
model.add(Dense(trainY.shape[1], activation='linear'))
    # Compile the model with the user specified learning rate.
model.compile(loss='mean_squared_error', optimizer=Adam(learning_rate=learning_rate))
# Train the model
    model.fit(trainX, trainY, validation_data=validation_data, epochs=epochs,
              batch_size=batch_size, callbacks=callbacks, verbose=verbose)
return model
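# trainNN usage sketch (hypothetical arrays; shapes follow the docstring):
#     model = trainNN(trainX, trainY, validation_data=(valX, valY),
#                     nodes=np.array([128, 128]), learning_rate=1e-3,
#                     batch_size=64, epochs=200, verbose=0)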
def dataset(target, split, X_or_Y):
'''
Convenience function for loading datasets for the base model component emulators.
Args:
target (str) : The target function of interest.
split (str) : Can be "train", "test", or "val" (when a validation set is available).
X_or_Y (str) : Do you want the features ("X") or the function ("Y").
Returns:
Array containing the dataset.
'''
cache_path = os.fsdecode(pathlib.Path(os.path.dirname(__file__)
).parent.absolute())+"/matryoshka-data/"
cache_path += "class_aemulus/"
return np.load(cache_path+split+"/"+X_or_Y+"_"+target+"-v3.npy")
def train_test_indices(N, split=0.2):
'''
    Return indices that can be used to split a dataset into train and test sets.
    Args:
        N (int) : The size of the original dataset
        split (float) : The proportion of the data to be used for the test set.
            Should be a float between 0 and 1. Default is 0.2
    Returns:
        The test and train index arrays. Note the test indices are returned
        first, followed by the train indices.
'''
    indices = np.arange(N)
    np.random.shuffle(indices)
    # How many samples in the test set
    N_test = int(split*N)
    return indices[:N_test], indices[N_test:]
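# e.g. (a sketch): note the test indices come first.
#     test_idx, train_idx = train_test_indices(1000, split=0.2)
#     test_idx.shape, train_idx.shape  # -> ((200,), (800,))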
```
|
{
"source": "jdonenine/adelphi",
"score": 2
}
|
#### File: adelphi/adelphi/anonymize.py
```python
from adelphi.store import get_standard_columns_from_table_metadata
# default prefixes for the anonymized names
KEYSPACE_PREFIX = "ks"
TABLE_PREFIX = "tbl"
PARTITION_KEY_PREFIX = "pk"
CLUSTERING_KEY_PREFIX = "ck"
COLUMN_PREFIX = "col"
TYPE_PREFIX = "udt"
FIELD_PREFIX = "fld"
INDEX_PREFIX = "idx"
# maps the original schema names to the replacement names
name_map = {
KEYSPACE_PREFIX: {},
TABLE_PREFIX: {},
PARTITION_KEY_PREFIX: {},
CLUSTERING_KEY_PREFIX: {},
COLUMN_PREFIX: {},
TYPE_PREFIX: {},
FIELD_PREFIX: {},
INDEX_PREFIX: {}
}
def get_name(original_name, prefix):
"""
Looks up the anonymized name for the provided original name in the cache.
If not present, one is created, inserted into the cache and returned.
"""
    count = len(name_map[prefix])
    anonymized_name = "%s_%s" % (prefix, count)
    return name_map[prefix].setdefault(original_name, anonymized_name)
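# Example (a sketch): repeated lookups are stable, new names increment.
#     get_name("users", TABLE_PREFIX)   # -> "tbl_0"
#     get_name("orders", TABLE_PREFIX)  # -> "tbl_1"
#     get_name("users", TABLE_PREFIX)   # -> "tbl_0" (cached)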
def anonymize_keyspace(keyspace):
keyspace.name = get_name(keyspace.name, KEYSPACE_PREFIX)
for table in keyspace.tables.values():
anonymize_table(table)
for udt in keyspace.user_types.values():
anonymize_udt(udt)
def anonymize_udt(udt):
udt.keyspace = get_name(udt.keyspace, KEYSPACE_PREFIX)
udt.name = get_name(udt.name, TYPE_PREFIX)
# field names
udt.field_names = [get_name(field_name, FIELD_PREFIX) for field_name in udt.field_names]
# field types
udt.field_types = [get_name(field_type, TYPE_PREFIX)
if field_type in name_map[TYPE_PREFIX]
else field_type
for field_type in udt.field_types]
def anonymize_column(column, prefix):
column.name = get_name(column.name, prefix)
def anonymize_index(index):
index.name = get_name(index.name, INDEX_PREFIX)
prefix = COLUMN_PREFIX if index.index_options['target'] in name_map[COLUMN_PREFIX] \
else CLUSTERING_KEY_PREFIX
index.index_options['target'] = name_map[prefix][index.index_options["target"]]
index.keyspace_name = name_map[KEYSPACE_PREFIX][index.keyspace_name]
index.table_name = name_map[TABLE_PREFIX][index.table_name]
def anonymize_table(table):
table.keyspace_name = get_name(table.keyspace_name, KEYSPACE_PREFIX)
table.name = get_name(table.name, TABLE_PREFIX)
for partition_key in table.partition_key:
anonymize_column(partition_key, PARTITION_KEY_PREFIX)
for clustering_key in table.clustering_key:
anonymize_column(clustering_key, CLUSTERING_KEY_PREFIX)
    # Clustering keys also appear among the standard columns, but as distinct
    # objects; if we don't anonymize those copies too, the generated CQL is wrong.
for clustering_key in [t for t in table.columns.values() if t.name in name_map[CLUSTERING_KEY_PREFIX]]:
clustering_key.name = name_map[CLUSTERING_KEY_PREFIX][clustering_key.name]
for column in get_standard_columns_from_table_metadata(table):
anonymize_column(column, COLUMN_PREFIX)
for index in list(table.indexes.values()):
if (index.index_options["target"] not in name_map[COLUMN_PREFIX].keys() and
index.index_options["target"] not in name_map[CLUSTERING_KEY_PREFIX].keys()):
del table.indexes[index.name]
continue
anonymize_index(index)
```
#### File: adelphi/adelphi/gemini.py
```python
import json
from cassandra.cqltypes import cqltype_to_python
from adelphi.anonymize import anonymize_keyspace
from adelphi.store import get_standard_columns_from_table_metadata, set_replication_factor
def export_gemini_schema(keyspaces_metadata, options):
if options['anonymize']:
for ks in keyspaces_metadata:
anonymize_keyspace(ks)
# set replication factor
set_replication_factor(keyspaces_metadata, options['rf'])
keyspace = keyspaces_metadata[0]
replication = json.loads(
keyspace.replication_strategy.export_for_schema().replace("'", "\""))
data = {
"keyspace": {
"name": keyspace.name,
"replication": replication,
"oracle_replication": replication
},
"tables": []
}
for t in keyspace.tables.values():
table_data = {
"name": t.name,
"partition_keys": [],
"clustering_keys": [],
"columns": [],
"indexes": []
}
for pk in t.partition_key:
table_data["partition_keys"].append({
"name": pk.name,
"type": pk.cql_type
})
for ck in t.clustering_key:
table_data["clustering_keys"].append({
"name": ck.name,
"type": ck.cql_type
})
columns = get_standard_columns_from_table_metadata(t)
for c in columns:
table_data["columns"].append({
"name": c.name,
"type": cql_type_to_gemini(cqltype_to_python(c.cql_type))
})
for index in t.indexes.values():
table_data["indexes"].append({
"name": index.name,
"column": index.index_options["target"]
})
data["tables"].append(table_data)
return data
def to_string(data):
return json.dumps(data, indent=4)
def cql_type_to_gemini(cql_type, is_frozen=False):
"""
Convert a cql type representation to the gemini json one.
Limitations:
* no support for udt
* limited nested complex types support
"""
if isinstance(cql_type, str):
return cql_type
elif len(cql_type) == 1:
return cql_type[0]
else:
is_frozen_type = is_frozen
gemini_type = {}
token = cql_type.pop(0)
if isinstance(token, (list, tuple)):
return cql_type_to_gemini(token, is_frozen_type)
elif token == 'frozen':
return cql_type_to_gemini(cql_type.pop(0), True)
elif token == 'map':
subtypes = cql_type.pop(0)
gemini_type['key_type'] = cql_type_to_gemini(subtypes[0], is_frozen_type)
gemini_type['value_type'] = cql_type_to_gemini(subtypes[1], is_frozen_type)
elif token == 'list':
gemini_type['kind'] = 'list'
gemini_type['type'] = cql_type_to_gemini(cql_type.pop(0)[0], is_frozen_type)
elif token == 'set':
gemini_type['kind'] = 'set'
gemini_type['type'] = cql_type_to_gemini(cql_type.pop(0)[0], is_frozen_type)
elif token == 'tuple':
gemini_type['types'] = cql_type.pop(0)
gemini_type['frozen'] = is_frozen_type
return gemini_type
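# Conversion sketch (input shapes follow cqltype_to_python from the driver):
#     cql_type_to_gemini("text")             # -> "text"
#     cql_type_to_gemini(["list", ["int"]])  # -> {'kind': 'list', 'type': 'int', 'frozen': False}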
```
|
{
"source": "JDong820/acky",
"score": 2
}
|
#### File: acky/acky/ec2.py
```python
from acky.api import (
AwsCollection,
AwsApiClient,
make_filters,
)
from itertools import chain
class EC2ApiClient(AwsApiClient):
service_name = "ec2"
class EC2(EC2ApiClient):
def regions(self, continent='us', include_gov=False):
# returns (string, ...)
# DescribeRegions
regions = self.call("DescribeRegions", response_data_key="Regions")
if regions and continent and continent != "all":
regions = [r for r in regions
if r['RegionName'].startswith("{}-".format(continent))]
return regions
def zones(self, region):
# returns (string, ...)
# DescribeAvailabilityZones
raise NotImplementedError("aws.ec2.zones")
@property
def environment(self):
env = super(EC2, self).environment
env['hoster'] = 'ec2'
return env
@property
def ACLs(self):
return ACLCollection(self._aws)
@property
def ACEs(self):
return ACECollection(self._aws)
@property
def ElasticIPs(self):
return ElasticIPCollection(self._aws)
@property
def Instances(self):
return InstanceCollection(self._aws)
@property
def SecurityGroups(self):
return SecurityGroupCollection(self._aws)
@property
def IpPermissions(self):
return IpPermissionsCollection(self._aws)
@property
def Volumes(self):
return VolumeCollection(self._aws)
@property
def Snapshots(self):
return SnapshotCollection(self._aws)
@property
def Subnets(self):
return SubnetCollection(self._aws)
@property
def VPCs(self):
return VPCCollection(self._aws)
@property
def PlacementGroups(self):
return PlacementGroupCollection(self._aws)
@property
def KeyPairs(self):
return KeyPairCollection(self._aws)
@property
def Tags(self):
return TagCollection(self._aws)
@property
def Images(self):
return ImageCollection(self._aws)
class ACLCollection(AwsCollection, EC2ApiClient):
def get(self, filters=None):
# returns (acl_info, ...)
# DescribeNetworkAcls
raise NotImplementedError()
def create(self, vpc):
# returns acl_info
# CreateNetworkAcl
raise NotImplementedError()
def destroy(self, acl):
# returns bool
# DeleteNetworkAcl
raise NotImplementedError()
class ACECollection(AwsCollection, EC2ApiClient):
def get(self, filters=None):
# returns (ace_info, ...)
# DescribeNetworkAcls
raise NotImplementedError()
def add(self, acl, ace_list):
# returns ace_info
# CreateNetworkAclEntry
raise NotImplementedError()
def remove(self, acl, ace_list):
# returns bool
# DeleteNetworkAclEntry
raise NotImplementedError()
def replace(self, acl, old, new):
# returns ace_info
# CreateNetworkAclEntry, DeleteNetworkAclEntry
raise NotImplementedError()
class ElasticIPCollection(AwsCollection, EC2ApiClient):
"""Interface to get, create, destroy, associate, and disassociate EIPs for
classic EC2 domains and VPCs. (Amazon EC2 API Version 2014-06-15)
"""
def get(self, filters=None):
"""List EIPs and associated information."""
params = {}
if filters:
params["filters"] = make_filters(filters)
return self.call("DescribeAddresses",
response_data_key="Addresses",
**params)
def create(self, vpc=False):
"""Set vpc=True to allocate an EIP for a EC2-Classic instance.
Set vpc=False to allocate an EIP for a VPC instance.
"""
return self.call("AllocateAddress",
Domain="vpc" if vpc else "standard")
def destroy(self, eip_or_aid, disassociate=False):
"""Release an EIP. If the EIP was allocated for a VPC instance, an
AllocationId(aid) must be provided instead of a PublicIp. Setting
disassociate to True will attempt to disassociate the IP before
releasing it (required for associated nondefault VPC instances).
"""
if "." in eip_or_aid: # If an IP is given (Classic)
# NOTE: EIPs are automatically disassociated for Classic instances.
return "true" == self.call("ReleaseAddress",
response_data_key="return",
PublicIp=eip_or_aid)
else: # If an AID is given (VPC)
if disassociate:
self.disassociate(eip_or_aid)
return "true" == self.call("ReleaseAddress",
response_data_key="return",
AllocationId=eip_or_aid)
def associate(self, eip_or_aid,
instance_id='', network_interface_id='', private_ip=''):
"""Associate an EIP with a given instance or network interface. If
the EIP was allocated for a VPC instance, an AllocationId(aid) must
be provided instead of a PublicIp.
"""
if "." in eip_or_aid: # If an IP is given (Classic)
return self.call("AssociateAddress",
PublicIp=eip_or_aid,
InstanceId=instance_id,
NetworkInterfaceId=network_interface_id,
PrivateIpAddress=private_ip)
else: # If an AID is given (VPC)
return self.call("AssociateAddress",
AllocationId=eip_or_aid,
InstanceId=instance_id,
NetworkInterfaceId=network_interface_id,
PrivateIpAddress=private_ip)
def disassociate(self, eip_or_aid):
"""Disassociates an EIP. If the EIP was allocated for a VPC instance,
an AllocationId(aid) must be provided instead of a PublicIp.
"""
if "." in eip_or_aid: # If an IP is given (Classic)
return "true" == self.call("DisassociateAddress",
response_data_key="return",
PublicIp=eip_or_aid)
else: # If an AID is given (VPC)
return "true" == self.call("DisassociateAddress",
response_data_key="return",
AllocationId=eip_or_aid)
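# ElasticIP usage sketch (hypothetical ids, assuming an `aws` credential object):
#     ec2 = EC2(aws)
#     eip = ec2.ElasticIPs.create(vpc=True)  # allocate a VPC EIP
#     ec2.ElasticIPs.associate(eip['AllocationId'], instance_id='i-0123456789abcdef0')
#     ec2.ElasticIPs.destroy(eip['AllocationId'], disassociate=True)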
class InstanceCollection(AwsCollection, EC2ApiClient):
def get(self, instance_ids=None, filters=None):
"""List instance info."""
params = {}
if filters:
params["filters"] = make_filters(filters)
if instance_ids:
params['InstanceIds'] = instance_ids
reservations = self.call("DescribeInstances",
response_data_key="Reservations",
**params)
if reservations:
return list(chain(*(r["Instances"] for r in reservations)))
return []
def create(self, ami, count, config=None):
"""Create an instance using the launcher."""
return self.Launcher(config=config).launch(ami, count)
def destroy(self, instance_id):
"""Terminate a single given instance."""
return self.control(instance_id, "terminate")
def control(self, instances, action):
"""Valid actions: start, stop, reboot, terminate, protect, and
unprotect.
"""
if not isinstance(instances, list) and\
not isinstance(instances, tuple):
instances = [instances]
actions = {'start': {'operation': "StartInstances",
'response_data_key': "StartingInstances",
'InstanceIds': instances},
'stop': {'operation': "StopInstances",
'response_data_key': "StoppingInstances",
'InstanceIds': instances},
'reboot': {'operation': "RebootInstances",
'response_data_key': "return",
'InstanceIds': instances},
'terminate': {'operation': "TerminateInstances",
'response_data_key': "TerminatingInstances",
'InstanceIds': instances},
'protect': {'operation': "ModifyInstanceAttribute",
'response_data_key': "return",
'Attribute': 'disableApiTermination',
'Value': 'true'},
'unprotect': {'operation': "ModifyInstanceAttribute",
'response_data_key': "return",
'Attribute': 'disableApiTermination',
'Value': 'false'}}
if (action in ('protect', 'unprotect')):
for instance in instances:
self.call(InstanceId=instance, **actions[action])
return "true"
else:
return self.call(**actions[action])
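    # e.g. (a sketch, hypothetical ids):
    #     ec2.Instances.control('i-0123456789abcdef0', 'stop')
    #     ec2.Instances.control(['i-0123456789abcdef0', 'i-0fedcba9876543210'], 'protect')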
def Launcher(self, config=None):
"""Provides a configurable launcher for EC2 instances."""
class _launcher(EC2ApiClient):
"""Configurable launcher for EC2 instances. Create the Launcher
(passing an optional dict of its attributes), set its attributes
(as described in the RunInstances API docs), then launch().
"""
def __init__(self, aws, config):
super(_launcher, self).__init__(aws)
self.config = config
self._attr = list(self.__dict__.keys()) + ['_attr']
def launch(self, ami, min_count, max_count=0):
"""Use given AMI to launch min_count instances with the
current configuration. Returns instance info list.
"""
params = config.copy()
params.update(dict([i for i in self.__dict__.items()
if i[0] not in self._attr]))
return self.call("RunInstances",
ImageId=ami,
MinCount=min_count,
MaxCount=max_count or min_count,
response_data_key="Instances",
**params)
if not config:
config = {}
return _launcher(self._aws, config)
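    # Launcher usage sketch (attribute names follow the RunInstances API
    # parameters; values are hypothetical):
    #     launcher = ec2.Instances.Launcher()
    #     launcher.InstanceType = 'm3.medium'
    #     launcher.KeyName = 'my-key'
    #     instances = launcher.launch('ami-12345678', min_count=1)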
def status(self, all_instances=None, instance_ids=None, filters=None):
"""List instance info."""
params = {}
if filters:
params["filters"] = make_filters(filters)
if instance_ids:
params['InstanceIds'] = instance_ids
if all_instances is not None:
params['IncludeAllInstances'] = all_instances
statuses = self.call("DescribeInstanceStatus",
response_data_key="InstanceStatuses",
**params)
return statuses
def events(self, all_instances=None, instance_ids=None, filters=None):
"""a list of tuples containing instance Id's and event information"""
params = {}
if filters:
params["filters"] = make_filters(filters)
if instance_ids:
params['InstanceIds'] = instance_ids
statuses = self.status(all_instances, **params)
event_list = []
for status in statuses:
if status.get("Events"):
for event in status.get("Events"):
event[u"InstanceId"] = status.get('InstanceId')
event_list.append(event)
return event_list
class KeyPairCollection(AwsCollection, EC2ApiClient):
def get(self, filters=None):
"""List key info."""
params = {}
if filters:
params["filters"] = make_filters(filters)
return self.call("DescribeKeyPairs",
response_data_key="KeyPairs",
**params)
def create(self, key_name):
"""Create a new key with a given name."""
return self.call("CreateKeyPair", KeyName=key_name)
def destroy(self, key_name):
"""Delete a key."""
return self.call("DeleteKeyPair", KeyName=key_name)
class PlacementGroupCollection(AwsCollection, EC2ApiClient):
def get(self, filters=None):
# returns (sg_info, ...)
# DescribePlacementGroups
params = {}
if filters:
params["filters"] = make_filters(filters)
return self.call("DescribePlacementGroups",
response_data_key="PlacementGroups",
**params)
def create(self, group_name, strategy="cluster"):
# returns sg_info
params = {
"strategy": strategy
}
# CreatePlacementGroup
if callable(group_name):
params['group_name'] = group_name(self.environment)
else:
params['group_name'] = group_name
return self.call("CreatePlacementGroup", **params)
def destroy(self, pg):
# returns bool
# DeletePlacementGroup
return self.call("DeletePlacementGroup", group_name=pg)
class SecurityGroupCollection(AwsCollection, EC2ApiClient):
def get(self, filters=None, exclude_vpc=False):
# returns (sg_info, ...)
# DescribeSecurityGroups
params = {}
if filters:
params["filters"] = make_filters(filters)
groups = self.call("DescribeSecurityGroups",
response_data_key="SecurityGroups",
**params)
if groups and exclude_vpc:
# Exclude any group that belongs to a VPC
return [g for g in groups if not g.get('VpcId')]
else:
return groups
def create(self, name, description, vpc=None):
# returns sg_info
params = {
"Description": description,
}
# CreateSecurityGroup
if callable(name):
params['GroupName'] = name(self.environment)
else:
params['GroupName'] = name
if vpc:
params["VpcId"] = vpc
return self.call("CreateSecurityGroup", **params)
def destroy(self, sg):
# returns bool
# DeleteSecurityGroup
return self.call("DeleteSecurityGroup", GroupId=sg)
class IpPermissionsCollection(AwsCollection, EC2ApiClient):
def get(self, filters=None):
# returns (sgr_info, ...)
# DescribeSecurityGroups
raise NotImplementedError()
def modify(self, api_action, sgid, other, proto_spec):
"""Make a change to a security group. api_action is an EC2 API name.
Other is one of:
- a group (sg-nnnnnnnn)
- a group with account (<user id>/sg-nnnnnnnn)
- a CIDR block (n.n.n.n/n)
Proto spec is a triplet (<proto>, low_port, high_port)."""
params = {'group_id': sgid, 'ip_permissions': []}
perm = {}
params['ip_permissions'].append(perm)
proto, from_port, to_port = proto_spec
perm['IpProtocol'] = proto
perm['FromPort'] = from_port or 0
perm['ToPort'] = to_port or from_port or 65535
if other.startswith("sg-"):
perm['UserIdGroupPairs'] = [{'GroupId': other}]
elif "/sg-" in other:
account, group_id = other.split("/", 1)
perm['UserIdGroupPairs'] = [{
'UserId': account,
'GroupId': group_id,
}]
else:
perm['IpRanges'] = [{'CidrIp': other}]
return self.call(api_action, **params)
def add(self, sgid, other, proto_spec, direction="in"):
"""Add a security group rule to group <sgid>.
Direction is either 'in' (ingress) or 'out' (egress).
See modify() for other parameters."""
# returns bool
# AuthorizeSecurityGroupIngress, AuthorizeSecurityGroupEgress
if direction == "in":
api = "AuthorizeSecurityGroupIngress"
elif direction == "out":
api = "AuthorizeSecurityGroupEgress"
else:
raise ValueError("direction must be one of ('in', 'out')")
return self.modify(api, sgid, other, proto_spec)
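    # e.g. (a sketch): allow inbound SSH from a CIDR block to a hypothetical group:
    #     ec2.IpPermissions.add('sg-12345678', '10.0.0.0/16', ('tcp', 22, 22))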
def remove(self, sgid, other, proto_spec, direction="in"):
"""Remove a security group rule from group <sgid>.
Direction is either 'in' (ingress) or 'out' (egress).
See modify() for other parameters."""
# returns (removed_sgr_info, ...)
# RevokeSecurityGroupIngress, RevokeSecurityGroupEgress
if direction == "in":
api = "RevokeSecurityGroupIngress"
elif direction == "out":
api = "RevokeSecurityGroupEgress"
else:
raise ValueError("direction must be one of ('in', 'out')")
return self.modify(api, sgid, other, proto_spec)
class VolumeCollection(AwsCollection, EC2ApiClient):
"""Interface to get, create, destroy, and attach for EBS Volumes.
(Amazon EC2 API Version 2014-06-15)
"""
def get(self, volume_ids=None, filters=None):
"""List EBS Volume info."""
params = {}
if filters:
params["filters"] = make_filters(filters)
if isinstance(volume_ids, str):
volume_ids = [volume_ids]
return self.call("DescribeVolumes",
VolumeIds=volume_ids,
response_data_key="Volumes",
**params)
def create(self, az, size_or_snap, volume_type=None, iops=None,
encrypted=True):
"""Create an EBS Volume using an availability-zone and size_or_snap
parameter, encrypted by default.
        If the volume is created from a snapshot, (str)size_or_snap denotes
        the snapshot id. Otherwise, (int)size_or_snap denotes the number of
        GiB to allocate. iops must be set if the volume type is io1.
"""
kwargs = {}
kwargs['encrypted'] = encrypted
if volume_type:
kwargs['VolumeType'] = volume_type
if iops:
kwargs['Iops'] = iops
is_snapshot_id = False
try:
size_or_snap = int(size_or_snap)
except ValueError:
is_snapshot_id = True
if is_snapshot_id:
return self.call("CreateVolume", AvailabilityZone=az,
SnapshotId=size_or_snap, **kwargs)
return self.call("CreateVolume", AvailabilityZone=az,
Size=size_or_snap, **kwargs)
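    # e.g. (a sketch): a 100 GiB encrypted volume, or one restored from a snapshot:
    #     ec2.Volumes.create('us-east-1a', 100, volume_type='gp2')
    #     ec2.Volumes.create('us-east-1a', 'snap-0123456789abcdef0')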
def destroy(self, volume_id):
"""Delete a volume by volume-id and return success boolean."""
return 'true' == self.call("DeleteVolume", VolumeId=volume_id,
response_data_key="return")
def attach(self, volume_id, instance_id, device_path):
"""Attach a volume to an instance, exposing it with a device name."""
return self.call("AttachVolume",
VolumeId=volume_id, InstanceId=instance_id,
Device=device_path)
def detach(self, volume_id, instance_id='', device_path='', force=False):
"""Detach a volume from an instance."""
return self.call("DetachVolume",
VolumeId=volume_id, InstanceId=instance_id,
Device=device_path, force=force)
class SnapshotCollection(AwsCollection, EC2ApiClient):
def get(self, filters=None):
# returns (snap_info, ...)
# DescribeSnapshots
params = {}
if filters:
params["filters"] = make_filters(filters)
return self.call("DescribeSnapshots",
response_data_key="Snapshots",
**params)
def create(self, volume_id, description=None):
# returns snap_info
# CreateSnapshot
return self.call("CreateSnapshot",
VolumeId=volume_id,
Description=description)
def destroy(self, snapshot_id):
# returns bool
# DeleteSnapshot
return self.call("DeleteSnapshot", SnapshotId=snapshot_id)
class SubnetCollection(AwsCollection, EC2ApiClient):
def get(self, filters=None):
# returns (subnet_info, ...)
# DescribeSubnets
params = {}
if filters:
params["filters"] = make_filters(filters)
return self.call("DescribeSubnets",
response_data_key="Subnets",
**params)
def create(self, vpc_id, cidr, availability_zone):
# returns subnet_info
# CreateSubnet
return self.call("CreateSubnet",
VpcId=vpc_id,
CidrBlock=cidr,
response_data_key="Subnet")
def destroy(self, subnet_id):
# returns bool
# DeleteSubnet
if self.call("DeleteSubnet", SubnetId=subnet_id,
response_data_key="return"):
return True
return False
class VPCCollection(AwsCollection, EC2ApiClient):
def get(self, filters=None):
# returns (vpc_info, ...)
# DescribeVpcs
params = {}
if filters:
params["filters"] = make_filters(filters)
return self.call("DescribeVpcs", response_data_key="Vpcs", **params)
def create(self, cidr, tenancy="default"):
# returns vpc_info
# CreateVpc
raise NotImplementedError()
def destroy(self, vpc):
# returns bool
# DeleteVpc
raise NotImplementedError()
class TagCollection(AwsCollection, EC2ApiClient):
def get(self, filters=None):
# returns (tag_info, ...)
# DescribeTags
params = {}
if filters:
params["filters"] = make_filters(filters)
return self.call("DescribeTags",
response_data_key="Tags",
**params)
def create(self, resource_ids, tags):
# returns bool
# CreateTags
return self.call("CreateTags", resources=resource_ids, tags=tags)
def destroy(self, resource_ids, tags):
# returns bool
# DeleteTags
return self.call("DeleteTags", resources=resource_ids, tags=tags)
class ImageCollection(AwsCollection, EC2ApiClient):
def get(self, image_ids=None, owners=None, executable_users=None, filters=None):
# returns (image_info, ...)
# DescribeImages
params = {}
if filters:
params["filters"] = make_filters(filters)
if image_ids:
params["ImageIds"] = image_ids
if owners:
params["Owners"] = owners
if executable_users:
params["ExecutableUsers"] = executable_users
return self.call("DescribeImages",
response_data_key="Images",
**params)
def create(self, instance_id, name, no_reboot=True, description=None, block_device_mappings=None):
# returns image_id
# CreateImage
params = {
"InstanceId": instance_id,
"Name": name,
"NoReboot": no_reboot
}
if description:
params["Description"] = description
if block_device_mappings:
params["BlockDeviceMappings"] = block_device_mappings
return self.call("CreateImage",
response_data_key="ImageId",
**params)
def destroy(self, image_id):
# returns bool
# CreateImage
return self.call("DeregisterImage", ImageId=image_id)
```
#### File: acky/acky/s3.py
```python
from acky.api import AwsApiClient
try:
from urllib import parse
except ImportError:
import urlparse as parse
class InvalidURL(Exception):
def __init__(self, url, msg=None):
self.url = url
if not msg:
msg = "Invalid URL: {0}".format(url)
super(InvalidURL, self).__init__(msg)
def _parse_url(url=None):
"""Split the path up into useful parts: bucket, obj_key"""
if url is None:
return ('', '')
scheme, netloc, path, _, _ = parse.urlsplit(url)
if scheme != 's3':
raise InvalidURL(url, "URL scheme must be s3://")
if path and not netloc:
raise InvalidURL(url)
return netloc, path[1:]
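# e.g. _parse_url("s3://my-bucket/some/key") -> ("my-bucket", "some/key")
#      _parse_url() -> ("", "")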
class S3(AwsApiClient):
"""Interface for managing S3 buckets. (API Version 2006-03-01)"""
service_name = "s3"
def get(self, url=None, delimiter="/"):
"""Path is an s3 url. Ommiting the path or providing "s3://" as the
path will return a list of all buckets. Otherwise, all subdirectories
and their contents will be shown.
"""
params = {'Delimiter': delimiter}
bucket, obj_key = _parse_url(url)
if bucket:
params['Bucket'] = bucket
else:
return self.call("ListBuckets", response_data_key="Buckets")
if obj_key:
params['Prefix'] = obj_key
objects = self.call("ListObjects", response_data_key="Contents",
**params)
if objects:
for obj in objects:
obj['url'] = "s3://{0}/{1}".format(bucket, obj['Key'])
return objects
def create(self, url):
"""Create a bucket, directory, or empty file."""
bucket, obj_key = _parse_url(url)
if not bucket:
raise InvalidURL(url,
"You must specify a bucket and (optional) path")
if obj_key:
target = "/".join((bucket, obj_key))
else:
target = bucket
return self.call("CreateBucket", bucket=target)
def destroy(self, url, recursive=False):
"""Destroy a bucket, directory, or file. Specifying recursive=True
recursively deletes all subdirectories and files."""
bucket, obj_key = _parse_url(url)
if not bucket:
raise InvalidURL(url,
"You must specify a bucket and (optional) path")
if obj_key:
target = "/".join((bucket, obj_key))
else:
target = bucket
if recursive:
for obj in self.get(url, delimiter=''):
self.destroy(obj['url'])
return self.call("DeleteBucket", bucket=target)
def upload(self, local_path, remote_url):
"""Copy a local file to an S3 location."""
bucket, key = _parse_url(remote_url)
with open(local_path, 'rb') as fp:
return self.call("PutObject", bucket=bucket, key=key, body=fp)
def download(self, remote_url, local_path, buffer_size=8 * 1024):
"""Copy S3 data to a local file."""
bucket, key = _parse_url(remote_url)
response_file = self.call("GetObject", bucket=bucket, key=key)['Body']
with open(local_path, 'wb') as fp:
buf = response_file.read(buffer_size)
while buf:
fp.write(buf)
buf = response_file.read(buffer_size)
def copy(self, src_url, dst_url):
"""Copy an S3 object to another S3 location."""
src_bucket, src_key = _parse_url(src_url)
dst_bucket, dst_key = _parse_url(dst_url)
if not dst_bucket:
dst_bucket = src_bucket
params = {
'copy_source': '/'.join((src_bucket, src_key)),
'bucket': dst_bucket,
'key': dst_key,
}
return self.call("CopyObject", **params)
def move(self, src_url, dst_url):
"""Copy a single S3 object to another S3 location, then delete the
original object."""
self.copy(src_url, dst_url)
self.destroy(src_url)
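# S3 usage sketch (hypothetical bucket and keys, assuming an `aws` credential object):
#     s3 = S3(aws)
#     s3.get("s3://")                                      # list all buckets
#     s3.upload("local.txt", "s3://my-bucket/remote.txt")
#     s3.download("s3://my-bucket/remote.txt", "copy.txt")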
```
|
{
"source": "JDongian/ColorOfChoice",
"score": 3
}
|
#### File: ColorOfChoice/color_names/scrape.py
```python
import requests
from pyquery import PyQuery as pq
from colour import Color
URL_W3 = "https://www.w3.org/TR/css3-color/#svg-color"
SELECTOR_W3 = ".colortable:last tbody td, .colortable:last dfn" # wat
LEGAL_W3 = """ORIGINAL URL: https://www.w3.org/TR/css3-color/#html4
COPYRIGHT: Copyright © 2011 World Wide Web Consortium, (MIT, ERCIM, Keio, Beihang). http://www.w3.org/Consortium/Legal/2015/doc-license
STATUS: This section describes the status of this document at the time of its publication. Other documents may supersede this document. A list of current W3C publications and the latest revision of this technical report can be found in the W3C technical reports index at http://www.w3.org/TR/.
The (archived) public mailing list <EMAIL> (see instructions) is preferred for discussion of this specification. When sending e-mail, please put the text “css3-color” in the subject, preferably like this: “[css3-color] …summary of comment…”
This document was produced by the CSS Working Group (part of the Style Activity).
A separate implementation report contains a test suite and shows that each test in the test suite was passed by at least two independent implementations.
The list of comments on the most recent Last Call draft explains the changes that were made since that draft. Comments received during the Candidate Recommendation period (for the 14 May 2003 draft) and how they were addressed in this draft can be found in the disposition of comments. Comments received during the Last Call period (for the 14 February 2003 draft) and how they were addressed can be found in the disposition of comments.
A complete list of changes to this document is available.
This document has been reviewed by W3C Members, by software developers, and by other W3C groups and interested parties, and is endorsed by the Director as a W3C Recommendation. It is a stable document and may be used as reference material or cited from another document. W3C's role in making the Recommendation is to draw attention to the specification and to promote its widespread deployment. This enhances the functionality and interoperability of the Web.
This document was produced by a group operating under the 5 February 2004 W3C Patent Policy. W3C maintains a public list of any patent disclosures made in connection with the deliverables of the group; that page also includes instructions for disclosing a patent. An individual who has actual knowledge of a patent which the individual believes contains Essential Claim(s) must disclose the information in accordance with section 6 of the W3C Patent Policy.
"""
URL_RESENE = "http://people.csail.mit.edu/jaffer/Color/resenecolours.txt"
LEGAL_RESENE = """Resene RGB Values List
For further information refer to http://www.resene.co.nz
Copyright Resene Paints Ltd 2001
Permission to copy this software, to modify it, to redistribute it,
to distribute modified versions, and to use it for any purpose is
granted, subject to the following restrictions and understandings.
1. Any text copy made of this dictionary must include this copyright
notice in full.
2. Any redistribution in binary form must reproduce this copyright
notice in the documentation or other materials provided with the
distribution.
3. Resene Paints Ltd makes no warranty or representation that this
dictionary is error-free, and is under no obligation to provide
any services, by way of maintenance, update, or otherwise.
4. There shall be no use of the name of Resene or Resene Paints Ltd in
any advertising, promotional, or sales literature without prior
written consent in each case.
5. These RGB colour formulations may not be used to the detriment of
Resene Paints Ltd."""
URL_WIKI = "https://en.wikipedia.org/wiki/List_of_colors_%28compact%29"
URL_CRAYOLA = "https://en.wikipedia.org/wiki/List_of_Crayola_crayon_colors"
def fetch_w3():
raw_w3 = pq(URL_W3)(SELECTOR_W3)
data_w3 = [r.text.strip() for r in raw_w3 if (r.text and r.text.strip())]
return zip(data_w3[0::3], (Color(_) for _ in data_w3[1::3])), LEGAL_W3
def fetch_resene():
raw_resene = (r.decode().split('\t')
for r in requests.get(URL_RESENE).content.splitlines()[27:])
    data_resene = ((' '.join(n.split()[1:])[:-1],
                    Color('#{:02x}{:02x}{:02x}'.format(int(r), int(g), int(b))))
                   for (n, r, g, b) in raw_resene)
return data_resene, LEGAL_RESENE
if __name__ == "__main__":
print(list(fetch_w3())[0])
print(list(fetch_resene())[0])
```
|
{
"source": "JDongian/cookiecutter-pypackage",
"score": 3
}
|
#### File: {{cookiecutter.project_slug}}/tests/test_example.py
```python
import unittest
class Test{{cookiecutter.project_slug}}(unittest.TestCase):
"""Basic tests.
"""
def test_basic(self):
"""Check basic functionality.
"""
tests = ((), )
targets = ((), )
for test, target in zip(tests, targets):
assert test == target, \
"{0} is not {1}.".format(test, target)
```