| prompt | completion | api |
| --- | --- | --- |
| stringlengths 19-879k | stringlengths 3-53.8k | stringlengths 8-59 |
"""
Draw images to enhance interpretability and explanation
"""
import numpy as np
import pandas as pd
import torch
import os
import os.path as osp
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from torch.utils.data import DataLoader
import sys
sys.path.append("..")
import func.cal as cal
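# Helper: detach a tensor from the autograd graph, move it to the CPU and return it as a NumPy array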
def bnp(h):
return h.detach().cpu().numpy()
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# device = "cpu"
l_x = 60 # Data sequence length
l_y = 1 # Label sequence length
lr = 0.0001 # Learning rate
weight_decay = 5e-4
epochs = 4000
hidden_dim = 64
save_fig = True # Whether to save picture
ratio_train = 0.5 # Proportion of training datasets
fig_size = (16, 16)
ts_name_all = ["cli_dash", "HadCRUT5", "temp_month", "temp_year", "elect", "traffic", "sales"]
ts_name_folder = "HadCRUT5" # Name of the folder where the data resides
ts_name = "HadCRUT5_global" # Name of the selected time series
iv = 1 # sampling interval, used for plotting curves
way = "mean" # The style of plot curves of real data and predict results
x_address = osp.join("../datasets", ts_name_folder, ts_name + ".npy")
x = np.load(x_address)
num = x.shape[0] # The length of time series
graph_address = osp.join("../graph", ts_name)
pca = PCA(n_components=1)
num_train = int(ratio_train * num)
data_train, data_test = x[:num_train], x[num_train:num] # get training dataset and test dataset
"""
ResGraphNet
"""
mid = np.array(data_test.tolist() + [data_test[-1]] * 9)
len_interp = l_y + 6
data_test_ = np.array(mid[:-l_y].tolist() + mid[-len_interp-l_y:-l_y].tolist() + mid[-l_y:].tolist())
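# NOTE: data_test_ appears to pad the tail of the test series with len_interp (= l_y + 6)
# repeated values so the graph gains placeholder nodes at its end; those padded nodes are
# trimmed again later via the [:-len_interp] slices before plotting.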
# Using Graph Neural network, prepare data information
x_train, y_train = cal.create_inout_sequences(data_train, l_x, l_y, style="arr")
x_test, y_test = cal.create_inout_sequences(data_test_, l_x, l_y, style="arr")
x_train = torch.from_numpy(x_train).float().to(device)
x_test = torch.from_numpy(x_test).float().to(device)
y_train = torch.from_numpy(y_train).float().to(device)
y_test = torch.from_numpy(y_test).float().to(device)
num_nodes = x_train.shape[0] + x_test.shape[0]
num_train = x_train.shape[0]
x = torch.cat((x_train, x_test), dim=0).to(device)
y = torch.cat((y_train, y_test), dim=0).to(device)
adm = cal.path_graph(num_nodes)
edge_index, edge_weight = cal.tran_adm_to_edge_index(adm)
edge_index = edge_index.to(device)
train_index = torch.arange(num_train, dtype=torch.long)
test_index = torch.arange(num_train, num_nodes, dtype=torch.long)
train_mask = cal.index_to_mask(train_index, num_nodes).to(device)
test_mask = cal.index_to_mask(test_index, num_nodes).to(device)
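# Illustrative sketches of the func.cal helpers used above (assumptions about their
# behaviour, not the actual implementations shipped in func.cal):
def _path_graph_sketch(n):
    # adjacency matrix of a simple chain graph: node i <-> node i + 1
    adm = np.zeros((n, n))
    idx = np.arange(n - 1)
    adm[idx, idx + 1] = 1.0
    adm[idx + 1, idx] = 1.0
    return adm
def _index_to_mask_sketch(index, size):
    # boolean node mask of length `size`, True at the given indices
    mask = torch.zeros(size, dtype=torch.bool)
    mask[index] = True
    return mask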
ResGraphNet_address = osp.join("../result", ts_name, "ResGraphNet")
model = torch.load(osp.join(ResGraphNet_address, "ResGraphNet.pkl")).to(device)
sage1, sage2 = model.sage1, model.sage2
cnn1, cnn2, cnn3, cnn4, cnn5, cnn6 = model.cnn1, model.cnn2, model.cnn3, model.cnn4, model.cnn5, model.cnn6
linear1, linear2 = model.linear1, model.linear2
drop = model.drop
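# Replay the ResGraphNet forward pass layer by layer so that the intermediate embeddings
# (out_sage1 after the first GraphSAGE layer, out_cnn6 after the residual CNN stack) can
# be reduced to one dimension with PCA and plotted alongside the final prediction.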
out_sage1 = sage1(x, edge_index)
out_cnn1 = cnn1(out_sage1.unsqueeze(0).unsqueeze(0))
out_cnn3 = cnn3(drop(cnn2(drop(out_cnn1)))) + out_cnn1
out_cnn5 = cnn5(drop(cnn4(drop(out_cnn3)))) + out_cnn3
out_cnn6 = cnn6(out_cnn5).squeeze(0).squeeze(0)
out_sage2 = sage2(out_cnn6, edge_index)
out_sage1 = pca.fit_transform(bnp(out_sage1[test_mask, :][:-len_interp, :]))
out_res = pca.fit_transform(bnp(out_cnn6[test_mask, :][:-len_interp, :]))
out_sage2 = out_sage2[test_mask, :][:-len_interp, -1]
y = y[test_mask][:-len_interp, -1]
plt.figure(figsize=fig_size)
plt.plot(bnp(y), alpha=0.5, linestyle='--', label="$y$", c="b")
plt.plot(out_sage1, label="$f_{1}$", alpha=0.5, c="g")
plt.plot(out_res, label="$f_{2}$", alpha=0.5, c="orange")
plt.plot(bnp(out_sage2), label=r"$\hat{y}$", alpha=0.5, c="r")
plt.legend(fontsize=30)
plt.xlabel("Year", fontsize=40)
plt.ylabel("Anomaly ($^{\circ}$C)", fontsize=40)
x_tick = [0, 240, 480, 720, 960]
x_label = ["1940", "1960", "1980", "2000", "2020"]
plt.xticks(x_tick, x_label, fontsize=25)
plt.yticks(fontsize=25)
# plt.title("ResGraphNet", fontsize=40)
if save_fig:
plt.savefig(osp.join(graph_address, "explanation_ResGraphNet.png"))
"""
GNN Model
"""
x_train, y_train = cal.create_inout_sequences(data_train, l_x, l_y, style="arr")
x_test, y_test = cal.create_inout_sequences(data_test, l_x, l_y, style="arr")
x_train = torch.from_numpy(x_train).float().to(device)
x_test = torch.from_numpy(x_test).float().to(device)
y_train = torch.from_numpy(y_train).float().to(device)
y_test = torch.from_numpy(y_test).float().to(device)
num_nodes = x_train.shape[0] + x_test.shape[0]
x = torch.cat((x_train, x_test), dim=0)
y = torch.cat((y_train, y_test), dim=0)
adm = cal.path_graph(num_nodes)
edge_index, edge_weight = cal.tran_adm_to_edge_index(adm)
edge_index = edge_index.to(device)
train_index = torch.arange(num_train, dtype=torch.long)
test_index = torch.arange(num_train, num_nodes, dtype=torch.long)
train_mask = cal.index_to_mask(train_index, num_nodes).to(device)
test_mask = cal.index_to_mask(test_index, num_nodes).to(device)
GNNModel_address = osp.join("../result", ts_name, "GNNModel")
model = torch.load(osp.join(GNNModel_address, "GraphSage.pkl")).to(device)
sage1, sage2 = model.sage1, model.sage2
drop = model.drop
out_sage1 = sage1(x, edge_index)
out_sage2 = sage2(drop(out_sage1), edge_index)
out_sage1 = pca.fit_transform(bnp(out_sage1[test_mask, :]))
out_sage2 = out_sage2[test_mask, :][:, -1]
y = y[test_mask, :][:, -1]
plt.figure(figsize=fig_size)
plt.plot(bnp(y), alpha=0.5, linestyle='--', label="$y$", c="b")
plt.plot(out_sage1, label=r"$f_{1}^{\ \ ''}$", alpha=0.5, c="g")
plt.plot(bnp(out_sage2), label=r"$\hat{y}^{\ ''}$", alpha=0.5, c="r")
plt.legend(fontsize=30)
# plt.title("GNNModel", fontsize=40)
plt.xlabel("Year", fontsize=40)
plt.ylabel("Anomaly ($^{\circ}$C)", fontsize=40)
x_tick = [0, 240, 480, 720, 960]
x_label = ["1940", "1960", "1980", "2000", "2020"]
plt.xticks(x_tick, x_label, fontsize=25)
plt.yticks(fontsize=25)
if save_fig:
plt.savefig(osp.join(graph_address, "explanation_GNNModel.png"))
"""
RES Model
"""
batch_size = 32
x_train, y_train = cal.create_inout_sequences(data_train, l_x, l_y, style="arr")
x_test, y_test = cal.create_inout_sequences(data_test, l_x, l_y, style="arr")
train_dataset = cal.MyData(x_train, y_train)
test_dataset = cal.MyData(x_test, y_test)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
RESModel_address = osp.join("../result", ts_name, "RESModel")
model = torch.load(osp.join(RESModel_address, "RESModel.pkl")).to(device)
lin_pre = model.lin_pre
cnn1, cnn2, cnn3, cnn4, cnn5, cnn6 = model.cnn1, model.cnn2, model.cnn3, model.cnn4, model.cnn5, model.cnn6
last1, last2 = model.last1, model.last2
drop = model.drop
out_fc1, out_res, out_fc2, true = [], [], [], []
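# For each test batch: replay the RESModel forward pass, reduce the intermediate feature
# maps to one dimension with PCA, keep only the last prediction step and label, and
# accumulate the per-batch results into single arrays.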
for item, (x_test, y_test) in enumerate(test_loader):
x_test, y_test = x_test.to(device), y_test.to(device)
out_fc1_one = lin_pre(x_test.unsqueeze(1).unsqueeze(3))
out_cnn1_one = cnn1(out_fc1_one)
out_cnn2_one = cnn2(drop(out_cnn1_one))
out_cnn3_one = cnn3(drop(out_cnn2_one)) + out_cnn1_one
out_cnn4_one = cnn4(drop(out_cnn3_one))
out_cnn5_one = cnn5(drop(out_cnn4_one)) + out_cnn3_one
out_res_one = cnn6(out_cnn5_one)
out_fc2_one = last2(last1(out_res_one).squeeze(3).squeeze(1))
out_fc1_one = out_fc1_one.detach().cpu().numpy()[:, 0, :, :]
out_fc1_one = pca.fit_transform(np.max(out_fc1_one, axis=2))
out_res_one = out_res_one.detach().cpu().numpy()[:, 0, :, :]
out_res_one = pca.fit_transform(np.max(out_res_one, axis=2))
out_fc2_one = out_fc2_one.detach().cpu().numpy()[:, -1]
true_one = y_test.detach().cpu().numpy()[:, -1]
if item == 0:
out_fc1 = out_fc1_one
out_res = out_res_one
out_fc2 = out_fc2_one
true = true_one
else:
out_fc1 = np.concatenate((out_fc1, out_fc1_one), axis=0)
out_res = np.concatenate((out_res, out_res_one), axis=0)
out_fc2 = np.concatenate((out_fc2, out_fc2_one), axis=0)
true = np.concatenate((true, true_one), axis=0)
import os
import subprocess
import pandas as pd
import numpy as np
import pysam  # BAM access used by pysam_getPeaksAt
import pyBigWig  # bigwig access used by getPeaksAt
from pybedtools import BedTool  # bed intersections used by bedtools_getPeaksAt
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import cm
from genepy.epigenetics import chipseq as chip
from genepy.utils import helper as h
def plotAverageOfSamples(samples, folder="", showAll=False, maxv=None, minv=None):
res = []
plt.figure()
plt.ylim(minv,maxv)
for sample in samples:
data = pd.read_csv(sample, sep='\t', skiprows=1, header=None, names=['chr', 'start', 'end', 'name', "foldchange","."]+list(range(600)))
r = data[list(range(600))].mean().tolist()
res.append(r)
if showAll:
sns.lineplot(data=np.array(r), color="#BFBFFF")
sns.lineplot(data=np.array(res).mean(0), color="#1F1FFF")
if folder:
plt.savefig(folder+"_averageofsamples.pdf")
return res
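# Example usage (illustrative file names):
# profiles = plotAverageOfSamples(["sampleA.regions.tsv", "sampleB.regions.tsv"],
#                                 folder="results/avg_profile", showAll=True)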
def pysam_getPeaksAt(peaks, bams, folder='data/seqs/', window=1000, numpeaks=1000, numthreads=8):
# get pysam data
# ask for counts only at specific locus based on windows from center+-size from sorted MYC peaks
# for each counts, do a rolling average (or a convolving of the data) with numpy
# append to an array
# return array, normalized
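# The "rolling average (or a convolving of the data)" mentioned above can be done with a
# plain convolution, e.g. (illustrative, not used directly in this function):
#     kernel = np.ones(win) / win
#     smooth = np.convolve(counts, kernel, mode="same")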
loaded = {}
res = {i: np.zeros((len(peaks), window * 2)) for i in bams}
peaks = peaks.sort_values(by="foldchange", ascending=False).iloc[:numpeaks]
peaks.chrom = peaks.chrom.astype(str)
for val in bams:
loaded.update({val: pysam.AlignmentFile(
folder + val, 'rb', threads=numthreads)})
for k, bam in loaded.items():
for num, (i, val) in enumerate(peaks.iterrows()):
print(int(num / len(peaks)), end='\r')
center = int((val['start'] + val['end']) / 2)
for pileupcolumn in bam.pileup(val['chrom'], start=center - window,
stop=center + window, truncate=True):
res[k][num][pileupcolumn.pos - (center - window)] = pileupcolumn.n
fig, ax = plt.subplots(1, len(res))
for i, (k, val) in enumerate(res.items()):
sns.heatmap(val, ax=ax[i])
ax[i].set_title(k.split('.')[0])
fig.show()
return res, fig
def bedtools_getPeaksAt(peaks, bams, folder='data/seqs/', window=1000, numpeaks=1000, numthreads=8):
"""
get pysam data
ask for counts only at specific locus based on windows from center+-size from sorted MYC peaks
for each counts, do a rolling average (or a convolving of the data) with numpy
append to an array
return array, normalized
"""
loaded = {}
center = [int((val['start'] + val['end']) / 2) for k, val in peaks.iterrows()]
peaks['start'] = [c - window for c in center]
peaks['end'] = [c + window - 1 for c in center]
peaks[peaks.columns[:3]].sort_values(by=['chrom', 'start']).to_csv(
'temp/peaks.bed', sep='\t', index=False, header=False)
bedpeaks = BedTool('temp/peaks.bed')
fig, ax = plt.subplots(1, len(bams))
peakset = peaks["foldchange"].values.argsort()[::-1][:numpeaks]
for i, val in enumerate(bams):
coverage = BedTool(folder + val).intersect(bedpeaks).genome_coverage(bga=True, split=True)\
.intersect(bedpeaks).to_dataframe(names=['chrom', 'start', 'end', 'coverage'])
cov = np.zeros((len(peaks), window * 2), dtype=int)
j = 0
# pdb.set_trace()  # debugging breakpoint left in the original; disabled so the function can run unattended
for i, (k, val) in enumerate(peaks.iterrows()):
print(i / len(peaks), end='\r')
while coverage.iloc[j].start > val.start:
j -= 1
while coverage.iloc[j].start < val.end:
cov[i][coverage.iloc[j].start - val.start:coverage.iloc[j].end - val.start] =\
coverage.iloc[j].coverage
j += 1
sns.heatmap(coverage, ax=ax[i])
ax[i].set_title(val.split('.')[0])
fig.show()
return None, fig
def makeProfiles(matx=[], folder='', matnames=[], title='',
name='temp/peaksat.pdf', refpoint="TSS", scale=None,
sort=False, withDeeptools=True, cluster=1, vmax=None, vmin=None, overlap=False,
legendLoc=None):
if withDeeptools:
if not (len(matnames) == 2 and len(matx) == 2):
raise ValueError('you need two mat.gz files and two names')
h.createFoldersFor(name)
cmd = 'computeMatrixOperations relabel -m '
cmd += matx[0] + ' -o '+matx[0]+' --groupLabels '+matnames[0]
cmd += ' && computeMatrixOperations relabel -m '
cmd += matx[1] + ' -o '+matx[1]+' --groupLabels '+matnames[1]
cmd += ' && computeMatrixOperations rbind -m '
cmd += matx[0] + ' ' + matx[1] + " -o " + \
'.'.join(name.split('.')[:-1]) + ".gz"
cmd += ' && plotProfile'
cmd += " --matrixFile " + '.'.join(name.split('.')[:-1]) + ".gz"
cmd += " --outFileName " + name
cmd += " --refPointLabel " + refpoint
if vmax is not None:
cmd += " -max "+str(vmax)
if vmin is not None:
cmd += " -min "+str(vmin)
if cluster > 1:
cmd += " --perGroup --kmeans "+str(cluster)
if legendLoc:
cmd += " --legendLocation "+legendLoc
if title:
cmd += " --plotTitle " + title
data = subprocess.run(cmd, shell=True, capture_output=True)
print(data)
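# Example call (illustrative matrix files previously produced by deeptools computeMatrix):
# makeProfiles(matx=["cond1.mat.gz", "cond2.mat.gz"], matnames=["cond1", "cond2"],
#              title="H3K27ac", name="temp/peaksat.pdf", refpoint="TSS", cluster=2)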
def getPeaksAt(peaks, bigwigs, folder='', bigwignames=[], peaknames=[], window=1000, title='', numpeaks=4000, numthreads=8,
width=5, length=10, torecompute=False, name='temp/peaksat.pdf', refpoint="TSS", scale=None,
sort=False, withDeeptools=True, onlyProfile=False, cluster=1, vmax=None, vmin=None, overlap=False,
legendLoc=None):
"""
get pysam data
ask for counts only at specific locus based on windows from center+-size from sorted MYC peaks
for each counts, do a rolling average (or a convolving of the data) with numpy
append to an array
return array, normalized
"""
if withDeeptools:
if isinstance(peaks, pd.DataFrame):
peaks.to_csv('peaks.bed', sep='\t', index=False, header=False)
peaks = 'peaks.bed '
elif type(peaks) == list:
pe = ''
i = 0
for n, p in enumerate(peaks):
if 20 < int(os.popen('wc -l ' + p).read().split(' ')[0]):
pe += p + ' '
elif len(peaknames) > 0:
peaknames.pop(n-i)
i += 1
peaks = pe
elif type(peaks) == str:
peaks += ' '
else:
raise ValueError(' we dont know this filetype')
if type(bigwigs) is list:
pe = ''
for val in bigwigs:
pe += folder + val + ' '
bigwigs = pe
else:
bigwigs = folder + bigwigs + ' '
h.createFoldersFor(name)
cmd = ''
if not os.path.exists('.'.join(name.split('.')[:-1]) + ".gz") or torecompute:
cmd += "computeMatrix reference-point -S "
cmd += bigwigs
cmd += " --referencePoint "+refpoint
cmd += " --regionsFileName " + peaks
cmd += " --missingDataAsZero"
cmd += " --outFileName " + '.'.join(name.split('.')[:-1]) + ".gz"
cmd += " --upstream " + str(window) + " --downstream " + str(window)
cmd += " --numberOfProcessors " + str(numthreads) + ' && '
cmd += "plotHeatmap" if not onlyProfile else 'plotProfile'
if type(name) is list:
if not onlyProfile:
raise ValueError('needs to be set to True, can\'t average heatmaps')
cmd += " --matrixFile " + '.gz '.join(name) + ".gz"
if average:
cmd += "--averageType mean"
else:
cmd += " --matrixFile " + '.'.join(name.split('.')[:-1]) + ".gz"
cmd += " --outFileName " + name
cmd += " --refPointLabel " + refpoint
if vmax is not None:
cmd += " -max "+str(vmax)
if vmin is not None:
cmd += " -min "+str(vmin)
if cluster > 1:
cmd += " --perGroup --kmeans "+str(cluster)
if overlap:
if onlyProfile:
cmd += " --plotType overlapped_lines"
else:
raise ValueError("overlap only works when onlyProfile is set")
if legendLoc:
cmd += " --legendLocation "+legendLoc
if len(peaknames) > 0:
pe = ''
for i in peaknames:
pe += ' ' + i
cmd += " --regionsLabel" + pe
if type(bigwigs) is list:
if len(bigwignames) > 0:
pe = ''
for i in bigwignames:
pe += ' "' + i + '"'
cmd += " --samplesLabel" + pe
if title:
cmd += " --plotTitle '"+title+"'"
data = subprocess.run(cmd, shell=True, capture_output=True)
print(data)
else:
if 'relative_summit_pos' in peaks.columns:
center = [int((val['start'] + val['relative_summit_pos']))
for k, val in peaks.iterrows()]
else:
center = [int((val['start'] + val['end']) / 2)
for k, val in peaks.iterrows()]
pd.set_option('mode.chained_assignment', None)
peaks['start'] = [c - window for c in center]
peaks['end'] = [c + window for c in center]
fig, ax = plt.subplots(1, len(bigwigs), figsize=[width, length])
fig.suptitle(title if title else 'Chip Heatmap')
if sort:
peaks = peaks.sort_values(by=["foldchange"], ascending=False)
if numpeaks > len(peaks):
numpeaks = len(peaks) - 1
cov = {}
maxs = []
for num, bigwig in enumerate(bigwigs):
bw = pyBigWig.open(folder + bigwig)
co = np.zeros((numpeaks, window * 2), dtype=int)
scale = scale[bigwig] if isinstance(scale, dict) else 1
for i, (k, val) in enumerate(peaks.iloc[:numpeaks].iterrows()):
try:
co[i] = np.nan_to_num(bw.values(str(val.chrom), val.start, val.end), 0)
except RuntimeError as e:
print(str(val.chrom), val.start, val.end)
pass
cov[bigwig] = co
maxs.append(co.max())
for num, bigwig in enumerate(bigwigs):
sns.heatmap(cov[bigwig] * scale, ax=ax[num], vmax=max(maxs), yticklabels=[], cmap=cmaps[num],
cbar=True)
ax[num].set_title(bigwig.split('.')[0])
fig.subplots_adjust(wspace=0.1)
fig.show()
fig.savefig(name)
return cov, fig
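# Example (illustrative file names), deeptools branch; the heatmap is written to `name`:
# getPeaksAt("peaks.bed", ["chip1.bw", "chip2.bw"], folder="data/bigwigs/",
#            bigwignames=["chip1", "chip2"], name="temp/peaksat.pdf", refpoint="TSS")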
def andrew(groups, merged, annot, enr=None, pvals=None, cols=8, precise=True, title = "sorted clustermap of cobindings clustered", folder="", rangeval=4, okpval=10**-3, size=(20,15),vmax=3, vmin=0):
if enr is None or pvals is None:
enr, pvals = chip.enrichment(merged, groups=groups)
rand = np.random.choice(merged.index, 5000)
"""
A module containing unit tests for the `tpwcs` module.
Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
import copy
import pytest
from distutils.version import LooseVersion
import numpy as np
from astropy.modeling.models import Scale, Identity
from astropy import wcs as fitswcs
try:
import gwcs
if LooseVersion(gwcs.__version__) > '0.12.0':
from gwcs.geometry import SphericalToCartesian
_GWCS_VER_GT_0P12 = True
else:
_GWCS_VER_GT_0P12 = False
except ImportError:
_GWCS_VER_GT_0P12 = False
import astropy
if LooseVersion(astropy.__version__) >= '4.0':
_ASTROPY_VER_GE_4 = True
from astropy.modeling import CompoundModel
else:
_ASTROPY_VER_GE_4 = False
from tweakwcs.linearfit import build_fit_matrix
from tweakwcs import tpwcs
from .helper_tpwcs import (make_mock_jwst_wcs, make_mock_jwst_pipeline,
DummyTPWCS, create_DetToV2V3, create_V2V3ToDet)
_ATOL = 100 * np.finfo(np.array([1.]).dtype).eps
_NO_JWST_SUPPORT = not (_ASTROPY_VER_GE_4 and _GWCS_VER_GT_0P12)
def test_tpwcs():
tpwcs = DummyTPWCS(None, meta={})
x, y, ra, dec = np.random.random(4)
matrix = np.random.random((2, 2))
shift = np.random.random(2)
assert tpwcs.world_to_det(ra, dec) == (ra, dec)
assert tpwcs.world_to_tanp(ra, dec) == (ra, dec)
assert tpwcs.det_to_world(x, y) == (x, y)
assert tpwcs.det_to_tanp(x, y) == (x, y)
assert tpwcs.tanp_to_det(x, y) == (x, y)
assert tpwcs.tanp_to_world(x, y) == (x, y)
assert tpwcs.tanp_center_pixel_scale == 1
assert tpwcs.wcs is None
assert tpwcs.original_wcs is None
assert isinstance(tpwcs.copy(), DummyTPWCS)
assert tpwcs.bounding_box is None
tpwcs.set_correction(matrix=matrix, shift=shift,
meta={'pytest': 'ABC.TPWCS'}, pytest_kwarg=True)
assert np.all(tpwcs.meta['matrix'] == matrix)
assert np.all(tpwcs.meta['shift'] == shift)
assert tpwcs.meta['pytest'] == 'ABC.TPWCS'
with pytest.raises(TypeError) as arg_err:
tpwcs.set_correction(matrix, shift, None, {'pytest': 'ABC.TPWCS'},
'some_weird_arg')
assert (
arg_err.value.args[0].endswith(
"set_correction() takes from 1 to 5 positional arguments but 6 were given"
)
)
@pytest.mark.skipif(_NO_JWST_SUPPORT, reason="requires gwcs>=0.12.1")
def test_mock_jwst_gwcs():
w = make_mock_jwst_wcs(v2ref=123, v3ref=500, roll=115, crpix=[-512, -512],
cd=[[1e-5, 0], [0, 1e-5]], crval=[82, 12])
assert np.allclose(w.invert(*w(23, 1023)), (23, 1023))
@pytest.mark.skipif(_NO_JWST_SUPPORT, reason="requires gwcs>=0.12.1")
@pytest.mark.parametrize('crpix, cd', [
(np.zeros(3), np.diag(np.ones(3))),
(np.zeros((2, 2)), np.diag(np.ones(2))),
])
def test_mock_wcs_fails(crpix, cd):
from astropy.modeling import InputParameterError
with pytest.raises(InputParameterError):
make_mock_jwst_wcs(v2ref=123, v3ref=500, roll=15, crpix=crpix,
cd=cd, crval=[82, 12])
with pytest.raises(InputParameterError):
create_DetToV2V3(v2ref=123, v3ref=500, roll=15, crpix=crpix, cd=cd)
with pytest.raises(InputParameterError):
create_V2V3ToDet(v2ref=123, v3ref=500, roll=15, crpix=crpix, cd=cd)
@pytest.mark.skipif(_NO_JWST_SUPPORT, reason="requires gwcs>=0.12.1")
def test_v2v3todet_roundtrips():
s2c = (Scale(1.0 / 3600.0) & Scale(1.0 / 3600.0)) | SphericalToCartesian(wrap_lon_at=180)
s = 1.0e-5
crpix = np.random.random(2)
alpha = 0.25 * np.pi * np.random.random()
x, y = 1024 * np.random.random(2)
v2, v3 = 45 * np.random.random(2)
cd = [[s * np.cos(alpha), -s * np.sin(alpha)],
[s * np.sin(alpha), s * np.cos(alpha)]]
# -*- coding: utf-8 -*-
"""
Created on Monday 18 may 2020
All the thesis code, no code excecution!
@author: Dainean
"""
#Prepare the python system
import pandas as pd #Dataframes
import numpy as np #Numpy
# Reading and saving fits files
import os #Move around in our OS
from astropy.table import Table
from astropy.io import fits #Working with fits
#Isolation Foreststuffs
import eif as iso #Expanded Isolation Forest
#Clustering
from scipy.sparse import diags # Laplacian scoring
from skfeature.utility.construct_W import construct_W # Laplacian scoring
from sklearn.cluster import KMeans #Kmeans clustering
from sklearn.preprocessing import StandardScaler
# For PFA
from sklearn.decomposition import PCA
from collections import defaultdict
from sklearn.metrics.pairwise import euclidean_distances
# Plotting
import matplotlib.pyplot as plt
import seaborn as sns #improved plots
#Working directory control
cwd = os.getcwd()
#Selecting dataset
#change dataset here, Alpha, prichi or beta
#dataset = "Alpha" #Initial max row dataset
#dataset = "prichi" #prichi < 3 filtered dataset, 24999 rows. OBSELETE
#dataset = "beta" #prichi < 2 filtered dataset, 13787 rows
#dataset = "gamma" #prichi < 2 filtered dataset, (removed photometric)) OBSELETE
#dataset = "delta" #updated DB creator, based on GaussFitSimple, 28128 rows
#dataset = "epsilon" #trimmed down version of delta, prichi <2, 10941 rows (for easier computation)
#dataset = "zeta" # Full Photometric, GaussFitSimple, prichi <2, 10941 rows × 134 columns
#dataset = "zeta" # Full Photometric, GaussFitSimple, prichi <2, 10941 rows × 134 columns
dataset = "eta" # Full Photometric, GaussFitSimple, all columns
detect_path = True #this is for easier working in spyder
#Set up directory path, load initial dataframes
if detect_path == True:
print("Initial working directory is:", cwd)
if '31618' in cwd:
print("Working at Dora")
location = "dora"
if 'Dainean' in cwd:
print("Working at home, changing to onedrive folder")
location = "home"
if 'Onedrive' in cwd:
print("Working in onedrive folder")
location = "home"
if 'Dropbox' in cwd:
print("Working at home, changing to onedrive folder")
location = "home"
if location == "home":
os.chdir(r'D:\Onedrive\Thesis\support\%s'%(dataset))
print(os.getcwd())
if location == "dora":
os.chdir(r'C:\Sander\support\%s'%(dataset))
print(os.getcwd())
#Loading dataframes Only part for now
phot = pd.read_hdf('Parts_DB.h5', 'Photometric')
col = pd.read_hdf('Parts_DB.h5', 'Colour')
spec = pd.read_hdf('Parts_DB.h5', 'Spectral')
full = pd.read_hdf('ThesisDB.h5', 'Dataframe')
dropped = int(phot.shape[0] * 0.05) #we can safely drop 5% of our dataset.
# Is this enough with such a large feature space? It seems to be more than we get by filtering EIF above 0.5 out!
#full = full.iloc[:,6:] #Addition
combi = pd.merge(phot,spec, right_index=True, left_index=True, how='inner') #just phot and spec
#full[full.columns[full.columns.str.endswith('u')]]
#a = np.array(['size90u', 'ABSMAGu','MU@Eu','HA_EW', 'OII_EW'])
#inv = full[a]
#often used to return the name of a dataframe as a string
# Assorted functions:
def get_df_name(df):
"""returns the name of a dataframe as a string"""
name =[x for x in globals() if globals()[x] is df][0]
return name
def pandafy(fits_filename):
"""
Turns a .fits file into a pandas DataFrame."""
dat = Table.read(fits_filename, format='fits')
df = dat.to_pandas(index = 'CATAID')
return(df)
def contains(df, string):
df = df[df.columns[df.columns.str.contains(string)]]
return df
def endswith(df, string):
df = df[df.columns[df.columns.str.endswith(string)]]
return df
def startswith(df, string):
df = df[df.columns[df.columns.str.startswith(string)]]
return df
def fittify(df,filename='ThesisDB_selected.fits'): #say which dataframe you want to turn into a fit file
holder = []
for i in range(df.columns.values.size):
holder.append(fits.Column(name=df.columns.values[i], format='D', array=df.iloc[:,i]))
cols = fits.ColDefs(holder)
hdu = fits.BinTableHDU.from_columns(cols)
hdu.writeto(filename,overwrite=True)
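# Example: write a filtered dataframe back out as a FITS table (illustrative file name):
# fittify(phot, 'ThesisDB_photometric_selected.fits')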
#%% EIF Isolation
# Removes the most isolated points from a dataframe using EIF
def eif_isolation(pd_df,dropped = 500,ntrees=1024,sample_size=512,remake=False,save = True):
"""
Removes the most isolated points from a DataFrame using EIF
-------------------------------
Input:
pd_df: pandas dataframe
dropped: how many values to drop afterwards
ntrees: how many trees to make for EIF
sample_size: how many samples to initiate EIF with
remake: wether or not to remake if results are found
save: save the results (needs to be disabled for certain recursions)
--------------------
proces:
Removes the dropped most isolated points from a DataFrame using EIF
--------------
Retuns:
Returns: New dataframe, where the least relevant datapoints have been dropped
"""
#Set up variables
try:
df_name = get_df_name(pd_df)
except (IndexError):
df_name = pd_df.name
while True:
try:
if remake == True:
print("New file requested")
raise NameError('remake')
df_isolated = pd.read_hdf('eif_results.h5',"_%s_%i_dropped_%i_%i"\
%(df_name,dropped,ntrees,sample_size))
print("succes, EIF sorted matrix found")
print("settings: Dataframe = %s, number dropped = %i, number of trees = %i, samplesize = %i"\
%(df_name,dropped,ntrees,sample_size))
break
except (FileNotFoundError,KeyError,NameError):
print("Failed to find this combination, creating one")
# main bit of code goes here:
values = pd_df.values.astype('double') # numpy array; .astype('double') is needed because spec is float32 while EIF expects float64
elevel = (values.shape[1]-1) #only doing one extension level anymore, but the largest
EIF_model = iso.iForest(values, ntrees=ntrees, sample_size=sample_size, ExtensionLevel=elevel) #create a model
EIF_paths = EIF_model.compute_paths(X_in=values) #calculate isolation value for every point
EIF_sorted = np.argsort(EIF_paths) #sort these by integers from least to most isolated
np_remainder = values[:][EIF_sorted[0:-dropped]] #drop values
index = pd_df.index.values[:][EIF_sorted[0:-(dropped)]] #Create a new index that has the same ordering (CATAID)
df_isolated = pd.DataFrame(np_remainder, columns = pd_df.columns.values, index = index) #selected dataframe
if save == True:
df_isolated.to_hdf('eif_results.h5',"_%s_%i_dropped_%i_%i"%(df_name,dropped,ntrees,sample_size))
print('EIF sorted matrix created and saved')
print("settings: Dataframe = %s, number dropped = %i, number of trees = %i, samplesize = %i"%(df_name,dropped,ntrees,sample_size))
break
return df_isolated
#setup filtered dataframes
remake = False
phot_eif = eif_isolation(phot, dropped = dropped, remake = remake)
phot_eif.name = 'Photometric'
spec_eif = eif_isolation(spec, dropped = dropped, remake = remake)
spec_eif.name = 'Spectral'
combi_eif = eif_isolation(combi, dropped = dropped, remake = remake)
combi_eif.name = 'Combined'
#%%
remake = False
# dataframe around u
u_df = full[full.columns[full.columns.str.endswith('u')]]
u_df.name = "u_phot"
u_eif = eif_isolation(u_df, dropped = dropped, remake = remake)
u_eif.name = 'u_phot'
# dataframe around g
g_df = full[full.columns[full.columns.str.endswith('g')]]
g_df.name = "g_phot"
g_eif = eif_isolation(g_df, dropped = dropped, remake = remake)
g_eif.name = 'g_phot'
# dataframe around r
r_df = full[full.columns[full.columns.str.endswith('r')]]
r_df.name = "r_phot"
r_eif = eif_isolation(r_df, dropped = dropped, remake = remake)
r_eif.name = 'r_phot'
# sample if we want really quick testing
sample = phot_eif.sample(1000)
dataframes = [phot_eif,spec_eif,combi_eif]
k_list = [2,3,4]
#inv_eif = eif_isolation(inv, dropped = dropped, remake = False)
#inv_eif.name = "investigate"
#inv_eif2 = eif_isolation(inv, dropped = dropped*2, remake = False)
#inv_eif2.name = "investigateplus"
"""
col_eif = eif_isolation(col, dropped = dropped, remake = remake)
spec_eif = eif_isolation(spec, dropped = dropped, remake = remake)
full_eif = eif_isolation(full, dropped = dropped, remake = remake)
"""
#%%
# 2 d heatmap for EIF
def getVals(forest,x,sorted=True):
theta = np.linspace(0,2*np.pi, forest.ntrees)
r = []
for i in range(forest.ntrees):
temp = forest.compute_paths_single_tree(np.array([x]),i)
r.append(temp[0])
if sorted:
r = np.sort(np.array(r))
return r, theta
def fmax(x):
if x.max() > 0:
xmax = x.max()*1.1
else:
xmax = x.max()*0.9
return xmax
def fmin(x):
if x.min() > 0:
xmin = x.min()*0.9
else:
xmin = x.min()*1.1
return xmin
def heat_plot(i=6,j=18,df = phot):
"""
Plots Anomaly score contour for iForest and EIF
Parameters
----------
i : Integer,
First column of the dataframe to use. The default is 6.
j : Integer
Second column of the dataframe to use. The default is 18.
df : pandas dataframe
Pandas dataframe to compare. The default is phot.
Returns
-------
Creates anomaly score contour plots for the generic iForest and for EIF
"""
ntrees = 512 #number of trees we use
sample_size=512 #how many data points we sample to create our forest
grid_density = 60 #Density of the grid we make
iname = df.columns[i]
jname = df.columns[j]
#define x and y (easier later)
np_array = df.values # converts df into numpy object
np_array = np_array.astype('double') #Type is sometimes confused. Easiest to just force
x, y = np_array[:,i], np_array[:,j]
bigX = np.array([x,y]).T #combine them into a single object
# grabbing a 2d plane from the bigger datafield
#Sample to calculate over in 2d plane
xx, yy = np.meshgrid(np.linspace(fmin(x), fmax(x), grid_density),
np.linspace(fmin(y), fmax(y), grid_density))
elevel = [0,1] #0 is normal IF, 1 is EIF
counter = 0
for k in elevel:
#Calculations
counter += 1
F0 = iso.iForest(bigX, ntrees=ntrees, sample_size=sample_size, ExtensionLevel=k)
grid = F0.compute_paths(X_in=np.c_[xx.ravel(), yy.ravel()])
grid = grid.reshape(xx.shape)
#plotting
f = plt.figure(figsize=(10,8))
ax1 = f.add_subplot()
levels = np.linspace(np.min(grid),np.max(grid),20)
CS = ax1.contourf(xx, yy, grid, levels, cmap=plt.cm.OrRd) #alt colour = cmap=plt.cm.YlOrRd) #alt colour = plt.cm.Blues_r
plt.scatter(x[::2],y[::2],s=1.8,c='k',edgecolor='None')
rn, thetan = getVals(F0,np.array([10.,0.]),sorted=sorted)
ra, thetaa = getVals(F0,np.array([0.,0.]),sorted=sorted)
if counter == 1:
ax1.set_title("Generic Isolation Forest\nNominal: Mean={0:.3f}, Var={1:.3f}\nAnomaly: Mean={2:.3f}, Var={3:.3f}".
format(np.mean(rn),np.var(rn),np.mean(ra),np.var(ra)))
import os
import glob
import h5py
import numpy as np
from mask_DC2 import read_selections
from mask_DC2 import mask_cat
lowz_lib = '/gpfs/mira-fs0/projects/DarkUniverse_esp/dkorytov/data/Galacticus/low_z/galaxy_library/*.hdf5'
hiz_lib = '/gpfs/mira-fs0/projects/DarkUniverse_esp/dkorytov/data/Galacticus/high_z/galaxy_library/*.hdf5'
galaxyProperties = 'galaxyProperties'
#Rv variables
Lum_v = 'otherLuminosities/totalLuminositiesStellar:V:rest'
Lum_v_dust = 'otherLuminosities/totalLuminositiesStellar:V:rest:dustAtlas'
Lum_b = 'otherLuminosities/totalLuminositiesStellar:B:rest'
Lum_b_dust = 'otherLuminosities/totalLuminositiesStellar:B:rest:dustAtlas'
def _calc_Av(lum_v, lum_v_dust):
with np.errstate(divide='ignore', invalid='ignore'):
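# The body is cut off above; a common way to finish such a helper (stated as an
# assumption, not the original code) is the standard attenuation definition
# A_V = -2.5 * log10(L_V,dust / L_V,intrinsic), i.e.:
#     Av = -2.5 * np.log10(lum_v_dust / lum_v)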
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from pyscf.nao.m_xjl import xjl
#
#
#
class sbt_c():
'''
Spherical Bessel Transform by <NAME>. Functions are given on logarithmic mesh
See m_log_mesh
Args:
nr : integer, number of points on radial mesh
rr : array of points in coordinate space
kk : array of points in momentum space
lmax : integer, maximal angular momentum necessary
with_sqrt_pi_2 : if one, then transforms will be multiplied by sqrt(pi/2)
fft_flags : ??
Returns:
a class preinitialized to perform the spherical Bessel Transform
Examples:
label = 'siesta'
sv = system_vars_c(label)
sbt = sbt_c(sv.ao_log.rr, sv.ao_log.pp)
print(sbt.exe(sv.ao_log.psi_log[0,0,:], 0))
'''
def __init__(self, rr, kk, lmax=12, with_sqrt_pi_2=True, fft_flags=None):
assert(type(rr)==np.ndarray)
assert(rr[0]>0.0)
assert(type(kk)==np.ndarray)
assert(kk[0]>0.0)
self.nr = len(rr)
n = self.nr
assert(self.nr>1)
assert(lmax>-1)
self.rr,self.kk = rr,kk
nr2, self.rr3, self.kk3 = self.nr*2, rr**3, kk**3
self.rmin,self.kmin = rr[0],kk[0]
self.rhomin,self.kapmin= np.log(self.rmin),np.log(self.kmin)
self.dr_jt = np.log(rr[1]/rr[0])
dr = self.dr_jt
dt = 2.0*np.pi/(nr2*dr)
self._smallr = self.rmin*np.array([np.exp(-dr*(n-i)) for i in range(n)], dtype='float64')
self._premult = np.array([np.exp(1.5*dr*(i-n)) for i in range(2*n)], dtype='float64')
coeff = 1.0/np.sqrt(np.pi/2.0) if with_sqrt_pi_2 else 1.0
self._postdiv = np.array([coeff*np.exp(-1.5*dr*i) for i in range(n)], dtype='float64')
temp1 = np.zeros((nr2), dtype='complex128')
temp2 = np.zeros((nr2), dtype='complex128')
temp1[0] = 1.0
temp2 = np.fft.fft(temp1)
xx = sum(np.real(temp2))
if abs(nr2-xx)>1e-10 : raise SystemError('err: sbt_plan: problem with fftw sum(temp2):')
self._mult_table1 = np.zeros((lmax+1, self.nr), dtype='complex128')
for it in range(n):
tt = it*dt # Define a t value
phi3 = (self.kapmin+self.rhomin)*tt # See Eq. (33)
rad,phi = np.sqrt(10.5**2+tt**2),np.arctan((2.0*tt)/21.0)
phi1 = -10.0*phi-np.log(rad)*tt+tt+np.sin(phi)/(12.0*rad) \
-np.sin(3.0*phi)/(360.0*rad**3)+np.sin(5.0*phi)/(1260.0*rad**5) \
-np.sin(7.0*phi)/(1680.0*rad**7)
for ix in range(1,11): phi1=phi1+np.arctan((2.0*tt)/(2.0*ix-1)) # see Eqs. (27) and (28)
phi2 = -np.arctan(1.0) if tt>200.0 else -np.arctan(np.sinh(np.pi*tt/2)/np.cosh(np.pi*tt/2)) # see Eq. (20)
phi = phi1+phi2+phi3
self._mult_table1[0,it] = np.sqrt(np.pi/2)*np.exp(1j*phi)/n # Eq. (18)
if it==0 : self._mult_table1[0,it] = 0.5*self._mult_table1[0,it]
phi = -phi2 - np.arctan(2.0*tt)
import functools
import itertools
import re
import sys
import warnings
import threading
import operator
import numpy as np
import unittest
from numba import typeof, njit
from numba.core import types, typing, utils
from numba.core.compiler import compile_isolated, Flags, DEFAULT_FLAGS
from numba.np.numpy_support import from_dtype
from numba import jit, vectorize
from numba.core.errors import LoweringError, TypingError
from numba.tests.support import TestCase, CompilationCache, MemoryLeakMixin, tag
from numba.core.typing.npydecl import supported_ufuncs, all_ufuncs
from numba.np import numpy_support
from numba.core.registry import cpu_target
from numba.core.base import BaseContext
from numba.np import ufunc_db
is32bits = tuple.__itemsize__ == 4
iswindows = sys.platform.startswith('win32')
# NOTE: to test the implementation of Numpy ufuncs, we disable rewriting
# of array expressions.
enable_pyobj_flags = Flags()
enable_pyobj_flags.enable_pyobject = True
enable_pyobj_flags.no_rewrites = True
no_pyobj_flags = Flags()
no_pyobj_flags.no_rewrites = True
enable_nrt_flags = Flags()
enable_nrt_flags.nrt = True
enable_nrt_flags.no_rewrites = True
def _unimplemented(func):
"""An 'expectedFailure' like decorator that only expects compilation errors
caused by unimplemented functions that fail in no-python mode"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except TypingError:
raise unittest._ExpectedFailure(sys.exc_info())
raise unittest._UnexpectedSuccess
def _make_ufunc_usecase(ufunc):
ldict = {}
arg_str = ','.join(['a{0}'.format(i) for i in range(ufunc.nargs)])
func_str = 'def fn({0}):\n np.{1}({0})'.format(arg_str, ufunc.__name__)
exec(func_str, globals(), ldict)
fn = ldict['fn']
fn.__name__ = '{0}_usecase'.format(ufunc.__name__)
return fn
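# For np.add (nin=2, nout=1, so nargs=3) the generated source is effectively:
#     def fn(a0,a1,a2):
#         np.add(a0,a1,a2)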
def _make_unary_ufunc_op_usecase(ufunc_op):
ldict = {}
exec("def fn(x):\n return {0}(x)".format(ufunc_op), globals(), ldict)
fn = ldict["fn"]
fn.__name__ = "usecase_{0}".format(hash(ufunc_op))
return fn
def _make_binary_ufunc_op_usecase(ufunc_op):
ldict = {}
exec("def fn(x,y):\n return x{0}y".format(ufunc_op), globals(), ldict)
fn = ldict["fn"]
fn.__name__ = "usecase_{0}".format(hash(ufunc_op))
return fn
def _make_inplace_ufunc_op_usecase(ufunc_op):
"""Generates a function to be compiled that performs an inplace operation
ufunc_op can be a string like '+=' or a function like operator.iadd
"""
if isinstance(ufunc_op, str):
ldict = {}
exec("def fn(x,y):\n x{0}y".format(ufunc_op), globals(), ldict)
fn = ldict["fn"]
fn.__name__ = "usecase_{0}".format(hash(ufunc_op))
else:
def inplace_op(x, y):
ufunc_op(x, y)
fn = inplace_op
return fn
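# For a string op such as '+=' the generated source is effectively:
#     def fn(x,y):
#         x+=y
# whereas for a callable such as operator.iadd the closure `inplace_op` is returned instead.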
def _as_dtype_value(tyargs, args):
"""Convert python values into numpy scalar objects.
"""
return [np.dtype(str(ty)).type(val) for ty, val in zip(tyargs, args)]
class BaseUFuncTest(MemoryLeakMixin):
def setUp(self):
super(BaseUFuncTest, self).setUp()
self.inputs = [
(np.uint32(0), types.uint32),
(np.uint32(1), types.uint32),
(np.int32(-1), types.int32),
(np.int32(0), types.int32),
(np.int32(1), types.int32),
(np.uint64(0), types.uint64),
(np.uint64(1), types.uint64),
(np.int64(-1), types.int64),
(np.int64(0), types.int64),
(np.int64(1), types.int64),
(np.float32(-0.5), types.float32),
(np.float32(0.0), types.float32),
(np.float32(0.5), types.float32),
(np.float64(-0.5), types.float64),
(np.float64(0.0), types.float64),
(np.float64(0.5), types.float64),
(np.array([0,1], dtype='u4'), types.Array(types.uint32, 1, 'C')),
(np.array([0,1], dtype='u8'), types.Array(types.uint64, 1, 'C')),
(np.array([-1,0,1], dtype='i4'), types.Array(types.int32, 1, 'C')),
(np.array([-1,0,1], dtype='i8'), types.Array(types.int64, 1, 'C')),
(np.array([-0.5, 0.0, 0.5], dtype='f4'), types.Array(types.float32, 1, 'C')),
(np.array([-0.5, 0.0, 0.5], dtype='f8'), types.Array(types.float64, 1, 'C')),
(np.array([0,1], dtype=np.int8), types.Array(types.int8, 1, 'C')),
(np.array([0,1], dtype=np.int16), types.Array(types.int16, 1, 'C')),
(np.array([0,1], dtype=np.uint8), types.Array(types.uint8, 1, 'C')),
(np.array([0,1], dtype=np.uint16), types.Array(types.uint16, 1, 'C')),
]
self.cache = CompilationCache()
def _determine_output_type(self, input_type, int_output_type=None,
float_output_type=None):
ty = input_type
if isinstance(ty, types.Array):
ty = ty.dtype
if ty in types.signed_domain:
if int_output_type:
output_type = types.Array(int_output_type, 1, 'C')
else:
output_type = types.Array(ty, 1, 'C')
elif ty in types.unsigned_domain:
if int_output_type:
output_type = types.Array(int_output_type, 1, 'C')
else:
output_type = types.Array(ty, 1, 'C')
else:
if float_output_type:
output_type = types.Array(float_output_type, 1, 'C')
else:
output_type = types.Array(ty, 1, 'C')
return output_type
class TestUFuncs(BaseUFuncTest, TestCase):
def basic_ufunc_test(self, ufunc, flags=no_pyobj_flags,
skip_inputs=[], additional_inputs=[],
int_output_type=None, float_output_type=None,
kinds='ifc', positive_only=False):
# Necessary to avoid some Numpy warnings being silenced, despite
# the simplefilter() call below.
self.reset_module_warnings(__name__)
pyfunc = _make_ufunc_usecase(ufunc)
inputs = list(self.inputs) + additional_inputs
for input_tuple in inputs:
input_operand = input_tuple[0]
input_type = input_tuple[1]
is_tuple = isinstance(input_operand, tuple)
if is_tuple:
args = input_operand
else:
args = (input_operand,) * ufunc.nin
if input_type in skip_inputs:
continue
if positive_only and np.any(args[0] < 0):
continue
# Some ufuncs don't allow all kinds of arguments
if (args[0].dtype.kind not in kinds):
continue
output_type = self._determine_output_type(
input_type, int_output_type, float_output_type)
input_types = (input_type,) * ufunc.nin
output_types = (output_type,) * ufunc.nout
cr = self.cache.compile(pyfunc, input_types + output_types,
flags=flags)
cfunc = cr.entry_point
if isinstance(args[0], np.ndarray):
results = [
np.zeros(args[0].size,
dtype=out_ty.dtype.name)
for out_ty in output_types
]
expected = [
np.zeros(args[0].size,
dtype=out_ty.dtype.name)
for out_ty in output_types
]
else:
results = [
np.zeros(1, dtype=out_ty.dtype.name)
for out_ty in output_types
]
expected = [
np.zeros(1, dtype=out_ty.dtype.name)
for out_ty in output_types
]
invalid_flag = False
with warnings.catch_warnings(record=True) as warnlist:
warnings.simplefilter('always')
pyfunc(*args, *expected)
warnmsg = "invalid value encountered"
for thiswarn in warnlist:
if (issubclass(thiswarn.category, RuntimeWarning)
and str(thiswarn.message).startswith(warnmsg)):
invalid_flag = True
cfunc(*args, *results)
for expected_i, result_i in zip(expected, results):
msg = '\n'.join(["ufunc '{0}' failed",
"inputs ({1}):", "{2}",
"got({3})", "{4}",
"expected ({5}):", "{6}"
]).format(ufunc.__name__,
input_type, input_operand,
output_type, result_i,
expected_i.dtype, expected_i)
try:
np.testing.assert_array_almost_equal(
expected_i, result_i,
decimal=5,
err_msg=msg)
except AssertionError:
if invalid_flag:
# Allow output to mismatch for invalid input
print("Output mismatch for invalid input",
input_tuple, result_i, expected_i)
else:
raise
def basic_int_ufunc_test(self, name=None, flags=no_pyobj_flags):
self.basic_ufunc_test(name, flags=flags,
skip_inputs=[types.float32, types.float64,
types.Array(types.float32, 1, 'C'),
types.Array(types.float64, 1, 'C')])
############################################################################
# Math operations
def test_add_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.add, flags=flags)
def test_subtract_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.subtract, flags=flags)
def test_multiply_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.multiply, flags=flags)
def test_divide_ufunc(self, flags=no_pyobj_flags):
# Bear in mind that in python3 divide IS true_divide
# so the out type for int types will be a double
int_out_type = None
int_out_type = types.float64
self.basic_ufunc_test(np.divide, flags=flags, int_output_type=int_out_type)
def test_logaddexp_ufunc(self):
self.basic_ufunc_test(np.logaddexp, kinds='f')
def test_logaddexp2_ufunc(self):
self.basic_ufunc_test(np.logaddexp2, kinds='f')
def test_true_divide_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.true_divide, flags=flags, int_output_type=types.float64)
def test_floor_divide_ufunc(self):
self.basic_ufunc_test(np.floor_divide)
def test_negative_ufunc(self, flags=no_pyobj_flags):
# NumPy ufunc has bug with uint32 as input and int64 as output,
# so skip uint32 input.
self.basic_ufunc_test(np.negative, int_output_type=types.int64,
skip_inputs=[types.Array(types.uint32, 1, 'C'), types.uint32],
flags=flags)
def test_positive_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.positive, flags=flags)
def test_power_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.power, flags=flags,
positive_only=True)
def test_float_power_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.float_power, flags=flags, kinds="fc")
def test_gcd_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.gcd, flags=flags, kinds="iu")
def test_lcm_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.lcm, flags=flags, kinds="iu")
def test_remainder_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.remainder, flags=flags)
def test_mod_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.mod, flags=flags, kinds='ifcu',
additional_inputs = [
((np.uint64(np.iinfo(np.uint64).max), np.uint64(16)), types.uint64)
])
def test_fmod_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.fmod, flags=flags)
def test_abs_ufunc(self, flags=no_pyobj_flags, ufunc=np.abs):
self.basic_ufunc_test(ufunc, flags=flags,
additional_inputs = [
(np.uint32(np.iinfo(np.uint32).max), types.uint32),
(np.uint64(np.iinfo(np.uint64).max), types.uint64),
(np.float32(np.finfo(np.float32).min), types.float32),
(np.float64(np.finfo(np.float64).min), types.float64)
])
def test_absolute_ufunc(self, flags=no_pyobj_flags):
self.test_abs_ufunc(flags=flags, ufunc=np.absolute)
def test_fabs_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.fabs, flags=flags, kinds='f')
def test_rint_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.rint, flags=flags, kinds='cf')
def test_sign_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.sign, flags=flags)
def test_conj_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.conj, flags=flags)
def test_exp_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.exp, flags=flags, kinds='cf')
def test_exp2_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.exp2, flags=flags, kinds='cf')
def test_log_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.log, flags=flags, kinds='cf')
def test_log2_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.log2, flags=flags, kinds='cf')
def test_log10_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.log10, flags=flags, kinds='cf')
def test_expm1_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.expm1, flags=flags, kinds='cf')
def test_log1p_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.log1p, flags=flags, kinds='cf')
def test_sqrt_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.sqrt, flags=flags, kinds='cf')
def test_square_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.square, flags=flags)
def test_cbrt_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.cbrt, flags=flags, kinds='f')
def test_reciprocal_ufunc(self, flags=no_pyobj_flags):
# reciprocal for integers doesn't make much sense and is problematic
# in the case of division by zero, as an inf will overflow float to
# int conversions, which is undefined behavior.
to_skip = [types.Array(types.uint32, 1, 'C'), types.uint32,
types.Array(types.int32, 1, 'C'), types.int32,
types.Array(types.uint64, 1, 'C'), types.uint64,
types.Array(types.int64, 1, 'C'), types.int64]
self.basic_ufunc_test(np.reciprocal, skip_inputs=to_skip, flags=flags)
def test_conjugate_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.conjugate, flags=flags)
############################################################################
# Trigonometric Functions
def test_sin_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.sin, flags=flags, kinds='cf')
def test_cos_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.cos, flags=flags, kinds='cf')
def test_tan_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.tan, flags=flags, kinds='cf')
def test_arcsin_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arcsin, flags=flags, kinds='cf')
def test_arccos_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arccos, flags=flags, kinds='cf')
def test_arctan_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arctan, flags=flags, kinds='cf')
def test_arctan2_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arctan2, flags=flags, kinds='cf')
def test_hypot_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.hypot, kinds='f')
def test_sinh_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.sinh, flags=flags, kinds='cf')
def test_cosh_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.cosh, flags=flags, kinds='cf')
def test_tanh_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.tanh, flags=flags, kinds='cf')
def test_arcsinh_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arcsinh, flags=flags, kinds='cf')
def test_arccosh_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arccosh, flags=flags, kinds='cf')
def test_arctanh_ufunc(self, flags=no_pyobj_flags):
# arctanh is only valid is only finite in the range ]-1, 1[
# This means that for any of the integer types it will produce
# conversion from infinity/-infinity to integer. That's undefined
# behavior in C, so the results may vary from implementation to
# implementation. This means that the result from the compiler
# used to compile NumPy may differ from the result generated by
# llvm. Skipping the integer types in this test avoids failed
# tests because of this.
to_skip = [types.Array(types.uint32, 1, 'C'), types.uint32,
types.Array(types.int32, 1, 'C'), types.int32,
types.Array(types.uint64, 1, 'C'), types.uint64,
types.Array(types.int64, 1, 'C'), types.int64]
self.basic_ufunc_test(np.arctanh, skip_inputs=to_skip, flags=flags,
kinds='cf')
def test_deg2rad_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.deg2rad, flags=flags, kinds='f')
def test_rad2deg_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.rad2deg, flags=flags, kinds='f')
def test_degrees_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.degrees, flags=flags, kinds='f')
def test_radians_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.radians, flags=flags, kinds='f')
############################################################################
# Bit-twiddling Functions
def test_bitwise_and_ufunc(self, flags=no_pyobj_flags):
self.basic_int_ufunc_test(np.bitwise_and, flags=flags)
def test_bitwise_or_ufunc(self, flags=no_pyobj_flags):
self.basic_int_ufunc_test(np.bitwise_or, flags=flags)
def test_bitwise_xor_ufunc(self, flags=no_pyobj_flags):
self.basic_int_ufunc_test(np.bitwise_xor, flags=flags)
def test_invert_ufunc(self, flags=no_pyobj_flags):
self.basic_int_ufunc_test(np.invert, flags=flags)
def test_bitwise_not_ufunc(self, flags=no_pyobj_flags):
self.basic_int_ufunc_test(np.bitwise_not, flags=flags)
# Note: there is no entry for left_shift and right_shift as this harness
# is not valid for them. This is so because left_shift and right
# shift implementation in NumPy has undefined behavior (in C-parlance)
# when the second argument is a negative (or bigger than the number
# of bits) value.
# Also, right_shift for negative first arguments also relies on
# implementation defined behavior, although numba warantees "sane"
# behavior (arithmetic shifts on signed integers, logic shifts on
# unsigned integers).
############################################################################
# Comparison functions
def test_greater_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.greater, flags=flags)
def test_greater_equal_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.greater_equal, flags=flags)
def test_less_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.less, flags=flags)
def test_less_equal_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.less_equal, flags=flags)
def test_not_equal_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.not_equal, flags=flags)
def test_equal_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.equal, flags=flags)
def test_logical_and_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.logical_and, flags=flags)
def test_logical_or_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.logical_or, flags=flags)
def test_logical_xor_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.logical_xor, flags=flags)
def test_logical_not_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.logical_not, flags=flags)
def test_maximum_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.maximum, flags=flags)
def test_minimum_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.minimum, flags=flags)
def test_fmax_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.fmax, flags=flags)
def test_fmin_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.fmin, flags=flags)
############################################################################
# Floating functions
def bool_additional_inputs(self):
return [
(np.array([True, False], dtype=np.bool_),
types.Array(types.bool_, 1, 'C')),
]
def test_isfinite_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(
np.isfinite, flags=flags, kinds='ifcb',
additional_inputs=self.bool_additional_inputs(),
)
def test_isinf_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(
np.isinf, flags=flags, kinds='ifcb',
additional_inputs=self.bool_additional_inputs(),
)
def test_isnan_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(
np.isnan, flags=flags, kinds='ifcb',
additional_inputs=self.bool_additional_inputs(),
)
def test_signbit_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.signbit, flags=flags)
def test_copysign_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.copysign, flags=flags, kinds='f')
def test_nextafter_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.nextafter, flags=flags, kinds='f')
@_unimplemented
def test_modf_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.modf, flags=flags, kinds='f')
# Note: there is no entry for ldexp as this harness isn't valid for this
# ufunc. this is so because ldexp requires heterogeneous inputs.
# However, this ufunc is tested by the TestLoopTypes test classes.
@_unimplemented
def test_frexp_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.frexp, flags=flags, kinds='f')
def test_floor_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.floor, flags=flags, kinds='f')
def test_ceil_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.ceil, flags=flags, kinds='f')
def test_trunc_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.trunc, flags=flags, kinds='f')
def test_spacing_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.spacing, flags=flags, kinds='f')
############################################################################
# Other tests
def binary_ufunc_mixed_types_test(self, ufunc, flags=no_pyobj_flags):
ufunc_name = ufunc.__name__
ufunc = _make_ufunc_usecase(ufunc)
inputs1 = [
(1, types.uint64),
(-1, types.int64),
(0.5, types.float64),
(np.array([0, 1], dtype='u8'), types.Array(types.uint64, 1, 'C')),
(np.array([-1, 1], dtype='i8'), types.Array(types.int64, 1, 'C')),
(np.array([-0.5, 0.5], dtype='f8'), types.Array(types.float64, 1, 'C'))]
inputs2 = inputs1
output_types = [types.Array(types.int64, 1, 'C'),
types.Array(types.float64, 1, 'C')]
pyfunc = ufunc
for input1, input2, output_type in itertools.product(inputs1, inputs2, output_types):
input1_operand = input1[0]
input1_type = input1[1]
input2_operand = input2[0]
input2_type = input2[1]
# Skip division by unsigned int because of NumPy bugs
if ufunc_name == 'divide' and (input2_type == types.Array(types.uint32, 1, 'C') or
input2_type == types.Array(types.uint64, 1, 'C')):
continue
# Skip some subtraction tests because of NumPy bugs
if ufunc_name == 'subtract' and input1_type == types.Array(types.uint32, 1, 'C') and \
input2_type == types.uint32 and types.Array(types.int64, 1, 'C'):
continue
if ufunc_name == 'subtract' and input1_type == types.Array(types.uint32, 1, 'C') and \
input2_type == types.uint64 and types.Array(types.int64, 1, 'C'):
continue
if ((isinstance(input1_type, types.Array) or
isinstance(input2_type, types.Array)) and
not isinstance(output_type, types.Array)):
continue
cr = self.cache.compile(pyfunc,
(input1_type, input2_type, output_type),
flags=flags)
cfunc = cr.entry_point
if isinstance(input1_operand, np.ndarray):
result = np.zeros(input1_operand.size,
dtype=output_type.dtype.name)
expected = np.zeros(input1_operand.size,
dtype=output_type.dtype.name)
elif isinstance(input2_operand, np.ndarray):
result = np.zeros(input2_operand.size,
dtype=output_type.dtype.name)
expected = np.zeros(input2_operand.size,
dtype=output_type.dtype.name)
else:
result = np.zeros(1, dtype=output_type.dtype.name)
expected = np.zeros(1, dtype=output_type.dtype.name)
cfunc(input1_operand, input2_operand, result)
pyfunc(input1_operand, input2_operand, expected)
scalar_type = getattr(output_type, 'dtype', output_type)
prec = ('single'
if scalar_type in (types.float32, types.complex64)
else 'double')
self.assertPreciseEqual(expected, result, prec=prec)
def test_broadcasting(self):
# Test unary ufunc
pyfunc = _make_ufunc_usecase(np.negative)
input_operands = [
np.arange(3, dtype='i8'),
np.arange(3, dtype='i8').reshape(3,1),
np.arange(3, dtype='i8').reshape(1,3),
np.arange(3, dtype='i8').reshape(3,1),
np.arange(3, dtype='i8').reshape(1,3),
np.arange(3*3, dtype='i8').reshape(3,3)]
output_operands = [
np.zeros(3*3, dtype='i8').reshape(3,3),
np.zeros(3*3, dtype='i8').reshape(3,3),
np.zeros(3*3, dtype='i8').reshape(3,3),
np.zeros(3*3*3, dtype='i8').reshape(3,3,3),
np.zeros(3*3*3, dtype='i8').reshape(3,3,3),
np.zeros(3*3*3, dtype='i8').reshape(3,3,3)]
for x, result in zip(input_operands, output_operands):
input_type = types.Array(types.uint64, x.ndim, 'C')
output_type = types.Array(types.int64, result.ndim, 'C')
cr = self.cache.compile(pyfunc, (input_type, output_type),
flags=no_pyobj_flags)
cfunc = cr.entry_point
expected = np.zeros(result.shape, dtype=result.dtype)
np.negative(x, expected)
cfunc(x, result)
self.assertPreciseEqual(result, expected)
# Test binary ufunc
pyfunc = _make_ufunc_usecase(np.add)
input1_operands = [
np.arange(3, dtype='u8'),
np.arange(3*3, dtype='u8').reshape(3,3),
np.arange(3*3*3, dtype='u8').reshape(3,3,3),
np.arange(3, dtype='u8').reshape(3,1),
np.arange(3, dtype='u8').reshape(1,3),
            np.arange(3, dtype='u8'),
import numpy as np
import pytest
import snc.agents.hedgehog.strategic_idling.strategic_idling_utils
from snc.agents.hedgehog.asymptotic_workload_cov.\
compute_asymptotic_cov_bernoulli_service_and_arrivals \
import ComputeAsymptoticCovBernoulliServiceAndArrivals
import snc.agents.hedgehog.strategic_idling.hedging_utils as hedging_utils
import snc.agents.hedgehog.workload.workload as wl
from snc.agents.hedgehog.params import StrategicIdlingParams
from snc.agents.hedgehog.strategic_idling.strategic_idling import StrategicIdlingCore
from snc.agents.hedgehog.strategic_idling.strategic_idling_hedgehog_gto import \
StrategicIdlingGTO, StrategicIdlingHedgehogGTO
from snc.agents.hedgehog.strategic_idling.strategic_idling_hedging import StrategicIdlingHedging
from snc.agents.hedgehog.strategic_idling.strategic_idling_utils import get_dynamic_bottlenecks
import snc.environments.examples as examples
import snc.utils.alt_methods_test as alt_methods_test
import snc.utils.exceptions as exceptions
def test_create_strategic_idling_get_dynamic_bottlenecks():
neg_log_discount_factor = - np.log(0.99999)
env = examples.simple_reentrant_line_model(alpha1=0.33, mu1=0.69, mu2=0.35, mu3=0.69,
cost_per_buffer=np.array([1, 1, 1])[:, None])
num_wl_vec = 2
load, workload_mat, _ = wl.compute_load_workload_matrix(env, num_wl_vec)
strategic_idling_params = StrategicIdlingParams()
gto_object = StrategicIdlingGTO(workload_mat=workload_mat,
load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params)
x = np.array([[158], [856], [0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([1])
assert set(gto_object.get_allowed_idling_directions(x).k_idling_set) == set([0])
x = np.array([[493], [476], [0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0,1])
assert set(gto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
x = np.array([[631], [338], [0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0])
assert set(gto_object.get_allowed_idling_directions(x).k_idling_set) == set([1])
def test_create_strategic_idling_hedgehog_gto_normal_hedging():
neg_log_discount_factor = - np.log(0.99999)
env = examples.simple_reentrant_line_model(alpha1=0.33, mu1=0.69, mu2=0.35, mu3=0.69,
cost_per_buffer=np.array([1.5, 1, 2])[:, None])
num_wl_vec = 2
load, workload_mat, _ = wl.compute_load_workload_matrix(env, num_wl_vec)
strategic_idling_params = StrategicIdlingParams()
workload_cov = np.array([[2, 0.5], [0.5, 3]])
hgto_object = StrategicIdlingHedgehogGTO(workload_mat=workload_mat,
neg_log_discount_factor=neg_log_discount_factor,
load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params,
workload_cov=workload_cov)
# this case corresponds to normal hedging regime below hedging threshold
x = np.array([[631], [338], [0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
# this case corresponds to normal hedging regime above hedging threshold
x = np.array([[969],
[ 0],
[351]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([1])
# this case corresponds to monotone region
x = np.array([[493],
[476],
[ 0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0,1])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
# this case corresponds to monotone region
x = np.array([[100],
[476],
[ 0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([1])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
assert hgto_object._min_drain_lp is None
def test_create_strategic_idling_hedgehog_gto_switching_curve():
neg_log_discount_factor = - np.log(0.99999)
env = examples.simple_reentrant_line_model(alpha1=0.33, mu1=0.7, mu2=0.345, mu3=0.7,
cost_per_buffer=np.array([1.5, 1, 2])[:, None])
num_wl_vec = 2
load, workload_mat, _ = wl.compute_load_workload_matrix(env, num_wl_vec)
strategic_idling_params = StrategicIdlingParams()
workload_cov = np.array([[2, 0.5], [0.5, 3]])
h_object = StrategicIdlingHedging(workload_mat=workload_mat,
neg_log_discount_factor=neg_log_discount_factor,
load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params,
workload_cov=workload_cov)
hgto_object = StrategicIdlingHedgehogGTO(workload_mat=workload_mat,
neg_log_discount_factor=neg_log_discount_factor,
load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params,
workload_cov=workload_cov)
# This case corresponds to switching curve regime, i.e. minimum cost
# effective state can only be reached by extending the minimum draining time.
# `w` is below the hedging threshold so standard Hedgehog would allow one
# resource to idle, but it turns out that this resource is a dynamic
# bottleneck for the current `w`.
x = np.array(([[955],
[ 0],
[202]]))
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
assert set(h_object.get_allowed_idling_directions(x).k_idling_set) == set([0])
# This case corresponds to switching curve regime (i.e., drift @ psi_plus < 0),
# `w` is below the hedging threshold so standard Hedgehog would allow one resource to idle.
# Since this resource is not a dynamic bottleneck the GTO constraint also allows it to idle.
x = np.array([[ 955],
[ 0],
[1112]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([1])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([0])
assert set(h_object.get_allowed_idling_directions(x).k_idling_set) == set([0])
# This case corresponds to switching curve regime (i.e., drift @ psi_plus < 0),
# `w` is below the hedging threshold so standard Hedgehog would allow the
# less loaded resource to idle. This is similar to the first case, but when both
# resources are dynamic bottlenecks for the current `w`.
x = np.array([[759],
[ 0],
[595]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0,1])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
assert set(h_object.get_allowed_idling_directions(x).k_idling_set) == set([0])
# this case corresponds to monotone region so both bottlenecks are not
# allowed to idle under both standard Hedgehog and GTO policy
x = np.array([[283],
[672],
[ 0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
assert set(h_object.get_allowed_idling_directions(x).k_idling_set) == set([])
assert hgto_object._min_drain_lp is not None
def test_create_strategic_idling_no_hedging_object_with_no_asymptotic_covariance():
"""
    Check that StrategicIdlingCore (which performs no hedging) can return idling
    decisions without the asymptotic covariance ever being provided.
"""
env = examples.simple_reentrant_line_model(alpha1=9, mu1=22, mu2=10, mu3=22,
cost_per_buffer=np.ones((3, 1)))
num_wl_vec = 2
load, workload_mat, nu = wl.compute_load_workload_matrix(env, num_wl_vec)
strategic_idling_params = StrategicIdlingParams()
x = np.array([[413],
[ 0],
[100]])
si_object = StrategicIdlingCore(workload_mat=workload_mat, load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params)
# these methods should not fail
si_object.get_allowed_idling_directions(x)
def test_create_strategic_idling_object_with_no_asymptotic_covariance():
"""
    Check that an assertion is raised if the asymptotic covariance has not been
    passed before querying the idling decision.
"""
neg_log_discount_factor = - np.log(0.95)
env = examples.simple_reentrant_line_model(alpha1=9, mu1=22, mu2=10, mu3=22,
cost_per_buffer=np.ones((3, 1)))
num_wl_vec = 2
load, workload_mat, nu = wl.compute_load_workload_matrix(env, num_wl_vec)
strategic_idling_params = StrategicIdlingParams()
x = np.array([[413],
[ 0],
[100]])
si_object = StrategicIdlingHedging(workload_mat=workload_mat,
neg_log_discount_factor=neg_log_discount_factor, load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params)
with pytest.raises(AssertionError):
si_object._verify_offline_preliminaries()
with pytest.raises(AssertionError):
si_object.get_allowed_idling_directions(x)
def create_strategic_idling_object(
workload_mat=np.ones((2, 2)),
workload_cov=None,
neg_log_discount_factor=None,
load=None,
cost_per_buffer=np.ones((2, 1)),
model_type='push',
strategic_idling_params=None):
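    """Convenience factory for building a StrategicIdlingHedging object with
    simple default arguments, used throughout the tests below."""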
if strategic_idling_params is None:
strategic_idling_params = StrategicIdlingParams()
return StrategicIdlingHedging(workload_mat=workload_mat,
workload_cov=workload_cov,
neg_log_discount_factor=neg_log_discount_factor,
load=load,
cost_per_buffer=cost_per_buffer,
model_type=model_type,
strategic_idling_params=strategic_idling_params)
def test_create_strategic_idling_object_without_strategic_idling_params():
"""
Check assert `strategic_idling_params is not None` in constructor.
"""
neg_log_discount_factor = - np.log(0.95)
env = examples.simple_reentrant_line_model(alpha1=9, mu1=22, mu2=10, mu3=22,
cost_per_buffer=np.ones((3, 1)))
num_wl_vec = 2
load, workload_mat, nu = wl.compute_load_workload_matrix(env, num_wl_vec)
with pytest.raises(AssertionError):
_ = StrategicIdlingHedging(workload_mat=workload_mat,
neg_log_discount_factor=neg_log_discount_factor, load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type)
def test_is_negative_orthant_true():
w = np.zeros((3, 1))
w[0] = -1
assert StrategicIdlingHedging._is_negative_orthant(w)
def test_is_negative_orthant_false():
w = np.zeros((3, 1))
w[0] = 1
assert not StrategicIdlingHedging._is_negative_orthant(w)
def test_is_negative_orthant_false_since_zero_w():
w = np.zeros((3, 1))
assert not StrategicIdlingHedging._is_negative_orthant(w)
def check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer):
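    """Solve the effective cost LP for workload `w` with the strategic idling
    object's c_bar solver and with three alternative SciPy/CVXPY implementations,
    asserting that they all agree. Returns the effective cost vector."""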
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
barc_a, _, eff_cost_a_1 = si_object.c_bar_solver.solve(w)
_, x_a, eff_cost_a_2 = alt_methods_test.compute_effective_cost_scipy(w, workload_mat,
cost_per_buffer)
barc_b, x_b, eff_cost_b = alt_methods_test.compute_effective_cost_cvxpy(w, workload_mat,
cost_per_buffer)
barc_c, x_c, eff_cost_c = alt_methods_test.compute_dual_effective_cost_cvxpy(w, workload_mat,
cost_per_buffer)
np.testing.assert_almost_equal(barc_a, barc_b)
np.testing.assert_almost_equal(barc_a, barc_c)
np.testing.assert_almost_equal(x_a, x_b)
np.testing.assert_almost_equal(x_a, x_c)
np.testing.assert_almost_equal(eff_cost_a_1, eff_cost_b)
np.testing.assert_almost_equal(eff_cost_a_1, eff_cost_c)
np.testing.assert_almost_equal(eff_cost_a_1, eff_cost_a_2)
return barc_a
def test_effective_cost_superfluous_inequalities():
"""We check that Scipy linprog() used in compute_dual_effective_cost() does not return a status
4 (encountered numerical difficulties)"""
# This example was known to return this status 4 before the fix
env = examples.simple_reentrant_line_with_demand_model(alpha_d=2, mu1=3, mu2=2.5, mu3=3,
mus=1e3, mud=1e3,
cost_per_buffer=np.ones((5, 1)),
initial_state=np.array([10, 25,
55, 0,
100])[:, None],
capacity=np.ones((5, 1)) * np.inf,
job_conservation_flag=True)
load, workload_mat, _ = wl.compute_load_workload_matrix(env, num_wl_vec=2,
load_threshold=None)
w = np.array([[1.], [0.]])
try:
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer,)
c_bar, _, eff_cost = si_object.c_bar_solver.solve(w)
except exceptions.ScipyLinprogStatusError:
pytest.fail()
def test_effective_cost_ksrs_network_model_case_1():
"""Example 5.3.3 case 1 from CTCN book (online version)."""
mu1 = 1
mu3 = 1
mu2 = 1 / 3
mu4 = 1 / 3
alpha1 = 0.9
alpha3 = 0.9
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Region 1 = {0 < 3 * w1 < w2 < inf}
w1 = 1
w2 = 4
w = np.array([[w1], [w2]])
barc_1 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
# Different from CTCN book, [1, 0]
np.testing.assert_almost_equal(barc_1, 1 / 3 * np.array([[0], [1]]))
    # Region 2 = {0 < w1 < 3 * w2 < 9 * w1}
w1 = 2
w2 = 1
w = np.array([[w1], [w2]])
barc_2 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
np.testing.assert_almost_equal(barc_2, 1 / 4 * np.ones((2, 1)))
# Region 3 = {0 < 3 * w2 < w1}
w1 = 4
w2 = 1
w = np.array([[w1], [w2]])
barc_3 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
# Different from CTCN book, [1, 0]
np.testing.assert_almost_equal(barc_3, 1 / 3 * np.array([[1], [0]]))
def test_effective_cost_ksrs_network_model_case_2():
"""Example 5.3.3 case 2 from CTCN book (online version)."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
alpha1 = 0.9
alpha3 = 0.9
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Region 1 = {0 < 3 * w1 < w2 < inf}
w1 = 1
w2 = 4
w = np.array([[w1], [w2]])
barc_1 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
# Different from CTCN book, [1, -2]
np.testing.assert_almost_equal(barc_1, np.array([[-2], [1]]))
    # Region 2 = {0 < w1 < 3 * w2 < 9 * w1}
w1 = 2
w2 = 1
w = np.array([[w1], [w2]])
barc_2 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
np.testing.assert_almost_equal(barc_2, 1 / 4 * np.ones((2, 1)))
# Region 3 = {0 < 3 * w2 < w1}
w1 = 4
w2 = 1
w = np.array([[w1], [w2]])
barc_3 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
# Different from CTCN book, [-2, 1]
np.testing.assert_almost_equal(barc_3, np.array([[1], [-2]]))
def test_all_effective_cost_vectors_ksrs_network_model_case_1():
"""Example 5.3.3 from CTCN book (online version)."""
mu1 = 1
mu3 = 1
mu2 = 1 / 3
mu4 = 1 / 3
alpha1 = 0.9
alpha3 = 0.9
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Compute cost vectors.
barc_vectors = alt_methods_test.get_all_effective_cost_linear_vectors(workload_mat,
cost_per_buffer)
barc_vectors_theory = np.array([[1 / 3, 0],
[0, 1 / 3],
[0.25, 0.25]])
# Due to numerical noise, different computers can obtain the barc vectors in different order.
# So we will compare sets instead of ndarrays.
np.around(barc_vectors, decimals=7, out=barc_vectors)
np.around(barc_vectors_theory, decimals=7, out=barc_vectors_theory)
barc_vectors_set = set(map(tuple, barc_vectors))
barc_vectors_theory_set = set(map(tuple, barc_vectors_theory))
assert barc_vectors_set == barc_vectors_theory_set
def test_all_effective_cost_vectors_ksrs_network_model_case_2():
"""Example 5.3.3 case 2 from CTCN book (online version)."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
alpha1 = 0.9
alpha3 = 0.9
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Compute cost vectors.
barc_vectors = alt_methods_test.get_all_effective_cost_linear_vectors(workload_mat,
cost_per_buffer)
# Order of the vectors not relevant, just made up for easy comparison.
barc_vectors_theory = np.array([[1, -2],
[-2, 1],
[0.25, 0.25]])
# Due to numerical noise, different computers can obtain the barc vectors in different order.
# So we will compare sets instead of ndarrays.
np.around(barc_vectors, decimals=7, out=barc_vectors)
np.around(barc_vectors_theory, decimals=7, out=barc_vectors_theory)
barc_vectors_set = set(map(tuple, barc_vectors))
barc_vectors_theory_set = set(map(tuple, barc_vectors_theory))
assert barc_vectors_set == barc_vectors_theory_set
def test_get_vector_defining_possible_idling_direction_1():
w = np.array([[1], [0]])
w_star = np.array([[1], [1]])
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
np.testing.assert_almost_equal(v_star, np.array([[0], [1]]))
def test_get_vector_defining_possible_idling_direction_2():
w = np.array([[0], [1]])
w_star = np.array([[1], [1]])
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
np.testing.assert_almost_equal(v_star, np.array([[1], [0]]))
def test_get_vector_defining_possible_idling_direction_3():
# Although this w_star is impossible since w_star >= w, we can still calculate v_star.
w = np.array([[1], [1]])
w_star = np.array([[1], [0]])
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
np.testing.assert_almost_equal(v_star, np.array([[0], [-1]]))
def test_get_vector_defining_possible_idling_direction_4():
# Although this w_star is impossible since w_star >= w, we can still calculate v_star.
w = np.array([[1], [1]])
w_star = np.array([[0], [1]])
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
np.testing.assert_almost_equal(v_star, np.array([[-1], [0]]))
def test_project_workload_on_monotone_region_along_minimal_cost_negative_w():
"""We use the single server queue with demand model. The expected result when we project
negative workload with the effective cost LP is zero."""
env = examples.single_station_demand_model(alpha_d=9, mu=10, mus=1e3, mud=1e2)
_, workload_mat, _ = wl.compute_load_workload_matrix(env)
num_wl = workload_mat.shape[0]
w = - np.ones((num_wl, 1))
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
np.testing.assert_almost_equal(w_star, np.zeros((num_wl, 1)))
def test_project_workload_on_monotone_region_along_minimal_cost_w_equal_w_star_ksrs_region_2():
"""We use the KSRS model, for which we know the boundary of the monotone region. Therefore, if
    we set w on the boundary, we should get w_star = w."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
workload_mat = np.array([[1 / mu1, 0, 1 / mu4, 1 / mu4], [1 / mu2, 1 / mu2, 1 / mu3, 0]])
cost_per_buffer = np.ones((4, 1))
# Region 1 = {0 < 3 * w1 < w2 < inf}, and Region 2 = {0 < w1 < 3 * w2 < 9 * w1}, so w = (1, 3)
    # is already right on the boundary.
w1 = 1
w2 = 3
w = np.array([[w1], [w2]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
np.testing.assert_almost_equal(w, w_star)
def test_project_workload_on_monotone_region_along_minimal_cost_ksrs_region_1():
"""We use the KSRS model, for which we know the boundary of the monotone region."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
workload_mat = np.array([[1 / mu1, 0, 1 / mu4, 1 / mu4], [1 / mu2, 1 / mu2, 1 / mu3, 0]])
cost_per_buffer = np.ones((4, 1))
# Region 1 = {0 < 3 * w1 < w2 < inf}, so w = (0.5, 3) should be projected to w_star = (1, 3)
w1 = 0.5
w2 = 3
w = np.array([[w1], [w2]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
np.testing.assert_almost_equal(w_star, np.array([[1], [3]]))
def test_project_workload_on_monotone_region_along_minimal_cost_ksrs_region_3():
"""We use the KSRS model, for which we know the boundary of the monotone region."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
workload_mat = np.array([[1 / mu1, 0, 1 / mu4, 1 / mu4], [1 / mu2, 1 / mu2, 1 / mu3, 0]])
cost_per_buffer = np.ones((4, 1))
# Region 3 = {0 < 3 * w2 < w1}, so w = (3, 0.5) should be projected to w_star = (3, 1)
w1 = 3
w2 = 0.5
w = np.array([[w1], [w2]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
np.testing.assert_almost_equal(w_star, np.array([[3], [1]]))
def test_project_workload_on_monotone_region_along_minimal_cost_pseudorandom_values():
"""Since this uses random values, it could happen that the simplex (SciPy-LinProg) and SCS (CVX)
solvers give different solutions. This is uncommon, but possible."""
np.random.seed(42)
num_buffers = 4
num_wl = 3
num_tests = 1e3
strategic_idling_params = StrategicIdlingParams()
discrepancy = 0
for i in range(int(num_tests)):
w = np.random.random_sample((num_wl, 1))
cost_per_buffer = np.random.random_sample((num_buffers, 1))
workload_mat = np.random.random_sample((num_wl, num_buffers))
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
w_star_b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w, workload_mat, cost_per_buffer, "revised simplex")
if not np.allclose(w_star, w_star_b):
discrepancy += 1
assert discrepancy < 5
def test_project_workload_when_monotone_region_is_a_ray():
"""We use the simple re-entrant line model."""
c_1 = 1
c_2 = 2
c_3 = 3
cost_per_buffer = np.array([[c_1], [c_2], [c_3]])
mu_1 = 2
mu_2 = 1
mu_3 = 2
workload_mat = np.array([[1 / mu_1 + 1 / mu_3, 1 / mu_3, 1 / mu_3],
[1 / mu_2, 1 / mu_2, 0]])
c_plus = np.array([[mu_1 * (c_1 - c_2)],
[mu_2 * c_2 + (mu_1 * mu_2) / mu_3 * (c_2 - c_1)]])
c_minus = np.array([[c_3 * mu_3],
[mu_2 * c_1 - c_3 * mu_2 * (mu_3 / mu_1 + 1)]])
psi_plus = c_plus - c_minus
w = np.array([[1], [0.]]) # Got from x = np.array([[0.9], [0], [0.2]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
w_star_theory = np.array([w[0], - w[0] * psi_plus[0] / psi_plus[1]])
np.testing.assert_almost_equal(w_star, w_star_theory)
def test_project_workload_when_idling_direction_lies_in_c_plus_level_set_zero_penalty():
"""We use the simple re-entrant line model."""
c_1 = 2
c_2 = 1
c_3 = 2
cost_per_buffer = np.array([[c_1], [c_2], [c_3]])
mu_1 = 2
mu_2 = 1
mu_3 = 2
workload_mat = np.array([[1 / mu_1 + 1 / mu_3, 1 / mu_3, 1 / mu_3],
[1 / mu_2, 1 / mu_2, 0]])
c_plus = np.array([[mu_1 * (c_1 - c_2)], [mu_2 * (c_2 * (1 + mu_1/mu_3) - c_1 * mu_1 / mu_3)]])
c_minus = np.array([[mu_3 * c_3], [mu_2 * (c_1 - c_3 * (1 + mu_3/mu_1))]])
psi_plus = c_plus - c_minus
w = np.array([[1], [0.]]) # Got from x = np.array([[0.9], [0], [0.2]])
strategic_idling_params = StrategicIdlingParams(penalty_coeff_w_star=0)
si_object = create_strategic_idling_object(
workload_mat=workload_mat, cost_per_buffer=cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
w_star_theory = np.array([w[0], - w[0] * psi_plus[0] / psi_plus[1]])
with pytest.raises(AssertionError):
np.testing.assert_almost_equal(w_star, w_star_theory)
def test_project_workload_when_idling_direction_lies_in_c_plus_level_set():
"""We use the simple re-entrant line model."""
c_1 = 2
c_2 = 1
c_3 = 2
cost_per_buffer = np.array([[c_1], [c_2], [c_3]])
mu_1 = 2
mu_2 = 1
mu_3 = 2
workload_mat = np.array([[1 / mu_1 + 1 / mu_3, 1 / mu_3, 1 / mu_3],
[1 / mu_2, 1 / mu_2, 0]])
c_plus = np.array([[mu_1 * (c_1 - c_2)], [mu_2 * (c_2 * (1 + mu_1/mu_3) - c_1 * mu_1 / mu_3)]])
c_minus = np.array([[mu_3 * c_3], [mu_2 * (c_1 - c_3 * (1 + mu_3/mu_1))]])
psi_plus = c_plus - c_minus
w = np.array([[1], [0.]]) # Got from x = np.array([[0.9], [0], [0.2]])
si_object = create_strategic_idling_object(
workload_mat=workload_mat, cost_per_buffer=cost_per_buffer,
strategic_idling_params=StrategicIdlingParams(penalty_coeff_w_star=1e-5))
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
w_star_theory = np.array([w[0], - w[0] * psi_plus[0] / psi_plus[1]])
np.testing.assert_almost_equal(w_star, w_star_theory, decimal=5)
def test_is_w_inside_monotone_region_ksrs_network_model_case_1():
"""Example 5.3.3 case 1 from CTCN book (online version)."""
mu1 = 1
mu3 = 1
mu2 = 1 / 3
mu4 = 1 / 3
alpha1 = 0.3
alpha3 = 0.3
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
    # Since w is already in W^+ in any of the 3 regions, any increment in w will increase the cost,
# so w_star should equal w. Thus, v_star should be a vector of nan, in every case.
# Region 1 = {0 < 3 * w1 < w2 < inf}
w1_1 = 1
w1_2 = 4
w_1 = np.array([[w1_1], [w1_2]])
si_object_1 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star_1 = si_object_1._find_workload_with_min_eff_cost_by_idling(w_1)
c_bar_1 = si_object_1._get_level_set_for_current_workload(w_1)
assert StrategicIdlingHedging._is_w_inside_monotone_region(w_1, w_star_1, c_bar_1)
    # Region 2 = {0 < w1 < 3 * w2 < 9 * w1}
w2_1 = 2
w2_2 = 1
w_2 = np.array([[w2_1], [w2_2]])
si_object_2 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star_2 = si_object_2._find_workload_with_min_eff_cost_by_idling(w_2)
c_bar_2 = si_object_2._get_level_set_for_current_workload(w_2)
assert StrategicIdlingHedging._is_w_inside_monotone_region(w_2, w_star_2, c_bar_2)
# Region 3 = {0 < 3 * w2 < w1}
w3_1 = 4
w3_2 = 0.05
w_3 = np.array([[w3_1], [w3_2]])
si_object_3 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star_3 = si_object_3._find_workload_with_min_eff_cost_by_idling(w_3)
c_bar_3 = si_object_3._get_level_set_for_current_workload(w_3)
assert StrategicIdlingHedging._is_w_inside_monotone_region(w_3, w_star_3, c_bar_3)
def test_closest_face_ksrs_network_model_case_2():
"""Example 5.3.3 case 2 from CTCN book (online version)."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
alpha1 = 0.3
alpha3 = 0.3
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
strategic_idling_params = StrategicIdlingParams()
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Region 1 = {0 < 3 * w1 < w2 < inf}
w1_1 = 1
w1_2 = 4
w_1 = np.array([[w1_1], [w1_2]])
si_object_1 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star_1 = si_object_1._find_workload_with_min_eff_cost_by_idling(w_1)
w_star_1b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w_1, workload_mat, cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star_1, w_star_1b)
v_star_1 = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star_1, w_1)
psi_plus_1, c_plus_1, c_minus_1 = si_object_1._get_closest_face_and_level_sets(w_star_1,
v_star_1)
np.testing.assert_almost_equal(c_minus_1, np.array([[-2], [1]]), decimal=5)
np.testing.assert_almost_equal(c_plus_1, np.array([[0.25], [0.25]]), decimal=5)
    # Region 2 = {0 < w1 < 3 * w2 < 9 * w1}
w2_1 = 2
w2_2 = 1
w_2 = np.array([[w2_1], [w2_2]])
si_object_2 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star_2 = si_object_2._find_workload_with_min_eff_cost_by_idling(w_2)
w_star_2b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w_2, workload_mat, cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star_2, w_star_2b)
# Region 2 is in the monotone region W^+
c_bar_2 = si_object_2._get_level_set_for_current_workload(w_2)
assert StrategicIdlingHedging._is_w_inside_monotone_region(w_2, w_star_2, c_bar_2)
# Region 3 = {0 < 3 * w2 < w1}
w3_1 = 4
w3_2 = 0.05
w_3 = np.array([[w3_1], [w3_2]])
si_object_3 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star_3 = si_object_3._find_workload_with_min_eff_cost_by_idling(w_3)
w_star_3b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w_3, workload_mat, cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star_3, w_star_3b)
v_star_3 = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star_3, w_3)
psi_plus_3, c_plus_3, c_minus_3 = si_object_3._get_closest_face_and_level_sets(w_star_3,
v_star_3)
np.testing.assert_almost_equal(c_minus_3, np.array([[1], [-2]]), decimal=5)
np.testing.assert_almost_equal(c_plus_3, np.array([[0.25], [0.25]]), decimal=5)
def test_is_monotone_region_a_ray_negative_c_plus():
c_plus = - np.ones((3, 1))
assert not StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_nonpositive_c_plus():
c_plus = np.array([[-1], [-1], [0]])
assert not StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_zero_c_plus():
c_plus = np.zeros((3, 1))
assert not StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_positive_c_plus():
c_plus = np.ones((3, 1))
assert not StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_c_plus_with_positive_negative_and_zero_components():
c_plus = np.array([[1], [-1], [0]])
assert StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_c_plus_with_positive_and_negative_components():
c_plus = np.array([[1], [-1], [-1]])
assert StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_simple_reentrant_line():
"""We use the simple re-entrant line with parameters that make monotone region to be a ray."""
w = np.array([[1], [0]])
env = examples.simple_reentrant_line_model(mu1=2, mu2=1, mu3=2,
cost_per_buffer=np.array([[1], [2], [3]]))
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
psi_plus, c_plus, c_minus = si_object._get_closest_face_and_level_sets(w_star, v_star)
assert StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_infeasible_with_real_c_plus():
c_plus = np.array([[1], [-1], [-1]])
assert not StrategicIdlingHedging._is_infeasible(c_plus)
def test_is_monotone_region_infeasible():
c_plus = None
assert StrategicIdlingHedging._is_infeasible(c_plus)
def test_is_w_inside_monotone_region_when_small_tolerance():
w = np.random.random_sample((3, 1))
w_star = w + 1e-4
c_bar = np.ones((3, 1))
assert StrategicIdlingHedging._is_w_inside_monotone_region(w, w_star, c_bar)
def test_is_w_inside_monotone_region_false():
w = np.random.random_sample((3, 1))
w_star = w + 1e-2
c_bar = np.ones((3, 1))
assert not StrategicIdlingHedging._is_w_inside_monotone_region(w, w_star, c_bar)
def check_lambda_star(w, c_plus, psi_plus, w_star, test_strong_duality_flag=True):
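    """Compute lambda_star with StrategicIdlingHedging._get_price_lambda_star and
    cross-check it against the alternative CVXPY/SciPy LP implementations (and,
    optionally, against the strong-duality method)."""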
lambda_star = StrategicIdlingHedging._get_price_lambda_star(c_plus, psi_plus)
lambda_star_b = alt_methods_test.get_price_lambda_star_lp_1_cvxpy(w, c_plus, psi_plus)
lambda_star_c = alt_methods_test.get_price_lambda_star_lp_2_cvxpy(w, c_plus, psi_plus)
lambda_star_d = alt_methods_test.get_price_lambda_star_lp_scipy(w, c_plus, psi_plus)
if test_strong_duality_flag:
lambda_star_a = alt_methods_test.get_price_lambda_star_strong_duality(w, w_star, c_plus,
psi_plus)
np.testing.assert_almost_equal(lambda_star, lambda_star_a, decimal=5)
if lambda_star_b is not None: # If primal is not accurately solved with CVX
np.testing.assert_almost_equal(lambda_star, lambda_star_b, decimal=5)
if lambda_star_c is not None:
np.testing.assert_almost_equal(lambda_star, lambda_star_c, decimal=5)
np.testing.assert_almost_equal(lambda_star, lambda_star_d)
return lambda_star
def test_get_price_lambda_star_when_c_plus_is_positive():
"""lambda_star depends on the ratio over the positive components of psi_plus."""
c_plus = np.array([[1], [1]])
w = np.array([[3], [0.1]])
psi_plus = np.array([[-.1], [0.5]])
check_lambda_star(w, c_plus, psi_plus, None, False)
def test_get_price_lambda_star_when_c_plus_is_negative():
"""c_plus should always be nonnegative"""
c_plus = np.array([[-1], [1]])
psi_plus = np.array([[-1], [0.5]])
with pytest.raises(exceptions.ArraySignError) as excinfo:
_ = StrategicIdlingHedging._get_price_lambda_star(c_plus, psi_plus)
assert (excinfo.value.array_name == "c_plus" and excinfo.value.all_components and
excinfo.value.positive and not excinfo.value.strictly)
def test_get_price_lambda_star_when_c_plus_is_zero():
"""c_plus should always have at least one strictly positive component"""
c_plus = np.array([[0], [0]])
psi_plus = np.array([[-1], [0.5]])
with pytest.raises(exceptions.ArraySignError) as excinfo:
_ = StrategicIdlingHedging._get_price_lambda_star(c_plus, psi_plus)
assert (excinfo.value.array_name == "c_plus" and not excinfo.value.all_components and
excinfo.value.positive and excinfo.value.strictly)
def test_get_price_lambda_star_when_c_plus_has_zero_components():
"""lambda_star only depends on the ratio over the positive components of psi_plus."""
c_plus = np.array([[0], [1]])
w = np.array([[3], [0.1]])
psi_plus = np.array([[-.1], [0.5]])
check_lambda_star(w, c_plus, psi_plus, None, False)
def test_get_price_lambda_star_when_c_plus_has_zero_components_with_positive_psi_plus():
"""lambda_star only depends on the ratio over the positive components of psi_plus."""
c_plus = np.array([[0], [1]])
w = np.array([[-3], [0.1]])
psi_plus = np.array([[0.5], [0.5]])
check_lambda_star(w, c_plus, psi_plus, None, False)
def test_get_price_lambda_star_when_psi_plus_is_negative():
c_plus = np.array([[1], [1]])
psi_plus = - np.ones((2, 1))
with pytest.raises(exceptions.EmptyArrayError) as excinfo:
_ = StrategicIdlingHedging._get_price_lambda_star(c_plus, psi_plus)
assert excinfo.value.array_name == "ratio"
def test_get_price_lambda_star_when_psi_plus_has_zero_and_positive_components():
c_plus = np.array([[1], [1]])
psi_plus = np.array([[0], [1]])
lambda_star = StrategicIdlingHedging._get_price_lambda_star(c_plus, psi_plus)
assert lambda_star == 1
def test_get_price_lambda_star_when_psi_plus_has_zero_and_negative_components():
c_plus = np.array([[1], [1]])
psi_plus = np.array([[0], [-1]])
with pytest.raises(exceptions.EmptyArrayError) as excinfo:
_ = StrategicIdlingHedging._get_price_lambda_star(c_plus, psi_plus)
assert excinfo.value.array_name == "ratio"
def test_get_price_lambda_star_simple_reentrant_line():
env = examples.simple_reentrant_line_model(alpha1=0.5, mu1=1.1, mu2=1.2, mu3=1.3)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
strategic_idling_params = StrategicIdlingParams()
for i in range(100):
# Set w such that a path-wise optimal solution starting from w cannot exist (p. 187,
# CTCN online ed).
w1 = i + 1
w2 = load[1] / load[0] * w1 * 0.9
w = np.array([[w1], [w2]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
w_star_b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w, workload_mat, env.cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star, w_star_b, decimal=5)
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
psi_plus, c_plus, c_minus = si_object._get_closest_face_and_level_sets(w_star, v_star)
check_lambda_star(w, c_plus, psi_plus, w_star)
def test_get_price_lambda_star_when_monotone_region_is_a_ray_other_workload_value_using_new_cplus():
"""We use the simple re-entrant line with parameters that make monotone region to be a ray."""
state = np.array([[302], [297], [300]])
env = examples.simple_reentrant_line_model(alpha1=9, mu1=22, mu2=10, mu3=22,
cost_per_buffer=np.array([[1], [2], [3]]))
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
w = workload_mat @ state # = np.array([[59.9], [54.59090909]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
psi_plus, c_plus, c_minus = si_object._get_closest_face_and_level_sets(w_star, v_star)
assert StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
    # Set a near-zero epsilon to get the same result with all lambda_star methods.
psi_plus, c_plus \
= StrategicIdlingHedging._get_closest_face_and_level_sets_for_ray_or_feasibility_boundary(
c_minus, w_star, epsilon=1e-10)
check_lambda_star(w, c_plus, psi_plus, w_star)
    # A positive epsilon makes the strong duality method for lambda_star give a different solution.
psi_plus, c_plus \
= StrategicIdlingHedging._get_closest_face_and_level_sets_for_ray_or_feasibility_boundary(
c_minus, w_star, epsilon=0.01)
with pytest.raises(AssertionError):
check_lambda_star(w, c_plus, psi_plus, w_star)
def test_get_price_lambda_star_when_monotone_region_is_a_ray_with_high_epsilon():
"""We use the simple re-entrant line with parameters that make monotone region to be a ray.
This test shows that if the artificial cone is very wide, w will be inside, so that we should
not compute lambda_star."""
state = np.array([[302], [297], [300]])
env = examples.simple_reentrant_line_model(alpha1=9, mu1=22, mu2=10, mu3=22,
cost_per_buffer=np.array([[1], [2], [3]]))
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
w = workload_mat @ state # = np.array([[59.9], [54.59090909]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
psi_plus, c_plus, c_minus = si_object._get_closest_face_and_level_sets(w_star, v_star)
assert StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
    # A positive epsilon makes the strong duality method for lambda_star give a different solution.
psi_plus, c_plus \
= StrategicIdlingHedging._get_closest_face_and_level_sets_for_ray_or_feasibility_boundary(
c_minus, w_star, epsilon=0.3)
assert psi_plus.T @ w >= 0
with pytest.raises(AssertionError):
_ = alt_methods_test.get_price_lambda_star_strong_duality(w, w_star, c_plus, psi_plus)
with pytest.raises(AssertionError):
_ = alt_methods_test.get_price_lambda_star_lp_1_cvxpy(w, c_plus, psi_plus)
with pytest.raises(AssertionError):
_ = alt_methods_test.get_price_lambda_star_lp_2_cvxpy(w, c_plus, psi_plus)
with pytest.raises(AssertionError):
_ = alt_methods_test.get_price_lambda_star_lp_scipy(w, c_plus, psi_plus)
def test_get_price_lambda_star_with_infeasible_workload_space():
"""We use the single server queue with demand for which we know that there is always nonempty
infeasible region."""
env = examples.single_station_demand_model(alpha_d=9, mu=10, mus=1e3, mud=1e2,
initial_state=np.array(([300, 0, 1000])))
load, workload_mat, nu = wl.compute_load_workload_matrix(env, num_wl_vec=2)
w = np.array([[100], [10.01]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
psi_plus, c_plus, c_minus = si_object._get_closest_face_and_level_sets(w_star, v_star)
assert StrategicIdlingHedging._is_infeasible(c_plus)
psi_plus, c_plus = \
StrategicIdlingHedging._get_closest_face_and_level_sets_for_ray_or_feasibility_boundary(
c_minus, w_star, epsilon=0)
check_lambda_star(w, c_plus, psi_plus, w_star)
def test_lambda_star_in_ksrs_network_model_case_2():
"""Example 5.3.3 case 2 from CTCN book (online version)."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
alpha1 = 0.3
alpha3 = 0.3
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
strategic_idling_params = StrategicIdlingParams()
# Region 1 = {0 < 3 * w1 < w2 < inf}
w1 = 1
w2 = 4
w = np.array([[w1], [w2]])
si_object_1 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star_1 = si_object_1._find_workload_with_min_eff_cost_by_idling(w)
w_star_1b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w, workload_mat, cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star_1, w_star_1b)
v_star_1 = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star_1, w)
psi_plus_1, c_plus_1, c_minus_1 = si_object_1._get_closest_face_and_level_sets(w_star_1,
v_star_1)
check_lambda_star(w, c_plus_1, psi_plus_1, w_star_1)
# Region 3 = {0 < 3 * w2 < w1}
w1 = 4
w2 = 0.05
w = np.array([[w1], [w2]])
si_object_3 = create_strategic_idling_object(
workload_mat=workload_mat, cost_per_buffer=env.cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star_3 = si_object_3._find_workload_with_min_eff_cost_by_idling(w)
w_star_3b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w, workload_mat, cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star_3, w_star_3b)
v_star_3 = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star_3, w)
psi_plus_3, c_plus_3, c_minus_3 = si_object_3._get_closest_face_and_level_sets(w_star_3,
v_star_3)
check_lambda_star(w, c_plus_3, psi_plus_3, w_star_3)
def test_compute_height_process_case_1():
psi_plus = -np.ones((3, 1))
w = np.ones((3, 1))
height = StrategicIdlingHedging._compute_height_process(psi_plus, w)
assert height == 3
def test_compute_height_process_case_2():
psi_plus = np.array([[-1], [-0.4], [-0.3]])
w = np.array([[0.2], [1], [2]])
height = StrategicIdlingHedging._compute_height_process(psi_plus, w)
np.testing.assert_almost_equal(height, 1.2)
def test_get_possible_idling_directions_single_min_no_threshold():
w = np.array([[-1], [1]])
psi_plus = np.array([[1], [-0.5]])
beta_star = 0
v_star = np.array([[0.25], [0]])
k_idling_set = StrategicIdlingHedging._get_possible_idling_directions(w, beta_star, psi_plus,
v_star)
assert np.all(k_idling_set == np.array([0]))
def test_get_possible_idling_directions_single_min_with_high_threshold():
w = np.array([[-1], [1]])
    psi_plus = np.array([[1], [-0.5]])
import numpy as np
import logging
import six
import loopy as lp
import cantera as ct
from nose.plugins.attrib import attr
from unittest.case import SkipTest
from parameterized import parameterized
try:
from scipy.sparse import csr_matrix, csc_matrix
except ImportError:
csr_matrix = None
csc_matrix = None
from pyjac.core.rate_subs import (
get_concentrations,
get_rop, get_rop_net, get_spec_rates, get_molar_rates, get_thd_body_concs,
get_rxn_pres_mod, get_reduced_pressure_kernel, get_lind_kernel,
get_sri_kernel, get_troe_kernel, get_simple_arrhenius_rates,
polyfit_kernel_gen, get_plog_arrhenius_rates, get_cheb_arrhenius_rates,
get_rev_rates, get_temperature_rate, get_extra_var_rates)
from pyjac.loopy_utils.loopy_utils import (
loopy_options, kernel_call, set_adept_editor, populate, get_target)
from pyjac.core.enum_types import RateSpecialization, FiniteDifferenceMode
from pyjac.core.create_jacobian import (
dRopi_dnj, dci_thd_dnj, dci_lind_dnj, dci_sri_dnj, dci_troe_dnj,
total_specific_energy, dTdot_dnj, dEdot_dnj, thermo_temperature_derivative,
dRopidT, dRopi_plog_dT, dRopi_cheb_dT, dTdotdT, dci_thd_dT, dci_lind_dT,
dci_troe_dT, dci_sri_dT, dEdotdT, dTdotdE, dEdotdE, dRopidE, dRopi_plog_dE,
dRopi_cheb_dE, dci_thd_dE, dci_lind_dE, dci_troe_dE, dci_sri_dE,
determine_jac_inds, reset_arrays, get_jacobian_kernel,
finite_difference_jacobian)
from pyjac.core import array_creator as arc
from pyjac.core.enum_types import reaction_type, falloff_form
from pyjac.kernel_utils import kernel_gen as k_gen
from pyjac.tests import get_test_langs, TestClass
from pyjac.tests.test_utils import (
kernel_runner, get_comparable, _generic_tester,
_full_kernel_test, with_check_inds, inNd, skipif, xfail)
from pyjac.core.enum_types import KernelType
from pyjac import utils
class editor(object):
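    """
    Helper that, via `set_adept_editor`, turns a generated kernel into an Adept
    automatic-differentiation kernel whose `jac` output holds the derivatives of
    `dependent` with respect to `independent`.
    """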
def __init__(self, independent, dependent,
problem_size, order, do_not_set=[],
skip_on_missing=None):
def __replace_problem_size(shape):
new_shape = []
for x in shape:
if x != arc.problem_size.name:
new_shape.append(x)
else:
new_shape.append(problem_size)
return tuple(new_shape)
assert len(independent.shape) == 2
self.independent = independent.copy(shape=__replace_problem_size(
independent.shape))
indep_size = independent.shape[1]
assert len(dependent.shape) == 2
self.dependent = dependent.copy(shape=__replace_problem_size(
dependent.shape))
dep_size = dependent.shape[1]
self.problem_size = problem_size
# create the jacobian
self.output = arc.creator('jac', np.float64,
(problem_size, dep_size, indep_size),
order=order)
self.output = self.output(*['i', 'j', 'k'])[0]
self.do_not_set = utils.listify(do_not_set)
self.skip_on_missing = skip_on_missing
def set_single_kernel(self, single_kernel):
"""
It's far easier to use two generated kernels, one that uses the full
problem size (for calling via loopy), and another that uses a problem
size of 1, to work with Adept indexing in the AD kernel
"""
self.single_kernel = single_kernel
def set_skip_on_missing(self, func):
"""
If set, skip if the :class:`kernel_info` returned by this function
is None
"""
self.skip_on_missing = func
def __call__(self, knl):
return set_adept_editor(knl, self.single_kernel, self.problem_size,
self.independent, self.dependent, self.output,
self.do_not_set)
# various convenience wrappers
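# Each wrapper below adapts a rate-kernel constructor to the uniform
# (loopy_opts, namestore, test_size) signature expected when functions are
# chained through `extra_funcs` in _get_jacobian.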
def _get_fall_call_wrapper():
def fall_wrapper(loopy_opts, namestore, test_size):
return get_simple_arrhenius_rates(loopy_opts, namestore,
test_size, falloff=True)
return fall_wrapper
def _get_plog_call_wrapper(rate_info):
def plog_wrapper(loopy_opts, namestore, test_size):
if rate_info['plog']['num']:
return get_plog_arrhenius_rates(loopy_opts, namestore,
rate_info['plog']['max_P'],
test_size)
return plog_wrapper
def _get_cheb_call_wrapper(rate_info):
def cheb_wrapper(loopy_opts, namestore, test_size):
if rate_info['cheb']['num']:
return get_cheb_arrhenius_rates(loopy_opts, namestore,
np.max(rate_info['cheb']['num_P']),
np.max(rate_info['cheb']['num_T']),
test_size)
return cheb_wrapper
def _get_poly_wrapper(name, conp):
def poly_wrapper(loopy_opts, namestore, test_size):
return polyfit_kernel_gen(name, loopy_opts, namestore, test_size)
return poly_wrapper
def _get_ad_jacobian(self, test_size, conp=True, pregen=None, return_kernel=False):
"""
    Convenience method to evaluate the autodifferentiated (Adept) Jacobian from a given
Phi / parameter set
Parameters
----------
test_size: int
The number of conditions to test
conp: bool
If True, CONP else CONV
pregen: Callable [None]
If not None, this corresponds to a previously generated AD-Jacobian kernel
Used in the validation tester to speed up chunked Jacobian evaluation
return_kernel: bool [False]
        If True, we want _get_jacobian to return the kernel and kernel call
rather than the evaluated array (to be used with :param:`pregen`)
"""
class create_arr(object):
def __init__(self, dim):
self.dim = dim
@classmethod
def new(cls, inds):
if isinstance(inds, np.ndarray):
dim = inds.size
elif isinstance(inds, list):
dim = len(inds)
elif isinstance(inds, arc.creator):
dim = inds.initializer.size
elif isinstance(inds, int):
dim = inds
else:
return None
return cls(dim)
def __call__(self, order):
return np.zeros((test_size, self.dim), order=order)
# get rate info
rate_info = determine_jac_inds(
self.store.reacs, self.store.specs, RateSpecialization.fixed)
# create loopy options
# --> have to turn off the temperature guard to avoid fmin / max issues with
# Adept
ad_opts = loopy_options(order='C', lang='c', auto_diff=True)
# create namestore
store = arc.NameStore(ad_opts, rate_info, conp, test_size)
# and the editor
edit = editor(store.n_arr, store.n_dot, test_size,
order=ad_opts.order)
# setup args
phi = self.store.phi_cp if conp else self.store.phi_cv
allint = {'net': rate_info['net']['allint']}
args = {
'phi': lambda x: np.array(phi, order=x, copy=True),
'jac': lambda x: np.zeros((test_size,) + store.jac.shape[1:], order=x),
'wdot': create_arr.new(store.num_specs),
'Atroe': create_arr.new(store.num_troe),
'Btroe': create_arr.new(store.num_troe),
'Fcent': create_arr.new(store.num_troe),
'Fi': create_arr.new(store.num_fall),
'Pr': create_arr.new(store.num_fall),
'X': create_arr.new(store.num_sri),
'conc': create_arr.new(store.num_specs),
'dphi': lambda x: np.zeros_like(phi, order=x),
'kf': create_arr.new(store.num_reacs),
'kf_fall': create_arr.new(store.num_fall),
'kr': create_arr.new(store.num_rev_reacs),
'pres_mod': create_arr.new(store.num_thd),
'rop_fwd': create_arr.new(store.num_reacs),
'rop_rev': create_arr.new(store.num_rev_reacs),
'rop_net': create_arr.new(store.num_reacs),
'thd_conc': create_arr.new(store.num_thd),
'b': create_arr.new(store.num_specs),
'Kc': create_arr.new(store.num_rev_reacs)
}
if conp:
args['P_arr'] = lambda x: np.array(self.store.P, order=x, copy=True)
args['h'] = create_arr.new(store.num_specs)
args['cp'] = create_arr.new(store.num_specs)
else:
args['V_arr'] = lambda x: np.array(self.store.V, order=x, copy=True)
args['u'] = create_arr.new(store.num_specs)
args['cv'] = create_arr.new(store.num_specs)
# trim unused args
args = {k: v for k, v in six.iteritems(args) if v is not None}
# obtain the finite difference jacobian
kc = kernel_call('dnkdnj', [None], **args)
# check for pregenerated kernel
if pregen is not None:
return pregen(kc)
__b_call_wrapper = _get_poly_wrapper('b', conp)
__cp_call_wrapper = _get_poly_wrapper('cp', conp)
__cv_call_wrapper = _get_poly_wrapper('cv', conp)
__h_call_wrapper = _get_poly_wrapper('h', conp)
__u_call_wrapper = _get_poly_wrapper('u', conp)
def __extra_call_wrapper(loopy_opts, namestore, test_size):
return get_extra_var_rates(loopy_opts, namestore,
conp=conp, test_size=test_size)
def __temperature_wrapper(loopy_opts, namestore, test_size):
return get_temperature_rate(loopy_opts, namestore,
conp=conp, test_size=test_size)
return _get_jacobian(
self, __extra_call_wrapper, kc, edit, ad_opts, conp,
extra_funcs=[get_concentrations, get_simple_arrhenius_rates,
_get_plog_call_wrapper(rate_info),
_get_cheb_call_wrapper(rate_info),
get_thd_body_concs, _get_fall_call_wrapper(),
get_reduced_pressure_kernel, get_lind_kernel,
get_sri_kernel, get_troe_kernel,
__b_call_wrapper, get_rev_rates,
get_rxn_pres_mod, get_rop, get_rop_net,
get_spec_rates] + (
[__h_call_wrapper, __cp_call_wrapper] if conp else
[__u_call_wrapper, __cv_call_wrapper]) + [
get_molar_rates, __temperature_wrapper],
allint=allint, return_kernel=return_kernel)
def _make_array(self, array):
"""
Creates an array for comparison to an autorun kernel from the result
    of _get_jacobian
Parameters
----------
array : :class:`numpy.ndarray`
The input Jacobian array
Returns
-------
reshaped : :class:`numpy.ndarray`
The reshaped / reordered array for comparison to the autorun
kernel
"""
for i in range(array.shape[0]):
# reshape inner array
array[i, :, :] = np.reshape(array[i, :, :].flatten(order='K'),
array.shape[1:],
order='F')
return array
def _get_jacobian(self, func, kernel_call, editor, ad_opts, conp, extra_funcs=[],
return_kernel=False, **kwargs):
"""
Computes an autodifferentiated kernel, exposed to external classes in order
to share with the :mod:`functional_tester`
Parameters
----------
func: Callable
The function to autodifferentiate
kernel_call: :class:`kernel_call`
        The kernel call with arguments, etc. to use
editor: :class:`editor`
The jacobian editor responsible for creating the AD kernel
ad_opts: :class:`loopy_options`
The AD enabled loopy options object
extra_funcs: list of Callable
Additional functions that must be called before :param:`func`.
These can be used to chain together functions to find derivatives of
complicated values (e.g. ROP)
return_kernel: bool [False]
        If True, return a callable function that takes as an argument the
        new kernel_call w/ updated args and returns the result
        Note: The user is responsible for checking that the arguments are of
valid shape
kwargs: dict
        Additional args for :param:`func`
Returns
-------
ad_jac : :class:`numpy.ndarray`
The resulting autodifferentiated jacobian. The shape of which depends on
the values specified in the editor
"""
# find rate info
rate_info = determine_jac_inds(
self.store.reacs,
self.store.specs,
ad_opts.rate_spec)
# create namestore
namestore = arc.NameStore(ad_opts, rate_info, conp,
self.store.test_size)
# get kw args this function expects
def __get_arg_dict(check, **in_args):
try:
# py2-3 compat
arg_count = check.func_code.co_argcount
args = check.func_code.co_varnames[:arg_count]
except AttributeError:
arg_count = check.__code__.co_argcount
args = check.__code__.co_varnames[:arg_count]
args_dict = {}
for k, v in six.iteritems(in_args):
if k in args:
args_dict[k] = v
return args_dict
# create the kernel info
infos = []
info = func(ad_opts, namestore,
test_size=self.store.test_size,
**__get_arg_dict(func, **kwargs))
infos.extend(utils.listify(info))
# create a dummy kernel generator
knl = k_gen.make_kernel_generator(
kernel_type=KernelType.jacobian,
loopy_opts=ad_opts,
kernels=infos,
namestore=namestore,
test_size=self.store.test_size,
extra_kernel_data=[editor.output]
)
knl._make_kernels()
# get list of current args
have_match = kernel_call.strict_name_match
new_args = []
new_kernels = []
for k in knl.kernels:
if have_match and kernel_call.name != k.name:
continue
new_kernels.append(k)
for arg in k.args:
if arg not in new_args and not isinstance(
arg, lp.TemporaryVariable):
new_args.append(arg)
knl = new_kernels[:]
# generate dependencies with full test size to get extra args
def __raise(f):
raise SkipTest('Mechanism {} does not contain derivatives corresponding to '
'{}'.format(self.store.gas.name, f.__name__))
infos = []
for f in extra_funcs:
info = f(ad_opts, namestore,
test_size=self.store.test_size,
**__get_arg_dict(f, **kwargs))
is_skip = editor.skip_on_missing is not None and \
f == editor.skip_on_missing
if is_skip and any(x is None for x in utils.listify(info)):
# empty map (e.g. no PLOG)
__raise(f)
infos.extend([x for x in utils.listify(info) if x is not None])
for i in infos:
for arg in i.kernel_data:
if arg not in new_args and not isinstance(
arg, lp.TemporaryVariable):
new_args.append(arg)
for i in range(len(knl)):
knl[i] = knl[i].copy(args=new_args[:])
# and a generator for the single kernel
single_name = arc.NameStore(ad_opts, rate_info, conp, 1)
single_info = []
for f in extra_funcs + [func]:
info = f(ad_opts, single_name,
test_size=1,
**__get_arg_dict(f, **kwargs))
for i in utils.listify(info):
if f == func and have_match and kernel_call.name != i.name:
continue
if i is None:
# empty map (e.g. no PLOG)
continue
single_info.append(i)
single_knl = k_gen.make_kernel_generator(
kernel_type=KernelType.species_rates,
loopy_opts=ad_opts,
kernels=single_info,
namestore=single_name,
test_size=1,
extra_kernel_data=[editor.output]
)
single_knl._make_kernels()
# set in editor
editor.set_single_kernel(single_knl.kernels)
kernel_call.set_state(single_knl.array_split, ad_opts.order)
# and place output
kernel_call.kernel_args[editor.output.name] = np.zeros(
editor.output.shape,
order=editor.output.order)
# and finally tell us not to copy
kernel_call.do_not_copy.add(editor.output.name)
if return_kernel:
def __pregen(kernel_call):
# setup the kernel call
# reset the state
kernel_call.set_state(single_knl.array_split, ad_opts.order)
# and place output
kernel_call.kernel_args[editor.output.name] = np.zeros(
editor.output.shape,
order=editor.output.order)
# and finally tell us not to copy
kernel_call.do_not_copy.add(editor.output.name)
# run
populate([knl[0]], kernel_call, editor=editor)
# get result
return _make_array(self, kernel_call.kernel_args[editor.output.name])
return __pregen
# run kernel
populate([knl[0]], kernel_call, editor=editor)
return _make_array(self, kernel_call.kernel_args[editor.output.name])
class SubTest(TestClass):
"""
The base Jacobian tester class
"""
def setUp(self):
# steal the global function decls
self._get_jacobian = lambda *args, **kwargs: _get_jacobian(
self, *args, **kwargs)
self._make_array = lambda *args, **kwargs: _make_array(
self, *args, **kwargs)
self._get_ad_jacobian = lambda *args, **kwargs: _get_ad_jacobian(
self, *args, **kwargs)
super(SubTest, self).setUp()
def _generic_jac_tester(self, func, kernel_calls, do_ratespec=False,
do_ropsplit=None, do_conp=False, do_sparse=True,
sparse_only=False, **kwargs):
"""
A generic testing method that can be used for testing jacobian kernels
This is primarily a thin wrapper for :func:`_generic_tester`
Parameters
----------
func : function
The function to test
kernel_calls : :class:`kernel_call` or list thereof
Contains the masks and reference answers for kernel testing
do_ratespec : bool [False]
If true, test rate specializations and kernel splitting for simple rates
do_ropsplit : bool [False]
If true, test kernel splitting for rop_net
do_conp: bool [False]
If true, test for both constant pressure _and_ constant volume
do_sparse: bool [True]
Test the sparse Jacobian as well
sparse_only: bool [False]
Test only the sparse jacobian (e.g. for testing indexing)
"""
_generic_tester(self, func, kernel_calls, determine_jac_inds,
do_ratespec=do_ratespec, do_ropsplit=do_ropsplit,
do_conp=do_conp, do_sparse=do_sparse,
sparse_only=sparse_only, **kwargs)
def _make_namestore(self, conp):
# get number of sri reactions
reacs = self.store.reacs
specs = self.store.specs
rate_info = determine_jac_inds(reacs, specs, RateSpecialization.fixed)
ad_opts = loopy_options(order='C', lang='c', auto_diff=True)
# create namestore
namestore = arc.NameStore(ad_opts, rate_info, conp, self.store.test_size)
return namestore, rate_info
@attr('long')
@with_check_inds(check_inds={
1: lambda self: 2 + np.arange(self.store.gas.n_species - 1),
2: lambda self: 2 + np.arange(self.store.gas.n_species - 1)})
def test_dropi_dnj(self):
# test conp
namestore, rate_info = self._make_namestore(True)
ad_opts = namestore.loopy_opts
# set up arguments
allint = {'net': rate_info['net']['allint']}
# create the editor
edit = editor(
namestore.n_arr, namestore.n_dot, self.store.test_size,
order=ad_opts.order)
args = {'rop_fwd': lambda x: np.zeros_like(
self.store.fwd_rxn_rate, order=x),
'rop_rev': lambda x: np.zeros_like(
self.store.rev_rxn_rate, order=x),
'pres_mod': lambda x: np.array(
self.store.ref_pres_mod, order=x, copy=True),
'rop_net': lambda x: np.zeros_like(
self.store.rxn_rates, order=x),
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True),
'P_arr': lambda x: np.array(
self.store.P, order=x, copy=True),
'kf': lambda x: np.array(
self.store.fwd_rate_constants, order=x, copy=True),
'kr': lambda x: np.array(
self.store.rev_rate_constants, order=x, copy=True),
'conc': lambda x: np.zeros_like(
self.store.concs, order=x),
'wdot': lambda x: np.zeros_like(
self.store.species_rates, order=x),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x)
}
# obtain the finite difference jacobian
kc = kernel_call('dRopidnj', [self.store.rxn_rates], **args)
fd_jac = self._get_jacobian(
get_molar_rates, kc, edit, ad_opts, True,
extra_funcs=[get_concentrations, get_rop, get_rop_net,
get_spec_rates],
do_not_set=[namestore.rop_fwd, namestore.rop_rev,
namestore.conc_arr, namestore.spec_rates,
namestore.presmod],
allint=allint)
def _chainer(self, out_vals):
self.kernel_args['jac'] = out_vals[-1][0].copy(
order=self.current_order)
jac_size = rate_info['Ns'] + 1
args = {
'kf': lambda x: np.array(
self.store.fwd_rate_constants, order=x, copy=True),
'kr': lambda x: np.array(
self.store.rev_rate_constants, order=x, copy=True),
'pres_mod': lambda x: np.array(
self.store.ref_pres_mod, order=x, copy=True),
'conc': lambda x: np.array(
self.store.concs, order=x, copy=True),
'jac': lambda x: np.zeros(
(self.store.test_size, jac_size, jac_size), order=x)
}
comp = self._get_compare(fd_jac)
# and test
kc = [kernel_call('dRopidnj', [fd_jac], check=False,
strict_name_match=True, **args),
kernel_call('dRopidnj_ns', comp.ref_answer, compare_mask=[comp],
compare_axis=comp.compare_axis, chain=_chainer,
strict_name_match=True, allow_skip=True, **args)]
return self._generic_jac_tester(dRopi_dnj, kc, allint=allint)
def __get_check(self, include_test, rxn_test=None):
include = set()
exclude = set()
# get list of species not in falloff / chemically activated
for i_rxn, rxn in enumerate(self.store.gas.reactions()):
if rxn_test is None or rxn_test(rxn):
specs = set(
list(rxn.products.keys()) + list(rxn.reactants.keys()))
nonzero_specs = set()
for spec in specs:
if spec == self.store.gas.species_names[-1]:
# ns derivative -> no jacobian entry
continue
nu = 0
if spec in rxn.products:
nu += rxn.products[spec]
if spec in rxn.reactants:
nu -= rxn.reactants[spec]
if nu != 0:
nonzero_specs.update([spec])
if include_test(rxn):
include.update(nonzero_specs)
else:
exclude.update(nonzero_specs)
test = set(self.store.gas.species_index(x)
for x in include - exclude)
return np.array(sorted(test)) + 2
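# Clarifying note (added): the '+ 2' above maps species indices onto Jacobian
# rows/columns; indices 0 and 1 hold temperature and the extra state variable
# (V or P), so species moles start at index 2, matching the
# '2 + np.arange(self.store.gas.n_species - 1)' masks used by the tests.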
def __get_dci_check(self, include_test):
return self.__get_check(include_test, lambda rxn:
isinstance(rxn, ct.FalloffReaction) or
isinstance(rxn, ct.ThreeBodyReaction))
def __get_comp_extractor(self, kc, mask):
cm = mask.compare_mask[0] if isinstance(mask, get_comparable) else mask
if len(cm) != 3:
return tuple([None]) * 4 # only compare masks w/ conditions
# first, invert the conditions mask
cond, x, y = cm
cond = np.where(np.logical_not(
np.in1d(np.arange(self.store.test_size), cond)))[0]
if not cond.size:
return tuple([None]) * 4 # nothing to test
def __get_val(vals, mask, **kwargs):
outv = vals.copy()
for ax, m in enumerate(mask):
outv = np.take(outv, m, axis=ax)
return outv
extractor = __get_val
# create a new compare mask if necessary
if isinstance(mask, get_comparable):
mask = get_comparable(
compare_mask=[(cond, x, y)],
compare_axis=mask.compare_axis,
ref_answer=mask.ref_answer)
# and redefine the value extractor
def __get_val(vals, *args, **kwargs):
return mask(kc, vals, 0, **kwargs)
extractor = __get_val
# and return the extractor
return extractor, cond, x, y
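# Usage sketch (added, hypothetical values): the returned extractor pulls the
# complementary (unchecked) initial conditions out of a full array, e.g.
#   extractor, cond, x, y = self.__get_comp_extractor(kc, mask)
#   if extractor is not None:
#       ours = extractor(our_vals, (cond, x, y))
#       refs = extractor(ref_vals, (cond, x, y), is_answer=True)
# which is how nan_compare and _small_compare below consume it.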
@attr('long')
@with_check_inds(check_inds={
# get list of species not in falloff / chemically activated
# to get the check mask
1: lambda self: self.__get_dci_check(
lambda x: isinstance(x, ct.ThreeBodyReaction)),
2: lambda self: 2 + np.arange(self.store.gas.n_species - 1)})
def test_dci_thd_dnj(self):
# test conp
namestore, rate_info = self._make_namestore(True)
ad_opts = namestore.loopy_opts
# set up arguments
# create the editor
edit = editor(
namestore.n_arr, namestore.n_dot, self.store.test_size,
order=ad_opts.order, skip_on_missing=get_thd_body_concs)
args = {'rop_fwd': lambda x: np.array(
self.store.fwd_rxn_rate, order=x, copy=True),
'rop_rev': lambda x: np.array(
self.store.rev_rxn_rate, order=x, copy=True),
'pres_mod': lambda x: np.array(
self.store.ref_pres_mod, order=x, copy=True),
'rop_net': lambda x: np.zeros_like(
self.store.rxn_rates, order=x),
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True),
'P_arr': lambda x: np.array(
self.store.P, order=x, copy=True),
'conc': lambda x: np.zeros_like(
self.store.concs, order=x),
'wdot': lambda x: np.zeros_like(
self.store.species_rates, order=x),
'thd_conc': lambda x: np.zeros_like(
self.store.ref_thd, order=x),
'Fi': lambda x: np.array(
self.store.ref_Fall, order=x, copy=True),
'Pr': lambda x: np.array(
self.store.ref_Pr, order=x, copy=True),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x)
}
# obtain the finite difference jacobian
kc = kernel_call('dci_thd_nj', [None], **args)
fd_jac = self._get_jacobian(
get_molar_rates, kc, edit, ad_opts, True,
extra_funcs=[get_concentrations, get_thd_body_concs,
get_rxn_pres_mod, get_rop_net,
get_spec_rates],
do_not_set=[
namestore.conc_arr, namestore.spec_rates, namestore.rop_net])
# setup args
jac_size = rate_info['Ns'] + 1
args = {
'rop_fwd': lambda x: np.array(
self.store.fwd_rxn_rate, order=x, copy=True),
'rop_rev': lambda x: np.array(
self.store.rev_rxn_rate, order=x, copy=True),
'jac': lambda x: np.zeros(
(self.store.test_size, jac_size, jac_size), order=x)
}
def _chainer(self, out_vals):
self.kernel_args['jac'] = out_vals[-1][0].copy(
order=self.current_order)
# and get mask
comp = self._get_compare(fd_jac)
kc = [kernel_call('dci_thd_dnj', comp.ref_answer, check=False,
strict_name_match=True, **args),
kernel_call('dci_thd_dnj_ns', comp.ref_answer, compare_mask=[comp],
compare_axis=comp.compare_axis, chain=_chainer,
strict_name_match=True, allow_skip=True, **args)]
return self._generic_jac_tester(dci_thd_dnj, kc)
def nan_compare(self, kc, our_val, ref_val, mask, allow_our_nans=False):
# get the condition extractor
extractor, cond, x, y = self.__get_comp_extractor(kc, mask)
if extractor is None:
# no need to test
return True
def __compare(our_vals, ref_vals):
# sometimes if only one value is selected, we end up with a
# zero-dimensional array
if not ref_vals.shape and ref_vals:
ref_vals = np.expand_dims(ref_vals, axis=0)
if not our_vals.shape and our_vals:
our_vals = np.expand_dims(our_vals, axis=0)
# find where close
bad = np.where(np.logical_not(np.isclose(ref_vals, our_vals)))
good = np.where(np.isclose(ref_vals, our_vals))
# make sure all the mismatched entries in the ref val are NaN's
is_correct = np.all(np.isnan(ref_vals[bad]))
# or failing that, just that they're much "larger" than the other
# entries (sometimes the Pr will not be exactly zero if it's
# based on the concentration of the last species)
fac = 1 if not good[0].size else np.max(np.abs(ref_vals[good]))
is_correct = is_correct or (
(np.min(np.abs(ref_vals[bad])) / fac) > 1e10)
# and ensure all our values are 'large' but finite numbers
# _or_ allow_our_nans is True _and_ they're all nan's
is_correct = is_correct and (
(allow_our_nans and np.all(np.isnan(our_vals[bad]))) or
np.all(np.abs(our_vals[bad]) >= utils.inf_cutoff))
return is_correct
return __compare(extractor(our_val, (cond, x, y)),
extractor(ref_val, (cond, x, y), is_answer=True))
def our_nan_compare(self, kc, our_val, ref_val, mask):
return self.nan_compare(kc, our_val, ref_val, mask, allow_our_nans=True)
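# Clarifying note (added): nan_compare tolerates entries where the autodiff
# reference blew up (e.g. a NaN from a zero reduced pressure). As a toy,
# assumed example with ref = [1.0, nan] and ours = [1.0, 1e305], the mismatch
# at index 1 is accepted because the reference is NaN and our value exceeds
# utils.inf_cutoff (or, with allow_our_nans=True, is itself NaN).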
def __get_removed(self):
# get our form of rop_fwd / rop_rev
fwd_removed = self.store.fwd_rxn_rate.copy()
rev_removed = self.store.rev_rxn_rate.copy()
if self.store.thd_inds.size:
with np.errstate(divide='ignore', invalid='ignore'):
fwd_removed[:, self.store.thd_inds] = fwd_removed[
:, self.store.thd_inds] / self.store.ref_pres_mod
thd_in_rev = np.where(
np.in1d(self.store.thd_inds, self.store.rev_inds))[0]
rev_update_map = np.where(
np.in1d(
self.store.rev_inds, self.store.thd_inds[thd_in_rev]))[0]
rev_removed[:, rev_update_map] = rev_removed[
:, rev_update_map] / self.store.ref_pres_mod[:, thd_in_rev]
# where ref_pres_mod == 0 the division produced NaN; that corresponds to a zero rate
fwd_removed[np.where(np.isnan(fwd_removed))] = 0
rev_removed[np.where(np.isnan(rev_removed))] = 0
return fwd_removed, rev_removed
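# Clarifying note (added): the Jacobian kernels work with rates of progress
# stripped of third-body / falloff corrections, so e.g. a hypothetical
# rop_fwd of 6.0 with ref_pres_mod of 2.0 is stored here as 3.0; a pres_mod
# of zero makes the division return NaN, which is reset to a true zero rate.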
def __get_kf_and_fall(self, conp=True):
reacs = self.store.reacs
specs = self.store.specs
rate_info = determine_jac_inds(reacs, specs, RateSpecialization.fixed)
# create args and parameters
phi = self.store.phi_cp if conp else self.store.phi_cv
args = {'phi': lambda x: np.array(phi, order=x, copy=True),
'kf': lambda x: np.zeros_like(self.store.fwd_rate_constants,
order=x)}
opts = loopy_options(order='C', lang='c')
namestore = arc.NameStore(opts, rate_info, True, self.store.test_size)
# get kf
runner = kernel_runner(get_simple_arrhenius_rates,
self.store.test_size, args)
kf = runner(opts, namestore, self.store.test_size)['kf']
if self.store.ref_Pr.size:
args = {'phi': lambda x: np.array(phi, order=x, copy=True),
'kf_fall': lambda x: np.zeros_like(self.store.ref_Fall, order=x)}
# get kf_fall
runner = kernel_runner(get_simple_arrhenius_rates,
self.store.test_size, args,
{'falloff': True})
kf_fall = runner(opts, namestore, self.store.test_size)['kf_fall']
else:
kf_fall = None
if namestore.num_plog is not None:
args = {'phi': lambda x: np.array(phi, order=x, copy=True),
'kf': lambda x: np.array(kf, order=x, copy=True)}
if conp:
args['P_arr'] = lambda x: np.array(
self.store.P, order=x, copy=True)
# get plog
runner = kernel_runner(_get_plog_call_wrapper(rate_info),
self.store.test_size, args)
kf = runner(opts, namestore, self.store.test_size)['kf']
if namestore.num_cheb is not None:
args = {'phi': lambda x: np.array(phi, order=x, copy=True),
'kf': lambda x: np.array(kf, order=x, copy=True)}
if conp:
args['P_arr'] = lambda x: np.array(
self.store.P, order=x, copy=True)
# get cheb
runner = kernel_runner(_get_cheb_call_wrapper(rate_info),
self.store.test_size, args)
kf = runner(opts, namestore, self.store.test_size)['kf']
return kf, kf_fall
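# Clarifying note (added): the chain above mirrors the forward rate
# evaluation order: simple Arrhenius rates first, an optional falloff
# evaluation into 'kf_fall', then PLOG and Chebyshev updates that overwrite
# the matching entries of 'kf', each step run through a throwaway
# kernel_runner at the full test size.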
def __get_kr(self, kf):
reacs = self.store.reacs
specs = self.store.specs
rate_info = determine_jac_inds(reacs, specs, RateSpecialization.fixed)
args = {
'kf': lambda x: np.array(kf, order=x, copy=True),
'b': lambda x: np.array(
self.store.ref_B_rev, order=x, copy=True)}
opts = loopy_options(order='C', lang='c')
namestore = arc.NameStore(opts, rate_info, True, self.store.test_size)
allint = {'net': rate_info['net']['allint']}
# get kr
runner = kernel_runner(get_rev_rates,
self.store.test_size, args, {'allint': allint})
kr = runner(opts, namestore, self.store.test_size)['kr']
return kr
def __get_db(self):
reacs = self.store.reacs
specs = self.store.specs
rate_info = determine_jac_inds(reacs, specs, RateSpecialization.fixed)
opts = loopy_options(order='C', lang='c')
namestore = arc.NameStore(opts, rate_info, True, self.store.test_size)
# need dBk/dT
args = {
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True),
}
def __call_wrapper(loopy_opts, namestore, test_size):
return thermo_temperature_derivative(
'db',
loopy_opts, namestore,
test_size)
# get db
runner = kernel_runner(__call_wrapper, self.store.test_size, args)
return runner(opts, namestore, self.store.test_size)['db']
@attr('long')
@with_check_inds(check_inds={
1: lambda self: self.__get_dci_check(
lambda rxn: isinstance(rxn, ct.FalloffReaction) and
rxn.falloff.type == 'Simple'),
2: lambda self: 2 + np.arange(self.store.gas.n_species - 1)
})
def test_dci_lind_dnj(self):
# test conp
namestore, rate_info = self._make_namestore(True)
ad_opts = namestore.loopy_opts
# set up arguments
allint = {'net': rate_info['net']['allint']}
fwd_removed, rev_removed = self.__get_removed()
# set up arguments
# create the editor
edit = editor(
namestore.n_arr, namestore.n_dot, self.store.test_size,
order=ad_opts.order, skip_on_missing=get_lind_kernel)
kf, kf_fall = self.__get_kf_and_fall()
args = {'rop_fwd': lambda x: np.array(
fwd_removed, order=x, copy=True),
'rop_rev': lambda x: np.array(
rev_removed, order=x, copy=True),
'pres_mod': lambda x: np.zeros_like(
self.store.ref_pres_mod, order=x),
'rop_net': lambda x: np.zeros_like(
self.store.rxn_rates, order=x),
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True),
'P_arr': lambda x: np.array(
self.store.P, order=x, copy=True),
'conc': lambda x: np.zeros_like(
self.store.concs, order=x),
'wdot': lambda x: np.zeros_like(
self.store.species_rates, order=x),
'thd_conc': lambda x: np.zeros_like(
self.store.ref_thd, order=x),
'Fi': lambda x: np.zeros_like(self.store.ref_Fall, order=x),
'Pr': lambda x: np.zeros_like(self.store.ref_Pr, order=x),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x),
'kf': lambda x: np.array(kf, order=x, copy=True),
'kf_fall': lambda x: np.array(kf_fall, order=x, copy=True),
}
# obtain the finite difference jacobian
kc = kernel_call('dci_lind_nj', [None], **args)
fd_jac = self._get_jacobian(
get_molar_rates, kc, edit, ad_opts, True,
extra_funcs=[get_concentrations, get_thd_body_concs,
get_reduced_pressure_kernel, get_lind_kernel,
get_rxn_pres_mod, get_rop_net,
get_spec_rates],
do_not_set=[namestore.conc_arr, namestore.spec_rates,
namestore.rop_net, namestore.Fi],
allint=allint)
# setup args
args = {
'rop_fwd': lambda x: np.array(
fwd_removed, order=x, copy=True),
'rop_rev': lambda x: np.array(
rev_removed, order=x, copy=True),
'Pr': lambda x: np.array(
self.store.ref_Pr, order=x, copy=True),
'Fi': lambda x: np.array(
self.store.ref_Fall, order=x, copy=True),
'pres_mod': lambda x: np.array(
self.store.ref_pres_mod, order=x, copy=True),
'kf': lambda x: np.array(kf, order=x, copy=True),
'kf_fall': lambda x: np.array(kf_fall, order=x, copy=True),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x),
}
def _chainer(self, out_vals):
self.kernel_args['jac'] = out_vals[-1][0].copy(
order=self.current_order)
# and get mask
comp = self._get_compare(fd_jac)
kc = [kernel_call('dci_lind_dnj', comp.ref_answer, check=False,
strict_name_match=True, **args),
kernel_call('dci_lind_dnj_ns', comp.ref_answer, compare_mask=[comp],
compare_axis=comp.compare_axis, chain=_chainer,
strict_name_match=True, allow_skip=True, **args)]
return self._generic_jac_tester(dci_lind_dnj, kc)
def __get_sri_params(self, namestore):
sri_args = {'Pr': lambda x: np.array(
self.store.ref_Pr, order=x, copy=True),
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True)}
runner = kernel_runner(get_sri_kernel, self.store.test_size, sri_args)
opts = loopy_options(order='C', lang='c')
X = runner(opts, namestore, self.store.test_size)['X']
return X
@attr('long')
@with_check_inds(check_inds={
# find non-NaN SRI entries for testing
# NaN entries will be handled by :func:`nan_compare`
0: lambda self: np.where(np.all(
self.store.ref_Pr[:, self.store.sri_to_pr_map] != 0.0, axis=1))[0],
1: lambda self: self.__get_dci_check(
lambda rxn: isinstance(rxn, ct.FalloffReaction) and
rxn.falloff.type == 'SRI'),
2: lambda self: 2 + np.arange(self.store.gas.n_species - 1)
})
def test_dci_sri_dnj(self):
# test conp
namestore, rate_info = self._make_namestore(True)
ad_opts = namestore.loopy_opts
# set up arguments
allint = {'net': rate_info['net']['allint']}
# get our form of rop_fwd / rop_rev
fwd_removed, rev_removed = self.__get_removed()
# set up arguments
# create the editor
edit = editor(
namestore.n_arr, namestore.n_dot, self.store.test_size,
order=ad_opts.order, skip_on_missing=get_sri_kernel)
if not rate_info['fall']['sri']['num']:
raise SkipTest('No SRI reactions in mechanism {}'.format(
self.store.gas.name))
# get kf / kf_fall
kf, kf_fall = self.__get_kf_and_fall()
# create X
X = self.__get_sri_params(namestore)
args = {
'pres_mod': lambda x: np.zeros_like(
self.store.ref_pres_mod, order=x),
'thd_conc': lambda x: np.array(
self.store.ref_thd, order=x, copy=True),
'Fi': lambda x: np.zeros_like(self.store.ref_Fall, order=x),
'Pr': lambda x: np.array(self.store.ref_Pr, order=x, copy=True),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x),
'X': lambda x: np.zeros_like(X, order=x),
'phi': lambda x: np.array(self.store.phi_cp, order=x, copy=True),
'P_arr': lambda x: np.array(self.store.P, order=x, copy=True),
'conc': lambda x: np.zeros_like(self.store.concs, order=x),
'kf': lambda x: np.array(kf, order=x, copy=True),
'kf_fall': lambda x: np.array(kf_fall, order=x, copy=True),
'wdot': lambda x: np.zeros_like(self.store.species_rates, order=x),
'rop_fwd': lambda x: np.array(
fwd_removed, order=x, copy=True),
'rop_rev': lambda x: np.array(
rev_removed, order=x, copy=True),
'rop_net': lambda x: np.zeros_like(self.store.rxn_rates, order=x)
}
# obtain the finite difference jacobian
kc = kernel_call('dci_sri_nj', [None], **args)
fd_jac = self._get_jacobian(
get_molar_rates, kc, edit, ad_opts, True,
extra_funcs=[get_concentrations, get_thd_body_concs,
get_reduced_pressure_kernel, get_sri_kernel,
get_rxn_pres_mod, get_rop_net, get_spec_rates],
do_not_set=[namestore.conc_arr, namestore.Fi, namestore.X_sri,
namestore.thd_conc],
allint=allint)
# setup args
args = {
'rop_fwd': lambda x: np.array(
fwd_removed, order=x, copy=True),
'rop_rev': lambda x: np.array(
rev_removed, order=x, copy=True),
'Pr': lambda x: np.array(
self.store.ref_Pr, order=x, copy=True),
'Fi': lambda x: np.array(
self.store.ref_Fall, order=x, copy=True),
'pres_mod': lambda x: np.array(
self.store.ref_pres_mod, order=x, copy=True),
'kf': lambda x: np.array(kf, order=x, copy=True),
'kf_fall': lambda x: np.array(kf_fall, order=x, copy=True),
'X': lambda x: np.array(X, order=x, copy=True),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x),
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True)
}
def _chainer(self, out_vals):
self.kernel_args['jac'] = out_vals[-1][0].copy(
order=self.current_order)
# and get mask
comp = self._get_compare(fd_jac)
kc = [kernel_call('dci_sri_dnj', comp.ref_answer, check=False,
strict_name_match=True, **args),
kernel_call('dci_sri_dnj_ns', comp.ref_answer,
compare_mask=[comp],
compare_axis=comp.compare_axis, chain=_chainer,
strict_name_match=True, allow_skip=True,
other_compare=self.nan_compare, rtol=5e-4, **args)]
return self._generic_jac_tester(dci_sri_dnj, kc)
def __get_troe_params(self, namestore):
troe_args = {'Pr': lambda x: np.array(
self.store.ref_Pr, order=x, copy=True),
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True)}
runner = kernel_runner(
get_troe_kernel, self.store.test_size, troe_args)
opts = loopy_options(order='C', lang='c')
Fcent, Atroe, Btroe = [runner(
opts, namestore, self.store.test_size)[x] for x in
['Fcent', 'Atroe', 'Btroe']]
return Fcent, Atroe, Btroe
@attr('long')
@with_check_inds(check_inds={
# find non-NaN Troe entries for testing
# NaN entries will be handled by :func:`nan_compare`
0: lambda self: np.where(np.all(
self.store.ref_Pr[:, self.store.troe_to_pr_map] != 0.0, axis=1))[0],
1: lambda self: self.__get_dci_check(
lambda rxn: isinstance(rxn, ct.FalloffReaction) and
rxn.falloff.type == 'Troe'),
2: lambda self: 2 + np.arange(self.store.gas.n_species - 1)
})
def test_dci_troe_dnj(self):
# test conp
namestore, rate_info = self._make_namestore(True)
ad_opts = namestore.loopy_opts
# set up arguments
allint = {'net': rate_info['net']['allint']}
# get our form of rop_fwd / rop_rev
fwd_removed, rev_removed = self.__get_removed()
# set up arguments
# create the editor
edit = editor(
namestore.n_arr, namestore.n_dot, self.store.test_size,
order=ad_opts.order, skip_on_missing=get_troe_kernel)
if not rate_info['fall']['troe']['num']:
raise SkipTest('No Troe reactions in mechanism {}'.format(
self.store.gas.name))
# get kf / kf_fall
kf, kf_fall = self.__get_kf_and_fall()
Fcent, Atroe, Btroe = self.__get_troe_params(namestore)
args = {
'pres_mod': lambda x: np.zeros_like(
self.store.ref_pres_mod, order=x),
'thd_conc': lambda x: np.array(
self.store.ref_thd, order=x, copy=True),
'Fi': lambda x: np.zeros_like(self.store.ref_Fall, order=x),
'Pr': lambda x: np.array(self.store.ref_Pr, order=x, copy=True),
'phi': lambda x: np.array(self.store.phi_cp, order=x, copy=True),
'P_arr': lambda x: np.array(self.store.P, order=x, copy=True),
'conc': lambda x: np.zeros_like(self.store.concs, order=x),
'wdot': lambda x: np.zeros_like(self.store.species_rates, order=x),
'rop_fwd': lambda x: np.array(
fwd_removed, order=x, copy=True),
'rop_rev': lambda x: np.array(
rev_removed, order=x, copy=True),
'rop_net': lambda x: np.zeros_like(self.store.rxn_rates, order=x),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x),
'kf': lambda x: np.array(kf, order=x, copy=True),
'kf_fall': lambda x: np.array(kf_fall, order=x, copy=True),
'Atroe': lambda x: np.zeros_like(Atroe, order=x),
'Btroe': lambda x: np.zeros_like(Btroe, order=x),
'Fcent': lambda x: np.zeros_like(Fcent, order=x)
}
# obtain the finite difference jacobian
kc = kernel_call('dci_sri_nj', [None], **args)
fd_jac = self._get_jacobian(
get_molar_rates, kc, edit, ad_opts, True,
extra_funcs=[get_concentrations, get_thd_body_concs,
get_reduced_pressure_kernel, get_troe_kernel,
get_rxn_pres_mod, get_rop_net, get_spec_rates],
do_not_set=[namestore.conc_arr, namestore.Fi, namestore.Atroe,
namestore.Btroe, namestore.Fcent, namestore.thd_conc],
allint=allint)
# setup args
args = {
'rop_fwd': lambda x: np.array(
fwd_removed, order=x, copy=True),
'rop_rev': lambda x: np.array(
rev_removed, order=x, copy=True),
'Pr': lambda x: np.array(
self.store.ref_Pr, order=x, copy=True),
'Fi': lambda x: np.array(
self.store.ref_Fall, order=x, copy=True),
'pres_mod': lambda x: np.array(
self.store.ref_pres_mod, order=x, copy=True),
'kf': lambda x: np.array(kf, order=x, copy=True),
'kf_fall': lambda x: np.array(kf_fall, order=x, copy=True),
'Atroe': lambda x: np.array(Atroe, order=x, copy=True),
'Btroe': lambda x: np.array(Btroe, order=x, copy=True),
'Fcent': lambda x: np.array(Fcent, order=x, copy=True),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x),
}
def _chainer(self, out_vals):
self.kernel_args['jac'] = out_vals[-1][0].copy(
order=self.current_order)
comp = self._get_compare(fd_jac)
# and get mask
kc = [kernel_call('dci_troe_dnj', comp.ref_answer, check=False,
strict_name_match=True, **args),
kernel_call('dci_troe_dnj_ns', comp.ref_answer,
compare_mask=[comp],
compare_axis=comp.compare_axis, chain=_chainer,
strict_name_match=True, allow_skip=True,
other_compare=self.nan_compare, **args)]
return self._generic_jac_tester(dci_troe_dnj, kc)
@attr('long')
def test_total_specific_energy(self):
# conp
ref_cp = np.sum(self.store.concs * self.store.spec_cp, axis=1)
# cp args
cp_args = {'cp': lambda x: np.array(
self.store.spec_cp, order=x, copy=True),
'conc': lambda x: np.array(
self.store.concs, order=x, copy=True),
'cp_tot': lambda x: np.zeros_like(ref_cp, order=x)}
# call
kc = [kernel_call('cp_total', [ref_cp], strict_name_match=True,
**cp_args)]
self._generic_jac_tester(total_specific_energy, kc, conp=True,
do_sparse=False)
# conv
ref_cv = np.sum(self.store.concs * self.store.spec_cv, axis=1)
# cv args
cv_args = {'cv': lambda x: np.array(
self.store.spec_cv, order=x, copy=True),
'conc': lambda x: np.array(
self.store.concs, order=x, copy=True),
'cv_tot': lambda x: np.zeros_like(ref_cp, order=x)}
# call
kc = [kernel_call('cv_total', [ref_cv], strict_name_match=True,
**cv_args)]
self._generic_jac_tester(total_specific_energy, kc, conp=False,
do_sparse=False)
def __get_full_jac(self, conp=True):
# see if we've already computed this, no need to redo if we have it
attr = 'fd_jac' + ('_cp' if conp else '_cv')
if hasattr(self.store, attr):
return getattr(self.store, attr).copy()
# get the jacobian
jac = self._get_ad_jacobian(self.store.test_size, conp=conp)
# store the jacobian for later
setattr(self.store, attr, jac.copy())
return jac
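# Clarifying note (added): the autodiff Jacobian is expensive, so it is
# memoized on self.store under 'fd_jac_cp' / 'fd_jac_cv' and reused for each
# conp setting; callers always receive a copy, so mutating the returned array
# (as _set_at does below) cannot corrupt the cache.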
@attr('long')
@with_check_inds(check_inds={
1: np.array([0]),
2: lambda self: np.arange(2, self.store.jac_dim)
})
def test_dTdot_dnj(self):
# conp
# get total cp
cp_sum = np.sum(self.store.concs * self.store.spec_cp, axis=1)
# get species jacobian
jac = self.__get_full_jac(True)
# instead of whittling this down to the actual answer [:, 0, 2:], it's
# way easier to keep this full sized such that we can use the same
# :class:`get_comparable` object as the output from the kernel
ref_answer = jac.copy()
# reset the values to be populated
self._set_at(jac, 0)
# cp args
cp_args = {'cp': lambda x: np.array(
self.store.spec_cp, order=x, copy=True),
'h': lambda x: np.array(
self.store.spec_h, order=x, copy=True),
'cp_tot': lambda x: np.array(
cp_sum, order=x, copy=True),
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True),
'dphi': lambda x: np.array(
self.store.dphi_cp, order=x, copy=True),
'jac': lambda x: np.array(
jac, order=x, copy=True)}
comp = self._get_compare(ref_answer)
# call
kc = [kernel_call('dTdot_dnj', comp.ref_answer,
compare_axis=comp.compare_axis, compare_mask=[comp],
equal_nan=True, **cp_args)]
self._generic_jac_tester(dTdot_dnj, kc, conp=True)
# conv
cv_sum = np.sum(self.store.concs * self.store.spec_cv, axis=1)
# get species jacobian
jac = self.__get_full_jac(False)
# instead of whittling this down to the actual answer [:, 0, 2:], it's
# way easier to keep this full sized such that we can use the same
# :class:`get_comparable` object as the output from the kernel
ref_answer = jac.copy()
# reset the values to be populated
self._set_at(jac, 0)
# cv args
cv_args = {'cv': lambda x: np.array(
self.store.spec_cv, order=x, copy=True),
'u': lambda x: np.array(
self.store.spec_u, order=x, copy=True),
'cv_tot': lambda x: np.array(
cv_sum, order=x, copy=True),
'dphi': lambda x: np.array(
self.store.dphi_cv, order=x, copy=True),
'V_arr': lambda x: np.array(
self.store.V, order=x, copy=True),
'jac': lambda x: np.array(
jac, order=x, copy=True)}
comp = self._get_compare(ref_answer)
# call
kc = [kernel_call('dTdot_dnj', comp.ref_answer,
compare_axis=comp.compare_axis, compare_mask=[comp],
equal_nan=True, **cv_args)]
self._generic_jac_tester(dTdot_dnj, kc, conp=False)
@attr('long')
@with_check_inds(check_inds={
1: np.array([1]),
2: lambda self: np.arange(2, self.store.jac_dim)
})
def test_dEdot_dnj(self):
# conp
# get species jacobian
jac = self.__get_full_jac(True)
# instead of whittling this down to the actual answer [:, 1, 2:], it's
# way easier to keep this full sized such that we can use the same
# :class:`get_comparable` object as the output from the kernel
ref_answer = jac.copy()
# reset values to be populated by kernel
self._set_at(jac, 0)
# cp args
cp_args = {
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True),
'jac': lambda x: np.array(
jac, order=x, copy=True),
'P_arr': lambda x: np.array(
self.store.P, order=x, copy=True)}
# get the compare mask
comp = self._get_compare(ref_answer)
# call
kc = [kernel_call('dVdot_dnj', comp.ref_answer,
compare_axis=comp.compare_axis, compare_mask=[comp],
equal_nan=True, strict_name_match=True, **cp_args)]
self._generic_jac_tester(dEdot_dnj, kc, conp=True)
# get species jacobian
jac = self.__get_full_jac(False)
# instead of whittling this down to the actual answer [:, 1, 2:], it's
# way easier to keep this full sized such that we can use the same
# :class:`get_comparable` object as the output from the kernel
ref_answer = jac.copy()
# reset values to be populated by kernel
self._set_at(jac, 0)
# cv args
cv_args = {
'phi': lambda x: np.array(
self.store.phi_cv, order=x, copy=True),
'jac': lambda x: np.array(
jac, order=x, copy=True),
'V_arr': lambda x: np.array(
self.store.V, order=x, copy=True)}
# get the compare mask
comp = self._get_compare(ref_answer)
# call
kc = [kernel_call('dPdot_dnj', comp.ref_answer,
compare_axis=comp.compare_axis, compare_mask=[comp],
equal_nan=True, strict_name_match=True, **cv_args)]
self._generic_jac_tester(dEdot_dnj, kc, conp=False)
@attr('long')
def test_thermo_derivatives(self):
def __test_name(myname):
conp = myname in ['cp']
namestore, rate_info = self._make_namestore(conp)
ad_opts = namestore.loopy_opts
phi = self.store.phi_cp if conp else self.store.phi_cv
# dname/dT
edit = editor(
namestore.T_arr, getattr(namestore, myname),
self.store.test_size,
order=ad_opts.order)
args = {
'phi': lambda x: np.array(
phi, order=x, copy=True),
}
# obtain the finite difference jacobian
kc = kernel_call(myname, [None], **args)
def __call_wrapper(loopy_opts, namestore, test_size):
return thermo_temperature_derivative(
name,
loopy_opts, namestore,
test_size)
name = myname
ref_ans = self._get_jacobian(
__call_wrapper, kc, edit, ad_opts, namestore.conp)
ref_ans = ref_ans[:, :, 0]
# force all entries to zero for split comparison
name = 'd' + myname
args.update({name: lambda x: np.zeros_like(ref_ans, order=x)})
# call
kc = [kernel_call(myname, [ref_ans], **args)]
self._generic_jac_tester(__call_wrapper, kc, do_sparse=False)
__test_name('cp')
__test_name('cv')
__test_name('b')
def __run_ropi_test(self, rxn_type=reaction_type.elementary,
test_variable=False, conp=True):
# setup for FD jac
namestore, rate_info = self._make_namestore(conp)
ad_opts = namestore.loopy_opts
# set up arguments
# create the editor
edit = editor(
namestore.T_arr if not test_variable else namestore.E_arr,
namestore.n_dot, self.store.test_size,
order=ad_opts.order)
# get kf / kf_fall
kf, _ = self.__get_kf_and_fall()
# and kr
kr = self.__get_kr(kf)
args = {
'pres_mod': lambda x: np.array(
self.store.ref_pres_mod, order=x, copy=True),
'conc': lambda x: np.zeros_like(self.store.concs, order=x),
'wdot': lambda x: np.zeros_like(self.store.species_rates, order=x),
'rop_fwd': lambda x: np.zeros_like(
self.store.fwd_rxn_rate, order=x),
'rop_rev': lambda x: np.zeros_like(
self.store.rev_rxn_rate, order=x),
'rop_net': lambda x: np.zeros_like(self.store.rxn_rates, order=x),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x),
}
if test_variable and (rxn_type == reaction_type.elementary or conp):
args.update({
'kf': lambda x: np.array(kf, order=x, copy=True),
'kr': lambda x: np.array(kr, order=x, copy=True)
})
else:
args.update({
'kf': lambda x: np.zeros_like(kf, order=x),
'kr': lambda x: np.zeros_like(kr, order=x),
'b': lambda x: np.zeros_like(
self.store.ref_B_rev, order=x),
'Kc': lambda x: np.zeros_like(
self.store.equilibrium_constants, order=x),
# 'kf_fall': lambda x: np.zeros_like(
# self.store.ref_Pr, order=x)
})
if conp:
args.update({
'P_arr': lambda x: np.array(self.store.P, order=x, copy=True),
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True),
})
else:
args.update({
'V_arr': lambda x: np.array(self.store.V, order=x, copy=True),
'phi': lambda x: np.array(
self.store.phi_cv, order=x, copy=True),
})
# obtain the finite difference jacobian
kc = kernel_call('dRopidT', [None], **args)
allint = {'net': rate_info['net']['allint']}
rate_sub = get_simple_arrhenius_rates
if rxn_type == reaction_type.plog:
if not rate_info['plog']['num']:
raise SkipTest('No PLOG reactions in mechanism {}'.format(
self.store.gas.name))
rate_sub = _get_plog_call_wrapper(rate_info)
elif rxn_type == reaction_type.cheb:
if not rate_info['cheb']['num']:
raise SkipTest('No Chebyshev reactions in mechanism {}'.format(
self.store.gas.name))
rate_sub = _get_cheb_call_wrapper(rate_info)
edit.set_skip_on_missing(rate_sub)
rate_sub = [rate_sub] + [_get_poly_wrapper('b', conp), get_rev_rates]
if test_variable and (rxn_type == reaction_type.elementary or conp):
rate_sub = []
fd_jac = self._get_jacobian(
get_molar_rates, kc, edit, ad_opts, conp,
extra_funcs=[get_concentrations] + rate_sub +
[get_rop, get_rop_net, get_spec_rates],
allint=allint)
# get our form of rop_fwd / rop_rev
fwd_removed, rev_removed = self.__get_removed()
# setup args
args = {
'rop_fwd': lambda x: np.array(fwd_removed, order=x, copy=True),
'rop_rev': lambda x: np.array(rev_removed, order=x, copy=True),
'pres_mod': lambda x: np.array(
self.store.ref_pres_mod, order=x, copy=True),
'kf': lambda x: np.array(kf, order=x, copy=True),
'kr': lambda x: np.array(kr, order=x, copy=True),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x),
'conc': lambda x: np.array(self.store.concs, order=x, copy=True)
}
if conp:
args.update({
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True),
'P_arr': lambda x: np.array(self.store.P, order=x, copy=True)
})
else:
args.update({
'phi': lambda x: np.array(
self.store.phi_cv, order=x, copy=True),
'V_arr': lambda x: np.array(self.store.V, order=x, copy=True),
})
# input_mask = []
if not test_variable:
# and finally dBk/dT
dBkdT = self.__get_db()
args['db'] = lambda x: np.array(dBkdT, order=x, copy=True)
# input masking
input_mask = ['V_arr']
if rxn_type == reaction_type.elementary:
input_mask.append('P_arr')
elif test_variable:
# inputs needed by the extra functions when testing the state variable
input_mask = []
if conp and rxn_type != reaction_type.elementary:
input_mask = ['P_arr']
def _chainer(self, out_vals):
if out_vals[-1][0] is not None:
self.kernel_args['jac'] = out_vals[-1][0].copy(
order=self.current_order)
# set variable name and check index
var_name = 'T'
if test_variable:
var_name = 'V' if conp else 'P'
# get descriptor
name_desc = ''
other_args = {'conp': conp} if test_variable else {}
tester = dRopidT if not test_variable else dRopidE
if rxn_type == reaction_type.plog:
name_desc = '_plog'
tester = dRopi_plog_dT if not test_variable else dRopi_plog_dE
other_args['maxP'] = rate_info['plog']['max_P']
elif rxn_type == reaction_type.cheb:
name_desc = '_cheb'
tester = dRopi_cheb_dT if not test_variable else dRopi_cheb_dE
other_args['maxP'] = np.max(rate_info['cheb']['num_P'])
other_args['maxT'] = np.max(rate_info['cheb']['num_T'])
rtol = 1e-3
atol = 1e-7
def _small_compare(kc, our_vals, ref_vals, mask):
# get the condition extractor
extractor, cond, x, y = self.__get_comp_extractor(kc, mask)
if extractor is None:
# no need to test
return True
# find where there isn't a match
outv = extractor(our_vals, (cond, x, y))
refv = extractor(ref_vals, (cond, x, y), is_answer=True)
check = np.where(
np.logical_not(np.isclose(outv, refv, rtol=rtol)))[0]
correct = True
if check.size:
# check that our values are zero (which is correct)
correct = np.all(outv[check] == 0)
# and that the reference values are "small"
correct &= np.all(np.abs(refv[check]) <= atol)
return correct
# get compare mask
comp = self._get_compare(fd_jac)
kc = [kernel_call('dRopi{}_d{}'.format(name_desc, var_name),
comp.ref_answer, check=False,
strict_name_match=True,
allow_skip=test_variable,
input_mask=['kf', 'kr', 'conc'] + input_mask,
**args),
kernel_call('dRopi{}_d{}_ns'.format(name_desc, var_name),
comp.ref_answer, compare_mask=[comp],
compare_axis=comp.compare_axis, chain=_chainer,
strict_name_match=True, allow_skip=True,
rtol=rtol, atol=atol, other_compare=_small_compare,
input_mask=['db', 'rop_rev', 'rop_fwd'],
**args)]
return self._generic_jac_tester(tester, kc, **other_args)
@attr('long')
@with_check_inds(check_inds={
1: lambda self: self.__get_check(
lambda rxn: not (isinstance(rxn, ct.PlogReaction)
or isinstance(rxn, ct.ChebyshevReaction))),
2: np.array([0])
})
def test_dRopidT(self):
self.__run_ropi_test()
@attr('long')
@with_check_inds(check_inds={
1: lambda self: self.__get_check(
lambda rxn: isinstance(rxn, ct.PlogReaction)),
2: np.array([0])
})
def test_dRopi_plog_dT(self):
self.__run_ropi_test(reaction_type.plog)
@attr('long')
@with_check_inds(check_inds={
1: lambda self: self.__get_check(
lambda rxn: isinstance(rxn, ct.ChebyshevReaction)),
2: np.array([0])
})
def test_dRopi_cheb_dT(self):
self.__run_ropi_test(reaction_type.cheb)
@attr('long')
@with_check_inds(check_inds={
# only check states where the last species conc is nonzero; a zero
# conc can cause problems in the FD Jac
0: lambda self: np.where(self.store.concs[:, -1] != 0)[0],
1: lambda self: self.__get_check(
lambda rxn: not (isinstance(rxn, ct.PlogReaction)
or isinstance(rxn, ct.ChebyshevReaction))),
2: np.array([1])
})
def test_dRopi_dE(self):
self.__run_ropi_test(test_variable=True, conp=True)
self.__run_ropi_test(test_variable=True, conp=False)
@attr('long')
@with_check_inds(check_inds={
# only check states where the last species conc is nonzero; a zero
# conc can cause problems in the FD Jac
0: lambda self: np.where(self.store.concs[:, -1] != 0)[0],
1: lambda self: self.__get_check(
lambda rxn: isinstance(rxn, ct.PlogReaction)),
2: np.array([1])
})
def test_dRopi_plog_dE(self):
self.__run_ropi_test(reaction_type.plog, True, conp=True)
self.__run_ropi_test(reaction_type.plog, True, conp=False)
@attr('long')
@with_check_inds(check_inds={
# only check states where the last species conc is nonzero; a zero
# conc can cause problems in the FD Jac
0: lambda self: np.where(self.store.concs[:, -1] != 0)[0],
1: lambda self: self.__get_check(
lambda rxn: isinstance(rxn, ct.ChebyshevReaction)),
2: np.array([1])
})
def test_dRopi_cheb_dE(self):
self.__run_ropi_test(reaction_type.cheb, True, conp=True)
self.__run_ropi_test(reaction_type.cheb, True, conp=False)
"""Model Objects and ML algorithm serialisation."""
import os
import pickle
import warnings
import logging
from itertools import chain
from functools import partial
from os.path import join, isdir, abspath
import numpy as np
from revrand import StandardLinearModel, GeneralisedLinearModel
from revrand.basis_functions import LinearBasis, RandomRBF, \
RandomLaplace, RandomCauchy, RandomMatern32, RandomMatern52
from revrand.btypes import Parameter, Positive
from revrand.likelihoods import Gaussian
from revrand.optimize import Adam
from revrand.utils import atleast_list
from scipy.integrate import fixed_quad
from scipy.stats import norm
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.svm import SVR, SVC
from sklearn.ensemble import (RandomForestRegressor as RFR,
RandomForestClassifier as RFC,
GradientBoostingClassifier)
from sklearn.linear_model import ARDRegression, LogisticRegression
from sklearn.tree import DecisionTreeRegressor, ExtraTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import LabelEncoder
from sklearn.kernel_approximation import RBFSampler
from xgboost import XGBRegressor
from uncoverml import mpiops
from uncoverml.interpolate import SKLearnNearestNDInterpolator, \
SKLearnLinearNDInterpolator, SKLearnRbf, SKLearnCT
from uncoverml.cubist import Cubist
from uncoverml.cubist import MultiCubist
from uncoverml.transforms import target as transforms
warnings.filterwarnings("ignore", category=DeprecationWarning)
#
# Module constants
#
log = logging.getLogger(__name__)
QUADORDER = 5 # Order of quadrature used for transforming probabilistic vals
#
# Mixin classes for providing pipeline compatibility to revrand
#
class BasisMakerMixin():
"""
Mixin class for easily creating approximate kernel functions for revrand.
This is primarily used for the approximate Gaussian process algorithms.
"""
def fit(self, X, y, *args, **kwargs):
self._make_basis(X)
return super().fit(X, y, *args, **kwargs) # args for GLMs
def _store_params(self, kernel, regulariser, nbases, lenscale, ard):
self.kernel = kernel
self.nbases = nbases
self.ard = ard
self.lenscale = lenscale if np.isscalar(lenscale) \
else np.asarray(lenscale)
self.regulariser = Parameter(regulariser, Positive())
def _make_basis(self, X):
D = X.shape[1]
lenscale = self.lenscale
if self.ard and D > 1:
lenscale = np.ones(D) * lenscale
lenscale_init = Parameter(lenscale, Positive())
gpbasis = basismap[self.kernel](Xdim=X.shape[1], nbases=self.nbases,
lenscale=lenscale_init,
regularizer=self.regulariser)
self.basis = gpbasis + LinearBasis()
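# Hypothetical usage sketch (added; not part of the original module): a
# subclass defers basis construction until fit time, when the data
# dimensionality is known, e.g.
# >>> gp = ApproxGP(kernel='rbf', nbases=100, lenscale=0.5, ard=True)
# >>> gp.fit(X, y)   # _make_basis(X) runs first, then the revrand fit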
class PredictDistMixin():
"""
Mixin class for providing a ``predict_dist`` method to the
StandardLinearModel class in revrand.
"""
def predict_dist(self, X, interval=0.95, *args, **kwargs):
"""
Predictive mean and variance for a probabilistic regressor.
Parameters
----------
X: ndarray
(Ns, d) array query dataset (Ns samples, d dimensions).
interval: float, optional
The percentile confidence interval (e.g. 95%) to return.
fields: dict, optional
dictionary of fields parsed from the shape file.
``indicator_field`` should be a key in this dictionary. If this is
not present, then a Gaussian likelihood will be used for all
predictions. The only time this may be input is for cross
validation.
Returns
-------
Ey: ndarray
The expected value of ys for the query inputs, X of shape (Ns,).
Vy: ndarray
The expected variance of ys (excluding likelihood noise terms) for
the query inputs, X of shape (Ns,).
ql: ndarray
The lower end point of the interval with shape (Ns,)
qu: ndarray
The upper end point of the interval with shape (Ns,)
"""
Ey, Vy = self.predict_moments(X, *args, **kwargs)
ql, qu = norm.interval(interval, loc=Ey, scale=np.sqrt(Vy))
return Ey, Vy, ql, qu
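# Hypothetical usage sketch (added): for a fitted probabilistic regressor,
# >>> Ey, Vy, ql, qu = model.predict_dist(Xq, interval=0.95)
# gives the predictive mean, the latent variance and the 95% central
# interval of a Gaussian with that mean and variance.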
class GLMPredictDistMixin():
"""
Mixin class for providing a ``predict_dist`` method to the
GeneralisedLinearModel class in revrand.
This is especially for use with Gaussian likelihood models.
"""
def predict_dist(self, X, interval=0.95, *args, **kwargs):
"""
Predictive mean and variance for a probabilistic regressor.
Parameters
----------
X: ndarray
(Ns, d) array query dataset (Ns samples, d dimensions).
interval: float, optional
The percentile confidence interval (e.g. 95%) to return.
fields: dict, optional
dictionary of fields parsed from the shape file.
``indicator_field`` should be a key in this dictionary. If this is
not present, then a Gaussian likelihood will be used for all
predictions. The only time this may be input is for cross
validation.
Returns
-------
Ey: ndarray
The expected value of ys for the query inputs, X of shape (Ns,).
Vy: ndarray
The expected variance of ys (excluding likelihood noise terms) for
the query inputs, X of shape (Ns,).
ql: ndarray
The lower end point of the interval with shape (Ns,)
qu: ndarray
The upper end point of the interval with shape (Ns,)
"""
Ey, Vy = self.predict_moments(X, *args, **kwargs)
Vy += self.like_hypers_
ql, qu = norm.interval(interval, loc=Ey, scale=np.sqrt(Vy))
return Ey, Vy, ql, qu
class MutualInfoMixin():
"""
Mixin class for providing predictive entropy reduction functionality to the
StandardLinearModel class (only).
"""
def entropy_reduction(self, X):
"""
Predictive entropy reduction (a.k.a. mutual information).
Estimate the reduction in the posterior distribution's entropy (i.e.
model uncertainty reduction) as a result of including a particular
observation.
Parameters
----------
X: ndarray
(Ns, d) array query dataset (Ns samples, d dimensions).
Returns
-------
MI: ndarray
Prediction of mutual information (expected reduction in posterior
entropy) associated with each query input. The units are 'nats', and
the shape of the returned array is (Ns,).
"""
Phi = self.basis.transform(X, *atleast_list(self.hypers_))
pCp = [p.dot(self.covariance_).dot(p.T) for p in Phi]
MI = 0.5 * (np.log(self.var_ + np.array(pCp)) - np.log(self.var_))
return MI
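# Clarifying note (added): in symbols the quantity above is
#   MI = 0.5 * (log(var + phi^T C phi) - log(var))
# i.e. the entropy difference between the predictive distribution with and
# without the posterior weight covariance C; larger values flag query points
# whose observation would most reduce model uncertainty.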
class TagsMixin():
"""
Mixin class to aid a pipeline in establishing the types of predictive
outputs to be expected from the ML algorithms in this module.
"""
def get_predict_tags(self):
"""
Get the types of prediction outputs from this algorithm.
Returns
-------
list:
of strings with the types of outputs that can be returned by this
algorithm. This depends on the prediction methods implemented (e.g.
``predict``, ``predict_dist``, ``entropy_reduction``).
"""
# Classification
if hasattr(self, 'predict_proba'):
tags = self.get_classes()
return tags
# Regression
tags = ['Prediction']
if hasattr(self, 'predict_dist'):
tags.extend(['Variance', 'Lower quantile', 'Upper quantile'])
if hasattr(self, 'entropy_reduction'):
tags.append('Expected reduction in entropy')
if hasattr(self, 'krige_residual'):
tags.append('Kriged correction')
if hasattr(self, 'ml_prediction'):
tags.append('ml prediction')
return tags
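# Illustrative example (added): for a hypothetical regressor exposing
# predict_dist and entropy_reduction, get_predict_tags() returns
# ['Prediction', 'Variance', 'Lower quantile', 'Upper quantile',
#  'Expected reduction in entropy'], which the pipeline can use to label its
# output bands.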
#
# Specialisation of revrand's interface to work from the command line with a
# few curated algorithms
#
class LinearReg(StandardLinearModel, PredictDistMixin, MutualInfoMixin):
"""
Bayesian standard linear model.
Parameters
----------
onescol: bool, optional
If true, prepend a column of ones onto X (i.e. a bias term)
var: Parameter, optional
observation variance initial value.
regulariser: Parameter, optional
weight regulariser (variance) initial value.
tol: float, optional
optimiser function tolerance convergence criterion.
maxiter: int, optional
maximum number of iterations for the optimiser.
nstarts : int, optional
if there are any parameters with distributions as initial values, this
determines how many random candidate starts should be evaluated before
commencing optimisation at the best candidate.
"""
def __init__(self, onescol=True, var=1., regulariser=1., tol=1e-8,
maxiter=1000, nstarts=100):
basis = LinearBasis(onescol=onescol,
regularizer=Parameter(regulariser, Positive()))
super().__init__(basis=basis,
var=Parameter(var, Positive()),
tol=tol,
maxiter=maxiter,
nstarts=nstarts
)
class ApproxGP(BasisMakerMixin, StandardLinearModel, PredictDistMixin,
MutualInfoMixin):
"""
An approximate Gaussian process for medium scale data.
Parameters
----------
kernel: str, optional
the (approximate) kernel to use with this Gaussian process. Have a look
at :code:`basismap` dictionary for appropriate kernel approximations.
nbases: int
how many unique random bases to create (twice this number will be
actually created, i.e. real and imaginary components for each base).
The higher this number, the more accurate the kernel approximation, but
the longer the runtime of the algorithm. Usually if X is high
dimensional, this will have to also be high dimensional.
lenscale: float, optional
the initial value for the kernel length scale to be learned.
ard: bool, optional
Whether to use a different length scale for each dimension of X or a
single length scale. This will result in a longer run time, but
potentially better results.
var: Parameter, optional
observation variance initial value.
regulariser: Parameter, optional
weight regulariser (variance) initial value.
tol: float, optional
optimiser function tolerance convergence criterion.
maxiter: int, optional
maximum number of iterations for the optimiser.
nstarts : int, optional
if there are any parameters with distributions as initial values, this
determines how many random candidate starts should be evaluated before
commencing optimisation at the best candidate.
"""
def __init__(self, kernel='rbf', nbases=50, lenscale=1., var=1.,
regulariser=1., ard=True, tol=1e-8, maxiter=1000,
nstarts=100):
super().__init__(basis=None,
var=Parameter(var, Positive()),
tol=tol,
maxiter=maxiter,
nstarts=nstarts
)
self._store_params(kernel, regulariser, nbases, lenscale, ard)
class SGDLinearReg(GeneralisedLinearModel, GLMPredictDistMixin):
"""
Bayesian standard linear model, using stochastic gradients.
This uses the Adam stochastic gradients algorithm;
http://arxiv.org/pdf/1412.6980
Parameters
----------
onescol: bool, optional
If true, prepend a column of ones onto X (i.e. a bias term)
var: Parameter, optional
observation variance initial value.
regulariser: Parameter, optional
weight regulariser (variance) initial value.
maxiter: int, optional
Number of iterations to run for the stochastic gradients algorithm.
batch_size: int, optional
number of observations to use per SGD batch.
alpha: float, optional
stepsize to give the stochastic gradient optimisation update.
beta1: float, optional
smoothing/decay rate parameter for the stochastic gradient, must be
[0, 1].
beta2: float, optional
smoothing/decay rate parameter for the squared stochastic gradient,
must be [0, 1].
epsilon: float, optional
"jitter" term to ensure continued learning in stochastic gradients
(should be small).
random_state: int or RandomState, optional
random seed
nstarts : int, optional
if there are any parameters with distributions as initial values, this
determines how many random candidate starts should be evaluated before
commencing optimisation at the best candidate.
Note
----
Setting the ``random_state`` may be important for getting consistent
looking predictions when many chunks/subchunks are used. This is because
the predictive distribution is sampled for these algorithms!
"""
def __init__(self, onescol=True, var=1., regulariser=1., maxiter=3000,
batch_size=10, alpha=0.01, beta1=0.9, beta2=0.99,
epsilon=1e-8, random_state=None, nstarts=500):
basis = LinearBasis(onescol=onescol,
regularizer=Parameter(regulariser, Positive()))
super().__init__(likelihood=Gaussian(Parameter(var, Positive())),
basis=basis,
maxiter=maxiter,
batch_size=batch_size,
updater=Adam(alpha, beta1, beta2, epsilon),
random_state=random_state,
nstarts=nstarts
)
class SGDApproxGP(BasisMakerMixin, GeneralisedLinearModel,
GLMPredictDistMixin):
"""
An approximate Gaussian process for large scale data using stochastic
gradients.
This uses the Adam stochastic gradients algorithm;
http://arxiv.org/pdf/1412.6980
Parameters
----------
kern: str, optional
the (approximate) kernel to use with this Gaussian process. Have a look
at :code:`basismap` dictionary for appropriate kernel approximations.
nbases: int
how many unique random bases to create (twice this number will be
actually created, i.e. real and imaginary components for each base).
The higher this number, the more accurate the kernel approximation, but
the longer the runtime of the algorithm. Usually if X is high
dimensional, this will have to also be high dimensional.
lenscale: float, optional
the initial value for the kernel length scale to be learned.
ard: bool, optional
Whether to use a different length scale for each dimension of X or a
single length scale. This will result in a longer run time, but
potentially better results.
var: float, optional
observation variance initial value.
regulariser: float, optional
weight regulariser (variance) initial value.
maxiter: int, optional
Number of iterations to run for the stochastic gradients algorithm.
batch_size: int, optional
number of observations to use per SGD batch.
alpha: float, optional
stepsize to give the stochastic gradient optimisation update.
beta1: float, optional
smoothing/decay rate parameter for the stochastic gradient, must be
[0, 1].
beta2: float, optional
smoothing/decay rate parameter for the squared stochastic gradient,
must be [0, 1].
epsilon: float, optional
"jitter" term to ensure continued learning in stochastic gradients
(should be small).
random_state: int or RandomState, optional
random seed
nstarts : int, optional
if there are any parameters with distributions as initial values, this
determines how many random candidate starts should be evaluated before
commencing optimisation at the best candidate.
Note
----
Setting the ``random_state`` may be important for getting consistent
looking predictions when many chunks/subchunks are used. This is because
the predictive distribution is sampled for these algorithms!
"""
def __init__(self, kernel='rbf', nbases=50, lenscale=1., var=1.,
regulariser=1., ard=True, maxiter=3000, batch_size=10,
alpha=0.01, beta1=0.9, beta2=0.99, epsilon=1e-8,
random_state=None, nstarts=500):
super().__init__(likelihood=Gaussian(Parameter(var, Positive())),
basis=None,
maxiter=maxiter,
batch_size=batch_size,
updater=Adam(alpha, beta1, beta2, epsilon),
random_state=random_state,
nstarts=nstarts
)
self._store_params(kernel, regulariser, nbases, lenscale, ard)
#
# Approximate probabilistic output for Random Forest
#
class RandomForestRegressor(RFR):
"""
Implements a "probabilistic" output by looking at the variance of the
decision tree estimator outputs.
"""
def predict_dist(self, X, interval=0.95):
if hasattr(self, "_notransform_predict"):
Ey = self._notransform_predict(X)
else:
Ey = self.predict(X)
Vy = np.zeros_like(Ey)
for dt in self.estimators_:
Vy += (dt.predict(X) - Ey)**2
Vy /= len(self.estimators_)
# FIXME what if elements of Vy are zero?
ql, qu = norm.interval(interval, loc=Ey, scale=np.sqrt(Vy))
return Ey, Vy, ql, qu
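# Clarifying note (added): the variance above is the spread of individual
# tree predictions about the forest mean, a model-uncertainty proxy rather
# than a calibrated predictive variance. Hypothetical usage:
# >>> rf = RandomForestRegressor(n_estimators=100)
# >>> rf.fit(X, y)
# >>> Ey, Vy, ql, qu = rf.predict_dist(Xq, interval=0.9)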
class RandomForestRegressorMulti():
def __init__(self,
outdir='.',
forests=10,
parallel=True,
n_estimators=10,
random_state=1,
**kwargs):
self.forests = forests
self.n_estimators = n_estimators
self.parallel = parallel
self.kwargs = kwargs
self.random_state = random_state
self._trained = False
assert isdir(abspath(outdir)), 'Make sure the outdir exists ' \
'and is writeable'
self.temp_dir = join(abspath(outdir), 'results')
os.makedirs(self.temp_dir, exist_ok=True)
def fit(self, x, y, *args, **kwargs):
# set a different random seed for each thread
np.random.seed(self.random_state + mpiops.chunk_index)
if self.parallel:
process_rfs = np.array_split(range(self.forests),
mpiops.chunks)[mpiops.chunk_index]
else:
process_rfs = range(self.forests)
for t in process_rfs:
print('training forest {} using '
'process {}'.format(t, mpiops.chunk_index))
# change random state in each forest
self.kwargs['random_state'] = np.random.randint(0, 10000)
rf = RandomForestTransformed(
n_estimators=self.n_estimators, **self.kwargs)
rf.fit(x, y)
if self.parallel: # used in training
pk_f = join(self.temp_dir, 'rf_model_{}.pk'.format(t))
else: # used when parallel is false, i.e., during x-val
pk_f = join(self.temp_dir,
'rf_model_{}_{}.pk'.format(t, mpiops.chunk_index))
with open(pk_f, 'wb') as fp:
pickle.dump(rf, fp)
if self.parallel:
mpiops.comm.barrier()
# Mark that we are now trained
self._trained = True
def predict_dist(self, x, interval=0.95, *args, **kwargs):
# We can't make predictions until we have trained the model
if not self._trained:
print('Train first')
return
y_pred = np.zeros((x.shape[0], self.forests * self.n_estimators))
for i in range(self.forests):
if self.parallel: # used in training
pk_f = join(self.temp_dir,
'rf_model_{}.pk'.format(i))
else: # used when parallel is false, i.e., during x-val
pk_f = join(self.temp_dir,
'rf_model_{}_{}.pk'.format(i, mpiops.chunk_index))
with open(pk_f, 'rb') as fp:
f = pickle.load(fp)
for m, dt in enumerate(f.estimators_):
y_pred[:, i * self.n_estimators + m] = dt.predict(x)
y_mean = np.mean(y_pred, axis=1)
y_var = np.var(y_pred, axis=1)
# Determine quantiles
ql, qu = norm.interval(interval, loc=y_mean, scale=np.sqrt(y_var))
return y_mean, y_var, ql, qu
def predict(self, x):
return self.predict_dist(x)[0]
#
# Approximate large scale kernel classifier factory
#
def kernelize(classifier):
class ClassifierRBF:
def __init__(self, gamma='auto', n_components=100, random_state=None,
**kwargs):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
self.clf = classifier(**kwargs)
def fit(self, X, y):
if self.gamma == 'auto':
D = X.shape[1]
self.gamma = 1 / D
self.rbf = RBFSampler(
gamma=self.gamma,
n_components=self.n_components,
random_state=self.random_state
)
self.clf.fit(self.rbf.fit_transform(X), y)
return self
def predict(self, X):
p = self.clf.predict(self.rbf.transform(X))
return p
def predict_proba(self, X):
p = self.clf.predict_proba(self.rbf.transform(X))
return p
return ClassifierRBF
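# --- Hedged usage sketch (illustrative only; not in the original module). ---
# kernelize() wraps any scikit-learn style classifier with RBFSampler random
# features, so a linear model such as LogisticRegression becomes an approximate
# RBF-kernel classifier. The names and data below are made up for the example.
def _demo_kernelize():
    import numpy as np
    from sklearn.linear_model import LogisticRegression
    rng = np.random.RandomState(0)
    X = rng.randn(100, 4)
    y = (X[:, 0] * X[:, 1] > 0).astype(int)  # not linearly separable
    LogisticRBF = kernelize(LogisticRegression)
    clf = LogisticRBF(gamma=0.5, n_components=200, random_state=0, max_iter=200)
    clf.fit(X, y)
    return clf.predict_proba(X[:5])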
#
# Target Transformer factory
#
def transform_targets(Regressor):
"""
    Factory function that adds target transformation capability to compatible
    scikit-learn objects.
Look at the ``transformers.py`` module for more information on valid target
transformers.
Example
-------
>>> svr = transform_targets(SVR)(target_transform='Standardise', gamma=0.1)
"""
class TransformedRegressor(Regressor):
        # NOTE: All of these explicitly ignore **kwargs on purpose. The generic
        # revrand and scikit-learn algorithms don't need them. Custom models
        # probably shouldn't be using this factory.
def __init__(self, target_transform='identity', *args, **kwargs):
super().__init__(*args, **kwargs)
self.target_transform = transforms.transforms[target_transform]()
def fit(self, X, y, *args, **kwargs):
self.target_transform.fit(y)
y_t = self.target_transform.transform(y)
return super().fit(X, y_t)
def _notransform_predict(self, X, *args, **kwargs):
Ey = super().predict(X)
return Ey
def predict(self, X, *args, **kwargs):
Ey_t = self._notransform_predict(X, *args, **kwargs)
Ey = self.target_transform.itransform(Ey_t)
return Ey
if hasattr(Regressor, 'predict_dist'):
def predict_dist(self, X, interval=0.95, *args, **kwargs):
# Expectation and variance in latent space
Ey_t, Vy_t, ql, qu = super().predict_dist(X, interval)
# Save computation if identity transform
if type(self.target_transform) is transforms.Identity:
return Ey_t, Vy_t, ql, qu
# Save computation if standardise transform
elif type(self.target_transform) is transforms.Standardise:
Ey = self.target_transform.itransform(Ey_t)
Vy = Vy_t * self.target_transform.ystd ** 2
ql, qu = norm.interval(interval, loc=Ey, scale= | np.sqrt(Vy) | numpy.sqrt |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 2 16:21:39 2015
@author: ajaver
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 13 19:39:41 2015
@author: ajaver
"""
import json
import os
import cv2
import numpy as np
import pandas as pd
import tables
from tierpsy.analysis.ske_create.helperIterROI import generateMoviesROI
from tierpsy.analysis.ske_create.segWormPython.mainSegworm import getSkeleton, resampleAll
from tierpsy.analysis.ske_create.zebrafishAnalysis import zebrafishAnalysis, zebrafishSkeleton
from tierpsy.helper.misc import TABLE_FILTERS
def _zebra_func(worm_img, skel_args, resampling_N):
# Get zebrafish mask
config = zebrafishAnalysis.ModelConfig(**skel_args)
worm_mask, worm_cnt, cnt_area, cleaned_mask, head_point, smoothed_points = zebrafishAnalysis.getZebrafishMask(worm_img, config)
if worm_mask is None:
return None
# Get zebrafish skeleton
skeleton, ske_len, cnt_side1, cnt_side2, cnt_widths, cnt_area = zebrafishSkeleton.getZebrafishSkeleton(cleaned_mask, head_point, smoothed_points, config)
if skeleton is None:
return None
# Resample skeleton and other variables
skeleton, ske_len, cnt_side1, cnt_side2, cnt_widths = resampleAll(skeleton, cnt_side1, cnt_side2, cnt_widths, resampling_N)
if skeleton is None or cnt_side1 is None or cnt_side2 is None:
return None
return skeleton, ske_len, cnt_side1, cnt_side2, cnt_widths, cnt_area
def getWormMask(
worm_img,
threshold,
strel_size=5,
min_blob_area=50,
roi_center_x=-1,
roi_center_y=-1,
is_light_background=True):
'''
    Calculate the worm mask using a specific threshold.
-> Used by trajectories2Skeletons
'''
if any(x < 3 for x in worm_img.shape):
return np.zeros_like(worm_img), np.zeros(0), 0
# let's make sure the strel is larger than 3 and odd, otherwise it will
# shift the mask position.
strel_size_half = round(strel_size / 2)
if strel_size_half % 2 == 0:
strel_size_half += 1
if strel_size_half < 3:
strel_size_half = 3
strel_half = cv2.getStructuringElement(
cv2.MORPH_ELLIPSE, (strel_size_half, strel_size_half))
# make the worm more uniform. This is important to get smoother contours.
worm_img = cv2.medianBlur(worm_img, 3)
# compute the thresholded mask
worm_mask = worm_img < threshold if is_light_background else worm_img > threshold
worm_mask = (worm_mask & (worm_img != 0)).astype(np.uint8)
# first compute a small closing to join possible fragments of the worm.
worm_mask = cv2.morphologyEx(worm_mask, cv2.MORPH_CLOSE, strel_half)
# then get the best contour to be the worm
worm_cnt, _ = binaryMask2Contour(
worm_mask, min_blob_area=min_blob_area, roi_center_x=roi_center_x, roi_center_y=roi_center_y)
# create a new mask having only the best contour
worm_mask = np.zeros_like(worm_mask)
if worm_cnt.size > 0:
cv2.drawContours(worm_mask, [worm_cnt.astype(np.int32)], 0, 1, -1)
# let's do closing with a larger structural element to close any gaps inside the worm.
# It is faster to do several iterations rather than use a single larger
# strel.
worm_mask = cv2.morphologyEx(
worm_mask,
cv2.MORPH_CLOSE,
strel_half,
iterations=3)
# finally get the contour from the last element
worm_cnt, cnt_area = binaryMask2Contour(
worm_mask, min_blob_area=min_blob_area, roi_center_x=roi_center_x, roi_center_y=roi_center_y)
worm_mask = np.zeros_like(worm_mask)
if worm_cnt.size > 0:
cv2.drawContours(worm_mask, [worm_cnt.astype(np.int32)], 0, 1, -1)
return worm_mask, worm_cnt, cnt_area
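# --- Hedged usage sketch on a synthetic ROI (not part of the original pipeline). ---
# It assumes the full binaryMask2Contour implementation from the original module;
# a dark rectangular "worm" on a light background should survive the threshold,
# the morphological cleaning and the contour selection above.
def _demo_worm_mask():
    roi = np.full((64, 64), 200, dtype=np.uint8)
    roi[20:40, 10:50] = 50  # dark object, area well above min_blob_area
    mask, cnt, area = getWormMask(roi, threshold=100, min_blob_area=50)
    return mask, cnt, area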
def binaryMask2Contour(
worm_mask,
min_blob_area=50,
roi_center_x=-1,
roi_center_y=-1,
pick_center=True):
'''
    Convert a binary mask into a single worm contour.
-> Used by getWormMask
'''
if worm_mask.size == 0:
        return np.zeros(0), 0  # bail out if the mask is an empty array
# get the center of the mask
if roi_center_x < 1:
roi_center_x = (worm_mask.shape[1] - 1) / 2.
if roi_center_y < 1:
roi_center_y = (worm_mask.shape[0] - 1) / 2.
# select only one contour in the binary mask
# get contour
contour, hierarchy = cv2.findContours(
worm_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
if len(contour) == 1:
contour = np.squeeze(contour[0], axis=1)
# filter for small areas
cnt_area = cv2.contourArea(contour)
if cnt_area < min_blob_area:
return np.zeros(0), cnt_area
elif len(contour) > 1:
# clean mask if there is more than one contour
# select the largest area object
cnt_areas = [cv2.contourArea(cnt) for cnt in contour]
# filter only contours with areas larger than min_blob_area and do not
        # consider contours with holes
cnt_tuple = [(contour[ii], cnt_area) for ii, cnt_area in enumerate(
            cnt_areas) if cnt_area >= min_blob_area and hierarchy[0][ii][3] == -1]  # shouldn't the last condition be automatically satisfied by using RETR_EXTERNAL in cv2.findContours?
        # if there are no contours left, continue
if not cnt_tuple:
return np.zeros(0), 0
else:
# get back the contour areas for filtering
contour, cnt_areas = zip(*cnt_tuple)
if pick_center:
# In the multiworm tracker the worm should be in the center of the
# ROI
min_dist_center = np.inf
valid_ind = -1
for ii, cnt in enumerate(contour):
#mm = cv2.moments(cnt)
cm_x = np.mean(cnt[:, :, 1]) # mm['m10']/mm['m00']
cm_y = | np.mean(cnt[:, :, 0]) | numpy.mean |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
print('The 3 equations are entered individually, each value of the equation is entered separated by a space, for example: \ninput = 6 5 -3 4 \nThis will be equal to 6x + 5y - 3z = 4')
print('Enter values for equation 1: ')
a, b, c, d = map(float, input().split())
print('Enter values for equation 2: ')
e, f, g, h = map(float, input().split())
print('Enter values for equation 3: ')
i, j, k, l = map(float, input().split())
# solve the linear equation
A = np.array([[a, b, c], [e, f, g], [i, j, k]])
b_a = np.array([d, h, l])
sol = np.linalg.solve(A, b_a)
print(sol)
x, y = np.linspace(0, 10, 10), | np.linspace(0, 10, 10) | numpy.linspace |
# MIT License
#
# Copyright (c) 2019-2020 Tskit Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Python implementation of the Li and Stephens algorithms.
"""
import itertools
import unittest
import msprime
import numpy as np
import pytest
import _tskit # TMP
import tskit
from tests import tsutil
def in_sorted(values, j):
# Take advantage of the fact that the numpy array is sorted.
ret = False
index = np.searchsorted(values, j)
if index < values.shape[0]:
ret = values[index] == j
return ret
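# Illustration (added): with values = np.array([2, 5, 9]), in_sorted(values, 5)
# is True and in_sorted(values, 6) is False; the array must already be sorted
# for the binary search above to be valid.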
def ls_forward_matrix_naive(h, alleles, G, rho, mu):
"""
Simple matrix based method for LS forward algorithm using Python loops.
"""
assert rho[0] == 0
m, n = G.shape
alleles = check_alleles(alleles, m)
F = np.zeros((m, n))
S = np.zeros(m)
f = np.zeros(n) + 1 / n
for el in range(0, m):
for j in range(n):
# NOTE Careful with the difference between this expression and
# the Viterbi algorithm below. This depends on the different
# normalisation approach.
p_t = f[j] * (1 - rho[el]) + rho[el] / n
p_e = mu[el]
if G[el, j] == h[el] or h[el] == tskit.MISSING_DATA:
p_e = 1 - (len(alleles[el]) - 1) * mu[el]
f[j] = p_t * p_e
S[el] = np.sum(f)
# TODO need to handle the 0 case.
assert S[el] > 0
f /= S[el]
F[el] = f
return F, S
def ls_viterbi_naive(h, alleles, G, rho, mu):
"""
Simple matrix based method for LS Viterbi algorithm using Python loops.
"""
assert rho[0] == 0
m, n = G.shape
alleles = check_alleles(alleles, m)
L = np.ones(n)
T = [set() for _ in range(m)]
T_dest = np.zeros(m, dtype=int)
for el in range(m):
# The calculation below is undefined otherwise.
if len(alleles[el]) > 1:
assert mu[el] <= 1 / (len(alleles[el]) - 1)
L_next = np.zeros(n)
for j in range(n):
# NOTE Careful with the difference between this expression and
# the Forward algorithm above. This depends on the different
# normalisation approach.
p_no_recomb = L[j] * (1 - rho[el] + rho[el] / n)
p_recomb = rho[el] / n
if p_no_recomb > p_recomb:
p_t = p_no_recomb
else:
p_t = p_recomb
T[el].add(j)
p_e = mu[el]
if G[el, j] == h[el] or h[el] == tskit.MISSING_DATA:
p_e = 1 - (len(alleles[el]) - 1) * mu[el]
L_next[j] = p_t * p_e
L = L_next
j = np.argmax(L)
T_dest[el] = j
if L[j] == 0:
assert mu[el] == 0
raise ValueError(
"Trying to match non-existent allele with zero mutation rate"
)
L /= L[j]
P = np.zeros(m, dtype=int)
P[m - 1] = T_dest[m - 1]
for el in range(m - 1, 0, -1):
j = P[el]
if j in T[el]:
j = T_dest[el - 1]
P[el - 1] = j
return P
def ls_viterbi_vectorised(h, alleles, G, rho, mu):
# We must have a non-zero mutation rate, or we'll end up with
# division by zero problems.
# assert np.all(mu > 0)
m, n = G.shape
alleles = check_alleles(alleles, m)
V = np.ones(n)
T = [None for _ in range(m)]
max_index = np.zeros(m, dtype=int)
for site in range(m):
# Transition
p_neq = rho[site] / n
p_t = (1 - rho[site] + rho[site] / n) * V
recombinations = np.where(p_neq > p_t)[0]
p_t[recombinations] = p_neq
T[site] = recombinations
# Emission
p_e = np.zeros(n) + mu[site]
index = G[site] == h[site]
if h[site] == tskit.MISSING_DATA:
# Missing data is considered equal to everything
index[:] = True
p_e[index] = 1 - (len(alleles[site]) - 1) * mu[site]
V = p_t * p_e
# Normalise
max_index[site] = np.argmax(V)
# print(site, ":", V)
if V[max_index[site]] == 0:
assert mu[site] == 0
raise ValueError(
"Trying to match non-existent allele with zero mutation rate"
)
V /= V[max_index[site]]
# Traceback
P = np.zeros(m, dtype=int)
site = m - 1
P[site] = max_index[site]
while site > 0:
j = P[site]
if in_sorted(T[site], j):
j = max_index[site - 1]
P[site - 1] = j
site -= 1
return P
def check_alleles(alleles, num_sites):
"""
Checks the specified allele list and returns a list of lists
of alleles of length num_sites.
If alleles is a 1D list of strings, assume that this list is used
for each site and return num_sites copies of this list.
Otherwise, raise a ValueError if alleles is not a list of length
num_sites.
"""
if isinstance(alleles[0], str):
return [alleles for _ in range(num_sites)]
if len(alleles) != num_sites:
raise ValueError("Malformed alleles list")
return alleles
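# --- Small illustration of the two accepted forms (added; not in the original tests). ---
def _check_alleles_example():
    # A flat list of allele strings is reused at every site...
    assert check_alleles(["A", "C"], 3) == [["A", "C"]] * 3
    # ...while a per-site list must already have length num_sites.
    assert check_alleles([["A", "C"], ["A", "G"]], 2) == [["A", "C"], ["A", "G"]]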
def ls_forward_matrix(h, alleles, G, rho, mu):
"""
Simple matrix based method for LS forward algorithm using numpy vectorisation.
"""
assert rho[0] == 0
m, n = G.shape
alleles = check_alleles(alleles, m)
F = np.zeros((m, n))
S = np.zeros(m)
f = np.zeros(n) + 1 / n
p_e = np.zeros(n)
for el in range(0, m):
p_t = f * (1 - rho[el]) + rho[el] / n
eq = G[el] == h[el]
if h[el] == tskit.MISSING_DATA:
# Missing data is equal to everything
eq[:] = True
p_e[:] = mu[el]
p_e[eq] = 1 - (len(alleles[el]) - 1) * mu[el]
f = p_t * p_e
S[el] = np.sum(f)
# TODO need to handle the 0 case.
assert S[el] > 0
f /= S[el]
F[el] = f
return F, S
def forward_matrix_log_proba(F, S):
"""
Given the specified forward matrix and scaling factor array, return the
overall log probability of the input haplotype.
"""
return np.sum(np.log(S)) - np.log(np.sum(F[-1]))
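# Typical use (illustrative): F, S = ls_forward_matrix(h, alleles, G, rho, mu)
# followed by forward_matrix_log_proba(F, S) gives the overall log likelihood
# of the haplotype h under the model.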
def ls_forward_matrix_unscaled(h, alleles, G, rho, mu):
"""
Simple matrix based method for LS forward algorithm.
"""
assert rho[0] == 0
m, n = G.shape
alleles = check_alleles(alleles, m)
F = np.zeros((m, n))
f = np.zeros(n) + 1 / n
for el in range(0, m):
s = np.sum(f)
for j in range(n):
p_t = f[j] * (1 - rho[el]) + s * rho[el] / n
p_e = mu[el]
if G[el, j] == h[el] or h[el] == tskit.MISSING_DATA:
p_e = 1 - (len(alleles[el]) - 1) * mu[el]
f[j] = p_t * p_e
F[el] = f
return F
# TODO change this to use the log_proba function below.
def ls_path_probability(h, path, G, rho, mu):
"""
Returns the probability of the specified path through the genotypes for the
specified haplotype.
"""
# Assuming num_alleles = 2
assert rho[0] == 0
m, n = G.shape
# TODO It's not entirely clear why we're starting with a proba of 1 / n for the
# model. This was done because it made it easier to compare with an existing
# HMM implementation. Need to figure this one out when writing up.
proba = 1 / n
for site in range(0, m):
pe = mu[site]
if h[site] == G[site, path[site]] or h[site] == tskit.MISSING_DATA:
pe = 1 - mu[site]
pt = rho[site] / n
if site == 0 or path[site] == path[site - 1]:
pt = 1 - rho[site] + rho[site] / n
proba *= pt * pe
return proba
def ls_path_log_probability(h, path, alleles, G, rho, mu):
"""
Returns the log probability of the specified path through the genotypes for the
specified haplotype.
"""
assert rho[0] == 0
m, n = G.shape
alleles = check_alleles(alleles, m)
# TODO It's not entirely clear why we're starting with a proba of 1 / n for the
# model. This was done because it made it easier to compare with an existing
# HMM implementation. Need to figure this one out when writing up.
log_proba = np.log(1 / n)
for site in range(0, m):
if len(alleles[site]) > 1:
assert mu[site] <= 1 / (len(alleles[site]) - 1)
pe = mu[site]
if h[site] == G[site, path[site]] or h[site] == tskit.MISSING_DATA:
pe = 1 - (len(alleles[site]) - 1) * mu[site]
assert 0 <= pe <= 1
pt = rho[site] / n
if site == 0 or path[site] == path[site - 1]:
pt = 1 - rho[site] + rho[site] / n
assert 0 <= pt <= 1
log_proba += np.log(pt) + np.log(pe)
return log_proba
def ls_forward_tree(h, alleles, ts, rho, mu, precision=30, use_lib=True):
"""
Forward matrix computation based on a tree sequence.
"""
if use_lib:
acgt_alleles = tuple(alleles) == tskit.ALLELES_ACGT
ls_hmm = _tskit.LsHmm(
ts.ll_tree_sequence,
recombination_rate=rho,
mutation_rate=mu,
precision=precision,
acgt_alleles=acgt_alleles,
)
cm = _tskit.CompressedMatrix(ts.ll_tree_sequence)
ls_hmm.forward_matrix(h, cm)
return cm
else:
fa = ForwardAlgorithm(ts, rho, mu, alleles, precision=precision)
return fa.run(h)
def ls_viterbi_tree(h, alleles, ts, rho, mu, precision=30, use_lib=True):
"""
Viterbi path computation based on a tree sequence.
"""
if use_lib:
acgt_alleles = tuple(alleles) == tskit.ALLELES_ACGT
ls_hmm = _tskit.LsHmm(
ts.ll_tree_sequence,
recombination_rate=rho,
mutation_rate=mu,
precision=precision,
acgt_alleles=acgt_alleles,
)
vm = _tskit.ViterbiMatrix(ts.ll_tree_sequence)
ls_hmm.viterbi_matrix(h, vm)
return vm
else:
va = ViterbiAlgorithm(ts, rho, mu, alleles, precision=precision)
return va.run(h)
class ValueTransition:
"""
Simple struct holding value transition values.
"""
def __init__(self, tree_node=-1, value=-1, value_index=-1):
self.tree_node = tree_node
self.value = value
self.value_index = value_index
def copy(self):
return ValueTransition(self.tree_node, self.value, self.value_index)
def __repr__(self):
return repr(self.__dict__)
def __str__(self):
return repr(self)
class LsHmmAlgorithm:
"""
Abstract superclass of Li and Stephens HMM algorithm.
"""
def __init__(self, ts, rho, mu, alleles, precision=10):
self.ts = ts
self.mu = mu
self.rho = rho
self.alleles = check_alleles(alleles, ts.num_sites)
self.precision = precision
# The array of ValueTransitions.
self.T = []
# indexes in to the T array for each node.
self.T_index = np.zeros(ts.num_nodes, dtype=int) - 1
# The number of nodes underneath each element in the T array.
self.N = np.zeros(ts.num_nodes, dtype=int)
# Efficiently compute the allelic state at a site
self.allelic_state = | np.zeros(ts.num_nodes, dtype=int) | numpy.zeros |
import cv2
import numpy as np
import dlib
from gaze_tracking import GazeTracking
import pyautogui as pag
from math import hypot
from numpy import array
import win32com.client
import winsound
# Load sound
speaker = win32com.client.Dispatch("SAPI.SpVoice")
gaze = GazeTracking()
webcam = cv2.VideoCapture(0)
board = np.zeros((300, 1400), np.uint8)
board[:] = 255
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
first_frame = None
# Keyboard settings
keyboard = np.zeros((400, 1100, 4), np.uint8)
key_arr_1 = np.array(
[("1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "."), ("Q", "W", "E", "R", "T", "Y", "U", "I", "O", "P", "?"),
("A", "S", "D", "F", "G", "H", "J", "K", "L", "'"," "), ("Z", "X", "C", "V", "B", "N", "M", ",","<", "CL","")])
pts = np.array([[1020,340],[1020,360],[1040,360],[1070,390],[1070,310],[1040,340],[1020,340]],np.int32)
def direction(nose_point, anchor_point, w, h, multiple=1):
nx = nose_point[0]
ny = nose_point[1]
x = anchor_point[0]
y = anchor_point[1]
if ny > y + multiple * h:
return 'DOWN'
elif ny <= y - multiple * h:
return 'UP'
return '-'
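# Illustration (added): with anchor_point = (100, 100) and h = 30,
# direction((100, 140), (100, 100), w=60, h=30) returns 'DOWN' and
# direction((100, 60), (100, 100), w=60, h=30) returns 'UP'.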
def letter(letter_index_i, letter_index_j, text, letter_light):
width = 100
height = 100
th = 3 # thickness
# Keys
x = letter_index_j * width
y = letter_index_i * height
# Text settings
font_letter = cv2.FONT_HERSHEY_PLAIN
font_scale = 5
font_th = 4
text_size = cv2.getTextSize(text, font_letter, font_scale, font_th)[0]
width_text, height_text = text_size[0], text_size[1]
text_x = int((width - width_text) / 2) + x
text_y = int((height + height_text) / 2) + y
if letter_light is True:
cv2.rectangle(keyboard, (x + th, y + th), (x + width - th, y + height - th), (255, 255, 255), -1)
cv2.putText(keyboard, text, (text_x, text_y), font_letter, font_scale, (51, 51, 51), font_th)
cv2.polylines(keyboard, [pts], 1, (51, 51, 51), 4)
cv2.line(keyboard,(858,349),(888,349),(51,51,51),4)
else:
cv2.rectangle(keyboard, (x + th, y + th), (x + width - th, y + height - th), (51, 51, 51), -1)
cv2.putText(keyboard, text, (text_x, text_y), font_letter, font_scale, (255, 255, 255), font_th)
cv2.polylines(keyboard, [pts], 1, (255, 255, 255), 4)
cv2.line(keyboard, (858, 349), (888, 349), (255,255,255), 4)
def midpoint(p1, p2):
return int((p1.x + p2.x) / 2), int((p1.y + p2.y) / 2)
def draw_menu():
rows, cols, _ = keyboard.shape
th_lines = 4 # thickness lines
cv2.line(keyboard, (int(cols / 2) - int(th_lines / 2), 0), (int(cols / 2) - int(th_lines / 2), rows),
(51, 51, 51), th_lines)
cv2.putText(keyboard, "LEFT", (80, 300), font, 6, (255, 255, 255), 5)
cv2.putText(keyboard, "RIGHT", (80 + int(cols / 2), 300), font, 6, (255, 255, 255), 5)
font = cv2.FONT_HERSHEY_PLAIN
def get_blinking_ratio(eye_points, facial_landmarks):
left_point = (facial_landmarks.part(eye_points[0]).x, facial_landmarks.part(eye_points[0]).y)
right_point = (facial_landmarks.part(eye_points[3]).x, facial_landmarks.part(eye_points[3]).y)
center_top = midpoint(facial_landmarks.part(eye_points[1]), facial_landmarks.part(eye_points[2]))
center_bottom = midpoint(facial_landmarks.part(eye_points[5]), facial_landmarks.part(eye_points[4]))
# hor_line = cv2.line(frame, left_point, right_point, (0, 255, 0), 2)
# ver_line = cv2.line(frame, center_top, center_bottom, (0, 255, 0), 2)
    hor_line_length = hypot((left_point[0] - right_point[0]), (left_point[1] - right_point[1]))
    ver_line_length = hypot((center_top[0] - center_bottom[0]), (center_top[1] - center_bottom[1]))
    if ver_line_length == 0:
        ver_line_length = 1  # avoid division by zero when the eye appears fully closed
    ratio = hor_line_length / ver_line_length
return ratio
def get_gaze_ratio(eye_points, facial_landmarks):
left_eye_region = np.array([(facial_landmarks.part(eye_points[0]).x, facial_landmarks.part(eye_points[0]).y),
(facial_landmarks.part(eye_points[1]).x, facial_landmarks.part(eye_points[1]).y),
(facial_landmarks.part(eye_points[2]).x, facial_landmarks.part(eye_points[2]).y),
(facial_landmarks.part(eye_points[3]).x, facial_landmarks.part(eye_points[3]).y),
(facial_landmarks.part(eye_points[4]).x, facial_landmarks.part(eye_points[4]).y),
(facial_landmarks.part(eye_points[5]).x, facial_landmarks.part(eye_points[5]).y)],
np.int32)
# cv2.polylines(frame, [left_eye_region], True, (0, 0, 255), 2)
height, width, _ = frame.shape
mask = | np.zeros((height, width), np.uint8) | numpy.zeros |
### NumPy: Input/Output (I/O) ###
# For reading, the np.loadtxt function is used
# First, we import the libraries we are going to use: NumPy and Matplotlib.
# We will also use plt.ion() to enable matplotlib's interactive mode
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
## Loading the data ##
# We will use the loadtxt function to load the data into an array, with the
# optional arguments skiprows, delimiter and usecols to pick out the data we want
# loading the data:
# ./data/barrio_del_pilar-20160322.csv
data1 = np.loadtxt('../data/barrio_del_pilar-20160322.csv', skiprows=3, delimiter=';', usecols=(2,3,4))
data1[:10,:]
## Missing values: ##
# The file containing the 2015 data has some gaps caused by measurement errors.
# As an alternative to loadtxt we can use the genfromtxt function, keeping in mind
# that its optional argument for skipping lines is called skip_header instead.
#Loading the data 2:
#../data/barrio_del_pilar-20151222.csv
data2 = np.genfromtxt('../data/barrio_del_pilar-20151222.csv', skip_header=3, delimiter=';', usecols=(2,3,4))
data2[:10,:]
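# A quick way to count how many gaps genfromtxt produced (added for illustration):
np.isnan(data2).sum()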
# We can check how the presence of these values affects some NumPy functions,
# such as np.mean. Sometimes these problems can be avoided with other functions such as np.nanmean
print(np.mean(data2, axis=0))
print( | np.nanmean(data2, axis=0) | numpy.nanmean |
# Test plotting 3-dimensional things
import hdviz
import numpy as np
def create_3d_spiral(a: float = 1):
theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)
z = np.linspace(-2, 2, 100)
r = z ** 2 + 1
x = r * np.sin(a * theta)
y = r * np.cos(a * theta)
return np.vstack((x, y, z)).T
def test_point_plot():
x = np.random.normal(size=(100, 3))
a = hdviz.create_plotter(3)
a.add_pointset(x, label="example_data", alpha=0.3)
x2 = np.random.normal(size=(10, 3))
a.add_pointset(x2, label="example_other", marker="x", color="red")
x = np.random.normal(size=(30, 3))
a.add_pointset(x)
assert a.num_pointsets() == 3
b = a.plot(title="Hei", square=True)
assert str(b)[0:13] == "Axes3DSubplot"
a.clear_data()
def test_line_plot():
x = np.random.normal(size=(10, 100, 3))
a = hdviz.create_plotter(3)
a.add_lineset(x, label="lines", alpha=0.8)
assert a.num_pointsets() == 0
assert a.num_linesets() == 1
def test_mixed_plot():
l1 = create_3d_spiral(1.0)
l2 = create_3d_spiral(1.5)
x = np.stack((l1, l2))
a = hdviz.create_plotter(3)
a.add_lineset(x, label="lines", alpha=0.3)
x2 = np.random.normal(size=(10, 3))
a.add_pointset(x2, marker="x", color="red")
assert a.num_pointsets() == 1
assert a.num_linesets() == 1
def test_quiver_plot():
a = hdviz.create_plotter(3)
x = np.random.normal(size=(10, 3))
a.add_pointset(x, marker="x", color="red")
u = a.create_grid_around_points(square=False, M=8)
v1 = np.sin(u[:, 0])
v2 = np.cos(u[:, 1] + 0.1 * u[:, 0])
v3 = 0.1 * u[:, 2]
v = | np.vstack((v1, v2, v3)) | numpy.vstack |
import numpy as np
np.random.seed(7)
import tensorflow as tf
import datetime
import time
import threading
import math
import random
random.seed(7)
import os
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from keras.models import *
from keras.layers import *
from keras import backend as K
from enum import Enum
from time import sleep
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
art = """
.d8888b. 888 d88888888888
d88P Y88b 888 d88888 888
888 888 888 d88P888 888
888 888d888888 88888888b. 888888 .d88b. d88P 888 888
888 888P" 888 888888 "88b888 d88""88b d88P 888 888
888 888888 888 888888 888888 888 888 d88P 888 888
Y88b d88P888 Y88b 888888 d88PY88b. Y88..88P d8888888888 888
"Y8888P" 888 "Y8888888888P" "Y888 "Y88P" d88P 8888888888
888888 d88P
Y8b d88P888 d88P
"Y88P" 888 d88P by UmeW
****** Deep AC3 Trader ******
"""
THREADS = 8
np.set_printoptions(linewidth = 500)
# HyperParams
LOSS_V = .5 # v loss coefficient
LOSS_ENTROPY = 0.1 # entropy coefficient
LEARNING_RATE = 1e-4
EPS_START = 0.5
EPS_END = 0.1
EPS_SLOPE = 600
N_STEP_RETURN = 8
MIN_BATCH = 32
NUM_HISTORY = 300
NUM_STATE = 1 * NUM_HISTORY + 1 + 1 + 1 + 1  # scraped data + (shares bought?) + (budget?)
NUM_DENSE = 120
NUM_DENSE2 = 30
GAMMA = 0.99
GAMMA_N = GAMMA ** N_STEP_RETURN
CAN_SHORT = False
NUM_ACTIONS = 3 # Buy = 0 , Sell = 1 , Hold = 2
# States Var
mdPrice = []
mdPriceMin = []
mdPriceMax = []
mdBSRatio = []
mdVolume = []
mdVar = [0] * THREADS
mdMean = [0] * THREADS
mdTimeMax = [0] * THREADS
aHistory = [[] for i in range(THREADS)]
stopSignal = False
testFile = open("result2.test", "a")
print(art)
loadData()
if False :
HP_LOSS_V = [0.5]
HP_LOSS_ENTROPY = [0.01,0.001,0.1,0.5,1,10]
HP_LEARNING_RATE = [1e-4,5e-4,1e-3,1e-2]
HP_EPS_START = [0.5,0.6,0.7,0.4,0.3]
HP_EPS_END = [0.15,0.05,0.25,0.1]
HP_EPS_SLOPE = [10, 15, 5]
HP_N_STEP_RETURN = [8]
HP_MIN_BATCH = [32,64,512]
HP_NUM_HISTORY = [1,2,3]
HP_GAMMA = [0.99]
for loss_v in HP_LOSS_V:
LOSS_V = loss_v
for eps_start in HP_EPS_START:
EPS_START = eps_start
for eps_end in HP_EPS_END:
EPS_END = eps_end
for eps_slope in HP_EPS_SLOPE:
EPS_SLOPE = eps_slope
for n_step_return in HP_N_STEP_RETURN:
N_STEP_RETURN = n_step_return
for min_batch in HP_MIN_BATCH:
MIN_BATCH = min_batch
for num_history in HP_NUM_HISTORY:
NUM_STATE = 1 * NUM_HISTORY + 1 + 1 + 1
HP_NUM_DENSE = [30, 10, 100]
for num_dense in HP_NUM_DENSE:
NUM_DENSE = num_dense
for loss_entropy in HP_LOSS_ENTROPY:
LOSS_ENTROPY = loss_entropy
for learning_rate in HP_LEARNING_RATE:
LEARNING_RATE = learning_rate
for gamma in HP_GAMMA:
GAMMA = gamma
GAMMA_N = GAMMA ** N_STEP_RETURN
result = start()
strin = ("loss_v: " + str(loss_v) +
" | loss_entropy: " + str(loss_entropy) +
" | learning_rate: " + str(learning_rate) +
" | eps_start: " + str(eps_start) +
" | eps_end: " + str(eps_end) +
" | eps_slope: " + str(eps_slope) +
" | n_step_return: " + str(n_step_return) +
" | min_batch: " + str(min_batch) +
" | num_history: " + str(num_history) +
" | num_dense: " + str(num_dense) +
" | result: " + str(result) + "\n"
)
print(strin)
else :
print(start())
testFile.close
plt.ion()
fig = plt.figure()
lines = []
prices = []
for i in range(THREADS):
x = np.arange(NUM_HISTORY - 1, mdTimeMax[i%8], 1)
priceA = mdPrice[i % 8 ][NUM_HISTORY - 1: mdTimeMax[i % 8] ]
priceA = np.array([(x - mdMean[i % 8 ])/ mdVar[i % 8 ] for x in priceA])
prices.append(priceA)
ax = fig.add_subplot(int(THREADS/2), 2, i + 1)
acts = aHistory[i][0]
fill = [-1]*(mdTimeMax[i] - NUM_HISTORY- len(acts) + 1)
actions = np.array( acts + fill ) + 1
beee,line = ax.plot(x, priceA, 'b-', x, actions, 'ro')
lines.append(line)
plt.title(str(i))
fig.canvas.draw()
k = 0
while k < 1000:
for i in range(THREADS):
if k < len(aHistory[i]):
acts = aHistory[i][k]
fill = [-1]*(mdTimeMax[i] - NUM_HISTORY - len(acts) + 1)
actions = np.array( acts + fill ) + 1
else:
acts = aHistory[i][-1]
fill = [-1]*(mdTimeMax[i] - NUM_HISTORY - len(acts) + 1)
actions = np.array( acts + fill ) + 1
lines[i].set_ydata(actions)
k += 1
t = time.time()
while time.time() < t + 1 :
fig.canvas.flush_events()
sleep(0.001)
class Action(Enum):
BUY = 0
SELL = 1
HOLD = 2
def loadData():
j = 0
for j in range(0, 8):
with open('training2/training_'+ str(j) +'.data', 'r') as f:
buf = f.readlines()
mdPrice.append([])
mdPriceMin.append([])
mdPriceMax.append([])
mdBSRatio.append([])
mdVolume.append([])
esp = 0
esp2 = 0
            for line in buf:  # we should check that everything imported correctly
dat = line.split(' ')
#>>> t = "2017-12-08 23:22:00 16066.530120481928 16060 16072 38 225691"
#['2017-12-08', '23:22:00', '16066.530120481928', '16060', '16072', '38', '225691']
mdPrice[j].append(float(dat[2]))
esp += float(dat[2])
esp2 += float(dat[2]) ** 2
mdPriceMin[j].append(float(dat[3]))
mdPriceMax[j].append(float(dat[4]))
mdBSRatio[j].append(float(dat[5]))
mdVolume[j].append(float(dat[6]))
mdTimeMax[j] = int(len(buf))
esp = esp / mdTimeMax[j]
esp2 = esp2 / mdTimeMax[j]
mdVar[j] = math.sqrt(esp2 - (esp ** 2))
mdMean[j] = esp
#print(mdVar[j])
class Brain():
def __init__(self):
g = tf.Graph()
SESSION = tf.Session(graph=g)
self.session = SESSION
with g.as_default():
tf.set_random_seed(7)
K.set_session(self.session)
K.manual_variable_initialization(True)
self.model = self.BuildModel()
self.graph = self.BuildGraph()
self.session.run(tf.global_variables_initializer())
self.default_graph = tf.get_default_graph()
#self.default_graph.finalize()
self.buffer = [[], [], [], [], []]
self.lock = threading.Lock()
def BuildModel(self):
l_input = Input(batch_shape=(None, NUM_STATE))
#l_predense = Dense(NUM_DENSE, activation='relu', kernel_regularizer=regularizers.l2(0.01))(l_input)
#l_dense = Dense(NUM_DENSE, activation='relu', kernel_regularizer=regularizers.l2(0.01))(l_predense)
l_predense = Dense(NUM_DENSE, activation='tanh')(l_input)
l_dense = Dense(NUM_DENSE, activation='tanh')(l_predense)
out_actions = Dense(NUM_ACTIONS, activation='softmax')(l_dense)
out_value = Dense(1, activation='linear')(l_dense)
model = Model(inputs=[l_input], outputs=[out_actions, out_value])
model._make_predict_function()
self.intermediateModel = Model(inputs=[l_input], outputs=[l_dense])
self.intermediateModel._make_predict_function()
return model
def BuildGraph(self):
s_t = tf.placeholder(tf.float64, shape=(None, NUM_STATE))
r_t = tf.placeholder(tf.float64, shape=(None, 1)) # r + gamma vs'
a_t = tf.placeholder(tf.float64, shape=(None, NUM_ACTIONS))
p_t, v_t = self.model(s_t)
advantage = r_t - v_t
log_prob = tf.log(tf.reduce_sum(p_t * a_t, axis=1, keep_dims=True) + 1e-10)
loss_policy = - log_prob * tf.stop_gradient(advantage)
loss_value = LOSS_V * tf.square(advantage)
entropy = LOSS_ENTROPY * tf.reduce_sum(p_t * tf.log(p_t + 1e-10), axis=1, keep_dims=True)
loss_total = tf.reduce_mean(loss_policy + loss_value + entropy)
#loss_total = tf.reduce_mean(entropy)
self.loss = loss_total
optimizer = tf.train.RMSPropOptimizer(LEARNING_RATE, decay=.99)
minimize = optimizer.minimize(loss_total)
return s_t, a_t, r_t, minimize
def getPrediction(self, s):
with self.default_graph.as_default():
#print(self.intermediateModel.predict(s))
p, v = self.model.predict(s)
#print(p)
#s_t, a_t, r_t, minimize = self.graph
#k = self.session.run(self.entropy, feed_dict={s_t: s})
#print(k)
return p, v
def getValue(self, s):
with self.default_graph.as_default():
p, v = self.model.predict(s)
return v
def getPolicy(self, s):
with self.default_graph.as_default():
p, v = self.model.predict(s)
return p
def pushTraining(self, action, reward, oldStep, newStep, threadId):
with self.lock:
act = np.zeros(NUM_ACTIONS)
act[action] = 1
self.buffer[0].append(act)
self.buffer[1].append(reward)
self.buffer[2].append(oldStep)
if newStep is None:
self.buffer[3].append( | np.zeros(NUM_STATE) | numpy.zeros |
# Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytests for `pydrobert.kaldi.io.table_streams`"""
import platform
import numpy as np
import pytest
from pydrobert.kaldi.io import open as io_open
from pydrobert.kaldi.io import table_streams
from pydrobert.kaldi.io.enums import KaldiDataType
@pytest.mark.parametrize(
"dtype,value",
[
("bv", []),
("bm", [[]]),
("bv", [np.infty]),
("bv", [1] * 100),
("bm", [[1, 2], [3, 4]]),
("fv", [-1, -1, 0, 0.1]),
("fm", np.random.random((10, 10)).astype(np.float32)),
("dv", np.arange(1000, dtype=np.float64) - 10),
(
"dm",
np.outer(
np.arange(100, dtype=np.float32), np.arange(111, dtype=np.float32)
),
), # upcast ok
("t", "able"),
# our methods can accept unicode, but always return strings,
# so we don't enforce that these be unicode type.
("t", "\u00D6a"),
("t", "n\u00F9"),
# lists can be written, but tuples are read
("tv", tuple()),
("tv", ("foo", "bar")),
("tv", ("skryyyyy",)),
("tv", ("\u00D6a", "n\u00F9")),
("i", -10),
("iv", (0, 1, 2)),
("iv", tuple()),
("ivv", ((100,), (10, 40))),
("ivv", tuple()),
("ipv", ((1, 2), (3, 4))),
("ipv", tuple()),
("d", 0.1),
("d", 1),
("b", -0.1),
("b", -10000),
("bpv", ((0, 1.3), (4.5, 6))),
("bpv", tuple()),
("B", True),
("B", False),
],
)
@pytest.mark.parametrize("is_text", [True, False])
@pytest.mark.parametrize("bg", [True, False])
def test_read_write(temp_file_1_name, dtype, value, is_text, bg):
opts = ["", "t"] if is_text else [""]
specifier = "ark" + ",".join(opts) + ":" + temp_file_1_name
writer = io_open(specifier, dtype, mode="w")
writer.write("a", value)
writer.close()
if bg:
opts += ["bg"]
specifier = "ark" + ",".join(opts) + ":" + temp_file_1_name
reader = io_open(specifier, dtype)
once = True
for read_value in iter(reader):
assert once, "Multiple values"
try:
if dtype.startswith("b") or dtype.startswith("f") or dtype.startswith("d"):
assert | np.allclose(read_value, value) | numpy.allclose |
import os
import io
from collections import OrderedDict
import torch
from torch.nn.init import normal_, zeros_
from torchvision.utils import make_grid
import math
import bezier
import numpy as np
from PIL import Image
from tqdm import tqdm
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
def load_weight(model, weight, feat_dim=None):
loaded_state_dict = torch.load(weight)
state_dict = OrderedDict()
classifier = "fc"
w = "fc.weight"
b = "fc.bias"
for k, v in loaded_state_dict.items():
if not classifier in k:
state_dict[k] = v
elif k == w:
if v.size(0) != feat_dim:
classifier_weight = torch.empty((feat_dim,) + v.size()[1:], dtype=torch.float32)
normal_(classifier_weight)
state_dict[k] = classifier_weight
else:
state_dict[k] = v
elif k == b:
if v.size(0) != feat_dim:
classifier_bias = torch.empty((feat_dim,), dtype=torch.float32)
zeros_(classifier_bias)
state_dict[k] = classifier_bias
else:
state_dict[k] = v
else:
state_dict[k] = v
model.load_state_dict(state_dict)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def eval_bezier(ctrl_coor, degree):
curve = bezier.Curve(ctrl_coor, degree=degree)
ts = np.linspace(0.0, 1.0, 256)
x_eval, y_eval = curve.evaluate_multi(ts)
return x_eval, y_eval
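# Illustration (added): for a quadratic curve (degree=2) in 2-D, ctrl_coor is a
# (2, 3) array with the x control coordinates in row 0 and the y control
# coordinates in row 1, exactly as built in plot() below.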
def plot(image, label, output, gt_num_lane, pred_num_lane, degree):
ax = plt.gca()
ax.figure.set_size_inches(8.2, 2.95)
image = image.transpose([1, 2, 0])
ax.imshow(image)
cmap = ["C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"]
ctrl_points = output.reshape((-1, (degree + 1) * 2))
for idx in range(pred_num_lane):
x_ctrls = ctrl_points[idx, 0::2]
y_ctrls = ctrl_points[idx, 1::2]
ctrl_point = np.stack([x_ctrls, y_ctrls], axis=1).transpose()
x_eval, y_eval = eval_bezier(ctrl_point, degree)
ax.plot(x_eval, y_eval, color=cmap[idx], label="prediction", ls="--")
label = label[:gt_num_lane, :, :]
label = label.reshape((-1, 2))
ax.scatter(label[:, 0], label[:, 1], color="C0", s=10, label="reference")
if gt_num_lane != 0 or pred_num_lane != 0:
ax.legend(loc="upper right")
ax.set_xlim(0, 820)
ax.set_ylim(0, 295)
ax.axis("off")
ax.invert_yaxis()
with io.BytesIO() as buffer:
plt.savefig(buffer, bbox_inches="tight")
plt.close("all")
buffer.seek(0)
image = Image.open(buffer).convert("RGB")
image = image.resize((820, 295))
img = np.array(image)
image.close()
return img
def visualize_image(writer, images, labels, outputs, gt_num_lanes, pred_num_lanes, degree, global_step):
num_vis = 25
images = images.clone().cpu().data.numpy()
outputs = outputs.detach().cpu().data.numpy()
labels = labels.clone().cpu().data.numpy()
gt_num_lanes = gt_num_lanes.clone().cpu().data.numpy()
pred_num_lanes = pred_num_lanes.clone().cpu().data.numpy()
images = images[-num_vis:, :, :, :]
outputs = outputs[-num_vis:, :]
labels = labels[-num_vis:, :, :, :]
gt_num_lanes = gt_num_lanes[-num_vis:]
pred_num_lanes = pred_num_lanes[-num_vis:]
rendered_images = []
for image, label, output, gt_num_lane, pred_num_lane in zip(images, labels, outputs, gt_num_lanes, pred_num_lanes):
rendered_image = plot(image, label, output, gt_num_lane, pred_num_lane, degree)
rendered_images.append(rendered_image)
rendered_images = np.stack(rendered_images, axis=0)
rendered_images = rendered_images.transpose((0, 3, 1, 2))
rendered_images = torch.tensor(rendered_images)
grid_image = make_grid(rendered_images.data, int(math.sqrt(num_vis)), range=(0, 255))
writer.add_image("Vis", grid_image, global_step)
def train(model, dataloader, optimizer, criterion, beta, writer, epoch, degree, use_gpu):
model.train()
all_loss = []
all_cls_loss = []
all_dsd_loss = []
all_acc = []
for idx, data in enumerate(tqdm(dataloader, desc="Training epoch {}...".format(epoch))):
images, existence, coors, ts = data
if use_gpu:
images = images.cuda()
existence = existence.cuda()
coors = coors.cuda()
ts = ts.cuda()
outs1, outs2 = model(images)
cls_labels = existence.sum(dim=1).long()
cls_loss = criterion["xent"](outs1, cls_labels)
targets = {"existence": existence, "ts": ts, "coors": coors}
dsd_loss = criterion["dsd"](outs2, targets)
loss = beta * cls_loss + dsd_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
acc = (cls_labels == torch.argmax(outs1, dim=1)).float().mean()
all_loss.append(loss.item())
all_cls_loss.append(cls_loss.item())
all_dsd_loss.append(dsd_loss.item())
all_acc.append(acc.item())
writer.add_scalar("train_loss", loss.item(), global_step=epoch * len(dataloader) + idx)
writer.add_scalar("train_cls_loss", cls_loss.item(), global_step=epoch * len(dataloader) + idx)
writer.add_scalar("train_dsd_loss", dsd_loss.item(), global_step=epoch * len(dataloader) + idx)
writer.add_scalar("train_acc", acc.item(), global_step=epoch * len(dataloader) + idx)
writer.add_scalar("loss", np.nanmean(all_loss).item(), global_step=epoch)
writer.add_scalar("cls_loss", np.nanmean(all_cls_loss).item(), global_step=epoch)
writer.add_scalar("dsd_loss", np.nanmean(all_dsd_loss).item(), global_step=epoch)
writer.add_scalar("acc", np.nanmean(all_acc).item(), global_step=epoch)
visualize_image(writer, images, coors, outs2, cls_labels, torch.argmax(outs1, dim=1), degree, global_step=epoch)
def evaluate(model, dataloader, criterion, beta, scheduler, writer, epoch, degree, weight_dir, use_gpu):
model.eval()
all_loss = []
all_cls_loss = []
all_dsd_loss = []
all_acc = []
with torch.no_grad():
for idx, data in enumerate(tqdm(dataloader, desc="Evaluating epoch {}...".format(epoch))):
images, existence, coors, ts = data
if use_gpu:
images = images.cuda()
existence = existence.cuda()
coors = coors.cuda()
ts = ts.cuda()
outs1, outs2 = model(images)
cls_labels = existence.sum(dim=1).long()
cls_loss = criterion["xent"](outs1, cls_labels)
targets = {"existence": existence, "ts": ts, "coors": coors}
dsd_loss = criterion["dsd"](outs2, targets)
loss = beta * cls_loss + dsd_loss
all_loss.append(loss.item())
all_cls_loss.append(cls_loss.item())
all_dsd_loss.append(dsd_loss.item())
acc = (cls_labels == torch.argmax(outs1, dim=1)).float().mean()
all_acc.append(acc.item())
loss = np.nanmean(all_loss).item()
cls_loss = np.nanmean(all_cls_loss).item()
dsd_loss = | np.nanmean(all_dsd_loss) | numpy.nanmean |
"""Use k-nearest neighbors on pitch2dv0 joint data"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import pandas as pd
import scipy.io as spio
import matplotlib.pyplot as plt
import time
from datetime import datetime
from sklearn import neighbors
from sklearn.metrics import confusion_matrix
import utils
train_path = "/media/linzhank/850EVO_1T/Works/Action_Recognition/Data/train/joint/"
test_path = "/media/linzhank/850EVO_1T/Works/Action_Recognition/Data/test/joint/"
TODAY = datetime.today().strftime("%Y%m%d")
result_path= "/media/linzhank/850EVO_1T/Works/Action_Recognition/Data/result{}".format(TODAY)
# Load train data
train_data = spio.loadmat(train_path + "joint_train.mat")["joint_train"]
num_examples_train = train_data.shape[0]
initid_train = utils.detectInit(train_data)
train_data = train_data.reshape(num_examples_train,150,75)
train_classes = spio.loadmat(train_path + "labels_train.mat")["labels_train"]
train_labels = np.argmax(train_classes, axis=1)
# Load test data
test_data = spio.loadmat(test_path + "joint_test.mat")["joint_test"]
num_examples_test = test_data.shape[0]
initid_test = utils.detectInit(test_data)
test_data = test_data.reshape(num_examples_test,150,75)
test_classes = spio.loadmat(test_path + "labels_test.mat")["labels_test"]
test_labels = np.argmax(test_classes, axis=1)
# Use 5, 10, 15,...,40 frames of data to train 8 knn classifiers
num_frames = 5*np.arange(1,9)
# Init best k storage
best_k = np.zeros(num_frames.shape).astype(int)
# Init best classifier storage
best_classifier = []
# Init highest train score storage
high_score_train = np.zeros(best_k.shape)
# Init highest test score storage
high_score_test = np.zeros(best_k.shape)
# Init knn-vote prediction storage
pred_even = np.zeros((num_frames.shape[0], test_labels.shape[0])).astype(int)
pred_disc = np.zeros((num_frames.shape[0], test_labels.shape[0])).astype(int)
pred_logr = np.zeros((num_frames.shape[0], test_labels.shape[0])).astype(int)
# Init prediction accuracy storage
acc_even = np.zeros(best_k.shape)
acc_disc = np.zeros(best_k.shape)
acc_logr = np.zeros(best_k.shape)
# Init time consumption storage
time_elapsed = np.zeros(best_k.shape)
for i,nf in enumerate(num_frames):
# On your mark
start_t = time.time()
Xtr, ytr = utils.prepJointData(
train_data,
train_labels,
initid_train,
nf,
shuffle=True)
Xte, yte = utils.prepJointData(
test_data,
test_labels,
initid_test,
nf)
# Build KNN classifier and make prediction
num_k = 16
score_train = np.zeros(num_k)
score_test = np.zeros(num_k)
knn = []
for k in range(num_k):
knn.append(neighbors.KNeighborsClassifier(k+1))
score_train[k] = knn[k].fit(Xtr, ytr).score(Xtr, ytr)
score_test[k] = knn[k].fit(Xtr, ytr).score(Xte, yte)
print("[{}]{} frames, training accuracy: {} @ k={}".format(time.time(), nf, score_train[k], k+1))
print("[{}]{} frames, testing accuracy: {} @ k={}".format(time.time(), nf, score_test[k], k+1))
# Find best k and best classifier
bki = | np.argmax(score_test) | numpy.argmax |
# -*- mode: python; coding: utf-8 -*
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 3-clause BSD License
import os
import numpy as np
from astropy import units
from astropy.coordinates import Angle, EarthLocation
import pytest
from pyuvdata import UVData
from scipy.special import j1
import pyuvsim
import pyuvsim.utils as simutils
from pyuvsim.data import DATA_PATH as SIM_DATA_PATH
from pyuvsim.astropy_interface import Time
EW_uvfits_file = os.path.join(SIM_DATA_PATH, '28mEWbl_1time_1chan.uvfits')
c_ms = pyuvsim.analyticbeam.c_ms
@pytest.fixture
def heratext_posfreq():
time = Time('2018-03-01 00:00:00', scale='utc')
array_location = EarthLocation(lat='-30d43m17.5s', lon='21d25m41.9s',
height=1073.)
sources, mock_keywords = pyuvsim.create_mock_catalog(
time, 'hera_text', array_location=array_location
)
sources.update_positions(time, array_location)
    za_vals = np.pi / 2. - sources.alt_az[0]  # rad; alt_az[0] is the altitude, so this is the zenith angle
az_vals = sources.alt_az[1]
freq_vals = np.array([10**8])
return az_vals, za_vals, freq_vals
def test_uniform_beam(heratext_posfreq):
beam = pyuvsim.AnalyticBeam('uniform')
beam.peak_normalize()
az_vals, za_vals, freqs = heratext_posfreq
nsrcs = az_vals.size
n_freqs = freqs.size
interpolated_beam, interp_basis_vector = beam.interp(
az_array=az_vals, za_array=za_vals, freq_array=freqs
)
expected_data = np.zeros((2, 1, 2, n_freqs, nsrcs), dtype=float)
expected_data[1, 0, 0, :, :] = 1
expected_data[0, 0, 1, :, :] = 1
assert np.allclose(interpolated_beam, expected_data)
def test_airy_beam_values(heratext_posfreq):
diameter_m = 14.
beam = pyuvsim.AnalyticBeam('airy', diameter=diameter_m)
beam.peak_normalize()
az_vals, za_vals, freq_vals = heratext_posfreq
interpolated_beam, interp_basis_vector = beam.interp(
az_array=az_vals, za_array=za_vals, freq_array=freq_vals
)
expected_data = np.zeros((2, 1, 2, 1, az_vals.size), dtype=float)
za_grid, f_grid = np.meshgrid(za_vals, freq_vals)
xvals = diameter_m / 2. * np.sin(za_grid) * 2. * np.pi * f_grid / c_ms
airy_values = np.zeros_like(xvals)
nz = xvals != 0.
ze = xvals == 0.
airy_values[nz] = 2. * j1(xvals[nz]) / xvals[nz]
airy_values[ze] = 1.
expected_data[1, 0, 0, :, :] = airy_values
expected_data[0, 0, 1, :, :] = airy_values
assert np.allclose(interpolated_beam, expected_data)
def test_uv_beam_widths():
# Check that the width of the Airy disk beam in UV space corresponds with the dish diameter.
diameter_m = 25.0
beam = pyuvsim.AnalyticBeam('airy', diameter=diameter_m)
beam.peak_normalize()
Nfreqs = 20
freq_vals = np.linspace(100e6, 130e6, Nfreqs)
lams = c_ms / freq_vals
N = 250
Npix = 500
zmax = np.radians(90) # Degrees
arr = np.arange(-N, N)
x, y = np.meshgrid(arr, arr)
r = np.sqrt(x ** 2 + y ** 2) / float(N)
zas = r * zmax
azs = np.arctan2(y, x)
interpolated_beam, interp_basis_vector = beam.interp(
az_array=np.array(azs), za_array=np.array(zas), freq_array=np.array(freq_vals)
)
ebeam = interpolated_beam[0, 0, 1, :, :]
ebeam = ebeam.reshape(Nfreqs, Npix, Npix)
beam_kern = np.fft.fft2(ebeam, axes=(1, 2))
beam_kern = np.fft.fftshift(beam_kern, axes=(1, 2))
for i, bk in enumerate(beam_kern):
# Cutoff at half a % of the maximum value in Fourier space.
thresh = np.max(np.abs(bk)) * 0.005
points = np.sum(np.abs(bk) >= thresh)
upix = 1 / (2 * np.sin(zmax)) # 2*sin(zmax) = fov extent projected onto the xy plane
area = np.sum(points) * upix ** 2
kern_radius = np.sqrt(area / np.pi)
assert np.isclose(diameter_m / lams[i], kern_radius, rtol=0.5)
def test_achromatic_gaussian_beam(heratext_posfreq):
sigma_rad = Angle('5d').to_value('rad')
beam = pyuvsim.AnalyticBeam('gaussian', sigma=sigma_rad)
beam.peak_normalize()
az_vals, za_vals, freq_vals = heratext_posfreq
nsrcs = az_vals.size
n_freqs = freq_vals.size
interpolated_beam, interp_basis_vector = beam.interp(
az_array=np.array(az_vals), za_array=np.array(za_vals), freq_array=np.array(freq_vals)
)
expected_data = np.zeros((2, 1, 2, n_freqs, nsrcs), dtype=float)
interp_zas = np.zeros((n_freqs, nsrcs), dtype=float)
for f_ind in range(n_freqs):
interp_zas[f_ind, :] = np.array(za_vals)
gaussian_vals = np.exp(-(interp_zas ** 2) / (2 * sigma_rad ** 2))
expected_data[1, 0, 0, :, :] = gaussian_vals
expected_data[0, 0, 1, :, :] = gaussian_vals
assert np.allclose(interpolated_beam, expected_data)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_gaussbeam_values():
"""
Make the long-line point sources up to 10 degrees from zenith.
Confirm that the coherencies match the expected beam values at those zenith angles.
"""
sigma = 0.05
hera_uv = UVData()
hera_uv.read_uvfits(EW_uvfits_file)
array_location = EarthLocation.from_geocentric(
*hera_uv.telescope_location, unit='m'
)
freq = hera_uv.freq_array[0, 0] * units.Hz
time = Time(hera_uv.time_array[0], scale='utc', format='jd')
catalog, mock_keywords = pyuvsim.create_mock_catalog(
time=time, arrangement='long-line', Nsrcs=41, min_alt=80., array_location=array_location
)
catalog.update_positions(time, array_location)
beam = pyuvsim.AnalyticBeam('gaussian', sigma=sigma)
array = pyuvsim.Telescope('telescope_name', array_location, [beam])
# Need a dummy baseline for this test.
antenna1 = pyuvsim.Antenna('ant1', 1, np.array([0, 0, 0]), 0)
antenna2 = pyuvsim.Antenna('ant2', 2, np.array([107, 0, 0]), 0)
baseline = pyuvsim.Baseline(antenna1, antenna2)
task = pyuvsim.UVTask(catalog, time, freq, baseline, array)
engine = pyuvsim.UVEngine(task)
engine.apply_beam()
altitudes = task.sources.alt_az[0] # In radians.
# All four components should be identical
if isinstance(engine.apparent_coherency, units.Quantity):
coherency_use = engine.apparent_coherency.to_value("Jy")
else:
coherency_use = engine.apparent_coherency
coherencies = np.real(coherency_use[0, 0] + coherency_use[1, 1]).astype(float)
zenith_angles, _ = simutils.altaz_to_zenithangle_azimuth(
altitudes, np.zeros_like(np.array(altitudes))
)
# Confirm the coherency values (ie., brightnesses) match the beam values.
beam_values = np.exp(-(zenith_angles) ** 2 / (2 * beam.sigma ** 2))
assert np.all(beam_values ** 2 == coherencies)
def test_chromatic_gaussian():
"""
test_chromatic_gaussian
Defining a gaussian beam with a spectral index and reference frequency.
Check that beam width follows prescribed power law.
"""
freqs = np.arange(120e6, 160e6, 4e6)
Nfreqs = len(freqs)
Npix = 1000
alpha = -1.5
sigma = np.radians(15.0)
az = np.zeros(Npix)
za = np.linspace(0, np.pi / 2., Npix)
# Error if trying to define chromatic beam without a reference frequency
with pytest.raises(ValueError,
match='ref_freq must be set for nonzero gaussian beam spectral index'):
pyuvsim.AnalyticBeam('gaussian', sigma=sigma, spectral_index=alpha)
A = pyuvsim.AnalyticBeam('gaussian', sigma=sigma, ref_freq=freqs[0], spectral_index=alpha)
# Get the widths at each frequency.
vals, _ = A.interp(az, za, freqs)
vals = vals[0, 0, 1]
for fi in range(Nfreqs):
hwhm = za[np.argmin(np.abs(vals[fi] - 0.5))]
sig_f = sigma * (freqs[fi] / freqs[0]) ** alpha
assert np.isclose(sig_f, 2 * hwhm / 2.355, atol=1e-3)
def test_power_analytic_beam():
# Check that power beam evaluation matches electric field amp**2 for analytic beams.
freqs = np.arange(120e6, 160e6, 4e6)
Npix = 1000
diam = 14.0
az = np.zeros(Npix)
za = np.linspace(0, np.pi / 2., Npix)
for b in ['gaussian', 'uniform', 'airy']:
eb = pyuvsim.AnalyticBeam(b, diameter=diam)
pb = pyuvsim.AnalyticBeam(b, diameter=diam)
pb.efield_to_power()
evals = eb.interp(az, za, freqs)[0][0, 0, 1]
pvals = pb.interp(az, za, freqs)[0][0, 0, 0]
assert np.allclose(evals**2, pvals)
# Ensure uniform beam works
pb = pyuvsim.AnalyticBeam('uniform')
pb.efield_to_power()
pb.interp(az, za, freqs)
def test_comparison():
"""
Beam __eq__ method
"""
beam1 = pyuvsim.AnalyticBeam('uniform')
beam2 = pyuvsim.AnalyticBeam('gaussian', sigma=0.02)
beam2.type = 'undefined'
not_beam = UVData()
assert beam1 != not_beam
assert beam2 != beam1
def test_beamerrs():
"""
Error cases.
"""
with pytest.raises(ValueError, match='type not recognized'):
pyuvsim.AnalyticBeam('unsupported_type')
beam = pyuvsim.AnalyticBeam('gaussian')
az, za = np.random.uniform(0.0, np.pi, (2, 5))
freq_arr = np.linspace(1e8, 1.5e8, 10)
with pytest.raises(ValueError, match='Dish diameter needed for gaussian beam'):
beam.interp(az, za, freq_arr)
beam.type = 'airy'
with pytest.raises(ValueError, match='Dish diameter needed for airy beam'):
beam.interp(az, za, freq_arr)
beam.type = 'noninterpolable'
with pytest.raises(ValueError, match='no interp for this type: noninterpolable'):
beam.interp(az, za, freq_arr)
def test_diameter_to_sigma():
# The integrals of an Airy power beam and a Gaussian power beam, within
# the first Airy null, should be close if the Gaussian width is set to the Airy width.
diameter_m = 25.0
abm = pyuvsim.AnalyticBeam('airy', diameter=diameter_m)
gbm = pyuvsim.AnalyticBeam('gaussian', diameter=diameter_m)
Nfreqs = 20
freq_vals = np.linspace(100e6, 130e6, Nfreqs)
lams = c_ms / freq_vals
N = 250
Npix = 501
zmax = np.radians(40) # Degrees
zas = | np.linspace(-zmax, zmax, Npix) | numpy.linspace |
import matplotlib.pyplot as plt
import numpy as np
from math import log10
sizes = [64, 6400, 640000]
sdma_rotate_times = [[9056583, 9657132, 10272182, 3671854, 4271867, 5457278, 3380028, 10256436, 5457458, 2779993],
[7799936, 7800599, 7200640, 6300599, 3000747, 7200884, 9600764, 3000590, 5400458, 3000737],
[8824067, 10683803, 12783663, 16083949, 13083638, 8283578, 10683468, 7683570, 11283738, 6783683]]
reg_rotate_times = [[564, 533, 547, 564, 559, 545, 562, 560, 550, 563],
[35929, 36972, 36491, 36532, 36214, 36481, 36173, 36240, 36714, 36440],
[133322070,133330533,133349656,133338860,133345244,133350214,133330109,133326373,133335184,133343432]]
fig, ax = plt.subplots()
ax.set_title('Rotate Runtime')
ax.set_xlabel('Matrix size (32-bit ints, in logscale)')
ax.set_ylabel('Cycles / 1000 (in logscale)')
ax.plot(sizes, [int(np.mean(times)) for times in sdma_rotate_times], 'bs',
label='SDMA', markersize=12)
ax.plot(sizes, [int(np.mean(times)) for times in sdma_rotate_times],
linewidth=2.0)
ax.plot(sizes, [int(np.mean(times)) for times in reg_rotate_times], 'g^',
label='Standard', markersize=12)
ax.plot(sizes, [int(np.mean(times)) for times in reg_rotate_times],
linewidth=2.0)
legend = ax.legend(loc='lower right')
ax.set_xscale('log')
s_arr = [6.4]
s_arr.extend(sizes)
s_arr.append(6400000)
plt.xticks(s_arr, ['', '8 x 8', '80 x 80', '800 x 800', ''])
ax.set_yscale('log')
c_arr = [1, 10, 100, 1000, 10000, 100000, 1000000]
plt.yticks([c * 1000 for c in c_arr], ['1', '10', '100', '1,000', '10,000', '100,000', '1 mil.'])
plt.grid()
plt.savefig('rotate-times.png')
print ('=== SDMA rotate ===')
for i in range(3):
print('size=' + str(sizes[i]) + ': mean=' + str(int(np.mean(sdma_rotate_times[i])))
+ ' std=' + str(int(np.std(sdma_rotate_times[i]))))
print ('\n=== Regular rotate ===')
for i in range(3):
print('size=' + str(sizes[i]) + ': mean=' + str(int(np.mean(reg_rotate_times[i])))
+ ' std=' + str(int( | np.std(reg_rotate_times[i]) | numpy.std |
import random
import pandas as pd
import tqdm
import numpy as np
data = pd.read_csv(
'../data/2009_skill_builder_data_corrected/skill_builder_data_corrected.csv',
    usecols=['order_id', 'user_id', 'sequence_id', 'skill_id', 'correct']).fillna(0)  # fill missing skill_id with 0
# print(data.isnull().sum())  # summary of missing values
raw_question = data.skill_id.unique().tolist()  # skill (knowledge point) ids
num_skill = len(raw_question)  # 124 skills, because the missing skill ids were all converted to 0
def question_id_transfer(question):
'''
    :param question: list of skill ids
    :return: the list of skill ids and a dict mapping each skill id to its index
'''
id2question = [p for p in raw_question]
question2id = {}
for i, p in enumerate(raw_question):
question2id[p] = i
return id2question, question2id
id2question, question2id = question_id_transfer(raw_question)
def parse_all_seq(students):
'''
    :param students: student ids (user_id); unique here, with no duplicated ids.
:return:
'''
all_sequences = []
    for student_id in tqdm.tqdm(students, 'parse student sequence:\t'):  # tqdm progress bar
        student_sequence = parse_student_seq(data[data.user_id == student_id])  # preprocess each student's answer records
        all_sequences.extend(student_sequence)  # append all of this student's sequences to the overall list
return all_sequences
def parse_student_seq(student):
'''
    :param student: one student's answer records, containing 'order_id', 'user_id', 'sequence_id', 'skill_id', 'correct'
:return:
'''
    student = student.drop_duplicates(subset='order_id')  # drop rows with duplicate order_id (repeated answer records)
    sequence_ids = student.sequence_id.unique()  # ids of the problem sets (sequences) this student worked on
    sequences = []  # list of (skill ids, corresponding scores); length = number of sequences the student answered
    for seq_id in sequence_ids:  # iterate over the sequences
        seq = student[student.sequence_id == seq_id].sort_values('order_id')  # sort by order_id to recover the answering order within this problem set
        questions = [question2id[id] for id in seq.skill_id.tolist()]  # skill ids
answers = seq.correct.tolist()
sequences.append((questions, answers))
return sequences # length equals the number of sequences the student answered; each element is a tuple of two lists: the skills examined and the corresponding responses
'''Get each student's response records on every sequence.
length: number of students x number of sequences per student = 59874'''
sequences = parse_all_seq(data.user_id.unique())
def train_test_split(data, train_size=.7, shuffle=True):
'''
:param data: each student's responses on every sequence; each element is a tuple ([skill_id], [score profiles])
:param train_size: proportion of the training set
:param shuffle: whether to shuffle
:return: training set and test set
'''
if shuffle:
random.shuffle(data)
boundary = round(len(data) * train_size)
return data[: boundary], data[boundary:]
train_sequences, test_sequences = train_test_split(sequences) # get the training and test sets
def sequences2tl(sequences, trgpath):
with open(trgpath, 'a', encoding='utf8') as f:
for seq in tqdm.tqdm(sequences, 'write into file: '):
# seq:([skill_id], [score profiles])
questions, answers = seq
seq_len = len(questions)
f.write(str(seq_len) + '\n')
f.write(','.join([str(q) for q in questions]) + '\n')
f.write(','.join([str(a) for a in answers]) + '\n')
"""
Every three lines of the resulting data form one record (Triple Line Format data)
Line 1: the number of answers the student gave on each problem set (sequence)
Line 2: the skill examined by each sub-problem
Line 3: the corresponding responses (0 = wrong, 1 = correct)
"""
# save triple line format for other tasks
sequences2tl(train_sequences, '../../data/2009_skill_builder_data_corrected/train.txt')
sequences2tl(test_sequences, '../../data/2009_skill_builder_data_corrected/test.txt')
MAX_STEP = 50 # sequence length
NUM_QUESTIONS = num_skill # number of skills
'''Following the original DKT paper, encode the responses as one-hot vectors used as model input'''
def encode_onehot(sequences, max_step, num_questions):
question_sequences = np.array([])
answer_sequences = np.array([])
onehot_result = []
for questions, answers in tqdm.tqdm(sequences, 'convert to onehot format: '):
# seq: ([skill_id], [score profiles])
# questions: [skill_id]
# answers: [score profiles]
length = len(questions)
# append questions' and answers' length to an integer multiple of max_step
mod = 0 if length % max_step == 0 else (max_step - length % max_step)
fill_content = np.zeros(mod) - 1 # pad with -1 so that every vector length is a multiple of max_step (50)
questions = np.append(questions, fill_content)
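# Hypothetical sketch (not part of the original script): one common way to finish
# the DKT-style one-hot conversion, assuming the answers are padded with -1 in the
# same way as the questions and that each (skill, correctness) pair maps to one of
# 2 * num_questions input features. The helper name encode_one_student is ours.
def encode_one_student(questions, answers, max_step, num_questions):
    q = np.asarray(questions).reshape(-1, max_step)
    a = np.asarray(answers).reshape(-1, max_step)
    onehot = np.zeros((q.shape[0], max_step, 2 * num_questions))
    for chunk in range(q.shape[0]):
        for step in range(max_step):
            if q[chunk, step] >= 0:  # skip the -1 padding
                index = int(q[chunk, step] + a[chunk, step] * num_questions)
                onehot[chunk, step, index] = 1
    return onehot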
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 26 18:29:41 2019
@author: <NAME>
"""
import cv2
from PIL import Image
import matplotlib.pyplot as plt
import tools
import numpy as np
from scipy import ndimage
#from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
#%%
#images
#PSP_001414_1780_RED_img_row_33792_col_12288_w_1024_h_1024_x_0_y_0
#PSP_001414_1780_RED_img_row_32768_col_15360_w_1024_h_1024_x_0_y_0
#PSP_001414_1780_RED_img_row_32768_col_14336_w_1024_h_1024_x_0_y_0
#PSP_001414_1780_RED_img_row_32768_col_13312_w_1024_h_1024_x_0_y_0
#PSP_001414_1780_RED_img_row_9216_col_11264_w_1024_h_1024_x_0_y_0
#chameleon
#parachute
path = "C:/Users/<NAME>/Documents/Unterlagen/SoSe2019/mars/python/1024x1024/"
img = cv2.imread('rocks.jpg')
#im = Image.open('rocks.jpg')
#np_im = np.array(im)
sharpened = tools.sharp(img, 3)
stretched = tools.stretch_8bit(img)
enhanced1 = tools.stretch_8bit(sharpened)
enhanced2 = tools.sharp(stretched, 3)
plt.imshow(enhanced1)
plt.show()
plt.imshow(enhanced2)
plt.show()
compare = tools.concatenate([img, sharpened, stretched, enhanced1, enhanced2]) #they are img type
plt.imshow(compare)
plt.show()
print(type(compare))
#cv2.imwrite('land_sharp3.jpg', compare)
compare = tools.concatenate([img, enhanced1, enhanced2])
plt.imshow(compare)
plt.show()
#compare.save('land_orgfinal_sharp3.jpg')
#cv2.imwrite('output/enhanced.jpg', enhanced1)
#cv2.imwrite('output/stretched.jpg', stretched)
#v2.imwrite('output/sharpened.jpg', sharpened)
#%%
img = cv2.imread('output/techno-signature_augmentation/parachute.jpg')
#simple = tools.augment_simple(img)
#augmentations = tools.augment_random(simple[3], generations = 8)
#augmentations = [flipped, rolled, rotated90, rotated180]
#cv2.imwrite('flipped.jpg', simple[0])
#cv2.imwrite('rolled.jpg', simple[1])
#cv2.imwrite('rotated90.jpg', simple[2])
#cv2.imwrite('rotated180.jpg', simple[3])
lista = [cv2.imread('aug_00.jpg'),cv2.imread('aug_01.jpg'),cv2.imread('aug_02.jpg'),cv2.imread('aug_03.jpg'),
cv2.imread('aug_04.jpg'),cv2.imread('aug_05.jpg'),cv2.imread('aug_06.jpg'),cv2.imread('aug_07.jpg')]
#lista2 = [cv2.imread('aug_08.jpg'),cv2.imread('aug_09.jpg'),cv2.imread('aug_10.jpg'),cv2.imread('aug_11.jpg'),
#cv2.imread('aug_12.jpg'),cv2.imread('aug_13.jpg'),cv2.imread('aug_14.jpg'),cv2.imread('aug_15.jpg')]
#lista3 = [cv2.imread('aug_16.jpg'),cv2.imread('aug_17.jpg'),cv2.imread('aug_18.jpg'),cv2.imread('aug_19.jpg'),
#cv2.imread('aug_20.jpg'),cv2.imread('aug_21.jpg'),cv2.imread('aug_22.jpg'),cv2.imread('aug_23.jpg')]
#%%
concatenated = tools.concatenate(lista)
plt.imshow(concatenated)
plt.show()
concatenated.save('comb5.jpg')
#%%
#_________________ create function with this _________________________ DONE
list_im = ['output/original.jpg','output/sharpened.jpg','output/stretched.jpg','output/enhanced.jpg']
imgs = [ Image.open(i) for i in list_im ]
# pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)
min_shape = sorted( [(np.sum(i.size), i.size ) for i in imgs])[0][1]
imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in imgs])
# save that beautiful picture
imgs_comb = Image.fromarray( imgs_comb)
#imgs_comb.save( 'test_hor.jpg' )
# for a vertical stacking it is simple: use vstack
imgs_comb = np.vstack([np.asarray(i.resize(min_shape)) for i in imgs])
imgs_comb = Image.fromarray( imgs_comb)
#imgs_comb.save( 'test_ver.jpg' )
#_______________________________________________________________________
#%%
def concatenate(imgflnames): #file name, Image.fromarray for cv2 or numpy. Error: ValueError: cannot resize an array that references or is referenced
#by another array in this way.
#Use the np.resize function or refcheck=False
images = [cv2.imread(i) for i in imgflnames] #for loop one line for lists
print("\n", type(images), "\n")
print("lenght: ", len(images))
print("dimension 0: ", images[0].ndim)
print("dimension 1: ", images[1].ndim)
min_shape = sorted( [(np.sum(i.shape), i.shape ) for i in images])[0][1]
print(min_shape)
imgs_comb = np.hstack([np.asarray(cv2.resize(i, (min_shape[0], min_shape[1]))) for i in images])
#res = cv2.resize(img_np, dsize=(2048, 2048), interpolation=cv2.INTER_CUBIC)
imgs_comb = Image.fromarray( imgs_comb)
return imgs_comb
def concatenate2(imgflnames): #file name, dimensionality problem: all the input arrays must have same number of dimensions. Could be fix with a resize function
images = [Image.open(i) for i in imgflnames] #for loop one line for lists
print("\n", type(images), "\n")
print("lenght: ", len(images))
print("dimension 0: ", images[0].size)
print("dimension 1: ", images[1].size)
min_shape = sorted( [(np.sum(i.size), i.size ) for i in images])[0][1]
print(min_shape)
imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in images])
imgs_comb = Image.fromarray( imgs_comb)
return imgs_comb
#%%
list_im = ['output/enhancement/original.jpg','output/enhancement/enhanced.jpg']
imgs = [ Image.open(i) for i in list_im ]
# pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)
min_shape = sorted( [(np.sum(i.size), i.size ) for i in imgs])[0][1]
imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in imgs])
# save that beautiful picture
imgs_comb = Image.fromarray( imgs_comb)
plt.imshow(imgs_comb)
plt.show()
two = concatenate2(list_im)
plt.imshow(two)
plt.show()
#imgs_comb.save( 'orginal_final.jpg' )
#tools.augment_random(img, 20)
#augmented = tools.augment_simple(img)
#cv2.imwrite("output/chameleon.jpg", img)
#cv2.imwrite("output/flipped.jpg", augmented[0])
#cv2.imwrite("output/rolled.jpg", augmented[1])
#cv2.imwrite("output/rotated90.jpg", augmented[2])
#cv2.imwrite("output/rotated180.jpg", augmented[3])
#%% register_image(img)
#try with chameleon and rotate 27.5
#try resize again
img = cv2.imread('resized.jpg')
plt.imshow(img)
plt.show()
#img = cv2.imread('resized.jpg')
ref = tools.generate_template(img)
plt.imshow(ref)
plt.show()
cv2.imwrite('refresized.jpg',ref)
#ref = tools.generate_template(img, [255,0,0])
#plt.imshow(ref)
#plt.show()
#%%
type2_list = ['type2.jpg','reftype2.jpg','translation_type2.jpg','rigid_body_type2.jpg','scale_type2.jpg','affine_type2.jpg','bilatelar_type2.jpg']
resized_list = ['resized.jpg','align_and_crop_before.jpg','refresized.jpg','translation.jpg','rigid_body.jpg','scaled_rotation.jpg','affine.jpg','bilinear.jpg']
conc1 = tools.concatenate(type2_list, True)
plt.imshow(conc1)
plt.show()
conc2 = tools.concatenate(resized_list, True)
plt.imshow(conc2)
plt.show()
#%%
img_list = ['output/enhancement/original.jpg','output/enhancement/enhanced.jpg', 'bilinear_template.jpg'] #dimensionality problem
images = [Image.open(i) for i in img_list]
for i in images:
print (i.size)
print (type(i))
concatenated = concatenate(img_list)
plt.imshow(concatenated)
plt.show()
#%% concatenation test detailed
list_im1 = 'output/enhancement/original.jpg'
imgs_1 = Image.open(list_im1)
imgs2_1 = cv2.imread(list_im1)
print(imgs_1.size)
print("PIL Image type: ", type(imgs_1))
print(imgs2_1.shape)
print("CV2read imgs2 type: ", type(imgs2_1))
list_im2 = 'output/enhancement/enhanced.jpg'
imgs_2 = Image.open(list_im2)
imgs2_2 = cv2.imread(list_im2)
print("\n",imgs_2.size)
print("PIL Image type: ", type(imgs_2))
print(imgs2_2.shape)
print("CV2read imgs2 type: ", type(imgs2_2))
list_im3 = 'bilinear_template.jpg'
imgs_3 = Image.open(list_im3)
imgs2_3 = cv2.imread(list_im3)
print("\n",imgs_3.size)
print("PIL Image type: ", type(imgs_3))
print(imgs2_3.shape)
print("CV2read imgs2 type: ", type(imgs2_3))
result = tools.concatenate([list_im3, list_im2, list_im1])
plt.imshow(result)
plt.show()
#%%
#img_rotated = ndimage.rotate(img, 27)
#cv2.imwrite('output/rotated_chameleon27.jpg', img_rotated)
#transformations = tools.register_image(img, ref = 'bilinear_template.jpg') #best result so far
transformations = tools.register_image(img)
transformations = tools.register_image(img ,'solid')
transformations = tools.register_image(img, ref = 'bilinear_template.jpg') #homography function could have the same
#%%
cv2.imwrite('output/translation_resized_bilinear.jpg', transformations[0])
cv2.imwrite('output/rotation_resized_bilinear.jpg', transformations[1])
cv2.imwrite('output/scaled_rotation_resized_bilinear.jpg', transformations[2])
cv2.imwrite('output/affine_resized_bilinear.jpg', transformations[3])
cv2.imwrite('output/bilinear_resized_bilinear.jpg', transformations[4])
#%%
def random_color(low=5, high=250):
color = [np.random.randint(low, high), np.random.randint(low, high), np.random.randint(low, high)]
return color
#%%
def generate_template():
ref = np.zeros((img.shape[0], img.shape[1], 3), dtype='uint8')
# Autoencoder (using MLP and CNN) for fashion MNIST
# Based on
# https://github.com/ageron/handson-ml2/blob/master/17_autoencoders_and_gans.ipynb
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
figdir = "../figures"
def save_fig(fname): plt.savefig(os.path.join(figdir, fname))
import tensorflow as tf
from tensorflow import keras
from sklearn.manifold import TSNE
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()
X_train_full = X_train_full.astype(np.float32) / 255
X_test = X_test.astype(np.float32) / 255
X_train, X_valid = X_train_full[:-5000], X_train_full[-5000:]
y_train, y_valid = y_train_full[:-5000], y_train_full[-5000:]
def rounded_accuracy(y_true, y_pred):
return keras.metrics.binary_accuracy(tf.round(y_true), tf.round(y_pred))
tf.random.set_seed(42)
np.random.seed(42)
stacked_encoder = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(100, activation="selu"),
keras.layers.Dense(30, activation="selu"),
])
stacked_decoder = keras.models.Sequential([
keras.layers.Dense(100, activation="selu", input_shape=[30]),
keras.layers.Dense(28 * 28, activation="sigmoid"),
keras.layers.Reshape([28, 28])
])
stacked_ae = keras.models.Sequential([stacked_encoder, stacked_decoder])
stacked_ae.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(lr=1.5), metrics=[rounded_accuracy])
history = stacked_ae.fit(X_train, X_train, epochs=20,
validation_data=[X_valid, X_valid])
def plot_image(image):
plt.imshow(image, cmap="binary")
plt.axis("off")
def show_reconstructions(model, images=X_valid, n_images=5):
reconstructions = model.predict(images[:n_images])
plt.figure(figsize=(n_images * 1.5, 3))
for image_index in range(n_images):
plt.subplot(2, n_images, 1 + image_index)
plot_image(images[image_index])
plt.subplot(2, n_images, 1 + n_images + image_index)
plot_image(reconstructions[image_index])
show_reconstructions(stacked_ae)
save_fig("ae-mlp-fashion-recon.pdf")
plt.show()
# Visualize 2d manifold using tSNE
np.random.seed(42)
X_valid_compressed = stacked_encoder.predict(X_valid)
tsne = TSNE()
X_valid_2D = tsne.fit_transform(X_valid_compressed)
X_valid_2D = (X_valid_2D - X_valid_2D.min()) / (X_valid_2D.max() - X_valid_2D.min())
# adapted from https://scikit-learn.org/stable/auto_examples/manifold/plot_lle_digits.html
plt.figure(figsize=(10, 8))
cmap = plt.cm.tab10
plt.scatter(X_valid_2D[:, 0], X_valid_2D[:, 1], c=y_valid, s=10, cmap=cmap)
image_positions = np.array([[1., 1.]])
for index, position in enumerate(X_valid_2D):
dist = np.sum((position - image_positions) ** 2, axis=1)
if np.min(dist) > 0.02: # if far enough from other images
image_positions = np.r_[image_positions, [position]]
imagebox = mpl.offsetbox.AnnotationBbox(
mpl.offsetbox.OffsetImage(X_valid[index], cmap="binary"),
position, bboxprops={"edgecolor": cmap(y_valid[index]), "lw": 2})
plt.gca().add_artist(imagebox)
plt.axis("off")
save_fig("ae-mlp-fashion-tsne.pdf")
plt.show()
# Tied weight version
class DenseTranspose(keras.layers.Layer):
def __init__(self, dense, activation=None, **kwargs):
self.dense = dense
self.activation = keras.activations.get(activation)
super().__init__(**kwargs)
def build(self, batch_input_shape):
self.biases = self.add_weight(name="bias",
shape=[self.dense.input_shape[-1]],
initializer="zeros")
super().build(batch_input_shape)
def call(self, inputs):
z = tf.matmul(inputs, self.dense.weights[0], transpose_b=True)
return self.activation(z + self.biases)
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
dense_1 = keras.layers.Dense(100, activation="selu")
dense_2 = keras.layers.Dense(30, activation="selu")
tied_encoder = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
dense_1,
dense_2
])
tied_decoder = keras.models.Sequential([
DenseTranspose(dense_2, activation="selu"),
DenseTranspose(dense_1, activation="sigmoid"),
keras.layers.Reshape([28, 28])
])
tied_ae = keras.models.Sequential([tied_encoder, tied_decoder])
tied_ae.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(lr=1.5), metrics=[rounded_accuracy])
history = tied_ae.fit(X_train, X_train, epochs=10,
validation_data=[X_valid, X_valid])
show_reconstructions(tied_ae)
plt.show()
# Convolutional version (very slow unless you use a GPU)
tf.random.set_seed(42)
np.random.seed(42)
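# Hypothetical sketch (assumption, not the original notebook code): a small
# convolutional autoencoder in the same Keras style as the stacked MLP version
# above, trained on the same 28x28 fashion-MNIST images.
conv_encoder = keras.models.Sequential([
    keras.layers.Reshape([28, 28, 1], input_shape=[28, 28]),
    keras.layers.Conv2D(16, kernel_size=3, padding="same", activation="selu"),
    keras.layers.MaxPool2D(pool_size=2),   # 28x28 -> 14x14
    keras.layers.Conv2D(32, kernel_size=3, padding="same", activation="selu"),
    keras.layers.MaxPool2D(pool_size=2),   # 14x14 -> 7x7
])
conv_decoder = keras.models.Sequential([
    keras.layers.Conv2DTranspose(16, kernel_size=3, strides=2, padding="same",
                                 activation="selu", input_shape=[7, 7, 32]),
    keras.layers.Conv2DTranspose(1, kernel_size=3, strides=2, padding="same",
                                 activation="sigmoid"),
    keras.layers.Reshape([28, 28])
])
conv_ae = keras.models.Sequential([conv_encoder, conv_decoder])
conv_ae.compile(loss="binary_crossentropy", optimizer=keras.optimizers.SGD(lr=1.0),
                metrics=[rounded_accuracy])
history = conv_ae.fit(X_train, X_train, epochs=5,
                      validation_data=(X_valid, X_valid))
show_reconstructions(conv_ae)
plt.show()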
'''
by Einsbon (<NAME>)
- GitHub: https://github.com/Einsbon
- Youtube: https://www.youtube.com/channel/UCt7FZ-8uzV_jHJiKp3NlHvg
- Blog: https://blog.naver.com/einsbon
Robot's forward is direction X +
left is Y+
up is Z +
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import math
class WalkGenerator():
def __init__(self):
# 0 1 2 3 4 5
self._motorDirectionRight = np.array([+1, +1, +1, +1, +1, +1])
self._motorDirectionLeft = np.array([+1, +1, +1, +1, +1, +1])
'''
self._walkPoint0 = 0 # double support. point of the landed foot
self._walkPoint1 = 0 # single support. point of the supporting foot
self._walkPoint2 = 0 # double support. point of the foot to lift.
self._walkPoint3 = 0 # single support. point of the swinging foot
'''
'''
self.walkPointStartRightstepRightLeg = 0 # Step forward with the right foot first; the right foot at that moment.
self.walkPointStartRightstepLeftLeg = 0 # Step forward with the right foot first; the left foot at that moment.
self.walkPointStartLeftstepRightLeg = 0 # Step forward with the left foot first; the right foot at that moment.
self.walkPointStartLeftstepLeftLeg = 0 # Step forward with the left foot first; the left foot at that moment.
self.walkPointEndRightstepRightLeg = 0 # Ends by planting the right foot; the right foot at that moment.
self.walkPointEndLeftstepRightLeg = 0 # Ends by planting the right foot; the left foot at that moment.
self.walkPointEndLeftstepLeftLeg = 0
self.walkPointEndRightstepLeftLeg = 0
self.walkPointRightStepRightLeg = 0
self.walkPointLeftStepRightLeg = 0
self.walkPointRightStepLeftLeg = 0
self.walkPointLeftStepLeftLeg = 0
self.walkAnglesWalkingRight = 0
self.walkAnglesWalkingLeft = 0
self.walkAnglesStartRight = 0 # Sway to the left, then step forward with the right foot first.
self.walkAnglesStartLeft = 0 # Sway to the right, then step forward with the left foot first.
self.walkAnglesEndRight = 0 # Ends by planting the right foot.
self.walkAnglesEndLeft = 0 # Ends by planting the left foot.
self.turnListUnfold = 0
self.turnListFold = 0
'''
# Robot dimension settings. Length unit: mm
self._pelvic_interval = 70.5
self._legUp_length = 110
self._legDown_length = 110
self._footJoint_to_bottom = 45
'''
self._bodyMovePoint = 0
self._legMovePoint = 0
self._h = 0
self._l = 0
self._sit = 0
self._swayBody = 0
self._swayFoot = 0
self._swayShift = 0
self._liftPush = 0
self._landPull = 0
self._stepTime = 0
self._bodyPositionXPlus = 0
self._damping = 0
self._incline = incline
'''
def setRobotParameter(self, pelvic_interval, leg_up_length, leg_down_length, foot_to_grount, foot_to_heel, foot_to_toe):
pass
def setWalkParameter(self, bodyMovePoint, legMovePoint, height, stride, sit, swayBody, swayFoot, bodyPositionForwardPlus, swayShift, liftPush=0.4, landPull=0.6, timeStep=0.1, damping=0, incline=0):
self._bodyMovePoint = bodyMovePoint # the number of point when two feet are landed
self._legMovePoint = legMovePoint # the number of point when lift one foot
self._h = height # foot lift height
self._l = stride # stride length
self._sit = sit # sit height. increase this will make leg more fold. too high or too low makes an error
self._swayBody = swayBody # body sway length
self._swayFoot = swayFoot # foot sway length. 0 -> feet move straight forward. plus this make floating leg spread.(increase gap between feet)
self._swayShift = swayShift # start point of sway
self._liftPush = liftPush # push the lifting foot backward when lifting the foot to gains momentum.
self._landPull = landPull # Before put the foot down, go forward more and pull back when landing.
self._timeStep = timeStep # simulation timeStep
self._bodyPositionXPlus = bodyPositionForwardPlus # plus this makes the body forward
self._damping = damping # damping at the start and end of foot lift.
self._incline = incline # tangent angle of incline
self._stepPoint = bodyMovePoint + legMovePoint
def generate(self):
walkPoint = self._bodyMovePoint * 2 + self._legMovePoint * 2
trajectoryLength = self._l * (2 * self._bodyMovePoint + self._legMovePoint) / (self._bodyMovePoint + self._legMovePoint)
walkPoint0 = np.zeros((3, self._bodyMovePoint))
walkPoint1 = np.zeros((3, self._legMovePoint))
walkPoint2 = np.zeros((3, self._bodyMovePoint))
walkPoint3 = np.zeros((3, self._legMovePoint))
self.walkPointStartRightstepRightLeg = np.zeros((3, self._bodyMovePoint + self._legMovePoint))
self.walkPointStartLeftstepRightLeg = np.zeros((3, self._bodyMovePoint + self._legMovePoint))
self.walkPointEndRightstepRightLeg = np.zeros((3, self._bodyMovePoint + self._legMovePoint))
self.walkPointEndLeftstepRightLeg = np.zeros((3, self._bodyMovePoint + self._legMovePoint))
# walking motion
for i in range(self._bodyMovePoint):
t = (i + 1) / (walkPoint - self._legMovePoint)
walkPoint0[0][i] = -trajectoryLength * (t - 0.5)
walkPoint0[2][i] = self._sit
walkPoint0[1][i] = self._swayBody * math.sin(2 * math.pi * ((i + 1 - self._swayShift) / walkPoint))
for i in range(self._legMovePoint):
t = (i + 1 + self._bodyMovePoint) / (walkPoint - self._legMovePoint)
walkPoint1[0][i] = -trajectoryLength * (t - 0.5)
walkPoint1[2][i] = self._sit
walkPoint1[1][i] = self._swayBody * math.sin(2 * math.pi * ((i + 1 + self._bodyMovePoint - self._swayShift) / walkPoint))
for i in range(self._bodyMovePoint):
t = (i + 1 + self._bodyMovePoint + self._legMovePoint) / (walkPoint - self._legMovePoint)
walkPoint2[0][i] = -trajectoryLength * (t - 0.5)
walkPoint2[2][i] = self._sit
walkPoint2[1][i] = self._swayBody * math.sin(2 * math.pi * ((i + 1 + self._bodyMovePoint + self._legMovePoint - self._swayShift) / walkPoint))
for i in range(self._legMovePoint):
t = (i + 1) / self._legMovePoint
sin_tpi = math.sin(t * math.pi)
walkPoint3[0][i] = (2 * t - 1 + (1 - t) * self._liftPush * -sin_tpi + t * self._landPull * sin_tpi) * trajectoryLength / 2
walkPoint3[2][i] = math.sin(t * math.pi) * self._h + self._sit
walkPoint3[1][i] = math.sin(t * math.pi) * self._swayFoot + self._swayBody * math.sin(2 * math.pi * ((i + 1 + walkPoint - self._legMovePoint - self._swayShift) / walkPoint))
# starting motion
for i in range(self._bodyMovePoint - self._swayShift):
t = (i + 1) / self._bodyMovePoint
self.walkPointStartRightstepRightLeg[0][i] = 0
self.walkPointStartRightstepRightLeg[2][i] = self._sit
self.walkPointStartLeftstepRightLeg[0][i] = 0
self.walkPointStartLeftstepRightLeg[2][i] = self._sit
for i in range(self._legMovePoint):
t = (i + 1) / self._legMovePoint
t2 = (i + 1) / (self._legMovePoint + self._swayShift)
sin_tpi = math.sin(t * math.pi)
self.walkPointStartRightstepRightLeg[2][i + self._bodyMovePoint - self._swayShift] = math.sin(t * math.pi) * self._h + self._sit
self.walkPointStartRightstepRightLeg[0][i + self._bodyMovePoint - self._swayShift] = (2 * t + (1 - t) * self._liftPush * -sin_tpi + t * self._landPull * sin_tpi) * trajectoryLength / 4
self.walkPointStartLeftstepRightLeg[0][i + self._bodyMovePoint - self._swayShift] = (math.cos(t2 * math.pi / 2) - 1) * trajectoryLength * self._legMovePoint / (self._bodyMovePoint * 2 + self._legMovePoint) / 2
self.walkPointStartLeftstepRightLeg[0][i + self._bodyMovePoint - self._swayShift] = (math.cos(t2 * math.pi / 2) - 1) * trajectoryLength * ((self._swayShift + self._bodyMovePoint + self._legMovePoint) / (self._bodyMovePoint * 2 + self._legMovePoint) - 0.5)
self.walkPointStartLeftstepRightLeg[2][i + self._bodyMovePoint - self._swayShift] = self._sit
for i in range(self._swayShift):
t2 = (i + 1 + self._legMovePoint) / (self._legMovePoint + self._swayShift)
self.walkPointStartRightstepRightLeg[0][i + self._legMovePoint + self._bodyMovePoint - self._swayShift] = -trajectoryLength * ((i + 1) / (walkPoint - self._legMovePoint) - 0.5)
self.walkPointStartRightstepRightLeg[2][i + self._legMovePoint + self._bodyMovePoint - self._swayShift] = self._sit
self.walkPointStartLeftstepRightLeg[0][i + self._legMovePoint + self._bodyMovePoint - self._swayShift] = -trajectoryLength * ((i + 1 + self._bodyMovePoint + self._legMovePoint) / (walkPoint - self._legMovePoint) - 0.5)
self.walkPointStartLeftstepRightLeg[0][i + self._legMovePoint + self._bodyMovePoint - self._swayShift] = (math.cos(t2 * math.pi / 2) - 1) * trajectoryLength * ((self._swayShift + self._bodyMovePoint + self._legMovePoint) / (self._bodyMovePoint * 2 + self._legMovePoint) - 0.5)
self.walkPointStartLeftstepRightLeg[2][i + self._legMovePoint + self._bodyMovePoint - self._swayShift] = self._sit
for i in range(self._bodyMovePoint + self._legMovePoint):
t = (i + 1) / (self._bodyMovePoint + self._legMovePoint)
#self.walkPointStartRightstepRightLeg[1][i] = -self._swayBody * math.sin(t*math.pi) * math.sin(t*math.pi)
#self.walkPointStartLeftstepRightLeg[1][i] = self._swayBody * math.sin(t*math.pi) * math.sin(t*math.pi)
if t < 1 / 4:
self.walkPointStartRightstepRightLeg[1][i] = -self._swayBody * (math.sin(t * math.pi) - (1 - math.sin(math.pi * 2 * t)) * (math.sin(4 * t * math.pi) / 4))
self.walkPointStartLeftstepRightLeg[1][i] = self._swayBody * (math.sin(t * math.pi) - (1 - math.sin(math.pi * 2 * t)) * (math.sin(4 * t * math.pi) / 4))
else:
self.walkPointStartRightstepRightLeg[1][i] = -self._swayBody * math.sin(t * math.pi)
self.walkPointStartLeftstepRightLeg[1][i] = self._swayBody * math.sin(t * math.pi)
# ending motion. The left foot lifts, but both trajectories are expressed relative to the right leg.
for i in range(self._bodyMovePoint - self._swayShift):
self.walkPointEndLeftstepRightLeg[0][i] = -trajectoryLength * \
((i+1+self._swayShift)/(walkPoint-self._legMovePoint)-0.5)
self.walkPointEndLeftstepRightLeg[2][i] = self._sit
self.walkPointEndRightstepRightLeg[0][i] = -trajectoryLength * \
((i + 1 + self._swayShift + self._bodyMovePoint+self._legMovePoint)/(walkPoint-self._legMovePoint)-0.5)
self.walkPointEndRightstepRightLeg[2][i] = self._sit
for i in range(self._legMovePoint):
t = (i + 1) / self._legMovePoint
sin_tpi = math.sin(t * math.pi)
self.walkPointEndLeftstepRightLeg[0][i + self._bodyMovePoint - self._swayShift] = (math.sin(t * math.pi / 2) - 1) * trajectoryLength * ((self._bodyMovePoint) / (self._bodyMovePoint * 2 + self._legMovePoint) - 0.5)
self.walkPointEndLeftstepRightLeg[2][i + self._bodyMovePoint - self._swayShift] = self._sit
self.walkPointEndRightstepRightLeg[0][i + self._bodyMovePoint - self._swayShift] = (2 * t - 2 + (1 - t) * self._liftPush * -sin_tpi + t * self._landPull * sin_tpi) * trajectoryLength / 4
self.walkPointEndRightstepRightLeg[2][i + self._bodyMovePoint - self._swayShift] = math.sin(t * math.pi) * self._h + self._sit
for i in range(self._swayShift):
self.walkPointEndLeftstepRightLeg[0][i + self._bodyMovePoint + self._legMovePoint - self._swayShift] = 0
self.walkPointEndLeftstepRightLeg[2][i + self._bodyMovePoint + self._legMovePoint - self._swayShift] = self._sit
self.walkPointEndRightstepRightLeg[0][i + self._bodyMovePoint + self._legMovePoint - self._swayShift] = 0
self.walkPointEndRightstepRightLeg[2][i + self._bodyMovePoint + self._legMovePoint - self._swayShift] = self._sit
# turn
self.turnListUnfold = np.zeros((self._bodyMovePoint + self._legMovePoint, 12))
self.turnListFold = np.zeros((self._bodyMovePoint + self._legMovePoint, 12))
turnAngle = np.zeros(self._bodyMovePoint + self._legMovePoint)
for i in range(self._legMovePoint):
t = (i + 1) / self._legMovePoint
turnAngle[self._bodyMovePoint - self._swayShift + i] = (1 - math.cos(math.pi * t)) / 4
for i in range(self._swayShift):
turnAngle[self._bodyMovePoint + self._legMovePoint - self._swayShift + i] = 1 / 2
for i in range(self._bodyMovePoint + self._legMovePoint):
self.turnListUnfold[i] = [-turnAngle[i], 0, 0, 0, 0, 0, +turnAngle[i], 0, 0, 0, 0, 0]
self.turnListFold[i] = [-0.5 + turnAngle[i], 0, 0, 0, 0, 0, +0.5 - turnAngle[i], 0, 0, 0, 0, 0]
for i in range(self._bodyMovePoint + self._legMovePoint):
t = 1 - (i + 1) / (self._bodyMovePoint + self._legMovePoint)
if t < 1 / 4:
self.walkPointEndLeftstepRightLeg[1][i] = self._swayBody * (math.sin(t * math.pi) - (1 - math.sin(math.pi * 2 * t)) * (math.sin(4 * t * math.pi) / 4))
self.walkPointEndRightstepRightLeg[1][i] = -self._swayBody * (math.sin(t * math.pi) - (1 - math.sin(math.pi * 2 * t)) * (math.sin(4 * t * math.pi) / 4))
else:
self.walkPointEndLeftstepRightLeg[1][i] = self._swayBody * math.sin(t * math.pi)
self.walkPointEndRightstepRightLeg[1][i] = -self._swayBody * math.sin(t * math.pi)
# Adjustment of the additional parameters
if self._incline != 0: # incline (slope). Used on stairs, etc.
walkPoint0[2] = walkPoint0[2] + walkPoint0[0] * self._incline
walkPoint1[2] = walkPoint1[2] + walkPoint1[0] * self._incline
walkPoint2[2] = walkPoint2[2] + walkPoint2[0] * self._incline
walkPoint3[2] = walkPoint3[2] + walkPoint3[0] * self._incline
self.walkPointStartRightstepRightLeg[2] = self.walkPointStartRightstepRightLeg[2] + self.walkPointStartRightstepRightLeg[0] * self._incline
self.walkPointStartLeftstepRightLeg[2] = self.walkPointStartLeftstepRightLeg[2] + self.walkPointStartLeftstepRightLeg[0] * self._incline
self.walkPointEndLeftstepRightLeg[2] = self.walkPointEndLeftstepRightLeg[2] + self.walkPointEndLeftstepRightLeg[0] * self._incline
self.walkPointEndRightstepRightLeg[2] = self.walkPointEndRightstepRightLeg[2] + self.walkPointEndRightstepRightLeg[0] * self._incline
if self._bodyPositionXPlus != 0: # adjust the fore/aft position of the waist
walkPoint0[0] = walkPoint0[0] - self._bodyPositionXPlus
walkPoint1[0] = walkPoint1[0] - self._bodyPositionXPlus
walkPoint2[0] = walkPoint2[0] - self._bodyPositionXPlus
walkPoint3[0] = walkPoint3[0] - self._bodyPositionXPlus
self.walkPointStartRightstepRightLeg[0] = self.walkPointStartRightstepRightLeg[0] - self._bodyPositionXPlus
self.walkPointStartLeftstepRightLeg[0] = self.walkPointStartLeftstepRightLeg[0] - self._bodyPositionXPlus
self.walkPointEndLeftstepRightLeg[0] = self.walkPointEndLeftstepRightLeg[0] - self._bodyPositionXPlus
self.walkPointEndRightstepRightLeg[0] = self.walkPointEndRightstepRightLeg[0] - self._bodyPositionXPlus
if self._damping != 0: # damping adjustment
dampHeight = (walkPoint3[2][-1] - walkPoint0[2][0]) / 2
walkPoint0[2][0] = walkPoint0[2][0] + dampHeight * self._damping
walkPoint2[2][0] = walkPoint2[2][0] - dampHeight * self._damping
self._walkPoint0 = walkPoint0
self._walkPoint1 = walkPoint1
self._walkPoint2 = walkPoint2
self._walkPoint3 = walkPoint3
self.walkPointLeftStepRightLeg = np.column_stack([walkPoint0[:, self._swayShift:], walkPoint1, walkPoint2[:, :self._swayShift]])
from ..core import ctll, satellite, instrument
from ..targets.targets import Target
from ..utils import trigsf
from itertools import groupby
import collections.abc
from collections.abc import Iterable
from astropy import units as u
from poliastro.twobody import propagation
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
try:
from functools import cached_property # type: ignore
except ImportError:
from cached_property import cached_property # type: ignore
TOL = 1e-10
def get_view(lons, lats, r, target, R):
"""Get string indicating in view considering maximum
field of view possible, i.e. elevation = 0
Parameters
----------
lons : ~astropy.units.quantity.Quantity
longitudes in radians
lats : ~astropy.units.quantity.Quantity
latitudes in radians
r : ~astropy.units.quantity.Quantity
position, distance
target : CtllDes.targets.targets.Target
desired target
R : ~astropy.units.quantity.Quantity
attractors mean distance
"""
t_lon = (target.x*u.deg).to(u.rad)
t_lat = (target.y*u.deg).to(u.rad)
lams_0 = trigsf.get_lam_0(r,R)
angles = trigsf.get_angles(lons, lats, t_lon, t_lat)
cov = []
for a,l in zip(angles,lams_0):
if a < l:
cov.append(1)
else:
cov.append(0)
return cov
def _push_broom(FOV_vert, lons, lats, r, target, R):
view = get_view(lons, lats, r, target, R)
cov = []
for i in range(1,len(view)):
if view[i] == 1:
radii = np.sqrt(np.sum(r[i]**2))
t_lon = (target.x*u.deg).to(u.rad)-lons[i]
t_lat = (target.y*u.deg).to(u.rad)-lats[i]
inc = np.arctan2(np.sin(lons[i]-lons[i-1])*np.sin(lats[i]-lats[i-1]),np.cos(lats[i-1]))
cos_inc = np.cos(inc)
sin_inc = np.sin(inc)
angle = trigsf.get_angles(0,0,lons[i]-lons[i-1],lats[i]-lats[i-1])
t_lon = np.arctan2(cos_inc*np.sin(t_lon)*np.sin(t_lat)+sin_inc*np.cos(t_lat),
np.cos(t_lon)*np.sin(t_lat))
t_lat = np.arccos(-sin_inc*np.sin(t_lon)*np.sin(t_lat)+cos_inc*np.cos(t_lat))
lam = trigsf.get_lam(r[i],FOV_vert,R)
if t_lon < angle/2 and t_lat < lam:
cov.append(1)
else:
cov.append(0)
else:
cov.append(0)
pass
return cov
def push_broom(FOV_vert, lons, lats, r, target, R):
view = get_view(lons, lats, r, target, R)
cov = []
t_lon = (target.x*u.deg).to(u.rad)
t_lat = (target.y*u.deg).to(u.rad)
for i in range(1,len(view)):
if view[i] == 1:
lam = trigsf.get_lam(r[i],FOV_vert,R)
angle_target = trigsf.get_angles(lons[i],lats[i],t_lon,t_lat)
if angle_target > lam:
cov.append(0)
else:
# r_ant = np.linalg.norm(r[i-1])*np.array([np.sin(lats[i-1])*np.cos(lons[i-1]),
# np.sin(lats[i-1])*np.sin(lons[i-1]), np.cos(lats[i-1])])
# r_ = np.linalg.norm(r[i])*np.array([np.sin(lats[i])*np.cos(lons[i]),
# np.sin(lats[i])*np.sin(lons[i]), np.cos(lats[i])])
# r_post = np.linalg.norm(r[i+1])*np.array([np.sin(lats[i+1])*np.cos(lons[i+1]),
# np.sin(lats[i+1])*np.sin(lons[i+1]), np.cos(lats[i+1])])
# mid_point_ant = (r_ant + r_)/np.linalg.norm(r_ant + r_)*R
# r_ant, lat_ant, lon_ant = trigsf.c2s(mid_point_ant[0],
# mid_point_ant[1], mid_point_ant[2])
# mid_point_post = (r_post + r_)/np.linalg.norm(r_post + r_)*R
# r_post, lat_post, lon_post = trigsf.c2s(mid_point_post[0],
# mid_point_post[1], mid_point_post[2])
angle = trigsf.get_angles(lons[i-1],lats[i-1], lons[i], lats[i])
angle_ant = trigsf.get_angles(lons[i-1],lats[i-1], t_lon, t_lat)
angle_post = trigsf.get_angles(lons[i], lats[i], t_lon, t_lat)
A, B, C = trigsf.SSS(angle.value, angle_post.value, angle_ant.value)
if (B < np.pi/2) and (C < np.pi/2):
cov.append(1)
else:
cov.append(0)
else:
cov.append(0)
return cov
def symmetric_with_roll(FOV, lons, lats, r, v, target, R, roll_angle = 0):
"""coverage method
This coverage method, is symmetric with the roll capabilities.
It is just a potential coverage obtained by stipulating the new field of
view, obtained by the roll angle in any direction. Perpendicular to velocity
rolls are not taken into account since, increase in coverage from this
analysis are restricted to a few seconds of the satellite passing.
"""
radiis = np.linalg.norm(r,axis=1)
lams_0 = np.arccos(R/radiis)
max_etas = np.arcsin(R/radiis)
lams = trigsf.get_lam(r,FOV+roll_angle,R)
final_lams = np.array([ lam_0 if (max_eta < (FOV + roll_angle).value) else lam
for lam_0,max_eta,lam in zip(lams_0.value, max_etas.value, lams.value) ])*u.rad
angles = trigsf.get_angles(lons,lats,(target.x*u.deg).to(u.rad),
(target.y*u.deg).to(u.rad))
cov = []
for angle,lam in zip(angles,final_lams):
if angle <= lam:
cov.append(1)
else:
cov.append(0)
return cov
pass
def symmetric_disk(FOV_min,FOV_max,lons,lats,r,target,R):
"""coverage method.
Disk of coverage centered on subsatellite point.
Parameters
----------
FOV_min : ~astropy.units.quantity.Quantity
minimum field of view in radians
FOV_max : ~astropy.units.quantity.Quantity
maximum field of view in radians
* : default coverage parameters
help(CtllDes.request.coverage.Instrument.coverage) for more
info.
"""
#HACK: there were 2 options, do this nasty thing here
#or in targets.py I had to make that call.
if FOV_max < FOV_min:
raise ValueError("Wrong FOV ordering")
lams_min = trigsf.get_lam(r,FOV_min,R)
lams_max = trigsf.get_lam(r,FOV_max,R)
angles = trigsf.get_angles(lons,lats,(target.x*u.deg).to(u.rad),
(target.y*u.deg).to(u.rad))
cov = []
for angle,lam_min,lam_max in zip(angles,lams_min,lams_max):
if lam_min <= angle <= lam_max:
cov.append(1)
else:
cov.append(0)
return cov
def symmetric(FOV,lons,lats,r,v,target,R):
"""Circle of coverage centered on ssp"""
#HACK: there were 2 options, do this nasty thing here
#or in targets.py I had to make that call.
lams = trigsf.get_lam(r,FOV,R)
angles = trigsf.get_angles(lons,lats,(target.x*u.deg).to(u.rad),
(target.y*u.deg).to(u.rad))
cov = []
for angle,lam in zip(angles,lams):
if angle <= lam:
cov.append(1)
else:
cov.append(0)
return cov
COV_METHODS = [symmetric, symmetric_disk, symmetric_with_roll]
class Coverages(collections.abc.Set):
"""Container for Coverage objects. Frame where you can
get all the data from the coverage analysis. It is meant
to be created with the classmethods.
"""
def __init__(self,covs,tag=None):
self._covs = lst = list()
self._tag = tag if tag else "No Tag"
if isinstance(covs,Coverage):
lst.append(covs)
elif not isinstance(covs,Iterable):
raise TypeError("covs must be Coverage, or iterable collection of Coverage objects")
else:
for cov in covs:
if not isinstance(cov,Coverage):
raise TypeError("covs must be a collection of Coverage objects")
if cov not in lst:
lst.append(cov)
self._targets = {(covv.target.lon,covv.target.lat) for covv in self.covs}
self._sats_id = {covv.sat_id for covv in self.covs }
@property
def covs(self):
return self._covs
@property
def targets(self):
tgts = [Target(ll[0],ll[1]) for ll in self._targets]
return tgts
@property
def sats_id(self):
return self._sats_id
def __iter__(self):
return iter(self.covs)
def __contains__(self,value):
return value in self.covs
def __len__(self):
return len(self.covs)
def __str__(self):
return self._tag
def filter_by_sun_angle(self, thresh):
"""Return new Coverages with
Parameters
----------
threshold : float
maximum sun angle to filter
Returns
-------
cov : ~CtllDes.requests.coverage.Coverages
New Coverages object filtered by sun angle
"""
return Coverages([cov.filter_by_sun_angle(thresh) for cov in self.covs],
tag=self._tag)
def to_df(self):
"""Returns dataframe with all the merit figures
form the Coverage contained.
"""
df = pd.DataFrame(columns=['T',
'dt',
'Satellite ID',
'Target',
'accumulated',
'mean gap light',
'mean gap dark',
'response time',
'average time gap',
'max gap',
'sun angle'])
data = [{'T': cov.T,
'dt': cov.dt,
'Satellite ID': cov.sat_id,
'Target':(cov.target.lon,cov.target.lat),
'accumulated': cov.accumulated,
'mean gap light': cov.mean_gap_light,
'mean gap dark': cov.mean_gap_dark,
'response time': cov.response_time,
'average time gap': cov.avg_time_gap,
'max gap': cov.max_gap,
'sun angle': cov.sun_angles} for cov in self.covs]
df = df.append(data,ignore_index=True)
return df
@classmethod
def from_ctll(cls,ctll,targets,T,dt=1.,r_sun=None, lon_offset=0, verbose=False, **kwargs):
"""Get coverages from constellation.
Parameters
----------
ctll : CtllDes.core.ctll.Ctll
CtllDes constellation object
targets : CtllDes.targets.targets.Targets
Desired targets to build coverages
T : float | int
Desired time of coverage in days
dt : float | int, optional
time of sampling in seconds
r_sun : ~astropy.units.quantity.Quantity
sun position vector, must coincide with T and dt
lon_offset : ~astropy.units.quantity.Quantity
longitude offset
verbose : boolean
Print progress
Returns
-------
Coverages object containing only the coverage from
instruments that have _coverage function implemented.
"""
ctll_covs = []
for sat in ctll.sats:
if verbose:
print(f'Satellite {ctll.sats.index(sat)+1} of {ctll.N}')
try:
sat_covs = Coverages.from_sat(sat,targets,T,dt,r_sun=r_sun, lon_offset=lon_offset, f=True,verbose=verbose, **kwargs)
except Exception as e:
print(e)
pass
else:
ctll_covs += sat_covs
if not len(ctll_covs):
raise Exception("Constellation has no Coverage Instruments")
return Coverages(ctll_covs,tag=ctll.__str__())
@classmethod
def from_sat(cls,sat, targets,T, dt=1., r_sun=None, lon_offset=0, f=False, verbose=False, **kwargs):
"""Build list of coverage objects from satellite
Parameters
----------
sat : ~CtllDes.core.sat
sat object
targets : ~CtllDes.targets.targets.Targets
Desired targets of coverage
T : float
Desired Time of analysis in days.
dt : float | int, optional
time of sampling in seconds
r_sun : ~astropy.units.quantity.Quantity
sun position vector, must coincide with T and dt
lon_offset : ~astropy.units.quantity.Quantity
longitude offset
verbose : boolean
Print progress
f : boolean
Not to be modfied from default state
Returns
-------
Coverages : list
List of Coverage objects, one for each target and
coverage instrument.
Not the best practice to return two different types,
depending on internal or external use.
but since the use of this library is quite reduced. This
will be fixed later.
"""
if isinstance(targets,Target):
targets = [targets]
cov_instruments = [instr for instr in sat.cov_instruments]
if not len(cov_instruments):
raise Exception("No coverage instruments found on" +
f" satID : {sat.id}")
r,v = sat.rv(T,dt,**kwargs)
lons,lats = sat.ssps_from_r(r,T,dt,lon_offset=lon_offset, **kwargs)
if r_sun != None:
dot = np.einsum('ij,ij->i',r.value,r_sun.value)
norm_r = np.linalg.norm(r.value, axis=1)
"""
Holds auxiliary functions used by eccentricity based Lambert's problem
solvers. The majority of the routines hosted within this module were directly
taken from the following publications [1] [2] [3].
[1] <NAME>. (2008). A simple Lambert algorithm. Journal of guidance,
control, and dynamics, 31(6), 1587-1594.
[2] <NAME>., <NAME>., & <NAME>. (2010). Multiple-revolution solutions of the
transverse-eccentricity-based Lambert problem. Journal of guidance,
control, and dynamics, 33(1), 265-269.
[3] <NAME>, <NAME>, and <NAME>. "Derivative analysis and
algorithm modification of transverse-eccentricity-based Lambert problem."
Journal of Guidance, Control, and Dynamics 37.4 (2014): 1195-1201.
"""
import numpy as np
from numpy import cross
from numpy.linalg import norm
from lamberthub.utils.angles import (
get_orbit_inc_and_raan_from_position_vectors,
get_orbit_normal_vector,
get_transfer_angle,
)
from lamberthub.utils.assertions import assert_transfer_angle_not_zero
from lamberthub.utils.kepler import kepler_from_nu
def get_geometry(r1, r2, prograde):
"""
Computes associated problem geometry.
Parameters
-----------
r1: np.array
Initial position vector.
r2: np.array
Final position vector.
prograde: bool
If True, assumes prograde motion, otherwise retrograde is applied.
Returns
-------
r1_norm: float
Norm of the initial position vector.
r1_norm: float
Norm of the final position vector.
c_norm: float
Norm of the chord vector.
dtheta: float
Transfer angle.
w_c: float
Angle between initial position and chord vectors.
"""
# Solve for the norms
r1_norm, r2_norm, c_norm = [np.linalg.norm(vec) for vec in [r1, r2, (r2 - r1)]]
# Compute angles
dtheta = get_transfer_angle(r1, r2, prograde)
assert_transfer_angle_not_zero(dtheta)
w_c = get_transfer_angle(r1, (r2 - r1), prograde)
return r1_norm, r2_norm, c_norm, dtheta, w_c
def get_eccF(r1_norm, r2_norm, c_norm):
"""
Computes the eccentricity component along the chord. This value is kept
constant for all the problem as long as the boundary conditons are not
changed.
Parameters
----------
r1_norm: float
Norm of the initial vector position.
r2_norm: float
Norm of the final vector position.
c_norm: float
Norm of the chord vector.
Returns
-------
ecc_F: float
Eccentricity component along the chord direction.
Notes
-----
Equation (3) from Avanzini's report [1].
"""
ecc_F = (r1_norm - r2_norm) / c_norm
return ecc_F
def get_aF(r1_norm, r2_norm):
"""
Computes the semi-major axis of the fundamental ellipse. This value is
kept constant for all the problem as long as the boundary conditions are not
changed.
Parameters
----------
r1_norm: float
Norm of the initial vector position.
r2_norm: float
Norm of the final vector position.
Returns
-------
a_F: float
Semi-major axis of the fundamental ellipse.
Notes
-----
No labeled equation (appears between [3] and [4]) from Avanzini's report
[1].
"""
a_F = (r1_norm + r2_norm) / 2
return a_F
def get_pF(a_F, ecc_F):
"""
Computes the orbital parameter (semi-latus) rectum of the fundamental
ellipse. This value is kept constant for all the problem as long as the
boundary conditions are not changed.
Parameters
----------
a_F: float
Semi-major axis of the fundamental ellipse.
ecc_F: float
Eccentricity of the fundamental ellipse.
Returns
-------
p_F: float
Orbital parameter / semi-latus rectum of the fundamental ellipse.
Notes
-----
No labeled equation (appears between [3] and [4]) from Avanzini's report
"""
p_F = a_F * (1 - ecc_F ** 2)
return p_F
def get_fundamental_ellipse_properties(r1_norm, r2_norm, c_norm):
"""
Computes the fundamental ellipse properties. Those are the eccentricity,
semi-major axis and the orbital parameter.
Parameters
----------
r1_norm: float
Norm of the initial vector position.
r2_norm: float
Norm of the final vector position.
c_norm: float
Norm of the chord vector.
Returns
-------
ecc_F: float
Eccentricity component along the chord direction.
a_F: float
Semi-major axis of the fundamental ellipse.
p_F: float
Orbital parameter / semi-latus rectum of the fundamental ellipse.
"""
# Compute the fundamental ellipse parameters
ecc_F = get_eccF(r1_norm, r2_norm, c_norm)
a_F = get_aF(r1_norm, r2_norm)
p_F = get_pF(a_F, ecc_F)
return ecc_F, a_F, p_F
def ecc_at_eccT(ecc_T, ecc_F):
"""
Computes transfer orbit eccentricity from transverse and fundamental
components.
Parameters
----------
ecc_T: float
Eccentricity transverse component.
ecc_F: float
Eccentricity of the fundamental ellipse.
Returns
-------
ecc: float
Eccentricity of the transfer orbit.
"""
ecc = np.sqrt(ecc_T ** 2 + ecc_F ** 2)
return ecc
def p_at_eccT(ecc_T, r1_norm, r2_norm, c_norm, dtheta, p_F):
"""
Computes the orbital parameter or semi-latus rectum of the transfer orbit.
Parameters
----------
ecc_T: float
Eccentricity transverse component.
r1_norm: float
Norm of the initial vector position.
r2_norm: float
Norm of the final vector position.
c_norm: float
Norm of the chord vector.
dtheta: float
Transfer angle.
p_F: float
Orbital parameter or semi-latus rectum of the fundamental ellipse.
Returns
-------
p: float
Orbital parameter or semi-lactus rectum.
"""
p = p_F - ecc_T * r1_norm * r2_norm * np.sin(dtheta) / c_norm
return p
def a_at_eccT(ecc_T, ecc_F, p):
"""
Computes the semi-major axis of the transfer orbit.
Parameters
----------
ecc_T: float
Eccentricity transverse component.
ecc_F: float
Eccentricity of the fundamental ellipse.
p: float
Transfer orbit parameter or semi-latus rectum.
Returns
-------
a: float
Semi-major axis of the transfer orbit.
"""
a = p / (1 - ecc_F ** 2 - ecc_T ** 2)
return a
def eap_from_eccT(ecc_T, geometry):
"""
Solves for transfer orbit eccentricity, semi-major axis and orbital
parameter.
Parameters
----------
ecc_T: float
Eccentricity component along transverse direction.
geometry: tuple
A tuple hosting r1_norm, r2_norm, c_norm, dtheta and w_c geometry values.
Returns
-------
ecc: float
Absolute eccentricity of the transfer orbit.
a: float
Semi-major axis of the transfer orbit.
p: float
Semi-latus rectum of the transfer orbit.
"""
# Unpack useful parameters
r1_norm, r2_norm, c_norm, dtheta, w_c = geometry
# Solve for the fundamental ellipse properties
ecc_F, a_F, p_F = get_fundamental_ellipse_properties(r1_norm, r2_norm, c_norm)
# Compute the transfer orbit eccentricity, semi-latus rectum and
# semi-major axis.
ecc = ecc_at_eccT(ecc_T, ecc_F)
p = p_at_eccT(ecc_T, r1_norm, r2_norm, c_norm, dtheta, p_F)
a = a_at_eccT(ecc_T, ecc_F, p)
return ecc, a, p
def w_at_eccT(ecc_T, ecc_F, w_c):
"""
Compute the true anomalies for the initial and final position vectors
with respect to the transfer orbit.
Parameters
----------
ecc_T: float
Eccentricity transverse component.
ecc_F: float
Eccentricity of the fundamental ellipse.
dtheta: float
Transfer angle.
w_c: float
Angle between the initial and chord vector.
Returns
-------
nu_1: float
True anomaly of the initial position vector w.r.t. transfer orbit.
nu_2: float
True anomaly of the final position vector w.r.t. transfer orbit.
Notes
-----
This is equation (6) from Quan He's report [2].
"""
# Compute the coordinates
y = ecc_F * np.sin(w_c) + ecc_T * np.cos(w_c)
x = ecc_F * np.cos(w_c) - ecc_T * np.sin(w_c)
w = np.arctan2(y, x)
from utilities import get_closest_index, add_month_to_timestamp, get_dir, get_distance_between_points
import numpy as np
import shapefile
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cftr
from netCDF4 import Dataset
from datetime import datetime, timedelta
import log
# -----------------------------------------------
# Classes
# -----------------------------------------------
class OceanBasins:
def __init__(self):
self.basin = []
def determine_if_point_in_basin(self,basin_name,p_lon,p_lat):
p_lon = np.array(p_lon)
p_lat = np.array(p_lat)
if basin_name.startswith('po'):
basin_name = [basin_name[:2]+'_l'+basin_name[2:],basin_name[:2]+'_r'+basin_name[2:]]
else:
basin_name = [basin_name]
l_in_basin = np.zeros(len(p_lon)).astype('bool')
for i in range(len(basin_name)):
basin = self.get_basin_polygon(basin_name[i])
for p in range(len(p_lon)):
point = Point(p_lon[p],p_lat[p])
l_in_polygon = basin.polygon.contains(point)
l_in_basin[p] = l_in_polygon or l_in_basin[p]
return l_in_basin
def get_basin_polygon(self,basin_name):
for basin in self.basin:
if basin.name == basin_name:
return basin
raise ValueError('Unknown ocean basin requested. Valid options are: "io","ao","po", and any of these with "_nh" or "_sh" added.')
@staticmethod
def read_from_shapefile(input_path='input/oceanbasins_polygons.shp'):
ocean_basins = OceanBasins()
sf = shapefile.Reader(input_path)
shape_records = sf.shapeRecords() # reads both shapes and records(->fields)
for i in range(len(shape_records)):
name = shape_records[i].record[1]
points = shape_records[i].shape.points
polygon = Polygon(points)
ocean_basins.basin.append(OceanBasin(name,polygon))
sf.close()
return ocean_basins
class OceanBasin:
def __init__(self,name,polygon):
self.name = name
self.polygon = polygon
class OceanBasinGrid:
def __init__(self,basin_name,dx,lon_range=None,lat_range=None):
self.basin_name = basin_name
if lon_range is None:
lon_range = [-180,180]
if lat_range is None:
lat_range = [-90,90]
self.grid = Grid(dx,lon_range,lat_range)
lon,lat = np.meshgrid(self.grid.lon,self.grid.lat)
self.in_basin = np.ones(lon.shape).astype('bool')
ocean_basins = OceanBasins.read_from_shapefile()
for i in range(lon.shape[0]):
self.in_basin[i,:] = ocean_basins.determine_if_point_in_basin(basin_name,lon[i,:],lat[i,:])
class Grid:
def __init__(self,dx,lon_range,lat_range,dy=None,periodic=False):
self.dx = dx
if not dy:
self.dy = dx
else:
self.dy = dy
self.lon_min = lon_range[0]
self.lon_max = lon_range[1]
self.lat_min = lat_range[0]
self.lat_max = lat_range[1]
self.lon = np.arange(self.lon_min,self.lon_max+self.dx,self.dx)
self.lat = np.arange(self.lat_min,self.lat_max+self.dy,self.dy)
self.lon_size = len(self.lon)
self.lat_size = len(self.lat)
self.periodic = periodic
def get_index(self,lon,lat):
lon = np.array(lon)
lat = np.array(lat)
# get lon index
lon_index = np.floor((lon-self.lon_min)*1/self.dx)
lon_index = np.array(lon_index)
l_index_lon_over = lon_index >= abs(self.lon_max-self.lon_min)*1/self.dx
if self.periodic:
lon_index[l_index_lon_over] = 0
else:
lon_index[l_index_lon_over] = np.nan
l_index_lon_under = lon_index < 0
if self.periodic:
lon_index[l_index_lon_under] = abs(self.lon_max-self.lon_min)*1/self.dx - 1 # wrap below-range points to the last cell
else:
lon_index[l_index_lon_under] = np.nan
# get lat index
lat_index = np.floor((lat-self.lat_min)*1/self.dy)
lat_index = np.array(lat_index)
l_index_lat_over = lat_index >= abs(self.lat_max-self.lat_min)*1/self.dy
lat_index[l_index_lat_over] = np.nan
l_index_lat_under = lat_index<0
lat_index[l_index_lat_under] = np.nan
return (lon_index,lat_index)
@staticmethod
def get_from_lon_lat_array(lon,lat):
dx = np.round(np.unique(np.diff(lon))[0],2)
dy = np.round(np.unique(np.diff(lat))[0],2)
lon_range = [np.nanmin(lon),np.nanmax(lon)]
lat_range = [np.nanmin(lat),np.nanmax(lat)]
log.warning(None,f'dx ({np.unique(np.diff(lon))[0]}) to create Grid rounded to 2 decimals: dx = {dx}')
if dy != dx:
log.warning(None,f'dy ({np.unique(np.diff(lat))[0]}) to create Grid rounded to 2 decimals: dy = {dy}')
return Grid(dx,lon_range,lat_range,dy=dy)
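# Usage sketch (illustrative, not from the original module): build a 1-degree
# global grid and look up the cell indices of a single lon/lat point.
#   grid = Grid(1., [-180, 180], [-90, 90], periodic=True)
#   lon_idx, lat_idx = grid.get_index(35.2, -10.7)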
class IrregularGrid:
def __init__(self,lon,lat):
self.lon = lon
self.lat = lat
def get_index(self,lon,lat):
lon_index = get_closest_index(self.lon,lon)
lat_index = get_closest_index(self.lat,lat)
return lon_index,lat_index
class LandMask:
def __init__(self,lon,lat,mask):
self.lon = lon
self.lat = lat
self.mask = mask # 0: ocean, 1: land
def get_landmask_with_halo(self):
'''Increases the size of the landmask by 1 gridcell.
This can be used to move plastic
source locations further away from land.'''
i,j = np.where(self.mask==1)
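        # Hypothetical continuation (assumption, not the original code): grow the
        # land mask by one grid cell around every land point to build the halo.
        mask_with_halo = np.copy(self.mask)
        for di in [-1, 0, 1]:
            for dj in [-1, 0, 1]:
                i_shift = np.clip(i + di, 0, self.mask.shape[0] - 1)
                j_shift = np.clip(j + dj, 0, self.mask.shape[1] - 1)
                mask_with_halo[i_shift, j_shift] = 1
        return mask_with_halo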
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
評価用のテストパターン作成ツール集
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# Overview
Divide length into div_num parts.
If there is a remainder, spread it out evenly using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# Apply the adjustment using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# Handle the case where the last element does not get +1 due to floating-point error
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# Final check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
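# Usage example (illustrative): split 10 into 3 integer parts; the fractional
# remainder is spread by error diffusion so that the parts always sum to 10.
print(equal_devision(10, 3))  # e.g. [3, 3, 4]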
def do_matrix(img, mtx):
"""
Apply the matrix mtx to img.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
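# Usage example (illustrative): applying the identity matrix leaves the image
# unchanged, while any other 3x3 matrix mixes the R, G and B channels per pixel.
sample_img = np.ones((2, 2, 3)) * np.array([0.5, 0.25, 0.125])
identity_mtx = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
assert np.allclose(do_matrix(sample_img, identity_mtx), sample_img)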
def _get_cmfs_xy():
"""
Compute the xy values of the horseshoe-shaped outline (spectral locus) used to plot the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# Basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# Compute the xy values of the spectral locus (horseshoe shape)
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
Compute the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
Convert xy values to RGB values.
Also normalize them appropriately.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The video level can end up low as-is, so normalize
& maximize the RGB values per pixel, if requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: over flow has occured at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: under flow has occured at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
Compute the white point, based on CIE 1931.
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
Compute the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # Set default values for the keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # Prepare data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
    Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
    Gamut setting. sRGB was too narrow and looked a little off, so a wider
    gamut is used (currently ACEScg in the code below). A drawback is that the
    colors become slightly desaturated; improve this when time permits.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # Compute the xy values of the spectral locus (horseshoe shape)
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
    To decide whether a point lies inside or outside the horseshoe, the region
    is split into triangles (a Delaunay triangulation). Once the triangulation
    exists, inside/outside can be judged with cross-product style tests.
    The resulting triangulation can be plotted with the code below.
    One note: the third argument of ```plt.triplot``` is the list of
    **indices** that build triangles from the first and second arguments,
    e.g. [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
    ```triangulation.find_simplex()``` returns the index of the simplex that
    contains each xy point. A return value of ```-1``` means the point lies
    outside the triangulation, so a mask can be built from the values that
    are below zero.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(np.float64)
    # Anti-alias to smooth the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(np.float64)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert the mask (negative/positive)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # Recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # avoid division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    Because the raw video level can be low, normalize and maximize the RGB
    values per pixel.
"""
    rgb[rgb == 0] = 1.0  # avoid division by zero
rgb = normalise_maximum(rgb, axis=-1)
    # Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # Set the background color to gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
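# Added sketch of how get_chromaticity_image() might be used (not original
# code): render the horseshoe image and show it with the requested extent.
# matplotlib.pyplot is assumed to be imported as plt, as elsewhere in this
# module; the sample count is an arbitrary example value.
def _example_get_chromaticity_image():
    xmin, xmax, ymin, ymax = 0.0, 0.8, 0.0, 0.9
    img = get_chromaticity_image(samples=256, xmin=xmin, xmax=xmax,
                                 ymin=ymin, ymax=ymax)
    plt.imshow(img, extent=(xmin, xmax, ymin, ymax))
    plt.show()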
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
    Create a CSF-like pattern by overlaying multiple offset rectangles.
    Input signal levels are limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
        a csf pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
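# Added example call (a sketch, not original code). The default levels already
# encode 10-bit values scaled to 16 bit (multiplied by 0x40), so the result can
# be written out as a 16-bit image by external tooling.
def _example_get_csf_color_image():
    img = get_csf_color_image(width=640, height=480, stripe_num=18)
    assert img.dtype == np.uint16 and img.shape == (480, 640, 3)
    return img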
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Create an xyY plot like the ones found in Sony's HDR explanation material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # Build data used to judge whether points lie inside the gamut region
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(np.float64)
    # Anti-alias to smooth the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
        ]).astype(np.float64)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert the mask (negative/positive)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # Recover the colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    Because the raw video level can be low, normalize and maximize the RGB
    values per pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
    # Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
    # Convert back to XYZ once more, in order to obtain Y.
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
    # Prepare for conversion to a log scale
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
    # Get the chromaticity image and paste it at the z=0 plane
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
    # Overview
    Returns an array like
    (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
    which is handy when building a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
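# Added quick illustration (not part of the original file): for grid_num=2 the
# returned array holds the 8 corners of the RGB cube, R varying fastest.
def _example_get_3d_grid_cube_format():
    lut = get_3d_grid_cube_format(grid_num=2)
    assert lut.shape == (1, 8, 3)  # np.dstack adds a leading axis of size 1
    return lut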
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
# x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
# + (t ** 2) * p2[0]
# y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
# + (t ** 2) * p2[1]
    x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
        + (t ** 2) * p2[0]
    y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
        + (t ** 2) * p2[1]
    return x, y
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
    # Overview
    Create a gradation pattern that changes in steps.
    By adjusting the arguments it is also possible to create a pattern that
    changes by exactly one code value per step.
    # Notes
    To create a gradation that changes by exactly one code value per step,
    set the parameters so that
    ```step_num = (2 ** bit_depth) + 1```
    See the Example below for concrete settings.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
    # Set the gradation direction
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
        Pattern that does not increase by exactly one code value per step.
        The last value would become 256 or 1024, so subtract 1 from it.
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
        Pattern that changes by exactly one code value per step.
        The last value would become 256 or 1024, so drop it.
"""
val_list = np.linspace(0, max, step_num)[0:-1]
        step_num -= 1  # the argument carries an extra +1, so subtract it here
        # Just in case, verify that the values change by one code value per step
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
    # First build a one-pixel-high horizontal gradation line
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
    # Expand to two dimensions using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
    # Convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
"""
    Merge img_b into img_a.
    The data in img_a is overwritten in place.
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
    Alpha-composite fg_img over bg_img.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
    Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
    # number of pixels horizontally and vertically
pixel_num = dot_size * 2 * repeat
    # Generate the high/low boolean arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
    # Apply the color
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
    # Copy vertically and join the even/odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
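# Added usage sketch (illustrative, not original code): a red dot pattern with
# 4x4-pixel dots repeated four times per axis.
def _example_dot_pattern():
    img = dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 0.0, 0.0]))
    assert img.shape == (32, 32, 3)  # dot_size * 2 * repeat per side
    return img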
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
                        bg_color=np.array([0.15, 0.15, 0.15])):
"""
Particle Filter helper functions
"""
import configparser
import json
import math
import os
from collections import defaultdict
from io import BytesIO
from itertools import permutations
from itertools import product
from pathlib import Path
import imageio
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
from PIL import Image
from scipy.ndimage.filters import gaussian_filter
from .definitions import RUN_DIR
def permute_particle(particle):
return np.hstack((particle[4:], particle[:4]))
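# Added tiny sketch of what permute_particle() does: it swaps the two
# 4-dimensional target blocks of a single 8-element particle.
def _example_permute_particle():
    particle = np.arange(8)
    swapped = permute_particle(particle)
    assert np.array_equal(swapped, np.array([4, 5, 6, 7, 0, 1, 2, 3]))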
def particle_swap(env):
# 2000 x 8
particles = np.copy(env.pf.particles)
n_targets = env.state.n_targets
state_dim = 4
# convert particles to cartesian
for i in range(n_targets):
x, y = pol2cart(particles[:, state_dim*i],
np.radians(particles[:, (state_dim*i)+1]))
particles[:, state_dim*i] = x
particles[:, (state_dim*i)+1] = y
swapped = True
k = 0
while swapped and k < 10:
k += 1
swapped = False
for i in range(len(particles)):
original_particle = np.copy(particles[i])
target_centroids = [
np.mean(particles[:, state_dim*t:(state_dim*t)+2]) for t in range(n_targets)]
distance = 0
for t in range(n_targets):
dif = particles[i, state_dim *
t:(state_dim*t)+2] - target_centroids[t]
distance += np.dot(dif, dif)
permuted_particle = permute_particle(particles[i])
particles[i] = permuted_particle
permuted_target_centroids = [
np.mean(particles[:, state_dim*t:(state_dim*t)+2]) for t in range(n_targets)]
permuted_distance = 0
for t in range(n_targets):
dif = particles[i, state_dim *
t:(state_dim*t)+2] - permuted_target_centroids[t]
permuted_distance += np.dot(dif, dif)
if distance < permuted_distance:
particles[i] = original_particle
else:
swapped = True
# convert particles to polar
for i in range(n_targets):
rho, phi = cart2pol(
particles[:, state_dim*i], particles[:, (state_dim*i)+1])
particles[:, state_dim*i] = rho
particles[:, (state_dim*i)+1] = np.degrees(phi)
env.pf.particles = particles
def pol2cart(rho, phi):
"""
Transform polar to cartesian
"""
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return(x, y)
def cart2pol(x, y):
"""
Transform cartesian to polar
"""
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return rho, phi
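# Added round-trip sketch: pol2cart() and cart2pol() are inverses of each
# other up to floating point error. The sample values are arbitrary.
def _example_polar_round_trip():
    rho, phi = 10.0, np.radians(30.0)
    x, y = pol2cart(rho, phi)
    rho2, phi2 = cart2pol(x, y)
    assert np.isclose(rho, rho2) and np.isclose(phi, phi2)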
def get_distance(coord1, coord2):
"""
Get the distance between two coordinates
"""
if (coord1 is None) or (coord2 is None):
return None
lat1, long1 = coord1
lat2, long2 = coord2
# approximate radius of earth in km
R = 6373.0
lat1 = np.radians(lat1)
long1 = np.radians(long1)
lat2 = np.radians(lat2)
long2 = np.radians(long2)
dlon = long2 - long1
dlat = lat2 - lat1
a = np.sin(dlat / 2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2)**2
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
distance = R * c
return distance*(1e3)
def get_bearing(coord1, coord2):
"""
Get the bearing of two coordinates
"""
if (coord1 is None) or (coord2 is None):
return None
lat1, long1 = coord1
lat2, long2 = coord2
dLon = (long2 - long1)
x = np.cos(np.radians(lat2)) * np.sin(np.radians(dLon))
y = np.cos(np.radians(lat1)) * np.sin(np.radians(lat2)) - \
np.sin(np.radians(lat1)) * np.cos(np.radians(lat2)) * \
np.cos(np.radians(dLon))
brng = np.arctan2(x, y)
brng = np.degrees(brng)
return -brng + 90
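# Added illustrative example (not from the original module): distance in
# meters and bearing in degrees between two nearby GPS fixes. The coordinates
# are made-up values used only to show the call signature.
def _example_distance_and_bearing():
    coord1 = (45.5231, -122.6765)
    coord2 = (45.5240, -122.6750)
    d = get_distance(coord1, coord2)  # meters (haversine, R = 6373 km)
    b = get_bearing(coord1, coord2)   # degrees (note the -brng + 90 convention above)
    return d, b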
def is_float(element):
"""
Check if an element is a float or not
"""
try:
float(element)
return True
except (ValueError, TypeError):
return False
class GPSVis:
"""
modified from:
https://github.com/tisljaricleo/GPS-visualization-Python
MIT License
Copyright (c) 2021 <NAME>
Class for GPS data visualization using pre-downloaded OSM map in image format.
"""
def __init__(self, position=None, map_path=None, bounds=None):
"""
        :param position: (lat, lon) tuple around which to build the map.
:param map_path: Path to pre-downloaded OSM map in image format.
:param bounds: Upper-left, and lower-right GPS points of the map (lat1, lon1, lat2, lon2).
"""
self.position = position
self.map_path = map_path
self.bounds = bounds
if self.map_path is not None and self.bounds is not None:
self.img = self.create_image_from_map()
elif self.position is not None:
self.zoom = 17
self.TILE_SIZE = 256
distance = 100
coord = self.position
lat_dist = distance/111111
lon_dist = distance / (111111 * np.cos(np.radians(coord[0])))
top, bot = coord[0] + lat_dist, coord[0] - lat_dist
lef, rgt = coord[1] - lon_dist, coord[1] + lon_dist
self.bounds = [top, lef, bot, rgt]
self.img = self.create_image_from_position()
self.get_ticks()
self.cell_size = 1
self.xedges = np.arange(0, self.width_meters +
self.cell_size, self.cell_size)
self.yedges = np.arange(0, self.height_meters +
self.cell_size, self.cell_size)
def plot_map(self, axis1=None, output=None, save_as='resultMap.png'):
"""
Method for plotting the map. You can choose to save it in file or to plot it.
:param output: Type 'plot' to show the map or 'save' to save it. Default None
:param save_as: Name and type of the resulting image.
:return:
"""
# create Fig and Axis if doesn't exist
if axis1 is None:
fig, axis1 = plt.subplots(figsize=(10, 13))
# Plot background map
axis1.imshow(np.flipud(self.img), alpha=0.7, origin='lower')
# Set axis dimensions, labels and tick marks
axis1.set_xlim(0, int(self.width_meters))
axis1.set_ylim(0, int(self.height_meters))
axis1.set_xlabel('Longitude')
axis1.set_ylabel('Latitude')
axis1.set_xticks(np.linspace(0, int(self.width_meters), num=8))
axis1.set_xticklabels(self.x_ticks, rotation=30, ha='center')
axis1.set_yticks(np.linspace(0, int(self.height_meters), num=8))
axis1.set_yticklabels(self.y_ticks)
axis1.grid()
# Save or display
if output == 'save':
plt.savefig(save_as)
elif output == 'plot':
plt.show()
def point_to_pixels(self, lat, lon, zoom):
"""convert gps coordinates to web mercator"""
r = math.pow(2, zoom) * self.TILE_SIZE
lat = math.radians(lat)
x = int((lon + 180.0) / 360.0 * r)
y = int(
(1.0 - math.log(math.tan(lat) + (1.0 / math.cos(lat))) / math.pi) / 2.0 * r)
return x, y
def create_image_from_position(self):
URL = 'https://tile.openstreetmap.org/{z}/{x}/{y}.png'.format
top, lef, bot, rgt = self.bounds
x0, y0 = self.point_to_pixels(top, lef, self.zoom)
x1, y1 = self.point_to_pixels(bot, rgt, self.zoom)
x0_tile, y0_tile = int(x0 / self.TILE_SIZE), int(y0 / self.TILE_SIZE)
x1_tile, y1_tile = math.ceil(
x1 / self.TILE_SIZE), math.ceil(y1 / self.TILE_SIZE)
assert (x1_tile - x0_tile) * (y1_tile -
y0_tile) < 50, "That's too many tiles!"
# full size image we'll add tiles to
img = Image.new('RGB', (
(x1_tile - x0_tile) * self.TILE_SIZE,
(y1_tile - y0_tile) * self.TILE_SIZE))
# loop through every tile inside our bounded box
for x_tile, y_tile in product(range(x0_tile, x1_tile), range(y0_tile, y1_tile)):
with requests.get(URL(x=x_tile, y=y_tile, z=self.zoom)) as resp:
tile_img = Image.open(BytesIO(resp.content))
# add each tile to the full size image
img.paste(
im=tile_img,
box=((x_tile - x0_tile) * self.TILE_SIZE, (y_tile - y0_tile) * self.TILE_SIZE))
x, y = x0_tile * self.TILE_SIZE, y0_tile * self.TILE_SIZE
img = img.crop((
int(x0-x), # left
int(y0-y), # top
int(x1-x), # right
int(y1-y))) # bottom
self.width_meters = get_distance(
(self.bounds[0], self.bounds[1]), (self.bounds[0], self.bounds[3]))
self.height_meters = get_distance(
(self.bounds[0], self.bounds[1]), (self.bounds[2], self.bounds[1]))
img = img.resize((int(self.width_meters), int(self.height_meters)))
return img
def create_image_from_map(self):
"""
Create the image that contains the original map and the GPS records.
:param color: Color of the GPS records.
:param width: Width of the drawn GPS records.
:return:
"""
img = Image.open(self.map_path, 'r')
self.width_meters = get_distance(
(self.bounds[0], self.bounds[1]), (self.bounds[0], self.bounds[3]))
self.height_meters = get_distance(
(self.bounds[0], self.bounds[1]), (self.bounds[2], self.bounds[1]))
img = img.resize((int(self.width_meters), int(self.height_meters)))
print('background image size (pixels) = ', img.size)
return img
def scale_to_img(self, lat_lon, w_h):
"""
Conversion from latitude and longitude to the image pixels.
It is used for drawing the GPS records on the map image.
:param lat_lon: GPS record to draw (lat1, lon1).
:param w_h: Size of the map image (w, h).
:return: Tuple containing x and y coordinates to draw on map image.
"""
# https://gamedev.stackexchange.com/questions/33441/how-to-convert-a-number-from-one-min-max-set-to-another-min-max-set/33445
lat_old = (self.bounds[2], self.bounds[0])
new = (0, w_h[1])
y = ((lat_lon[0] - lat_old[0]) * (new[1] - new[0]) /
(lat_old[1] - lat_old[0])) + new[0]
lon_old = (self.bounds[1], self.bounds[3])
new = (0, w_h[0])
x = ((lat_lon[1] - lon_old[0]) * (new[1] - new[0]) /
(lon_old[1] - lon_old[0])) + new[0]
# y must be reversed because the orientation of the image in the matplotlib.
# image - (0, 0) in upper left corner; coordinate system - (0, 0) in lower left corner
return int(x), int(y) # w_h[1] - int(y)
def set_origin(self, lat_lon):
self.origin = self.scale_to_img(
lat_lon, (int(self.width_meters), int(self.height_meters)))
def get_ticks(self):
"""
Generates custom ticks based on the GPS coordinates of the map for the matplotlib output.
:return:
"""
self.x_ticks = map(
lambda x: round(x, 4),
np.linspace(self.bounds[1], self.bounds[3], num=8))
self.y_ticks = map(
lambda x: round(x, 4),
np.linspace(self.bounds[2], self.bounds[0], num=8))
# Ticks must be reversed because the orientation of the image in the matplotlib.
# image - (0, 0) in upper left corner; coordinate system - (0, 0) in lower left corner
self.y_ticks = list(self.y_ticks) # sorted(y_ticks, reverse=True)
self.x_ticks = list(self.x_ticks)
class Results:
'''
Results class for saving run results
to file with common format.
'''
def __init__(self, method_name='', global_start_time='', num_iters=0, plotting=False, config={}):
self.num_iters = num_iters
self.method_name = method_name
self.global_start_time = global_start_time
self.plotting = plotting
if not isinstance(self.plotting, bool):
if self.plotting in ('true', 'True'):
self.plotting = True
else:
self.plotting = False
self.native_plot = config.get('native_plot', 'false').lower()
self.plot_every_n = int(config.get('plot_every_n', 1))
self.make_gif = config.get('make_gif', 'false').lower()
self.namefile = f'{RUN_DIR}/{method_name}/{global_start_time}_data.csv'
self.plot_dir = config.get(
'plot_dir', f'{RUN_DIR}/{method_name}/{global_start_time}')
self.logdir = f'{RUN_DIR}/{method_name}/{global_start_time}_logs/'
if self.make_gif == 'true':
Path(self.plot_dir+'/png/').mkdir(parents=True, exist_ok=True)
Path(self.plot_dir+'/gif/').mkdir(parents=True, exist_ok=True)
Path(self.logdir).mkdir(parents=True, exist_ok=True)
self.col_names = ['time', 'run_time', 'target_state', 'sensor_state',
'action', 'observation', 'reward', 'collisions', 'lost',
'r_err', 'theta_err', 'heading_err', 'centroid_err', 'rmse', 'mae', 'inference_times', 'pf_cov']
self.pf_stats = defaultdict(list)
self.abs_target_hist = []
self.abs_sensor_hist = []
self.target_hist = []
self.sensor_hist = []
self.sensor_gps_hist = []
self.history_length = 50
self.time_step = 0
self.texts = []
self.openstreetmap = None
self.transform = None
self.expected_target_rssi = None
if config:
write_header_log(config, self.method_name, self.global_start_time)
def write_dataframe(self, run_data):
"""
Save dataframe to CSV file
"""
if os.path.isfile(self.namefile):
print('Updating file {}'.format(self.namefile))
else:
print('Saving file to {}'.format(self.namefile))
df = pd.DataFrame(run_data, columns=self.col_names)
df.to_csv(self.namefile)
def save_gif(self, run, sub_run=None):
filename = run if sub_run is None else '{}_{}'.format(run, sub_run)
# Build GIF
with imageio.get_writer('{}/gif/{}.gif'.format(self.plot_dir, filename), mode='I', fps=5) as writer:
for png_filename in sorted(os.listdir(self.plot_dir+'/png/'), key=lambda x: (len(x), x)):
image = imageio.imread(self.plot_dir+'/png/'+png_filename)
writer.append_data(image)
def live_plot(self, env, time_step=None, fig=None, ax=None, data=None):
"""
Create a live plot
"""
if self.openstreetmap is None and data.get('position', None) is not None and data.get('bearing', None) is not None:
self.openstreetmap = GPSVis(
position=data['position']
# map_path='map_delta_park.png', # Path to map downloaded from the OSM.
# bounds=(45.60311,-122.68450, 45.59494, -122.67505) # upper left, lower right
)
self.openstreetmap.set_origin(data['position'])
self.transform = np.array(
[self.openstreetmap.origin[0], self.openstreetmap.origin[1]])
self.time_step = time_step
self.pf_stats['mean_hypothesis'].append(
env.pf.mean_hypothesis if hasattr(env.pf, 'mean_hypothesis') else [None])
self.pf_stats['map_hypothesis'].append(
env.pf.map_hypothesis if hasattr(env.pf, 'map_hypothesis') else [None])
self.pf_stats['mean_state'].append(
env.pf.mean_state if hasattr(env.pf, 'mean_state') else [None])
self.pf_stats['map_state'].append(
env.pf.map_state if hasattr(env.pf, 'map_state') else [None])
abs_sensor = env.state.sensor_state
abs_particles = env.get_absolute_particles()
self.sensor_hist.append(abs_sensor)
target_bearing = None
target_relative_bearing = None
if data.get('position', None) is not None and data.get('drone_position', None) is not None and data.get('bearing', None) is not None:
target_bearing = get_bearing(
data['position'], data['drone_position'])
target_relative_bearing = target_bearing - data['bearing']
target_distance = get_distance(
data['position'], data['drone_position'])
self.expected_target_rssi = env.sensor.observation(
[[target_distance, target_relative_bearing, None, None]])[0]
ax.clear()
if self.openstreetmap is not None:
self.openstreetmap.plot_map(axis1=ax)
# TODO get variables
ax.set_title('Time = {}, Frequency = {}, Bandwidth = {}, Gain = {}'.format(
time_step, None, None, None))
color_array = [['salmon', 'darkred', 'red'],
['lightskyblue', 'darkblue', 'blue']]
lines = [] # https://matplotlib.org/3.5.0/api/_as_gen/matplotlib.pyplot.legend.html
# Plot Particles
for t in range(env.state.n_targets):
particles_x, particles_y = pol2cart(
abs_particles[:, t, 0], np.radians(abs_particles[:, t, 1]))
if self.transform is not None:
particles_x += self.transform[0]
particles_y += self.transform[1]
line1, = ax.plot(particles_x, particles_y, 'o',
color=color_array[t][0], markersize=4, markeredgecolor='black', label='particles', alpha=0.3, zorder=1)
if self.openstreetmap:
heatmap, xedges, yedges = np.histogram2d(particles_x, particles_y, bins=(
self.openstreetmap.xedges, self.openstreetmap.yedges))
heatmap = gaussian_filter(heatmap, sigma=8)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
im = ax.imshow(heatmap.T, extent=extent, origin='lower',
cmap='jet', interpolation='nearest', alpha=0.2)
# plt.colorbar(im)
centroid_x = np.mean(particles_x)
centroid_y = np.mean(particles_y)
line2, = ax.plot(centroid_x, centroid_y, '*', color='magenta',
markeredgecolor='black', label='centroid', markersize=12, zorder=2)
if t == 0:
lines.extend([line1, line2])
else:
lines.extend([])
# Plot Sensor
sensor_x, sensor_y = pol2cart(np.array(self.sensor_hist)[
:, 0], np.radians(np.array(self.sensor_hist)[:, 1]))
if self.transform is not None:
sensor_x += self.transform[0]
sensor_y += self.transform[1]
if len(self.sensor_hist) > 1:
ax.arrow(sensor_x[-2], sensor_y[-2], 4*(sensor_x[-1]-sensor_x[-2]),
4*(sensor_y[-1]-sensor_y[-2]), width=1.5, color='blue', zorder=4)
ax.plot(sensor_x[:-1], sensor_y[:-1], linewidth=3.0,
color='blue', markeredgecolor='black', markersize=4, zorder=4)
line4, = ax.plot(sensor_x[-1], sensor_y[-1], 'H',
color='blue', label='sensor', markersize=10, zorder=4)
lines.extend([line4])
if self.openstreetmap and data.get('drone_position', None) is not None:
self.target_hist.append(self.openstreetmap.scale_to_img(
data['drone_position'], (self.openstreetmap.width_meters, self.openstreetmap.height_meters)))
target_np = np.array(self.target_hist)
if len(self.target_hist) > 1:
ax.plot(target_np[:, 0], target_np[:, 1], linewidth=3.0,
color='maroon', zorder=3, markersize=4)
line5, = ax.plot(target_np[-1, 0], target_np[-1, 1], 'o', color='maroon',
markeredgecolor='black', label='target', markersize=10, zorder=3)
lines.extend([line5])
# Legend
ax.legend(handles=lines, loc='upper left', bbox_to_anchor=(
1.04, 1.0), fancybox=True, shadow=True, ncol=1)
# X/Y Limits
if self.openstreetmap is None:
map_width = 600
min_map = -1*int(map_width/2)
max_map = int(map_width/2)
ax.set_xlim(min_map, max_map)
ax.set_ylim(min_map, max_map)
# Sidebar Text
# actual_str = r'$\bf{Actual}$''\n' # prettier format but adds ~0.04 seconds ???
actual_str = 'Actual\n'
actual_str += 'Bearing = {:.0f} deg\n'.format(data.get(
'bearing', None)) if data.get('bearing', None) else 'Bearing = unknown\n'
actual_str += 'Speed = {:.2f} m/s'.format(data.get('action_taken', None)[
1]) if data.get('action_taken', None) else 'Speed = unknown\n'
proposal_str = 'Proposed\n'
proposal_str += 'Bearing = {:.0f} deg\n'.format(data.get('action_proposal', None)[
0]) if None not in data.get('action_proposal', (None, None)) else 'Bearing = unknown\n'
proposal_str += 'Speed = {:.2f} m/s'.format(data.get('action_proposal', None)[
1]) if None not in data.get('action_proposal', (None, None)) else 'Speed = unknown\n'
last_mean_hyp = self.pf_stats['mean_hypothesis'][-1][0]
last_map_hyp = self.pf_stats['map_hypothesis'][-1][0]
rssi_str = 'RSSI\n'
rssi_str += 'Observed = {:.1f} dB\n'.format(
env.last_observation) if env.last_observation else 'Observed = unknown\n'
rssi_str += 'Expected = {:.1f} dB\n'.format(
self.expected_target_rssi) if self.expected_target_rssi else 'Expected = unknown\n'
rssi_str += 'Difference = {:.1f} dB\n'.format(env.last_observation - self.expected_target_rssi) if (
env.last_observation and self.expected_target_rssi) else ''
#rssi_str += 'Target bearing = {} \n'.format(target_bearing) if target_bearing else ''
#rssi_str += 'Target relative bearing = {} \n'.format(target_relative_bearing) if target_relative_bearing else ''
rssi_str += 'MLE estimate = {:.1f} dB\n'.format(
last_mean_hyp) if last_mean_hyp else 'MLE estimate = unknown'
rssi_str += 'MAP estimate = {:.1f} dB'.format(
last_map_hyp) if last_map_hyp else 'MAP estimate = unknown'
if len(fig.texts) == 0:
props = dict(boxstyle='round', facecolor='palegreen', alpha=0.5)
text = fig.text(1.04, 0.75, actual_str, transform=ax.transAxes,
fontsize=14, verticalalignment='top', bbox=props)
props = dict(boxstyle='round',
facecolor='paleturquoise', alpha=0.5)
text = fig.text(1.04, 0.5, proposal_str, transform=ax.transAxes,
fontsize=14, verticalalignment='top', bbox=props)
props = dict(boxstyle='round', facecolor='khaki', alpha=0.5)
text = fig.text(1.04, 0.25, rssi_str, transform=ax.transAxes,
fontsize=14, verticalalignment='top', bbox=props)
else:
fig.texts[0].set_text(actual_str)
fig.texts[1].set_text(proposal_str)
fig.texts[2].set_text(rssi_str)
self.native_plot = 'true' if time_step % self.plot_every_n == 0 else 'false'
if self.native_plot == 'true':
plt.draw()
plt.pause(0.001)
if self.make_gif == 'true':
png_filename = '{}/png/{}.png'.format(self.plot_dir, time_step)
print('saving plots in {}'.format(png_filename))
plt.savefig(png_filename, bbox_inches='tight')
def build_multitarget_plots(self, env, time_step=None, fig=None, axs=None, centroid_distance_error=None, selected_plots=[1, 2, 3, 4, 5], simulated=True, textstr=None):
xp = env.state.target_state
belief = env.pf.particles.reshape(
len(env.pf.particles), env.state.n_targets, 4)
#print('sensor state = ',env.state.sensor_state)
abs_sensor = env.state.sensor_state
abs_particles = env.get_absolute_particles()
if simulated:
abs_target = np.array(env.get_absolute_target())
else:
abs_target = None
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
if len(self.abs_target_hist) < self.history_length:
self.abs_target_hist = [abs_target] * self.history_length
self.abs_sensor_hist = [abs_sensor] * self.history_length
else:
self.abs_target_hist.pop(0)
self.abs_target_hist.append(abs_target)
self.abs_sensor_hist.pop(0)
self.abs_sensor_hist.append(abs_sensor)
if len(self.target_hist) == 150:
self.target_hist = []
self.sensor_hist = []
self.rel_sensor_hist = []
self.target_hist.append(abs_target)
self.sensor_hist.append(abs_sensor)
plt.tight_layout()
# Put space between plots
plt.subplots_adjust(wspace=0.7, hspace=0.2)
color_array = [['salmon', 'darkred', 'red'],
['lightskyblue', 'darkblue', 'blue']]
plot_count = 0
if axs is None:
axs = {}
map_width = 600
min_map = -1*int(map_width/2)
max_map = int(map_width/2)
cell_size = int((max_map - min_map)/max_map)
cell_size = 2
xedges = np.arange(min_map, max_map+cell_size, cell_size)
yedges = np.arange(min_map, max_map+cell_size, cell_size)
if 1 in selected_plots:
# Plot 1: Particle Plot (Polar)
plot_count += 1
if 1 not in axs:
axs[1] = fig.add_subplot(
1, len(selected_plots), plot_count, polar=True)
ax = axs[1]
ax.clear()
for t in range(env.state.n_targets):
# plot particles
plot_theta = np.radians(belief[:, t, 1])
plot_r = belief[:, t, 0] # [row[0] for row in belief]
ax.plot(plot_theta, plot_r, 'o', color=color_array[t][0], markersize=4,
markeredgecolor='black', label='particles', alpha=0.3, zorder=1)
# plot targets
plot_x_theta = np.radians(xp[t, 1])
plot_x_r = xp[t, 0]
ax.set_ylim(0, 300)
if 2 in selected_plots:
# Plot 2: Particle Plot (Polar) with Interpolation
plot_count += 1
if 2 not in axs:
axs[2] = fig.add_subplot(
1, len(selected_plots), plot_count, polar=True)
ax = axs[2]
for t in range(env.state.n_targets):
# Create grid values first via histogram.
nbins = 10
plot_theta = np.radians(belief[:, t, 1])
plot_r = belief[:, t, 0] # [row[0] for row in belief]
counts, xbins, ybins = np.histogram2d(
plot_theta, plot_r, bins=nbins)
# Make a meshgrid for theta, r values
tm, rm = np.meshgrid(xbins[:-1], ybins[:-1])
# Build contour plot
ax.contourf(tm, rm, counts)
# True position
plot_x_theta = np.radians(xp[t, 1])
plot_x_r = xp[t, 0]
ax.plot(plot_x_theta, plot_x_r, 'X')
ax.set_ylim(0, 300)
if 3 in selected_plots:
# Plot 3: Heatmap Plot (Cartesian)
plot_count += 1
if 3 not in axs:
axs[3] = fig.add_subplot(1, len(selected_plots), plot_count)
ax = axs[3]
# COMBINED; UNCOMMENT AFTER PAPER PLOT
all_particles_x, all_particles_y = [], []
for t in range(env.state.n_targets):
cart = np.array(
list(map(pol2cart, belief[:, t, 0], np.radians(belief[:, t, 1]))))
x = cart[:, 0]
y = cart[:, 1]
all_particles_x.extend(x)
all_particles_y.extend(y)
heatmap, xedges, yedges = np.histogram2d(
all_particles_x, all_particles_y, bins=(xedges, yedges))
heatmap = gaussian_filter(heatmap, sigma=8)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
im = ax.imshow(heatmap.T, extent=extent, origin='lower',
cmap='jet', interpolation='nearest')
plt.colorbar(im)
ax.set_xlim(min_map, max_map)
ax.set_ylim(min_map, max_map)
if 4 in selected_plots:
# Plot 4: Absolute Polar coordinates
plot_count += 1
if 4 not in axs:
axs[4] = fig.add_subplot(
1, len(selected_plots), plot_count, polar=True)
ax = axs[4]
ax.clear()
lines = [] # https://matplotlib.org/3.5.0/api/_as_gen/matplotlib.pyplot.legend.html
for t in range(env.state.n_targets):
particles_x, particles_y = pol2cart(
abs_particles[:, t, 0], np.radians(abs_particles[:, t, 1]))
centroid_x = np.mean(particles_x)
centroid_y = np.mean(particles_y)
centroid_r, centroid_theta = cart2pol(centroid_x, centroid_y)
target_r, target_theta, target_x, target_y = [], [], [], []
for i in range(5):
target_r.append(
self.abs_target_hist[10*(i+1)-1][env.state.n_targets-1-t][0])
target_theta.append(np.radians(
self.abs_target_hist[10*(i+1)-1][env.state.n_targets-1-t][1]))
target_x, target_y = pol2cart(target_r, target_theta)
if len(self.target_hist) > 1:
ax.plot(np.radians(np.array(self.target_hist)[:-1, t, 1]), np.array(self.target_hist)[
:-1, t, 0], linewidth=4.0, color='limegreen', zorder=3, markersize=12)
line0, = ax.plot(target_theta[4], target_r[4], 'X', color='limegreen',
markeredgecolor='black', label='targets', markersize=20, zorder=4)
line1, = ax.plot(np.radians(abs_particles[:, t, 1]), abs_particles[:, t, 0], 'o', color=color_array[t]
[0], markersize=4, markeredgecolor='black', label='particles', alpha=0.3, zorder=1)
if t == 0:
lines.extend([line0, line1])
else:
lines.extend([line0])
if len(self.sensor_hist) > 1:
ax.plot(np.radians(np.array(self.sensor_hist)[:-1, 1]), np.array(self.sensor_hist)[
:-1, 0], linewidth=4.0, color='mediumorchid', zorder=3, markersize=12)
line4, = ax.plot(np.radians(self.sensor_hist[-1][1]), self.sensor_hist[-1][0], 'H',
color='mediumorchid', markeredgecolor='black', label='sensor', markersize=20, zorder=3)
lines.extend([line4])
ax.legend(handles=lines, loc='center left', bbox_to_anchor=(
1.08, 0.5), fancybox=True, shadow=True,)
ax.set_ylim(0, 250)
if 5 in selected_plots:
# Plot 5: Absolute Cartesian coordinates
plot_count += 1
if 5 not in axs:
axs[5] = fig.add_subplot(1, len(selected_plots), plot_count)
ax = axs[5]
xedges = np.arange(min_map, max_map, cell_size)
yedges = np.arange(min_map, max_map, cell_size)
heatmap_combined = None
all_particles_x, all_particles_y = [], []
for t in range(env.state.n_targets):
particles_x, particles_y = pol2cart(
abs_particles[:, t, 0], np.radians(abs_particles[:, t, 1]))
all_particles_x.extend(particles_x)
all_particles_y.extend(particles_y)
centroid_x = np.mean(particles_x)
centroid_y = np.mean(particles_y)
centroid_r, centroid_theta = cart2pol(centroid_x, centroid_y)
target_r, target_theta, target_x, target_y = [], [], [], []
for i in range(5):
target_r.append(self.abs_target_hist[10*(i+1)-1][t][0])
target_theta.append(np.radians(
self.abs_target_hist[10*(i+1)-1][t][1]))
target_x, target_y = pol2cart(target_r, target_theta)
ax.plot(centroid_x, centroid_y, '*',
label='centroid', markersize=12)
ax.plot(target_x[4], target_y[4], 'X',
label='target', markersize=12)
sensor_r, sensor_theta, sensor_x, sensor_y = [], [], [], []
for i in range(5):
sensor_r.append(self.abs_sensor_hist[10*(i+1)-1][0])
sensor_theta.append(np.radians(
self.abs_sensor_hist[10*(i+1)-1][1]))
sensor_x, sensor_y = pol2cart(sensor_r, sensor_theta)
ax.plot(sensor_x[4], sensor_y[4], 'p',
label='sensor', markersize=12)
heatmap, xedges, yedges = np.histogram2d(
all_particles_x, all_particles_y, bins=(xedges, yedges))
heatmap = gaussian_filter(heatmap, sigma=8)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
im = ax.imshow(heatmap.T, extent=extent, origin='lower',
cmap='jet', interpolation='nearest')
plt.colorbar(im)
ax.legend(loc='center left', bbox_to_anchor=(
1.2, 0.5), fancybox=True, shadow=True,)
ax.set_xlim(min_map, max_map)
ax.set_ylim(min_map, max_map)
if 6 in selected_plots:
# Plot 1: Particle Plot (Polar)
plot_count += 1
if 6 not in axs:
axs[6] = fig.add_subplot(1, len(selected_plots), plot_count)
ax = axs[6]
ax.clear()
for t in range(env.state.n_targets):
# plot particles
plot_theta = np.radians(belief[:, t, 1])
plot_r = belief[:, t, 0]
particles_x, particles_y = pol2cart(
belief[:, t, 0], np.radians(belief[:, t, 1]))
ax.plot(particles_x, particles_y, 'o',
color=color_array[t][0], markersize=4, markeredgecolor='black', label='particles', alpha=0.3, zorder=1)
# plot targets
plot_x_theta = np.radians(xp[t, 1])
plot_x_r = xp[t, 0]
ax.set_xlim(min_map, max_map)
ax.set_ylim(min_map, max_map)
sensor_x, sensor_y = pol2cart(
self.sensor_hist[-1][0], np.radians(self.sensor_hist[-1][1]))
if 7 in selected_plots:
plot_count += 1
if 7 not in axs:
axs[7] = fig.add_subplot(1, len(selected_plots), plot_count)
ax = axs[7]
ax.clear()
lines = [] # https://matplotlib.org/3.5.0/api/_as_gen/matplotlib.pyplot.legend.html
for t in range(env.state.n_targets):
particles_x, particles_y = pol2cart(
abs_particles[:, t, 0], np.radians(abs_particles[:, t, 1]))
                centroid_x = np.mean(particles_x)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : <NAME>
# E-mail : <EMAIL>
# Description:
# Date : 05/08/2018 6:04 PM
# File Name : kinect2grasp_python2.py
# Note: this file is inspired by PyntCloud
# Reference web: https://github.com/daavoo/pyntcloud
import numpy as np
from scipy.spatial import cKDTree
from numba import jit
is_numba_avaliable = True
@jit
def groupby_count(xyz, indices, out):
for i in range(xyz.shape[0]):
out[indices[i]] += 1
return out
@jit
def groupby_sum(xyz, indices, N, out):
for i in range(xyz.shape[0]):
out[indices[i]] += xyz[i][N]
return out
@jit
def groupby_max(xyz, indices, N, out):
for i in range(xyz.shape[0]):
if xyz[i][N] > out[indices[i]]:
out[indices[i]] = xyz[i][N]
return out
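# Added minimal sketch of how the numba groupby helpers are meant to be used:
# `indices` assigns each point to a voxel id, and the helpers accumulate
# counts / maxima per voxel id into a pre-allocated `out` array. The sample
# points are arbitrary.
def _example_groupby_helpers():
    xyz = np.array([[0.1, 0.2, 0.3],
                    [0.4, 0.5, 0.6],
                    [0.7, 0.8, 0.9]])
    indices = np.array([0, 0, 1])        # first two points fall in voxel 0
    counts = groupby_count(xyz, indices, np.zeros(2))
    z_max = groupby_max(xyz, indices, 2, np.zeros(2))
    return counts, z_max                 # -> [2., 1.] and [0.6, 0.9]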
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
class VoxelGrid:
def __init__(self, points, n_x=1, n_y=1, n_z=1, size_x=None, size_y=None, size_z=None, regular_bounding_box=True):
"""Grid of voxels with support for different build methods.
Parameters
----------
points: (N, 3) numpy.array
n_x, n_y, n_z : int, optional
Default: 1
The number of segments in which each axis will be divided.
Ignored if corresponding size_x, size_y or size_z is not None.
size_x, size_y, size_z : float, optional
Default: None
The desired voxel size along each axis.
If not None, the corresponding n_x, n_y or n_z will be ignored.
regular_bounding_box : bool, optional
Default: True
If True, the bounding box of the point cloud will be adjusted
in order to have all the dimensions of equal length.
"""
self._points = points
self.x_y_z = [n_x, n_y, n_z]
self.sizes = [size_x, size_y, size_z]
self.regular_bounding_box = regular_bounding_box
def compute(self):
xyzmin = self._points.min(0)
xyzmax = self._points.max(0)
if self.regular_bounding_box:
#: adjust to obtain a minimum bounding box with all sides of equal length
margin = max(xyzmax - xyzmin) - (xyzmax - xyzmin)
xyzmin = xyzmin - margin / 2
xyzmax = xyzmax + margin / 2
for n, size in enumerate(self.sizes):
if size is None:
continue
margin = (((self._points.ptp(0)[n] // size) + 1) * size) - self._points.ptp(0)[n]
xyzmin[n] -= margin / 2
xyzmax[n] += margin / 2
self.x_y_z[n] = ((xyzmax[n] - xyzmin[n]) / size).astype(int)
self.xyzmin = xyzmin
self.xyzmax = xyzmax
segments = []
shape = []
for i in range(3):
# note the +1 in num
s, step = np.linspace(xyzmin[i], xyzmax[i], num=(self.x_y_z[i] + 1), retstep=True)
segments.append(s)
shape.append(step)
self.segments = segments
self.shape = shape
self.n_voxels = self.x_y_z[0] * self.x_y_z[1] * self.x_y_z[2]
self.id = "V({},{},{})".format(self.x_y_z, self.sizes, self.regular_bounding_box)
# find where each point lies in corresponding segmented axis
# -1 so index are 0-based; clip for edge cases
self.voxel_x = np.clip(np.searchsorted(self.segments[0], self._points[:, 0]) - 1, 0, self.x_y_z[0])
self.voxel_y = np.clip(np.searchsorted(self.segments[1], self._points[:, 1]) - 1, 0, self.x_y_z[1])
self.voxel_z = np.clip(np.searchsorted(self.segments[2], self._points[:, 2]) - 1, 0, self.x_y_z[2])
self.voxel_n = np.ravel_multi_index([self.voxel_x, self.voxel_y, self.voxel_z], self.x_y_z)
# compute center of each voxel
midsegments = [(self.segments[i][1:] + self.segments[i][:-1]) / 2 for i in range(3)]
self.voxel_centers = cartesian(midsegments).astype(np.float32)
def query(self, points):
"""ABC API. Query structure.
TODO Make query_voxelgrid an independent function, and add a light
save mode where only segments and x_y_z are saved.
"""
voxel_x = np.clip(np.searchsorted(
self.segments[0], points[:, 0]) - 1, 0, self.x_y_z[0])
voxel_y = np.clip(np.searchsorted(
self.segments[1], points[:, 1]) - 1, 0, self.x_y_z[1])
voxel_z = np.clip(np.searchsorted(
self.segments[2], points[:, 2]) - 1, 0, self.x_y_z[2])
voxel_n = np.ravel_multi_index([voxel_x, voxel_y, voxel_z], self.x_y_z)
return voxel_n
def get_feature_vector(self, mode="binary"):
"""Return a vector of size self.n_voxels. See mode options below.
Parameters
----------
mode: str in available modes. See Notes
Default "binary"
Returns
-------
feature_vector: [n_x, n_y, n_z] ndarray
See Notes.
Notes
-----
Available modes are:
binary
0 for empty voxels, 1 for occupied.
density
number of points inside voxel / total number of points.
TDF
Truncated Distance Function. Value between 0 and 1 indicating the distance
between the voxel's center and the closest point. 1 on the surface,
0 on voxels further than 2 * voxel side.
x_max, y_max, z_max
Maximum coordinate value of points inside each voxel.
x_mean, y_mean, z_mean
Mean coordinate value of points inside each voxel.
"""
vector = np.zeros(self.n_voxels)
if mode == "binary":
vector[np.unique(self.voxel_n)] = 1
elif mode == "density":
count = np.bincount(self.voxel_n)
vector[:len(count)] = count
vector /= len(self.voxel_n)
elif mode == "TDF":
# truncation = np.linalg.norm(self.shape)
kdt = cKDTree(self._points)
vector, i = kdt.query(self.voxel_centers, n_jobs=-1)
elif mode.endswith("_max"):
if not is_numba_avaliable:
raise ImportError("numba is required to compute {}".format(mode))
axis = {"x_max": 0, "y_max": 1, "z_max": 2}
vector = groupby_max(self._points, self.voxel_n, axis[mode], vector)
elif mode.endswith("_mean"):
if not is_numba_avaliable:
raise ImportError("numba is required to compute {}".format(mode))
axis = {"x_mean": 0, "y_mean": 1, "z_mean": 2}
voxel_sum = groupby_sum(self._points, self.voxel_n, axis[mode], np.zeros(self.n_voxels))
voxel_count = groupby_count(self._points, self.voxel_n, np.zeros(self.n_voxels))
vector = np.nan_to_num(voxel_sum / voxel_count)
else:
raise NotImplementedError("{} is not a supported feature vector mode".format(mode))
return vector.reshape(self.x_y_z)
def get_voxel_neighbors(self, voxel):
"""Get valid, non-empty 26 neighbors of voxel.
Parameters
----------
voxel: int in self.set_voxel_n
Returns
-------
neighbors: list of int
Indices of the valid, non-empty 26 neighborhood around voxel.
"""
        x, y, z = np.unravel_index(voxel, self.x_y_z)
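# Added usage sketch for VoxelGrid (illustrative, not original code): voxelize
# a random point cloud and pull out a binary occupancy volume.
def _example_voxelgrid():
    points = np.random.rand(1000, 3)
    grid = VoxelGrid(points, n_x=8, n_y=8, n_z=8)
    grid.compute()                               # builds segments and voxel ids
    occupancy = grid.get_feature_vector("binary")
    assert occupancy.shape == (8, 8, 8)
    return occupancy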
import clustering
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
import dionysus as dion
import random
def plot_all(data, diagrams):
fig = plt.figure(figsize=(20, 10))
for i in range(len(data)):
num = 241 + i
ax = plt.subplot(num)
plt.scatter(data[i][:, 0], data[i][:, 1])
ax = plt.subplot(num + 4)
plot_diagram(diagrams[i], ax, lims=[0, 1.5, 0, 1.75])
fig.suptitle("Datasets with corresponding persistence diagrams")
plt.show()
def compute_diagrams(data):
diagrams = []
for i in range(len(data)):
print("Processing data: " + str(i))
filtration = dion.fill_rips(data[i], 2, 3.0)
homology = dion.homology_persistence(filtration)
diagram = dion.init_diagrams(homology, filtration)
diagrams.append(diagram[1])
print()
return diagrams
def plot_clusters(M):
plt.scatter(M[0].T[0], M[0].T[1], c='r', label='Rings')
plt.scatter(M[1].T[0], M[1].T[1], c='b', label='Noise')
plt.xlim([0, 1.5])
plt.ylim([0, 1.75])
plt.plot([0.1, 1.2], [0.1, 1.2])
plt.legend()
plt.title("Persistence Diagram Cluster Centres")
plt.show()
def gen_data(seed, noise=0.05, n_samples=100):
print("\nGenerating data...\n")
    np.random.seed(seed)
# REFS Some (most) of those functions come from the keras library (https://github.com/fchollet/keras)
# Some are modified to add output images and output centerline
# keras.preprocessing.image: flip_axis, random_channel_shift, apply_transform, transform_matrix_offset_center, ApplyRandomTransformations
import time
import numpy as np
import random
import scipy as sp
import scipy.interpolate
import scipy.ndimage
import scipy.ndimage.interpolation
from NnetsX import IS_CHANNELS_FIRST
# from File import SavePickle
INTENSITY_FACTOR = 0.2
VECTOR_FIELD_SIGMA = 5. # in pixel
ROTATION_FACTOR = 10 # degree
TRANSLATION_FACTOR = 0.2 # proportion of the image size
SHEAR_FACTOR = 2*np.pi/180 # in radian
ZOOM_FACTOR = 0.1
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
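# Added example: flip_axis() reverses an array along one axis, which is how
# horizontal/vertical flips are applied to channel-first image tensors.
def _example_flip_axis():
    x = np.arange(6).reshape(1, 2, 3)   # (channel, row, col)
    flipped = flip_axis(x, axis=2)      # horizontal flip
    assert np.array_equal(flipped[0, 0], np.array([2, 1, 0]))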
def random_channel_shift(x, intensity, channel_index=0):
x = np.rollaxis(x, channel_index, 0)
min_x, max_x = np.min(x), np.max(x)
shift = np.random.uniform(-intensity, intensity) # TODO add a choice if we want the same shift for all channels
channel_images = [np.clip(x_channel + shift, min_x, max_x)
for x_channel in x]
# channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
# for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index+1)
return x
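# Added sketch of random_channel_shift() usage: as written above, the same
# random offset is added to every channel and the result is clipped back to
# the original min/max range of the input. The image size is arbitrary.
def _example_random_channel_shift():
    x = np.random.rand(3, 8, 8)                          # channel-first image
    shifted = random_channel_shift(x, intensity=INTENSITY_FACTOR)
    assert shifted.shape == x.shape
    assert shifted.min() >= x.min() and shifted.max() <= x.max()
    return shifted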
def apply_transform(x, transform_matrix, channel_index=0, fill_mode='nearest', cval=0.):
x = np.rollaxis(x, channel_index, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [sp.ndimage.interpolation.affine_transform(x_channel, final_affine_matrix,
final_offset, order=0, mode=fill_mode, cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index+1)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def ApplyRandomTransformations(_x, _y, _pts, _trans, _rot, _zoom, _shear, _elastix, _row_index=1, _col_index=2, _channel_index=0, _fill_mode='constant', _cval=0.):
if _elastix != 0:
sigma = _elastix # in pixel
kernelSize = 3
sizeAll = kernelSize + 2
imgShape = (_x.shape[1], _x.shape[2])
# create the indices of the 5x5 vector field (fieldPts.shape = (25,2))
fieldPts = np.mgrid[0.:1.:complex(sizeAll), 0.:1.:complex(sizeAll)].swapaxes(0,2).swapaxes(0,1).reshape((sizeAll*sizeAll, 2))
# create the displacement (x and y) of the 5x5 vector field (border have no displacement so it's 0) (displacementX.shape = (25))
displacementX = np.zeros((sizeAll*sizeAll))
displacementY = np.zeros((sizeAll*sizeAll))
for i in range(0, sizeAll*sizeAll):
if fieldPts[i][0] != 0. and fieldPts[i][0] != 1. \
and fieldPts[i][1] != 0. and fieldPts[i][1] != 1.:
displacementX[i] = np.random.normal(0, sigma, 1)
displacementY[i] = np.random.normal(0, sigma, 1)
# transform the indice of the 5x5 vector field in the image coordinate system (TODO WARNING works only with square images)
fieldPts = fieldPts*imgShape[0] # TODO check if it's not imgShape[0] - 1?
# create the indices of all pixels in the image (gridX.shape = (1024,1024))
gridX, gridY = np.mgrid[0.:(imgShape[0] - 1):complex(imgShape[0]), 0.:(imgShape[1] - 1):complex(imgShape[1])]
# interpolate the vector field for every pixels in the image (dxGrid.shape = (1024,1024))
dxGrid = scipy.interpolate.griddata(fieldPts, displacementX, (gridX, gridY), method='cubic')
dyGrid = scipy.interpolate.griddata(fieldPts, displacementY, (gridX, gridY), method='cubic')
# apply the displacement on every pixels (indices = [indices.shape[0] = 1024*1024, indices.shape[1] = 1024*1024])
indices = np.reshape(gridY + dyGrid, (-1, 1)), np.reshape(gridX + dxGrid, (-1, 1))
for chan in range(_x.shape[0]):
_x[chan] = scipy.ndimage.interpolation.map_coordinates(_x[chan], indices, order=2, mode='reflect').reshape(imgShape)
_x[chan] = np.clip(_x[chan], 0., 1.)
if _y is not None:
for chan in range(_y.shape[0]):
_y[chan] = scipy.ndimage.interpolation.map_coordinates(_y[chan], indices, order=2, mode='reflect').reshape(imgShape)
_y[chan] = np.clip(_y[chan], 0., 1.)
#if _pts is not None:
matrix = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
if _rot != 0:
theta = np.pi/180*np.random.uniform(-_rot, _rot)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
matrix = np.dot(matrix, rotation_matrix)
if _trans != 0:
ty = np.random.uniform(-_trans, _trans)*_x.shape[_row_index]
tx = np.random.uniform(-_trans, _trans)*_x.shape[_col_index]
translation_matrix = np.array([[1, 0, ty],
[0, 1, tx],
[0, 0, 1]])
matrix = np.dot(matrix, translation_matrix)
if _shear != 0:
shear = np.random.uniform(-_shear, _shear)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
matrix = np.dot(matrix, shear_matrix)
if _zoom != 0:
zx, zy = np.random.uniform(1 - _zoom, 1 + _zoom, 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
matrix = np.dot(matrix, zoom_matrix)
h, w = _x.shape[_row_index], _x.shape[_col_index]
transformMatrix = transform_matrix_offset_center(matrix, h, w)
_x = apply_transform(_x, transformMatrix, _channel_index, _fill_mode, _cval)
if _y is not None:
_y = apply_transform(_y, transformMatrix, _channel_index, _fill_mode, _cval)
if _pts is not None:
matrix = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
if _rot != 0:
            rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                        [np.sin(theta), np.cos(theta), 0],
                                        [0, 0, 1]])
import os
import datetime
from dateutil.tz import tzlocal
import h5py as h5
import numpy as np
from warp import getselfe, getphi, getb, geta
class FieldDiagnostic(object):
"""
Common functionality for field diagnostic classes
Parameters:
solver: A solver object containing fields to be output.
top: The object representing Warp's top package.
w3d: The object representing Warp's w3d package.
comm_world: Object representing an MPI communicator.
period (int): Sets the period in steps of data writeout by the diagnostic.
Defaults to writeout on every step if not set.
write_dir (str): Relative path to place data output of the diagnostic.
Defaults to 'diags/fields/electric' for electric fields/potentials, and 'diags/fields/magnetic'
for magnetic fields/vector potentials if not set.
"""
def __init__(self, solver, top, w3d, comm_world, period=None, write_dir=None):
self.solver = solver
self.top = top
self.w3d = w3d
self.comm_world = comm_world
if self.comm_world is None:
self.lparallel = 0
else:
self.lparallel = comm_world.Get_size()
self.period = period
if not write_dir:
self.write_dir = None
else:
self.write_dir = write_dir
self.geometryParameters = ''
if self.solver.solvergeom == self.w3d.XYZgeom:
self.geometry = 'cartesian'
self.dims = ['x', 'y', 'z']
self.gridsize = [self.solver.nx + 1, self.solver.ny + 1, self.solver.nz + 1]
self.gridSpacing = [self.solver.dx, self.solver.dy, self.solver.dz]
self.gridGlobalOffset = [self.solver.xmmin, self.solver.ymmin, self.solver.zmmin]
self.mesh = [self.solver.xmesh, self.solver.ymesh, self.solver.zmesh]
elif self.solver.solvergeom == self.w3d.XZgeom:
self.geometry = 'cartesian2D'
self.dims = ['x', 'y', 'z']
self.gridsize = [self.solver.nx + 1, self.solver.nz + 1]
self.gridSpacing = [self.solver.dx, self.solver.dz]
self.gridGlobalOffset = [self.solver.xmmin, self.solver.zmmin]
self.mesh = [self.solver.xmesh, self.solver.zmesh]
elif self.solver.solvergeom == self.w3d.RZgeom:
self.geometry = 'thetaMode'
self.geometryParameters = 'm=0'
self.dims = ['r', 't', 'z']
self.gridsize = [self.solver.nx + 1, self.solver.nz + 1]
self.gridSpacing = [self.solver.dx, self.solver.dz]
self.gridGlobalOffset = [self.solver.xmmin, self.solver.zmmin]
self.mesh = [self.solver.xmesh, self.solver.zmesh]
else:
raise Exception("No handler for geometry type %i" % self.solver.solvergeom)
def write(self, write_dir=None):
if self.period and self.top.it % self.period != 0:
return False
if write_dir is None:
write_dir = self.write_dir
if not os.path.lexists(write_dir):
if self.lparallel == 0 or self.comm_world.rank == 0:
os.makedirs(write_dir)
step = str(self.top.it)
filename = '%s/data%s.h5' % (write_dir, step.zfill(5))
if self.lparallel == 0 or self.comm_world.rank == 0:
f = h5.File(filename, 'w')
# for i, v in enumerate(self.mesh):
# f['/data/meshes/mesh/%s' % self.dims[i]] = v
# f['/data/meshes/mesh'].attrs['geometry'] = self.geometry
# f['/data/meshes/mesh'].attrs['geometryParameters'] = self.geometryParameters
# from warp.data_dumping.openpmd_diag.generic_diag
# This header information is from https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#hierarchy-of-the-data-file
f.attrs["openPMD"] = np.string_("1.0.0")
f.attrs["openPMDextension"] = np.uint32(1)
f.attrs["software"] = np.string_("warp")
f.attrs["softwareVersion"] = np.string_("4")
f.attrs["date"] = np.string_(
datetime.datetime.now(tzlocal()).strftime('%Y-%m-%d %H:%M:%S %z'))
f.attrs["meshesPath"] = np.string_("meshes/")
f.attrs["particlesPath"] = np.string_("particles/")
# Setup the basePath
f.attrs["basePath"] = np.string_("/data/%T/")
base_path = "/data/%d/" % self.top.it
bp = f.require_group(base_path)
# https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#required-attributes-for-the-basepath
bp.attrs["time"] = self.top.time
bp.attrs["dt"] = self.top.dt
bp.attrs["timeUnitSI"] = 1.
# https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#iterations-and-time-series
f.attrs["iterationEncoding"] = np.string_("fileBased")
f.attrs["iterationFormat"] = | np.string_("%s%%T.h5" % write_dir) | numpy.string_ |
import pytest
import numpy as np
from pyrsistent import InvariantException
# noinspection PyProtectedMember
from performance_curves._utils import synchronize_sort, get_bin_sizes, SynchronizedArrays
@pytest.fixture
def synced_arrays() -> SynchronizedArrays:
return SynchronizedArrays(arrays={"a": np.array([1, 2, 3]), "b": np.array([5, 4, 3])})
class TestSynchronizedArrays:
def test_invariant(self):
with pytest.raises(InvariantException):
SynchronizedArrays(arrays={"a": np.arange(1), "b": np.arange(2)})
def test_sort(self, synced_arrays: SynchronizedArrays):
arrays = synced_arrays.sort("a", descending=True).arrays
assert np.array_equiv(arrays["a"], np.array([3, 2, 1]))
assert np.array_equiv(arrays["b"], np.array([3, 4, 5]))
arrays = SynchronizedArrays(arrays={"a": np.array([]), "b": np.array([])}).sort("b").arrays
assert np.array_equiv(arrays["a"], np.array([]))
assert np.array_equiv(arrays["b"], np.array([]))
def test_filter(self, synced_arrays: SynchronizedArrays):
arrays = synced_arrays.filter("b", lambda x: x == 5).arrays
assert np.array_equiv(arrays["a"], np.array([1]))
assert np.array_equiv(arrays["b"], np.array([5]))
arrays = SynchronizedArrays(arrays={"a": np.array([]), "b": np.array([])}).filter("b", lambda x: x == 1).arrays
assert np.array_equiv(arrays["a"], np.array([]))
assert np.array_equiv(arrays["b"], np.array([]))
def test_synchronize_sort():
# Given a key and a dependent array of equal length
key = np.array([0.1, 0.4, 0.91, 0.8, 0.05])
dependent = np.array([0, 0, 1, 1, 0])
sorted_key, sorted_dependent = synchronize_sort(key, dependent, descending=True)
np.testing.assert_array_equal(sorted_key, np.array([0.91, 0.8, 0.4, 0.1, 0.05]))
np.testing.assert_array_equal(sorted_dependent, np.array([1, 1, 0, 0, 0]))
# Given a key and a dependent array where the former is longer than the latter
key = np.array([0.1, 0.4, 0.91])
dependent = np.array([0, 0, 1, 1, 0])
sorted_key, sorted_dependent = synchronize_sort(key, dependent, descending=True)
np.testing.assert_array_equal(sorted_key, np.array([0.91, 0.4, 0.1]))
np.testing.assert_array_equal(sorted_dependent, np.array([1, 0, 0]))
# Given a key and a dependent array where the former is shorter than the latter
key = np.array([0.1, 0.4, 0.91, 0.8, 0.05])
dependent = np.array([0, 1])
with np.testing.assert_raises(IndexError):
synchronize_sort(key, dependent, descending=True)
# Given an empty key array and a non-empty dependent array
key = np.array([])
dependent = np.array([0, 0, 1, 1, 0])
sorted_key, sorted_dependent = synchronize_sort(key, dependent, descending=True)
assert len(sorted_key) == 0
assert len(sorted_dependent) == 0
# Given a non-empty key array and an empty dependent array
key = np.array([0.1, 0.4, 0.91])
dependent = np.array([])
with np.testing.assert_raises(IndexError):
synchronize_sort(key, dependent, descending=True)
# Given two empty arrays
key = np.array([])
dependent = | np.array([]) | numpy.array |
import pytest
import numpy as np
from qtpy.QtCore import Qt
from qtpy.QtGui import QColor, QBrush, QPen
from pytilemap.functions import makeColorFromInts, makeColorFromFloats, makeColorFromStr, \
makeColorFromList, makeColorFromNdArray, makeColor, makeBrush, makePen, clip
SolidLine = Qt.SolidLine
DashLine = Qt.DashLine
SolidPattern = Qt.SolidPattern
NoBrush = Qt.NoBrush
Dense1Pattern = Qt.Dense1Pattern
COLOR_ARG_LIST_INT = [(1, 2, 3), (1, 2, 3, 4), [1, 2, 3]]
COLOR_ARG_LIST_INT_REF = [QColor(1, 2, 3), QColor(1, 2, 3, 4), QColor(1, 2, 3)]
COLOR_ARG_LIST_FLOAT = [(0.1, 0.2, 0.3), (0.1, 0.2, 0.3, 0.4), [0.1, 0.2, 0.3]]
COLOR_ARG_LIST_FLOAT_REF = [QColor(0.1 * 255, 0.2 * 255, 0.3 * 255),
QColor(0.1 * 255, 0.2 * 255, 0.3 * 255, 0.4 * 255),
QColor(0.1 * 255, 0.2 * 255, 0.3 * 255)]
COLOR_ARG_LIST_STR = ['#FFAA11', 'green']
COLOR_ARG_LIST_STR_REF = [QColor(255, 170, 17), QColor(0, 128, 0)]
COLOR_ARG_NDARRAY_INT3 = np.asarray([[1, 2, 3], [4, 5, 6]], dtype=np.int64)
COLOR_ARG_NDARRAY_INT3_REF = [QColor(1, 2, 3), QColor(4, 5, 6)]
COLOR_ARG_NDARRAY_INT4 = | np.asarray([[1, 2, 3, 4], [4, 5, 6, 7]], dtype=np.int64) | numpy.asarray |
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: Simplified BSD
import numpy as np
from scipy import linalg
from ..source_estimate import (SourceEstimate, VolSourceEstimate,
_BaseSourceEstimate)
from ..minimum_norm.inverse import (combine_xyz, _prepare_forward,
_check_reference)
from ..forward import is_fixed_orient
from ..io.pick import pick_channels_evoked
from ..io.proj import deactivate_proj
from ..utils import logger, verbose, _check_depth
from ..dipole import Dipole
from .mxne_optim import (mixed_norm_solver, iterative_mixed_norm_solver, _Phi,
norm_l2inf, tf_mixed_norm_solver, norm_epsilon_inf)
@verbose
def _prepare_weights(forward, gain, source_weighting, weights, weights_min):
mask = None
if isinstance(weights, _BaseSourceEstimate):
weights = np.max(np.abs(weights.data), axis=1)
weights_max = np.max(weights)
if weights_min > weights_max:
raise ValueError('weights_min > weights_max (%s > %s)' %
(weights_min, weights_max))
weights_min = weights_min / weights_max
weights = weights / weights_max
n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
weights = np.ravel(np.tile(weights, [n_dip_per_pos, 1]).T)
if len(weights) != gain.shape[1]:
raise ValueError('weights do not have the correct dimension '
' (%d != %d)' % (len(weights), gain.shape[1]))
if len(source_weighting.shape) == 1:
source_weighting *= weights
else:
source_weighting *= weights[:, None]
gain *= weights[None, :]
if weights_min is not None:
mask = (weights > weights_min)
gain = gain[:, mask]
n_sources = np.sum(mask) // n_dip_per_pos
logger.info("Reducing source space to %d sources" % n_sources)
return gain, source_weighting, mask
def _prepare_gain(forward, info, noise_cov, pca, depth, loose, rank,
weights=None, weights_min=None):
depth = _check_depth(depth, 'depth_sparse')
forward, gain_info, gain, _, _, source_weighting, _, _, whitener = \
_prepare_forward(forward, info, noise_cov, 'auto', loose, rank, pca,
use_cps=True, **depth)
if weights is None:
mask = None
else:
gain, source_weighting, mask = _prepare_weights(
forward, gain, source_weighting, weights, weights_min)
return forward, gain, gain_info, whitener, source_weighting, mask
def _reapply_source_weighting(X, source_weighting, active_set):
X *= source_weighting[active_set][:, None]
return X
def _compute_residual(forward, evoked, X, active_set, info):
# OK, picking based on row_names is safe
sel = [forward['sol']['row_names'].index(c) for c in info['ch_names']]
residual = evoked.copy()
residual = pick_channels_evoked(residual, include=info['ch_names'])
r_tmp = residual.copy()
r_tmp.data = np.dot(forward['sol']['data'][sel, :][:, active_set], X)
# Take care of proj
active_projs = list()
non_active_projs = list()
for p in evoked.info['projs']:
if p['active']:
active_projs.append(p)
else:
non_active_projs.append(p)
if len(active_projs) > 0:
r_tmp.info['projs'] = deactivate_proj(active_projs, copy=True)
r_tmp.apply_proj()
r_tmp.add_proj(non_active_projs, remove_existing=False)
residual.data -= r_tmp.data
return residual
@verbose
def _make_sparse_stc(X, active_set, forward, tmin, tstep,
active_is_idx=False, verbose=None):
if not is_fixed_orient(forward):
logger.info('combining the current components...')
X = combine_xyz(X)
if not active_is_idx:
active_idx = np.where(active_set)[0]
else:
active_idx = active_set
n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
if n_dip_per_pos > 1:
active_idx = np.unique(active_idx // n_dip_per_pos)
src = forward['src']
if src.kind != 'surface':
vertices = src[0]['vertno'][active_idx]
stc = VolSourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep)
else:
vertices = []
n_points_so_far = 0
for this_src in src:
this_n_points_so_far = n_points_so_far + len(this_src['vertno'])
this_active_idx = active_idx[(n_points_so_far <= active_idx) &
(active_idx < this_n_points_so_far)]
this_active_idx -= n_points_so_far
this_vertno = this_src['vertno'][this_active_idx]
n_points_so_far = this_n_points_so_far
vertices.append(this_vertno)
stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep)
return stc
@verbose
def _make_dipoles_sparse(X, active_set, forward, tmin, tstep, M, M_est,
active_is_idx=False, verbose=None):
times = tmin + tstep * np.arange(X.shape[1])
if not active_is_idx:
active_idx = np.where(active_set)[0]
else:
active_idx = active_set
n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
if n_dip_per_pos > 1:
active_idx = np.unique(active_idx // n_dip_per_pos)
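    # Goodness of fit per time point, in percent: 100 * (1 - ||M - M_est||^2 / ||M||^2),
    # computed column-wise over channels and left at zero where the data norm vanishes.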
gof = np.zeros(M_est.shape[1])
M_norm2 = np.sum(M ** 2, axis=0)
R_norm2 = np.sum((M - M_est) ** 2, axis=0)
gof[M_norm2 > 0.0] = 1. - R_norm2[M_norm2 > 0.0] / M_norm2[M_norm2 > 0.0]
gof *= 100.
dipoles = []
for k, i_dip in enumerate(active_idx):
i_pos = forward['source_rr'][i_dip][np.newaxis, :]
i_pos = i_pos.repeat(len(times), axis=0)
X_ = X[k * n_dip_per_pos: (k + 1) * n_dip_per_pos]
if n_dip_per_pos == 1:
amplitude = X_[0]
i_ori = forward['source_nn'][i_dip][np.newaxis, :]
i_ori = i_ori.repeat(len(times), axis=0)
else:
if forward['surf_ori']:
X_ = np.dot(forward['source_nn'][
i_dip * n_dip_per_pos:(i_dip + 1) * n_dip_per_pos].T, X_)
amplitude = np.sqrt(np.sum(X_ ** 2, axis=0))
i_ori = np.zeros((len(times), 3))
i_ori[amplitude > 0.] = (X_[:, amplitude > 0.] /
amplitude[amplitude > 0.]).T
dipoles.append(Dipole(times, i_pos, amplitude, i_ori, gof))
return dipoles
@verbose
def make_stc_from_dipoles(dipoles, src, verbose=None):
"""Convert a list of spatio-temporal dipoles into a SourceEstimate.
Parameters
----------
dipoles : Dipole | list of instances of Dipole
The dipoles to convert.
src : instance of SourceSpaces
The source space used to generate the forward operator.
%(verbose)s
Returns
-------
stc : SourceEstimate
The source estimate.
"""
logger.info('Converting dipoles into a SourceEstimate.')
if isinstance(dipoles, Dipole):
dipoles = [dipoles]
if not isinstance(dipoles, list):
raise ValueError('Dipoles must be an instance of Dipole or '
'a list of instances of Dipole. '
'Got %s!' % type(dipoles))
tmin = dipoles[0].times[0]
tstep = dipoles[0].times[1] - tmin
X = np.zeros((len(dipoles), len(dipoles[0].times)))
source_rr = np.concatenate([_src['rr'][_src['vertno'], :] for _src in src],
axis=0)
n_lh_points = len(src[0]['vertno'])
lh_vertno = list()
rh_vertno = list()
for i in range(len(dipoles)):
if not np.all(dipoles[i].pos == dipoles[i].pos[0]):
raise ValueError('Only dipoles with fixed position over time '
'are supported!')
X[i] = dipoles[i].amplitude
idx = np.all(source_rr == dipoles[i].pos[0], axis=1)
idx = np.where(idx)[0][0]
if idx < n_lh_points:
lh_vertno.append(src[0]['vertno'][idx])
else:
rh_vertno.append(src[1]['vertno'][idx - n_lh_points])
vertices = [np.array(lh_vertno).astype(int),
np.array(rh_vertno).astype(int)]
stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep,
subject=src._subject)
logger.info('[done]')
return stc
@verbose
def mixed_norm(evoked, forward, noise_cov, alpha, loose='auto', depth=0.8,
maxit=3000, tol=1e-4, active_set_size=10,
debias=True, time_pca=True, weights=None, weights_min=0.,
solver='auto', n_mxne_iter=1, return_residual=False,
return_as_dipoles=False, dgap_freq=10, rank=None,
verbose=None):
"""Mixed-norm estimate (MxNE) and iterative reweighted MxNE (irMxNE).
Compute L1/L2 mixed-norm solution [1]_ or L0.5/L2 [2]_ mixed-norm
solution on evoked data.
Parameters
----------
evoked : instance of Evoked or list of instances of Evoked
Evoked data to invert.
forward : dict
Forward operator.
noise_cov : instance of Covariance
Noise covariance to compute whitener.
alpha : float in range [0, 100)
        Regularization parameter. 0 means no regularization; 100 would give no
        active dipoles.
loose : float in [0, 1] | 'auto'
Value that weights the source variances of the dipole components
that are parallel (tangential) to the cortical surface. If loose
is 0 then the solution is computed with fixed orientation.
If loose is 1, it corresponds to free orientations.
The default value ('auto') is set to 0.2 for surface-oriented source
space and set to 1.0 for volumic or discrete source space.
%(depth)s
maxit : int
Maximum number of iterations.
tol : float
Tolerance parameter.
active_set_size : int | None
Size of active set increment. If None, no active set strategy is used.
debias : bool
Remove coefficient amplitude bias due to L1 penalty.
time_pca : bool or int
        If True, the rank of the concatenated epochs is reduced to
        its true dimension. If an int, the rank is limited to this value.
weights : None | array | SourceEstimate
Weight for penalty in mixed_norm. Can be None, a
1d array with shape (n_sources,), or a SourceEstimate (e.g. obtained
with wMNE, dSPM, or fMRI).
weights_min : float
Do not consider in the estimation sources for which weights
is less than weights_min.
solver : 'prox' | 'cd' | 'bcd' | 'auto'
The algorithm to use for the optimization. 'prox' stands for
proximal iterations using the FISTA algorithm, 'cd' uses
coordinate descent, and 'bcd' applies block coordinate descent.
'cd' is only available for fixed orientation.
n_mxne_iter : int
The number of MxNE iterations. If > 1, iterative reweighting
is applied.
return_residual : bool
If True, the residual is returned as an Evoked instance.
return_as_dipoles : bool
If True, the sources are returned as a list of Dipole instances.
dgap_freq : int or np.inf
The duality gap is evaluated every dgap_freq iterations. Ignored if
solver is 'cd'.
%(rank_None)s
.. versionadded:: 0.18
%(verbose)s
Returns
-------
stc : SourceEstimate | list of SourceEstimate
Source time courses for each evoked data passed as input.
residual : instance of Evoked
The residual a.k.a. data not explained by the sources.
Only returned if return_residual is True.
See Also
--------
tf_mixed_norm
References
----------
.. [1] <NAME>, <NAME>, <NAME>,
"Mixed-norm estimates for the M/EEG inverse problem using accelerated
gradient methods", Physics in Medicine and Biology, 2012.
https://doi.org/10.1088/0031-9155/57/7/1937
.. [2] <NAME>, <NAME>, <NAME>, <NAME>,
"The Iterative Reweighted Mixed-Norm Estimate for Spatio-Temporal
MEG/EEG Source Reconstruction", IEEE Transactions of Medical Imaging,
Volume 35 (10), pp. 2218-2228, 2016.
"""
if not (0. <= alpha < 100.):
raise ValueError('alpha must be in [0, 100). '
'Got alpha = %s' % alpha)
if n_mxne_iter < 1:
raise ValueError('MxNE has to be computed at least 1 time. '
'Requires n_mxne_iter >= 1, got %d' % n_mxne_iter)
if dgap_freq <= 0.:
raise ValueError('dgap_freq must be a positive integer.'
' Got dgap_freq = %s' % dgap_freq)
pca = True
if not isinstance(evoked, list):
evoked = [evoked]
_check_reference(evoked[0])
all_ch_names = evoked[0].ch_names
if not all(all_ch_names == evoked[i].ch_names
for i in range(1, len(evoked))):
raise Exception('All the datasets must have the same good channels.')
forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
forward, evoked[0].info, noise_cov, pca, depth, loose, rank,
weights, weights_min)
sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
M = np.concatenate([e.data[sel] for e in evoked], axis=1)
# Whiten data
logger.info('Whitening data matrix.')
M = np.dot(whitener, M)
if time_pca:
U, s, Vh = linalg.svd(M, full_matrices=False)
if not isinstance(time_pca, bool) and isinstance(time_pca, int):
U = U[:, :time_pca]
s = s[:time_pca]
Vh = Vh[:time_pca]
M = U * s
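        # M is now the PCA-compressed data (U * s); Vh is multiplied back onto the
        # source estimates after the solver runs (see the X = np.dot(X, Vh) step below).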
# Scaling to make setting of alpha easy
n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
alpha_max *= 0.01
gain /= alpha_max
source_weighting /= alpha_max
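    # After this rescaling, alpha is interpreted as a percentage of alpha_max, i.e. of
    # the smallest regularization that would zero out every source.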
if n_mxne_iter == 1:
X, active_set, E = mixed_norm_solver(
M, gain, alpha, maxit=maxit, tol=tol,
active_set_size=active_set_size, n_orient=n_dip_per_pos,
debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose)
else:
X, active_set, E = iterative_mixed_norm_solver(
M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol,
n_orient=n_dip_per_pos, active_set_size=active_set_size,
debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose)
if time_pca:
X = np.dot(X, Vh)
M = | np.dot(M, Vh) | numpy.dot |
import argparse
import numpy as NP
from astropy.io import fits
from astropy.io import ascii
import scipy.constants as FCNST
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import progressbar as PGB
import healpy as HP
import geometry as GEOM
import interferometry as RI
import catalog as SM
import constants as CNST
import my_operations as OPS
import primary_beams as PB
import ipdb as PDB
def Jy2K(fluxJy, freq, pixres):
return fluxJy * CNST.Jy / pixres / (2.0* FCNST.k * (freq)**2 / FCNST.c**2)
def K2Jy(tempK, freq, pixres):
return tempK * (2.0* FCNST.k * (freq)**2 / FCNST.c**2) * pixres / CNST.Jy
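# Jy2K / K2Jy are Rayleigh-Jeans conversions between flux density per pixel and
# brightness temperature, S = 2 k_B T nu^2 / c^2 * pixres / Jy, so the two are exact
# inverses for a given freq and pixres:
#   K2Jy(Jy2K(1.0, 185e6, pixres), 185e6, pixres) ~ 1.0 (up to roundoff)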
## Parse input arguments
parser = argparse.ArgumentParser(description='Program to simulate interferometer array data')
project_group = parser.add_mutually_exclusive_group(required=True)
project_group.add_argument('--project-MWA', dest='project_MWA', action='store_true')
project_group.add_argument('--project-HERA', dest='project_HERA', action='store_true')
project_group.add_argument('--project-beams', dest='project_beams', action='store_true')
project_group.add_argument('--project-drift-scan', dest='project_drift_scan', action='store_true')
project_group.add_argument('--project-global-EoR', dest='project_global_EoR', action='store_true')
telescope_group = parser.add_argument_group('Telescope parameters', 'Telescope/interferometer specifications')
telescope_group.add_argument('--label-prefix', help='Prefix for baseline labels [str, Default = ""]', default='', type=str, dest='label_prefix')
telescope_group.add_argument('--telescope', help='Telescope name [str, default="custom"]', default='custom', type=str, dest='telescope_id', choices=['mwa', 'vla', 'gmrt', 'hera', 'mwa_dipole', 'paper_dipole', 'custom', 'mwa_tools'])
telescope_group.add_argument('--latitude', help='Latitude of interferometer array in degrees [float, Default=-26.701]', default=-26.701, type=float, dest='latitude')
telescope_group.add_argument('--A-eff', help='Effective area in m^2', type=float, dest='A_eff', nargs='?')
antenna_element_group = parser.add_argument_group('Antenna element parameters', 'Antenna element specifications')
antenna_element_group.add_argument('--shape', help='Shape of antenna element [no default]', type=str, dest='antenna_element_shape', default=None, choices=['dish', 'dipole', 'delta'])
antenna_element_group.add_argument('--size', help='Size of dish or length of dipole (in meters) [float, no default]', default=None, type=float, dest='antenna_element_size')
antenna_element_group.add_argument('--orientation', help='Orientation of dipole or pointing direction of dish [float, (altitude azimuth) or (l m [n])]', default=None, type=float, nargs='*', dest='antenna_element_orientation')
antenna_element_group.add_argument('--ocoords', help='Coordinates of dipole orientation or dish pointing direction [str]', default=None, type=str, dest='antenna_element_orientation_coords', choices=['dircos', 'altaz'])
antenna_element_group.add_argument('--phased-array', dest='phased_array', action='store_true')
antenna_element_group.add_argument('--phased-array-file', help='Locations of antenna elements to be phased', default='/data3/t_nithyanandan/project_MWA/MWA_tile_dipole_locations.txt', type=file, dest='phased_elements_file')
antenna_element_group.add_argument('--groundplane', help='Height of antenna element above ground plane (in meters) [float]', default=None, type=float, dest='ground_plane')
obsparm_group = parser.add_argument_group('Observation setup', 'Parameters specifying the observation')
obsparm_group.add_argument('-f', '--freq', help='Foreground center frequency in Hz [float, Default=185e6]', default=185e6, type=float, dest='freq')
obsparm_group.add_argument('--dfreq', help='Frequency resolution in Hz [float, Default=40e3]', default=40e3, type=float, dest='freq_resolution')
obsparm_group.add_argument('--obs-mode', help='Observing mode [str, track/drift/drift-shift/custom]', default=None, type=str, dest='obs_mode', choices=['track', 'drift', 'dns', 'custom'])
# obsparm_group.add_argument('--t-snap', help='Integration time (seconds) [float, Default=300.0]', default=5.0*60.0, type=float, dest='t_snap')
obsparm_group.add_argument('--nchan', help='Number of frequency channels [int, Default=256]', default=256, type=int, dest='n_channels')
duration_group = parser.add_argument_group('Observing duration parameters', 'Parameters specifying observing duration')
duration_group.add_argument('--t-obs', help='Duration of observation [seconds]', dest='t_obs', default=None, type=float, metavar='t_obs')
duration_group.add_argument('--n-snap', help='Number of snapshots or records that make up the observation', dest='n_snaps', default=None, type=int, metavar='n_snapshots')
duration_group.add_argument('--t-snap', help='integration time of each snapshot [seconds]', dest='t_snap', default=None, type=int, metavar='t_snap')
pointing_group = parser.add_mutually_exclusive_group(required=True)
pointing_group.add_argument('--pointing-file', dest='pointing_file', type=str, nargs=1, default=None)
pointing_group.add_argument('--pointing-info', dest='pointing_info', type=float, nargs=3, metavar=('lst_init', 'ra_init', 'dec_init'))
snapshot_selection_group = parser.add_mutually_exclusive_group(required=False)
snapshot_selection_group.add_argument('--beam-switch', dest='beam_switch', action='store_true')
snapshot_selection_group.add_argument('--snap-pick', dest='pick_snapshots', default=None, type=int, nargs='*')
snapshot_selection_group.add_argument('--snap-range', dest='snapshots_range', default=None, nargs=2, type=int)
snapshot_selection_group.add_argument('--all-snaps', dest='all_snapshots', action='store_true')
fgmodel_group = parser.add_mutually_exclusive_group(required=True)
fgmodel_group.add_argument('--ASM', action='store_true') # Diffuse (GSM) + Compact (NVSS+SUMSS) All-sky model
fgmodel_group.add_argument('--DSM', action='store_true') # Diffuse all-sky model
fgmodel_group.add_argument('--CSM', action='store_true') # Point source model (NVSS+SUMSS)
fgmodel_group.add_argument('--SUMSS', action='store_true') # SUMSS catalog
fgmodel_group.add_argument('--NVSS', action='store_true') # NVSS catalog
fgmodel_group.add_argument('--MSS', action='store_true') # Molonglo Sky Survey
fgmodel_group.add_argument('--GLEAM', action='store_true') # GLEAM catalog
fgmodel_group.add_argument('--PS', action='store_true') # Point sources
fgmodel_group.add_argument('--USM', action='store_true') # Uniform all-sky model
fgparm_group = parser.add_argument_group('Foreground Setup', 'Parameters describing foreground sky')
fgparm_group.add_argument('--flux-unit', help='Units of flux density [str, Default="Jy"]', type=str, dest='flux_unit', default='Jy', choices=['Jy','K'])
fgparm_group.add_argument('--spindex', help='Spectral index, ~ f^spindex [float, Default=0.0]', type=float, dest='spindex', default=0.0)
fgparm_group.add_argument('--spindex-rms', help='Spectral index rms [float, Default=0.0]', type=float, dest='spindex_rms', default=0.0)
fgparm_group.add_argument('--spindex-seed', help='Spectral index seed [float, Default=None]', type=int, dest='spindex_seed', default=None)
fgparm_group.add_argument('--nside', help='nside parameter for healpix map [int, Default=64]', type=int, dest='nside', default=64, choices=[64, 128])
fgcat_group = parser.add_argument_group('Catalog files', 'Catalog file locations')
fgcat_group.add_argument('--dsm-file-prefix', help='Diffuse sky model filename prefix [str]', type=str, dest='DSM_file_prefix', default='/data3/t_nithyanandan/project_MWA/foregrounds/gsmdata')
fgcat_group.add_argument('--sumss-file', help='SUMSS catalog file [str]', type=str, dest='SUMSS_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/sumsscat.Mar-11-2008.txt')
fgcat_group.add_argument('--nvss-file', help='NVSS catalog file [str]', type=file, dest='NVSS_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/NVSS_catalog.fits')
fgcat_group.add_argument('--GLEAM-file', help='GLEAM catalog file [str]', type=str, dest='GLEAM_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/mwacs_b1_131016.csv')
fgcat_group.add_argument('--PS-file', help='Point source catalog file [str]', type=str, dest='PS_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/PS_catalog.txt')
# parser.add_argument('--', help='', type=, dest='', required=True)
parser.add_argument('--plots', help='Create plots', action='store_true', dest='plots')
args = vars(parser.parse_args())
rootdir = '/data3/t_nithyanandan/'
project_MWA = args['project_MWA']
project_HERA = args['project_HERA']
project_beams = args['project_beams']
project_drift_scan = args['project_drift_scan']
project_global_EoR = args['project_global_EoR']
if project_MWA: project_dir = 'project_MWA'
if project_HERA: project_dir = 'project_HERA'
if project_beams: project_dir = 'project_beams'
if project_drift_scan: project_dir = 'project_drift_scan'
if project_global_EoR: project_dir = 'project_global_EoR'
telescope_id = args['telescope_id']
element_shape = args['antenna_element_shape']
element_size = args['antenna_element_size']
element_orientation = args['antenna_element_orientation']
element_ocoords = args['antenna_element_orientation_coords']
phased_array = args['phased_array']
phased_elements_file = args['phased_elements_file']
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole'):
element_size = 0.74
element_shape = 'dipole'
if telescope_id == 'mwa': phased_array = True
elif telescope_id == 'vla':
element_size = 25.0
element_shape = 'dish'
elif telescope_id == 'gmrt':
element_size = 45.0
element_shape = 'dish'
elif telescope_id == 'hera':
element_size = 14.0
element_shape = 'dish'
elif telescope_id == 'custom':
if element_shape != 'delta':
if (element_shape is None) or (element_size is None):
raise ValueError('Both antenna element shape and size must be specified for the custom telescope type.')
elif element_size <= 0.0:
raise ValueError('Antenna element size must be positive.')
elif telescope_id == 'mwa_tools':
pass
else:
raise ValueError('telescope ID must be specified.')
if telescope_id == 'custom':
if element_shape == 'delta':
telescope_id = 'delta'
else:
telescope_id = '{0:.1f}m_{1:}'.format(element_size, element_shape)
if phased_array:
telescope_id = telescope_id + '_array'
telescope_str = telescope_id+'_'
if element_orientation is None:
if element_ocoords is not None:
if element_ocoords == 'altaz':
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)
elif element_ocoords == 'dircos':
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([1.0, 0.0, 0.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([0.0, 0.0, 1.0]).reshape(1,-1)
else:
raise ValueError('Invalid value specified antenna element orientation coordinate system.')
else:
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)
element_ocoords = 'altaz'
else:
if element_ocoords is None:
raise ValueError('Antenna element orientation coordinate system must be specified to describe the specified antenna orientation.')
element_orientation = NP.asarray(element_orientation).reshape(1,-1)
if (element_orientation.size < 2) or (element_orientation.size > 3):
raise ValueError('Antenna element orientation must be a two- or three-element vector.')
elif (element_ocoords == 'altaz') and (element_orientation.size != 2):
raise ValueError('Antenna element orientation must be a two-element vector if using Alt-Az coordinates.')
ground_plane = args['ground_plane']
if ground_plane is None:
ground_plane_str = 'no_ground_'
else:
if ground_plane > 0.0:
ground_plane_str = '{0:.1f}m_ground_'.format(ground_plane)
else:
raise ValueError('Height of antenna element above ground plane must be positive.')
latitude = args['latitude']
latitude_str = 'lat_{0:.3f}_'.format(latitude)
telescope = {}
if telescope_id in ['mwa', 'vla', 'gmrt', 'hera', 'mwa_dipole', 'mwa_tools']:
telescope['id'] = telescope_id
telescope['shape'] = element_shape
telescope['size'] = element_size
telescope['orientation'] = element_orientation
telescope['ocoords'] = element_ocoords
telescope['groundplane'] = ground_plane
telescope['latitude'] = latitude
freq = args['freq']
freq_resolution = args['freq_resolution']
n_channels = args['n_channels']
nchan = n_channels
chans = (freq + (NP.arange(nchan) - 0.5 * nchan) * freq_resolution)/ 1e9 # in GHz
bw = n_channels * freq_resolution
bandpass_str = '{0:0d}x{1:.1f}_kHz'.format(nchan, freq_resolution/1e3)
if args['A_eff'] is None:
if (telescope['shape'] == 'dipole') or (telescope['shape'] == 'delta'):
A_eff = (0.5*FCNST.c/freq)**2
if (telescope_id == 'mwa') or phased_array:
A_eff *= 16
if telescope['shape'] == 'dish':
A_eff = NP.pi * (0.5*element_size)**2
else:
A_eff = args['A_eff']
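# Effective area defaults: (lambda/2)^2 for a short dipole or delta element (times 16
# for a 4x4 phased tile), or the geometric dish area pi*(size/2)^2; --A-eff overrides both.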
obs_mode = args['obs_mode']
t_snap = args['t_snap']
t_obs = args['t_obs']
n_snaps = args['n_snaps']
snapshot_type_str = obs_mode
pointing_file = args['pointing_file']
if pointing_file is not None:
pointing_file = pointing_file[0]
pointing_info = args['pointing_info']
# snapshot-selection options parsed above and needed below
beam_switch = args['beam_switch']
pick_snapshots = args['pick_snapshots']
snapshots_range = args['snapshots_range']
all_snapshots = args['all_snapshots']
element_locs = None
if phased_array:
try:
element_locs = NP.loadtxt(phased_elements_file, skiprows=1, comments='#', usecols=(0,1,2))
except IOError:
raise IOError('Could not open the specified file for phased array of antenna elements.')
if telescope_id == 'mwa':
xlocs, ylocs = NP.meshgrid(1.1*NP.linspace(-1.5,1.5,4), 1.1*NP.linspace(1.5,-1.5,4))
element_locs = NP.hstack((xlocs.reshape(-1,1), ylocs.reshape(-1,1), NP.zeros(xlocs.size).reshape(-1,1)))
if pointing_file is not None:
pointing_init = None
pointing_info_from_file = NP.loadtxt(pointing_file, skiprows=2, comments='#', usecols=(1,2,3), delimiter=',')
obs_id = NP.loadtxt(pointing_file, skiprows=2, comments='#', usecols=(0,), delimiter=',', dtype=str)
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays_str = NP.loadtxt(pointing_file, skiprows=2, comments='#', usecols=(4,), delimiter=',', dtype=str)
delays_list = [NP.fromstring(delaystr, dtype=float, sep=';', count=-1) for delaystr in delays_str]
delay_settings = NP.asarray(delays_list)
delay_settings *= 435e-12
delays = NP.copy(delay_settings)
if n_snaps is None:
n_snaps = pointing_info_from_file.shape[0]
pointing_info_from_file = pointing_info_from_file[:min(n_snaps, pointing_info_from_file.shape[0]),:]
obs_id = obs_id[:min(n_snaps, pointing_info_from_file.shape[0])]
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = delay_settings[:min(n_snaps, pointing_info_from_file.shape[0]),:]
n_snaps = min(n_snaps, pointing_info_from_file.shape[0])
pointings_altaz = OPS.reverse(pointing_info_from_file[:,:2].reshape(-1,2), axis=1)
pointings_altaz_orig = OPS.reverse(pointing_info_from_file[:,:2].reshape(-1,2), axis=1)
lst = 15.0 * pointing_info_from_file[:,2]
lst_wrapped = lst + 0.0
lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0
lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))
if obs_mode is None:
obs_mode = 'custom'
if (obs_mode == 'dns') and beam_switch:
angle_diff = GEOM.sphdist(pointings_altaz[1:,1], pointings_altaz[1:,0], pointings_altaz[:-1,1], pointings_altaz[:-1,0])
angle_diff = NP.concatenate(([0.0], angle_diff))
shift_threshold = 1.0 # in degrees
# lst_edges = NP.concatenate(([lst_edges[0]], lst_edges[angle_diff > shift_threshold], [lst_edges[-1]]))
lst_wrapped = NP.concatenate(([lst_wrapped[0]], lst_wrapped[angle_diff > shift_threshold], [lst_wrapped[-1]]))
n_snaps = lst_wrapped.size - 1
pointings_altaz = NP.vstack((pointings_altaz[0,:].reshape(-1,2), pointings_altaz[angle_diff>shift_threshold,:].reshape(-1,2)))
obs_id = NP.concatenate(([obs_id[0]], obs_id[angle_diff>shift_threshold]))
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = NP.vstack((delay_settings[0,:], delay_settings[angle_diff>shift_threshold,:]))
obs_mode = 'custom'
lst_edges_left = lst_wrapped[:-1] + 0.0
lst_edges_right = NP.concatenate(([lst_edges[1]], lst_edges[NP.asarray(NP.where(angle_diff > shift_threshold)).ravel()+1]))
elif snapshots_range is not None:
snapshots_range[1] = snapshots_range[1] % n_snaps
if snapshots_range[0] > snapshots_range[1]:
raise IndexError('min snaphost # must be <= max snapshot #')
lst_wrapped = lst_wrapped[snapshots_range[0]:snapshots_range[1]+2]
lst_edges = NP.copy(lst_wrapped)
pointings_altaz = pointings_altaz[snapshots_range[0]:snapshots_range[1]+1,:]
obs_id = obs_id[snapshots_range[0]:snapshots_range[1]+1]
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = delay_settings[snapshots_range[0]:snapshots_range[1]+1,:]
n_snaps = snapshots_range[1]-snapshots_range[0]+1
elif pick_snapshots is not None:
pick_snapshots = NP.asarray(pick_snapshots)
lst_begin = NP.asarray(lst_wrapped[pick_snapshots])
lst_end = NP.asarray(lst_wrapped[pick_snapshots+1])
t_snap = (lst_end - lst_begin) / 15.0 * 3.6e3
n_snaps = t_snap.size
lst = 0.5 * (lst_begin + lst_end)
pointings_altaz = pointings_altaz[pick_snapshots,:]
obs_id = obs_id[pick_snapshots]
if (telescope_id == 'mwa') or (phased_array) or (telescope_id == 'mwa_tools'):
delays = delay_settings[pick_snapshots,:]
obs_mode = 'custom'
if pick_snapshots is None:
if not beam_switch:
lst = 0.5*(lst_edges[1:]+lst_edges[:-1])
t_snap = (lst_edges[1:]-lst_edges[:-1]) / 15.0 * 3.6e3
else:
lst = 0.5*(lst_edges_left + lst_edges_right)
t_snap = (lst_edges_right - lst_edges_left) / 15.0 * 3.6e3
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
pointings_radec[:,0] = pointings_radec[:,0] % 360.0
t_obs = NP.sum(t_snap)
elif pointing_info is not None:
pointing_init = NP.asarray(pointing_info[1:])
lst_init = pointing_info[0]
pointing_file = None
if t_snap is None:
raise NameError('t_snap must be provided for an automated observing run')
if (n_snaps is None) and (t_obs is None):
raise NameError('n_snaps or t_obs must be provided for an automated observing run')
elif (n_snaps is not None) and (t_obs is not None):
raise ValueError('Only one of n_snaps or t_obs must be provided for an automated observing run')
elif n_snaps is None:
n_snaps = int(t_obs/t_snap)
else:
t_obs = n_snaps * t_snap
t_snap = t_snap + NP.zeros(n_snaps)
lst = (lst_init + (t_snap/3.6e3) * NP.arange(n_snaps)) * 15.0 # in degrees
if obs_mode is None:
obs_mode = 'track'
if obs_mode == 'track':
pointings_radec = NP.repeat(NP.asarray(pointing_init).reshape(-1,2), n_snaps, axis=0)
else:
ha_init = lst_init * 15.0 - pointing_init[0]
pointings_radec = NP.hstack((NP.asarray(lst-pointing_init[0]).reshape(-1,1), pointing_init[1]+NP.zeros(n_snaps).reshape(-1,1)))
pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
pointings_radec_orig = NP.copy(pointings_radec)
pointings_hadec_orig = NP.copy(pointings_hadec)
pointings_altaz_orig = NP.copy(pointings_altaz)
pointings_dircos_orig = NP.copy(pointings_dircos)
lst_wrapped = lst + 0.0
lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0
lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))
duration_str = ''
if obs_mode in ['track', 'drift']:
if (t_snap is not None) and (n_snaps is not None):
duration_str = '_{0:0d}x{1:.1f}s'.format(n_snaps, NP.asarray(t_snap)[0])
pointing_info = {}
pointing_info['pointing_center'] = pointings_altaz
pointing_info['pointing_coords'] = 'altaz'
pointing_info['lst'] = lst
if element_locs is not None:
telescope['element_locs'] = element_locs
plots = args['plots']
use_GSM = args['ASM']
use_DSM = args['DSM']
use_CSM = args['CSM']
use_NVSS = args['NVSS']
use_SUMSS = args['SUMSS']
use_MSS = args['MSS']
use_GLEAM = args['GLEAM']
use_PS = args['PS']
use_USM = args['USM']
fg_str = ''
nside = args['nside']
pixres = HP.nside2pixarea(nside)
flux_unit = args['flux_unit']
spindex_seed = args['spindex_seed']
spindex_rms = args['spindex_rms']
spindex_rms_str = ''
spindex_seed_str = ''
if spindex_rms > 0.0:
spindex_rms_str = '{0:.1f}'.format(spindex_rms)
else:
spindex_rms = 0.0
if spindex_seed is not None:
spindex_seed_str = '{0:0d}_'.format(spindex_seed)
if use_GSM:
fg_str = 'asm'
dsm_file = args['DSM_file_prefix']+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)
hdulist = fits.open(dsm_file)
pixres = hdulist[0].header['PIXAREA']
dsm_table = hdulist[1].data
ra_deg_DSM = dsm_table['RA']
dec_deg_DSM = dsm_table['DEC']
temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
fluxes_DSM = temperatures * (2.0 * FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy
spindex = dsm_table['spindex'] + 2.0
freq_DSM = 0.185 # in GHz
freq_catalog = freq_DSM * 1e9 + NP.zeros(fluxes_DSM.size)
catlabel = NP.repeat('DSM', fluxes_DSM.size)
ra_deg = ra_deg_DSM + 0.0
dec_deg = dec_deg_DSM + 0.0
majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
# majax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
# minax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
fluxes = fluxes_DSM + 0.0
freq_SUMSS = 0.843 # in GHz
SUMSS_file = args['SUMSS_file']
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg_SUMSS = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg_SUMSS = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[PS_ind]
dec_deg_SUMSS = dec_deg_SUMSS[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
if spindex_seed is None:
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
else:
NP.random.seed(spindex_seed)
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 10.0 * (freq_SUMSS*1e9/freq)**spindex_SUMSS
ra_deg_SUMSS = ra_deg_SUMSS[bright_source_ind]
dec_deg_SUMSS = dec_deg_SUMSS[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
spindex_SUMSS = spindex_SUMSS[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[valid_ind]
dec_deg_SUMSS = dec_deg_SUMSS[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
spindex_SUMSS = spindex_SUMSS[valid_ind]
freq_catalog = NP.concatenate((freq_catalog, freq_SUMSS*1e9 + NP.zeros(fint.size)))
catlabel = NP.concatenate((catlabel, NP.repeat('SUMSS', fint.size)))
ra_deg = NP.concatenate((ra_deg, ra_deg_SUMSS))
dec_deg = NP.concatenate((dec_deg, dec_deg_SUMSS))
spindex = NP.concatenate((spindex, spindex_SUMSS))
majax = NP.concatenate((majax, fmajax/3.6e3))
minax = NP.concatenate((minax, fminax/3.6e3))
fluxes = NP.concatenate((fluxes, fint))
nvss_file = args['NVSS_file']
freq_NVSS = 1.4 # in GHz
hdulist = fits.open(nvss_file)
ra_deg_NVSS = hdulist[1].data['RA(2000)']
dec_deg_NVSS = hdulist[1].data['DEC(2000)']
nvss_fpeak = hdulist[1].data['PEAK INT']
nvss_majax = hdulist[1].data['MAJOR AX']
nvss_minax = hdulist[1].data['MINOR AX']
hdulist.close()
if spindex_seed is None:
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
else:
NP.random.seed(2*spindex_seed)
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
not_in_SUMSS_ind = NP.logical_and(dec_deg_NVSS > -30.0, dec_deg_NVSS <= min(90.0, latitude+90.0))
bright_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)
PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3
    combined_ind = NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)
    count_valid = NP.sum(combined_ind)
    nvss_fpeak = nvss_fpeak[combined_ind]
    freq_catalog = NP.concatenate((freq_catalog, freq_NVSS*1e9 + NP.zeros(count_valid)))
    catlabel = NP.concatenate((catlabel, NP.repeat('NVSS', count_valid)))
    ra_deg = NP.concatenate((ra_deg, ra_deg_NVSS[combined_ind]))
    dec_deg = NP.concatenate((dec_deg, dec_deg_NVSS[combined_ind]))
    spindex = NP.concatenate((spindex, spindex_NVSS[combined_ind]))
    majax = NP.concatenate((majax, nvss_majax[combined_ind]))
    minax = NP.concatenate((minax, nvss_minax[combined_ind]))
    fluxes = NP.concatenate((fluxes, nvss_fpeak))
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_DSM:
fg_str = 'dsm'
dsm_file = args['DSM_file_prefix']+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)
hdulist = fits.open(dsm_file)
pixres = hdulist[0].header['PIXAREA']
dsm_table = hdulist[1].data
ra_deg_DSM = dsm_table['RA']
dec_deg_DSM = dsm_table['DEC']
temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
fluxes_DSM = temperatures * (2.0 * FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy
spindex = dsm_table['spindex'] + 2.0
freq_DSM = 0.185 # in GHz
freq_catalog = freq_DSM * 1e9 + NP.zeros(fluxes_DSM.size)
catlabel = NP.repeat('DSM', fluxes_DSM.size)
ra_deg = ra_deg_DSM
dec_deg = dec_deg_DSM
majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
# majax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
# minax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
fluxes = fluxes_DSM
hdulist.close()
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_USM:
fg_str = 'usm'
dsm_file = args['DSM_file_prefix']+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)
hdulist = fits.open(dsm_file)
pixres = hdulist[0].header['PIXAREA']
dsm_table = hdulist[1].data
ra_deg = dsm_table['RA']
dec_deg = dsm_table['DEC']
temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
avg_temperature = NP.mean(temperatures)
fluxes_USM = avg_temperature * (2.0 * FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy * NP.ones(temperatures.size)
    spindex = NP.zeros(fluxes_USM.size)
    fluxes = fluxes_USM + 0.0
freq_USM = 0.185 # in GHz
freq_catalog = freq_USM * 1e9 + NP.zeros(fluxes_USM.size)
catlabel = NP.repeat('USM', fluxes_USM.size)
majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_USM.size)
minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_USM.size)
hdulist.close()
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_CSM:
fg_str = 'csm'
freq_SUMSS = 0.843 # in GHz
SUMSS_file = args['SUMSS_file']
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg_SUMSS = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg_SUMSS = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[PS_ind]
dec_deg_SUMSS = dec_deg_SUMSS[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
if spindex_seed is None:
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
else:
NP.random.seed(spindex_seed)
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 10.0 * (freq_SUMSS*1e9/freq)**spindex_SUMSS
ra_deg_SUMSS = ra_deg_SUMSS[bright_source_ind]
dec_deg_SUMSS = dec_deg_SUMSS[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
spindex_SUMSS = spindex_SUMSS[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[valid_ind]
dec_deg_SUMSS = dec_deg_SUMSS[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
spindex_SUMSS = spindex_SUMSS[valid_ind]
freq_catalog = freq_SUMSS*1e9 + NP.zeros(fint.size)
catlabel = NP.repeat('SUMSS', fint.size)
ra_deg = ra_deg_SUMSS + 0.0
dec_deg = dec_deg_SUMSS
spindex = spindex_SUMSS
majax = fmajax/3.6e3
minax = fminax/3.6e3
fluxes = fint + 0.0
nvss_file = args['NVSS_file']
freq_NVSS = 1.4 # in GHz
hdulist = fits.open(nvss_file)
ra_deg_NVSS = hdulist[1].data['RA(2000)']
dec_deg_NVSS = hdulist[1].data['DEC(2000)']
nvss_fpeak = hdulist[1].data['PEAK INT']
nvss_majax = hdulist[1].data['MAJOR AX']
nvss_minax = hdulist[1].data['MINOR AX']
hdulist.close()
if spindex_seed is None:
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
else:
NP.random.seed(2*spindex_seed)
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
not_in_SUMSS_ind = NP.logical_and(dec_deg_NVSS > -30.0, dec_deg_NVSS <= min(90.0, latitude+90.0))
bright_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)
PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3
    combined_ind = NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)
    count_valid = NP.sum(combined_ind)
    nvss_fpeak = nvss_fpeak[combined_ind]
    freq_catalog = NP.concatenate((freq_catalog, freq_NVSS*1e9 + NP.zeros(count_valid)))
    catlabel = NP.concatenate((catlabel, NP.repeat('NVSS', count_valid)))
    ra_deg = NP.concatenate((ra_deg, ra_deg_NVSS[combined_ind]))
    dec_deg = NP.concatenate((dec_deg, dec_deg_NVSS[combined_ind]))
    spindex = NP.concatenate((spindex, spindex_NVSS[combined_ind]))
    majax = NP.concatenate((majax, nvss_majax[combined_ind]))
    minax = NP.concatenate((minax, nvss_minax[combined_ind]))
    fluxes = NP.concatenate((fluxes, nvss_fpeak))
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_SUMSS:
SUMSS_file = args['SUMSS_file']
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg = ra_deg[PS_ind]
dec_deg = dec_deg[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 1.0
ra_deg = ra_deg[bright_source_ind]
dec_deg = dec_deg[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg = ra_deg[valid_ind]
dec_deg = dec_deg[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
freq_catalog = 0.843 # in GHz
if spindex_seed is None:
spindex = -0.83 + spindex_rms * NP.random.randn(fint.size)
else:
NP.random.seed(spindex_seed)
spindex = -0.83 + spindex_rms * NP.random.randn(fint.size)
    fg_str = 'sumss'
    # missing in the original: define the catalog arrays used by spec_parms/SkyModel below
    freq_catalog = freq_catalog * 1e9 + NP.zeros(fint.size)  # convert GHz to Hz, as in the other branches
    catlabel = NP.repeat('SUMSS', fint.size)
    fluxes = fint + 0.0
    majax = fmajax / 3.6e3
    minax = fminax / 3.6e3
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = 1.0e-3 + NP.zeros(ra_deg.size)
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_MSS:
pass
elif use_GLEAM:
catalog_file = args['GLEAM_file']
catdata = ascii.read(catalog_file, data_start=1, delimiter=',')
dec_deg = catdata['DEJ2000']
ra_deg = catdata['RAJ2000']
fpeak = catdata['S150_fit']
ferr = catdata['e_S150_fit']
spindex = catdata['Sp+Index']
    fg_str = 'gleam'
    # missing in the original: define the arrays used by spec_parms/SkyModel below
    # (assumptions: S150_fit is quoted at 150 MHz; sources are treated as unresolved)
    freq_catalog = 0.150 * 1e9 + NP.zeros(fpeak.size)
    catlabel = NP.repeat('GLEAM', fpeak.size)
    fluxes = fpeak
    majax = NP.zeros(fpeak.size)
    minax = NP.zeros(fpeak.size)
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_PS:
fg_str = 'point'
catalog_file = args['PS_file']
catdata = ascii.read(catalog_file, comment='#', header_start=0, data_start=1)
ra_deg = catdata['RA'].data
dec_deg = catdata['DEC'].data
fint = catdata['F_INT'].data
spindex = catdata['SPINDEX'].data
majax = catdata['MAJAX'].data
minax = catdata['MINAX'].data
pa = catdata['PA'].data
freq_PS = 0.185 # in GHz
freq_catalog = freq_PS * 1e9 + NP.zeros(fint.size)
    catlabel = NP.repeat('PS', fint.size)
    fluxes = fint + 0.0
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
antpower_Jy = RI.antenna_power(skymod, telescope, pointing_info, freq_scale='Hz')
antpower_K = antpower_Jy * CNST.Jy / pixres / (2.0* FCNST.k * (1e9*chans.reshape(1,-1))**2 / FCNST.c**2)
outfile = 'antenna_power_'+telescope_str+ground_plane_str+latitude_str+snapshot_type_str+duration_str+'_'+fg_str+'_sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'{0}_{1:.1f}_MHz'.format(bandpass_str, freq/1e6)
if plots:
fig = PLT.figure(figsize=(6,6))
ax = fig.add_subplot(111)
if flux_unit == 'Jy':
ax.plot(lst/15, antpower_Jy[:,nchan/2], 'k-', lw=2)
elif flux_unit == 'K':
ax.plot(lst/15, antpower_K[:,nchan/2], 'k-', lw=2)
ax.set_xlim(0, 24)
ax.set_xlabel('RA [hours]', fontsize=18, weight='medium')
ax.set_ylabel(r'$T_\mathrm{ant}$'+' [ '+flux_unit+' ]', fontsize=16, weight='medium')
ax_y2 = ax.twinx()
if flux_unit == 'Jy':
ax_y2.set_yticks(Jy2K(ax.get_yticks(), chans[nchan/2]*1e9, pixres))
        ax_y2.set_ylim(Jy2K(NP.asarray(ax.get_ylim()), chans[nchan/2]*1e9, pixres))
ax_y2.set_ylabel(r'$T_\mathrm{ant}$'+' [ K ]', fontsize=16, weight='medium')
elif flux_unit == 'K':
ax_y2.set_yticks(K2Jy(ax.get_yticks(), chans[nchan/2]*1e9, pixres))
ax_y2.set_ylim(K2Jy(NP.asarray(ax.get_ylim()), chans[nchan/2]*1e9, pixres))
ax_y2.set_ylabel(r'$T_\mathrm{ant}$'+' [ Jy ]', fontsize=16, weight='medium')
ax.text(0.5, 0.9, '{0:.1f} MHz'.format(chans[nchan/2]*1e3), transform=ax.transAxes, fontsize=12, weight='medium', ha='center', color='black')
fig.subplots_adjust(right=0.85)
fig.subplots_adjust(left=0.15)
PLT.savefig(rootdir+project_dir+'/figures/'+outfile+'.png', bbox_inches=0)
hdulist = []
hdulist += [fits.PrimaryHDU()]
hdulist[0].header['EXTNAME'] = 'PRIMARY'
hdulist[0].header['telescope_id'] = (telescope_id, 'Telescope ID')
hdulist[0].header['element_shape'] = (telescope['shape'], 'Antenna element shape')
hdulist[0].header['element_size'] = (telescope['size'], 'Antenna element size (m)')
hdulist[0].header['A_eff'] = (A_eff, 'Effective area [m^2]')
if telescope['ocoords'] is not None:
hdulist[0].header['element_ocoords'] = (telescope['ocoords'], 'Antenna element orientation coordinates')
if telescope['groundplane'] is not None:
hdulist[0].header['ground_plane'] = (telescope['groundplane'], 'Antenna element height above ground plane [m]')
hdulist[0].header['latitude'] = (latitude, 'Latitude of telescope')
hdulist[0].header['obs_mode'] = (obs_mode, 'Observing mode')
hdulist[0].header['t_snap'] = (NP.mean(t_snap), 'Mean snapshot duration (s)')
import numpy as np
# Photon history bits (see photon.h for source)
NO_HIT = 0x1 << 0
BULK_ABSORB = 0x1 << 1
SURFACE_DETECT = 0x1 << 2
SURFACE_ABSORB = 0x1 << 3
RAYLEIGH_SCATTER = 0x1 << 4
REFLECT_DIFFUSE = 0x1 << 5
REFLECT_SPECULAR = 0x1 << 6
SURFACE_REEMIT = 0x1 << 7
SURFACE_TRANSMIT = 0x1 << 8
BULK_REEMIT = 0x1 << 9
CHERENKOV = 0x1 << 10
SCINTILLATION = 0x1 << 11
NAN_ABORT = 0x1 << 31
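# Minimal illustration (values are made up, not part of the event format): the bit
# constants above are queried with bitwise operations on a per-photon flag array.
_example_flags = np.array([SURFACE_DETECT,
                           SURFACE_DETECT | BULK_REEMIT,
                           BULK_ABSORB], dtype=np.uint32)
_detected = (_example_flags & SURFACE_DETECT) != 0
_never_reemitted = (_example_flags & (BULK_REEMIT | SURFACE_REEMIT)) == 0
_clean_detections = _detected & _never_reemitted   # [True, False, False]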
class Steps(object):
def __init__(self,x,y,z,t,dx,dy,dz,ke,edep,qedep):
self.x = x
self.y = y
self.z = z
self.t = t
self.dx = dx
self.dy = dy
self.dz = dz
self.ke = ke
self.edep = edep
self.qedep = qedep
class Vertex(object):
def __init__(self, particle_name, pos, dir, ke, t0=0.0, pol=None, steps=None, children=None, trackid=-1, pdgcode=-1):
'''Create a particle vertex.
particle_name: string
Name of particle, following the GEANT4 convention.
Examples: e-, e+, gamma, mu-, mu+, pi0
pos: array-like object, length 3
Position of particle vertex (mm)
dir: array-like object, length 3
Normalized direction vector
ke: float
Kinetic energy (MeV)
t0: float
Initial time of particle (ns)
pol: array-like object, length 3
Normalized polarization vector. By default, set to None,
and the particle is treated as having a random polarization.
'''
self.particle_name = particle_name
self.pos = pos
self.dir = dir
self.pol = pol
self.ke = ke
self.t0 = t0
self.steps = steps
self.children = children
self.trackid = trackid
self.pdgcode = pdgcode
def __str__(self):
return 'Vertex('+self.particle_name+',ke='+str(self.ke)+',steps='+str(True if self.steps else False)+')'
__repr__ = __str__
class Photons(object):
def __init__(self, pos=np.empty((0,3)), dir=np.empty((0,3)), pol=np.empty((0,3)), wavelengths=np.empty((0)), t=None, last_hit_triangles=None, flags=None, weights=None, evidx=None, channel=None):
'''Create a new list of n photons.
pos: numpy.ndarray(dtype=numpy.float32, shape=(n,3))
Position 3-vectors (mm)
dir: numpy.ndarray(dtype=numpy.float32, shape=(n,3))
Direction 3-vectors (normalized)
pol: numpy.ndarray(dtype=numpy.float32, shape=(n,3))
Polarization direction 3-vectors (normalized)
wavelengths: numpy.ndarray(dtype=numpy.float32, shape=n)
Photon wavelengths (nm)
t: numpy.ndarray(dtype=numpy.float32, shape=n)
Photon times (ns)
last_hit_triangles: numpy.ndarray(dtype=numpy.int32, shape=n)
ID number of last intersected triangle. -1 if no triangle hit in last step
If set to None, a default array filled with -1 is created
flags: numpy.ndarray(dtype=numpy.uint32, shape=n)
Bit-field indicating the physics interaction history of the photon. See
history bit constants in chroma.event for definition.
weights: numpy.ndarray(dtype=numpy.float32, shape=n)
Survival probability for each photon. Used by
photon propagation code when computing likelihood functions.
evidx: numpy.ndarray(dtype=numpy.uint32, shape=n)
Index of the event in a GPU batch
'''
self.pos = np.asarray(pos, dtype=np.float32)
self.dir = np.asarray(dir, dtype=np.float32)
        self.pol = np.asarray(pol, dtype=np.float32)
# Mini Jerk function file
# Updated in 2020 1/12
# Author <NAME> / JameScottX
# Other: use numba to accelerate
import numpy as np
import numba as nb
class TRAJ_J(object):
def __init__(self):
self.Q_JX= []
self.Q_JY= []
self.Q_JZ= []
self.Q_JS= []
self.__len = 0
self.__T_piece = 0.1
self.T = 0
self.__len_s = 0
self.__T_piece_s = 0.1
self.T_s = 0
pass
def _jerk_core(self, X, t_, t_2):
        '''Minimum-jerk core: solve for the quintic polynomial coefficients.'''
if type(X) is not np.ndarray:
X = np.array(X)
if not np.shape(X) == (6,):
raise ValueError('jerk_func X must be (6,)')
T = np.mat([[np.power(t_, 5), np.power(t_, 4),np.power(t_, 3),np.power(t_, 2), t_, 1],
[5*np.power(t_, 4), 4*np.power(t_, 3),3*np.power(t_, 2),2*t_, 1, 0],
[20*np.power(t_, 3), 12*np.power(t_, 2),6*t_, 2, 0, 0],
[np.power(t_2, 5), np.power(t_2, 4),np.power(t_2, 3),np.power(t_2, 2), t_2, 1],
[5*np.power(t_2, 4), 4*np.power(t_2, 3),3*np.power(t_2, 2),2*t_2, 1, 0],
[20*np.power(t_2, 3), 12*np.power(t_2, 2),6*t_2, 2, 0, 0]])
Q = np.linalg.solve(T,X)
return Q
def traj_mult_pots_Q(self, Xs,Ys,Zs, T):
"""[产生五次样条的Q序列
点之间时间等分]
"""
self.Q_JX, self.Q_JY, self.Q_JZ = [], [], []
Xs_, Ys_, Zs_ = np.array(Xs), np.array(Ys), np.array(Zs)
        self.__len = np.shape(Xs_)[0]  # number of point-to-point segments
self.__T_piece = T / (self.__len)
        self.T = T  # record the total duration
for i in range(self.__len):
self.Q_JX.append( self._jerk_core(Xs_[i].T, self.__T_piece*i, self.__T_piece*(i+1)) )
self.Q_JY.append( self._jerk_core(Ys_[i].T, self.__T_piece*i, self.__T_piece*(i+1)) )
self.Q_JZ.append( self._jerk_core(Zs_[i].T, self.__T_piece*i, self.__T_piece*(i+1)) )
def traj_tj_f(self, t):
"""[jerk 轨迹输出]
Returns:
[array]: [x y z]
"""
temp_i = 0
t_all = (self.__len) * self.__T_piece
        if t_all <= t:  # clamp t when it exceeds the total duration
temp_i = self.__len-1
std= np.array([np.power(t_all, 5), np.power(t_all, 4),np.power(t_all, 3),np.power(t_all, 2), t_all, 1])
else:
for i in range(1,self.__len+1):
if i * self.__T_piece > t :
temp_i =i-1
break
std= np.array([np.power(t, 5), np.power(t, 4),np.power(t, 3),np.power(t, 2), t, 1])
t_out = np.array([np.dot(std,self.Q_JX[temp_i]), np.dot(std,self.Q_JY[temp_i]), np.dot(std,self.Q_JZ[temp_i])])
return t_out
def traj_init_Xs(self, xyz, speed, acc):
Xs, Ys, Zs = [], [], []
for i in range(len(xyz)-1):
Xs.append(np.array([ xyz[i][0], speed[i][0], acc[i][0], xyz[i+1][0], speed[i+1][0], acc[i+1][0] ]))
Ys.append(np.array([ xyz[i][1], speed[i][1], acc[i][1], xyz[i+1][1], speed[i+1][1], acc[i+1][1] ]))
Zs.append(np.array([ xyz[i][2], speed[i][2], acc[i][2], xyz[i+1][2], speed[i+1][2], acc[i+1][2] ]))
return np.array(Xs), np.array(Ys), np.array(Zs)
def traj_Xs(self, xyz, speed, acc, T):
        # top-level trajectory-planning entry point
Xs, Ys, Zs = self.traj_init_Xs(xyz, speed, acc)
self.traj_mult_pots_Q(Xs, Ys, Zs,T)
def xyz_speed_default(self, xyz, speed_sta_end, speed_keep, T):
xyz = np.array(xyz)
n0 = np.shape(xyz)[0]
speed = []
speed.append(speed_sta_end[0])
for _ in range( 1, n0-1 ):
speed.append(speed_keep)
speed.append(speed_sta_end[1])
acc= [ [0.,0.,0.] for _ in range(n0) ]
return np.array(speed), np.array(acc)
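    # Usage sketch (illustrative waypoints, all values made up):
    #   traj = TRAJ_J()
    #   xyz = [[0.0, 0.0, 0.0], [0.1, 0.2, 0.0], [0.3, 0.2, 0.1]]
    #   speed, acc = traj.xyz_speed_default(xyz, [[0., 0., 0.], [0., 0., 0.]],
    #                                       [0.05, 0.05, 0.0], T=2.0)
    #   traj.traj_Xs(xyz, speed, acc, T=2.0)
    #   traj.traj_tj_f(1.0)   # -> interpolated [x, y, z] at t = 1.0 s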
def traj_s_Q(self, s, T):
self.Q_JS =[]
        self.__len_s = np.shape(s)[0]  # number of point-to-point segments
self.__T_piece_s = T / (self.__len_s )
for i in range(self.__len_s):
self.Q_JS.append( self._jerk_core(s[i].T, self.__T_piece_s*i, self.__T_piece_s*(i+1)) )
def traj_init_s(self, s, ds, dds):
S = []
for i in range(len(s)-1):
S.append(np.array([ s[i], ds[i], dds[i], s[i+1], ds[i+1], dds[i+1] ]))
return S
def traj_s(self, s, ds, dds, T):
        # top-level trajectory-planning entry point (scalar version)
S = self.traj_init_s(s, ds, dds)
self.traj_s_Q(S,T)
def traj_ts_f(self, t):
"""[jerk 轨迹输出]
"""
temp_i = 0
t_all = (self.__len_s) * self.__T_piece_s
        if t_all <= t:  # clamp t when it exceeds the total duration
temp_i = self.__len_s-1
            std= np.array([np.power(t_all, 5), np.power(t_all, 4),np.power(t_all, 3),np.power(t_all, 2), t_all, 1])
import numpy as np
from autoins.common import io, math
class DataGenerator():
def __init__(self,
distance_model,
exp_dir,
ccl_id,
data_name,
ag_shape,
nb_subgoal,
nb_data_gen,
contrastive_sample_size):
# input_shape --> ag_shape
# nb_ccl_sample --> nb_data_gen_size
# nb_contrastive_sample --> contrastive_sample_size
self.distance_model = distance_model
self.exp_dir = exp_dir
self.ccl_id = ccl_id
self.data_name = data_name
self.ag_shape = ag_shape
self.nb_subgoal = nb_subgoal
self.nb_data_gen = nb_data_gen
self.contrastive_sample_size = contrastive_sample_size
self.io_manager = io.IoManager(self.exp_dir, self.data_name, self.ccl_id)
def generate_data(self):
ag_demo = self.io_manager.ag_demo
adj_mat = self.io_manager.adj_mat
label = self.io_manager.label
dist_mat = math.compute_dist_mat(adj_mat)
contrastive_data = self.generate_contrastive_data(
ag_demo,
label,
dist_mat,
contrastive_sample_size = self.contrastive_sample_size,
nb_data_gen = self.nb_data_gen)
classification_data = self.generate_classification_data(
ag_demo,
label,
nb_data_gen = self.nb_data_gen)
data = dict()
data.update(contrastive_data)
data.update(classification_data)
return data
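    # Usage sketch (constructor arguments below are placeholders, not real paths or values):
    #   gen = DataGenerator(distance_model, exp_dir='exp/', ccl_id=0, data_name='demo',
    #                       ag_shape=(8,), nb_subgoal=4, nb_data_gen=1024,
    #                       contrastive_sample_size=8)
    #   data = gen.generate_data()
    #   # keys: 'positive_pair', 'positive_dist', 'negative_pair', 'negative_dist',
    #   #       'x_classification', 'label'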
def generate_classification_data(self, ag_demo, label, nb_data_gen):
ag_demo_concat = np.concatenate(ag_demo,0)
label_concat = np.concatenate(label, 0)
total_data = len(ag_demo_concat)
random_idx = np.random.choice(total_data, nb_data_gen, replace = True)
picked_ag = ag_demo_concat[random_idx]
picked_label = label_concat[random_idx]
picked_label_onehot = math.to_onehot(picked_label, depth = self.nb_subgoal)
data = dict(x_classification = picked_ag, label = picked_label_onehot)
return data
def generate_contrastive_data(self,
ag_demo,
label,
dist_mat,
contrastive_sample_size,
nb_data_gen):
positive_pair_array = []
positive_dist_array = []
negative_pair_array = []
negative_dist_array = []
for _ in range(nb_data_gen):
positive_pair, positive_dist, negative_pair, negative_dist = \
self.make_contrastive_sample(ag_demo, label, dist_mat, contrastive_sample_size)
positive_pair_array.append(positive_pair)
positive_dist_array.append(positive_dist)
negative_pair_array.append(negative_pair)
negative_dist_array.append(negative_dist)
positive_pair_array = np.asarray(positive_pair_array)
positive_dist_array = np.asarray(positive_dist_array)
negative_pair_array = np.asarray(negative_pair_array)
negative_dist_array = np.asarray(negative_dist_array)
data = dict(positive_pair = positive_pair_array,
positive_dist = positive_dist_array,
negative_pair = negative_pair_array,
negative_dist = negative_dist_array)
return data
def make_contrastive_sample(self,
ag_demo,
label,
dist_mat,
contrastive_sample_size):
nb_demo = len(ag_demo)
demo_idx = np.random.choice(nb_demo, 1, replace = False)[0]
picked_demo = ag_demo[demo_idx]
len_demo = picked_demo.shape[0]
t_idx = np.random.choice(len_demo, contrastive_sample_size+1, replace = True)
picked_sample = picked_demo[t_idx]
picked_label = label[demo_idx][t_idx]
anchor = np.repeat(picked_sample[:1,:], contrastive_sample_size, axis = 0)
query = picked_sample[1:,:]
anchor_t = np.tile(t_idx[:1], [contrastive_sample_size])
query_t = t_idx[1:]
anchor_l = np.repeat(picked_label[:1], contrastive_sample_size, axis = 0)
query_l = picked_label[1:]
d_for = self._get_graph_dist(anchor_l, query_l, dist_mat) \
+ self._get_temporal_dist(anchor_t, query_t, len_demo)
d_rev = self._get_graph_dist(query_l, anchor_l, dist_mat) \
+ self._get_temporal_dist(query_t, anchor_t, len_demo)
d_concat = np.concatenate([d_for, d_rev], 0)
# pick the index having the minimum distance
argmin_idx_list = np.where(d_concat == d_concat.min())[0]
argmin_idx = argmin_idx_list[np.random.choice(argmin_idx_list.shape[0])]
if argmin_idx < len(d_for):
idx = argmin_idx
positive_query = query[[idx]]
positive_dist = d_for[[idx]]
positive_pair = np.concatenate([anchor[[0]], positive_query], 0)
positive_pair = np.expand_dims(positive_pair, 0)
negative_query_for = np.concatenate([query[:idx],query[idx+1:]],0)
negative_query_for = np.expand_dims(negative_query_for, 1)
negative_query_rev = np.copy(query)
negative_query_rev = np.expand_dims(negative_query_rev,1)
negative_dist_for = np.concatenate([d_for[:idx],d_for[idx+1:]],0)
negative_dist_rev = np.copy(d_rev)
elif argmin_idx >= len(d_for):
idx = argmin_idx - len(d_for)
positive_query = query[[idx]]
positive_dist = d_rev[[idx]]
positive_pair = np.concatenate([positive_query, anchor[[0]]], 0)
positive_pair = np.expand_dims(positive_pair, 0)
negative_query_for = np.copy(query)
negative_query_for = np.expand_dims(negative_query_for, 1)
negative_query_rev = np.concatenate([query[:idx],query[idx+1:]],0)
negative_query_rev = np.expand_dims(negative_query_rev,1)
negative_dist_for = np.copy(d_for)
negative_dist_rev = np.concatenate([d_rev[:idx],d_rev[idx+1:]],0)
len_for = len(negative_query_for)
len_rev = len(negative_query_rev)
anchor_for = np.expand_dims(anchor[:len_for], 1)
anchor_rev = np.expand_dims(anchor[:len_rev], 1)
negative_pair_for = np.concatenate([anchor_for, negative_query_for], 1)
negative_pair_rev = np.concatenate([negative_query_rev, anchor_rev], 1)
        negative_pair = np.concatenate([negative_pair_for, negative_pair_rev], 0)
from matplotlib import pyplot as plt
import numpy as np
import pymc as pm
from scipy.stats.mstats import mquantiles
from IPython.core.pylabtools import figsize
def logistic(x, beta, alpha=0):
    return 1.0 / (1.0 + np.exp(np.dot(beta, x) + alpha))
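# Quick sketch (coefficients are arbitrary): the logistic squashes any real input into
# (0, 1); |beta| controls the sharpness of the transition and alpha shifts it.
_x_demo = np.linspace(-4, 4, 9)
_p_demo = logistic(_x_demo, beta=1.5, alpha=-1.0)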
# Copyright (C) 2019 TU Dresden
# Licensed under the ISC license (see LICENSE.txt)
#
# Author: <NAME>
import numpy as np
from mocasin.design_centering.volume import *
import mocasin.design_centering.sample as sample
import mocasin.util.random_distributions.discrete_random as rd
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def random_s_set_gen(
n, procs, mu, Q, r, num_samples, threshold=0.05, num_points=10
):
ns = [procs] * n
# print np.linalg.det(Q)
# print np.linalg.eig(Q)
# eigenv,_ = np.linalg.eig(r**2 * Q*np.transpose(Q))
# print("Eigenvalues of Cov: " + str(eigenv))
# test discrete uniform plain
result = sample.SampleSet()
for i in range(num_samples):
sample_vec = rd.discrete_gauss(ns, mu.astype(int), r, np.array(Q))
s = sample.Sample(sample=sample_vec)
s.setFeasibility(True)
result.add_sample(s)
return result
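# Usage sketch (parameter values are made up): draw 100 discrete-Gaussian samples of a
# 3-process mapping onto a 16-core platform, centred at mu with radius r.
def _random_s_set_demo():
    mu = np.array([4, 4, 4])
    return random_s_set_gen(n=3, procs=16, mu=mu, Q=np.eye(3), r=2.0, num_samples=100)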
def random_s_set_test_center(dim, num_procs, mu, num_samples, Q, r):
procs = num_procs
s_set = random_s_set_gen(dim, procs, mu, Q, r, num_samples)
component = np.random.randint(2)
other_component = 1 - component
for sample in s_set.get_samples():
tup = sample.sample2simpleTuple()
# skew upward
if tup[component] > mu[component] and (
tup[other_component] == mu[other_component]
):
sample.setFeasibility(True)
else:
sample.setFeasibility(False)
return s_set
# generates random sets of points around a feasible circle with radius r_set
def random_s_set_test_radius(r, point, dim, num_procs, num_samples, Q, r_set):
test_set = random_s_set_gen(dim, num_procs, point, Q, r, num_samples)
Qinv = np.linalg.inv(Q)
for s in test_set.get_feasible():
vecNotShifted = np.array(s.sample2tuple()) - np.array(point)
vec = np.dot(vecNotShifted, Qinv)
dist = np.sqrt(np.dot(vec, vec.transpose()))
if dist >= r_set:
s.setFeasibility(False)
return test_set
# center and radius should not really change
def random_s_set_test_covariance(
Q, Q_target, dim, num_procs, r, mu, num_samples
):
test_set = random_s_set_gen(dim, num_procs, mu, Q, r, num_samples)
Qinv = np.linalg.inv(Q_target)
for s in test_set.get_feasible():
vecNotShifted = np.array(s.sample2tuple()) - np.array(mu)
        vec = np.dot(vecNotShifted, Qinv)
import os
import math
import warnings
import numpy as np
import pandas as pd
import gmhazard_calc.constants as const
from gmhazard_calc.im import IM, IMType
from qcore import nhm
def calculate_rupture_rates(
nhm_df: pd.DataFrame,
rup_name: str = "rupture_name",
annual_rec_prob_name: str = "annual_rec_prob",
mag_name: str = "mag_name",
) -> pd.DataFrame:
"""Takes in a list of background ruptures and
calculates the rupture rates for the given magnitudes
The rupture rate calculation is based on the Gutenberg-Richter equation from OpenSHA.
    It discretises the recurrence rate per magnitude instead of storing the probability of
rupture exceeding a certain magnitude
https://en.wikipedia.org/wiki/Gutenberg%E2%80%93Richter_law
https://github.com/opensha/opensha-core/blob/master/src/org/opensha/sha/magdist/GutenbergRichterMagFreqDist.java
Also includes the rupture magnitudes
"""
data = np.ndarray(
sum(nhm_df.n_mags),
dtype=[
(rup_name, str, 64),
(annual_rec_prob_name, np.float64),
(mag_name, np.float64),
],
)
# Make an array of fault bounds so the ith faults has
# the ruptures indexes[i]-indexes[i+1]-1 (inclusive)
indexes = np.cumsum(nhm_df.n_mags.values)
indexes = np.insert(indexes, 0, 0)
index_mask = np.zeros(len(data), dtype=bool)
warnings.filterwarnings(
"ignore", message="invalid value encountered in true_divide"
)
for i, line in nhm_df.iterrows():
index_mask[indexes[i] : indexes[i + 1]] = True
# Generate the magnitudes for each rupture
sample_mags = np.linspace(line.M_min, line.M_cutoff, line.n_mags)
for ii, iii in enumerate(range(indexes[i], indexes[i + 1])):
data[rup_name][iii] = create_ds_rupture_name(
line.source_lat,
line.source_lon,
line.source_depth,
sample_mags[ii],
line.tect_type,
)
# Calculate the cumulative rupture rate for each rupture
baseline = (
line.b
* math.log(10, 2.72)
/ (1 - 10 ** (-1 * line.b * (line.M_cutoff - line.M_min)))
)
f_m_mag = np.power(10, (-1 * line.b * (sample_mags - line.M_min))) * baseline
f_m_mag = np.append(f_m_mag, 0)
rup_prob = (f_m_mag[:-1] + f_m_mag[1:]) / 2 * 0.1
total_cumulative_rate = rup_prob * line.totCumRate
# normalise
total_cumulative_rate = (
line.totCumRate * total_cumulative_rate / np.sum(total_cumulative_rate)
)
data[mag_name][index_mask] = sample_mags
data[annual_rec_prob_name][index_mask] = total_cumulative_rate
index_mask[indexes[i] : indexes[i + 1]] = False
background_values = pd.DataFrame(data=data)
background_values.fillna(0, inplace=True)
return background_values
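# Sketch of the Gutenberg-Richter discretisation used above, reduced to a single
# background source with made-up parameters (b-value, magnitude range, total rate).
def _gr_rates_single_source(b=1.0, m_min=5.0, m_cutoff=7.2, n_mags=23, tot_rate=0.05):
    mags = np.linspace(m_min, m_cutoff, n_mags)
    baseline = b * math.log(10, 2.72) / (1 - 10 ** (-1 * b * (m_cutoff - m_min)))
    f_m = np.power(10, -1 * b * (mags - m_min)) * baseline
    f_m = np.append(f_m, 0)
    rup_prob = (f_m[:-1] + f_m[1:]) / 2 * 0.1
    rates = rup_prob * tot_rate
    # normalise so the discrete rates sum to the source's total cumulative rate
    return mags, tot_rate * rates / np.sum(rates)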
def convert_im_type(im_type: str):
"""Converts the IM type to the standard format,
will be redundant in the future"""
if im_type.startswith("SA"):
return "p" + im_type.replace("p", ".")
return im_type
def get_erf_name(erf_ffp: str) -> str:
"""Gets the erf name, required for rupture ids
Use this function for consistency, instead of doing it manual
"""
return os.path.basename(erf_ffp).split(".")[0]
def pandas_isin(array_1: np.ndarray, array_2: np.ndarray) -> np.ndarray:
"""This is the same as a np.isin,
however is significantly faster for large arrays
https://stackoverflow.com/questions/15939748/check-if-each-element-in-a-numpy-array-is-in-another-array
"""
return pd.Index(pd.unique(array_2)).get_indexer(array_1) >= 0
def get_min_max_values_for_im(im: IM):
"""Get minimum and maximum for the given im. Values for velocity are
given on cm/s, acceleration on cm/s^2 and Ds on s
"""
if im.is_pSA():
assert im.period is not None, "No period provided for pSA, this is an error"
if im.period <= 0.5:
return 0.005, 10.0
elif 0.5 < im.period <= 1.0:
return 0.005, 7.5
elif 1.0 < im.period <= 3.0:
return 0.0005, 5.0
elif 3.0 < im.period <= 5.0:
return 0.0005, 4.0
elif 5.0 < im.period <= 10.0:
return 0.0005, 3.0
if im.im_type is IMType.PGA:
return 0.0001, 10.0
elif im.im_type is IMType.PGV:
return 1.0, 400.0
elif im.im_type is IMType.CAV:
return 0.0001 * 980, 20.0 * 980.0
elif im.im_type is IMType.AI:
return 0.01, 1000.0
elif im.im_type is IMType.Ds575 or im.im_type is IMType.Ds595:
return 1.0, 400.0
else:
print("Unknown IM, cannot generate a range of IM values. Exiting the program")
exit(1)
def get_im_values(im: IM, n_values: int = 100):
"""
Create an range of values for a given IM according to their min, max
as defined by get_min_max_values
Parameters
----------
im: IM
The IM Object to get im values for
n_values: int
Returns
-------
Array of IM values
"""
start, end = get_min_max_values_for_im(im)
im_values = np.logspace(
start=np.log(start), stop=np.log(end), num=n_values, base=np.e
)
return im_values
def closest_location(locations, lat, lon):
"""
Find position of closest location in locations 2D np.array of (lat, lon).
"""
d = (
np.sin(np.radians(locations[:, 0] - lat) / 2.0) ** 2
+ np.cos(np.radians(lat))
* np.cos(np.radians(locations[:, 0]))
* np.sin(np.radians(locations[:, 1] - lon) / 2.0) ** 2
)
return np.argmin(6378.139 * 2.0 * np.arctan2(np.sqrt(d), np.sqrt(1 - d)))
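# Minimal usage sketch for closest_location() (coordinates are made up).
def _closest_location_demo():
    locs = np.array([[-43.5, 172.6], [-41.3, 174.8], [-36.9, 174.8]])
    return closest_location(locs, -41.3, 174.8)   # -> 1 (exact match at index 1)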
def read_emp_file(emp_file, cs_faults):
"""Read an empiricial file"""
# Read file
emp = pd.read_csv(
emp_file,
sep="\t",
names=("fault", "mag", "rrup", "med", "dev", "prob"),
usecols=(0, 1, 2, 5, 6, 7),
dtype={
"fault": object,
"mag": np.float32,
"rrup": np.float32,
"med": np.float32,
"dev": np.float32,
"prob": np.float32,
},
engine="c",
skiprows=1,
)
# Type contains 0: Type A; 1: Type B; 2: Distributed Seismicity
emp["type"] = pd.Series(0, index=emp.index, dtype=np.uint8)
# Type B faults
    emp.type += np.invert(np.vectorize(cs_faults.__contains__)(emp.fault))
import cv2
import numpy as np
import math
# Specify the path for the video
cap = cv2.VideoCapture('Data/Tag0.mp4')
# cap = cv2.VideoCapture('Data/Tag0.mp4')
# cap = cv2.VideoCapture('Data/Tag2.mp4')
# cap = cv2.VideoCapture('Data/multipleTags.mp4')
lenaImage = cv2.imread('Data/Lena.png')
lenaImage = cv2.resize(lenaImage, (200, 200))
print("Choose the task to be performed on the video from selected options")
print("Press 1 for AR tag detection")
print("Press 2 for superimposing Lena on AR tag")
print("Press 3 for superimposing cube on AR tag")
print("")
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))
task = int(input("Make your selection: "))
previous_orientation=0
while(cap.isOpened()):
ret, arTag = cap.read()
size = arTag.shape
if ret == False:
break
# Convert frame into grayscale
arTag_grayscale = cv2.cvtColor(arTag, cv2.COLOR_BGR2GRAY)
# Smoothen the frame to get rid of noise
blur = cv2.GaussianBlur(arTag_grayscale, (7,7),0)
# Threshold the frame to obtain binary image and to clearly visualize the AR tag
(threshold, binary) = cv2.threshold(blur, 127, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
#contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) ## some versions use this
image,contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Filter out unwanted contours
unwantedContours = []
for contour, h in enumerate(hierarchy[0]):
if h[2] == -1 or h[3] == -1:
unwantedContours.append(contour)
# Sort the desired contours based on area
cnts = [c2 for c1, c2 in enumerate(contours) if c1 not in unwantedContours]
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:3]
# Approximate the contours to 4 sided polygon and obtain the desired ones
tagContours = []
for c in cnts:
c = cv2.convexHull(c)
epsilon = 0.01122*cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, epsilon, True)
if len(approx) == 4:
tagContours.append(approx)
# Obtain the 4 corners of the tag from the contours obtained
corners = []
for tc in tagContours:
coords = []
for p in tc:
coords.append([p[0][0],p[0][1]])
corners.append(coords)
# Draw the contours on image
contourImage = cv2.drawContours(arTag, tagContours, -1, (0,0,255), 4)
for tag1, tag2 in enumerate(corners):
ii=0
corner_points_x = []
corner_points_y = []
# Order the obtained corners in clockwise direction
ordered_corner_points = np.zeros((4,2))
corner_points = np.array(tag2)
# Sort the corner with respect to X coordinates
corner_points_sorted_x = corner_points[np.argsort(corner_points[:, 0]), :]
# Obtain the left most and right most corners
left = corner_points_sorted_x[:2, :]
right = corner_points_sorted_x[2:, :]
# Sort the left most inorder to fix them
left = left[np.argsort(left[:, 1]), :]
(ordered_corner_points[0], ordered_corner_points[3]) = left
# Obtain the right most by fixing top left coordinates and calculating distance to the right most
d1 = math.sqrt((right[0][0] - ordered_corner_points[0][0])**2 + (right[0][1] - ordered_corner_points[0][1])**2)
d2 = math.sqrt((right[1][0] - ordered_corner_points[0][0])**2 + (right[1][1] - ordered_corner_points[0][1])**2)
if d1 > d2:
ordered_corner_points[2] = right[0]
ordered_corner_points[1] = right[1]
else:
ordered_corner_points[1] = right[0]
ordered_corner_points[2] = right[1]
for point in ordered_corner_points:
corner_points_x.append(point[0])
corner_points_y.append(point[1])
reference_corners_x = [0,199,199,0]
reference_corners_y = [0,0,199,199]
A = np.array([
[ corner_points_x[0], corner_points_y[0], 1 , 0 , 0 , 0 , -reference_corners_x[0]*corner_points_x[0], -reference_corners_x[0]*corner_points_y[0], -reference_corners_x[0]],
[ 0 , 0 , 0 , corner_points_x[0], corner_points_y[0], 1 , -reference_corners_y[0]*corner_points_x[0], -reference_corners_y[0]*corner_points_y[0], -reference_corners_y[0]],
[ corner_points_x[1], corner_points_y[1], 1 , 0 , 0 , 0 , -reference_corners_x[1]*corner_points_x[1], -reference_corners_x[1]*corner_points_y[1], -reference_corners_x[1]],
[ 0 , 0 , 0 , corner_points_x[1], corner_points_y[1], 1 , -reference_corners_y[1]*corner_points_x[1], -reference_corners_y[1]*corner_points_y[1], -reference_corners_y[1]],
[ corner_points_x[2], corner_points_y[2], 1 , 0 , 0 , 0 , -reference_corners_x[2]*corner_points_x[2], -reference_corners_x[2]*corner_points_y[2], -reference_corners_x[2]],
[ 0 , 0 , 0 , corner_points_x[2], corner_points_y[2], 1 , -reference_corners_y[2]*corner_points_x[2], -reference_corners_y[2]*corner_points_y[2], -reference_corners_y[2]],
[ corner_points_x[3], corner_points_y[3], 1 , 0 , 0 , 0 , -reference_corners_x[3]*corner_points_x[3], -reference_corners_x[3]*corner_points_y[3], -reference_corners_x[3]],
[ 0 , 0 , 0 , corner_points_x[3], corner_points_y[3], 1 , -reference_corners_y[3]*corner_points_x[3], -reference_corners_y[3]*corner_points_y[3], -reference_corners_y[3]],
], dtype=np.float64)
U,S,V = np.linalg.svd(A)
H = V[:][8]/V[8][8]
H_matrix = np.reshape(H, (3,3))
H_inverse = np.linalg.inv(H_matrix)
coords = np.indices((200, 200)).reshape(2, -1)
coords=np.vstack((coords, np.ones(coords.shape[1])))
x2, y2 = coords[0], coords[1]
warp_coords = ( H_inverse@coords)
warp_coords=warp_coords/warp_coords[2]
x1, y1 = warp_coords[0, :], warp_coords[1, :]# Get pixels within image boundaries
indices = np.where((x1 >= 0) & (x1 < size[1]) &
(y1 >= 0) & (y1 < size[0]))
xpix1, ypix1 = x2[indices], y2[indices]
xpix1=xpix1.astype(int)
ypix1=ypix1.astype(int)
xpix2, ypix2 = x1[indices], y1[indices]# Map Correspondence
xpix2=xpix2.astype(int)
ypix2=ypix2.astype(int)
perspectiveImage = np.zeros((200,200))
perspectiveImage[ypix1, xpix1] = binary[ypix2,xpix2]
stride = perspectiveImage.shape[0]//8
x = 0
y = 0
tagGrid = np.zeros((8,8))
for i in range(8):
for j in range(8):
cell = perspectiveImage[y:y+stride, x:x+stride]
cv2.rectangle(perspectiveImage,(x,y),(x+stride,y+stride), (255,0,0), 1)
if cell.mean() > 255//2:
tagGrid[i][j] = 1
x = x + stride
x = 0
y = y + stride
cv2.imshow('perspective projection', perspectiveImage)
# rotated_corner_points = []
if(tagGrid[2][2] == 0 and tagGrid[2][5] == 0 and tagGrid[5][2] == 0 and tagGrid[5][5] == 1):
orientation = 0
elif(tagGrid[2][2] == 1 and tagGrid[2][5] == 0 and tagGrid[5][2] == 0 and tagGrid[5][5] == 0):
orientation = 180
elif(tagGrid[2][2] == 0 and tagGrid[2][5] == 1 and tagGrid[5][2] == 0 and tagGrid[5][5] == 0):
orientation = 90
elif(tagGrid[2][2] == 0 and tagGrid[2][5] == 0 and tagGrid[5][2] == 1 and tagGrid[5][5] == 0):
orientation = -90
else:
orientation = None
# print(orientation)
if (orientation == None):
flag = False
else:
flag = True
Id=0
if(flag == True):
if (orientation == 0):
Id = tagGrid[3][3]*1 +tagGrid[4][3]*8 +tagGrid[4][4]*4 + tagGrid[3][4]*2
reference_corners_x = [0,199,199,0]
reference_corners_y = [0,0,199,199]
previous_orientation = orientation
# rotated_corner_points = ordered_corner_points
elif(orientation == 90):
Id = tagGrid[3][3]*2 + tagGrid[3][4]*4 + tagGrid[4][4]*8 +tagGrid[4][3]*1
reference_corners_x = [199,199,0,0]
reference_corners_y = [0,199,199,0]
previous_orientation = orientation
# rotated_corner_points = [ordered_corner_points[2], ordered_corner_points[0], ordered_corner_points[3], ordered_corner_points[1]]
elif(orientation == -90):
Id= tagGrid[3][3]*8 + tagGrid[3][4] + tagGrid[4][4]*2 +tagGrid[4][3]*4
reference_corners_x = [0,0,199,199]
reference_corners_y = [199,0,0,199]
previous_orientation = orientation
# rotated_corner_points = [ordered_corner_points[1], ordered_corner_points[3], ordered_corner_points[0], ordered_corner_points[2]]
elif(orientation == 180):
Id = tagGrid[3][3]*4 + tagGrid[4][3]*2 + tagGrid[4][4] + tagGrid[3][4]*8
reference_corners_x = [199,0,0,199]
reference_corners_y = [199,199,0,0]
previous_orientation = orientation
else:
if (previous_orientation == 0):
Id = tagGrid[3][3]*1 +tagGrid[4][3]*8 +tagGrid[4][4]*4 + tagGrid[3][4]*2
reference_corners_x = [0,199,199,0]
reference_corners_y = [0,0,199,199]
# rotated_corner_points = ordered_corner_points
elif(previous_orientation == 90):
Id = tagGrid[3][3]*2 + tagGrid[3][4]*4 + tagGrid[4][4]*8 +tagGrid[4][3]*1
reference_corners_x = [199,199,0,0]
reference_corners_y = [0,199,199,0]
# rotated_corner_points = [ordered_corner_points[2], ordered_corner_points[0], ordered_corner_points[3], ordered_corner_points[1]]
elif(previous_orientation == -90):
Id= tagGrid[3][3]*8 + tagGrid[3][4] + tagGrid[4][4]*2 +tagGrid[4][3]*4
reference_corners_x = [0,0,199,199]
reference_corners_y = [199,0,0,199]
# rotated_corner_points = [ordered_corner_points[1], ordered_corner_points[3], ordered_corner_points[0], ordered_corner_points[2]]
elif(previous_orientation == 180):
Id = tagGrid[3][3]*4 + tagGrid[4][3]*2 + tagGrid[4][4] + tagGrid[3][4]*8
reference_corners_x = [199,0,0,199]
reference_corners_y = [199,199,0,0]
# rotated_corner_points = [ordered_corner_points[3], ordered_corner_points[2], ordered_corner_points[1], ordered_corner_points[0]]
if(Id !=0 and task ==1):
cv2.putText(arTag, str(int(Id)), ((int(corner_points_x[ii]-50), int(corner_points_y[ii]-50))), cv2.FONT_ITALIC, 2, (255,0,255),3)
ii+=1
Id=0
if(task==1):
cv2.imshow('Tag ID', arTag)
if (task ==2):
A = np.array([
[ corner_points_x[0], corner_points_y[0], 1 , 0 , 0 , 0 , -reference_corners_x[0]*corner_points_x[0], -reference_corners_x[0]*corner_points_y[0], -reference_corners_x[0]],
[ 0 , 0 , 0 , corner_points_x[0], corner_points_y[0], 1 , -reference_corners_y[0]*corner_points_x[0], -reference_corners_y[0]*corner_points_y[0], -reference_corners_y[0]],
[ corner_points_x[1], corner_points_y[1], 1 , 0 , 0 , 0 , -reference_corners_x[1]*corner_points_x[1], -reference_corners_x[1]*corner_points_y[1], -reference_corners_x[1]],
[ 0 , 0 , 0 , corner_points_x[1], corner_points_y[1], 1 , -reference_corners_y[1]*corner_points_x[1], -reference_corners_y[1]*corner_points_y[1], -reference_corners_y[1]],
[ corner_points_x[2], corner_points_y[2], 1 , 0 , 0 , 0 , -reference_corners_x[2]*corner_points_x[2], -reference_corners_x[2]*corner_points_y[2], -reference_corners_x[2]],
[ 0 , 0 , 0 , corner_points_x[2], corner_points_y[2], 1 , -reference_corners_y[2]*corner_points_x[2], -reference_corners_y[2]*corner_points_y[2], -reference_corners_y[2]],
[ corner_points_x[3], corner_points_y[3], 1 , 0 , 0 , 0 , -reference_corners_x[3]*corner_points_x[3], -reference_corners_x[3]*corner_points_y[3], -reference_corners_x[3]],
[ 0 , 0 , 0 , corner_points_x[3], corner_points_y[3], 1 , -reference_corners_y[3]*corner_points_x[3], -reference_corners_y[3]*corner_points_y[3], -reference_corners_y[3]],
], dtype=np.float64)
U,S,V = np.linalg.svd(A)
H = V[:][8]/V[8][8]
H_matrix = np.reshape(H, (3,3))
H_inverse = np.linalg.inv(H_matrix)
coords = np.indices((200, 200)).reshape(2, -1)
coords=np.vstack((coords, np.ones(coords.shape[1])))
x2, y2 = coords[0], coords[1]
warp_coords = ( H_inverse@coords)
warp_coords=warp_coords/warp_coords[2]
x1, y1 = warp_coords[0, :], warp_coords[1, :]# Get pixels within image boundaries
indices = np.where((x1 >= 0) & (x1 < size[1]) &
(y1 >= 0) & (y1 < size[0]))
xpix1, ypix1 = x2[indices], y2[indices]
xpix1=xpix1.astype(int)
ypix1=ypix1.astype(int)
xpix2, ypix2 = x1[indices], y1[indices]# Map Correspondence
xpix2=xpix2.astype(int)
ypix2=ypix2.astype(int)
arTag[ypix2, xpix2] = lenaImage[ypix1,xpix1]
imS = cv2.resize(arTag, (1920, 1080))
cv2.imshow('superimpose lena', arTag )
# cv2.resizeWindow('superimpose lena', 600,600)
#Placing a Virtual Cube over the tag
#Camera Intrinsic Parameters
K = np.array([[1406.08415449821,0,0],[2.20679787308599, 1417.99930662800,0],[1014.13643417416, 566.347754321696,1]])
h1 = H_inverse[:,0]
h2 = H_inverse[:,1]
K = np.transpose(K)
K_inv = np.linalg.inv(K)
lamda = 1/((np.linalg.norm(np.dot(K_inv,h1))+np.linalg.norm(np.dot(K_inv,h2)))/2)
Btilde = np.dot(K_inv,H_inverse)
if np.linalg.det(Btilde)>0:
B = Btilde
else:
B = -Btilde
b1 = B[:,0]
b2 = B[:,1]
b3 = B[:,2]
r1 = lamda*b1
r2 = lamda*b2
r3 = np.cross(r1,r2)
t = lamda*b3
projectionMatrix = np.dot(K, (np.stack((r1,r2,r3,t), axis=1)))
x1,y1,z1 = np.matmul(projectionMatrix,[0,0,0,1])
x2,y2,z2 = np.matmul(projectionMatrix,[0,199,0,1])
x3,y3,z3 = np.matmul(projectionMatrix,[199,0,0,1])
x4,y4,z4 = np.matmul(projectionMatrix,[199,199,0,1])
x5,y5,z5 = np.matmul(projectionMatrix,[0,0,-199,1])
            x6,y6,z6 = np.matmul(projectionMatrix,[0,199,-199,1])
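# Aside (illustrative, not used by the script above): the 8x9 DLT system assembled for
# the homography can be wrapped in a small helper; src_pts/dst_pts are (4, 2) arrays of
# corresponding source and destination corners.
def estimate_homography_dlt(src_pts, dst_pts):
    A = []
    for (x, y), (u, v) in zip(src_pts, dst_pts):
        A.append([x, y, 1, 0, 0, 0, -u * x, -u * y, -u])
        A.append([0, 0, 0, x, y, 1, -v * x, -v * y, -v])
    # the homography is the right singular vector of A with the smallest singular value
    _, _, V = np.linalg.svd(np.array(A, dtype=np.float64))
    H = V[-1].reshape(3, 3)
    return H / H[2, 2]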
import time
import random
import itertools
# import gc
import os
import sys
import glob
import datetime
import numpy as np
import yaml
import pickle
from operator import itemgetter
from optparse import OptionParser
from sklearn.model_selection import KFold
from sklearn.metrics import roc_curve, auc, average_precision_score
sys.path.insert(0, os.path.join(sys.path[0], ".."))
from tiknib.utils import do_multiprocess, parse_fname
from tiknib.utils import load_func_data
from tiknib.utils import flatten
from tiknib.utils import store_cache, load_cache
from get_roc_graph import plot_roc_all
import logging
import coloredlogs
logger = logging.getLogger(__name__)
coloredlogs.install(level=logging.INFO)
coloredlogs.install(level=logging.DEBUG)
np.seterr(divide="ignore", invalid="ignore")
TRIALS=10
def debughere():
import ipdb; ipdb.set_trace(sys._getframe().f_back)
def get_package(func_key):
return func_key[0]
def get_binary(func_key):
return func_key[1]
def get_func(func_key):
return (func_key[2], func_key[3])
def get_opti(option_key):
return option_key[0]
def get_arch(option_key):
return option_key[1]
def get_arch_nobits(option_key):
return option_key[1].split("_")[0]
def get_bits(option_key):
return option_key[1].split("_")[1]
def get_compiler(option_key):
return option_key[2]
def get_others(option_key):
return option_key[3]
def parse_other_options(bin_path):
other_options = ["lto", "pie", "noinline"]
for opt in other_options:
if opt in bin_path:
return opt
return "normal"
def get_optionidx_map(options):
return {opt: idx for idx, opt in enumerate(sorted(options))}
def is_valid(dictionary, s):
return s in dictionary and dictionary[s]
def calc_ap(X, y):
return average_precision_score(y, X)
def calc_roc(X, y):
fpr, tpr, tresholds = roc_curve(y, X, pos_label=1)
return auc(fpr, tpr)
def calc_tptn_gap(tps, tns):
return np.mean(np.abs(tps - tns), axis=0)
def relative_difference(a, b):
max_val = np.maximum(np.absolute(a), np.absolute(b))
d = np.absolute(a - b) / max_val
d[np.isnan(d)] = 0 # 0 / 0 = nan -> 0
    d[np.isinf(d)] = 0  # x / 0 = inf -> 0
    return d
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from rapyuta.inout import read_fits
from rapyuta.plots import pplot
slit = 'Ns'
## raw
name = 'raw'
ds = read_fits('3390001.1_'+slit,'3390001.1_'+slit+'_unc')
snr = np.mean(ds.data/ds.unc)
# p = pplot(ds.wave, ds.data[:,14,1], yerr=ds.unc[:,14,1],ec='r',c='k',label=name,legend=2,title='S/R={}'.format(np.mean(ds.data/ds.unc)))
# p.save(name)
mask = (ds.data/ds.unc)[:,14,1] >0.1
p = pplot(ds.wave[mask], (ds.data/ds.unc)[:,14,1][mask],
figsize=((10,8)), left=.15, right=.99, bottom=.1, top=.98,
c='k',label=name,ylog=1,#ylim=(5e1,2e4),
title=None,
xlabel=r'$\lambda\,(\mu m)$', ylabel='Signal-to-noise ratio',
titlesize=20, labelsize=20, ticksize=20)
## pt3
name = 'pt3'
name = '2err'
ds = read_fits('3390001.1_'+slit+'_'+name,'3390001.1_'+slit+'_'+name+'_unc')
snr = np.mean(ds.data/ds.unc)
# p = pplot(ds.wave, ds.data[:,20,1], yerr=ds.unc[:,20,1],ec='r',c='k',label=name,legend=2,title='S/R={}'.format(np.mean(ds.data/ds.unc)))
# p.save(name)
mask = (ds.data/ds.unc)[:,14,1] >0.1
p.add_plot(ds.wave[mask], (ds.data/ds.unc)[:,14,1][mask], c='c',label='pointing error')
p.ax.legend(loc='upper left', fontsize=20, framealpha=0)
p.save('test_irc_pt')
exit()
## splitnorm
name = 'splitnorm'
ds = read_fits('3390001.1_'+slit+'_'+name,'3390001.1_'+slit+'_'+name+'_unc')
snr = np.mean(ds.data/ds.unc)
p = pplot(ds.wave, ds.data[:,20,1], yerr=ds.unc[:,20,1],ec='r',c='k',label=name,legend=2,title='S/R={}'.format(np.mean(ds.data/ds.unc)))
p.save(name)
## 2err
name = '2err'
ds = read_fits('3390001.1_'+slit+'_'+name,'3390001.1_'+slit+'_'+name+'_unc')
snr = np.mean(ds.data/ds.unc)
p = pplot(ds.wave, ds.data[:,20,1], yerr=ds.unc[:,20,1],ec='r',c='k',label=name,legend=2,title='S/R={}'.format(np.mean(ds.data/ds.unc)))
p.save(name)
## 2errsup
name = '2errsup'
ds = read_fits('3390001.1_'+slit+'_'+name,'3390001.1_'+slit+'_'+name+'_unc')
snr = np.mean(ds.data/ds.unc)
p = pplot(ds.wave, ds.data[:,20,1], yerr=ds.unc[:,20,1],ec='r',c='k',label=name,legend=2,title='S/R={}'.format(np.mean(ds.data/ds.unc)))
p.save(name)
## 2errfill
name = '2errfill'
ds = read_fits('3390001.1_'+slit+'_'+name,'3390001.1_'+slit+'_'+name+'_unc')
snr = np.mean(ds.data/ds.unc)
p = pplot(ds.wave, ds.data[:,20,1], yerr=ds.unc[:,20,1],ec='r',c='k',label=name,legend=2,title='S/R={}'.format(np.mean(ds.data/ds.unc)))
p.save(name)
## 2errfillsup
name = '2errfillsup'
ds = read_fits('3390001.1_'+slit+'_'+name,'3390001.1_'+slit+'_'+name+'_unc')
snr = np.mean(ds.data/ds.unc)
import cv2
import glob
import os
import matplotlib.pyplot as plt
import numpy as np
import torch as t
from skimage import io
from torchvision.utils import make_grid
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def save_tensor_images(image_tensor, i, save_dir, prefix, resize_factor=0.5):
with t.no_grad():
if prefix == "rgb":
mean = t.tensor([0.34, 0.33, 0.35]).reshape(1, 3, 1, 1)
std = t.tensor([0.19, 0.18, 0.18]).reshape(1, 3, 1, 1)
elif prefix == "ir":
mean = 0.35
std = 0.18
elif prefix == "pred":
mean = 0.5
std = 0.5
elif prefix == "segment":
image_unflat = (image_tensor.detach().cpu() + 1) / 2
else:
raise TypeError("Name error")
if prefix != "segment":
image_unflat = image_tensor.detach().cpu() * std + mean
image_grid = make_grid(image_unflat, nrow=3)
img = image_grid.permute(1, 2, 0).squeeze().numpy()
img = np.clip(img * 255, a_min=0, a_max=255).astype(np.uint8)
img = cv2.resize(img, (0, 0), fx=resize_factor, fy=resize_factor)
name = prefix + str(i) + r'.jpg'
io.imsave(os.path.join(save_dir, name), img)
def save_all_images(rgb, ir, pred, i, save_dir, segment=None, resize_factor=0.5):
with t.no_grad():
mean_rgb = t.tensor([0.34, 0.33, 0.35]).reshape(1, 3, 1, 1)
std_rgb = t.tensor([0.19, 0.18, 0.18]).reshape(1, 3, 1, 1)
rgb_n = rgb.detach().cpu() * std_rgb + mean_rgb
mean_ir = 0.35
std_ir = 0.18
ir_n = ir.detach().cpu() * std_ir + mean_ir
ir_n = t.cat([ir_n, ir_n, ir_n], dim=1)
pred_n = pred.detach().cpu() * 0.5 + 0.5
pred_n = t.cat([pred_n, pred_n, pred_n], dim=1)
if segment is not None:
segment_n = segment.detach().cpu() * 0.5 + 0.5
image_unflat = t.cat([pred_n, ir_n, rgb_n, segment_n], dim=0)
else:
image_unflat = t.cat([pred_n, ir_n, rgb_n], dim=0)
image_grid = make_grid(image_unflat, nrow=2)
img = image_grid.permute(1, 2, 0).squeeze().numpy()
        img = np.clip(img * 255, a_min=0, a_max=255).astype(np.uint8)
import trimesh
import numpy as np
import quaternion
# original obj operations by Zhengqin
def loadMesh(name ):
vertices = []
faces = []
with open(name, 'r') as meshIn:
lines = meshIn.readlines()
lines = [x.strip() for x in lines if len(x.strip() ) > 2 ]
for l in lines:
if l[0:2] == 'v ':
vstr = l.split(' ')[1:4]
varr = [float(x) for x in vstr ]
varr = np.array(varr ).reshape([1, 3] )
vertices.append(varr )
elif l[0:2] == 'f ':
fstr = l.split(' ')[1:4]
farr = [int(x.split('/')[0] ) for x in fstr ]
farr = np.array(farr ).reshape([1, 3] )
faces.append(farr )
vertices = np.concatenate(vertices, axis=0 )
faces = np.concatenate(faces, axis=0 )
return vertices, faces
def writeMesh(name, vertices, faces ):
with open(name, 'w') as meshOut:
for n in range(0, vertices.shape[0]):
meshOut.write('v %.3f %.3f %.3f\n' %
(vertices[n, 0], vertices[n, 1], vertices[n, 2] ) )
for n in range(0,faces.shape[0] ):
meshOut.write('f %d %d %d\n' %
(faces[n, 0], faces[n, 1], faces[n, 2]) )
def computeBox(vertices ):
minX, maxX = vertices[:, 0].min(), vertices[:, 0].max()
minY, maxY = vertices[:, 1].min(), vertices[:, 1].max()
minZ, maxZ = vertices[:, 2].min(), vertices[:, 2].max()
corners = []
corners.append(np.array([minX, minY, minZ] ).reshape(1, 3) )
corners.append(np.array([maxX, minY, minZ] ).reshape(1, 3) )
corners.append(np.array([maxX, minY, maxZ] ).reshape(1, 3) )
corners.append(np.array([minX, minY, maxZ] ).reshape(1, 3) )
corners.append(np.array([minX, maxY, minZ] ).reshape(1, 3) )
corners.append(np.array([maxX, maxY, minZ] ).reshape(1, 3) )
corners.append(np.array([maxX, maxY, maxZ] ).reshape(1, 3) )
corners.append(np.array([minX, maxY, maxZ] ).reshape(1, 3) )
corners = np.concatenate(corners ).astype(np.float32 )
faces = []
faces.append(np.array([1, 2, 3] ).reshape(1, 3) )
faces.append(np.array([1, 3, 4] ).reshape(1, 3) )
faces.append(np.array([5, 7, 6] ).reshape(1, 3) )
faces.append(np.array([5, 8, 7] ).reshape(1, 3) )
faces.append(np.array([1, 6, 2] ).reshape(1, 3) )
faces.append(np.array([1, 5, 6] ).reshape(1, 3) )
faces.append(np.array([2, 7, 3] ).reshape(1, 3) )
faces.append(np.array([2, 6, 7] ).reshape(1, 3) )
faces.append(np.array([3, 8, 4] ).reshape(1, 3) )
faces.append(np.array([3, 7, 8] ).reshape(1, 3) )
faces.append(np.array([4, 5, 1] ).reshape(1, 3) )
faces.append(np.array([4, 8, 5] ).reshape(1, 3) )
faces = np.concatenate(faces ).astype(np.int32 )
return corners, faces
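# Usage sketch (random points, arbitrary output name): wrap a point cloud in its
# axis-aligned box and dump it as an OBJ. computeBox() returns 1-based face indices,
# which is what writeMesh() expects.
def _write_bbox_demo(out_name='bbox_demo.obj'):
    pts = np.random.rand(100, 3) * np.array([2.0, 1.0, 3.0])
    corners, faces = computeBox(pts)
    writeMesh(out_name, corners, faces)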
def computeTransform(vertices, t, q, s):
if s != None:
scale = np.array(s, dtype=np.float32 ).reshape(1, 3)
vertices = vertices * scale
if q != None:
q = np.quaternion(q[0], q[1], q[2], q[3])
rotMat = quaternion.as_rotation_matrix(q )
if np.abs(rotMat[1, 1] ) > 0.5:
d = rotMat[1, 1]
rotMat[:, 1] = 0
rotMat[1, :] = 0
if d < 0:
rotMat[1, 1] = -1
else:
rotMat[1, 1] = 1
vertices = np.matmul(rotMat, vertices.transpose() )
vertices = vertices.transpose()
if t != None:
trans = np.array(t, dtype=np.float32 ).reshape(1, 3)
vertices = vertices + trans
return vertices, trans.squeeze(), rotMat, scale.squeeze()
# mesh operations by Rui
def load_OR_mesh(layout_obj_file):
mesh = trimesh.load_mesh(str(layout_obj_file))
mesh = as_mesh(mesh)
return mesh
def as_mesh(scene_or_mesh):
"""
Convert a possible scene to a mesh.
If conversion occurs, the returned mesh has only vertex and face data.
"""
if isinstance(scene_or_mesh, trimesh.Scene):
if len(scene_or_mesh.geometry) == 0:
mesh = None # empty scene
else:
# we lose texture information here
mesh = trimesh.util.concatenate(
tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
for g in scene_or_mesh.geometry.values()))
else:
assert(isinstance(scene_or_mesh, trimesh.Trimesh))
mesh = scene_or_mesh
return mesh
def remove_top_down_faces(mesh):
v = np.array(mesh.vertices)
f = list(np.array(mesh.faces))
f_after = []
for f0 in f:
if not(v[f0[0]][2]==v[f0[1]][2]==v[f0[2]][2]):
f_after.append(f0)
new_mesh = trimesh.Trimesh(vertices=v, faces=np.asarray(f_after))
return new_mesh
def mesh_to_contour(mesh, if_input_is_v_e=False, vertical_dim=-1):
if if_input_is_v_e:
v, e = mesh
else:
mesh = remove_top_down_faces(mesh)
v = np.array(mesh.vertices)
e = np.array(mesh.edges)
v_new_id_list = []
v_new_id = 0
floor_z = np.amin(v[:, vertical_dim])
for v0 in v:
if v0[vertical_dim]==floor_z:
v_new_id_list.append(v_new_id)
v_new_id += 1
else:
v_new_id_list.append(-1)
v_new = np.array([np.delete(v[x], vertical_dim) for x in range(len(v)) if v_new_id_list[x]!=-1])
e_new = np.array([[v_new_id_list[e[x][0]], v_new_id_list[e[x][1]]] for x in range(len(e)) if (v_new_id_list[e[x][0]]!=-1 and v_new_id_list[e[x][1]]!=-1)])
return v_new, e_new
def mesh_to_skeleton(mesh):
mesh = remove_top_down_faces(mesh)
v = np.array(mesh.vertices)
e = mesh.edges
floor_z = np.amin(v[:, -1])
ceil_z = np.amax(v[:, -1])
e_new = []
for e0 in e:
z0, z1 = v[e0[0]][2], v[e0[1]][2]
if z0 == z1:
e_new.append(e0)
elif np.array_equal(v[e0[0]][:2], v[e0[1]][:2]):
e_new.append(e0)
e_new = np.array(e_new)
return v, e_new
def v_pairs_from_v3d_e(v, e):
v_pairs = [(np.array([v[e0[0]][0], v[e0[1]][0]]), np.array([v[e0[0]][1], v[e0[1]][1]]), np.array([v[e0[0]][2], v[e0[1]][2]])) for e0 in e]
return v_pairs
def v_pairs_from_v2d_e(v, e):
v_pairs = [(np.array([v[e0[0]][0], v[e0[1]][0]]), np.array([v[e0[0]][1], v[e0[1]][1]])) for e0 in e]
return v_pairs
def v_xytuple_from_v2d_e(v, e):
v_pairs = [(v[e0[0]], v[e0[1]]) for e0 in e]
return v_pairs
def transform_v(vertices, transforms):
assert transforms[0][0]=='s' and transforms[1][0]=='rot' and transforms[2][0]=='t'
# following computeTransform()
assert len(vertices.shape)==2
assert vertices.shape[1]==3
s = transforms[0][1]
scale = np.array(s, dtype=np.float32 ).reshape(1, 3)
vertices = vertices * scale
rotMat = s = transforms[1][1]
vertices = np.matmul(rotMat, vertices.transpose() )
vertices = vertices.transpose()
t = s = transforms[2][1]
trans = np.array(t, dtype=np.float32 ).reshape(1, 3)
vertices = vertices + trans
return vertices
from scipy.spatial import ConvexHull
def minimum_bounding_rectangle(points):
# https://gis.stackexchange.com/questions/22895/finding-minimum-area-rectangle-for-given-points
"""
Find the smallest bounding rectangle for a set of points.
Returns a set of points representing the corners of the bounding box.
:param points: an nx2 matrix of coordinates
:rval: an nx2 matrix of coordinates
"""
from scipy.ndimage.interpolation import rotate
pi2 = np.pi/2.
# get the convex hull for the points
hull_points = points[ConvexHull(points).vertices]
# calculate edge angles
edges = np.zeros((len(hull_points)-1, 2))
edges = hull_points[1:] - hull_points[:-1]
angles = np.zeros((len(edges)))
angles = np.arctan2(edges[:, 1], edges[:, 0])
angles = np.abs(np.mod(angles, pi2))
angles = np.unique(angles)
# find rotation matrices
# XXX both work
rotations = np.vstack([
np.cos(angles),
np.cos(angles-pi2),
np.cos(angles+pi2),
np.cos(angles)]).T
# rotations = np.vstack([
# np.cos(angles),
# -np.sin(angles),
# np.sin(angles),
# np.cos(angles)]).T
rotations = rotations.reshape((-1, 2, 2))
# apply rotations to the hull
rot_points = np.dot(rotations, hull_points.T)
# find the bounding points
min_x = np.nanmin(rot_points[:, 0], axis=1)
max_x = np.nanmax(rot_points[:, 0], axis=1)
min_y = np.nanmin(rot_points[:, 1], axis=1)
    max_y = np.nanmax(rot_points[:, 1], axis=1)
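    # Sketch of the remaining steps in the usual version of this routine (cf. the
    # StackExchange answer cited in the docstring): keep the rotation that minimises
    # the box area and rotate its four corners back into the original frame.
    #   areas = (max_x - min_x) * (max_y - min_y)
    #   best = np.argmin(areas)
    #   r = rotations[best]
    #   rval = np.zeros((4, 2))
    #   rval[0] = np.dot([max_x[best], min_y[best]], r)
    #   rval[1] = np.dot([min_x[best], min_y[best]], r)
    #   rval[2] = np.dot([min_x[best], max_y[best]], r)
    #   rval[3] = np.dot([max_x[best], max_y[best]], r)
    #   return rval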
"""Molecular geometric feature representation based on numpy.
a loose collection of functions.
Modular functions to compute distance matrix, angles, coordinates, connectivity, etc.
Many functions are written for batches too. Ideally all functions are vectorized.
Note: All functions are supposed to work out of the box without any dependencies, i.e. do not depend on each other.
"""
import numpy as np
def coordinates_to_distancematrix(coord3d):
"""
Transform coordinates to distance matrix.
Will apply transformation on last dimension.
Changing of shape (...,N,3) -> (...,N,N)
Arg:
coord3d (np.array): Coordinates of shape (...,N,3) for cartesian coordinates (x,y,z)
and N the number of atoms or points. Coordinates are last dimension.
Returns:
np.array: distance matrix as numpy array with shape (...,N,N) where N is the number of atoms
"""
shape_3d = len(coord3d.shape)
a = np.expand_dims(coord3d, axis=shape_3d - 2)
b = np.expand_dims(coord3d, axis=shape_3d - 1)
c = b - a
d = np.sqrt(np.sum(np.square(c), axis=shape_3d))
return d
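# Minimal check (made-up coordinates): three collinear points one unit apart give the
# expected symmetric distance matrix [[0,1,2],[1,0,1],[2,1,0]].
def _distancematrix_demo():
    coords = np.array([[0.0, 0.0, 0.0],
                       [1.0, 0.0, 0.0],
                       [2.0, 0.0, 0.0]])
    return coordinates_to_distancematrix(coords)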
def invert_distance(d, nan=0, posinf=0, neginf=0):
"""
Invert distance array, e.g. distance matrix.
Inversion is done for all entries.
Keeping of shape (...,) -> (...,)
Args:
d (np.array): array of distance values of shape (...,)
nan (value): replacement for np.nan after division, default = 0
posinf (value): replacement for np.inf after division, default = 0
neginf (value): replacement for -np.inf after division, default = 0
Returns:
np.array: Inverted distance array as numpy array of identical shape (...,) and
replaces np.nan and np.inf with e.g. 0
"""
with np.errstate(divide='ignore', invalid='ignore'):
c = np.true_divide(1, d)
# c[c == np.inf] = 0
c = np.nan_to_num(c, nan=nan, posinf=posinf, neginf=neginf)
return c
def inversedistancematrix_to_coulombmatrix(dinv, proton_number):
"""
Calculate Coulombmatrix from inverse distance Matrix plus nuclear charges/proton number.
Transform shape as (...,N,N) + (...,N) -> (...,N,N)
Args:
dinv (np.array): Inverse distance matrix defined at last two axis.
Array of shape (...,N,N) with N number of atoms storing inverse distances.
proton_number (np.array): Nuclear charges given in last dimension.
Order must match entries in inverse distance matrix.
array of shape (...,N)
Returns:
np.array: Numpy array with Coulombmatrix at last two dimension (...,N,N).
Function multiplies Z_i*Z_j with 1/d_ij and set diagonal to 0.5*Z_ii^2.4
"""
shape_z = proton_number.shape
a = np.expand_dims(proton_number, axis=len(shape_z) - 1)
b = np.expand_dims(proton_number, axis=len(shape_z))
c = a * b
coul = dinv * c
indslie = np.arange(0, shape_z[-1])
coul[..., indslie, indslie] = np.power(proton_number, 2.4) * 0.5
return coul
def value_to_onehot(vals, compare):
"""
Convert array of values e.g. nuclear charge to one-hot representation thereof.
a dictionary of all possible values is required.
Expands shape from (...,) + (M,) -> (...,M)
Args:
vals (np.array): array of values to convert.
compare (np.array): 1D-numpy array with a list of possible values.
Returns:
np.array: a one-hot representation of vals input with expanded last dimension to match
the compare dictionary. Entries are 1.0 if vals == compare[i] and 0.0 else
"""
comp = np.array(compare, dtype=vals.dtype)
vals_shape = vals.shape
vals = np.expand_dims(vals, axis=-1)
comp = np.broadcast_to(comp, vals_shape + comp.shape) # shape (1,1,...,M)
out = np.array(vals == comp, dtype=np.float32)
return out
def coulombmatrix_to_inversedistance_proton(coulmat, unit_conversion=1):
"""Convert a coulomatrix back to inverse distancematrix + atomic number.
(...,N,N) -> (...,N,N) + (...,N)
Args:
coulmat (np.array): Full Coulombatrix of shape (...,N,N)
unit_conversion (float) : Whether to scale units for distance. Default is 1.
Returns:
tuple: [inv_dist,z]
- inv_dist(np.array): Inverse distance Matrix of shape (...,N,N)
- z(np.array): Atom Number corresponding diagonal as proton number.
"""
indslie = np.arange(0, coulmat.shape[-1])
z = coulmat[..., indslie, indslie]
z = np.power(2 * z, 1 / 2.4)
a = np.expand_dims(z, axis=len(z.shape) - 1)
b = np.expand_dims(z, axis=len(z.shape))
zz = a * b
c = coulmat / zz
c[..., indslie, indslie] = 0
c /= unit_conversion
z = np.array(np.round(z), dtype=np.int)
return c, z
def distance_to_gaussdistance(distance, bins=30, gauss_range=5.0, gauss_sigma=0.2):
"""Convert distance array to smooth one-hot representation using Gaussian functions.
Changes shape for gaussian distance (...,) -> (...,GBins)
The Default values match units in Angstroem.
Args:
distance (np.array): Array of distances of shape (...,)
bins (int): number of Bins to sample distance from, default = 30
gauss_range (value): maximum distance to be captured by bins, default = 5.0
gauss_sigma (value): sigma of the gaussian function, determining the width/sharpness, default = 0.2
Returns:
np.array: Numpy array of gaussian distance with expanded last axis (...,GBins)
"""
gamma = 1 / gauss_sigma / gauss_sigma * (-1) / 2
d_shape = distance.shape
edge_dist_grid = np.expand_dims(distance, axis=-1)
edge_gaus_bin = np.arange(0, bins, 1) / bins * gauss_range
edge_gaus_bin = np.broadcast_to(edge_gaus_bin, np.append(np.ones(len(d_shape), dtype=np.int32),
edge_gaus_bin.shape)) # shape (1,1,...,bins)
edge_gaus_bin = np.square(edge_dist_grid - edge_gaus_bin) * gamma # (N,M,...,1) - (1,1,...,bins)
edge_gaus_bin = np.exp(edge_gaus_bin)
return edge_gaus_bin
def sort_distmatrix(distance_matrix):
"""
Sort a flexible shaped distance matrix along last dimension.
Keeps shape (...,N,M) -> index (...,N,M) + sorted (...,N,M)
Args:
distance_matrix (np.array): Matrix of distances of shape (...,N,M)
Returns:
tuple: [sorting_index, sorted_distance]
- sorting_index (np.array): Indices of sorted last dimension entries. Shape (...,N,M)
- sorted_distance (np.array): Sorted distance Matrix, sorted at last dimension.
"""
sorting_index = np.argsort(distance_matrix, axis=-1)
sorted_distance = np.take_along_axis(distance_matrix, sorting_index, axis=-1)
return sorting_index, sorted_distance
def get_connectivity_from_inversedistancematrix(invdistmat, protons, radii_dict=None, k1=16.0, k2=4.0 / 3.0,
cutoff=0.85, force_bonds=True):
"""
Get connectivity table from inverse distance matrix defined at last dimensions (...,N,N) and
corresponding bond-radii.
Keeps shape with (...,N,N).
    Covalent radii, from Pyykko and Atsumi, Chem. Eur. J. 15, 2009, 188-197.
Values for metals decreased by 10% according to <NAME>'s Sterimol implementation.
Partially based on code from <NAME>'s Sterimol script, which based this part on Grimme's D3 code
Args:
invdistmat (np.array): inverse distance matrix defined at last dimensions (...,N,N)
distances must be in Angstroem not in Bohr
protons (np.array): An array of atomic numbers matching the invdistmat (...,N),
for which the radii are to be computed.
radii_dict (np.array): covalent radii for each element. If default=None, stored values are used.
Otherwise array with covalent bonding radii.
example: np.array([0, 0.24, 0.46, 1.2, ...]) from {'H': 0.34, 'He': 0.46, 'Li': 1.2,
...}
k1 (value): default = 16
k2 (value): default = 4.0/3.0
cutoff (value): cutoff value to set values to Zero (no bond) default = 0.85
force_bonds (value): whether to force at least one bond in the bond table per atom (default = True)
    Returns:
np.array: Connectivity table with 1 for chemical bond and zero otherwise of shape (...,N,N) -> (...,N,N)
"""
# Dictionary of bond radii
# original_radii_dict = {'H': 0.34, 'He': 0.46, 'Li': 1.2, 'Be': 0.94, 'b': 0.77, 'C': 0.75, 'N': 0.71, 'O': 0.63,
# 'F': 0.64, 'Ne': 0.67, 'Na': 1.4, 'Mg': 1.25, 'Al': 1.13, 'Si': 1.04, 'P': 1.1, 'S': 1.02,
# 'Cl': 0.99, 'Ar': 0.96, 'K': 1.76, 'Ca': 1.54, 'Sc': 1.33, 'Ti': 1.22, 'V': 1.21,
# 'Cr': 1.1, 'Mn': 1.07, 'Fe': 1.04, 'Co': 1.0, 'Ni': 0.99, 'Cu': 1.01, 'Zn': 1.09,
# 'Ga': 1.12, 'Ge': 1.09, 'As': 1.15, 'Se': 1.1, 'Br': 1.14, 'Kr': 1.17, 'Rb': 1.89,
# 'Sr': 1.67, 'Y': 1.47, 'Zr': 1.39, 'Nb': 1.32, 'Mo': 1.24, 'Tc': 1.15, 'Ru': 1.13,
# 'Rh': 1.13, 'Pd': 1.19, 'Ag': 1.15, 'Cd': 1.23, 'In': 1.28, 'Sn': 1.26, 'Sb': 1.26,
# 'Te': 1.23, 'I': 1.32, 'Xe': 1.31, 'Cs': 2.09, 'Ba': 1.76, 'La': 1.62, 'Ce': 1.47,
# 'Pr': 1.58, 'Nd': 1.57, 'Pm': 1.56, 'Sm': 1.55, 'Eu': 1.51, 'Gd': 1.52, 'Tb': 1.51,
# 'Dy': 1.5, 'Ho': 1.49, 'Er': 1.49, 'Tm': 1.48, 'Yb': 1.53, 'Lu': 1.46, 'Hf': 1.37,
# 'Ta': 1.31, 'W': 1.23, 'Re': 1.18, 'Os': 1.16, 'Ir': 1.11, 'Pt': 1.12, 'Au': 1.13,
# 'Hg': 1.32, 'Tl': 1.3, 'Pb': 1.3, 'Bi': 1.36, 'Po': 1.31, 'At': 1.38, 'Rn': 1.42,
# 'Fr': 2.01, 'Ra': 1.81, 'Ac': 1.67, 'Th': 1.58, 'Pa': 1.52, 'U': 1.53, 'Np': 1.54,
# 'Pu': 1.55}
proton_raddi_dict = np.array(
[0, 0.34, 0.46, 1.2, 0.94, 0.77, 0.75, 0.71, 0.63, 0.64, 0.67, 1.4, 1.25, 1.13, 1.04, 1.1, 1.02, 0.99, 0.96,
1.76, 1.54, 1.33, 1.22, 1.21, 1.1, 1.07, 1.04, 1.0, 0.99, 1.01, 1.09, 1.12, 1.09, 1.15, 1.1, 1.14, 1.17, 1.89,
1.67, 1.47, 1.39, 1.32, 1.24, 1.15, 1.13, 1.13, 1.19, 1.15, 1.23, 1.28, 1.26, 1.26, 1.23, 1.32, 1.31, 2.09,
1.76, 1.62, 1.47, 1.58, 1.57, 1.56, 1.55, 1.51, 1.52, 1.51, 1.5, 1.49, 1.49, 1.48, 1.53, 1.46, 1.37, 1.31,
1.23, 1.18, 1.16, 1.11, 1.12, 1.13, 1.32, 1.3, 1.3, 1.36, 1.31, 1.38, 1.42, 2.01, 1.81, 1.67, 1.58, 1.52, 1.53,
1.54, 1.55])
if radii_dict is None:
radii_dict = proton_raddi_dict # index matches atom number
# Get Radii
    protons = np.array(protons, dtype=int)
radii = radii_dict[protons]
# Calculate
shape_rad = radii.shape
r1 = np.expand_dims(radii, axis=len(shape_rad) - 1)
r2 = np.expand_dims(radii, axis=len(shape_rad))
rmat = r1 + r2
rmat = k2 * rmat
rr = rmat * invdistmat
damp = (1.0 + np.exp(-k1 * (rr - 1.0)))
damp = 1.0 / damp
if force_bonds: # Have at least one bond
maxvals = np.expand_dims(np.argmax(damp, axis=-1), axis=-1)
np.put_along_axis(damp, maxvals, 1, axis=-1)
# To make it symmetric transpose last two axis
damp = np.swapaxes(damp, -2, -1)
np.put_along_axis(damp, maxvals, 1, axis=-1)
damp = np.swapaxes(damp, -2, -1)
damp[damp < cutoff] = 0
bond_tab = np.round(damp)
return bond_tab
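# Example (illustrative sketch, added for clarity; not part of the original module):
# two hydrogen atoms 0.74 Angstroem apart are recognised as bonded. The diagonal of the
# inverse distance matrix is assumed to be zero, as the usual distance-inversion helper produces.
# >>> import numpy as np
# >>> invdist = np.array([[0.0, 1.0 / 0.74], [1.0 / 0.74, 0.0]])
# >>> get_connectivity_from_inversedistancematrix(invdist, np.array([1, 1]))
# array([[0., 1.],
#        [1., 0.]])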
def get_indexmatrix(shape, flatten=False):
"""
Matrix of indices with a_ijk... = [i,j,k,..] for shape (N,M,...,len(shape)) with Indexlist being the last dimension.
    Note: numpy indexing does not work this way; it expects one index list per dimension.
Args:
shape (list, int): list of target shape, e.g. (2,2)
flatten (bool): whether to flatten the output or keep inputshape, default=False
Returns:
np.array: Index array of shape (N,M,...,len(shape)) e.g. [[[0,0],[0,1]],[[1,0],[1,1]]]
"""
indarr = np.indices(shape)
re_order = np.append(np.arange(1, len(shape) + 1), 0)
indarr = indarr.transpose(re_order)
if flatten:
indarr = np.reshape(indarr, (np.prod(shape), len(shape)))
return indarr
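# Example (illustrative, added for clarity):
# >>> get_indexmatrix((2, 2), flatten=True)
# array([[0, 0],
#        [0, 1],
#        [1, 0],
#        [1, 1]])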
def coordinates_from_distancematrix(distance, use_center=None, dim=3):
"""Compute list of coordinates from a distance matrix of shape (N,N).
    Uses a vectorized algorithm:
    http://scripts.iucr.org/cgi-bin/paper?S0567739478000522
    https://www.researchgate.net/publication/252396528_Stable_calculation_of_coordinates_from_distance_information
    No check of positive semi-definiteness or of a required embedding dimension > 3 is done here.
    Performs SVD from numpy.
    May even work for (...,N,N) but not tested.
Args:
distance (np.array): distance matrix of shape (N,N) with Dij = abs(ri-rj)
        use_center (int): which atom should be the center, default = None means center of mass
dim (int): the dimension of embedding, 3 is default
Return:
np.array: List of Atom coordinates [[x_1,x_2,x_3],[x_1,x_2,x_3],...]
"""
distance = np.array(distance)
dim_in = distance.shape[-1]
if use_center is None:
# Take Center of mass (slightly changed for vectorization assuming d_ii = 0)
di2 = np.square(distance)
di02 = 1 / 2 / dim_in / dim_in * (2 * dim_in * np.sum(di2, axis=-1) - np.sum(np.sum(di2, axis=-1), axis=-1))
mat_m = (np.expand_dims(di02, axis=-2) + np.expand_dims(di02, axis=-1) - di2) / 2 # broadcasting
else:
di2 = np.square(distance)
mat_m = (np.expand_dims(di2[..., use_center], axis=-2) + np.expand_dims(di2[..., use_center],
axis=-1) - di2) / 2
u, s, v = np.linalg.svd(mat_m)
vecs = np.matmul(u, np.sqrt(np.diag(s))) # EV are sorted by default
distout = vecs[..., 0:dim]
return distout
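# Example (illustrative sketch, added for clarity): the coordinates are recovered only up to
# rotation/translation, so the check below compares pairwise distances rather than raw
# coordinates; the three points are made up.
# >>> import numpy as np
# >>> r = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
# >>> d = np.linalg.norm(np.expand_dims(r, 0) - np.expand_dims(r, 1), axis=-1)
# >>> c = coordinates_from_distancematrix(d)
# >>> np.allclose(np.linalg.norm(np.expand_dims(c, 0) - np.expand_dims(c, 1), axis=-1), d)
# True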
def make_rotationmatrix(vector, angle):
"""
Generate rotationmatrix around a given vector with a certain angle.
Only defined for 3 dimensions here.
Args:
vector (np.array, list): vector of rotation axis (3,) with (x,y,z)
angle (value): angle in degrees ° to rotate around
Returns:
list: Rotation matrix R of shape (3,3) that performs the rotation with y = R*x
"""
angle = angle / 180.0 * np.pi
norm = (vector[0] ** 2.0 + vector[1] ** 2.0 + vector[2] ** 2.0) ** 0.5
direction = vector / norm
matrix = np.zeros((3, 3))
matrix[0][0] = direction[0] ** 2.0 * (1.0 - np.cos(angle)) + np.cos(angle)
matrix[1][1] = direction[1] ** 2.0 * (1.0 - np.cos(angle)) + np.cos(angle)
matrix[2][2] = direction[2] ** 2.0 * (1.0 - np.cos(angle)) + np.cos(angle)
matrix[0][1] = direction[0] * direction[1] * (1.0 - np.cos(angle)) - direction[2] * np.sin(angle)
matrix[1][0] = direction[0] * direction[1] * (1.0 - np.cos(angle)) + direction[2] * np.sin(angle)
matrix[0][2] = direction[0] * direction[2] * (1.0 - np.cos(angle)) + direction[1] * np.sin(angle)
matrix[2][0] = direction[0] * direction[2] * (1.0 - np.cos(angle)) - direction[1] * np.sin(angle)
matrix[1][2] = direction[1] * direction[2] * (1.0 - np.cos(angle)) - direction[0] * np.sin(angle)
matrix[2][1] = direction[1] * direction[2] * (1.0 - np.cos(angle)) + direction[0] * np.sin(angle)
return matrix
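# Example (illustrative, added for clarity): rotating the x unit vector by 90 degrees
# around the z axis gives the y unit vector (up to floating point noise).
# >>> import numpy as np
# >>> R = make_rotationmatrix(np.array([0.0, 0.0, 1.0]), 90)
# >>> np.round(np.dot(R, np.array([1.0, 0.0, 0.0])), 6)
# array([0., 1., 0.])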
def rotate_to_principle_axis(coord):
"""Rotate a pointcloud to its principle axis.
This can be a molecule but also some general data.
It uses PCA via SVD from numpy.linalg.svd(). PCA from scikit uses SVD too (scipy.sparse.linalg).
Note:
The data is centered before SVD but shifted back at the output.
Args:
coord (np.array): Array of points forming a pointcloud. Important: coord has shape (N,p)
where N is the number of samples and p is the feature/coordinate dimension e.g. 3 for x,y,z
Returns:
tuple: [R,rotated]
        - R (np.array): rotation matrix of shape (p,p) if input has (N,p)
        - rotated (np.array): rotated pointcloud of coord that was the input.
"""
centroid_c = np.mean(coord, axis=0)
sm = coord - centroid_c
zzt = (np.dot(sm.T, sm)) # Calculate covariance matrix
u, s, vh = np.linalg.svd(zzt)
    # Alternatively, an SVD of coord computing only vh would be enough, but that is not available in numpy/scipy
rotated = np.dot(sm, vh.T)
rotshift = rotated + centroid_c
return vh, rotshift
def rigid_transform(a, b, correct_reflection=False):
"""Rotate and shift pointcloud A to pointcloud B. This should implement Kabsch algorithm.
Important: the numbering of points of A and B must match, no shuffled pointcloud.
This works for 3 dimensions only. Uses SVD.
Note:
Explanation of Kabsch Algorithm: https://en.wikipedia.org/wiki/Kabsch_algorithm
For further literature
https://link.springer.com/article/10.1007/s10015-016-0265-x
https://link.springer.com/article/10.1007%2Fs001380050048
maybe work for (...,N,3), not tested
Args:
a (np.array): list of points (N,3) to rotate (and translate)
b (np.array): list of points (N,3) to rotate towards: A to B, where the coordinates (3) are (x,y,z)
correct_reflection (bool): Whether to allow reflections or just rotations. Default is False.
Returns:
list: [A_rot,R,t]
- A_rot (np.array): Rotated and shifted version of A to match B
- R (np.array): Rotation matrix
- t (np.array): translation from A to B
"""
a = np.transpose(np.array(a))
b = np.transpose(np.array(b))
centroid_a = np.mean(a, axis=1)
centroid_b = np.mean(b, axis=1)
am = a - np.expand_dims(centroid_a, axis=1)
bm = b - np.expand_dims(centroid_b, axis=1)
h = np.dot(am, np.transpose(bm))
u, s, vt = np.linalg.svd(h)
r = np.dot(vt.T, u.T)
d = np.linalg.det(r)
if d < 0:
print("Warning: det(R)<0, det(R)=", d)
if correct_reflection:
print("Correcting R...")
vt[-1, :] *= -1
r = np.dot(vt.T, u.T)
bout = np.dot(r, am) + np.expand_dims(centroid_b, axis=1)
bout = np.transpose(bout)
t = np.expand_dims(centroid_b - np.dot(r, centroid_a), axis=0)
t = t.T
return bout, r, t
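# Example (illustrative sketch, added for clarity): aligning a purely translated copy of a
# point set back onto the original; the recovered rotation is the identity and the translation
# is the applied shift. The points are made up.
# >>> import numpy as np
# >>> a = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
# >>> b = a + np.array([1.0, 2.0, 3.0])
# >>> a_rot, r, t = rigid_transform(a, b)
# >>> np.allclose(a_rot, b)
# True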
def get_angles(coords, inds):
"""
    Compute angles between coordinates (...,N,3) from a matching index list that has shape (...,M,3)
with (ind0,ind1,ind2).
Angles are between ind1<(ind0,ind2) taking coords[ind]. The angle is oriented as ind1->ind0,ind1->ind2.
Args:
coords (np.array): list of coordinates of points (...,N,3)
inds (np.array): Index list of points (...,M,3) that means coords[i] with i in axis=-1.
Returns:
list: [angle_sin,angle_cos,angles ,norm_vec1,norm_vec2]
- angle_sin (np.array): sin() of the angles between ind2<(ind1,ind3)
- angle_cos (np.array): cos() of the angles between ind2<(ind1,ind3)
- angles (np.array): angles in rads
- norm_vec1 (np.array): length of vector ind1,ind2a
- norm_vec2 (np.array): length of vector ind1,ind2b
"""
ind1 = inds[..., 1]
ind2a = inds[..., 0]
ind2b = inds[..., 2]
vcords1 = np.take_along_axis(coords, np.expand_dims(ind1, axis=-1), axis=-2)
vcords2a = np.take_along_axis(coords, np.expand_dims(ind2a, axis=-1), axis=-2)
vcords2b = np.take_along_axis(coords, np.expand_dims(ind2b, axis=-1), axis=-2)
vec1 = -vcords1 + vcords2a
vec2 = -vcords1 + vcords2b
norm_vec1 = np.sqrt(np.sum(vec1 * vec1, axis=-1))
norm_vec2 = np.sqrt(np.sum(vec2 * vec2, axis=-1))
angle_cos = np.sum(vec1 * vec2, axis=-1) / norm_vec1 / norm_vec2
angles = np.arccos(angle_cos)
angle_sin = np.sin(angles)
return angle_sin, angle_cos, angles, norm_vec1, norm_vec2
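# Example (illustrative, added for clarity): a right angle at the centre point of three
# points, defined by the index triple (ind0, ind1, ind2) = (0, 1, 2); coordinates are made up.
# >>> import numpy as np
# >>> coords = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
# >>> _, cos_a, ang, n1, n2 = get_angles(coords, np.array([[0, 1, 2]]))
# >>> np.round(ang * 180.0 / np.pi, 3)
# array([90.])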
def all_angle_combinations(ind1, ind2):
"""
    Get all angles between ALL possible bonds, including unrelated bonds, e.g. (1,2) and (17,20), which are not connected.
    Input shape is (...,N).
    Note: This is mostly impractical and not wanted; see make_angle_list for normal use.
Args:
ind1 (np.array): Indexlist of start index for a bond. This must be sorted. Shape (...,N)
ind2 (np.array): Indexlist of end index for a bond. Shape (...,N)
Returns
        np.array: index tuples of shape (...,N*N/2-N,2,2) where the bonds are specified at last axis and
the bond pairs at axis=-2
"""
    # For all angles between unconnected bonds, possible for (...,N)
indb = np.concatenate([np.expand_dims(ind1, axis=-1), np.expand_dims(ind2, axis=-1)], axis=-1)
tils = [1] * (len(indb.shape) + 1)
tils[-2] = ind1.shape[-1]
b1 = np.tile(np.expand_dims(indb, axis=-2), tils)
tils = [1] * (len(indb.shape) + 1)
tils[-3] = ind1.shape[-1]
b2 = np.tile(np.expand_dims(indb, axis=-3), tils)
bcouples = np.concatenate([np.expand_dims(b1, axis=-2), np.expand_dims(b2, axis=-2)], axis=-2)
tris = np.tril_indices(ind1.shape[-1], k=-1)
bcouples = bcouples[..., tris[0], tris[1], :, :]
return bcouples
def make_angle_list(ind1, ind2):
"""Generate list of indices that match all angles for connections defined by (ind1,ind2).
For each unique index in ind1, meaning for each center. ind1 should be sorted.
Vectorized but requires memory for connections Max_bonds_per_atom*Number_atoms. Uses masking
Args:
ind1 (np.array): Indexlist of start index for a bond. This must be sorted. Shape (N,)
ind2 (np.array): Indexlist of end index for a bond. Shape (N,)
Returns:
out (np.array): Indexlist containing an angle-index-set. Shape (M,3)
Where the angle is defined by 0-1-2 as 1->0,1->2 or 1<(0,2)
"""
# Get unique atoms as center for bonds
n1_uni, n1_counts = np.unique(ind1, return_counts=True)
# n1_multi = np.repeat(n1_uni, n1_counts)
max_bonds = np.max(n1_counts)
# Make a list with (N_atoms,max_bonds) with zero padding for less bonds plus mask
# this is btab, btab_values where values have index2
btab = np.tile(np.expand_dims(np.arange(np.max(max_bonds)), axis=0), (len(n1_uni), 1))
btab = btab < np.expand_dims(n1_counts, axis=1)
btab_flat = btab.flatten()
btab_ind_flat = np.arange(len(btab_flat))
btab_ind_flat_activ = btab_ind_flat[btab_flat]
btab_values_flat = np.zeros(len(btab_flat))
btab_values_flat[btab_ind_flat_activ] = ind2
btab_values = np.reshape(btab_values_flat, btab.shape)
# Expand this padded list to a matrix (N_atoms, max_bonds, max_bonds, 2)
# to have all combinations like distance matrix and last dim indices
# Mask of this matrix must have left/upper blocks and a diagonal set to zero (no angle between same atom)
btab_values1 = np.tile(np.expand_dims(btab_values, axis=1), (1, max_bonds, 1))
btab_values2 = np.tile(np.expand_dims(btab_values, axis=2), (1, 1, max_bonds))
btab1 = np.tile(np.expand_dims(btab, axis=1), (1, max_bonds, 1))
btab2 = np.tile(np.expand_dims(btab, axis=2), (1, 1, max_bonds))
btab_values = np.concatenate([np.expand_dims(btab_values1, axis=-1), np.expand_dims(btab_values2, axis=-1)],
axis=-1)
btab_mat = np.logical_and(btab1, btab2)
btab_mat[..., np.arange(0, max_bonds), np.arange(0, max_bonds)] = False # set diagonal to zero
# Make the same matrix for the centers i.e. (N_atoms,max_bonds,max_bonds)
# with (...,max_bonds,max_bonds) has index of axis=0
center_1 = np.tile(np.expand_dims(np.expand_dims(np.arange(len(btab_mat)), axis=-1), axis=-1),
(1, max_bonds, max_bonds))
# Take Mask to get a list of index couples.
    # The indices of bonds from center must be sorted to remove duplicates e.g. 0,2 and 2,0 will be the same angle
center_1 = center_1[btab_mat]
bcouples = btab_values[btab_mat]
bcouples_sorted = np.sort(bcouples, axis=-1)
out_ind = np.concatenate([np.expand_dims(bcouples_sorted[:, 0], axis=-1),
np.expand_dims(center_1, axis=-1),
np.expand_dims(bcouples_sorted[:, 1], axis=-1),
], axis=-1)
# remove duplicate 'angles'
out = np.unique(out_ind, axis=0)
return out
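# Example (illustrative sketch, added for clarity): for a 3-atom chain 0-1-2 with the bond
# table given in both directions, the only angle triple is centred at atom 1.
# >>> import numpy as np
# >>> ind1 = np.array([0, 1, 1, 2])   # sorted bond start indices
# >>> ind2 = np.array([1, 0, 2, 1])   # matching bond end indices
# >>> make_angle_list(ind1, ind2)
# array([[0., 1., 2.]])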
def define_adjacency_from_distance(distance_matrix, max_distance=np.inf, max_neighbours=np.inf, exclusive=True,
self_loops=False):
"""
Construct adjacency matrix from a distance matrix by distance and number of neighbours. Works for batches.
This does take into account special bonds (e.g. chemical) just a general distance measure.
Tries to connect nearest neighbours.
Args:
distance_matrix (np.array): distance Matrix of shape (...,N,N)
max_distance (float, optional): Maximum distance to allow connections, can also be None. Defaults to np.inf.
max_neighbours (int, optional): Maximum number of neighbours, can also be None. Defaults to np.inf.
        exclusive (bool, optional): Whether both the maximum distance and the maximum number of neighbours must be fulfilled. Defaults to True.
self_loops (bool, optional): Allow self-loops on diagonal. Defaults to False.
Returns:
tuple: [graph_adjacency,graph_indices]
- graph_adjacency (np.array): Adjacency Matrix of shape (...,N,N) of dtype=np.bool.
        - graph_indices (np.array): Flattened indices from the former array that have adjacency == True.
"""
distance_matrix = np.array(distance_matrix)
num_atoms = distance_matrix.shape[-1]
if exclusive:
        graph_adjacency = np.ones_like(distance_matrix, dtype=bool)
    else:
        graph_adjacency = np.zeros_like(distance_matrix, dtype=bool)
inddiag = np.arange(num_atoms)
    # Make index matrix
indarr = np.indices(distance_matrix.shape)
re_order = np.append(np.arange(1, len(distance_matrix.shape) + 1), 0)
graph_indices = indarr.transpose(re_order)
# print(graph_indices.shape)
# Add Max Radius
if max_distance is not None:
temp = distance_matrix < max_distance
# temp[...,inddiag,inddiag] = False
if exclusive:
graph_adjacency = np.logical_and(graph_adjacency, temp)
else:
graph_adjacency = np.logical_or(graph_adjacency, temp)
    # Add maximum number of neighbours
if max_neighbours is not None:
max_neighbours = min(max_neighbours, num_atoms)
sorting_index = np.argsort(distance_matrix, axis=-1)
# SortedDistance = np.take_along_axis(self.distance_matrix, sorting_index, axis=-1)
ind_sorted_red = sorting_index[..., :max_neighbours + 1]
        temp = np.zeros_like(distance_matrix, dtype=bool)
np.put_along_axis(temp, ind_sorted_red, True, axis=-1)
if exclusive:
graph_adjacency = np.logical_and(graph_adjacency, temp)
else:
graph_adjacency = np.logical_or(graph_adjacency, temp)
# Allow self-loops
if not self_loops:
graph_adjacency[..., inddiag, inddiag] = False
graph_indices = graph_indices[graph_adjacency]
return graph_adjacency, graph_indices
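# Example (illustrative, added for clarity): a three-point chain where only the two short
# links fall below the distance cutoff; the distances are made up.
# >>> import numpy as np
# >>> d = np.array([[0.0, 1.0, 2.0], [1.0, 0.0, 1.0], [2.0, 1.0, 0.0]])
# >>> adj, idx = define_adjacency_from_distance(d, max_distance=1.5)
# >>> adj.astype(int)
# array([[0, 1, 0],
#        [1, 0, 1],
#        [0, 1, 0]])
# >>> idx
# array([[0, 1],
#        [1, 0],
#        [1, 2],
#        [2, 1]])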
def geometry_from_coulombmat(coulmat, unit_conversion=1):
"""
Generate a geometry from Coulombmatrix.
Args:
coulmat (np.array): Coulombmatrix of shape (N,N).
        unit_conversion (value, optional): If units are converted from or to a.u. Defaults to 1.
Returns:
list: [ats,cords]
- ats (list): List of atoms e.g. ['C','C'].
- cords (np.array): Coordinates of shape (N,3).
"""
# Does not require mol backend inference, just self.mol_from_geometry
invd, pr = coulombmatrix_to_inversedistance_proton(coulmat, unit_conversion)
dist = invert_distance(invd)
cords = coordinates_from_distancematrix(dist)
inverse_global_proton_dict = {1: 'H', 2: 'He', 3: 'Li', 4: 'Be', 5: 'b', 6: 'C', 7: 'N', 8: 'O', 9: 'F', 10: 'Ne',
11: 'Na', 12: 'Mg', 13: 'Al', 14: 'Si', 15: 'P', 16: 'S', 17: 'Cl', 18: 'Ar', 19: 'K',
20: 'Ca', 21: 'Sc', 22: 'Ti', 23: 'V', 24: 'Cr', 25: 'Mn', 26: 'Fe', 27: 'Co',
28: 'Ni', 29: 'Cu', 30: 'Zn', 31: 'Ga', 32: 'Ge', 33: 'As', 34: 'Se', 35: 'Br',
36: 'Kr', 37: 'Rb', 38: 'Sr', 39: 'Y', 40: 'Zr', 41: 'Nb', 42: 'Mo', 43: 'Tc',
44: 'Ru', 45: 'Rh', 46: 'Pd', 47: 'Ag', 48: 'Cd', 49: 'In', 50: 'Sn', 51: 'Sb',
52: 'Te', 53: 'I', 54: 'Xe', 55: 'Cs', 56: 'Ba', 57: 'La', 58: 'Ce', 59: 'Pr',
60: 'Nd', 61: 'Pm', 62: 'Sm', 63: 'Eu', 64: 'Gd', 65: 'Tb', 66: 'Dy', 67: 'Ho',
68: 'Er', 69: 'Tm', 70: 'Yb', 71: 'Lu', 72: 'Hf', 73: 'Ta',
74: 'W', 75: 'Re', 76: 'Os', 77: 'Ir', 78: 'Pt', 79: 'Au', 80: 'Hg', 81: 'Tl',
82: 'Pb', 83: 'Bi', 84: 'Po', 85: 'At', 86: 'Rn', 87: 'Fr', 88: 'Ra', 89: 'Ac',
90: 'Th', 91: 'Pa', 92: 'U', 93: 'Np', 94: 'Pu', 95: 'Am', 96: 'Cm', 97: 'Bk',
98: 'Cf', 99: 'Es', 100: 'Fm', 101: 'Md', 102: 'No', 103: 'Lr', 104: 'Rf',
105: 'Db', 106: 'Sg', 107: 'Bh', 108: 'Hs',
109: 'Mt', 110: 'Ds', 111: 'Rg', 112: 'Cn', 113: 'Nh', 114: 'Fl', 115: 'Mc',
116: 'Lv', 117: 'Ts', 118: 'Og', 119: 'Uue'}
ats = [inverse_global_proton_dict[x] for x in pr]
return ats, cords
def add_edges_reverse_indices(edge_indices, edge_values=None, remove_duplicates=True, sort_indices=True):
"""Add the edges for (i,j) as (j,i) with the same edge values. If they do already exist, no edge is added.
By default, all indices are sorted.
Args:
edge_indices (np.array): Index list of shape (N,2).
edge_values (np.array): Edge values of shape (N,M) matching the edge_indices
remove_duplicates (bool): Remove duplicate edge indices. Default is True.
sort_indices (bool): Sort final edge indices. Default is True.
Returns:
np.array: edge_indices or [edge_indices, edge_values]
"""
clean_edge = None
edge_index_flip = np.concatenate([edge_indices[:,1:2] ,edge_indices[:,0:1]],axis=-1)
edge_index_flip_ij = edge_index_flip[edge_index_flip[:,1] != edge_index_flip[:,0]] # Do not flip self loops
    clean_index = np.concatenate([edge_indices,edge_index_flip_ij],axis=0)
import numpy as np
import pytest
from scipy.misc import electrocardiogram
from pyecg import ECGRecord, Time, Signal
@pytest.mark.parametrize("fs, samples", [(360, 10), (250, 20), (360.0, 30)])
def test_duration(fs, samples):
record = ECGRecord("record_100", time=Time.from_fs_samples(fs, samples))
assert record.duration == (samples - 1) / fs
@pytest.mark.parametrize("fs, samples", [(360, 10), (250, 20), (360.0, 30)])
def test_length(fs, samples):
record = ECGRecord("record_100", time=Time.from_fs_samples(fs, samples))
assert len(record) == samples
@pytest.mark.parametrize("time", [[1, 2, 3, 4]])
def test_bad_time(time):
with pytest.raises(TypeError):
ECGRecord("record_100", time=time)
def test_inconsistent_signal_len():
record = ECGRecord("record_100", time=Time.from_fs_samples(360, 10))
with pytest.raises(ValueError):
record.add_signal(Signal(electrocardiogram(), "MLII"))
def test_inconsistent_signal_type():
record = ECGRecord("record_100", time=Time.from_fs_samples(360, 10))
with pytest.raises(TypeError):
record.add_signal(electrocardiogram())
@pytest.mark.parametrize("time, signal", [(np.arange(100), np.random.rand(100)),
(np.arange(100), np.random.rand(100, 3, 4))])
def test_from_numpy_array_bad_signal_shape(time, signal):
with pytest.raises(ValueError):
ECGRecord.from_np_array("record_100", time, signal, ["II"])
@pytest.mark.parametrize("time, signal", [(np.arange(100)
"""simulate.py: Contains Cantilever class."""
# pylint: disable=E1101,R0902,C0103
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, Ginger Lab"
__email__ = "<EMAIL>"
__status__ = "Production"
import numpy as np
from scipy.integrate import odeint
from .cantilever import Cantilever
from . import excitation
# Set constant 2 * pi.
PI2 = 2 * np.pi
class ElectricDrive(Cantilever):
"""Damped Driven Harmonic Oscillator Simulator for AFM Cantilevers under Electric drive
Simulates a DDHO under excitation with given parameters.
Parameters
----------
can_params : dict
Parameters for cantilever properties. See Cantilever
force_params : dict
Parameters for forces. The dictionary contains:
es_force = float (in N)
delta_freq = float (in Hz)
tau = float (in seconds)
v_dc = float (in Volts)
v_ac = float (in Volts)
v_cpd = float (in Volts)
dCdz = float (in F/m)
sim_params : dict
Parameters for simulation. The dictionary contains:
trigger = float (in seconds)
total_time = float (in seconds)
sampling_rate = int (in Hz)
v_array : ndarray, optional
If provided, supplies the time-dependent voltage to v_cpd
v_array must be the exact length and sampling of the desired signal
v_array only functionally does anything after the trigger.
v_step : float, optional
If v_array not supplied, then a voltage of v_step is applied at the trigger
Attributes
----------
amp : float
Amplitude of the cantilever in meters.
beta : float
Damping factor of the cantilever in rad/s.
delta : float
Initial phase of the cantilever in radians.
delta_freq : float
Frequency shift of the cantilever under excitation.
mass : float
Mass of the cantilever in kilograms.
Method
------
simulate(trigger_phase=180)
Simulates the cantilever motion with excitation happening
at the given phase.
See Also
--------
pixel: Pixel processing for FF-trEFM data.
Cantilever: base class
Examples
--------
>>> from ffta.simulation import electric_drive, load
>>>
>>> params_file = '../examples/sim_params.cfg'
>>> params = load.simulation_configuration(params_file)
>>>
>>> c = electric_drive.ElectricDrive(*params)
>>> Z, infodict = c.simulate()
>>> c.analyze()
>>> c.analyze(roi=0.004) # can change the parameters as desired
>>>
>>> # To supply an arbitary v_array
>>> n_points = int(params[2]['total_time'] * params[2]['sampling_rate'])
>>> v_array = np.ones(n_points) # create just a flat excitation
    >>> c = electric_drive.ElectricDrive(*params, v_array = v_array)
>>> Z, _ = c.simulate()
>>> c.analyze()
>>>
>>> # To supply an arbitary voltage step
>>> step = -7 #-7 Volt step
    >>> c = electric_drive.ElectricDrive(*params, v_step = step)
>>> Z, _ = c.simulate()
>>> c.analyze()
"""
def __init__(self, can_params, force_params, sim_params, v_array=[], v_step=np.nan,
func=excitation.single_exp, func_args=[]):
parms = [can_params, force_params, sim_params]
super(ElectricDrive, self).__init__(*parms)
# Did user supply a voltage pulse themselves (Electrical drive only)
self.use_varray = False
self.use_vstep = False
if any(v_array):
if len(v_array) != int(self.total_time * self.sampling_rate):
raise ValueError('v_array must match sampling rate/length of parameters')
else:
self.use_varray = True
self.v_array = v_array
self.scale = [np.max(v_array) - np.min(v_array), np.min(v_array)]
if not np.isnan(v_step):
self.v_step = v_step # if applying a single DC step
self.use_vstep = True
self.func = func
self.func_args = func_args
# default case set a single tau for a single exponential function
if not np.any(func_args):
self.func_args = [self.tau]
try:
_ = self.func(0, *self.func_args)
except:
print('Be sure to correctly set func_args=[params]')
return
return
def __gamma__(self, t):
"""
Controls how the cantilever behaves after a trigger.
Default operation is an exponential decay to omega0 - delta_freq with
time constant tau.
If supplying an explicit v_array, then this function will call the values
in that array
Parameters
----------
t : float
Time in seconds.
Returns
-------
value : float
Value of the function at the given time.
"""
p = int(t * self.sampling_rate)
n_points = int(self.total_time * self.sampling_rate)
t0 = self.t0
if t >= t0:
if not self.use_varray:
return self.func(t - t0, *self.func_args)
else:
_g = self.v_array[p] if p < n_points else self.v_array[-1]
_g = (_g - self.scale[1]) / self.scale[0]
return _g
else:
return 0
def omega(self, t, t0, tau):
"""
Exponentially decaying resonance frequency.
Parameters
----------
t : float
Time in seconds.
t0: float
Event time in seconds.
tau : float
Decay constant in the exponential function, in seconds.
Returns
-------
w : float
Resonance frequency of the cantilever at a given time, in rad/s.
"""
return self.w0 + self.delta_w * self.__gamma__(t)
def dc_step(self, t, t0):
"""
Adds a DC step at the trigger point for electrical drive simulation
Parameters
----------
t : float
Time in seconds.
t0: float
Event time in seconds.
"""
if t > t0:
return self.v_step
else:
return self.v_dc
def force(self, t, t0, tau):
"""
Force on the cantilever at a given time. It contains driving force and
electrostatic force.
Parameters
----------
t : float
Time in seconds.
t0: float
Event time in seconds.
tau : float
Decay constant in the exponential function, in seconds.
Returns
-------
f : float
Force on the cantilever at a given time, in N/kg.
"""
# explicitly define voltage at each time step
if self.use_varray:
p = int(t * self.sampling_rate)
n_points = int(self.total_time * self.sampling_rate)
_g = self.v_array[p] if p < n_points else self.v_array[-1]
driving_force = 0.5 * self.dCdz / self.mass * ((_g - self.v_cpd) \
+ self.v_ac * np.sin(self.wd * t)) ** 2
else: # single voltage step
driving_force = 0.5 * self.dCdz / self.mass * ((self.dc_step(t, t0) - self.v_cpd) \
+ self.v_ac * np.sin(self.wd * t)) ** 2
return driving_force
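    # Note added for clarity (not in the original source): the expression above is the
    # electrostatic force per unit mass,
    #   F/m = (1/2) * (dC/dz) / m * ((V - V_cpd) + V_ac * sin(w_d * t)) ** 2,
    # so the squared bracket produces drive components at both w_d and 2*w_d. This is why
    # set_conditions() below evaluates the phase lag of the oscillator at 2*w_d.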
def set_conditions(self, trigger_phase=180):
"""
        Sets initial conditions and other simulation parameters. Uses 2*wd because of
        the squared term in the electric drive.
Parameters
----------
trigger_phase: float, optional
Trigger phase is in degrees and wrt cosine. Default value is 180.
"""
self.delta = np.abs(np.arctan(np.divide(2 * (2 * self.wd) * self.beta,
self.w0 ** 2 - (2 * self.wd) ** 2)))
self.trigger_phase = np.mod(np.pi * trigger_phase / 180, PI2)
self.n_points = int(self.total_time * self.sampling_rate)
# Add extra cycles to the simulation to find correct phase at trigger.
cycle_points = int(2 * self.sampling_rate / self.res_freq)
self.n_points_sim = cycle_points + self.n_points
# Create time vector and find the trigger wrt phase.
self.t = np.arange(self.n_points_sim) / self.sampling_rate
# Current phase at trigger.
current_phase = np.mod(self.wd * self.trigger - self.delta, PI2)
phase_diff = np.mod(self.trigger_phase - current_phase, PI2)
self.t0 = self.trigger + phase_diff / self.wd # modified trigger point
# Set the initial conditions at t=0.
z0 = self.amp * np.sin(-self.delta)
        v0 = self.amp * self.wd * np.cos(-self.delta)
#!/usr/bin/python
#
# test_transform.py - Unit tests for transform module
#
# Author: <NAME> (<EMAIL>)
# Date: 7/1/2015
#
# Requires:
# * FlowCal.io
# * FlowCal.transform
# * numpy
#
import FlowCal.io
import FlowCal.transform
import numpy as np
import unittest
import os
class TestRFIArray(unittest.TestCase):
def setUp(self):
self.d = np.array([
[1, 7, 2],
[2, 8, 3],
[3, 9, 4],
[4, 10, 5],
[5, 1, 6],
[6, 2, 7],
[7, 3, 8],
[8, 4, 9],
[9, 5, 10],
[10, 6, 1],
])
def test_rfi_original_integrity(self):
db = self.d.copy()
dt = FlowCal.transform.to_rfi(self.d,
channels=[0,1],
amplification_type=[(0,0), (0,0)],
amplifier_gain=[1.0, 1.0],
resolution=[1024, 1024],)
np.testing.assert_array_equal(self.d, db)
def test_rfi_arg_error_amplification_type_absent(self):
with self.assertRaises(ValueError):
FlowCal.transform.to_rfi(self.d,
channels=[0,1])
def test_rfi_arg_error_amplification_type_length(self):
with self.assertRaises(ValueError):
FlowCal.transform.to_rfi(self.d,
channels=[0,1],
amplification_type=[(4,1), (4,1), (4,1)])
def test_rfi_arg_error_resolution_absent(self):
with self.assertRaises(ValueError):
FlowCal.transform.to_rfi(self.d,
channels=[0,1],
amplification_type=[(4,1), (4,1)])
def test_rfi_arg_error_resolution_length(self):
with self.assertRaises(ValueError):
FlowCal.transform.to_rfi(self.d,
channels=[0,1],
amplification_type=[(4,1), (4,1)],
resolution=[1024])
def test_rfi_arg_error_amplifier_gain_length(self):
with self.assertRaises(ValueError):
FlowCal.transform.to_rfi(self.d,
channels=[0,1],
amplification_type=[(0,0), (0,0)],
amplifier_gain=[3,4,4])
def test_rfi_1d_log_1(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=1,
amplification_type=(4, 1),
resolution=1024)
np.testing.assert_array_equal(dt[:,0], self.d[:,0])
np.testing.assert_array_equal(dt[:,1], 10**(self.d[:,1]/256.0))
np.testing.assert_array_equal(dt[:,2], self.d[:,2])
def test_rfi_1d_log_2(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=2,
amplification_type=(2, 0.01),
amplifier_gain=5.0,
resolution=256)
np.testing.assert_array_equal(dt[:,0], self.d[:,0])
np.testing.assert_array_equal(dt[:,1], self.d[:,1])
np.testing.assert_array_equal(dt[:,2], 0.01*10**(self.d[:,2]/128.0))
def test_rfi_1d_linear_1(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=2,
amplification_type=(0, 0),
amplifier_gain=None,
resolution=256)
np.testing.assert_array_equal(dt[:,0], self.d[:,0])
np.testing.assert_array_equal(dt[:,1], self.d[:,1])
np.testing.assert_array_equal(dt[:,2], self.d[:,2])
def test_rfi_1d_linear_2(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=1,
amplification_type=(0, 0),
amplifier_gain=5.0,
resolution=256)
np.testing.assert_array_equal(dt[:,0], self.d[:,0])
np.testing.assert_array_equal(dt[:,1], self.d[:,1]/5.0)
np.testing.assert_array_equal(dt[:,2], self.d[:,2])
def test_rfi_2d_log_1(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=[1,2],
amplification_type=[(4, 1), (2, 0.01)],
resolution=[1024, 256])
np.testing.assert_array_equal(dt[:,0], self.d[:,0])
np.testing.assert_array_equal(dt[:,1], 10**(self.d[:,1]/256.0))
np.testing.assert_array_equal(dt[:,2], 0.01*10**(self.d[:,2]/128.0))
def test_rfi_2d_mixed_1(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=[1,2],
amplification_type=[(4, 1), (0, 0)],
amplifier_gain=[4., None],
resolution=[1024, 1024])
np.testing.assert_array_equal(dt[:,0], self.d[:,0])
np.testing.assert_array_equal(dt[:,1], 10**(self.d[:,1]/256.0))
np.testing.assert_array_equal(dt[:,2], self.d[:,2])
def test_rfi_2d_mixed_2(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=[1,2],
amplification_type=[(4, 1), (0, 0)],
amplifier_gain=[4., 10.],
resolution=[1024, 1024])
np.testing.assert_array_equal(dt[:,0], self.d[:,0])
np.testing.assert_array_equal(dt[:,1], 10**(self.d[:,1]/256.0))
np.testing.assert_array_equal(dt[:,2], self.d[:,2]/10.)
def test_rfi_default_channel_1(self):
dt = FlowCal.transform.to_rfi(self.d,
amplification_type=[(4,1)]*3,
amplifier_gain=[4., 5., 10.],
resolution=[1024]*3)
np.testing.assert_array_equal(dt, 10**(self.d/256.0))
def test_rfi_default_channel_2(self):
dt = FlowCal.transform.to_rfi(self.d,
amplification_type=[(0,0)]*3,
amplifier_gain=[10., 100., 0.01],
resolution=[1024]*3)
np.testing.assert_array_equal(dt, self.d/np.array([10., 100., 0.01]))
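# Note added for clarity (not part of the original test suite): the expectations above are
# consistent with the transform
#   rfi = a1 * 10 ** (x * a0 / resolution)
# for log amplification_type=(a0, a1), e.g. (4, 1) with resolution=1024 gives
# 10 ** (x / 256) and (2, 0.01) with resolution=256 gives 0.01 * 10 ** (x / 128),
# and with rfi = x / amplifier_gain for linear amplification_type=(0, 0) when a gain is
# provided (identity when the gain is None).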
class TestRFIFCSLog(unittest.TestCase):
def setUp(self):
self.channel_names = ['FSC-H', 'SSC-H', 'FL1-H',
'FL2-H', 'FL3-H', 'Time']
current_dir = os.path.abspath(__file__).replace(__file__, '') + os.path.sep
self.d = FlowCal.io.FCSData(current_dir + 'Data001.fcs')
self.n_samples = self.d.shape[0]
def test_rfi_original_integrity(self):
db = self.d.copy()
dt = FlowCal.transform.to_rfi(self.d,
channels=['FSC-H', 'SSC-H'],
amplification_type=[(4,1), (4,1)],
resolution=[1024, 1024])
np.testing.assert_array_equal(self.d, db)
def test_rfi_arg_error_amplification_type_length(self):
with self.assertRaises(ValueError):
FlowCal.transform.to_rfi(self.d,
channels=[0,1],
amplification_type=[(4,1), (4,1), (4,1)])
def test_rfi_arg_error_resolution_length(self):
with self.assertRaises(ValueError):
FlowCal.transform.to_rfi(self.d,
channels=[0,1],
amplification_type=[(4,1), (4,1)],
resolution=[1024])
def test_rfi_arg_error_amplifier_gain_length(self):
with self.assertRaises(ValueError):
FlowCal.transform.to_rfi(self.d,
channels=[0,1],
amplification_type=[(0,0), (0,0)],
amplifier_gain=[3,4,4])
def test_rfi_1d_log_1(self):
dt = FlowCal.transform.to_rfi(self.d,
channels='FL1-H',
amplification_type=(4, 0.01),
resolution=512)
np.testing.assert_array_equal(dt[:,'FSC-H'], self.d[:,'FSC-H'])
np.testing.assert_array_equal(dt[:,'SSC-H'], self.d[:,'SSC-H'])
np.testing.assert_array_equal(dt[:,'FL1-H'],
0.01*10**(self.d[:,'FL1-H']/128.0))
np.testing.assert_array_equal(dt[:,'FL2-H'], self.d[:,'FL2-H'])
np.testing.assert_array_equal(dt[:,'FL3-H'], self.d[:,'FL3-H'])
np.testing.assert_array_equal(dt[:,'Time'], self.d[:,'Time'])
def test_rfi_1d_log_2(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=2,
amplification_type=(2, 0.01),
amplifier_gain=50.,
resolution=512)
np.testing.assert_array_equal(dt[:,'FSC-H'], self.d[:,'FSC-H'])
np.testing.assert_array_equal(dt[:,'SSC-H'], self.d[:,'SSC-H'])
np.testing.assert_array_equal(dt[:,'FL1-H'],
0.01*10**(self.d[:,'FL1-H']/256.0))
np.testing.assert_array_equal(dt[:,'FL2-H'], self.d[:,'FL2-H'])
np.testing.assert_array_equal(dt[:,'FL3-H'], self.d[:,'FL3-H'])
np.testing.assert_array_equal(dt[:,'Time'], self.d[:,'Time'])
def test_rfi_1d_linear_1(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=2,
amplification_type=(0, 0),
amplifier_gain=50.,
resolution=512)
np.testing.assert_array_equal(dt[:,'FSC-H'], self.d[:,'FSC-H'])
np.testing.assert_array_equal(dt[:,'SSC-H'], self.d[:,'SSC-H'])
np.testing.assert_array_equal(dt[:,'FL1-H'], self.d[:,'FL1-H']/50.)
np.testing.assert_array_equal(dt[:,'FL2-H'], self.d[:,'FL2-H'])
np.testing.assert_array_equal(dt[:,'FL3-H'], self.d[:,'FL3-H'])
np.testing.assert_array_equal(dt[:,'Time'], self.d[:,'Time'])
def test_rfi_1d_defaults(self):
dt = FlowCal.transform.to_rfi(self.d,
channels='FL1-H')
np.testing.assert_array_equal(dt[:,'FSC-H'], self.d[:,'FSC-H'])
np.testing.assert_array_equal(dt[:,'SSC-H'], self.d[:,'SSC-H'])
np.testing.assert_array_equal(dt[:,'FL1-H'],
10**(self.d[:,'FL1-H']/256.0))
np.testing.assert_array_equal(dt[:,'FL2-H'], self.d[:,'FL2-H'])
np.testing.assert_array_equal(dt[:,'FL3-H'], self.d[:,'FL3-H'])
np.testing.assert_array_equal(dt[:,'Time'], self.d[:,'Time'])
def test_rfi_2d_log_1(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=['FL1-H', 'FL3-H'],
amplification_type=[(4, 0.01), (2, 1)],
resolution=[512, 2048])
np.testing.assert_array_equal(dt[:,'FSC-H'], self.d[:,'FSC-H'])
np.testing.assert_array_equal(dt[:,'SSC-H'], self.d[:,'SSC-H'])
np.testing.assert_array_equal(dt[:,'FL1-H'],
0.01*10**(self.d[:,'FL1-H']/128.0))
np.testing.assert_array_equal(dt[:,'FL2-H'], self.d[:,'FL2-H'])
np.testing.assert_array_equal(dt[:,'FL3-H'],
10**(self.d[:,'FL3-H']/1024.))
np.testing.assert_array_equal(dt[:,'Time'], self.d[:,'Time'])
def test_rfi_2d_mixed_1(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=[2, 4],
amplification_type=[(4, 0.01), (0, 0)],
resolution=[512, 1024])
np.testing.assert_array_equal(dt[:,'FSC-H'], self.d[:,'FSC-H'])
np.testing.assert_array_equal(dt[:,'SSC-H'], self.d[:,'SSC-H'])
np.testing.assert_array_equal(dt[:,'FL1-H'],
0.01*10**(self.d[:,'FL1-H']/128.0))
np.testing.assert_array_equal(dt[:,'FL2-H'], self.d[:,'FL2-H'])
np.testing.assert_array_equal(dt[:,'FL3-H'], self.d[:,'FL3-H'])
np.testing.assert_array_equal(dt[:,'Time'], self.d[:,'Time'])
def test_rfi_2d_mixed_2(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=[2, 4],
amplification_type=[(4, 0.01), (0, 0)],
amplifier_gain=[5., None],
resolution=[512, 1024])
np.testing.assert_array_equal(dt[:,'FSC-H'], self.d[:,'FSC-H'])
np.testing.assert_array_equal(dt[:,'SSC-H'], self.d[:,'SSC-H'])
np.testing.assert_array_equal(dt[:,'FL1-H'],
0.01*10**(self.d[:,'FL1-H']/128.0))
np.testing.assert_array_equal(dt[:,'FL2-H'], self.d[:,'FL2-H'])
np.testing.assert_array_equal(dt[:,'FL3-H'], self.d[:,'FL3-H'])
np.testing.assert_array_equal(dt[:,'Time'], self.d[:,'Time'])
def test_rfi_2d_mixed_3(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=[2, 4],
amplification_type=[(4, 0.01), (0, 0)],
amplifier_gain=[5., 10.],
resolution=[512, 1024])
np.testing.assert_array_equal(dt[:,'FSC-H'], self.d[:,'FSC-H'])
np.testing.assert_array_equal(dt[:,'SSC-H'], self.d[:,'SSC-H'])
np.testing.assert_array_equal(dt[:,'FL1-H'],
0.01*10**(self.d[:,'FL1-H']/128.0))
np.testing.assert_array_equal(dt[:,'FL2-H'], self.d[:,'FL2-H'])
np.testing.assert_array_equal(dt[:,'FL3-H'], self.d[:,'FL3-H']/10.)
np.testing.assert_array_equal(dt[:,'Time'], self.d[:,'Time'])
def test_rfi_2d_defaults(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=['FL1-H', 'FL3-H'])
np.testing.assert_array_equal(dt[:,'FSC-H'], self.d[:,'FSC-H'])
np.testing.assert_array_equal(dt[:,'SSC-H'], self.d[:,'SSC-H'])
np.testing.assert_array_equal(dt[:,'FL1-H'],
10**(self.d[:,'FL1-H']/256.0))
np.testing.assert_array_equal(dt[:,'FL2-H'], self.d[:,'FL2-H'])
np.testing.assert_array_equal(dt[:,'FL3-H'],
10**(self.d[:,'FL3-H']/256.))
np.testing.assert_array_equal(dt[:,'Time'], self.d[:,'Time'])
def test_rfi_2d_range(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=['FL1-H', 'FL3-H'],
resolution=[512, 2048],
amplification_type=[(4, 0.01), (2, 1)])
np.testing.assert_array_equal(
dt.range('FSC-H'),
self.d.range('FSC-H'))
np.testing.assert_array_equal(
dt.range('SSC-H'),
self.d.range('SSC-H'))
np.testing.assert_array_equal(
dt.range('FL1-H'),
[0.01*10**(r/128.0) for r in self.d.range('FL1-H')])
np.testing.assert_array_equal(
dt.range('FL2-H'),
self.d.range('FL2-H'))
np.testing.assert_array_equal(
dt.range('FL3-H'),
[10**(r/1024.0) for r in self.d.range('FL3-H')])
def test_rfi_default_channel(self):
# Leave time channel out
channels = ['FSC-H', 'SSC-H', 'FL1-H', 'FL2-H', 'FL3-H']
dt = FlowCal.transform.to_rfi(self.d[:, channels])
np.testing.assert_array_equal(dt[:,'FSC-H'], self.d[:,'FSC-H'])
        np.testing.assert_array_equal(dt[:,'SSC-H'], self.d[:,'SSC-H'])
import numpy as np
import pandas as pd
from keras import Sequential
from keras.layers import CuDNNGRU, Dense
from keras.utils import to_categorical
from keras_preprocessing.sequence import TimeseriesGenerator
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
batch_size = 8
epochs = 4
df = pd.read_csv('./data/interim/Corpus_Cleaned.csv')
raw = df.loc[:, 'Top1':'Top25'].apply(lambda x: ' '.join([str(s) for s in x]), axis=1)
y = to_categorical(df.loc[:, 'Class'])
raw_train, raw_test, y_train, y_test = train_test_split(raw, y, train_size=0.8, shuffle=False)
vectorizer = CountVectorizer(binary=True)
X_train = vectorizer.fit_transform(raw_train)
X_test = vectorizer.transform(raw_test)
transformer = TfidfTransformer()
X_train = transformer.fit_transform(X_train)
X_test = transformer.transform(X_test)
X_train = X_train.todense()
X_test = X_test.todense()
y_train = np.matrix(y_train)
y_test = np.matrix(y_test)
# -*- coding: utf-8 -*-
# evaluate registration error and write into csv
import pandas as pd
import skimage.io as skio
import skimage.transform as skt
import scipy.io as sio
from tqdm import tqdm
import os, cv2, argparse
import numpy as np
from glob import glob
from sklearn.decomposition import PCA
from skimage import exposure
import SimpleITK as sitk
# self-defined functions
from utils.make_eliceiri_patches import tform_centred
from utils.make_rire_patches import get_transform_rigid3D, dist_coords, transform_coords, resample_volume
from mi import register_mi_3D
from sift import register_sift
import sys
sys.path.append(os.path.abspath("./alpha_amd"))
import aamd
# %%
def evaluate_methods(data_root, method, gan_name='', preprocess='nopre', mode='b2a', display=None):
'''
data_root:
should contains "{data_root}/A/test/" and "{data_root}/B/test/".
Corresponding images should have the same name.
'''
# data_root='./Datasets/RIRE_patches/fold1/patch_tlevel3/'
# method='MI'
# gan_name=''
# data_root_fake='./Datasets/RIRE_patches_fake/fold1'
# preprocess='nopre'
# mode='b2a'
# display=None
# dataset-specific variables
if 'RIRE' in data_root:
img_root='../Datasets/RIRE'
fold = data_root[data_root.rfind('fold') + len('fold')]
data_root_fake=f'./Datasets/RIRE_patches_fake/fold{fold}'
if 'MI' in method and method.replace('MI', '') != '':
n_mi_res=int(method.replace('MI', '')) # number of resolution level for MI
else:
n_mi_res=4
n_aAMD_iters=0.3 # factor of number of iterations for aAMD
dir_A = data_root + 'A/test/'
dir_B = data_root + 'B/test/'
if gan_name != '':
assert data_root_fake, "data_root_fake must not be None when given gan_name."
assert gan_name in ['cyc_A', 'cyc_B', 'p2p_A', 'p2p_B', 'drit_A', 'drit_B', 'star_A', 'star_B', 'comir'], (
"gan_name must be in 'cyc_A', 'cyc_B', 'p2p_A', 'p2p_B', 'drit_A', 'drit_B', 'star_A', 'star_B', 'comir'")
if 'comir' in gan_name:
dir_A = f'{data_root_fake}/{os.path.split(data_root[:-1])[-1]}/{gan_name}_A/'
dir_B = f'{data_root_fake}/{os.path.split(data_root[:-1])[-1]}/{gan_name}_B/'
elif '_A' in gan_name:
dir_B = f'{data_root_fake}/{os.path.split(data_root[:-1])[-1]}/{gan_name}/'
elif '_B' in gan_name:
dir_A = f'{data_root_fake}/{os.path.split(data_root[:-1])[-1]}/{gan_name}/'
assert mode in ['a2b', 'b2a', 'a2a', 'b2b'], "mode must be in ['a2b', 'b2a', 'a2a', 'b2b']"
if mode=='a2b':
dir_src = dir_A
dir_tar = dir_B
elif mode=='b2a':
dir_src = dir_B
dir_tar = dir_A
elif mode=='a2a':
dir_src = dir_A
dir_tar = dir_A
elif mode=='b2b':
dir_src = dir_B
dir_tar = dir_B
assert preprocess in ['', 'nopre', 'PCA', 'hiseq'], "preprocess must be in ['', 'nopre', 'PCA', 'hiseq']"
suffix_src = '_' + os.listdir(dir_src)[0].split('_')[-1]
suffix_src = suffix_src.replace('raw', 'mhd')
name_srcs = set([name[:-len(suffix_src)] for name in os.listdir(dir_src)])
suffix_tar = '_' + os.listdir(dir_tar)[0].split('_')[-1]
suffix_tar = suffix_tar.replace('raw', 'mhd')
name_tars = set([name[:-len(suffix_tar)] for name in os.listdir(dir_tar)])
f_names = name_srcs & name_tars
f_names = list(f_names)
f_names.sort()
f_names = [f_name for f_name in f_names if len(f_name) > len('patient_003')]
df = pd.read_csv(data_root + 'info_test.csv', index_col='Filename')
for f_name in tqdm(f_names):
_, f_name, i = f_name.split('_')
# extract reference and transformed patch coordinates
coords_ref = df.loc[
f'{f_name}_{i}',
['X1_Ref', 'Y1_Ref', 'Z1_Ref',
'X2_Ref', 'Y2_Ref', 'Z2_Ref',
'X3_Ref', 'Y3_Ref', 'Z3_Ref',
'X4_Ref', 'Y4_Ref', 'Z4_Ref',
'X5_Ref', 'Y5_Ref', 'Z5_Ref',
'X6_Ref', 'Y6_Ref', 'Z6_Ref',
'X7_Ref', 'Y7_Ref', 'Z7_Ref',
'X8_Ref', 'Y8_Ref', 'Z8_Ref']
].to_numpy().reshape((8, 3))
coords_trans = df.loc[
f'{f_name}_{i}',
['X1_Trans', 'Y1_Trans', 'Z1_Trans',
'X2_Trans', 'Y2_Trans', 'Z2_Trans',
'X3_Trans', 'Y3_Trans', 'Z3_Trans',
'X4_Trans', 'Y4_Trans', 'Z4_Trans',
'X5_Trans', 'Y5_Trans', 'Z5_Trans',
'X6_Trans', 'Y6_Trans', 'Z6_Trans',
'X7_Trans', 'Y7_Trans', 'Z7_Trans',
'X8_Trans', 'Y8_Trans', 'Z8_Trans']
].to_numpy().reshape((8, 3))
# load image (w, h)
# img_grey = np.asarray((img_rgb[...,0] * 0.299 + img_rgb[...,1] * 0.587 + img_rgb[...,2] * 0.114), dtype=np.uint8)
img_src = sitk.ReadImage(dir_src + f"patient_{f_name}_{i}_T.{suffix_src.split('.')[-1]}")
img_tar = sitk.ReadImage(dir_tar + f"patient_{f_name}_R.{suffix_tar.split('.')[-1]}")
if 'MI' in method:
# register
try:
transformParameterMap = register_mi_3D(img_src, img_tar, n_res=n_mi_res)
except:
continue
# transform the transformed patch coordinates back
tform = get_transform_rigid3D(
parameters=[float(c) for c in transformParameterMap['TransformParameters']],
center=[float(c) for c in transformParameterMap['CenterOfRotationPoint']])
coords_rec = transform_coords(coords_trans, tform)
elif 'aAMD' in method:
# TODO: not working
if img_src.ndim == 2:
img_src = np.expand_dims(img_src, axis=-1)
if img_tar.ndim == 2:
img_tar = np.expand_dims(img_tar, axis=-1)
# register
try:
img_rec, t = aamd.register_aamd(ref_im=img_tar, flo_im=img_src, iterations=n_aAMD_iters)
except:
continue
coords_rec = aamd.transform_coords(t, coords_in=coords_trans, centre_patch=centre_patch)
elif 'SIFT' in method:
# img_src = cv2.imread(dir_src + f"{f_name}_T.{suffix_src.split('.')[-1]}", 0)
# img_tar = cv2.imread(dir_tar + f"{f_name}_R.{suffix_tar.split('.')[-1]}", 0)
# register
try:
img_match, img_rec, tform = register_sift(img_src, img_tar)
except:
continue
coords_rec = skt.matrix_transform(coords_trans, tform.params)
# calculate error
disp_error = dist_coords(coords_rec, coords_ref)
result = {
'X1_Recover': coords_rec[0][0], 'Y1_Recover': coords_rec[0][1], 'Z1_Recover': coords_rec[0][2],
'X2_Recover': coords_rec[1][0], 'Y2_Recover': coords_rec[1][1], 'Z2_Recover': coords_rec[1][2],
'X3_Recover': coords_rec[2][0], 'Y3_Recover': coords_rec[2][1], 'Z3_Recover': coords_rec[2][2],
'X4_Recover': coords_rec[3][0], 'Y4_Recover': coords_rec[3][1], 'Z4_Recover': coords_rec[3][2],
'X5_Recover': coords_rec[4][0], 'Y5_Recover': coords_rec[4][1], 'Z5_Recover': coords_rec[4][2],
'X6_Recover': coords_rec[5][0], 'Y6_Recover': coords_rec[5][1], 'Z6_Recover': coords_rec[5][2],
'X7_Recover': coords_rec[6][0], 'Y7_Recover': coords_rec[6][1], 'Z7_Recover': coords_rec[6][2],
'X8_Recover': coords_rec[7][0], 'Y8_Recover': coords_rec[7][1], 'Z8_Recover': coords_rec[7][2],
            'Error': np.mean(disp_error)
"""
Copyright 2019 <NAME> <<EMAIL>>
This file is part of localreg.
localreg is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
localreg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with localreg. If not, see <http://www.gnu.org/licenses/>.
"""
# TODO
#
# One could consider making the kernels callable objects. These objects could
# then have a member function without if-testing, which is faster in case it
# is known that all datapoints are to be included. This is the case when
# frac!=None. It could also have a property for its width?
#
import numpy as np
import logging
logger = logging.getLogger("localreg")
logging.basicConfig()
def polyfit(x, y, x0, weights=None, degree=2):
if len(x) == 0:
return np.nan * np.ones_like(x0)
if weights is None:
weights = np.ones_like(x)
s = np.sqrt(weights)
X = x[:, None] ** np.arange(degree + 1)
X0 = x0[:, None] ** np.arange(degree + 1)
lhs = X * s[:, None]
rhs = y * s
# This is what NumPy uses for default from version 1.15 onwards,
# and what 1.14 uses when rcond=None. Computing it here ensures
# support for older versions of NumPy.
rcond = np.finfo(lhs.dtype).eps * max(*lhs.shape)
beta = np.linalg.lstsq(lhs, rhs, rcond=rcond)[0]
rslt = {"beta_fit": beta, "y_fit": X0.dot(beta)}
return rslt
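# Example (illustrative, added for clarity): an exact quadratic is recovered by the
# weighted least-squares fit; the data below are made up.
# >>> import numpy as np
# >>> x = np.linspace(0, 1, 11)
# >>> y = 1 + 2 * x + 3 * x**2
# >>> res = polyfit(x, y, np.array([0.5]), degree=2)
# >>> np.round(res["beta_fit"], 6)
# array([1., 2., 3.])
# >>> np.round(res["y_fit"], 6)
# array([2.75])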
def rectangular(t):
res = np.zeros_like(t)
ind = np.where(np.abs(t) <= 1)
res[ind] = 0.5
return res
def triangular(t):
res = np.zeros_like(t)
ind = np.where(np.abs(t) <= 1)
res[ind] = 1 - np.abs(t[ind])
return res
def epanechnikov(t):
res = np.zeros_like(t)
ind = np.where(np.abs(t) <= 1)
res[ind] = 0.75 * (1 - t[ind] ** 2)
return res
def biweight(t):
res = np.zeros_like(t)
ind = np.where(np.abs(t) <= 1)
res[ind] = (15 / 16) * (1 - t[ind] ** 2) ** 2
return res
def triweight(t):
    res = np.zeros_like(t)
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from mabwiser.mab import MAB, LearningPolicy, NeighborhoodPolicy
from tests.test_base import BaseTest
class MABTest(BaseTest):
#################################################
# Test context free predict() method
################################################
def test_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_array_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
#################################################
# Test context free predict_expectation() method
################################################
def test_exp_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_series_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_array_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_context_history_series(self):
contexts = pd.DataFrame({'column1': [1, 2, 3], 'column2': [2, 3, 1]})
for lp in BaseTest.para_lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1],
rewards=[0, 0, 0],
learning_policy=lp,
context_history=contexts['column1'],
contexts=[[1]],
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(mab._imp.arm_to_model[0].beta.shape[0], 1)
for cp in BaseTest.nps:
for lp in BaseTest.lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1],
rewards=[0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=[[1]],
seed=123456,
num_run=1,
is_predict=True)
                self.assertEqual(np.ndim(mab._imp.contexts), 2)
"""
Wrapper for the MKL FFT routines. This implements very fast FFT on Intel
processors, much faster than the stock fftpack routines in numpy/scipy.
"""
from __future__ import division, print_function
import numpy as np
import ctypes as _ctypes
import os
from dftidefs import *
def load_libmkl():
r"""Loads the MKL library if it can be found in the library load path.
Raises
------
ValueError
If the MKL library cannot be found.
"""
if os.name == 'posix':
try:
lib_mkl = os.getenv('LIBMKL')
if lib_mkl is None:
raise ValueError('LIBMKL environment variable not found')
return _ctypes.cdll.LoadLibrary(lib_mkl)
except:
pass
        try:
            return _ctypes.cdll.LoadLibrary("libmkl_rt.so")
        except:
            pass
        try:
            return _ctypes.cdll.LoadLibrary("libmkl_rt.dylib")
        except:
            raise ValueError('MKL Library not found')
else:
try:
return _ctypes.cdll.LoadLibrary("mkl_rt.dll")
except:
raise ValueError('MKL Library not found')
mkl = load_libmkl()
def mkl_rfft(a, n=None, axis=-1, norm=None, direction='forward', out=None, scrambled=False):
r"""Forward/backward 1D double-precision real-complex FFT.
Uses the Intel MKL libraries distributed with Anaconda Python.
Normalisation is different from Numpy!
By default, allocates new memory like 'a' for output data.
Returns the array containing output data.
See Also
--------
rfft, irfft
"""
if axis == -1:
axis = a.ndim-1
# This code only works for 1D and 2D arrays
assert a.ndim < 3
assert (axis < a.ndim and axis >= -1)
assert (direction == 'forward' or direction == 'backward')
# Convert input to complex data type if real (also memory copy)
if direction == 'forward' and a.dtype != np.float32 and a.dtype != np.float64:
if a.dtype == np.int64 or a.dtype == np.uint64:
a = np.array(a, dtype=np.float64)
else:
a = np.array(a, dtype=np.float32)
elif direction == 'backward' and a.dtype != np.complex128 and a.dtype != np.complex64:
if a.dtype == np.int64 or a.dtype == np.uint64 or a.dtype == np.float64:
a = np.array(a, dtype=np.complex128)
else:
a = np.array(a, dtype=np.complex64)
order = 'C'
if a.flags['F_CONTIGUOUS'] and not a.flags['C_CONTIGUOUS']:
order = 'F'
# Add zero padding or truncate if needed (incurs memory copy)
if n is not None:
m = n if direction == 'forward' else (n // 2 + 1)
if a.shape[axis] < m:
# pad axis with zeros
            pad_width = np.zeros((a.ndim, 2), dtype=int)
pad_width[axis,1] = m - a.shape[axis]
a = np.pad(a, pad_width, mode='constant')
elif a.shape[axis] > m:
# truncate along axis
b = np.swapaxes(a, axis, 0)[:m,]
a = np.swapaxes(b, 0, axis).copy()
elif direction == 'forward':
n = a.shape[axis]
elif direction == 'backward':
n = 2*(a.shape[axis]-1)
# determine output type
if direction == 'backward':
out_type = np.float64
if a.dtype == np.complex64:
out_type = np.float32
elif direction == 'forward':
out_type = np.complex128
if a.dtype == np.float32:
out_type = np.complex64
# Configure output array
assert a is not out
if out is not None:
assert out.dtype == out_type
for i in range(a.ndim):
if i != axis:
assert a.shape[i] == out.shape[i]
if direction == 'forward':
assert (n // 2 + 1) == out.shape[axis]
else:
assert out.shape[axis] == n
assert not np.may_share_memory(a, out)
else:
size = list(a.shape)
size[axis] = n // 2 + 1 if direction == 'forward' else n
out = np.empty(size, dtype=out_type, order=order)
# Define length, number of transforms strides
length = _ctypes.c_int(n)
n_transforms = _ctypes.c_int(np.prod(a.shape) // a.shape[axis])
# For strides, the C type used *must* be long
strides = (_ctypes.c_long*2)(0, a.strides[axis] // a.itemsize)
if a.ndim == 2:
if axis == 0:
distance = _ctypes.c_int(a.strides[1] // a.itemsize)
out_distance = _ctypes.c_int(out.strides[1] // out.itemsize)
else:
distance = _ctypes.c_int(a.strides[0] // a.itemsize)
out_distance = _ctypes.c_int(out.strides[0] // out.itemsize)
double_precision = True
if (direction == 'forward' and a.dtype == np.float32) or (direction == 'backward' and a.dtype == np.complex64):
double_precision = False
# Create the description handle
Desc_Handle = _ctypes.c_void_p(0)
if not double_precision:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_SINGLE, DFTI_REAL, _ctypes.c_int(1), length)
else:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_DOUBLE, DFTI_REAL, _ctypes.c_int(1), length)
# set the storage type
mkl.DftiSetValue(Desc_Handle, DFTI_CONJUGATE_EVEN_STORAGE, DFTI_COMPLEX_COMPLEX)
# set normalization factor
if norm == 'ortho':
scale = _ctypes.c_double(1 / np.sqrt(n))
mkl.DftiSetValue(Desc_Handle, DFTI_FORWARD_SCALE, scale)
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
elif norm is None:
scale = _ctypes.c_double(1. / n)
s = mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
# set all values if necessary
if a.ndim != 1:
mkl.DftiSetValue(Desc_Handle, DFTI_NUMBER_OF_TRANSFORMS, n_transforms)
mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_DISTANCE, distance)
mkl.DftiSetValue(Desc_Handle, DFTI_OUTPUT_DISTANCE, out_distance)
mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_STRIDES, _ctypes.byref(strides))
mkl.DftiSetValue(Desc_Handle, DFTI_OUTPUT_STRIDES, _ctypes.byref(strides))
if scrambled:
s = mkl.DftiSetValue(Desc_Handle, DFTI_ORDERING, DFTI_BACKWARD_SCRAMBLED)
if direction == 'forward':
fft_func = mkl.DftiComputeForward
elif direction == 'backward':
fft_func = mkl.DftiComputeBackward
else:
assert False
# Not-in-place FFT
mkl.DftiSetValue(Desc_Handle, DFTI_PLACEMENT, DFTI_NOT_INPLACE)
mkl.DftiCommitDescriptor(Desc_Handle)
fft_func(Desc_Handle, a.ctypes.data_as(_ctypes.c_void_p), out.ctypes.data_as(_ctypes.c_void_p) )
mkl.DftiFreeDescriptor(_ctypes.byref(Desc_Handle))
return out
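# Minimal usage sketch for mkl_rfft (hypothetical helper, not called anywhere; assumes
# the MKL runtime was loaded above). With the default norm the forward transform is
# unscaled, so it should be comparable to np.fft.rfft; verify on your own build, since
# the normalisation conventions noted in the docstring can differ from NumPy's.
def _demo_mkl_rfft():
    x = np.random.rand(1024)
    print(np.allclose(mkl_rfft(x, direction='forward'), np.fft.rfft(x)))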
def mkl_fft(a, n=None, axis=-1, norm=None, direction='forward', out=None, scrambled=False):
r"""Forward/backward 1D single- or double-precision FFT.
Uses the Intel MKL libraries distributed with Anaconda Python.
Normalisation is different from Numpy!
By default, allocates new memory like 'a' for output data.
Returns the array containing output data.
See Also
--------
fft, ifft
"""
# This code only works for 1D and 2D arrays
assert a.ndim < 3
assert axis < a.ndim and axis >= -1
# Add zero padding if needed (incurs memory copy)
'''
if n is not None and n != a.shape[axis]:
pad_width = np.zeros((a.ndim, 2), dtype=np.int)
pad_width[axis,1] = n - a.shape[axis]
a = np.pad(a, pad_width, mode='constant')
'''
if n is not None:
if a.shape[axis] < n:
# pad axis with zeros
            pad_width = np.zeros((a.ndim, 2), dtype=int)
pad_width[axis,1] = n - a.shape[axis]
a = np.pad(a, pad_width, mode='constant')
elif a.shape[axis] > n:
# truncate along axis
b = np.swapaxes(a, axis, -1)[...,:n]
a = np.swapaxes(b, -1, axis).copy()
# Convert input to complex data type if real (also memory copy)
if a.dtype != np.complex128 and a.dtype != np.complex64:
if a.dtype == np.int64 or a.dtype == np.uint64 or a.dtype == np.float64:
a = np.array(a, dtype=np.complex128)
else:
a = np.array(a, dtype=np.complex64)
# Configure in-place vs out-of-place
inplace = False
if out is a:
inplace = True
elif out is not None:
assert out.dtype == a.dtype
assert a.shape == out.shape
assert not np.may_share_memory(a, out)
else:
out = np.empty_like(a)
# Define length, number of transforms strides
length = _ctypes.c_int(a.shape[axis])
n_transforms = _ctypes.c_int(np.prod(a.shape) // a.shape[axis])
# For strides, the C type used *must* be long
strides = (_ctypes.c_long*2)(0, a.strides[axis] // a.itemsize)
if a.ndim == 2:
if axis == 0:
distance = _ctypes.c_int(a.strides[1] // a.itemsize)
else:
distance = _ctypes.c_int(a.strides[0] // a.itemsize)
# Create the description handle
Desc_Handle = _ctypes.c_void_p(0)
if a.dtype == np.complex64:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_SINGLE, DFTI_COMPLEX, _ctypes.c_int(1), length)
elif a.dtype == np.complex128:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_DOUBLE, DFTI_COMPLEX, _ctypes.c_int(1), length)
# Set normalization factor
if norm == 'ortho':
if a.dtype == np.complex64:
scale = _ctypes.c_float(1 / np.sqrt(a.shape[axis]))
else:
scale = _ctypes.c_double(1 / np.sqrt(a.shape[axis]))
mkl.DftiSetValue(Desc_Handle, DFTI_FORWARD_SCALE, scale)
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
elif norm is None:
if a.dtype == np.complex64:
scale = _ctypes.c_float(1. / a.shape[axis])
else:
scale = _ctypes.c_double(1. / a.shape[axis])
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
# set all values if necessary
if a.ndim != 1:
mkl.DftiSetValue(Desc_Handle, DFTI_NUMBER_OF_TRANSFORMS, n_transforms)
mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_DISTANCE, distance)
mkl.DftiSetValue(Desc_Handle, DFTI_OUTPUT_DISTANCE, distance)
mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_STRIDES, _ctypes.byref(strides))
mkl.DftiSetValue(Desc_Handle, DFTI_OUTPUT_STRIDES, _ctypes.byref(strides))
if scrambled:
s = mkl.DftiSetValue(Desc_Handle, DFTI_ORDERING, DFTI_BACKWARD_SCRAMBLED)
DftiErrorMessage(s)
if direction == 'forward':
fft_func = mkl.DftiComputeForward
elif direction == 'backward':
fft_func = mkl.DftiComputeBackward
else:
assert False
if inplace:
# In-place FFT
mkl.DftiCommitDescriptor(Desc_Handle)
fft_func(Desc_Handle, a.ctypes.data_as(_ctypes.c_void_p) )
else:
# Not-in-place FFT
mkl.DftiSetValue(Desc_Handle, DFTI_PLACEMENT, DFTI_NOT_INPLACE)
mkl.DftiCommitDescriptor(Desc_Handle)
fft_func(Desc_Handle, a.ctypes.data_as(_ctypes.c_void_p), out.ctypes.data_as(_ctypes.c_void_p) )
mkl.DftiFreeDescriptor(_ctypes.byref(Desc_Handle))
return out
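# Round-trip sketch for the complex transform (hypothetical helper, assumes MKL is
# available): under the default norm only the backward direction carries the 1/n
# scaling here, so forward followed by backward should recover the input.
def _demo_mkl_fft_roundtrip():
    z = np.random.rand(256) + 1j * np.random.rand(256)
    Z = mkl_fft(z, direction='forward')            # out-of-place; pass out=z for in-place
    print(np.allclose(mkl_fft(Z, direction='backward'), z))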
def proper_fft2(a, norm=None, direction='forward', mkl_dir=None, fft_nthreads=0):
r"""Forward/backward 2D single- or double-precision FFT.
Uses the Intel MKL libraries distributed with Enthought Python.
Normalisation is different from Numpy!
By default, allocates new memory like 'a' for output data.
Returns the array containing output data.
See Also
--------
fft2, ifft2
"""
    # Input must already be complex; no automatic conversion is done here
if a.dtype != np.complex128 and a.dtype != np.complex64:
raise ValueError('prop_fftw: Unsupported data type. Must be complex64 or complex128.')
# Create the description handle
Desc_Handle = _ctypes.c_void_p(0)
dims = (_ctypes.c_int64*2)(*a.shape)
if a.dtype == np.complex64:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_SINGLE, DFTI_COMPLEX, _ctypes.c_int(2), dims)
elif a.dtype == np.complex128:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_DOUBLE, DFTI_COMPLEX, _ctypes.c_int(2), dims)
# Set normalization factor
if norm == 'ortho':
if a.dtype == np.complex64:
scale = _ctypes.c_float(1.0 / np.sqrt(np.prod(a.shape)))
else:
scale = _ctypes.c_double(1.0 / np.sqrt(np.prod(a.shape)))
mkl.DftiSetValue(Desc_Handle, DFTI_FORWARD_SCALE, scale)
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
elif norm is None:
if a.dtype == np.complex64:
scale = _ctypes.c_float(1.0 / np.prod(a.shape))
else:
scale = _ctypes.c_double(1.0 / np.prod(a.shape))
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
# Set input strides if necessary
if not a.flags['C_CONTIGUOUS']:
in_strides = (_ctypes.c_int*3)(0, a.strides[0] // a.itemsize, a.strides[1] // a.itemsize)
mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_STRIDES, _ctypes.byref(in_strides))
if direction == 'forward':
fft_func = mkl.DftiComputeForward
elif direction == 'backward':
fft_func = mkl.DftiComputeBackward
else:
assert False
mkl.DftiSetValue( Desc_Handle, DFTI_THREAD_LIMIT, _ctypes.c_int(fft_nthreads) )
# In-place FFT
mkl.DftiCommitDescriptor( Desc_Handle )
fft_func( Desc_Handle, a.ctypes.data_as(_ctypes.c_void_p) )
mkl.DftiFreeDescriptor( _ctypes.byref(Desc_Handle) )
return
def mkl_fft2(a, norm=None, direction='forward', out=None):
r"""Forward/backward 2D single- or double-precision FFT.
Uses the Intel MKL libraries distributed with Enthought Python.
Normalisation is different from Numpy!
By default, allocates new memory like 'a' for output data.
Returns the array containing output data.
See Also
--------
fft2, ifft2
"""
# convert input to complex data type if real (also memory copy)
if a.dtype != np.complex128 and a.dtype != np.complex64:
if a.dtype == np.int64 or a.dtype == np.uint64 or a.dtype == np.float64:
a = np.array(a, dtype=np.complex128)
else:
a = np.array(a, dtype=np.complex64)
# Configure in-place vs out-of-place
inplace = False
if out is a:
inplace = True
elif out is not None:
assert out.dtype == a.dtype
assert a.shape == out.shape
assert not np.may_share_memory(a, out)
else:
out = np.empty_like(a)
# Create the description handle
Desc_Handle = _ctypes.c_void_p(0)
dims = (_ctypes.c_long*2)(*a.shape)
if a.dtype == np.complex64:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_SINGLE, DFTI_COMPLEX, _ctypes.c_int(2), dims)
elif a.dtype == np.complex128:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_DOUBLE, DFTI_COMPLEX, _ctypes.c_int(2), dims)
# Set normalization factor
if norm == 'ortho':
scale = _ctypes.c_double(1.0 / np.sqrt(np.prod(a.shape)))
mkl.DftiSetValue(Desc_Handle, DFTI_FORWARD_SCALE, scale)
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
elif norm is None:
scale = _ctypes.c_double(1.0 / np.prod(a.shape))
mkl.DftiSetValue(Desc_Handle, DFTI_FORWARD_SCALE, _ctypes.c_double(1.0))
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
scale = _ctypes.c_float(0.)
mkl.DftiGetValue(Desc_Handle, DFTI_BACKWARD_SCALE, _ctypes.byref(scale))
# Set input strides if necessary
if not a.flags['C_CONTIGUOUS']:
in_strides = (_ctypes.c_long*3)(0, a.strides[0] // a.itemsize, a.strides[1] // a.itemsize)
mkl.DftiSetValue(Desc_Handle, DFTI_INPUT_STRIDES, _ctypes.byref(in_strides))
if direction == 'forward':
fft_func = mkl.DftiComputeForward
elif direction == 'backward':
fft_func = mkl.DftiComputeBackward
else:
assert False
if inplace:
# In-place FFT
mkl.DftiCommitDescriptor(Desc_Handle)
fft_func(Desc_Handle, a.ctypes.data_as(_ctypes.c_void_p) )
else:
# Not-in-place FFT
mkl.DftiSetValue(Desc_Handle, DFTI_PLACEMENT, DFTI_NOT_INPLACE)
# Set output strides if necessary
if not out.flags['C_CONTIGUOUS']:
out_strides = (_ctypes.c_long*3)(0, out.strides[0] // out.itemsize, out.strides[1] // out.itemsize)
mkl.DftiSetValue(Desc_Handle, DFTI_OUTPUT_STRIDES, _ctypes.byref(out_strides))
mkl.DftiCommitDescriptor(Desc_Handle)
fft_func(Desc_Handle, a.ctypes.data_as(_ctypes.c_void_p), out.ctypes.data_as(_ctypes.c_void_p) )
mkl.DftiFreeDescriptor(_ctypes.byref(Desc_Handle))
return out
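# 2D analogue of the round-trip sketch above (hypothetical helper): with the default
# norm the backward transform is scaled by 1/(rows*cols), so the round trip should be
# lossless up to floating-point error.
def _demo_mkl_fft2_roundtrip():
    z = np.random.rand(64, 64).astype(np.complex128)
    print(np.allclose(mkl_fft2(mkl_fft2(z), direction='backward'), z))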
def cce2full(A):
# Assume all square for now
N = A.shape
N_half = N[0]//2 + 1
out = np.empty((A.shape[0], A.shape[0]), dtype=A.dtype)
out[:, :N_half] = A
out[1:, N_half:] = np.rot90(A[1:, 1:-1], 2).conj()
# Complete the first row
out[0, N_half:] = A[0, -2:0:-1].conj()
return out
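# Quick consistency check for cce2full (hypothetical helper; assumes an even-sized
# square real input, matching the conjugate-even storage written by the real
# transforms above): expanding NumPy's rfft2 output should reproduce the full fft2.
def _demo_cce2full():
    x = np.random.rand(8, 8)
    print(np.allclose(cce2full(np.fft.rfft2(x)), np.fft.fft2(x)))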
def mkl_rfft2(a, norm=None, direction='forward', out=None):
r"""Forward/backward single- or double-precision real-complex 2D FFT.
For more details:
See Also
--------
rfft2, irfft2
"""
assert (a.dtype == np.float32) or (a.dtype == np.float64)
out_type = np.complex128
if a.dtype == np.float32:
out_type = np.complex64
n = a.shape[1]
# Allocate memory if needed
if out is not None:
assert out.dtype == out_type
assert out.shape[1] == n // 2 + 1
assert not np.may_share_memory(a, out)
else:
size = list(a.shape)
size[1] = n // 2 + 1
out = np.empty(size, dtype=out_type)
# Create the description handle
Desc_Handle = _ctypes.c_void_p(0)
dims = (_ctypes.c_long*2)(*a.shape)
if a.dtype == np.float32:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_SINGLE, DFTI_REAL, _ctypes.c_int(2), dims)
elif a.dtype == np.float64:
mkl.DftiCreateDescriptor(_ctypes.byref(Desc_Handle), DFTI_DOUBLE, DFTI_REAL, _ctypes.c_int(2), dims)
# Set the storage type
mkl.DftiSetValue(Desc_Handle, DFTI_CONJUGATE_EVEN_STORAGE, DFTI_COMPLEX_COMPLEX)
# Set normalization factor
if norm == 'ortho':
if a.dtype == np.float32:
scale = _ctypes.c_float(1.0 / np.sqrt(np.prod(a.shape)))
else:
scale = _ctypes.c_double(1.0 / np.sqrt(np.prod(a.shape)))
mkl.DftiSetValue(Desc_Handle, DFTI_FORWARD_SCALE, scale)
mkl.DftiSetValue(Desc_Handle, DFTI_BACKWARD_SCALE, scale)
elif norm is None:
if a.dtype == np.float64:
            scale = _ctypes.c_float(1.0 / np.prod(a.shape))
import math
import numpy as np
def split_patients(patient_admission: dict, admission_codes: dict, code_map: dict, seed=6669) -> (np.ndarray, np.ndarray, np.ndarray):
print('splitting train, valid, and test pids')
np.random.seed(seed)
common_pids = set()
for i, code in enumerate(code_map):
print('\r\t%.2f%%' % ((i + 1) * 100 / len(code_map)), end='')
for pid, admissions in patient_admission.items():
for admission in admissions:
codes = admission_codes[admission['admission_id']]
if code in codes:
common_pids.add(pid)
break
else:
continue
break
print('\r\t100%')
max_admission_num = 0
pid_max_admission_num = 0
for pid, admissions in patient_admission.items():
if len(admissions) > max_admission_num:
max_admission_num = len(admissions)
pid_max_admission_num = pid
common_pids.add(pid_max_admission_num)
remaining_pids = np.array(list(set(patient_admission.keys()).difference(common_pids)))
np.random.shuffle(remaining_pids)
train_num = 6000
valid_num = 125
train_pids = np.array(list(common_pids.union(set(remaining_pids[:(train_num - len(common_pids))].tolist()))))
valid_pids = remaining_pids[(train_num - len(common_pids)):(train_num + valid_num - len(common_pids))]
test_pids = remaining_pids[(train_num + valid_num - len(common_pids)):]
return train_pids, valid_pids, test_pids
def build_code_xy(pids: np.ndarray,
patient_admission: dict,
admission_codes_encoded: dict,
max_admission_num: int,
code_num: int,
max_code_num_in_a_visit: int) -> (np.ndarray, np.ndarray, np.ndarray):
print('building train/valid/test codes features and labels ...')
n = len(pids)
x = np.zeros((n, max_admission_num, max_code_num_in_a_visit), dtype=int)
y = np.zeros((n, code_num), dtype=int)
lens = np.zeros((n, ), dtype=int)
for i, pid in enumerate(pids):
print('\r\t%d / %d' % (i + 1, len(pids)), end='')
admissions = patient_admission[pid]
for k, admission in enumerate(admissions[:-1]):
codes = admission_codes_encoded[admission['admission_id']]
x[i][k][:len(codes)] = codes
codes = np.array(admission_codes_encoded[admissions[-1]['admission_id']]) - 1
y[i][codes] = 1
lens[i] = len(admissions) - 1
print('\r\t%d / %d' % (len(pids), len(pids)))
return x, y, lens
def build_time_duration_xy(pids: np.ndarray,
patient_time_duration_encoded: dict,
max_admission_num: int) -> (np.ndarray, np.ndarray):
print('building train/valid/test time duration features and labels ...')
n = len(pids)
x = np.zeros((n, max_admission_num))
y = np.zeros((n, ))
for i, pid in enumerate(pids):
print('\r\t%d / %d' % (i + 1, len(pids)), end='')
duration = patient_time_duration_encoded[pid]
x[i][:len(duration) - 1] = duration[:-1]
y[i] = duration[-1]
print('\r\t%d / %d' % (len(pids), len(pids)))
return x, y
def build_note_x(pids: np.ndarray,
patient_note_encoded: dict,
max_word_num_in_a_note: int) -> (np.ndarray, np.ndarray):
print('building train/valid/test notes features and labels ...')
n = len(pids)
x = np.zeros((n, max_word_num_in_a_note), dtype=int)
lens = np.zeros((n, ), dtype=int)
for i, pid in enumerate(pids):
print('\r\t%d / %d' % (i + 1, len(pids)), end='')
note = patient_note_encoded[pid]
length = max_word_num_in_a_note if max_word_num_in_a_note < len(note) else len(note)
x[i][:length] = note[:length]
lens[i] = length
print('\r\t%d / %d' % (len(pids), len(pids)))
return x, lens
def calculate_tf_idf(note_encoded: dict, word_num: int) -> dict:
n_docs = len(note_encoded)
tf = dict()
df = np.zeros((word_num + 1, ), dtype=np.int64)
print('calculating tf and df ...')
for i, (pid, note) in enumerate(note_encoded.items()):
print('\r\t%d / %d' % (i + 1, n_docs), end='')
note_tf = dict()
for word in note:
note_tf[word] = note_tf.get(word, 0) + 1
wset = set(note)
for word in wset:
df[word] += 1
tf[pid] = note_tf
print('\r\t%d / %d patients' % (n_docs, n_docs))
print('calculating tf_idf ...')
tf_idf = dict()
for i, (pid, note) in enumerate(note_encoded.items()):
print('\r\t%d / %d patients' % (i + 1, n_docs), end='')
note_tf = tf[pid]
note_tf_idf = [note_tf[word] / len(note) * (math.log(n_docs / (1 + df[word]), 10) + 1)
for word in note]
tf_idf[pid] = note_tf_idf
print('\r\t%d / %d patients' % (n_docs, n_docs))
return tf_idf
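# Toy illustration of the weighting (hypothetical helper with made-up word ids 1..3;
# word_num is the vocabulary size): each patient gets one tf-idf value per word
# occurrence in their note, in the order the words appear.
def _demo_tf_idf():
    toy_notes = {0: [1, 1, 2], 1: [2, 3]}
    print(calculate_tf_idf(toy_notes, word_num=3)[0])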
def build_tf_idf_weight(pids: np.ndarray, note_x: np.ndarray, note_encoded: dict, word_num: int) -> np.ndarray:
print('build tf_idf for notes ...')
tf_idf = calculate_tf_idf(note_encoded, word_num)
weight = np.zeros_like(note_x, dtype=float)
for i, pid in enumerate(pids):
note_tf_idf = tf_idf[pid]
weight[i][:len(note_tf_idf)] = note_tf_idf
weight = weight / weight.sum(axis=-1, keepdims=True)
return weight
def build_heart_failure_y(hf_prefix: str, codes_y: np.ndarray, code_map: dict) -> np.ndarray:
print('building train/valid/test heart failure labels ...')
hf_list = np.array([cid for code, cid in code_map.items() if code.startswith(hf_prefix)])
hfs = np.zeros((len(code_map), ), dtype=int)
hfs[hf_list - 1] = 1
    hf_exist = np.logical_and(codes_y, hfs)
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""eval"""
import os
import random
import numpy as np
import scipy.special as sc
import scipy.stats
from mindspore import context
from mindspore import load_checkpoint, load_param_into_net
from mindspore.common import dtype as mstype
from mindspore.communication.management import init
from mindspore.context import ParallelMode
from mindspore.ops import operations as ops
import src.dataset as dt
from src.config import relationnet_cfg as cfg
from src.relationnet import Encoder_Relation, weight_init
from argparser import arg_parser
# init operators
concat0dim = ops.Concat(axis=0)
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * sc.stdtrit(n - 1, (1 + confidence) / 2.)
return m, h
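# Small illustration (hypothetical numbers): returns the mean accuracy over runs and
# the half-width of its 95% confidence interval.
def _demo_confidence_interval():
    print(mean_confidence_interval([0.91, 0.93, 0.92, 0.95]))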
def main(args):
device_id = int(os.getenv("DEVICE_ID", args.device_id))
local_data_url = args.data_path
local_train_url = args.ckpt_dir
# if run on the cloud
if args.cloud:
import moxing as mox
local_data_url = './cache/data'
local_train_url = './cache/ckpt'
device_target = args.device_target
device_num = int(os.getenv("RANK_SIZE"))
device_id = int(os.getenv("DEVICE_ID"))
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
context.set_context(save_graphs=False)
if device_target == "Ascend":
context.set_context(device_id=device_id)
if device_num > 1:
context.reset_auto_parallel_context()
context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
init()
local_data_url = os.path.join(local_data_url, str(device_id))
else:
raise ValueError("Unsupported platform.")
import moxing as mox
mox.file.copy_parallel(src_url=args.data_url, dst_url=local_data_url)
mox.file.copy_parallel(src_url=args.ckpt_dir, dst_url=local_train_url)
else:
# run on the local server
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, device_id=device_id)
context.set_context(save_graphs=False)
# Step 1: init data folders
print("init data folders")
_, metatest_character_folders = dt.omniglot_character_folders(data_path=local_data_url)
# Step 4 : init networks
print("init neural networks")
encoder_relation = Encoder_Relation(cfg.feature_dim, cfg.relation_dim)
encoder_relation.set_train(False)
weight_init(encoder_relation)
# load parameters
if os.path.exists(local_train_url):
param_dict = load_checkpoint(local_train_url)
load_param_into_net(encoder_relation, param_dict)
print("successfully load parameters")
else:
print("Error:can not load checkpoint")
total_accuracy = 0.0
print("=" * 10 + "Testing" + "=" * 10)
for episode in range(cfg.eval_episode):
total_rewards = 0
accuracies = []
for _ in range(cfg.test_episode):
degrees = random.choice([0, 90, 180, 270])
flip = random.choice([True, False])
task = dt.OmniglotTask(metatest_character_folders, cfg.class_num, cfg.sample_num_per_class,
cfg.sample_num_per_class)
sample_dataloader = dt.get_data_loader(task, num_per_class=cfg.sample_num_per_class, split="train",
shuffle=False, rotation=degrees, flip=flip)
test_dataloader = dt.get_data_loader(task, num_per_class=cfg.sample_num_per_class, split="test",
shuffle=True, rotation=degrees, flip=flip)
test_samples, _ = next(sample_dataloader)
test_batches, test_batch_labels = next(test_dataloader)
# concat samples and batches
test_input = concat0dim((test_samples, test_batches))
test_relations = encoder_relation(test_input)
predict_labels = ops.Argmax(axis=1, output_type=mstype.int32)(test_relations).asnumpy()
test_batch_labels = test_batch_labels.asnumpy().astype(np.int32)
rewards = [1 if predict_labels[j] == test_batch_labels[j] else 0 for j in range(cfg.class_num)]
            total_rewards += np.sum(rewards)
import PIL
from PIL import Image
import matplotlib.pyplot as plt
from libtiff import TIFF
from libtiff import TIFFfile, TIFFimage
from scipy.misc import imresize
import numpy as np
import glob
import cv2
import os
import math
from sklearn.metrics import confusion_matrix, cohen_kappa_score
from unet import UNet
import skimage.io as io
import skimage.transform as trans
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.preprocessing.image import ImageDataGenerator
from scipy.misc import imsave
from keras import backend as K
#%matplotlib inline
def iou(y_true, y_pred, smooth = 100):
intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
union = K.sum(y_true,-1) + K.sum(y_pred,-1) - intersection
#sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)
iou_acc = (intersection + smooth) / (union + smooth)
return iou_acc
model = UNet()
# To read the images in numerical order
import re
numbers = re.compile(r'(\d+)')
def numericalSort(value):
parts = numbers.split(value)
parts[1::2] = map(int, parts[1::2])
return parts
# List of file names of actual Satellite images for traininig
filelist_trainx = sorted(glob.glob('Inter-IIT-CSRE/The-Eye-in-the-Sky-dataset/sat/*.tif'), key=numericalSort)
# List of file names of classified images for traininig
filelist_trainy = sorted(glob.glob('Inter-IIT-CSRE/The-Eye-in-the-Sky-dataset/gt/*.tif'), key=numericalSort)
# List of file names of actual Satellite images for testing
filelist_testx = sorted(glob.glob('Inter-IIT-CSRE/The-Eye-in-the-Sky-test-data/sat_test/*.tif'), key=numericalSort)
# Not useful, messes up the 4 dimensions of sat images
# Resizing the image to the nearest dimensions that are multiples of 'stride'
def resize(img, stride, n_h, n_w):
#h,l,_ = img.shape
ne_h = (n_h*stride) + stride
ne_w = (n_w*stride) + stride
img_resized = imresize(img, (ne_h,ne_w))
return img_resized
# Padding at the bottom and right of images to be able to crop them into 128*128 images for training
def padding(img, w, h, c, crop_size, stride, n_h, n_w):
w_extra = w - ((n_w-1)*stride)
w_toadd = crop_size - w_extra
h_extra = h - ((n_h-1)*stride)
h_toadd = crop_size - h_extra
img_pad = np.zeros(((h+h_toadd), (w+w_toadd), c))
#img_pad[:h, :w,:] = img
#img_pad = img_pad+img
img_pad = np.pad(img, [(0, h_toadd), (0, w_toadd), (0,0)], mode='constant')
return img_pad
# Adding pixels to make the image with shape in multiples of stride
def add_pixals(img, h, w, c, n_h, n_w, crop_size, stride):
w_extra = w - ((n_w-1)*stride)
w_toadd = crop_size - w_extra
h_extra = h - ((n_h-1)*stride)
h_toadd = crop_size - h_extra
img_add = np.zeros(((h+h_toadd), (w+w_toadd), c))
img_add[:h, :w,:] = img
img_add[h:, :w,:] = img[:h_toadd,:, :]
img_add[:h,w:,:] = img[:,:w_toadd,:]
img_add[h:,w:,:] = img[h-h_toadd:h,w-w_toadd:w,:]
return img_add
# Slicing the image into crop_size*crop_size crops with a stride of 32 and making a list out of them
def crops(a, crop_size = 128):
stride = 32
croped_images = []
h, w, c = a.shape
n_h = int(int(h/stride))
n_w = int(int(w/stride))
# Padding using the padding function we wrote
##a = padding(a, w, h, c, crop_size, stride, n_h, n_w)
# Resizing as required
##a = resize(a, stride, n_h, n_w)
# Adding pixals as required
a = add_pixals(a, h, w, c, n_h, n_w, crop_size, stride)
    # Slicing the image into 128*128 crops with a stride of 32
for i in range(n_h-1):
for j in range(n_w-1):
crop_x = a[(i*stride):((i*stride)+crop_size), (j*stride):((j*stride)+crop_size), :]
croped_images.append(crop_x)
return croped_images
# Making array of all the training sat images as it is without any cropping
xtrain_list = []
for fname in filelist_trainx[:13]:
# Reading the image
tif = TIFF.open(fname)
image = tif.read_image()
crop_size = 128
stride = 32
h, w, c = image.shape
n_h = int(int(h/stride))
n_w = int(int(w/stride))
image = padding(image, w, h, c, crop_size, stride, n_h, n_w)
xtrain_list.append(image)
# Making array of all the training gt images as it is without any cropping
ytrain_list = []
for fname in filelist_trainy[:13]:
# Reading the image
tif = TIFF.open(fname)
image = tif.read_image()
crop_size = 128
stride = 32
h, w, c = image.shape
n_h = int(int(h/stride))
n_w = int(int(w/stride))
image = padding(image, w, h, c, crop_size, stride, n_h, n_w)
ytrain_list.append(image)
y_train = np.asarray(ytrain_list)
x_train = np.asarray(xtrain_list)
#del ytrain_list
#del xtrain_list
# Making array of all the training sat images as it is without any cropping
# Reading the image
tif = TIFF.open(filelist_trainx[13])
image = tif.read_image()
crop_size = 128
stride = 32
h, w, c = image.shape
n_h = int(int(h/stride))
n_w = int(int(w/stride))
image = add_pixals(image, h, w, c, n_h, n_w, crop_size, stride)
#x_val = np.reshape(image, (1,h,w,c))
x_val = image
# Making array of all the training gt images as it is without any cropping
# Reading the image
tif = TIFF.open(filelist_trainy[13])
image = tif.read_image()
crop_size = 128
stride = 32
h, w, c = image.shape
n_h = int(int(h/stride))
n_w = int(int(w/stride))
image = add_pixals(image, h, w, c, n_h, n_w, crop_size, stride)
#y_val1 = np.reshape(image, (1,h,w,c))
y_val = image
xtest_list1 = []
for fname in filelist_testx:
# Reading the image
tif = TIFF.open(fname)
image = tif.read_image()
crop_size = 128
stride = 32
h, w, c = image.shape
n_h = int(int(h/stride))
n_w = int(int(w/stride))
image = add_pixals(image, h, w, c, n_h, n_w, crop_size, stride)
xtest_list1.append(image)
# Reading, padding, cropping and making array of all the cropped images of all the trainig sat images
trainx_list = []
for fname in filelist_trainx[:13]:
# Reading the image
tif = TIFF.open(fname)
image = tif.read_image()
# Padding as required and cropping
crops_list = crops(image)
#print(len(crops_list))
trainx_list = trainx_list + crops_list
# Array of all the cropped Training sat Images
trainx = np.asarray(trainx_list)
# Reading, padding, cropping and making array of all the cropped images of all the trainig gt images
trainy_list = []
for fname in filelist_trainy[:13]:
# Reading the image
tif = TIFF.open(fname)
image = tif.read_image()
# Padding as required and cropping
crops_list =crops(image)
trainy_list = trainy_list + crops_list
# Array of all the cropped Training gt Images
trainy = np.asarray(trainy_list)
# Reading, padding, cropping and making array of all the cropped images of all the testing sat images
testx_list = []
#for fname in filelist_trainx[13]:
# Reading the image
tif = TIFF.open(filelist_trainx[13])
image = tif.read_image()
# Padding as required and cropping
crops_list = crops(image)
testx_list = testx_list + crops_list
# Array of all the cropped Testing sat Images
testx = np.asarray(testx_list)
# Reading, padding, cropping and making array of all the cropped images of all the testing sat images
testy_list = []
#for fname in filelist_trainx[13]:
# Reading the image
tif = TIFF.open(filelist_trainy[13])
image = tif.read_image()
# Padding as required and cropping
crops_list = crops(image)
testy_list = testy_list + crops_list
# Array of all the cropped Testing sat Images
testy = np.asarray(testy_list)
color_dict = {0: (0, 0, 0),
1: (0, 125, 0),
2: (150, 80, 0),
3: (255, 255, 0),
4: (100, 100, 100),
5: (0, 255, 0),
6: (0, 0, 150),
7: (150, 150, 255),
8: (255, 255, 255)}
def rgb_to_onehot(rgb_arr, color_dict):
num_classes = len(color_dict)
shape = rgb_arr.shape[:2]+(num_classes,)
print(shape)
arr = np.zeros( shape, dtype=np.int8 )
for i, cls in enumerate(color_dict):
arr[:,:,i] = np.all(rgb_arr.reshape( (-1,3) ) == color_dict[i], axis=1).reshape(shape[:2])
return arr
def onehot_to_rgb(onehot, color_dict):
single_layer = np.argmax(onehot, axis=-1)
output = np.zeros( onehot.shape[:2]+(3,) )
for k in color_dict.keys():
output[single_layer==k] = color_dict[k]
    return np.uint8(output)
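# Round-trip sanity check for the colour encoding (hypothetical helper; assumes every
# pixel uses one of the colours in color_dict, as the ground-truth masks do).
def _demo_onehot_roundtrip():
    mask = np.array([[color_dict[1], color_dict[4]],
                     [color_dict[8], color_dict[0]]], dtype=np.uint8)
    recovered = onehot_to_rgb(rgb_to_onehot(mask, color_dict), color_dict)
    print(np.array_equal(recovered, mask))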
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 2 08:07:18 2021
@author: abhis
"""
import warnings
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import cv2
import os
import glob
print(torch.cuda.device_count())
warnings.filterwarnings("ignore")
class STN(nn.Module):
def __init__(self):
super(STN, self).__init__()
self.localization = nn.Sequential(
nn.Conv2d(3, 16, kernel_size=7),
nn.MaxPool2d(2, stride=2),
nn.ReLU(True),
nn.Conv2d(16, 32, kernel_size=5),
nn.MaxPool2d(2, stride=2),
nn.ReLU(True)
)
# Regressor for the 3 * 2 affine matrix
self.fc_loc = nn.Sequential(
nn.Linear(52 * 52 * 32, 32),
nn.Sigmoid(),
nn.Linear(32, 3 * 2)
)
# Initialize the weights/bias with identity transformation
self.fc_loc[2].weight.data.zero_()
self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))
#####################################################
# Spatial transformer network forward function
def stn(self, x):
xs = self.localization(x)
xs = xs.view(-1, 52 * 52 * 32)
theta = self.fc_loc(xs)
theta = theta.view(-1, 2, 3)
grid = F.affine_grid(theta, x.size())
x = F.grid_sample(x, grid)
return x
def forward(self,x):
x = self.stn(x)
return x
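# Smoke test for the STN module (hypothetical helper): a 224x224 RGB input is assumed,
# which is what the 52*52*32 flattened size expected by fc_loc corresponds to.
def _demo_stn():
    stn = STN()
    warped = stn(torch.randn(1, 3, 224, 224))
    print(warped.shape)  # torch.Size([1, 3, 224, 224])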
# def stn_load(weights_path='stn.pth', **kwargs):
# """
# load imported model instance
# Args:
# weights_path (str): If set, loads model weights from the given path
# """
# model_stn = STN()
# if weights_path:
# h = torch.load(weights_path)
# for i in list(h.keys()):
# a='localization.0.weight'
# b='localization.0.bias'
# c='localization.3.weight'
# d='localization.3.bias'
# fc1='fc_loc.0.weight'
# fc2='fc_loc.0.bias'
# fc3='fc_loc.2.weight'
# fc4='fc_loc.2.bias'
# if i==a or i==b or i==c or i==d or i==fc1 or i==fc2 or i==fc3 or i==fc4:
# continue
# else:
# del h[i]
# model_stn.load_state_dict(h)
# return model_stn
# model_stn = STN()
# model_stn = model_stn.cuda()
class Resnet50_ferplus_dag(nn.Module):
def __init__(self):
super(Resnet50_ferplus_dag, self).__init__()
self.meta = {'mean': [131.0912, 103.8827, 91.4953],
'std': [1, 1, 1],
'imageSize': [224, 224, 3]}
from collections import OrderedDict
self.debug_feats = OrderedDict() # only used for feature verification
self.conv1_7x7_s2 = nn.Conv2d(3, 64, kernel_size=[7, 7], stride=(2, 2), padding=(3, 3), bias=False)
self.conv1_7x7_s2_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv1_relu_7x7_s2 = nn.ReLU()
self.pool1_3x3_s2 = nn.MaxPool2d(kernel_size=[3, 3], stride=[2, 2], padding=(0, 0), dilation=1, ceil_mode=True)
self.conv2_1_1x1_reduce = nn.Conv2d(64, 64, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv2_1_1x1_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_1_1x1_reduce_relu = nn.ReLU()
self.conv2_1_3x3 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv2_1_3x3_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_1_3x3_relu = nn.ReLU()
self.conv2_1_1x1_increase = nn.Conv2d(64, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv2_1_1x1_proj = nn.Conv2d(64, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv2_1_1x1_increase_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_1_1x1_proj_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_1_relu = nn.ReLU()
self.conv2_2_1x1_reduce = nn.Conv2d(256, 64, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv2_2_1x1_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_2_1x1_reduce_relu = nn.ReLU()
self.conv2_2_3x3 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv2_2_3x3_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_2_3x3_relu = nn.ReLU()
self.conv2_2_1x1_increase = nn.Conv2d(64, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv2_2_1x1_increase_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_2_relu = nn.ReLU()
self.conv2_3_1x1_reduce = nn.Conv2d(256, 64, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv2_3_1x1_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_3_1x1_reduce_relu = nn.ReLU()
self.conv2_3_3x3 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv2_3_3x3_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_3_3x3_relu = nn.ReLU()
self.conv2_3_1x1_increase = nn.Conv2d(64, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv2_3_1x1_increase_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv2_3_relu = nn.ReLU()
self.conv3_1_1x1_reduce = nn.Conv2d(256, 128, kernel_size=[1, 1], stride=(2, 2), bias=False)
self.conv3_1_1x1_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_1_1x1_reduce_relu = nn.ReLU()
self.conv3_1_3x3 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv3_1_3x3_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_1_3x3_relu = nn.ReLU()
self.conv3_1_1x1_increase = nn.Conv2d(128, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv3_1_1x1_proj = nn.Conv2d(256, 512, kernel_size=[1, 1], stride=(2, 2), bias=False)
self.conv3_1_1x1_increase_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_1_1x1_proj_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_1_relu = nn.ReLU()
self.conv3_2_1x1_reduce = nn.Conv2d(512, 128, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv3_2_1x1_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_2_1x1_reduce_relu = nn.ReLU()
self.conv3_2_3x3 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv3_2_3x3_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_2_3x3_relu = nn.ReLU()
self.conv3_2_1x1_increase = nn.Conv2d(128, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv3_2_1x1_increase_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_2_relu = nn.ReLU()
self.conv3_3_1x1_reduce = nn.Conv2d(512, 128, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv3_3_1x1_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_3_1x1_reduce_relu = nn.ReLU()
self.conv3_3_3x3 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv3_3_3x3_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_3_3x3_relu = nn.ReLU()
self.conv3_3_1x1_increase = nn.Conv2d(128, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv3_3_1x1_increase_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_3_relu = nn.ReLU()
self.conv3_4_1x1_reduce = nn.Conv2d(512, 128, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv3_4_1x1_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_4_1x1_reduce_relu = nn.ReLU()
self.conv3_4_3x3 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv3_4_3x3_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_4_3x3_relu = nn.ReLU()
self.conv3_4_1x1_increase = nn.Conv2d(128, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv3_4_1x1_increase_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv3_4_relu = nn.ReLU()
self.conv4_1_1x1_reduce = nn.Conv2d(512, 256, kernel_size=[1, 1], stride=(2, 2), bias=False)
self.conv4_1_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_1_1x1_reduce_relu = nn.ReLU()
self.conv4_1_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv4_1_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_1_3x3_relu = nn.ReLU()
self.conv4_1_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_1_1x1_proj = nn.Conv2d(512, 1024, kernel_size=[1, 1], stride=(2, 2), bias=False)
self.conv4_1_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_1_1x1_proj_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_1_relu = nn.ReLU()
self.conv4_2_1x1_reduce = nn.Conv2d(1024, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_2_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_2_1x1_reduce_relu = nn.ReLU()
self.conv4_2_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv4_2_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_2_3x3_relu = nn.ReLU()
self.conv4_2_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_2_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_2_relu = nn.ReLU()
self.conv4_3_1x1_reduce = nn.Conv2d(1024, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_3_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_3_1x1_reduce_relu = nn.ReLU()
self.conv4_3_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv4_3_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_3_3x3_relu = nn.ReLU()
self.conv4_3_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_3_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_3_relu = nn.ReLU()
self.conv4_4_1x1_reduce = nn.Conv2d(1024, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_4_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_4_1x1_reduce_relu = nn.ReLU()
self.conv4_4_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv4_4_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_4_3x3_relu = nn.ReLU()
self.conv4_4_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_4_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_4_relu = nn.ReLU()
self.conv4_5_1x1_reduce = nn.Conv2d(1024, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_5_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_5_1x1_reduce_relu = nn.ReLU()
self.conv4_5_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv4_5_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_5_3x3_relu = nn.ReLU()
self.conv4_5_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_5_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_5_relu = nn.ReLU()
self.conv4_6_1x1_reduce = nn.Conv2d(1024, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_6_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_6_1x1_reduce_relu = nn.ReLU()
self.conv4_6_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv4_6_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_6_3x3_relu = nn.ReLU()
self.conv4_6_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv4_6_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv4_6_relu = nn.ReLU()
self.conv5_1_1x1_reduce = nn.Conv2d(1024, 512, kernel_size=[1, 1], stride=(2, 2), bias=False)
self.conv5_1_1x1_reduce_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_1_1x1_reduce_relu = nn.ReLU()
self.conv5_1_3x3 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv5_1_3x3_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_1_3x3_relu = nn.ReLU()
self.conv5_1_1x1_increase = nn.Conv2d(512, 2048, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv5_1_1x1_proj = nn.Conv2d(1024, 2048, kernel_size=[1, 1], stride=(2, 2), bias=False)
self.conv5_1_1x1_increase_bn = nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_1_1x1_proj_bn = nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_1_relu = nn.ReLU()
self.conv5_2_1x1_reduce = nn.Conv2d(2048, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv5_2_1x1_reduce_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_2_1x1_reduce_relu = nn.ReLU()
self.conv5_2_3x3 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv5_2_3x3_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_2_3x3_relu = nn.ReLU()
self.conv5_2_1x1_increase = nn.Conv2d(512, 2048, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv5_2_1x1_increase_bn = nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_2_relu = nn.ReLU()
self.conv5_3_1x1_reduce = nn.Conv2d(2048, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv5_3_1x1_reduce_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_3_1x1_reduce_relu = nn.ReLU()
self.conv5_3_3x3 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
self.conv5_3_3x3_drop = nn.Dropout(p=0.5)
self.conv5_3_3x3_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_3_3x3_relu = nn.ReLU()
self.conv5_3_1x1_increase = nn.Conv2d(512, 2048, kernel_size=[1, 1], stride=(1, 1), bias=False)
self.conv5_3_1x1_increase_drop = nn.Dropout(p=0.5)
self.conv5_3_1x1_increase_bn = nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.conv5_3_relu = nn.ReLU()
self.pool5_7x7_s1 = nn.AvgPool2d(kernel_size=[7, 7], stride=[1, 1], padding=0)
self.classifier = nn.Conv2d(2048, 8, kernel_size=[1, 1], stride=(1, 1))
def forward(self, data):
conv1_7x7_s2 = self.conv1_7x7_s2(data)
conv1_7x7_s2_bn = self.conv1_7x7_s2_bn(conv1_7x7_s2)
conv1_7x7_s2_bnxx = self.conv1_relu_7x7_s2(conv1_7x7_s2_bn)
pool1_3x3_s2 = self.pool1_3x3_s2(conv1_7x7_s2_bnxx)
conv2_1_1x1_reduce = self.conv2_1_1x1_reduce(pool1_3x3_s2)
conv2_1_1x1_reduce_bn = self.conv2_1_1x1_reduce_bn(conv2_1_1x1_reduce)
conv2_1_1x1_reduce_bnxx = self.conv2_1_1x1_reduce_relu(conv2_1_1x1_reduce_bn)
conv2_1_3x3 = self.conv2_1_3x3(conv2_1_1x1_reduce_bnxx)
conv2_1_3x3_bn = self.conv2_1_3x3_bn(conv2_1_3x3)
conv2_1_3x3_bnxx = self.conv2_1_3x3_relu(conv2_1_3x3_bn)
conv2_1_1x1_increase = self.conv2_1_1x1_increase(conv2_1_3x3_bnxx) # has 256
conv2_1_1x1_proj = self.conv2_1_1x1_proj(pool1_3x3_s2) # has 256
conv2_1_1x1_increase_bn = self.conv2_1_1x1_increase_bn(conv2_1_1x1_increase) #correct
conv2_1_1x1_proj_bn = self.conv2_1_1x1_proj_bn(conv2_1_1x1_proj) #correct
conv2_1 = torch.add(conv2_1_1x1_proj_bn, 1, conv2_1_1x1_increase_bn)
conv2_1x = self.conv2_1_relu(conv2_1) # has 256
conv2_2_1x1_reduce = self.conv2_2_1x1_reduce(conv2_1x)
conv2_2_1x1_reduce_bn = self.conv2_2_1x1_reduce_bn(conv2_2_1x1_reduce)
conv2_2_1x1_reduce_bnxx = self.conv2_2_1x1_reduce_relu(conv2_2_1x1_reduce_bn)
conv2_2_3x3 = self.conv2_2_3x3(conv2_2_1x1_reduce_bnxx)
conv2_2_3x3_bn = self.conv2_2_3x3_bn(conv2_2_3x3)
conv2_2_3x3_bnxx = self.conv2_2_3x3_relu(conv2_2_3x3_bn)
conv2_2_1x1_increase = self.conv2_2_1x1_increase(conv2_2_3x3_bnxx)
conv2_2_1x1_increase_bn = self.conv2_2_1x1_increase_bn(conv2_2_1x1_increase)
conv2_2 = torch.add(conv2_1x, 1, conv2_2_1x1_increase_bn)
conv2_2x = self.conv2_2_relu(conv2_2)
conv2_3_1x1_reduce = self.conv2_3_1x1_reduce(conv2_2x)
conv2_3_1x1_reduce_bn = self.conv2_3_1x1_reduce_bn(conv2_3_1x1_reduce)
conv2_3_1x1_reduce_bnxx = self.conv2_3_1x1_reduce_relu(conv2_3_1x1_reduce_bn)
conv2_3_3x3 = self.conv2_3_3x3(conv2_3_1x1_reduce_bnxx)
conv2_3_3x3_bn = self.conv2_3_3x3_bn(conv2_3_3x3)
conv2_3_3x3_bnxx = self.conv2_3_3x3_relu(conv2_3_3x3_bn)
conv2_3_1x1_increase = self.conv2_3_1x1_increase(conv2_3_3x3_bnxx)
conv2_3_1x1_increase_bn = self.conv2_3_1x1_increase_bn(conv2_3_1x1_increase)
conv2_3 = torch.add(conv2_2x, 1, conv2_3_1x1_increase_bn)
conv2_3x = self.conv2_3_relu(conv2_3)
conv3_1_1x1_reduce = self.conv3_1_1x1_reduce(conv2_3x)
conv3_1_1x1_reduce_bn = self.conv3_1_1x1_reduce_bn(conv3_1_1x1_reduce)
conv3_1_1x1_reduce_bnxx = self.conv3_1_1x1_reduce_relu(conv3_1_1x1_reduce_bn)
conv3_1_3x3 = self.conv3_1_3x3(conv3_1_1x1_reduce_bnxx)
conv3_1_3x3_bn = self.conv3_1_3x3_bn(conv3_1_3x3)
conv3_1_3x3_bnxx = self.conv3_1_3x3_relu(conv3_1_3x3_bn)
conv3_1_1x1_increase = self.conv3_1_1x1_increase(conv3_1_3x3_bnxx)
conv3_1_1x1_proj = self.conv3_1_1x1_proj(conv2_3x)
conv3_1_1x1_increase_bn = self.conv3_1_1x1_increase_bn(conv3_1_1x1_increase)
conv3_1_1x1_proj_bn = self.conv3_1_1x1_proj_bn(conv3_1_1x1_proj)
conv3_1 = torch.add(conv3_1_1x1_proj_bn, 1, conv3_1_1x1_increase_bn)
conv3_1x = self.conv3_1_relu(conv3_1)
conv3_2_1x1_reduce = self.conv3_2_1x1_reduce(conv3_1x)
conv3_2_1x1_reduce_bn = self.conv3_2_1x1_reduce_bn(conv3_2_1x1_reduce)
conv3_2_1x1_reduce_bnxx = self.conv3_2_1x1_reduce_relu(conv3_2_1x1_reduce_bn)
conv3_2_3x3 = self.conv3_2_3x3(conv3_2_1x1_reduce_bnxx)
conv3_2_3x3_bn = self.conv3_2_3x3_bn(conv3_2_3x3)
conv3_2_3x3_bnxx = self.conv3_2_3x3_relu(conv3_2_3x3_bn)
conv3_2_1x1_increase = self.conv3_2_1x1_increase(conv3_2_3x3_bnxx)
conv3_2_1x1_increase_bn = self.conv3_2_1x1_increase_bn(conv3_2_1x1_increase)
conv3_2 = torch.add(conv3_1x, 1, conv3_2_1x1_increase_bn)
conv3_2x = self.conv3_2_relu(conv3_2)
conv3_3_1x1_reduce = self.conv3_3_1x1_reduce(conv3_2x)
conv3_3_1x1_reduce_bn = self.conv3_3_1x1_reduce_bn(conv3_3_1x1_reduce)
conv3_3_1x1_reduce_bnxx = self.conv3_3_1x1_reduce_relu(conv3_3_1x1_reduce_bn)
conv3_3_3x3 = self.conv3_3_3x3(conv3_3_1x1_reduce_bnxx)
conv3_3_3x3_bn = self.conv3_3_3x3_bn(conv3_3_3x3)
conv3_3_3x3_bnxx = self.conv3_3_3x3_relu(conv3_3_3x3_bn)
conv3_3_1x1_increase = self.conv3_3_1x1_increase(conv3_3_3x3_bnxx)
conv3_3_1x1_increase_bn = self.conv3_3_1x1_increase_bn(conv3_3_1x1_increase)
conv3_3 = torch.add(conv3_2x, 1, conv3_3_1x1_increase_bn)
conv3_3x = self.conv3_3_relu(conv3_3)
conv3_4_1x1_reduce = self.conv3_4_1x1_reduce(conv3_3x)
conv3_4_1x1_reduce_bn = self.conv3_4_1x1_reduce_bn(conv3_4_1x1_reduce)
conv3_4_1x1_reduce_bnxx = self.conv3_4_1x1_reduce_relu(conv3_4_1x1_reduce_bn)
conv3_4_3x3 = self.conv3_4_3x3(conv3_4_1x1_reduce_bnxx)
conv3_4_3x3_bn = self.conv3_4_3x3_bn(conv3_4_3x3)
conv3_4_3x3_bnxx = self.conv3_4_3x3_relu(conv3_4_3x3_bn)
conv3_4_1x1_increase = self.conv3_4_1x1_increase(conv3_4_3x3_bnxx)
conv3_4_1x1_increase_bn = self.conv3_4_1x1_increase_bn(conv3_4_1x1_increase)
conv3_4 = torch.add(conv3_3x, 1, conv3_4_1x1_increase_bn)
conv3_4x = self.conv3_4_relu(conv3_4)
conv4_1_1x1_reduce = self.conv4_1_1x1_reduce(conv3_4x)
conv4_1_1x1_reduce_bn = self.conv4_1_1x1_reduce_bn(conv4_1_1x1_reduce)
conv4_1_1x1_reduce_bnxx = self.conv4_1_1x1_reduce_relu(conv4_1_1x1_reduce_bn)
conv4_1_3x3 = self.conv4_1_3x3(conv4_1_1x1_reduce_bnxx)
conv4_1_3x3_bn = self.conv4_1_3x3_bn(conv4_1_3x3)
conv4_1_3x3_bnxx = self.conv4_1_3x3_relu(conv4_1_3x3_bn)
conv4_1_1x1_increase = self.conv4_1_1x1_increase(conv4_1_3x3_bnxx)
conv4_1_1x1_proj = self.conv4_1_1x1_proj(conv3_4x)
conv4_1_1x1_increase_bn = self.conv4_1_1x1_increase_bn(conv4_1_1x1_increase)
conv4_1_1x1_proj_bn = self.conv4_1_1x1_proj_bn(conv4_1_1x1_proj)
conv4_1 = torch.add(conv4_1_1x1_proj_bn, 1, conv4_1_1x1_increase_bn)
conv4_1x = self.conv4_1_relu(conv4_1)
conv4_2_1x1_reduce = self.conv4_2_1x1_reduce(conv4_1x)
conv4_2_1x1_reduce_bn = self.conv4_2_1x1_reduce_bn(conv4_2_1x1_reduce)
conv4_2_1x1_reduce_bnxx = self.conv4_2_1x1_reduce_relu(conv4_2_1x1_reduce_bn)
conv4_2_3x3 = self.conv4_2_3x3(conv4_2_1x1_reduce_bnxx)
conv4_2_3x3_bn = self.conv4_2_3x3_bn(conv4_2_3x3)
conv4_2_3x3_bnxx = self.conv4_2_3x3_relu(conv4_2_3x3_bn)
conv4_2_1x1_increase = self.conv4_2_1x1_increase(conv4_2_3x3_bnxx)
conv4_2_1x1_increase_bn = self.conv4_2_1x1_increase_bn(conv4_2_1x1_increase)
conv4_2 = torch.add(conv4_1x, 1, conv4_2_1x1_increase_bn)
conv4_2x = self.conv4_2_relu(conv4_2)
conv4_3_1x1_reduce = self.conv4_3_1x1_reduce(conv4_2x)
conv4_3_1x1_reduce_bn = self.conv4_3_1x1_reduce_bn(conv4_3_1x1_reduce)
conv4_3_1x1_reduce_bnxx = self.conv4_3_1x1_reduce_relu(conv4_3_1x1_reduce_bn)
conv4_3_3x3 = self.conv4_3_3x3(conv4_3_1x1_reduce_bnxx)
conv4_3_3x3_bn = self.conv4_3_3x3_bn(conv4_3_3x3)
conv4_3_3x3_bnxx = self.conv4_3_3x3_relu(conv4_3_3x3_bn)
conv4_3_1x1_increase = self.conv4_3_1x1_increase(conv4_3_3x3_bnxx)
conv4_3_1x1_increase_bn = self.conv4_3_1x1_increase_bn(conv4_3_1x1_increase)
conv4_3 = torch.add(conv4_2x, 1, conv4_3_1x1_increase_bn)
conv4_3x = self.conv4_3_relu(conv4_3)
conv4_4_1x1_reduce = self.conv4_4_1x1_reduce(conv4_3x)
conv4_4_1x1_reduce_bn = self.conv4_4_1x1_reduce_bn(conv4_4_1x1_reduce)
conv4_4_1x1_reduce_bnxx = self.conv4_4_1x1_reduce_relu(conv4_4_1x1_reduce_bn)
conv4_4_3x3 = self.conv4_4_3x3(conv4_4_1x1_reduce_bnxx)
conv4_4_3x3_bn = self.conv4_4_3x3_bn(conv4_4_3x3)
conv4_4_3x3_bnxx = self.conv4_4_3x3_relu(conv4_4_3x3_bn)
conv4_4_1x1_increase = self.conv4_4_1x1_increase(conv4_4_3x3_bnxx)
conv4_4_1x1_increase_bn = self.conv4_4_1x1_increase_bn(conv4_4_1x1_increase)
conv4_4 = torch.add(conv4_3x, 1, conv4_4_1x1_increase_bn)
conv4_4x = self.conv4_4_relu(conv4_4)
conv4_5_1x1_reduce = self.conv4_5_1x1_reduce(conv4_4x)
conv4_5_1x1_reduce_bn = self.conv4_5_1x1_reduce_bn(conv4_5_1x1_reduce)
conv4_5_1x1_reduce_bnxx = self.conv4_5_1x1_reduce_relu(conv4_5_1x1_reduce_bn)
conv4_5_3x3 = self.conv4_5_3x3(conv4_5_1x1_reduce_bnxx)
conv4_5_3x3_bn = self.conv4_5_3x3_bn(conv4_5_3x3)
conv4_5_3x3_bnxx = self.conv4_5_3x3_relu(conv4_5_3x3_bn)
conv4_5_1x1_increase = self.conv4_5_1x1_increase(conv4_5_3x3_bnxx)
conv4_5_1x1_increase_bn = self.conv4_5_1x1_increase_bn(conv4_5_1x1_increase)
conv4_5 = torch.add(conv4_4x, 1, conv4_5_1x1_increase_bn)
conv4_5x = self.conv4_5_relu(conv4_5)
conv4_6_1x1_reduce = self.conv4_6_1x1_reduce(conv4_5x)
conv4_6_1x1_reduce_bn = self.conv4_6_1x1_reduce_bn(conv4_6_1x1_reduce)
conv4_6_1x1_reduce_bnxx = self.conv4_6_1x1_reduce_relu(conv4_6_1x1_reduce_bn)
conv4_6_3x3 = self.conv4_6_3x3(conv4_6_1x1_reduce_bnxx)
conv4_6_3x3_bn = self.conv4_6_3x3_bn(conv4_6_3x3)
conv4_6_3x3_bnxx = self.conv4_6_3x3_relu(conv4_6_3x3_bn)
conv4_6_1x1_increase = self.conv4_6_1x1_increase(conv4_6_3x3_bnxx)
conv4_6_1x1_increase_bn = self.conv4_6_1x1_increase_bn(conv4_6_1x1_increase)
conv4_6 = torch.add(conv4_5x, 1, conv4_6_1x1_increase_bn)
conv4_6x = self.conv4_6_relu(conv4_6)
conv5_1_1x1_reduce = self.conv5_1_1x1_reduce(conv4_6x)
conv5_1_1x1_reduce_bn = self.conv5_1_1x1_reduce_bn(conv5_1_1x1_reduce)
conv5_1_1x1_reduce_bnxx = self.conv5_1_1x1_reduce_relu(conv5_1_1x1_reduce_bn)
conv5_1_3x3 = self.conv5_1_3x3(conv5_1_1x1_reduce_bnxx)
conv5_1_3x3_bn = self.conv5_1_3x3_bn(conv5_1_3x3)
conv5_1_3x3_bnxx = self.conv5_1_3x3_relu(conv5_1_3x3_bn)
conv5_1_1x1_increase = self.conv5_1_1x1_increase(conv5_1_3x3_bnxx)
conv5_1_1x1_proj = self.conv5_1_1x1_proj(conv4_6x)
conv5_1_1x1_increase_bn = self.conv5_1_1x1_increase_bn(conv5_1_1x1_increase)
conv5_1_1x1_proj_bn = self.conv5_1_1x1_proj_bn(conv5_1_1x1_proj)
conv5_1 = torch.add(conv5_1_1x1_proj_bn, 1, conv5_1_1x1_increase_bn)
conv5_1x = self.conv5_1_relu(conv5_1)
conv5_2_1x1_reduce = self.conv5_2_1x1_reduce(conv5_1x)
conv5_2_1x1_reduce_bn = self.conv5_2_1x1_reduce_bn(conv5_2_1x1_reduce)
conv5_2_1x1_reduce_bnxx = self.conv5_2_1x1_reduce_relu(conv5_2_1x1_reduce_bn)
conv5_2_3x3 = self.conv5_2_3x3(conv5_2_1x1_reduce_bnxx)
conv5_2_3x3_bn = self.conv5_2_3x3_bn(conv5_2_3x3)
conv5_2_3x3_bnxx = self.conv5_2_3x3_relu(conv5_2_3x3_bn)
conv5_2_1x1_increase = self.conv5_2_1x1_increase(conv5_2_3x3_bnxx)
conv5_2_1x1_increase_bn = self.conv5_2_1x1_increase_bn(conv5_2_1x1_increase)
conv5_2 = torch.add(conv5_1x, 1, conv5_2_1x1_increase_bn)
conv5_2x = self.conv5_2_relu(conv5_2)
conv5_3_1x1_reduce = self.conv5_3_1x1_reduce(conv5_2x)
conv5_3_1x1_reduce_bn = self.conv5_3_1x1_reduce_bn(conv5_3_1x1_reduce)
conv5_3_1x1_reduce_bnxx = self.conv5_3_1x1_reduce_relu(conv5_3_1x1_reduce_bn)
conv5_3_3x3 = self.conv5_3_3x3(conv5_3_1x1_reduce_bnxx)
conv5_3_3x3_drop = self.conv5_3_3x3_drop(conv5_3_3x3)
conv5_3_3x3_bn = self.conv5_3_3x3_bn(conv5_3_3x3_drop)
conv5_3_3x3_bnxx = self.conv5_3_3x3_relu(conv5_3_3x3_bn)
conv5_3_1x1_increase = self.conv5_3_1x1_increase(conv5_3_3x3_bnxx)
conv5_3_1x1_increase_drop = self.conv5_3_1x1_increase_drop(conv5_3_1x1_increase)
conv5_3_1x1_increase_bn = self.conv5_3_1x1_increase_bn(conv5_3_1x1_increase_drop)
conv5_3 = torch.add(conv5_2x, 1, conv5_3_1x1_increase_bn)
conv5_3x = self.conv5_3_relu(conv5_3)
pool5_7x7_s1 = self.pool5_7x7_s1(conv5_3x)
prediction = self.classifier(pool5_7x7_s1)
return prediction
def resnet50_ferplus_dag(weights_path='resnet50_ferplus_dag.pth', **kwargs):
"""
load imported model instance
Args:
weights_path (str): If set, loads model weights from the given path
"""
model = Resnet50_ferplus_dag()
if weights_path:
state_dict = torch.load(weights_path)
model.load_state_dict(state_dict)
return model
#print(torch.cuda.current_device())
#print(torch.cuda.get_device_name(torch.cuda.current_device()))
#print(torch.cuda.get_device_properties(torch.cuda.current_device()))
#print(torch.cuda.memory_allocated())
#print(torch.cuda.memory_cached())
#print(torch.cuda.max_memory_reserved())
model = resnet50_ferplus_dag()
for child in model.children():
for param in child.parameters():
param.requires_grad = False
class Temporal(nn.Module):
def __init__(self):
super(Temporal, self).__init__()
self.batch_size = 32
self.hidden_dim = 1024
self.lstm = nn.LSTM(2048, 1024, dropout=0.2)
self.fc1 = nn.Linear(1024,512)
self.bn1 = nn.LayerNorm(512)
self.drop1 = nn.Dropout(p=0.3)
self.fc2 = nn.Linear(512,256)
self.bn2 = nn.LayerNorm(256)
self.drop2 = nn.Dropout(p=0.1)
self.fc3 = nn.Linear(256,1)
self.drop3 = nn.Dropout(p=0.1)
def forward(self, x):
x = x.view(self.batch_size,1, 2048)
x,_ = self.lstm(x)
x = x[-1,:,:]
x = x.view(-1,self.hidden_dim)
x = self.drop1(x)
x = F.leaky_relu(self.bn1(self.fc1(x)))
x = self.drop1(x)
x = F.leaky_relu(self.bn2(self.fc2(x)))
x = self.drop2(x)
x = torch.tanh(self.fc3(x))
return x
model.classifier = Temporal()
model.conv1_7x7_s2 = nn.Sequential(STN(), model.conv1_7x7_s2)
model = model.cuda()
total_params = sum(p.numel() for p in model.parameters())
print(f'{total_params:,} total parameters.')
total_trainable_params = sum(
p.numel() for p in model.parameters() if p.requires_grad)
print(f'{total_trainable_params:,} training parameters.')
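# Hedged sanity check (not part of the original script): the Temporal head above reshapes a
# (32, 2048) feature batch into a length-32 sequence for the LSTM (batch size 1) and returns a
# single value squashed to [-1, 1] by tanh. A quick shape check could look like this:
# with torch.no_grad():
#     dummy_features = torch.randn(32, 2048).cuda()
#     print(model.classifier(dummy_features).shape)  # expected: torch.Size([1, 1])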
#----------------------------------------------------------------------------------
#provides the face number which is stored in int
def face_no_stored(video_no):
parent_dir = 'Aff-Wild/frames/' + video_no + '/'
video_list = os.listdir(parent_dir)
hi = [int(video.strip('.jpg')) for video in video_list]
hi = np.array(hi)
face_no = np.sort(hi)
face_no = np.reshape(face_no, (np.shape(face_no)[0], 1))
face_no = np.transpose(face_no)
return face_no
#Target value train data
def target_data(video_no):
arousal_dir = 'Aff-Wild/annotations/arousal/'
valence_dir = 'Aff-Wild/annotations/valence/'
arousal = open(arousal_dir + video_no +'.txt', 'r')
arousal = arousal.read().split('\n')
while("" in arousal) :
arousal.remove("")
arousal = np.array([float(i) for i in arousal])
arousal = np.reshape(arousal, (np.shape(arousal)[0], 1))
valence = open(valence_dir + video_no +'.txt', 'r')
valence = valence.read().split('\n')
while("" in valence) :
valence.remove("")
valence = np.array([float(i) for i in valence])
valence = np.reshape(valence, (np.shape(valence)[0], 1))
y_train = np.concatenate((arousal, valence), axis=1)
return y_train
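# Illustrative note (hypothetical video id): y = target_data('video1') returns an array of shape
# (num_annotated_frames, 2) whose columns are [arousal, valence] read from the annotation files.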
def miniclips(video_no, batch_size, count, y_train_no):
parent_dir = 'Aff-Wild/frames/'
miniclip_compile = np.zeros((batch_size, 3,224, 224))
y_train = np.zeros((batch_size, 2))
img1 = cv2.imread(parent_dir + str(video_no) + '/' +'frame' +str(count) +'.jpg')
img1 = cv2.resize(img1, (224,224))
img1 = np.transpose(img1, (2, 0, 1))  # HWC -> CHW (a plain reshape would scramble the channels)
img1 = img1/255
miniclip_compile[0,:,:,:] = img1
y_train[0,:] = y_train_no[count]
for i in range(1, batch_size):
img2 = cv2.imread(parent_dir + str(video_no) + '/' + 'frame'+str(count+1) +'.jpg')
img2 = cv2.resize(img2, (224,224))
img2 = np.transpose(img2, (2, 0, 1))  # HWC -> CHW
img2 = img2/255
miniclip_compile[i, :,:,:] = img2
y_train[i,:] = y_train_no[count+1]
count += 1
return miniclip_compile, y_train, count
def mae_loss(pred, y_train):
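# Note: despite the name, this computes a squared-error loss, (pred - y_train)**2, not an absolute error.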
y_train = y_train.squeeze(0)
loss = torch.square(pred - y_train)
return loss
def main():
no_of_train_videos=20
no_of_val_videos = 5
train_loss = []
val_loss_mae = []
total_train_loss = []
total_val_loss = []
epochs = 30
batch_size = 32
valORaro = 0 # 0 to train on valence and 1 to train on arousal
parent_dir = 'Aff-Wild/frames/'
video_list = os.listdir(parent_dir)
lr = 0.004
optimizer = torch.optim.Adam(model.parameters(), lr)
#x_train is given data
#y_train is given labels
for epoch in range(epochs):
print('\n Epoch No:- {}'.format(epoch+1))
avg_loss_train=0
avg_loss_val=0
#training loop
model.train()
for video_no in video_list[0:no_of_train_videos]: #126
count = 0
video_loss = 0.0
loop_count=0
number_of_imgs = len(glob.glob(parent_dir + video_no + '/*'))
y_train_no = target_data(video_no)
while count<= number_of_imgs - batch_size :
x_train, y_train, count = miniclips(video_no, batch_size, int(count), y_train_no)
if x_train.size == 0:
continue
optimizer.zero_grad()
y_train = y_train[:, valORaro]
y_train = y_train[np.argmax(np.absolute(y_train))]
x_train = torch.Tensor(x_train)
y_train = torch.tensor(y_train, dtype=torch.float32)
x_train = x_train.cuda()
y_train = y_train.cuda()
pred = model(x_train)
loss = mae_loss(pred, y_train)
loss.backward()
optimizer.step()
video_loss += loss.item()
loop_count +=1
video_loss = video_loss/loop_count
print('Loss of video no {} : {} '.format(video_no, video_loss))
avg_loss_train += video_loss
train_loss.append(video_loss)
avg_loss_train = avg_loss_train/no_of_train_videos
total_train_loss.append(avg_loss_train)
print('\n-------------Average Train Loss = {} --------------'.format(avg_loss_train))
#validation loop
model.eval()
for video_no in video_list[no_of_train_videos:no_of_train_videos+ no_of_val_videos]: #126
count = 0
loss_mae_ = 0.0
loop_count = 0
number_of_imgs = len(glob.glob(parent_dir + video_no + '/*'))
y_train_no = target_data(video_no)
with torch.no_grad():
while count<= number_of_imgs - batch_size + 1 :
x_val, y_val, count = miniclips(video_no, batch_size, int(count), y_train_no)
if x_val.size == 0:
continue
y_val = y_val[:, valORaro]
y_val = y_val[np.argmax(np.absolute(y_val))]
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.4
# kernelspec:
# display_name: lcpp-dev
# language: python
# name: lcpp-dev
# ---
# %% [markdown]
# # Basic processing of Sentinel V mooring
# %%
import xarray as xr
import numpy as np
import utils
import matplotlib.pyplot as plt
import scipy.stats as stats
import utm
from scipy.ndimage import gaussian_filter
def mode(x, **kwargs):
mout = np.squeeze(stats.mode(x, axis=1)[0])
return mout
def interval_to_mid(intervals):
"""
Parameters
----------
intervals : 1D numpy array
An array of pandas Interval objects.
Returns
-------
mids : 1D numpy array
Midpoints of the intervals.
"""
return np.array([v.mid for v in intervals])
# %% [markdown]
# Load datasets and do some basic conversion of times and variables.
# %%
sV = xr.open_dataset("../proc/ABLE_sentinel_2018_enu.nc")
sV = sV.set_coords(["lon", "lat"])
sV["time"] = utils.POSIX_to_datetime(sV.time.values).astype(np.datetime64)
x, y, *_ = utm.from_latlon(sV.lat, sV.lon)
sV = sV.assign_coords({"x": x, "y": y})
virt = xr.open_dataset("../proc/ABLE_sentinel_RBRvirtuoso_2018.nc")
virt = virt.set_coords(["lon", "lat"])
virt["time"] = utils.POSIX_to_datetime(virt.time.values).astype(np.datetime64)
sbe = xr.open_dataset("../proc/ABLE_sentinel_SBE37_2018.nc")
sbe = sbe.set_coords(["lon", "lat"])
sbe["time"] = utils.POSIX_to_datetime(sbe.time.values).astype(np.datetime64)
# %% [markdown]
# Define some parameters and simple thresholds for processing.
# %%
pmin = 125 # Minimum pressure to keep
dpdtmax = 0.4e-9 # Maximum rate of change of pressure to keep
cut_ends = 2 # Number of points on either end to remove after applying other thresholds
dt = 10 # Bin size for time average [s]
# %% [markdown]
# Apply the thresholds to remove some data.
# %%
is_deep = sV.p > pmin
is_slow = np.fabs(sV.p.differentiate("time")) < dpdtmax
keep = is_deep & is_slow
sVp = sV.isel(time=keep).isel(time=slice(cut_ends, -cut_ends))
# %%
sVp.p.plot.line('.')
# %% [markdown]
# ## Old quality control
#
# Note [Marion's document](https://escholarship.org/content/qt6xd149s8/qt6xd149s8.pdf)
# %%
# # qc_err0 = 0.3
# # qc_err1 = 0.5
# qc_err = 0.15 # error velocity
# qc_q = 110 # correlation
# qc_uv = 2.0 # horizontal velocity
# qc_w = 1.5 # vertical velocity
# qc_a = 30 # echo intensity
# %%
# qc_u_bad = np.abs(sVp.u) > qc_uv
# qc_v_bad = np.abs(sVp.v) > qc_uv
# qc_w_bad = np.abs(sVp.w) > qc_w
# qc_vv_bad = np.abs(sVp.vv) > qc_w
# qc_err_bad = np.abs(sVp.err) > qc_err
# qc_q1_good = sVp.q1 > qc_q
# qc_q2_good = sVp.q2 > qc_q
# qc_q3_good = sVp.q3 > qc_q
# qc_q4_good = sVp.q4 > qc_q
# qc_q_bad = (qc_q1_good.astype(int) + qc_q2_good.astype(int) + qc_q3_good.astype(int) + qc_q4_good.astype(int)) <= 3
# %%
# uv_reject = (qc_q_bad.astype(int) + qc_err_bad.astype(int) + qc_u_bad.astype(int) + qc_v_bad.astype(int)) > 1
# w_reject = (qc_q_bad.astype(int) + qc_err_bad.astype(int) + qc_w_bad.astype(int)) > 1
# vv_reject = (qc_q_bad.astype(int) + qc_err_bad.astype(int) + qc_vv_bad.astype(int)) > 1
# %%
# fig, axs = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(10, 10))
# uv_reject.plot(ax=axs[0])
# w_reject.plot(ax=axs[1])
# vv_reject.plot(ax=axs[2])
# %% [markdown]
# Remove velocity using QC.
# %%
# sVqc = sVp.copy()
# u = sVqc.u.values
# u[uv_reject] = np.nan
# sVqc["u"] = (sVqc.u.dims, u, sVqc.u.attrs)
# v = sVqc.v.values
# v[uv_reject] = np.nan
# sVqc["v"] = (sVqc.v.dims, v, sVqc.v.attrs)
# w = sVqc.w.values
# w[w_reject] = np.nan
# sVqc["w"] = (sVqc.w.dims, w, sVqc.w.attrs)
# vv = sVqc.vv.values
# vv[vv_reject] = np.nan
# sVqc["vv"] = (sVqc.vv.dims, vv, sVqc.vv.attrs)
# %% [markdown]
# ## New cut off data above surface
# %%
dthresh = 100.
sidelobe_pct = 1 - np.cos(np.deg2rad(sVp.beamAngle))
var_names = ["a1", "a2", "a3", "a4", "va"]
nroll = 5
dmingood = np.full((sVp.time.size, len(var_names)), np.nan)
fig, axs = plt.subplots(len(var_names), 1, figsize=(14, 3*len(var_names)))
for i, var in enumerate(var_names):
idxmax = sVp[var].where(sVp.distance > dthresh).argmax("distance")
dmax = sVp.distance[idxmax]
dsl = (1 - sidelobe_pct)*sVp.distance[idxmax]
# dmax = dmax.where(dmax > dthresh)
dmode = dsl.rolling(time=nroll, min_periods=1, center=True).reduce(mode)
sVp[var].plot(ax=axs[i])
dmingood[:, i] = dmode
dsl.plot(ax=axs[i], color="r")
axs[i].set_title("")
for i in range(len(var_names)):
axs[i].plot(sVp.time, dmingood.min(axis=1), color="k")
# %%
good = dmingood.min(axis=1)
# Make a new dataset without surface
sVs = sVp.copy()
# Loop over the 2D datavars
mask = sVp.distance < xr.DataArray(good, dims={"time": sVp.time})
for var in sVp.data_vars:
if sVp[var].dims == ('distance', 'time'):
print(f"Masking {var}.")
sVs[var] = sVp[var].where(mask)
# Remove distances where there is no good data
sVs = sVs.isel(distance=mask.any("time"))
# %% [markdown]
# ## New quality control
# %%
errthresh = 0.2 # Blur around these errors
errthresh_high = 0.2 # Always remove these errors
maskthresh = 0.35 # Blurred mask threshold
qthresh = 300
vqthresh = 35
sigma = (2, 5)
qsum = sVs.q1 + sVs.q2 + sVs.q3 + sVs.q4
qgood = qsum > qthresh
vqgood = sVs.vq.values > vqthresh
sVqc = sVs.copy()
egood = np.abs(sVs.err) < errthresh
egood_filt = gaussian_filter(egood.values.astype(float), sigma)
ebgood = (egood_filt > maskthresh) & (np.abs(sVs.err) < errthresh_high) & qgood
vebgood = (egood_filt > maskthresh) & vqgood
var_names = ["u", "v", "w", "err"]
for var in var_names:
sVqc[var] = sVs[var].where(ebgood)
sVqc["vv"] = sVs.vv.where(vebgood)
# %% [markdown]
# ## Time binning
# %% [markdown]
# Bin average data to reduce size and errors.
#
# First make bins.
# %%
# Time bin start and end to nearest minute. This will cut off some data.
tstart = (sVqc.time[0].values + np.timedelta64(30, 's')).astype('datetime64[m]')
tend = sVqc.time[-1].values.astype('datetime64[m]')
timebins = np.arange(tstart, tend, np.timedelta64(dt, 's'))
# %% [markdown]
# Group and take mean.
# %%
gb = sVqc.groupby_bins("time", timebins)
sVa = gb.mean(skipna=True, keep_attrs=True)
# Use mid time as dimension, rather than Interval.
sVa["time_bins"] = interval_to_mid(sVa.time_bins.values).astype("datetime64[s]")
sVa = sVa.rename({"time_bins": "time"})
# %% [markdown]
# Mean of heading should be performed using circular mean. (Technically, so should pitch and roll, but for small angles the noncircular mean is ok)
# %%
sVa["heading"] = (["time"], sVqc.heading.groupby_bins("time", timebins).reduce(stats.circmean, high=360.).values)
# %% [markdown]
# ## Old cut off data above surface
#
# Use a simple echo intensity threshold to find the maximum.
# %%
# dmin = 60. # Minimum distance above which to look for the maximum
# nroll = 120 # Number of points in rolling mode window
# fcut = 0.1 # Extra distance to remove (1 - fcut)*dcut
# %%
# sVa.va.isel(time=10000).plot.line('.')
# %% [markdown]
# Identify echo maximum in each beam, using a rolling mode to smooth out data.
# %%
# # fig, ax = plt.subplots()
# dcuts = []
# for var in ["a1", "a2", "a3", "a4", "va"]:
# am = sVa[var].where(sVa.distance > dmin)
# imax = am.argmax(dim="distance", skipna=True)
# dmax = am.distance[imax]
# ro = dmax.rolling(time=nroll, min_periods=1, center=True)
# dm = ro.reduce(mode)
# dcut = (1 - fcut)*dm
# # ax.plot(sVa.time, dmax, 'r')
# # ax.plot(sVa.time, dm, 'orange')
# # ax.plot(sVa.time, dcut, 'g')
# dcuts.append(dcut.values)
# %%
# dcuts = np.stack(dcuts, axis=1)
# # Use only the vertical beam for finding the surface.
# dcut_min = dcuts[:, 4]
# dcut_min = xr.DataArray(dcut_min, dims={"time": sVa.time})
# %% [markdown]
# Mask and remove data above distance threshold.
# %%
# sVm = sVa.where(sVa.distance < dcut_min)
# # The masking process converts some variables to 2D, change them back...
# sVm["p"] = sVa.p
# sVm["t"] = sVa.t
# sVm["pitch"] = sVa.pitch
# sVm["rol"] = sVa.rol
# sVm["heading"] = sVa.heading
# sVm = sVm.isel(distance=~np.isnan(sVm.u).all(axis=0))
# %% [markdown]
# ## Plotting time series
# %%
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-10T11:00"))
sVm_ = sVa.sel(time=timeslice)  # use the binned dataset; the old sVm masking above is commented out
fig, axs = plt.subplots(4, 1, figsize=(15, 10), sharex=True)
sVm_.u.plot(ax=axs[0], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.v.plot(ax=axs[1], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.w.plot(ax=axs[2], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.vv.plot(ax=axs[3], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
fig, ax = plt.subplots(figsize=(12, 3))
sVm_.p.plot(ax=ax)
# %%
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-10T11:00"))
sVm_ = sVa.sel(time=timeslice)
fig, axs = plt.subplots(8, 1, figsize=(15, 25), sharex=True)
sVm_.u.plot(ax=axs[0], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.v.plot(ax=axs[1], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.vv.plot(ax=axs[2], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.a1.plot(ax=axs[3], x="time")
sVm_.a2.plot(ax=axs[4], x="time")
sVm_.a3.plot(ax=axs[5], x="time")
sVm_.a4.plot(ax=axs[6], x="time")
sVm_.va.plot(ax=axs[7], x="time")
fig, axs = plt.subplots(3, 1, figsize=(11, 8))
sVm_.heading.plot(ax=axs[0])
sVm_.rol.plot(ax=axs[1])
sVm_.pitch.plot(ax=axs[2])
# %% [markdown]
# # Plug in other instruments to dataset
#
# Group and bin average.
# %%
gb = virt.groupby_bins("time", timebins)
virta = gb.mean(skipna=True, keep_attrs=True)
# Use mid time as dimension, rather than Interval.
virta["time_bins"] = interval_to_mid(virta.time_bins.values).astype("datetime64[ms]")
virta = virta.rename({"time_bins": "time"})
gb = sbe.groupby_bins("time", timebins)
sbea = gb.mean(skipna=True, keep_attrs=True)
# Use mid time as dimension, rather than Interval.
sbea["time_bins"] = interval_to_mid(sbea.time_bins.values).astype("datetime64[ms]")
sbea = sbea.rename({"time_bins": "time"})
# %% [markdown]
# Look at a couple of plots.
# %%
fig, ax = plt.subplots(figsize=(12, 3))
virta.turb.plot(ax=ax)
fig, axs = plt.subplots(3, 1, figsize=(12, 10), sharex=True)
sbea.p.plot(ax=axs[0])
sbea.t.plot(ax=axs[1])
sbea.SP.plot(ax=axs[2])
# %% [markdown]
# Assign other data to the sentinal dataset.
# %%
ds = sVa.copy()
# %%
ds["turb_RBR"] = (sVa.p.dims, virta.turb, virta.turb.attrs)
ds["SP_SBE37"] = (sVa.p.dims, sbea.SP, sbea.SP.attrs)
ds["C_SBE37"] = (sVa.p.dims, sbea.C, sbea.C.attrs)
ds["t_SBE37"] = (sVa.p.dims, sbea.t, sbea.t.attrs)
ds["p_SBE37"] = (sVa.p.dims, sbea.p, sbea.p.attrs)
# %% [markdown]
# Try a plot...
# %%
fig, ax = plt.subplots()
ds.p_SBE37.plot(ax=ax)
ds.p.plot(ax=ax, yincrease=False)
# %% [markdown]
# Estimate some more thermodynamic variables.
# %%
import gsw
# %%
ds["SA_SBE37"] = (ds.p.dims, gsw.SA_from_SP(ds.SP_SBE37, ds.p_SBE37, ds.lon, ds.lat), {"units": "g/kg", "long_name": "Absolute_salinity"})
ds["CT_SBE37"] = (ds.p.dims, gsw.CT_from_t(ds.SA_SBE37, ds.t_SBE37, ds.p_SBE37), {"units": "deg C", "long_name": "Conservative_temperature"})
ds["z_SBE37"] = (ds.p.dims, gsw.z_from_p(ds.p_SBE37, ds.lat), {"units": "m", "long_name": "height"})
ds["depth_SBE37"] = (ds.p.dims, -ds.z_SBE37, {"units": "m", "long_name": "depth"})
ds["z_ADCP"] = (ds.p.dims, gsw.z_from_p(ds.p, ds.lat), {"units": "m", "long_name": "height"})
ds["depth_ADCP"] = (ds.p.dims, -ds.z_ADCP, {"units": "m", "long_name": "depth"})
ds["z"] = (ds.distance.dims, ds.distance + ds.z_ADCP.mean(dim="time"), {"units": "m", "long_name": "height"})
ds["depth"] = (ds.distance.dims, -ds.z, {"units": "m", "long_name": "depth"})
ds = ds.set_coords(["z", "depth"])
# %% [markdown]
# Save dataset to netcdf.
# %%
ds.to_netcdf("../proc/ABLE_sentinel_mooring_2018.nc")
# %% [markdown]
# ## Examine a short segment of the dataset
# %%
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-05T12:00"))
ds_ = ds.sel(time=timeslice)
fig, axs = plt.subplots(4, 1, figsize=(15, 10), sharex=True, sharey=True)
ds_.u.plot(ax=axs[0], y="depth", x="time", yincrease=False, vmin=-0.2, vmax=0.2, cmap="coolwarm")
ds_.a3.plot(ax=axs[1], y="depth", x="time", yincrease=False)
ds_.vv.plot(ax=axs[2], y="depth", x="time", yincrease=False, vmin=-0.2, vmax=0.2, cmap="coolwarm")
ds_.va.plot(ax=axs[3], y="depth", x="time", yincrease=False)
fig, axs = plt.subplots(4, 1, figsize=(11.7, 10), sharex=True)
ds_.p_SBE37.plot(ax=axs[0])
ds_.CT_SBE37.plot(ax=axs[1])
ds_.turb_RBR.plot(ax=axs[2])
ds_.pitch.plot(ax=axs[3])
# %% [markdown]
# Compare echo intensity near bottom for different beams.
# %%
dist = 5
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-05T12:00"))
ds_ = ds.sel(time=timeslice).sel(distance=dist, method="nearest")
fig, ax = plt.subplots(figsize=(11, 4))
ds_.a1.plot(ax=ax, label="beam 1")
ds_.a2.plot(ax=ax, label="beam 2")
ds_.a3.plot(ax=ax, label="beam 3")
ds_.a4.plot(ax=ax, label="beam 4")
ds_.va.plot(ax=ax, label="beam v")
ax.set_ylabel("Echo intensity")
ax.legend()
# %%
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-05T12:00"))
ds_ = ds.sel(time=timeslice)
fig, ax = plt.subplots(figsize=(10, 10))
for i in range(0, ds_.time.size, 50):
ds__ = ds_.isel(time=i)
ds__.va.plot(ax=ax, label=ds__.time.values.astype("datetime64[s]"))
ax.legend(loc="upper left", bbox_to_anchor=(1, 1))
# %%
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import PolyCollection
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
import numpy as np
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(projection='3d')
# def cc(arg):
# return mcolors.to_rgba(arg, alpha=0.6)
xs = ds_.distance.values
verts = []
zs = []
for i in range(0, ds_.time.size, 100):
ds__ = ds_.isel(time=i)
time = (ds__.time - ds_.time[0]).astype(float)/1e9
zs.append(time)
ys = ds__.va.values
ys[0], ys[-1] = 0, 0
verts.append(list(zip(xs, ys)))
# zs = [0.0, 1.0, 2.0, 3.0]
# for z in zs:
# ys = np.random.rand(len(xs))
# ys[0], ys[-1] = 0, 0
# verts.append(list(zip(xs, ys)))
poly = PolyCollection(verts) # facecolors=[cc('r'), cc('g'), cc('b'), cc('y')]
poly.set_alpha(0.2)
ax.add_collection3d(poly, zs=zs, zdir='y')
ax.set_xlabel('Distance')
ax.set_xlim3d(0, xs.max())
ax.set_ylabel('Y')
ax.set_ylim3d(0, zs[-1])
ax.set_zlabel('Z')
ax.set_zlim3d(0, 200)
ax.view_init(elev=30., azim=30)
plt.show()
# %%
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
timeslice = slice(np.datetime64("2018-09-05T10:00"), np.datetime64("2018-09-05T10:45"))
ds_ = ds.sel(time=timeslice)
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(projection='3d')
T, D = np.meshgrid(ds_.distance.values, (ds_.time.values - ds_.time[0].values).astype(float)/1e9)
# Plot a basic wireframe.
ax.plot_wireframe(T, D, ds_.a2.values, rstride=1, cstride=1)
ax.view_init(elev=45., azim=120)
# %% [markdown]
# # New QC
# %%
tslice = slice(np.datetime64("2018-09-07T10:00"), np.datetime64("2018-09-07T11:00"))
# tslice = slice(np.datetime64("2018-09-04T10:00"), np.datetime64("2018-09-04T11:00"))
# tslice = slice(np.datetime64("2018-09-11T14:00"), np.datetime64("2018-09-11T16:00"))
# tslice = slice(np.datetime64("2018-09-10T03:00"), np.datetime64("2018-09-10T04:00"))
enu = sVp.sel(time=tslice)
# %%
hvel_kwargs = dict(vmin=-0.3, vmax=0.3, cmap="coolwarm")
vvel_kwargs = dict(vmin=-0.1, vmax=0.1, cmap="coolwarm")
fig, axs = plt.subplots(5, 1, sharex=True, figsize=(22, 17))
enu.u.plot(ax=axs[0], **hvel_kwargs)
enu.v.plot(ax=axs[1], **hvel_kwargs)
enu.w.plot(ax=axs[2], **vvel_kwargs)
enu.vv.plot(ax=axs[3], **vvel_kwargs)
np.abs(enu.err).plot(ax=axs[4], vmin=0, vmax=0.2)
for ax in axs:
ax.set_xlabel("")
# %%
fig, axs = plt.subplots(5, 1, sharex=True, figsize=(22, 17))
enu.q1.plot(ax=axs[0])
enu.q2.plot(ax=axs[1])
enu.q3.plot(ax=axs[2])
enu.q4.plot(ax=axs[3])
enu.vq.plot(ax=axs[4])
for ax in axs:
ax.set_xlabel("")
# %%
dthresh = 100.
sidelobe_pct = 1 - np.cos(np.deg2rad(enu.beamAngle))
var_names = ["a1", "a2", "a3", "a4", "va"]
nroll = 5
dmingood = np.full((enu.time.size, len(var_names)), np.nan)
fig, axs = plt.subplots(len(var_names), 1, figsize=(14, 3*len(var_names)))
for i, var in enumerate(var_names):
idxmax = enu[var].where(enu.distance > dthresh).argmax("distance")
dmax = sVp.distance[idxmax]
dsl = (1 - sidelobe_pct)*enu.distance[idxmax]
# dmax = dmax.where(dmax > dthresh)
dmode = dsl.rolling(time=nroll, min_periods=1, center=True).reduce(mode)
enu[var].plot(ax=axs[i])
dmingood[:, i] = dmode
dmode.plot(ax=axs[i], color="r")
axs[i].set_title("")
for i in range(len(var_names)):
axs[i].plot(enu.time, dmingood.min(axis=1), color="k")
# %%
fig, axs = plt.subplots(3, 1, figsize=(22, 9))
enu.heading.plot(ax=axs[0], marker='.', linestyle="")
enu.rol.plot(ax=axs[1])
enu.pitch.plot(ax=axs[2])
# %%
# Make a new dataset without surface
enus = enu.copy()
# Loop over the 2D datavars
mask = enu.distance < xr.DataArray(dmingood.min(axis=1), dims={"time": enu.time})
for var in enu.data_vars:
if enu[var].dims == ('distance', 'time'):
print(f"Masking {var}.")
enus[var] = enu[var].where(mask)
# Remove distances where there is no good data
enus = enus.isel(distance=mask.any("time"))
# %%
hvel_kwargs = dict(vmin=-0.3, vmax=0.3, cmap="coolwarm")
vvel_kwargs = dict(vmin=-0.1, vmax=0.1, cmap="coolwarm")
fig, axs = plt.subplots(5, 1, sharex=True, figsize=(22, 17))
enus.u.plot(ax=axs[0], **hvel_kwargs)
enus.v.plot(ax=axs[1], **hvel_kwargs)
enus.w.plot(ax=axs[2], **vvel_kwargs)
enus.vv.plot(ax=axs[3], **vvel_kwargs)
np.abs(enus.err).plot(ax=axs[4], vmin=0, vmax=0.2)
for ax in axs:
ax.set_xlabel("")
# %%
from scipy.ndimage import gaussian_filter
# %%
errthresh = 0.2 # Blur around these errors
errthresh_high = 0.2 # Always remove these errors
maskthresh = 0.35 # Blurred mask threshold
qthresh = 300
vqthresh = 35
sigma = (2, 5)
qsum = enus.q1 + enus.q2 + enus.q3 + enus.q4
qgood = qsum > qthresh
vqgood = enus.vq.values > vqthresh
enueb = enus.copy()
egood = np.abs(enus.err) < errthresh
egood_filt = gaussian_filter(egood.values.astype(float), sigma)
ebgood = (egood_filt > maskthresh) & (np.abs(enus.err) < errthresh_high) & qgood
vebgood = (egood_filt > maskthresh) & vqgood
var_names = ["u", "v", "w", "err"]
for var in var_names:
enueb[var] = enus[var].where(ebgood)
enueb["vv"] = enus.vv.where(vebgood)
# %%
fig, ax = plt.subplots(1, 1, figsize=(22, 3.5))
ax.pcolormesh(egood_filt)
ax.contour(egood_filt, [maskthresh], colors="r")
ax.contour(qgood, [0.5], colors="g")
ax.contour(vqgood, [0.5], colors="b")
# %% tags=[]
hvel_kwargs = dict(vmin=-0.3, vmax=0.3, cmap="coolwarm")
vvel_kwargs = dict(vmin=-0.1, vmax=0.1, cmap="coolwarm")
fig, axs = plt.subplots(8, 1, sharex=True, figsize=(22, 28))
enueb.u.plot(ax=axs[0], **hvel_kwargs)
enus.u.plot(ax=axs[1], **hvel_kwargs)
enueb.v.plot(ax=axs[2], **hvel_kwargs)
enus.v.plot(ax=axs[3], **hvel_kwargs)
enueb.w.plot(ax=axs[4], **vvel_kwargs)
enus.w.plot(ax=axs[5], **vvel_kwargs)
enueb.vv.plot(ax=axs[6], **vvel_kwargs)
enus.vv.plot(ax=axs[7], **vvel_kwargs)
for ax in axs:
ax.set_xlabel("")
# %% [markdown]
# # Beam separation
# %%
z = sVp.distance[sVp.distance < 120]
angle = np.deg2rad(sVp.beamAngle)
separation_opposite = 2*z*np.tan(angle)
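# %% [markdown]
# A hedged sketch (not from the original notebook): for a 4-beam Janus-style ADCP with beam angle
# `beamAngle`, opposite beams are separated horizontally by 2*z*tan(beamAngle) and adjacent beams by
# sqrt(2)*z*tan(beamAngle), so the two separations could be compared against distance as below.
# %%
separation_adjacent = np.sqrt(2)*z*np.tan(angle)  # assumed adjacent-beam geometry
fig, ax = plt.subplots()
ax.plot(z, separation_opposite, label="opposite beams")
ax.plot(z, separation_adjacent, label="adjacent beams")
ax.set_xlabel("Distance from transducer [m]")
ax.set_ylabel("Horizontal beam separation [m]")
ax.legend()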
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Fairness with noisy protected groups experiments."""
from collections import Counter
import random
from absl import app
from absl import flags
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import tensorflow as tf
import tensorflow_constrained_optimization as tfco
flags.DEFINE_integer('data_seed', None, 'Seed for train/test/split.')
flags.DEFINE_boolean('feature_dependent_multiplier', True,
'If True, train lagrange multipliers based on '
'group features.')
flags.DEFINE_float('learning_rate', 0.1, 'Step size for model parameters.')
flags.DEFINE_integer('skip_iterations', 100,
'Number of training steps to skip before evaluation.')
flags.DEFINE_integer('num_steps', 10000, 'Number of gradient steps.')
flags.DEFINE_float('dual_scale', 5.0, 'Dual scale.')
flags.DEFINE_boolean('unconstrained', False,
'If True, train using TFCO with an empty constraint list.')
flags.DEFINE_boolean('standard_lagrangian', False,
'if True, use standard lagrangian of one multiplier per '
'constraint.')
flags.DEFINE_boolean('resample_proxy_groups', True,
'If True, resample proxy groups every epoch.')
flags.DEFINE_integer('n_resamples_per_candidate', 20,
'when using find_best_candidate_index, we take the '
'max constraint violations over n_resamples_per_candidate '
'resamples of the proxy groups.')
flags.DEFINE_string('group_features_type', 'full_group_vec', 'Type of group '
'features to compute. '
'full_group_vec: uses the full group membership vector of '
'size batch size. '
'size_alone: uses the proportional size of each group as a '
'single feature. '
'size_and_pr: uses the size of the group and the positive '
'rate for the group, resulting in 2 features. '
'avg_features: uses the average of the other features over '
'the group, as well as the group size.'
'kmeans: cluster all examples using kmeans. Group features '
'are the number of examples that fall in each cluster. ')
flags.DEFINE_integer('num_group_clusters', 100, 'number of clusters to use for '
'group_features_type=kmeans.')
flags.DEFINE_integer('num_multiplier_model_hidden_layers', 0,
'Number of hidden layers in the multiplier model.')
flags.DEFINE_float('noise_level', 0.3, 'Noise level of initial proxy groups.')
flags.DEFINE_boolean('uniform_groups', False, 'If True, ignore proxy groups '
'and sample groups uniformly.')
flags.DEFINE_float('min_group_frac', 0.01, 'smallest group size that we want '
'to constrain (as a fraction of the full dataset).')
flags.DEFINE_float('epsilon', 0.05, 'Slack to allow on constraints.')
FLAGS = flags.FLAGS
def load_dataset_adult(noise_level):
"""Loads Adult dataset."""
df = preprocess_data_adult()
df = add_proxy_columns_adult(df)
label_name = 'label'
feature_names = list(df.keys())
feature_names.remove(label_name)
protected_columns = ['race_White', 'race_Black', 'race_Other_combined']
for column in protected_columns:
feature_names.remove(column)
proxy_columns = get_proxy_column_names(protected_columns, noise_level)
feature_names = remove_saved_noise_levels(
protected_columns, feature_names, keep_noise_level=noise_level)
return df, feature_names, label_name, protected_columns, proxy_columns
def preprocess_data_adult():
"""Preprocess Adult dataset."""
categorical_columns = [
'workclass', 'education', 'marital_status', 'occupation', 'relationship',
'race', 'gender', 'native_country'
]
continuous_columns = [
'age', 'capital_gain', 'capital_loss', 'hours_per_week', 'education_num'
]
columns = [
'age', 'workclass', 'fnlwgt', 'education', 'education_num',
'marital_status', 'occupation', 'relationship', 'race', 'gender',
'capital_gain', 'capital_loss', 'hours_per_week', 'native_country',
'income_bracket'
]
label_column = 'label'
train_df_raw = pd.read_csv(
'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',
names=columns,
skipinitialspace=True)
test_df_raw = pd.read_csv(
'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test',
names=columns,
skipinitialspace=True,
skiprows=1)
train_df_raw[label_column] = (
train_df_raw['income_bracket'].apply(lambda x: '>50K' in x)).astype(int)
test_df_raw[label_column] = (
test_df_raw['income_bracket'].apply(lambda x: '>50K' in x)).astype(int)
# Preprocessing Features
pd.options.mode.chained_assignment = None # default='warn'
# Functions for preprocessing categorical and continuous columns.
def binarize_categorical_columns(input_train_df,
input_test_df,
categorical_columns=None):
def fix_columns(input_train_df, input_test_df):
test_df_missing_cols = set(input_train_df.columns) - set(
input_test_df.columns)
for c in test_df_missing_cols:
input_test_df[c] = 0
train_df_missing_cols = set(input_test_df.columns) - set(
input_train_df.columns)
for c in train_df_missing_cols:
input_train_df[c] = 0
input_train_df = input_train_df[input_test_df.columns]
return input_train_df, input_test_df
# Binarize categorical columns.
binarized_train_df = pd.get_dummies(
input_train_df, columns=categorical_columns)
binarized_test_df = pd.get_dummies(
input_test_df, columns=categorical_columns)
# Make sure the train and test dataframes have the same binarized columns.
fixed_train_df, fixed_test_df = fix_columns(binarized_train_df,
binarized_test_df)
return fixed_train_df, fixed_test_df
def bucketize_continuous_column(input_train_df,
input_test_df,
continuous_column_name,
num_quantiles=None,
bins=None):
assert (num_quantiles is None or bins is None)
if num_quantiles is not None:
_, bins_quantized = pd.qcut(
input_train_df[continuous_column_name],
num_quantiles,
retbins=True,
labels=False)
input_train_df[continuous_column_name] = pd.cut(
input_train_df[continuous_column_name], bins_quantized, labels=False)
input_test_df[continuous_column_name] = pd.cut(
input_test_df[continuous_column_name], bins_quantized, labels=False)
elif bins is not None:
input_train_df[continuous_column_name] = pd.cut(
input_train_df[continuous_column_name], bins, labels=False)
input_test_df[continuous_column_name] = pd.cut(
input_test_df[continuous_column_name], bins, labels=False)
# Filter out all columns except the ones specified.
train_df = train_df_raw[categorical_columns + continuous_columns +
[label_column]]
test_df = test_df_raw[categorical_columns + continuous_columns +
[label_column]]
# Bucketize continuous columns.
bucketize_continuous_column(train_df, test_df, 'age', num_quantiles=4)
bucketize_continuous_column(
train_df, test_df, 'capital_gain', bins=[-1, 1, 4000, 10000, 100000])
bucketize_continuous_column(
train_df, test_df, 'capital_loss', bins=[-1, 1, 1800, 1950, 4500])
bucketize_continuous_column(
train_df, test_df, 'hours_per_week', bins=[0, 39, 41, 50, 100])
bucketize_continuous_column(
train_df, test_df, 'education_num', bins=[0, 8, 9, 11, 16])
train_df, test_df = binarize_categorical_columns(
train_df,
test_df,
categorical_columns=categorical_columns + continuous_columns)
full_df = pd.concat([train_df, test_df])
full_df['race_Other_combined'] = full_df['race_Amer-Indian-Eskimo'] + full_df[
'race_Asian-Pac-Islander'] + full_df['race_Other']
return full_df
def add_proxy_columns_adult(df):
"""Adds noisy proxy columns to adult dataset."""
proxy_noises = [0.1, 0.2, 0.3, 0.4, 0.5]
protected_columns = ['race_White', 'race_Black', 'race_Other_combined']
# Generate proxy groups.
for noise in proxy_noises:
df = generate_proxy_columns(df, protected_columns, noise_param=noise)
return df
def generate_proxy_columns(df, protected_columns, noise_param=1):
"""Generates noisy proxy columns from binarized protected columns."""
proxy_columns = get_proxy_column_names(protected_columns, noise_param)
num_datapoints = len(df)
num_groups = len(protected_columns)
noise_idx = random.sample(
range(num_datapoints), int(noise_param * num_datapoints))
df_proxy = df.copy()
for i in range(num_groups):
df_proxy[proxy_columns[i]] = df_proxy[protected_columns[i]]
for j in noise_idx:
group_index = -1
for i in range(num_groups):
if df_proxy[proxy_columns[i]][j] == 1:
df_proxy.at[j, proxy_columns[i]] = 0
group_index = i
allowed_new_groups = list(range(num_groups))
allowed_new_groups.remove(group_index)
new_group_index = random.choice(allowed_new_groups)
df_proxy.at[j, proxy_columns[new_group_index]] = 1
break
if group_index == -1:
print('missing group information for datapoint ', j)
return df_proxy
# Split into train/val/test
def train_val_test_split(df, train_fraction, validate_fraction, seed=None):
"""Split the whole dataset into train/val/test."""
if seed is not None:
np.random.seed(seed=seed)
perm = np.random.permutation(df.index)
m = len(df.index)
train_end = int(train_fraction * m)
validate_end = int(validate_fraction * m) + train_end
train = df.iloc[perm[:train_end]]
validate = df.iloc[perm[train_end:validate_end]]
test = df.iloc[perm[validate_end:]]
return train, validate, test
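# Illustrative usage (hypothetical fractions): a 60/20/20 split of the dataframe returned by
# preprocess_data_adult(), with a reproducible seed.
# train_df, val_df, test_df = train_val_test_split(preprocess_data_adult(), 0.6, 0.2, seed=FLAGS.data_seed)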
def _print_metric(dataset_name, metric_name, metric_value):
"""Prints metrics."""
print('[metric] %s.%s=%f' % (dataset_name, metric_name, metric_value))
def compute_quantiles(features,
num_keypoints=10,
clip_min=None,
clip_max=None,
missing_value=None):
"""Computes quantiles for feature columns."""
# Clip min and max if desired.
if clip_min is not None:
features = np.maximum(features, clip_min)
features = np.append(features, clip_min)
if clip_max is not None:
features = np.minimum(features, clip_max)
features = np.append(features, clip_max)
# Make features unique.
unique_features = np.unique(features)
# Remove missing values if specified.
if missing_value is not None:
unique_features = np.delete(unique_features,
np.where(unique_features == missing_value))
# Compute and return quantiles over unique non-missing feature values.
return np.quantile(
unique_features,
np.linspace(0., 1., num=num_keypoints),
interpolation='nearest').astype(float)
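# Illustrative example (not from the original): 5 keypoints over the integers 0..100.
# print(compute_quantiles(np.arange(101), num_keypoints=5))  # -> [  0.  25.  50.  75. 100.]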
def print_metrics_results_dict(results_dict, iterate='best'):
"""Prints metrics from results_dict."""
index = -1
if iterate == 'best':
if FLAGS.unconstrained:
index = np.argmin(np.array(results_dict['train.true_error_rates']))
else:
index = tfco.find_best_candidate_index(
np.array(results_dict['train.true_error_rates']),
np.array(results_dict['train.sampled_violations_max']).reshape(
(-1, 1)),
rank_objectives=True)
for metric_name, values in results_dict.items():
_print_metric(iterate, metric_name, values[index])
# Helper functions for evaluation.
def error_rate(labels, predictions):
"""Computes error rate."""
# Recall that the labels are binary (0 or 1).
signed_labels = (labels * 2) - 1
return np.mean(signed_labels * predictions <= 0.0)
def group_error_rates(labels, predictions, groups):
"""Returns a list containing error rates for each protected group."""
errors = []
for jj in range(groups.shape[1]):
if groups[:, jj].sum() == 0: # Group is empty?
errors.append(0.0)
else:
signed_labels_jj = 2 * labels[groups[:, jj] == 1] - 1
predictions_jj = predictions[groups[:, jj] == 1]
errors.append(np.mean(signed_labels_jj * predictions_jj <= 0))
return errors
def tpr(labels, predictions):
"""Computes true positive rate."""
# Recall that the labels are binary (0 or 1).
signed_labels = (labels * 2) - 1
predictions_pos = predictions[signed_labels > 0]
return np.mean(predictions_pos > 0.0)
def group_tprs(labels, predictions, groups):
"""Returns a list containing tprs for each protected group."""
tprs = []
for jj in range(groups.shape[1]):
if groups[:, jj].sum() == 0: # Group is empty?
tprs.append(0.0)
else:
signed_labels_jj = 2 * labels[groups[:, jj] == 1] - 1
predictions_jj = predictions[groups[:, jj] == 1]
predictions_jj_pos = predictions_jj[signed_labels_jj > 0]
tprs.append(np.mean(predictions_jj_pos > 0))
return tprs
# Get proxy columns.
def get_proxy_column_names(protected_columns, noise_param, noise_index=''):
"""Gets proxy column names."""
return [
'PROXY' + noise_index + '_' + '%0.2f_' % noise_param + column_name
for column_name in protected_columns
]
def remove_saved_noise_levels(protected_columns, feature_names,
keep_noise_level):
"""Removes saved noise level columns from feature columns."""
saved_noise_levels = [0.1, 0.2, 0.3, 0.4, 0.5]
saved_noise_levels.remove(keep_noise_level)
for noise_level in saved_noise_levels:
proxy_columns = get_proxy_column_names(protected_columns, noise_level)
for column in proxy_columns:
feature_names.remove(column)
return feature_names
def generate_proxy_groups_single_noise(input_groups, noise_param=1):
"""Generate proxy groups within noise noise_param."""
proxy_groups = np.copy(input_groups)
num_groups = len(input_groups[0])
num_datapoints = len(input_groups)
noise_idx = random.sample(
range(num_datapoints), int(noise_param * num_datapoints))
for j in noise_idx:
group_index = -1
for i in range(num_groups):
if proxy_groups[j][i] == 1:
proxy_groups[j][i] = 0
group_index = i
allowed_new_groups = list(range(num_groups))
allowed_new_groups.remove(group_index)
new_group_index = random.choice(allowed_new_groups)
proxy_groups[j][new_group_index] = 1
break
if group_index == -1:
print('missing group information for datapoint ', j)
return proxy_groups
def generate_proxy_groups_uniform(num_examples, min_group_frac=0.05):
"""Generate proxy groups within noise noise_param."""
# Generate a random array of the same shape as input groups. Each column
# in the array is a a random binary vector where the number of 1's is at least
# min_group_size.
group_frac = np.random.uniform(min_group_frac, 1)
num_in_group = int(num_examples * group_frac)
group_assignment = np.array([0] * (num_examples - num_in_group) +
[1] * num_in_group)
np.random.shuffle(group_assignment)
return group_assignment.reshape((-1, 1))
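# Illustrative note: generate_proxy_groups_uniform(1000, min_group_frac=0.05) returns a (1000, 1)
# binary column assigning a uniformly drawn fraction (at least 5%) of the examples to a single group.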
def generate_proxy_groups_noise_array(input_groups, noise_array=None):
"""Generate proxy groups within noise noise_param."""
proxy_groups = np.copy(input_groups)
num_groups = len(input_groups[0])
for row in proxy_groups:
new_j = -1
for k in range(num_groups):
if row[k] == 1:
# draw from noise_params to decide which group to switch to.
new_j = np.random.choice(num_groups, 1, p=noise_array[k])
row[k] = 0
assert new_j >= 0
row[new_j] = 1
return proxy_groups
def extract_group_features(input_groups,
input_features,
input_labels,
group_features_type,
num_group_clusters=None,
kmeans_model=None):
"""Extracts features from groups."""
input_groups_t = input_groups.transpose().astype(int)
all_group_features = []
for group_indices in input_groups_t:
group_fraction = np.mean(group_indices)
if group_features_type == 'size_alone':
all_group_features.append(np.array([group_fraction]))
elif group_features_type == 'size_and_pr':
mean_labels = np.mean(input_labels[group_indices == 1], axis=0)
mean_features = np.append(mean_labels, group_fraction)
all_group_features.append(mean_features)
elif group_features_type == 'avg_features':
mean_features = np.mean(input_features[group_indices == 1], axis=0)
mean_features = np.append(mean_features, group_fraction)
all_group_features.append(mean_features)
elif group_features_type == 'full_group_vec':
print('group_indices shape', group_indices.shape)
all_group_features.append(group_indices)
elif group_features_type == 'kmeans':
group_xs = input_features[group_indices == 1]
clusters = kmeans_model.predict(group_xs)
# Counter doesn't include clusters with count 0.
# Need to manually add 0 counts for clusters that aren't seen.
count_dict = dict.fromkeys(range(num_group_clusters), 0)
count_dict.update(Counter(clusters))
compressed_clusters = np.fromiter(count_dict.values(), dtype='float32')
all_group_features.append(compressed_clusters)
return np.array(all_group_features)
from scipy import misc
import tensorflow as tf
import align.detect_face
import matplotlib.pyplot as plt
import numpy as np
minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7] # three steps's threshold
factor = 0.709 # scale factor
gpu_memory_fraction = 1.0
# function pick = nms(boxes,threshold,type)
# Non-maximum suppression (NMS): remove duplicate detection boxes
def nms(boxes, threshold, method):
if boxes.size==0:
return np.empty((0,3))
# Coordinates of the restored (rescaled) boxes
print("Entering NMS (non-maximum suppression)")
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
print(x1,y1,x2,y2)
# Score values, i.e. the confidence that each box is a face
s = boxes[:,4]
print(s)
area = (x2-x1+1) * (y2-y1+1)
print(area)
# Sort scores from small to large; argsort returns the indices
I = np.argsort(s)
#print(I)
pick = np.zeros_like(s, dtype=np.int16)
#print(pick)
counter = 0
s = 0
while I.size>0:
i = I[-1]
s = s+1
print("进入while%d"%s)
print(i)
pick[counter] = i
counter += 1
idx = I[0:-1]
#print(idx)
#print(type(idx))
#x22= np.array([17.,18.,19.])
#print(x22[idx])
#print( x1[idx])
#print( y1[idx])
#print( x2[idx])
#print( y2[idx])
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
#print(xx1)
#print(yy1)
#print(xx2)
#print(yy2)
w = np.maximum(0.0, xx2-xx1+1)
h = np.maximum(0.0, yy2-yy1+1)
inter = w * h
#print(inter)
#print(area[idx])
#print(area[i])
if method == 'Min':
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
#print(o)
#print(threshold)
I = I[np.where(o<=threshold)]
#print(I)
pick = pick[0:counter]
print(pick)
print("_________________________")
return pick
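# Illustrative check (not in the original script): two heavily overlapping boxes with scores
# 0.9 and 0.8 should collapse to the higher-scoring one under 'Union' (IoU) NMS.
# toy_boxes = np.array([[0., 0., 10., 10., 0.9],
#                       [1., 1., 11., 11., 0.8]])
# print(nms(toy_boxes, 0.5, 'Union'))  # expected: [0], the index of the 0.9-score box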
def generateBoundingBox(imap, reg, scale, t):
"""Use heatmap to generate bounding boxes"""
stride = 2
cellsize = 12
# Get the x1, y1, x2, y2 coordinates
print("Entering generateBoundingBox")
#print(imap.shape)
imap = np.transpose(imap)
print(imap.shape)
#print(type(imap))
dx1 = np.transpose(reg[:, :, 0])
dy1 = np.transpose(reg[:, :, 1])
dx2 = np.transpose(reg[:, :, 2])
dy2 = np.transpose(reg[:, :, 3])
print("进入reg")
#print(reg[:, :, 0].shape)
print(dx1)
print(dy1)
print(dx2)
print(dy2)
# Get the coordinates of face boxes whose confidence exceeds the threshold
print(imap)
y, x = np.where(imap >= t)
print(y)
print(x)
#print(type(y))
#print(y.shape)
#print(y.shape[0])
# Case where only one box qualifies
if y.shape[0] == 1:
#print("进入if判断")
dx1 = np.flipud(dx1)#翻转矩阵
dy1 = np.flipud(dy1)
dx2 = np.flipud(dx2)
dy2 = np.flipud(dy2)
# Keep only the boxes that meet the condition
print("_____________")
# a= imap[(y,x)]
# print(a)
score = imap[(y, x)]
print(score)
print("_____________")
#print(dx1[(y, x)].shape)
print([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])
print((np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])).shape)
print("_____________")
reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]))
print(reg.shape)
if reg.size == 0:
#print("进入if")
reg = np.empty((0, 3))
# Restore to the original image scale
print("_____________")
#print(np.vstack([y,x]))
bb = np.transpose(np.vstack([y, x]))
print(bb)
print('Entering the computation part')
#print(stride * bb)
print(scale)
# #print((stride * bb + 1))
#print((stride * bb + 1) / scale)
q1 = np.fix((stride * bb + 1) / scale)
q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale)
print(q1)
print(q2)
# shape(None, 9)
#print(np.expand_dims(score, 0))
#print(np.expand_dims(score, 1))
boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])
#print(boundingbox)
return boundingbox, reg
# In the returned boxes, the first 4 values are the face-box coordinates restored to the original
# scale, the 5th value is the probability that the box contains a face, and the last 4 values are
# the un-restored face-box coordinates
# inter-scale nms
# Non-maximum suppression: remove duplicate detection boxes
print('Creating networks and loading parameters')
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
image_path = 'C:\\Users\\rjx\\PycharmProjects\\untitled1\\facenet-master\\data\\test\\test4.jpg'
img = misc.imread(image_path)
#print(img.shape)
bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
nrof_faces = bounding_boxes.shape[0]  # number of faces
#print('Number of faces found: {}'.format(nrof_faces))
print(_.shape)
print(bounding_boxes.shape)
#print(type(bounding_boxes))
print(bounding_boxes[:,:4])
det = bounding_boxes[:,0:4]
# Store all the face boxes
det_arr = []
#print(type(det_arr))
# Original image size
img_size = np.asarray(img.shape)[0:2]
#print(img_size)
# for i in range(nrof_faces):
# #print(det[i])
# print(np.squeeze(det[i]))
# det_arr.append(np.squeeze(det[i]))
# print(det_arr)
# Even if there are multiple faces, one face is enough here
# Get the size of each face box
bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])
print(bounding_box_size)
# Center coordinates of the original image
img_center = img_size / 2
#print(img_center)
# Offset of each face-box center relative to the image center;
# the coordinates (det[:,0]+det[:,2])/2 and (det[:,1]+det[:,3])/2 are exactly the face-box centers
offsets = np.vstack([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
#print([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
#print(offsets)
# Sum of squared offsets from each face-box center to the image center
# e.g. if offsets=[[ 4.20016056 145.02849352 -134.53862838] [ -22.14250919 -26.74770141 -30.76835772]]
# then offset_dist_squared=[ 507.93206189 21748.70346425 19047.33436466]
offset_dist_squared = np.sum(np.power(offsets,2.0),0)
#print(offset_dist_squared)
# Subtract twice the squared offset from the box area and pick the face box with the largest result,
# i.e. jointly consider box size and position, preferring large boxes that are close to the image center
index = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering
#print(bounding_box_size-offset_dist_squared*2.0)
#print(index)
det_arr.append(det[index,:])
print("______________________________")
#print(det_arr)
#print(enumerate(det_arr))
for i, det in enumerate(det_arr):
# [4,] expand the bounding box by the margin and crop
det = np.squeeze(det)
#print(i)
#print(det)
bb = np.zeros(4, dtype=np.int32)
# Crop margin around the bounding box: the face box we crop is a bit larger than the one MTCNN returns;
# how much larger is controlled by the margin parameter
# print(bb)
bb[0] = np.maximum(det[0] - 32 / 2, 0)
bb[1] = np.maximum(det[1] - 32 / 2, 0)
bb[2] = np.minimum(det[2] + 32 / 2, img_size[1])
bb[3] = np.minimum(det[3] + 32 / 2, img_size[0])
# print(np.max(det[0] - 32 / 2, 0))
# print(det[1] - 32 / 2)
# print(det[2] + 32 / 2)
# print(det[3] + 32 / 2)
#print(bb)
# Crop the face box, then resize
cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
#print(cropped)
# Resize to the target size; save the image and the bounding-box location info
scaled = misc.imresize(cropped, (160, 160), interp='bilinear')
#nrof_successfully_aligned += 1
#filename_base, file_extension = os.path.splitext(output_filename)
#if args.detect_multiple_faces:
# output_filename_n = "{}_{}{}".format(filename_base, i, file_extension)
#else:
# output_filename_n = "{}{}".format(filename_base, file_extension)
# Save the image
#misc.imsave(output_filename_n, scaled)
# Record the info in the bounding_boxes_XXXXX.txt file
#text_file.write('%s %d %d %d %d\n' % (output_filename_n, bb[0], bb[1], bb[2], bb[3]))
###########################################################################################
factor_count=0
total_boxes=np.empty((0,9))
points=np.empty(0)
#print(type(total_boxes))
print(total_boxes)
print("显示total_boxes")
#print(points)
#print(type(points))
# Get the width and height of the input image
h=img.shape[0]
w=img.shape[1]
print(h)
print(w)
# Take the smaller of width and height, e.g. 250*250
minl=np.amin([h, w])
#print(minl)
m=12.0/minsize#P Net 12*12 12/20=0.6
minl=minl*m#250*0.6=150
#print(minl)
# create scale pyramid
scales=[]
while minl>=12:
scales += [m*np.power(factor, factor_count)]
minl = minl*factor
#print(minl)
factor_count += 1
#print(factor_count)
print(scales)
# Display the image
plt.figure()
scale_img = img.copy()
# Step 1: first scale the image to different sizes to form an "image pyramid",
# then run it through the P-Net
# first stage
i=0
for scale in scales:
# Width and height must be rounded to integers
hs = int(np.ceil(h * scale))
ws = int(np.ceil(w * scale))
print(hs)
print(ws)
# Resize the image using OpenCV-style resampling
im_data = align.detect_face.imresample(img, (hs, ws))
print(im_data.shape)
print("im_data设置完毕")
#plt.imshow(scale_img)
#plt.show()
# Visualize the effect of the "image pyramid"
# -- added by 韦访
#plt.imshow(img)
#plt.show()
#plt.imshow(im_data)
#plt.show()
#scale_img[0:im_data.shape[0], 0:im_data.shape[1]] = 0
#scale_img[0:im_data.shape[0], 0:im_data.shape[1]] = im_data[0:im_data.shape[0], 0:im_data.shape[1]]
# plt.imshow(scale_img)
# plt.show()
# print('im_data.shape[0]', im_data.shape[0])
# print('im_data.shape[1]', im_data.shape[1])
# # Normalize the image data to [-1,1]
# #print(im_data.shape)
im_data = (im_data - 127.5) * 0.0078125
print("---------------------")
#print(im_data.shape)
# Add a batch-size dimension; since we process one image at a time, the batch size is 1
img_x = np.expand_dims(im_data, 0)
#print(img_x.shape)
img_y = np.transpose(img_x, (0, 2, 1, 3))
#print(img_y.shape)
# Feed it into the P-Net
# Suppose img_y.shape=(1, 150, 150, 3)
# P-Net applies 3 conv layers with 3*3 kernels and 1*1 stride, plus one pooling layer with 2*2 stride,
# so the conv4-2 output shape is (1, 70, 70, 4)
# The 70 comes from: (150-3+1)/1=148, after pooling 148/2=74,
# then one conv layer gives (74-3+1)/1=72, and another gives (72-3+1)/1=70
# See this blog post for the calculation: https://blog.csdn.net/rookie_wei/article/details/80146620
# The prob1 output shape is (1, 70, 70, 2)
out = pnet(img_y)
#print(type(out))
#print(out[0].shape)
#print(out[1].shape)
# Transpose back
# out0 has shape (1, 70, 70, 4):
# it holds the coordinates of the candidate face boxes
out0 = np.transpose(out[0], (0, 2, 1, 3))
# out1 has shape (1, 70, 70, 2):
# it holds the confidence that each out0 box is a face; the 2nd value is the face probability
out1 = np.transpose(out[1], (0, 2, 1, 3))
print("out的shape")
print(out0.shape)
print(out1.shape)
print("-----------------")
#print(out0[:,:,:,:].shape)
print(out0[0,:,:,:].shape)
print("-----------------")
#print(out1[:,:,:,1].shape)
print(out1[0,:,:,1].shape)
# out1[0,:,:,1]: box confidence; one value is enough because the two values sum exactly to 1,
# so we only take the probability that the box IS a face
# out0[0,:,:,:]: the face boxes
# scale: the image scaling factor
# threshold: the threshold, 0.6 here
# In the returned boxes, the first 4 values are the rescaled face-box coordinates, the 5th is the
# probability that the box is a face, and the last 4 are the un-rescaled box coordinates
boxes, _ = generateBoundingBox(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, threshold[0])
# # Confidence corresponding to each face-box coordinate
# print('before processing:', out1[0, :, :, 1])
# print('------------------')
# s = boxes[:, 4]
# print('after processing:', s)
#
# # # Display the face boxes
# print('------------------')
# x1 = boxes[:, 0]
# y1 = boxes[:, 1]
# x2 = boxes[:, 2]
# y2 = boxes[:, 3]
# print(len(boxes))
# print('------------------')
# for i in range(len(boxes)):
# print(x1[i])
# print(y1[i])
# print(x2[i])
# print(y2[i])
# print('------------------')
# print(i)
# plt.gca().add_patch(plt.Rectangle((x1[i], y1[i]), x2[i] - x1[i], y2[i] - y1[i], edgecolor='w',facecolor='none'))
# -- added by 韦访
# plt.imshow(scale_img)
# plt.show()
# exit()
# inter-scale nms
# Non-maximum suppression: remove duplicate detection boxes
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
total_boxes = np.append(total_boxes, boxes, axis=0)
# x1 = boxes[:, 0]
# y1 = boxes[:, 1]
# x2 = boxes[:, 2]
# y2 = boxes[:, 3]
# for i in range(len(boxes)):
# print(x1[i], y1[i], x2[i], y2[i])
# plt.gca().add_patch(
# plt.Rectangle((x1[i], y1[i]), x2[i] - x1[i], y2[i] - y1[i], edgecolor='w', facecolor='none'))
# -- added by 韦访
#plt.imshow(scale_img)
#plt.show()
#exit()
# After running the image through every scale, we have all bounding boxes found on the original image
# at the different scales; run NMS over them once more, this time with a higher threshold
numbox = total_boxes.shape[0]
if numbox > 0:
# Run NMS again to drop the less reliable face boxes
pick = nms(total_boxes.copy(), 0.7, 'Union')
total_boxes = total_boxes[pick, :]
# Get the width and height of each face box
regw = total_boxes[:, 2] - total_boxes[:, 0]
regh = total_boxes[:, 3] - total_boxes[:, 1]
# x1 = total_boxes[:, 0]
# y1 = total_boxes[:, 1]
# x2 = total_boxes[:, 2]
# y2 = total_boxes[:, 3]
# for i in range(len(total_boxes)):
# print(x1[i], y1[i], x2[i], y2[i])
# plt.gca().add_patch(
# plt.Rectangle((x1[i], y1[i]), x2[i] - x1[i], y2[i] - y1[i], edgecolor='w', facecolor='none'))
# Refine the face-box coordinates to make the boxes tighter
qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
# x1 = qq1
# y1 = qq2
# x2 = qq3
# y2 = qq4
# for i in range(len(total_boxes)):
# print('lll', x1[i], y1[i], x2[i], y2[i])
# plt.gca().add_patch(
# plt.Rectangle((x1[i], y1[i]), x2[i] - x1[i], y2[i] - y1[i], edgecolor='r', facecolor='none'))
# -- added by 韦访
# plt.imshow(scale_img)
# plt.show()
# exit()
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
total_boxes = align.detect_face.rerec(total_boxes.copy())
total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = align.detect_face.pad(total_boxes.copy(), w, h)
# R-Net
numbox = total_boxes.shape[0]
if numbox > 0:
# second stage R-Net: resize the bounding boxes produced by P-Net to 24x24
tempimg = np.zeros((24, 24, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
# R-Net input size is 24*24, so resize
tempimg[:, :, :, k] = align.detect_face.imresample(tmp, (24, 24))
#else:
# return np.empty()
# Normalize to [-1,1]
tempimg = (tempimg - 127.5) * 0.0078125
# Transpose to [n,24,24,3]
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
# Run through the R-Net
out = rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
ipass = np.where(score > threshold[1])
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[:, ipass[0]]
if total_boxes.shape[0] > 0:
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick, :]
total_boxes = align.detect_face.bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))
total_boxes = align.detect_face.rerec(total_boxes.copy())
# Step 3: run through the O-Net
numbox = total_boxes.shape[0]
if numbox > 0:
# third stage
total_boxes = np.fix(total_boxes).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = align.detect_face.pad(total_boxes.copy(), w, h)
tempimg = np.zeros((48, 48, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
                # O-Net expects 48x48 inputs, so resample the crop
tempimg[:, :, :, k] = align.detect_face.imresample(tmp, (48, 48))
#else:
# return np.empty()
tempimg = (tempimg - 127.5) * 0.0078125
        tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
import sys
import matplotlib.pyplot as plt
from astropy.io import fits
from scipy import optimize
import numpy as np
from pathlib import Path
from scipy import interpolate
import sys
import math as m
from . import nbspectra
########################################################################################
########################################################################################
# GENERAL FUNCTIONS #
########################################################################################
########################################################################################
def black_body(wv,T):
    #Computes the black-body flux at temperature T for wavelengths wv (in Angstroms)
c = 2.99792458e10 #speed of light in cm/s
k = 1.380658e-16 #boltzmann constant
h = 6.6260755e-27 #planck
w=wv*1e-8 #Angstrom to cm
bb=2*h*c**2*w**(-5)*(np.exp(h*c/k/T/w)-1)**(-1)
return bb
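# Quick sanity check of black_body (a sketch, not part of the original module): for wv given in
# Angstroms and T = 5778 K, the returned Planck curve peaks near wv ~ 5000 A, consistent with
# Wien's law lambda_max ~ 2.898e7 A*K / T ~ 5.0e3 A.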
def vacuum2air(wv): #wv in angstroms
wv=wv*1e-4 #A to micrometer
a=0
b1=5.792105e-2
b2=1.67917e-3
c1=238.0185
c2=57.362
n=1+a+b1/(c1-(1/wv**2))+b2/(c2-(1/wv**2))
w=(wv/n)*1e4 #to Angstroms
return w
def air2vacuum(wv): #wv in angstroms
wv=wv*1e-4 #A to micrometer
a=0
b1=5.792105e-2
b2=1.67917e-3
c1=238.0185
c2=57.362
n=1+a+b1/(c1-(1/wv**2))+b2/(c2-(1/wv**2))
w=(wv*n)*1e4 #to Angstroms
return w
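# Note (added comment): vacuum2air and air2vacuum use the same refractive-index expression n(wv),
# so they are approximate inverses of each other; round-tripping a wavelength through both should
# recover it to far better than the sampling of the model grids used below.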
########################################################################################
########################################################################################
# PHOTOMETRY FUNCTIONS #
########################################################################################
########################################################################################
def interpolate_Phoenix_mu_lc(self,temp,grav):
"""Cut and interpolate phoenix models at the desired wavelengths, temperatures, logg and metalicity(not yet). For spectroscopy.
Inputs
temp: temperature of the model;
grav: logg of the model
Returns
creates a temporal file with the interpolated spectra at the temp and grav desired, for each surface element.
"""
    #TODO (translated): also ask for the resolution and handle it here.
import warnings
warnings.filterwarnings("ignore")
    path = self.path / 'models' / 'Phoenix_mu' #path relative to the working directory
files = [x.name for x in path.glob('lte*fits') if x.is_file()]
list_temp=np.unique([float(t[3:8]) for t in files])
list_grav=np.unique([float(t[9:13]) for t in files])
#check if the parameters are inside the grid of models
if grav<np.min(list_grav) or grav>np.max(list_grav):
sys.exit('Error in the interpolation of Phoenix_mu models. The desired logg is outside the grid of models, extrapolation is not supported. Please download the \
Phoenix intensity models covering the desired logg from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73')
if temp<np.min(list_temp) or temp>np.max(list_temp):
sys.exit('Error in the interpolation of Phoenix_mu models. The desired T is outside the grid of models, extrapolation is not supported. Please download the \
Phoenix intensity models covering the desired T from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73')
lowT=list_temp[list_temp<=temp].max() #find the model with the temperature immediately below the desired temperature
uppT=list_temp[list_temp>=temp].min() #find the model with the temperature immediately above the desired temperature
lowg=list_grav[list_grav<=grav].max() #find the model with the logg immediately below the desired logg
uppg=list_grav[list_grav>=grav].min() #find the model with the logg immediately above the desired logg
#load the flux of the four phoenix model
name_lowTlowg='lte{:05d}-{:.2f}-0.0.PHOENIX-ACES-AGSS-COND-SPECINT-2011.fits'.format(int(lowT),lowg)
name_lowTuppg='lte{:05d}-{:.2f}-0.0.PHOENIX-ACES-AGSS-COND-SPECINT-2011.fits'.format(int(lowT),uppg)
name_uppTlowg='lte{:05d}-{:.2f}-0.0.PHOENIX-ACES-AGSS-COND-SPECINT-2011.fits'.format(int(uppT),lowg)
name_uppTuppg='lte{:05d}-{:.2f}-0.0.PHOENIX-ACES-AGSS-COND-SPECINT-2011.fits'.format(int(uppT),uppg)
#Check if the files exist in the folder
if name_lowTlowg not in files:
sys.exit('The file '+name_lowTlowg+' required for the interpolation does not exist. Please download it from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73 and add it to your path: '+path)
if name_lowTuppg not in files:
sys.exit('The file '+name_lowTuppg+' required for the interpolation does not exist. Please download it from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73 and add it to your path: '+path)
if name_uppTlowg not in files:
sys.exit('The file '+name_uppTlowg+' required for the interpolation does not exist. Please download it from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73 and add it to your path: '+path)
if name_uppTuppg not in files:
sys.exit('The file '+name_uppTuppg+' required for the interpolation does not exist. Please download it from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73 and add it to your path: '+path)
wavelength=np.arange(500,26000) #wavelength in A
idx_wv=np.array(wavelength>self.wavelength_lower_limit) & np.array(wavelength<self.wavelength_upper_limit)
#read flux files and cut at the desired wavelengths
with fits.open(path / name_lowTlowg) as hdul:
amu = hdul[1].data
amu = np.append(amu[::-1],0.0)
flux_lowTlowg=hdul[0].data[:,idx_wv]
with fits.open(path / name_lowTuppg) as hdul:
flux_lowTuppg=hdul[0].data[:,idx_wv]
with fits.open(path / name_uppTlowg) as hdul:
flux_uppTlowg=hdul[0].data[:,idx_wv]
with fits.open(path / name_uppTuppg) as hdul:
flux_uppTuppg=hdul[0].data[:,idx_wv]
#interpolate in temperature for the two gravities
if uppT==lowT: #to avoid nans
flux_lowg = flux_lowTlowg
flux_uppg = flux_lowTuppg
else:
flux_lowg = flux_lowTlowg + ( (temp - lowT) / (uppT - lowT) ) * (flux_uppTlowg - flux_lowTlowg)
flux_uppg = flux_lowTuppg + ( (temp - lowT) / (uppT - lowT) ) * (flux_uppTuppg - flux_lowTuppg)
#interpolate in log g
if uppg==lowg: #to avoid dividing by 0
flux = flux_lowg
else:
flux = flux_lowg + ( (grav - lowg) / (uppg - lowg) ) * (flux_uppg - flux_lowg)
angle0 = flux[0]*0.0 #LD of 90 deg, to avoid dividing by 0? (not sure, ask Kike)
flux_joint = np.vstack([flux[::-1],angle0]) #add LD coeffs at 0 and 1 proj angles
# flpk=flux_joint[0]*np.pi*np.sin(np.cos(amu[0]))**2#Add all fluxes of all angles multiplied by their areas to compute the integrated flux
# for i in range(1,len(amu)):
# flpk=flpk+flux_joint[i]*(np.sin(np.cos(amu[i]))**2-np.sin(np.cos(amu[i-1]))**2)*np.pi
return amu, wavelength[idx_wv], flux_joint
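# In short, the routine above performs a bilinear interpolation in (T, logg) between the four
# bracketing PHOENIX SPECINT models and returns the mu angles, the trimmed wavelength grid and
# the angle-resolved intensities (with an extra zero-intensity row appended for mu -> 0).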
def interpolate_filter(self):
path = self.path / 'models' / 'filters' / self.filter_name
try:
wv, filt = np.loadtxt(path,unpack=True)
    except: #if the filter does not exist, create a top-hat filter from the wavelength range
wv=np.array([self.wavelength_lower_limit,self.wavelength_upper_limit])
filt=np.array([1,1])
        print('Filter ',self.filter_name,' does not exist inside the filters folder. Using the wavelength range in starsim.conf. Filters are available at http://svo2.cab.inta-csic.es/svo/theory/fps3/')
f = interpolate.interp1d(wv,filt,bounds_error=False,fill_value=0)
return f
def limb_darkening_law(self,amu):
if self.limb_darkening_law == 'linear':
mu=1-self.limb_darkening_q1*(1-amu)
elif self.limb_darkening_law == 'quadratic':
a=2*np.sqrt(self.limb_darkening_q1)*self.limb_darkening_q2
b=np.sqrt(self.limb_darkening_q1)*(1-2*self.limb_darkening_q2)
mu=1-a*(1-amu)-b*(1-amu)**2
elif self.limb_darkening_law == 'sqrt':
a=np.sqrt(self.limb_darkening_q1)*(1-2*self.limb_darkening_q2)
b=2*np.sqrt(self.limb_darkening_q1)*self.limb_darkening_q2
mu=1-a*(1-amu)-b*(1-np.sqrt(amu))
elif self.limb_darkening_law == 'log':
a=self.limb_darkening_q2*self.limb_darkening_q1**2+1
b=self.limb_darkening_q1**2-1
mu=1-a*(1-amu)-b*amu*(1-np.log(amu))
else:
sys.exit('Error in limb darkening law, please select one of the following: phoenix, linear, quadratic, sqrt, logarithmic')
return mu
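# Usage sketch (added note): with self.limb_darkening_law='quadratic', the (q1, q2) pair appears
# to follow a Kipping-style parametrization (a = 2*sqrt(q1)*q2, b = sqrt(q1)*(1-2*q2)); the
# returned factor mu goes from 1 at disc centre (amu=1) down to 1-a-b at the limb (amu=0).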
def compute_immaculate_lc(self,Ngrid_in_ring,acd,amu,pare,flnp,f_filt,wv):
N = self.n_grid_rings #Number of concentric rings
    flxph = 0.0 #initialize flux of photosphere
sflp=np.zeros(N) #brightness of ring
flp=np.zeros([N,len(wv)]) #spectra of each ring convolved by filter
#Computing flux of immaculate photosphere and of every pixel
for i in range(0,N): #Loop for each ring, to compute the flux of the star.
        #Interpolate Phoenix intensity models to the correct projected angle:
if self.use_phoenix_limb_darkening:
acd_low=np.max(acd[acd<amu[i]]) #angles above and below the proj. angle of the grid
acd_upp=np.min(acd[acd>=amu[i]])
idx_low=np.where(acd==acd_low)[0][0]
idx_upp=np.where(acd==acd_upp)[0][0]
dlp = flnp[idx_low]+(flnp[idx_upp]-flnp[idx_low])*(amu[i]-acd_low)/(acd_upp-acd_low) #limb darkening
else: #or use a specified limb darkening law
dlp = flnp[0]*limb_darkening_law(self,amu[i])
flp[i,:]=dlp*pare[i]/(4*np.pi)*f_filt(wv) #spectra of one grid in ring N multiplied by the filter.
        sflp[i]=np.sum(flp[i,:]) #brightness of one grid element in ring i.
        flxph=flxph+sflp[i]*Ngrid_in_ring[i] #total BRIGHTNESS of the immaculate photosphere
return sflp, flxph
def compute_immaculate_facula_lc(self,Ngrid_in_ring,acd,amu,pare,flnp,f_filt,wv):
    '''Compute the spectra of each grid element, adding limb darkening.
'''
N = self.n_grid_rings #Number of concentric rings
    flxfc = 0.0 #initialize flux of the facula
sflf=np.zeros(N) #brightness of ring
flf=np.zeros([N,len(wv)]) #spectra of each ring convolved by filter
#Computing flux of immaculate photosphere and of every pixel
for i in range(0,N): #Loop for each ring, to compute the flux of the star.
        #Interpolate Phoenix intensity models to the correct projected angle:
if self.use_phoenix_limb_darkening:
acd_low=np.max(acd[acd<amu[i]]) #angles above and below the proj. angle of the grid
acd_upp=np.min(acd[acd>=amu[i]])
idx_low=np.where(acd==acd_low)[0][0]
idx_upp=np.where(acd==acd_upp)[0][0]
dlp = flnp[idx_low]+(flnp[idx_upp]-flnp[idx_low])*(amu[i]-acd_low)/(acd_upp-acd_low) #limb darkening
else: #or use a specified limb darkening law
dlp = flnp[0]*limb_darkening_law(self,amu[i])
flf[i,:]=dlp*pare[i]/(4*np.pi)*f_filt(wv) #spectra of one grid in ring N multiplied by the filter.
#Limb brightening
        dtfmu=250.9-407.4*amu[i]+190.9*amu[i]**2 #(T_fac-T_ph) scaled by a factor that depends on the projected angle (facular limb brightening)
        sflf[i]=np.sum(flf[i,:])*((self.temperature_photosphere+dtfmu)/(self.temperature_facula))**4 #brightness of one grid element in ring i.
        flxfc=flxfc+sflf[i]*Ngrid_in_ring[i] #total BRIGHTNESS of the immaculate facula
return sflf, flxfc
def generate_rotating_photosphere_lc(self,Ngrid_in_ring,pare,amu,bph,bsp,bfc,flxph,vec_grid,inversion,plot_map=True):
'''Loop for all the pixels and assign the flux corresponding to the grid element.
'''
simulate_planet=self.simulate_planet
N = self.n_grid_rings #Number of concentric rings
iteration=0
#Now loop for each Observed time and for each grid element. Compute if the grid is ph spot or fc and assign the corresponding CCF.
# print('Diff rotation law is hard coded. Check ref time for inverse problem. Add more Spot evo laws')
if not inversion:
sys.stdout.write(" ")
flux=np.zeros([len(self.obs_times)]) #initialize total flux at each timestamp
filling_sp=np.zeros(len(self.obs_times))
filling_ph=np.zeros(len(self.obs_times))
filling_pl=np.zeros(len(self.obs_times))
filling_fc=np.zeros(len(self.obs_times))
for k,t in enumerate(self.obs_times):
typ=[] #type of grid, ph sp or fc
if simulate_planet:
planet_pos=compute_planet_pos(self,t)#compute the planet position at current time. In polar coordinates!!
else:
planet_pos = [2.0,0.0,0.0]
if self.spot_map.size==0:
spot_pos=np.array([np.array([m.pi/2,-m.pi,0.0,0.0])])
else:
spot_pos=compute_spot_position(self,t) #compute the position of all spots at the current time. Returns theta and phi of each spot.
vec_spot=np.zeros([len(self.spot_map),3])
xspot = np.cos(self.inclination)*np.sin(spot_pos[:,0])*np.cos(spot_pos[:,1])+np.sin(self.inclination)*np.cos(spot_pos[:,0])
yspot = np.sin(spot_pos[:,0])*np.sin(spot_pos[:,1])
zspot = np.cos(spot_pos[:,0])*np.cos(self.inclination)-np.sin(self.inclination)*np.sin(spot_pos[:,0])*np.cos(spot_pos[:,1])
vec_spot[:,:]=np.array([xspot,yspot,zspot]).T #spot center in cartesian
#COMPUTE IF ANY SPOT IS VISIBLE
vis=np.zeros(len(vec_spot)+1)
for i in range(len(vec_spot)):
dist=m.acos(np.dot(vec_spot[i],np.array([1,0,0])))
if (dist-spot_pos[i,2]*np.sqrt(1+self.facular_area_ratio)) <= (np.pi/2):
vis[i]=1.0
if (planet_pos[0]-planet_pos[2]<1):
vis[-1]=1.0
#Loop for each ring.
if (np.sum(vis)==0.0):
flux[k],typ, filling_ph[k], filling_sp[k], filling_fc[k], filling_pl[k] = flxph, [[1.0,0.0,0.0,0.0]]*np.sum(Ngrid_in_ring), np.dot(Ngrid_in_ring,pare), 0.0, 0.0, 0.0
else:
flux[k],typ, filling_ph[k], filling_sp[k], filling_fc[k], filling_pl[k] = nbspectra.loop_generate_rotating_lc_nb(N,Ngrid_in_ring,pare,amu,spot_pos,vec_grid,vec_spot,simulate_planet,planet_pos,bph,bsp,bfc,flxph,vis)
filling_ph[k]=100*filling_ph[k]/np.dot(Ngrid_in_ring,pare)
filling_sp[k]=100*filling_sp[k]/np.dot(Ngrid_in_ring,pare)
filling_fc[k]=100*filling_fc[k]/np.dot(Ngrid_in_ring,pare)
filling_pl[k]=100*filling_pl[k]/np.dot(Ngrid_in_ring,pare)
if not inversion:
sys.stdout.write("\rDate {0}. ff_ph={1:.3f}%. ff_sp={2:.3f}%. ff_fc={3:.3f}%. ff_pl={4:.3f}%. [{5}/{6}]%".format(t,filling_ph[k],filling_sp[k],filling_fc[k],filling_pl[k],k+1,len(self.obs_times)))
if plot_map:
plot_spot_map_grid(self,vec_grid,typ,self.inclination,t)
return self.obs_times, flux/flxph, filling_ph, filling_sp, filling_fc, filling_pl
########################################################################################
########################################################################################
# SPECTROSCOPY FUNCTIONS #
########################################################################################
########################################################################################
def interpolate_Phoenix(self,temp,grav,plot=False):
"""Cut and interpolate phoenix models at the desired wavelengths, temperatures, logg and metalicity(not yet). For spectroscopy.
Inputs
temp: temperature of the model;
grav: logg of the model
Returns
creates a temporal file with the interpolated spectra at the temp and grav desired, for each surface element.
"""
    #TODO (translated): also ask for the resolution and handle it here.
import warnings
warnings.filterwarnings("ignore")
    path = self.path / 'models' / 'Phoenix' #path relative to the working directory
files = [x.name for x in path.glob('lte*fits') if x.is_file()]
list_temp=np.unique([float(t[3:8]) for t in files])
list_grav=np.unique([float(t[9:13]) for t in files])
#check if the parameters are inside the grid of models
if grav<np.min(list_grav) or grav>np.max(list_grav):
sys.exit('Error in the interpolation of Phoenix models. The desired logg is outside the grid of models, extrapolation is not supported. Please download the \
Phoenix models covering the desired logg from http://phoenix.astro.physik.uni-goettingen.de/data/HiResFITS/PHOENIX-ACES-AGSS-COND-2011/')
if temp<np.min(list_temp) or temp>np.max(list_temp):
        sys.exit('Error in the interpolation of Phoenix models. The desired T is outside the grid of models, extrapolation is not supported. Please download the \
        Phoenix models covering the desired T from http://phoenix.astro.physik.uni-goettingen.de/data/HiResFITS/PHOENIX-ACES-AGSS-COND-2011/')
lowT=list_temp[list_temp<=temp].max() #find the model with the temperature immediately below the desired temperature
uppT=list_temp[list_temp>=temp].min() #find the model with the temperature immediately above the desired temperature
lowg=list_grav[list_grav<=grav].max() #find the model with the logg immediately below the desired logg
uppg=list_grav[list_grav>=grav].min() #find the model with the logg immediately above the desired logg
#load the Phoenix wavelengths.
if not (path / 'WAVE_PHOENIX-ACES-AGSS-COND-2011.fits').exists():
sys.exit('Error in reading the file WAVE_PHOENIX-ACES-AGSS-COND-2011.fits. Please download it from http://phoenix.astro.physik.uni-goettingen.de/data/HiResFITS/PHOENIX-ACES-AGSS-COND-2011/')
with fits.open(path / 'WAVE_PHOENIX-ACES-AGSS-COND-2011.fits') as hdul:
wavelength=hdul[0].data
#cut the wavelength at the ranges set by the user. Adding an overhead of 0.1 nm to allow for high Doppler shifts without losing info
overhead=1.0 #Angstrom
idx_wv=np.array(wavelength>self.wavelength_lower_limit-overhead) & np.array(wavelength<self.wavelength_upper_limit+overhead)
#load the flux of the four phoenix model
name_lowTlowg='lte{:05d}-{:.2f}-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits'.format(int(lowT),lowg)
name_lowTuppg='lte{:05d}-{:.2f}-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits'.format(int(lowT),uppg)
name_uppTlowg='lte{:05d}-{:.2f}-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits'.format(int(uppT),lowg)
name_uppTuppg='lte{:05d}-{:.2f}-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits'.format(int(uppT),uppg)
#Check if the files exist in the folder
if name_lowTlowg not in files:
sys.exit('The file '+name_lowTlowg+' required for the interpolation does not exist. Please download it from http://phoenix.astro.physik.uni-goettingen.de/data/HiResFITS/PHOENIX-ACES-AGSS-COND-2011/ and add it to your path')
if name_lowTuppg not in files:
sys.exit('The file '+name_lowTuppg+' required for the interpolation does not exist. Please download it from http://phoenix.astro.physik.uni-goettingen.de/data/HiResFITS/PHOENIX-ACES-AGSS-COND-2011/ and add it to your path')
if name_uppTlowg not in files:
sys.exit('The file '+name_uppTlowg+' required for the interpolation does not exist. Please download it from http://phoenix.astro.physik.uni-goettingen.de/data/HiResFITS/PHOENIX-ACES-AGSS-COND-2011/ and add it to your path')
if name_uppTuppg not in files:
sys.exit('The file '+name_uppTuppg+' required for the interpolation does not exist. Please download it from http://phoenix.astro.physik.uni-goettingen.de/data/HiResFITS/PHOENIX-ACES-AGSS-COND-2011/ and add it to your path')
#read flux files and cut at the desired wavelengths
with fits.open(path / name_lowTlowg) as hdul:
flux_lowTlowg=hdul[0].data[idx_wv]
with fits.open(path / name_lowTuppg) as hdul:
flux_lowTuppg=hdul[0].data[idx_wv]
with fits.open(path / name_uppTlowg) as hdul:
flux_uppTlowg=hdul[0].data[idx_wv]
with fits.open(path / name_uppTuppg) as hdul:
flux_uppTuppg=hdul[0].data[idx_wv]
#interpolate in temperature for the two gravities
if uppT==lowT: #to avoid nans
flux_lowg = flux_lowTlowg
flux_uppg = flux_lowTuppg
else:
flux_lowg = flux_lowTlowg + ( (temp - lowT) / (uppT - lowT) ) * (flux_uppTlowg - flux_lowTlowg)
flux_uppg = flux_lowTuppg + ( (temp - lowT) / (uppT - lowT) ) * (flux_uppTuppg - flux_lowTuppg)
#interpolate in log g
if uppg==lowg: #to avoid dividing by 0
flux = flux_lowg
else:
flux = flux_lowg + ( (grav - lowg) / (uppg - lowg) ) * (flux_uppg - flux_lowg)
#Normalize by fitting a 6th degree polynomial to the maximum of the bins of the binned spectra
#nbins depend on the Temperature and wavelength range. 20 bins seems to work for all reasonable parameters. With more bins it starts to pick absorption lines. Less bins degrades the fit.
bins=np.linspace(self.wavelength_lower_limit-overhead,self.wavelength_upper_limit+overhead,20)
wv= wavelength[idx_wv]
x_bin,y_bin=nbspectra.normalize_spectra_nb(bins,np.asarray(wv,dtype=np.float64),np.asarray(flux,dtype=np.float64))
# #divide by 6th deg polynomial
coeff = np.polyfit(x_bin, y_bin, 6)
flux_norm = flux / np.poly1d(coeff)(wv)
#plots to check normalization. For debugging purposes.
if plot:
plt.plot(wv,flux)
plt.plot(x_bin,y_bin,'ok')
plt.plot(wv,np.poly1d(coeff)(wv))
plt.show()
plt.close()
interpolated_spectra = np.array([wv,flux_norm,flux])
return interpolated_spectra
def bisector_fit(self,rv,ccf,plot_test=False,kind_interp='linear',integrated_bis=False):
''' Fit the bisector of the CCF with a 5th deg polynomial
'''
xnew,ynew,xbis,ybis=nbspectra.speed_bisector_nb(rv,ccf,integrated_bis)
f = interpolate.interp1d(ybis,xbis,kind=kind_interp,fill_value=(xbis[0],xbis[-1]),bounds_error=False) #return a function rv=f(ccf) interpolating the BIS for all values of ccf height.
    if plot_test: #for debugging purposes
ys=np.linspace(0,1,1000)
# xs = f(ys)
# plt.plot(xs,ys)
plt.plot(xbis,ybis,'.')
plt.plot(rv,ccf)
# plt.plot(xnew,ynew)
plt.show()
return f
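# Usage sketch (added note): f = bisector_fit(self, rv, ccf) returns an interpolator such that
# f(h) is the bisector RV at a CCF height h in [0, 1]; outside the sampled heights it falls back
# to the first/last bisector points via fill_value.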
def cifist_coeff_interpolate(amu):
'''Interpolate the cifist bisectors as a function of the projected angle
'''
amv=np.arange(1,0.0,-0.1) #list of angles defined in cfist
if amu<=0.1:
amv_low=0
else:
amv_low=np.max(amv[amv<amu]) #angles above and below the proj. angle of the grid
idx_low=np.where(amv==amv_low)[0][0] #find indexs of below and above angles
amv_upp=np.min(amv[amv>=amu])
idx_upp=np.where(amv==amv_upp)[0][0]
cxm=np.zeros([len(amv),7]) #coeff of the bisectors. NxM, N is number of angles, M=7, the degree of the polynomial
#PARAMS FROM A CCF COMPUTED WITH HARPS MASK.
cxm[0,:]=np.array([-3.51974861,11.1702017,-13.22368296,6.67694456,-0.63201573,-0.44695616,-0.36838495]) #1.0
cxm[1,:]=np.array([-4.05903967,13.21901003,-16.47215949,9.51023171,-2.13104764,-0.05153799,-0.36973749]) #0.9
cxm[2,:]=np.array([-3.92153131,12.76694663,-15.96958217,9.39599116,-2.34394028,0.12546611,-0.42092905]) #0.8
cxm[3,:]=np.array([-3.81892968,12.62209118,-16.06973368,9.71487198,-2.61439945,0.25356088,-0.43310756]) #0.7
cxm[4,:]=np.array([-5.37213406,17.6604689,-22.52477323,13.91461247,-4.13186181,0.60271171,-0.46427559]) #0.6
cxm[5,:]=np.array([-6.35351933,20.92046705,-26.83933359,16.86220487,-5.28285592,0.90643187,-0.47696283]) #0.5
cxm[6,:]=np.array([-7.67270144,25.60866105,-33.4381214,21.58855269,-7.1527039,1.35990694,-0.48001707]) #0.4
cxm[7,:]=np.array([-9.24152009,31.09337903,-41.07410957,27.04196984,-9.32910982,1.89291407,-0.455407]) #0.3
cxm[8,:]=np.array([-11.62006536,39.30962189,-52.38161244,34.98243089,-12.40650704,2.57940618,-0.37337442]) #0.2
cxm[9,:]=np.array([-14.14768805,47.9566719,-64.20294114,43.23156971,-15.57423374,3.13318175,-0.14451226]) #0.1
#PARAMS FROM A CCF COMPUTED WITH PHOENIX TEMPLATE T=5770
# cxm[0,:]=np.array([1.55948401e+01, -5.59100775e+01, 7.98788742e+01, -5.79129621e+01, 2.23124361e+01, -4.37451926e+00, 2.76815127e-02 ])
# cxm[1,:]=np.array([1.48171843e+01, -5.31901561e+01, 7.60918868e+01, -5.51846846e+01, 2.12359712e+01, -4.15656905e+00, 3.09723630e-02 ])
# cxm[2,:]=np.array([1.26415104e+01, -4.56361886e+01, 6.57500389e+01, -4.81159578e+01, 1.87476161e+01, -3.73215320e+00, -2.45358044e-02 ])
# cxm[3,:]=np.array([1.10344258e+01, -3.99142119e+01, 5.76936246e+01, -4.24457366e+01, 1.66941114e+01, -3.37376671e+00, -4.49380604e-02 ])
# cxm[4,:]=np.array([9.9741693 , -36.19064232, 52.47896315, -38.75624903, 15.32328162, -3.09800143, -0.07223029 ])
# cxm[5,:]=np.array([9.76117497, -35.11883268, 50.48605512, -36.96972057, 14.50139362, -2.88347426, -0.08276774]) #0.5
# cxm[6,:]=np.array([10.38959989, -36.94083878, 52.3841557 , -37.73932243,14.50154753, -2.76975367, -0.07371497 ]) #0.4
# cxm[7,:]=np.array([1.18987101e+01, -4.18327688e+01, 5.84865087e+01, -4.13494763e+01, 1.54611520e+01, -2.78820894e+00, -2.90506536e-02 ]) #0.3
# cxm[8,:]=np.array([13.77559813, -48.38724031, 67.48002787, -47.40940284, 17.46750576, -3.01431973, 0.09248942 ]) #0.2
# cxm[9,:]=np.array([16.73411412, -59.08156701, 82.84718709, -58.44626604, 21.52853771, -3.72660173, 0.37589346 ]) #0.1
#extrapolate for amu<0.1
if amu<=0.1:
cxu=cxm[9]+(cxm[8]-cxm[9])*(amu-amv[9])/(amv[8]-amv[9])
else: #interpolate
cxu=cxm[idx_low]+(cxm[idx_upp]-cxm[idx_low])*(amu-amv[idx_low])/(amv[idx_upp]-amv[idx_low])
p=np.poly1d(cxu) #numpy function to generate the RV for any given CCF value
return p
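# Usage sketch (added note): p = cifist_coeff_interpolate(0.55) returns a np.poly1d built from
# coefficients interpolated between the mu=0.5 and mu=0.6 CIFIST rows; evaluating p(h) at a CCF
# height h then gives the corresponding bisector RV shift.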
def dumusque_coeffs(amu):
coeffs=np.array([-1.51773453, 3.52774949, -3.18794328, 1.22541774, -0.22479665]) #Polynomial fit to ccf in Fig 2 of Dumusque 2014, plus 400m/s to match Fig6 in Herrero 2016
p=np.poly1d(coeffs)
return p
def compute_immaculate_photosphere_rv(self,Ngrid_in_ring,acd,amu,pare,flpk,rv_ph,rv,ccf,rvel):
    '''Assign the CCF to each grid element, Doppler-shift it, add limb darkening and bisectors, in order to compute the CCF of the immaculate photosphere.
input:
acd: angles of the kurucz model
flnp: flux of the HR norm. spectra.
flpk_kur: flux of the kurucz models
dlnp: LD coeffs of the kurucz model for the different angles
'''
N = self.n_grid_rings #Number of concentric rings
    flxph = 0.0 #initialize flux of photosphere
sccf=np.zeros(N)
for i in range(0,N): #Loop for each ring, to compute the flux of the star.
#Interpolate Phoenix intensities at the corresponding mu angle. Then HR spectra at mu is HR spectra * (spectra at mu/integrated spectra)
if self.use_phoenix_limb_darkening:
acd_low=np.max(acd[acd<amu[i]]) #angles above and below the proj. angle of the grid
            acd_upp=np.min(acd[acd>=amu[i]])
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import tensorflow as tf
import numpy as np
import gc
import pandas as pd
from datetime import datetime
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.decomposition import PCA
from tensorflow import keras
# In[2]:
df = pd.read_csv("../Dataset/02-03-2018.csv", low_memory = False)
# In[3]:
df = df.drop([0,1])
# In[4]:
input_label = np.array(df.loc[:, df.columns != "Label"])
import numpy as np
import pandas as pd
import h3.api.numpy_int as h3
import multiprocessing as mp
import math
import geo.geomath as gm
from functools import partial
from timeit import default_timer as timer
class GeoBrute(object):
def __init__(self, locations: np.ndarray):
self.lats = locations[:, 0]
self.lons = locations[:, 1]
def query_radius(self,
location: np.ndarray,
r: float) -> (np.ndarray, np.ndarray):
"""
Selects the indices of the points that lie within a given distance from
a given location.
:param location: Location to query in [lat, lon] format
:param r: Radius in meters
:return: Array of indices
"""
lat = location[0, 0]
lon = location[0, 1]
dist = gm.vec_haversine(self.lats, self.lons, lat, lon)
return np.argwhere(dist <= r)
def query_knn(self, location: np.array, k: int) -> np.array:
dist = gm.vec_haversine(self.lats, self.lons,
location[0], location[1])
idx = np.argsort(dist)
return idx[:k], dist[idx[:k]]
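# Usage sketch (names hypothetical): gb = GeoBrute(locations); gb.query_radius(np.array([[lat, lon]]), 500)
# returns the indices of every point within 500 m, and gb.query_knn((lat, lon), 10) the ten nearest
# points -- both at the cost of a full O(N) haversine pass per query, which GeoSpoke below avoids.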
def get_slice(dim: int, i: int, k: int) -> np.ndarray:
return slice(max(0, i - k), min(dim - 1, i + k) + 1)
def calculate_sorted_distances(latitudes, longitudes, lat, lon):
dist = gm.vec_haversine(latitudes, longitudes, lat, lon)
idx = np.argsort(dist)
return idx, dist[idx]
class GeoSpoke(object):
def __init__(self, locations: np.ndarray):
self.lats = locations[:, 0]
self.lons = locations[:, 1]
min_lat, max_lat = self.lats.min(), self.lats.max()
min_lon, max_lon = self.lons.min(), self.lons.max()
h = gm.num_haversine(min_lat, min_lon, max_lat, min_lon)
w = gm.num_haversine(min_lat, min_lon, min_lat, max_lon)
self.density = locations.shape[0] / (w * h)
if max_lat > 0:
self.lat0 = self.lat1 = min_lat - 90
else:
self.lat0 = self.lat1 = max_lat + 90
self.lon0 = (max_lon - min_lon) / 2 - 45
self.lon1 = self.lon0 + 90
self.idx0, self.sorted0 = calculate_sorted_distances(self.lats, self.lons, self.lat0, self.lon0)
self.idx1, self.sorted1 = calculate_sorted_distances(self.lats, self.lons, self.lat1, self.lon1)
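    # The idea used by query_radius below (added note): distances from every point to two fixed,
    # far-away "spoke" anchors are precomputed and sorted, so a radius query reduces to two binary
    # searches (np.searchsorted) per anchor plus an index intersection, rather than a full scan.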
def query_radius(self,
location: np.ndarray,
r: float) -> np.ndarray:
"""
Selects the indices of the points that lie within a given distance from
a given location.
:param location: Location to query in [lat, lon] format
:param r: Radius in meters
:return: Array of indices
"""
lat = location[0]
lon = location[1]
d0 = gm.num_haversine(lat, lon, self.lat0, self.lon0)
d1 = gm.num_haversine(lat, lon, self.lat1, self.lon1)
i0 = np.searchsorted(self.sorted0, d0 - r)
i1 = np.searchsorted(self.sorted0, d0 + r)
match0 = self.idx0[i0:i1 + 1]
i0 = np.searchsorted(self.sorted1, d1 - r)
i1 = np.searchsorted(self.sorted1, d1 + r)
match1 = self.idx1[i0:i1 + 1]
        intersect = np.intersect1d(match0, match1)
from __future__ import division
import os
import sys
import cv2
import argparse
import glob
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage import draw, transform
from scipy.optimize import minimize
from PIL import Image
import objs
import utils
#fp is in cam-ceil normal, height is in cam-floor normal
def data2scene(fp_points, height):
# cam-ceiling / cam-floor
scale = (height - 1.6) / 1.6
#layout_fp, fp_points = fit_layout(fp, scale=None, max_cor=12)
size = 512
ratio = 20/size
fp_points = fp_points.astype(float)
fp_points[0] -= size/2
fp_points[1] -= size/2
fp_points *= scale
fp_points[0] += size/2
fp_points[1] += size/2
fp_points = fp_points.astype(int)
scene = objs.Scene()
scene.cameraHeight = 1.6
scene.layoutHeight = float(height)
scene.layoutPoints = []
for i in range(fp_points.shape[1]):
fp_xy = (fp_points[:,i] - size/2) * ratio
xyz = (fp_xy[1], 0, fp_xy[0])
scene.layoutPoints.append(objs.GeoPoint(scene, None, xyz))
scene.genLayoutWallsByPoints(scene.layoutPoints)
scene.updateLayoutGeometry()
return scene
def f1_score(pred, gt):
TP = np.zeros(gt.shape); FP = np.zeros(gt.shape)
FN = np.zeros(gt.shape); TN = np.zeros(gt.shape)
TP[(pred==gt) & (pred == 1)] = 1
FP[(pred!=gt) & (pred == 1)] = 1
FN[(pred!=gt) & (gt == 1)] = 1
TN[(pred==gt) & (pred == 0)] = 1
    TP = np.sum(TP)
import os
import random
import cv2
import keras
import numpy as np
import pandas as pd
import tifffile as tiff
from keras import backend as K
from keras.backend import binary_crossentropy
from keras.callbacks import ModelCheckpoint
from keras.layers import concatenate, Conv2D, Input, MaxPooling2D, UpSampling2D, Cropping2D, Convolution2D, Flatten, \
Dense, Activation, Dropout
from keras.layers.normalization import BatchNormalization
from keras.models import Model, Sequential
from keras.optimizers import Nadam
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
n_class = 10
Dir = '/home/yokoyang/PycharmProjects/untitled/896_biaozhu'
train_img = pd.read_csv(Dir + '/data_imageID.csv')
Image_ID = sorted(train_img.ImageId.unique())
N_split = 4
Patch_size = 192
crop_size = 224
edge_size = int((crop_size - Patch_size) / 2)
Class_Type = 1
Scale_Size = Patch_size * N_split
get_size = 231
smooth = 1e-12
def get_mask(image_id):
filename = os.path.join(
Dir, 'mix_all', '{}.npy'.format(image_id))
    msk = np.load(filename)
""" Masks areas to be carved out based on contour """
import itertools
import numpy, scipy.interpolate, numpy.random
import vec
narrowing_factor = 1.5 # Used when river occupies both sides of a chunk
corner_radius_offset = 0.9
river_deviation_centre = (-2, 2)
river_deviation_width = (-1, 1)
river_frequency_centre = 5.1
river_frequency_width = 2.8
class ChunkSeed(object):
"""
Used to seed generation of chunk specific features such
as winding rivers.
"""
def __init__(self, level_seed, location):
self.level_seed = numpy.cast[int](numpy.array(level_seed))
self.location = numpy.cast[int](numpy.array(location))
def offset(self, relative):
"""
Returns another ChunkSeed object for a chunk offset
by the specified amount.
"""
return ChunkSeed(self.level_seed, self.location + numpy.array(relative))
def __side_seed(self, side):
# Generated seeds will be the same for shared edges
side = self.location + numpy.cast[int]((side + numpy.ones(len(side)))/2)
return side*self.level_seed
def centre_seed(self, side):
""" Seed for river centre generation """
return numpy.cast[numpy.int32](self.__side_seed(side))
def width_seed(self, side):
""" Seed for river width generation """
return numpy.cast[numpy.int32](self.__side_seed(side)*2)
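# Note (added comment): ChunkSeed.__side_seed folds the edge direction into the chunk location so
# that the two chunks sharing an edge derive identical centre/width seeds for it -- this is what
# lets the river meander generated below line up across chunk boundaries.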
class Meander(object):
"""
Using the 'seed' integer, used to produce a series of
values sampled at an integral interval, interpolated from
a random series at interval 'step' found in the
specified 'range'.
If a final value is specified for the output series
then it's allowed to deviate by the 'final_precision'
fraction of the full range.
"""
def __init__(self, seed, step, range=(-1, 1), final_precision=0.05):
self.seed = seed
self.step = step
self.range = range
self.final_precision = final_precision
@property
def seed(self):
return self._seed
@seed.setter
def seed(self, val):
# Numpy now enforces mtrand 32-bit seed integer restriction
self._seed = val & 0xffffffff
def first(self):
"""
Return value of the first point of the generated
series.
"""
gen = numpy.random.mtrand.RandomState(self.seed)
return int(numpy.round(gen.uniform(self.range[0], self.range[1], 1)[0]))
def series(self, points, final=None):
"""
Produces a 'points' number long series of interpolated
values. If a 'final' vale is supplied then the last
value in the returned series will match this value to
within the precision specified by 'final_precision'.
"""
# Get the source random samples
source_points = int(numpy.ceil(float(points)/self.step))
gen = numpy.random.mtrand.RandomState(self.seed)
y1 = gen.uniform(self.range[0], self.range[1], source_points)
#x1 = numpy.linspace(-(float(source_points) % step), float(points) - 1, source_points)
x1 = numpy.linspace(0, float(points) + float(source_points) % self.step - 1, source_points)
# Adjust final sample to meet required result
if final is not None:
accept = abs(self.range[1] - self.range[0])*self.final_precision
for i in xrange(0, 20): # Really shouldn't go deeper than this but let's be sure
f = scipy.interpolate.interp1d(x1, y1, kind='cubic')
error = final - f(float(points) - 1)
if abs(error) < accept:
break
else:
y1[-1] = y1[-1] + error
# Find interpolated points
x2 = numpy.linspace(0.0, float(points) - 1, points)
y2 = scipy.interpolate.interp1d(x1, y1, kind='cubic')(x2)
return numpy.cast[int](numpy.round(y2))
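# Usage sketch (added note): Meander(seed, river_frequency_centre, river_deviation_centre).series(16, final=0)
# yields 16 integer offsets, cubic-interpolated through coarser random samples and nudged so the
# final value agrees with `final` to within final_precision of the range -- see river_shore below.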
def river_shore(shape, seed, base_width, v):
"""
Produce a series of points representing a meandering river width
"""
# Set up some required variables
axis, axis_inv = (0, 1) if v[0] != 0 else (1, 0)
next = numpy.ones(len(v), v.dtype); next[axis] = 0
centre_range = numpy.array(river_deviation_centre)
width_range = numpy.array(river_deviation_width)
# Discover the final point in the sequence based on the next block over
final_centre = Meander(seed.offset(next).centre_seed(v), river_frequency_centre, centre_range).first()
final_width = Meander(seed.offset(next).width_seed(v), river_frequency_width, width_range).first()
# Find the centre and width sequences that will contribute to the overall river
river_centres = Meander(seed.centre_seed(v), river_frequency_centre, centre_range).series(shape[axis_inv], final_centre)
river_widths = Meander(seed.width_seed(v), river_frequency_width, width_range).series(shape[axis_inv], final_width)
# Add everything up and make sure river never moves out of the chunk
widths = (base_width + c*v[axis] + w for c, w in itertools.izip(river_centres, river_widths))
return [w if w > 1 else 1 for w in widths]
def trace_ellipse(centre, axes, bound=((0, 0), (15, 15))):
"""
Trace the pixels of a quadrant of a specified ellipse
constrained to within a given window.
"""
# Ellipse interior checking function
    abs_axes = numpy.abs(numpy.array(axes))
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import sys
import numpy as np
import pyarrow as pa
tensor_type_pairs = [
('i1', pa.int8()),
('i2', pa.int16()),
('i4', pa.int32()),
('i8', pa.int64()),
('u1', pa.uint8()),
('u2', pa.uint16()),
('u4', pa.uint32()),
('u8', pa.uint64()),
('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())
]
@pytest.mark.parametrize('sparse_tensor_type', [
pa.SparseCSRMatrix,
pa.SparseCOOTensor,
])
def test_sparse_tensor_attrs(sparse_tensor_type):
data = np.array([
[0, 1, 0, 0, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
[0, 3, 0, 0, 0],
])
dim_names = ['x', 'y']
sparse_tensor = sparse_tensor_type.from_dense_numpy(data, dim_names)
assert sparse_tensor.ndim == 2
assert sparse_tensor.size == 25
assert sparse_tensor.shape == data.shape
assert sparse_tensor.is_mutable
assert sparse_tensor.dim_name(0) == dim_names[0]
assert sparse_tensor.dim_names == dim_names
assert sparse_tensor.non_zero_length == 4
def test_sparse_tensor_coo_base_object():
data = np.array([[4], [9], [7], [5]])
coords = np.array([[0, 0], [0, 2], [1, 1], [3, 3]])
array = np.array([[4, 0, 9, 0],
[0, 7, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 5]])
sparse_tensor = pa.SparseCOOTensor.from_dense_numpy(array)
n = sys.getrefcount(sparse_tensor)
result_data, result_coords = sparse_tensor.to_numpy()
assert sys.getrefcount(sparse_tensor) == n + 2
sparse_tensor = None
assert np.array_equal(data, result_data)
assert np.array_equal(coords, result_coords)
assert result_coords.flags.f_contiguous # column-major
def test_sparse_tensor_csr_base_object():
data = np.array([[1], [2], [3], [4], [5], [6]])
indptr = np.array([0, 2, 3, 6])
indices = np.array([0, 2, 2, 0, 1, 2])
array = np.array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
sparse_tensor = pa.SparseCSRMatrix.from_dense_numpy(array)
n = sys.getrefcount(sparse_tensor)
result_data, result_indptr, result_indices = sparse_tensor.to_numpy()
assert sys.getrefcount(sparse_tensor) == n + 3
sparse_tensor = None
assert np.array_equal(data, result_data)
assert np.array_equal(indptr, result_indptr)
assert np.array_equal(indices, result_indices)
@pytest.mark.parametrize('sparse_tensor_type', [
pa.SparseCSRMatrix,
pa.SparseCOOTensor,
])
def test_sparse_tensor_equals(sparse_tensor_type):
def eq(a, b):
assert a.equals(b)
assert a == b
assert not (a != b)
def ne(a, b):
assert not a.equals(b)
assert not (a == b)
assert a != b
data = np.random.randn(10, 6)[::, ::2]
sparse_tensor1 = sparse_tensor_type.from_dense_numpy(data)
sparse_tensor2 = sparse_tensor_type.from_dense_numpy(
np.ascontiguousarray(data))
eq(sparse_tensor1, sparse_tensor2)
data = data.copy()
data[9, 0] = 1.0
sparse_tensor2 = sparse_tensor_type.from_dense_numpy(
np.ascontiguousarray(data))
ne(sparse_tensor1, sparse_tensor2)
@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs)
def test_sparse_tensor_coo_from_dense(dtype_str, arrow_type):
dtype = np.dtype(dtype_str)
data = np.array([[4], [9], [7], [5]]).astype(dtype)
coords = np.array([[0, 0], [0, 2], [1, 1], [3, 3]])
array = np.array([[4, 0, 9, 0],
[0, 7, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 5]]).astype(dtype)
tensor = pa.Tensor.from_numpy(array)
# Test from numpy array
sparse_tensor = pa.SparseCOOTensor.from_dense_numpy(array)
repr(sparse_tensor)
assert sparse_tensor.type == arrow_type
result_data, result_coords = sparse_tensor.to_numpy()
assert np.array_equal(data, result_data)
assert np.array_equal(coords, result_coords)
# Test from Tensor
sparse_tensor = pa.SparseCOOTensor.from_tensor(tensor)
repr(sparse_tensor)
assert sparse_tensor.type == arrow_type
result_data, result_coords = sparse_tensor.to_numpy()
assert np.array_equal(data, result_data)
assert np.array_equal(coords, result_coords)
@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs)
def test_sparse_tensor_csr_from_dense(dtype_str, arrow_type):
dtype = np.dtype(dtype_str)
dense_data = np.array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]]).astype(dtype)
data = np.array([[1], [2], [3], [4], [5], [6]])
indptr = np.array([0, 2, 3, 6])
indices = np.array([0, 2, 2, 0, 1, 2])
tensor = pa.Tensor.from_numpy(dense_data)
# Test from numpy array
sparse_tensor = pa.SparseCSRMatrix.from_dense_numpy(dense_data)
repr(sparse_tensor)
result_data, result_indptr, result_indices = sparse_tensor.to_numpy()
assert np.array_equal(data, result_data)
    assert np.array_equal(indptr, result_indptr)
import numpy as np
def braille():
return {
'a' : np.array([[1, 0], [0, 0], [0, 0]], dtype=bool),
'b' : np.array([[1, 0], [1, 0], [0, 0]], dtype=bool),
'c' : np.array([[1, 1], [0, 0], [0, 0]], dtype=bool),
'd' : np.array([[1, 1], [0, 1], [0, 0]], dtype=bool),
'e' : np.array([[1, 0], [0, 1], [0, 0]], dtype=bool),
'f' : np.array([[1, 1], [1, 0], [0, 0]], dtype=bool),
'g' : np.array([[1, 1], [1, 1], [0, 0]], dtype=bool),
        'h' : np.array([[1, 0], [1, 1], [0, 0]], dtype=bool),
__author__ = 'jatwood'
import numpy as np
def rw_laplacian(A):
Dm1 = np.zeros(A.shape)
degree = A.sum(0)
for i in range(A.shape[0]):
if degree[i] == 0:
Dm1[i,i] = 0.
else:
Dm1[i,i] = - 1. / degree[i]
return -np.asarray(Dm1.dot(A),dtype='float32')
def laplacian(A):
D = np.zeros(A.shape)
out_degree = A.sum(0)
for i in range(A.shape[0]):
D[i,i] = out_degree[i]
return np.asarray(D - A, dtype='float32')
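# Worked example (added note): for the two-node path graph A = [[0, 1], [1, 0]], laplacian(A)
# gives [[1, -1], [-1, 1]], while rw_laplacian(A) gives D^-1 A = [[0, 1], [1, 0]] (the leading
# minus sign cancels the -1/degree entries stored in Dm1).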
def A_power_series(A,k):
"""
Computes [A**0, A**1, ..., A**k]
:param A: 2d numpy array
:param k: integer, degree of series
:return: 3d numpy array [A**0, A**1, ..., A**k]
"""
assert k >= 0
Apow = [np.identity(A.shape[0])]
if k > 0:
Apow.append(A)
for i in range(2, k+1):
Apow.append(np.dot(A, Apow[-1]))
return np.asarray(Apow, dtype='float32')
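# e.g. A_power_series(A, 2) returns an array of shape (3, n, n) holding [I, A, A @ A].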
def adjacency_matrix_to_list(A):
adjacency_list = []
n_nodes = A.shape[0]
for i in range(n_nodes):
for j in range(n_nodes):
if A[i,j] == 1:
adjacency_list.append([i,j])
    return np.asarray(adjacency_list, 'float32')
"""
A test that validates that the automatically optimized approximation to the analytical solution
for the real coverage doesn't deviate from the precise analytical solution to any significant degree.
See:
• "efficacy of an arbitrary CI method for proportions - analytical solution.jpg"
• "CI_efficacy_proportion.py".
CImethodForProportion_efficacyToolkit.calculate_coverage_analytically()
• "CI_efficacy_diff_betw_two_proportions.py".
CImethodForDiffBetwTwoProportions_efficacyToolkit.calculate_coverage_analytically()
"""
import time
import numpy as np
from CI_methods_analyser.data_functions import float_to_str
from CI_methods_analyser.CI_efficacy_proportion import CImethodForProportion_efficacyToolkit
from CI_methods_analyser.methods_for_CI_for_proportion import wald_interval, wilson_score_interval
from Tests.plot_difference import plot_relative_difference
"""
Comparing to z_precision=9 as the maximum precision. Why 9?
https://www.wolframalpha.com/input/?i=9+sigmas
The two-tailed p-value at 9 sigmas is about 2e-19, which is only about twice the maximum
resolution of the 63-bit mantissa of np.float128: roughly 1 part in 1e19, i.e. a sensitivity
of about 1e-19 per unit.
It means that the values outside 9 sigmas all add up to about 2e-19.
Therefore the individual `y` values of a given binomial distribution outside 9 sigmas
don't exceed 2e-19, and only values of at least 1e-19 can be added to a np.float128 value
of approximately from 0.5 to 0.9999999...
Thus, z_precision of 9 behaves here just like a maximum precision.
"""
print("")
print("===== CI test1 ======")
print("")
start_time = time.time()
proportions = ('0.001', '0.999', '0.003')
CI_test_1_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_1_auto.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.90)
CI_test_1b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_1b_auto.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.90)
CI_test_1_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_1_max.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.90, z_precision=9)
CI_test_1b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_1b_max.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.90, z_precision=9)
CI_test_2_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_2_auto.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.95)
CI_test_2b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_2b_auto.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.95)
CI_test_2_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_2_max.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.95, z_precision=9)
CI_test_2b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_2b_max.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.95, z_precision=9)
CI_test_3_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_3_auto.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.99)
CI_test_3b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_3b_auto.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.99)
CI_test_3_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_3_max.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.99, z_precision=9)
CI_test_3b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_3b_max.calculate_coverage_analytically(
sample_size=100, proportions=proportions, confidence=0.99, z_precision=9)
CI_test_4_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_4_auto.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.90)
CI_test_4b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_4b_auto.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.90)
CI_test_4_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_4_max.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.90, z_precision=9)
CI_test_4b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_4b_max.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.90, z_precision=9)
CI_test_5_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_5_auto.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.95)
CI_test_5b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_5b_auto.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.95)
CI_test_5_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_5_max.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test_5b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_5b_max.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test_6_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_6_auto.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.99)
CI_test_6b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_6b_auto.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.99)
CI_test_6_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test_6_max.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test_6b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test_6b_max.calculate_coverage_analytically(
sample_size=1000, proportions=proportions, confidence=0.99, z_precision=9)
print("CI test1 finished")
print("--- %s seconds ---" % float_to_str((time.time() - start_time), 5))
print("")
print("===== CI test2 ======")
print("")
start_time = time.time()
proportions = ('0.00001', '0.00999', '0.00004')
CI_test2_1_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_1_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.90)
CI_test2_1b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_1b_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.90)
CI_test2_1_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_1_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.90, z_precision=9)
CI_test2_1b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_1b_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.90, z_precision=9)
CI_test2_2_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_2_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.95)
CI_test2_2b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_2b_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.95)
CI_test2_2_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_2_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test2_2b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_2b_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test2_3_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_3_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.99)
CI_test2_3b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_3b_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.99)
CI_test2_3_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_3_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test2_3b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_3b_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test2_4_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_4_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.90)
CI_test2_4b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_4b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.90)
CI_test2_4_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_4_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.90, z_precision=9)
CI_test2_4b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_4b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.90, z_precision=9)
CI_test2_5_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_5_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.95)
CI_test2_5b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_5b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.95)
CI_test2_5_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_5_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test2_5b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_5b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test2_6_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_6_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99)
CI_test2_6b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_6b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99)
CI_test2_6_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test2_6_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test2_6b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test2_6b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99, z_precision=9)
print("CI test2 finished")
print("--- %s seconds ---" % float_to_str((time.time() - start_time), 5))
print("")
print("===== CI test3 ======")
print("")
start_time = time.time()
proportions = ('0.000001', '0.000999', '0.000004')
CI_test3_1_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_1_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.99)
CI_test3_1b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_1b_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.99)
CI_test3_1_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_1_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test3_1b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_1b_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test3_2_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_2_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.995)
CI_test3_2b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_2b_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.995)
CI_test3_2_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_2_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.995, z_precision=9)
CI_test3_2b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_2b_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.995, z_precision=9)
CI_test3_3_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_3_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.999)
CI_test3_3b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_3b_auto.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.999)
CI_test3_3_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_3_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test3_3b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_3b_max.calculate_coverage_analytically(
sample_size=10000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test3_4_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_4_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99)
CI_test3_4b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_4b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99)
CI_test3_4_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_4_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test3_4b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_4b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test3_5_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_5_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.995)
CI_test3_5b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_5b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.995)
CI_test3_5_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_5_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.995, z_precision=9)
CI_test3_5b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_5b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.995, z_precision=9)
CI_test3_6_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_6_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999)
CI_test3_6b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_6b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999)
CI_test3_6_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test3_6_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test3_6b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test3_6b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999, z_precision=9)
print("CI test3 finished")
print("--- %s seconds ---" % float_to_str((time.time() - start_time), 5))
print("")
print("===== CI test4 ======")
print("")
start_time = time.time()
proportions = ('0.000001', '0.000999', '0.000007')
CI_test4_1_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_1_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999)
CI_test4_1b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_1b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999)
CI_test4_1_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_1_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test4_1b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_1b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test4_2_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_2_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.9995)
CI_test4_2b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_2b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.9995)
CI_test4_2_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_2_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.9995, z_precision=9)
CI_test4_2b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_2b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.9995, z_precision=9)
CI_test4_3_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_3_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.9999)
CI_test4_3b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_3b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.9999)
CI_test4_3_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_3_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.9999, z_precision=9)
CI_test4_3b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_3b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.9999, z_precision=9)
CI_test4_4_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_4_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999)
CI_test4_4b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_4b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999)
CI_test4_4_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_4_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test4_4b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_4b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test4_5_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_5_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.9995)
CI_test4_5b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_5b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.9995)
CI_test4_5_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_5_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.9995, z_precision=9)
CI_test4_5b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_5b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.9995, z_precision=9)
CI_test4_6_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_6_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.9999)
CI_test4_6b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_6b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.9999)
CI_test4_6_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test4_6_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.9999, z_precision=9)
CI_test4_6b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test4_6b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.9999, z_precision=9)
print("CI test4 finished")
print("--- %s seconds ---" % float_to_str((time.time() - start_time), 5))
print("")
print("===== CI test5 ======")
print("")
start_time = time.time()
proportions = ('0.0000001', '0.0000199', '0.0000002')
CI_test5_1_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_1_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999)
CI_test5_1b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_1b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999)
CI_test5_1_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_1_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test5_1b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_1b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test5_2_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_2_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995)
CI_test5_2b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_2b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995)
CI_test5_2_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_2_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test5_2b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_2b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test5_3_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_3_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999)
CI_test5_3b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_3b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999)
CI_test5_3_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_3_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test5_3b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_3b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test5_4_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_4_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999)
CI_test5_4b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_4b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999)
CI_test5_4_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_4_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test5_4b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_4b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test5_5_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_5_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995)
CI_test5_5b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_5b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995)
CI_test5_5_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_5_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test5_5b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_5b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test5_6_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_6_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999)
CI_test5_6b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_6b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999)
CI_test5_6_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test5_6_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test5_6b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test5_6b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999, z_precision=9)
print("CI test5 finished")
print("--- %s seconds ---" % float_to_str((time.time() - start_time), 5))
print("")
print("===== CI test6 ======")
print("")
start_time = time.time()
proportions = ('0.0000001', '0.0001999', '0.0000011')
CI_test6_1_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_1_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999)
CI_test6_1b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_1b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999)
CI_test6_1_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_1_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test6_1b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_1b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test6_2_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_2_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995)
CI_test6_2b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_2b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995)
CI_test6_2_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_2_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test6_2b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_2b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test6_3_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_3_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999)
CI_test6_3b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_3b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999)
CI_test6_3_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_3_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test6_3b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_3b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test6_4_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_4_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999)
CI_test6_4b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_4b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999)
CI_test6_4_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_4_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test6_4b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_4b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test6_5_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_5_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995)
CI_test6_5b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_5b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995)
CI_test6_5_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_5_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test6_5b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_5b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test6_6_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_6_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999)
CI_test6_6b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_6b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999)
CI_test6_6_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test6_6_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test6_6b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test6_6b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999, z_precision=9)
print("CI test6 finished")
print("--- %s seconds ---" % float_to_str((time.time() - start_time), 5))
print("")
print("===== CI test7 ======")
print("")
start_time = time.time()
proportions = ('0.0001', '0.1999', '0.0019')
CI_test7_1_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_1_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999)
CI_test7_1b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_1b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999)
CI_test7_1_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_1_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test7_1b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_1b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test7_2_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_2_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995)
CI_test7_2b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_2b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995)
CI_test7_2_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_2_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test7_2b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_2b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test7_3_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_3_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999)
CI_test7_3b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_3b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999)
CI_test7_3_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_3_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test7_3b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_3b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test7_4_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_4_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999)
CI_test7_4b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_4b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999)
CI_test7_4_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_4_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test7_4b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_4b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99999, z_precision=9)
CI_test7_5_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_5_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995)
CI_test7_5b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_5b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995)
CI_test7_5_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_5_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test7_5b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_5b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999995, z_precision=9)
CI_test7_6_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_6_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999)
CI_test7_6b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_6b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999)
CI_test7_6_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test7_6_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999, z_precision=9)
CI_test7_6b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test7_6b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999999, z_precision=9)
print("CI test7 finished")
print("--- %s seconds ---" % float_to_str((time.time() - start_time), 5))
print("")
print("===== CI test8 ======")
print("")
start_time = time.time()
proportions = ('0.0001', '0.1999', '0.0019')
CI_test8_1_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_1_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.95)
CI_test8_1b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_1b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.95)
CI_test8_1_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_1_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test8_1b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_1b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test8_2_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_2_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99)
CI_test8_2b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_2b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99)
CI_test8_2_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_2_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test8_2b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_2b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test8_3_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_3_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999)
CI_test8_3b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_3b_auto.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999)
CI_test8_3_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_3_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test8_3b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_3b_max.calculate_coverage_analytically(
sample_size=100000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test8_4_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_4_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.95)
CI_test8_4b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_4b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.95)
CI_test8_4_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_4_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test8_4b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_4b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.95, z_precision=9)
CI_test8_5_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_5_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99)
CI_test8_5b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_5b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99)
CI_test8_5_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_5_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test8_5b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_5b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.99, z_precision=9)
CI_test8_6_auto = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_6_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999)
CI_test8_6b_auto = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_6b_auto.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999)
CI_test8_6_max = CImethodForProportion_efficacyToolkit(wald_interval, "Wald Interval")
CI_test8_6_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999, z_precision=9)
CI_test8_6b_max = CImethodForProportion_efficacyToolkit(wilson_score_interval, "Wilson Score Interval")
CI_test8_6b_max.calculate_coverage_analytically(
sample_size=1000000, proportions=proportions, confidence=0.999, z_precision=9)
print("CI test8 finished")
print("--- %s seconds ---" % float_to_str((time.time() - start_time), 5))
test_fig1 = plot_relative_difference(
np.array(CI_test_1_auto.coverage), np.array(CI_test_1_max.coverage),
plt_figure_num="CI test 1", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_1_auto.f.calculation_inputs()}")
test_fig1b = plot_relative_difference(
np.array(CI_test_1b_auto.coverage), np.array(CI_test_1b_max.coverage),
plt_figure_num="CI test 1b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_1b_auto.f.calculation_inputs()}")
test_fig2 = plot_relative_difference(
np.array(CI_test_2_auto.coverage), np.array(CI_test_2_max.coverage),
plt_figure_num="CI test 2", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_2_auto.f.calculation_inputs()}")
test_fig2b = plot_relative_difference(
np.array(CI_test_2b_auto.coverage), np.array(CI_test_2b_max.coverage),
plt_figure_num="CI test 2b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_2b_auto.f.calculation_inputs()}")
test_fig3 = plot_relative_difference(
np.array(CI_test_3_auto.coverage), np.array(CI_test_3_max.coverage),
plt_figure_num="CI test 3", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_3_auto.f.calculation_inputs()}")
test_fig3b = plot_relative_difference(
np.array(CI_test_3b_auto.coverage), np.array(CI_test_3b_max.coverage),
plt_figure_num="CI test 3b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_3b_auto.f.calculation_inputs()}")
test_fig4 = plot_relative_difference(
np.array(CI_test_4_auto.coverage), np.array(CI_test_4_max.coverage),
plt_figure_num="CI test 4", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_4_auto.f.calculation_inputs()}")
test_fig4b = plot_relative_difference(
np.array(CI_test_4b_auto.coverage), np.array(CI_test_4b_max.coverage),
plt_figure_num="CI test 4b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_4b_auto.f.calculation_inputs()}")
test_fig5 = plot_relative_difference(
np.array(CI_test_5_auto.coverage), np.array(CI_test_5_max.coverage),
plt_figure_num="CI test 5", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_5_auto.f.calculation_inputs()}")
test_fig5b = plot_relative_difference(
np.array(CI_test_5b_auto.coverage), np.array(CI_test_5b_max.coverage),
plt_figure_num="CI test 5b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_5b_auto.f.calculation_inputs()}")
test_fig6 = plot_relative_difference(
np.array(CI_test_6_auto.coverage), np.array(CI_test_6_max.coverage),
plt_figure_num="CI test 6", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_6_auto.f.calculation_inputs()}")
test_fig6b = plot_relative_difference(
np.array(CI_test_6b_auto.coverage), np.array(CI_test_6b_max.coverage),
plt_figure_num="CI test 6b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test_6b_auto.f.calculation_inputs()}")
test2_fig1 = plot_relative_difference(
np.array(CI_test2_1_auto.coverage), np.array(CI_test2_1_max.coverage),
plt_figure_num="CI test2 1", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_1_auto.f.calculation_inputs()}")
test2_fig1b = plot_relative_difference(
np.array(CI_test2_1b_auto.coverage), np.array(CI_test2_1b_max.coverage),
plt_figure_num="CI test2 1b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_1b_auto.f.calculation_inputs()}")
test2_fig2 = plot_relative_difference(
np.array(CI_test2_2_auto.coverage), np.array(CI_test2_2_max.coverage),
plt_figure_num="CI test2 2", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_2_auto.f.calculation_inputs()}")
test2_fig2b = plot_relative_difference(
np.array(CI_test2_2b_auto.coverage), np.array(CI_test2_2b_max.coverage),
plt_figure_num="CI test2 2b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_2b_auto.f.calculation_inputs()}")
test2_fig3 = plot_relative_difference(
np.array(CI_test2_3_auto.coverage), np.array(CI_test2_3_max.coverage),
plt_figure_num="CI test2 3", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_3_auto.f.calculation_inputs()}")
test2_fig3b = plot_relative_difference(
np.array(CI_test2_3b_auto.coverage), np.array(CI_test2_3b_max.coverage),
plt_figure_num="CI test2 3b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_3b_auto.f.calculation_inputs()}")
test2_fig4 = plot_relative_difference(
np.array(CI_test2_4_auto.coverage), np.array(CI_test2_4_max.coverage),
plt_figure_num="CI test2 4", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_4_auto.f.calculation_inputs()}")
test2_fig4b = plot_relative_difference(
np.array(CI_test2_4b_auto.coverage), np.array(CI_test2_4b_max.coverage),
plt_figure_num="CI test2 4b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_4b_auto.f.calculation_inputs()}")
test2_fig5 = plot_relative_difference(
np.array(CI_test2_5_auto.coverage), np.array(CI_test2_5_max.coverage),
plt_figure_num="CI test2 5", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_5_auto.f.calculation_inputs()}")
test2_fig5b = plot_relative_difference(
np.array(CI_test2_5b_auto.coverage), np.array(CI_test2_5b_max.coverage),
plt_figure_num="CI test2 5b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_5b_auto.f.calculation_inputs()}")
test2_fig6 = plot_relative_difference(
np.array(CI_test2_6_auto.coverage), np.array(CI_test2_6_max.coverage),
plt_figure_num="CI test2 6", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_6_auto.f.calculation_inputs()}")
test2_fig6b = plot_relative_difference(
np.array(CI_test2_6b_auto.coverage), np.array(CI_test2_6b_max.coverage),
plt_figure_num="CI test2 6b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test2_6b_auto.f.calculation_inputs()}")
test3_fig1 = plot_relative_difference(
np.array(CI_test3_1_auto.coverage), np.array(CI_test3_1_max.coverage),
plt_figure_num="CI test3 1", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_1_auto.f.calculation_inputs()}")
test3_fig1b = plot_relative_difference(
np.array(CI_test3_1b_auto.coverage), np.array(CI_test3_1b_max.coverage),
plt_figure_num="CI test3 1b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_1b_auto.f.calculation_inputs()}")
test3_fig2 = plot_relative_difference(
np.array(CI_test3_2_auto.coverage), np.array(CI_test3_2_max.coverage),
plt_figure_num="CI test3 2", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_2_auto.f.calculation_inputs()}")
test3_fig2b = plot_relative_difference(
np.array(CI_test3_2b_auto.coverage), np.array(CI_test3_2b_max.coverage),
plt_figure_num="CI test3 2b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_2b_auto.f.calculation_inputs()}")
test3_fig3 = plot_relative_difference(
np.array(CI_test3_3_auto.coverage), np.array(CI_test3_3_max.coverage),
plt_figure_num="CI test3 3", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_3_auto.f.calculation_inputs()}")
test3_fig3b = plot_relative_difference(
np.array(CI_test3_3b_auto.coverage), np.array(CI_test3_3b_max.coverage),
plt_figure_num="CI test3 3b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_3b_auto.f.calculation_inputs()}")
test3_fig4 = plot_relative_difference(
np.array(CI_test3_4_auto.coverage), np.array(CI_test3_4_max.coverage),
plt_figure_num="CI test3 4", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_4_auto.f.calculation_inputs()}")
test3_fig4b = plot_relative_difference(
np.array(CI_test3_4b_auto.coverage), np.array(CI_test3_4b_max.coverage),
plt_figure_num="CI test3 4b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_4b_auto.f.calculation_inputs()}")
test3_fig5 = plot_relative_difference(
np.array(CI_test3_5_auto.coverage), np.array(CI_test3_5_max.coverage),
plt_figure_num="CI test3 5", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_5_auto.f.calculation_inputs()}")
test3_fig5b = plot_relative_difference(
np.array(CI_test3_5b_auto.coverage), np.array(CI_test3_5b_max.coverage),
plt_figure_num="CI test3 5b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_5b_auto.f.calculation_inputs()}")
test3_fig6 = plot_relative_difference(
np.array(CI_test3_6_auto.coverage), np.array(CI_test3_6_max.coverage),
plt_figure_num="CI test3 6", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_6_auto.f.calculation_inputs()}")
test3_fig6b = plot_relative_difference(
np.array(CI_test3_6b_auto.coverage), np.array(CI_test3_6b_max.coverage),
plt_figure_num="CI test3 6b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test3_6b_auto.f.calculation_inputs()}")
test4_fig1 = plot_relative_difference(
np.array(CI_test4_1_auto.coverage), np.array(CI_test4_1_max.coverage),
plt_figure_num="CI test4 1", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_1_auto.f.calculation_inputs()}")
test4_fig1b = plot_relative_difference(
np.array(CI_test4_1b_auto.coverage), np.array(CI_test4_1b_max.coverage),
plt_figure_num="CI test4 1b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_1b_auto.f.calculation_inputs()}")
test4_fig2 = plot_relative_difference(
np.array(CI_test4_2_auto.coverage), np.array(CI_test4_2_max.coverage),
plt_figure_num="CI test4 2", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_2_auto.f.calculation_inputs()}")
test4_fig2b = plot_relative_difference(
np.array(CI_test4_2b_auto.coverage), np.array(CI_test4_2b_max.coverage),
plt_figure_num="CI test4 2b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_2b_auto.f.calculation_inputs()}")
test4_fig3 = plot_relative_difference(
np.array(CI_test4_3_auto.coverage), np.array(CI_test4_3_max.coverage),
plt_figure_num="CI test4 3", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_3_auto.f.calculation_inputs()}")
test4_fig3b = plot_relative_difference(
np.array(CI_test4_3b_auto.coverage), np.array(CI_test4_3b_max.coverage),
plt_figure_num="CI test4 3b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_3b_auto.f.calculation_inputs()}")
test4_fig4 = plot_relative_difference(
np.array(CI_test4_4_auto.coverage), np.array(CI_test4_4_max.coverage),
plt_figure_num="CI test4 4", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_4_auto.f.calculation_inputs()}")
test4_fig4b = plot_relative_difference(
np.array(CI_test4_4b_auto.coverage), np.array(CI_test4_4b_max.coverage),
plt_figure_num="CI test4 4b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_4b_auto.f.calculation_inputs()}")
test4_fig5 = plot_relative_difference(
np.array(CI_test4_5_auto.coverage), np.array(CI_test4_5_max.coverage),
plt_figure_num="CI test4 5", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_5_auto.f.calculation_inputs()}")
test4_fig5b = plot_relative_difference(
np.array(CI_test4_5b_auto.coverage), np.array(CI_test4_5b_max.coverage),
plt_figure_num="CI test4 5b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_5b_auto.f.calculation_inputs()}")
test4_fig6 = plot_relative_difference(
np.array(CI_test4_6_auto.coverage), np.array(CI_test4_6_max.coverage),
plt_figure_num="CI test4 6", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_6_auto.f.calculation_inputs()}")
test4_fig6b = plot_relative_difference(
np.array(CI_test4_6b_auto.coverage), np.array(CI_test4_6b_max.coverage),
plt_figure_num="CI test4 6b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test4_6b_auto.f.calculation_inputs()}")
test5_fig1 = plot_relative_difference(
np.array(CI_test5_1_auto.coverage), np.array(CI_test5_1_max.coverage),
plt_figure_num="CI test5 1", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test5_1_auto.f.calculation_inputs()}")
test5_fig1b = plot_relative_difference(
np.array(CI_test5_1b_auto.coverage), np.array(CI_test5_1b_max.coverage),
plt_figure_num="CI test5 1b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test5_1b_auto.f.calculation_inputs()}")
test5_fig2 = plot_relative_difference(
np.array(CI_test5_2_auto.coverage), np.array(CI_test5_2_max.coverage),
plt_figure_num="CI test5 2", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test5_2_auto.f.calculation_inputs()}")
test5_fig2b = plot_relative_difference(
np.array(CI_test5_2b_auto.coverage), np.array(CI_test5_2b_max.coverage),
plt_figure_num="CI test5 2b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test5_2b_auto.f.calculation_inputs()}")
test5_fig3 = plot_relative_difference(
np.array(CI_test5_3_auto.coverage), np.array(CI_test5_3_max.coverage),
plt_figure_num="CI test5 3", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test5_3_auto.f.calculation_inputs()}")
test5_fig3b = plot_relative_difference(
np.array(CI_test5_3b_auto.coverage), np.array(CI_test5_3b_max.coverage),
plt_figure_num="CI test5 3b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test5_3b_auto.f.calculation_inputs()}")
test5_fig4 = plot_relative_difference(
np.array(CI_test5_4_auto.coverage), np.array(CI_test5_4_max.coverage),
plt_figure_num="CI test5 4", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test5_4_auto.f.calculation_inputs()}")
test5_fig4b = plot_relative_difference(
np.array(CI_test5_4b_auto.coverage), np.array(CI_test5_4b_max.coverage),
plt_figure_num="CI test5 4b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test5_4b_auto.f.calculation_inputs()}")
test5_fig5 = plot_relative_difference(
np.array(CI_test5_5_auto.coverage), np.array(CI_test5_5_max.coverage),
plt_figure_num="CI test5 5", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test5_5_auto.f.calculation_inputs()}")
test5_fig5b = plot_relative_difference(
np.array(CI_test5_5b_auto.coverage), np.array(CI_test5_5b_max.coverage),
plt_figure_num="CI test5 5b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test5_5b_auto.f.calculation_inputs()}")
test5_fig6 = plot_relative_difference(
np.array(CI_test5_6_auto.coverage), np.array(CI_test5_6_max.coverage),
plt_figure_num="CI test5 6", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test5_6_auto.f.calculation_inputs()}")
test5_fig6b = plot_relative_difference(
np.array(CI_test5_6b_auto.coverage), np.array(CI_test5_6b_max.coverage),
plt_figure_num="CI test5 6b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test5_6b_auto.f.calculation_inputs()}")
test6_fig1 = plot_relative_difference(
np.array(CI_test6_1_auto.coverage), np.array(CI_test6_1_max.coverage),
plt_figure_num="CI test6 1", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test6_1_auto.f.calculation_inputs()}")
test6_fig1b = plot_relative_difference(
np.array(CI_test6_1b_auto.coverage), np.array(CI_test6_1b_max.coverage),
plt_figure_num="CI test6 1b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test6_1b_auto.f.calculation_inputs()}")
test6_fig2 = plot_relative_difference(
np.array(CI_test6_2_auto.coverage), np.array(CI_test6_2_max.coverage),
plt_figure_num="CI test6 2", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test6_2_auto.f.calculation_inputs()}")
test6_fig2b = plot_relative_difference(
np.array(CI_test6_2b_auto.coverage), np.array(CI_test6_2b_max.coverage),
plt_figure_num="CI test6 2b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test6_2b_auto.f.calculation_inputs()}")
test6_fig3 = plot_relative_difference(
np.array(CI_test6_3_auto.coverage), np.array(CI_test6_3_max.coverage),
plt_figure_num="CI test6 3", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test6_3_auto.f.calculation_inputs()}")
test6_fig3b = plot_relative_difference(
np.array(CI_test6_3b_auto.coverage), np.array(CI_test6_3b_max.coverage),
plt_figure_num="CI test6 3b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test6_3b_auto.f.calculation_inputs()}")
test6_fig4 = plot_relative_difference(
np.array(CI_test6_4_auto.coverage), np.array(CI_test6_4_max.coverage),
plt_figure_num="CI test6 4", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test6_4_auto.f.calculation_inputs()}")
test6_fig4b = plot_relative_difference(
np.array(CI_test6_4b_auto.coverage), np.array(CI_test6_4b_max.coverage),
plt_figure_num="CI test6 4b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test6_4b_auto.f.calculation_inputs()}")
test6_fig5 = plot_relative_difference(
np.array(CI_test6_5_auto.coverage), np.array(CI_test6_5_max.coverage),
plt_figure_num="CI test6 5", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test6_5_auto.f.calculation_inputs()}")
test6_fig5b = plot_relative_difference(
np.array(CI_test6_5b_auto.coverage), np.array(CI_test6_5b_max.coverage),
plt_figure_num="CI test6 5b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test6_5b_auto.f.calculation_inputs()}")
test6_fig6 = plot_relative_difference(
np.array(CI_test6_6_auto.coverage), np.array(CI_test6_6_max.coverage),
plt_figure_num="CI test6 6", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test6_6_auto.f.calculation_inputs()}")
test6_fig6b = plot_relative_difference(
np.array(CI_test6_6b_auto.coverage), np.array(CI_test6_6b_max.coverage),
plt_figure_num="CI test6 6b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test6_6b_auto.f.calculation_inputs()}")
test7_fig1 = plot_relative_difference(
np.array(CI_test7_1_auto.coverage), np.array(CI_test7_1_max.coverage),
plt_figure_num="CI test7 1", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test7_1_auto.f.calculation_inputs()}")
test7_fig1b = plot_relative_difference(
np.array(CI_test7_1b_auto.coverage), np.array(CI_test7_1b_max.coverage),
plt_figure_num="CI test7 1b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test7_1b_auto.f.calculation_inputs()}")
test7_fig2 = plot_relative_difference(
np.array(CI_test7_2_auto.coverage), np.array(CI_test7_2_max.coverage),
plt_figure_num="CI test7 2", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test7_2_auto.f.calculation_inputs()}")
test7_fig2b = plot_relative_difference(
np.array(CI_test7_2b_auto.coverage), np.array(CI_test7_2b_max.coverage),
plt_figure_num="CI test7 2b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test7_2b_auto.f.calculation_inputs()}")
test7_fig3 = plot_relative_difference(
np.array(CI_test7_3_auto.coverage), np.array(CI_test7_3_max.coverage),
plt_figure_num="CI test7 3", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test7_3_auto.f.calculation_inputs()}")
test7_fig3b = plot_relative_difference(
np.array(CI_test7_3b_auto.coverage), np.array(CI_test7_3b_max.coverage),
plt_figure_num="CI test7 3b", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test7_3b_auto.f.calculation_inputs()}")
test7_fig4 = plot_relative_difference(
np.array(CI_test7_4_auto.coverage), np.array(CI_test7_4_max.coverage),
plt_figure_num="CI test7 4", title="Diff between two {dim}d data[{shape}] - avg = {avg}, max = {max}\n"+f"{CI_test7_4_auto.f.calculation_inputs()}")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/8/17
# @Author : github.com/guofei9987
import numpy as np
from .base import SkoBase
from sko.operators import mutation
class SimulatedAnnealingBase(SkoBase):
"""
DO SA(Simulated Annealing)
Parameters
----------------
func : function
The objective function to be minimized
n_dim : int
number of variables of func
x0 : array, shape is n_dim
initial solution
T_max :float
initial temperature
T_min : float
end temperature
L : int
num of iterations under every temperature (Length of Chain)
Attributes
----------------------
Examples
-------------
See https://github.com/guofei9987/scikit-opt/blob/master/examples/demo_sa.py
"""
def __init__(self, func, x0, T_max=100, T_min=1e-7, L=300, max_stay_counter=150, **kwargs):
assert T_max > T_min > 0, 'T_max > T_min > 0'
self.func = func
self.T_max = T_max # initial temperature
self.T_min = T_min # end temperature
self.L = int(L)  # num of iterations under every temperature (also called Length of Chain)
# stop if best_y stays unchanged over max_stay_counter times (also called cooldown time)
self.max_stay_counter = max_stay_counter
self.n_dim = len(x0)
self.best_x = np.array(x0) # initial solution
self.best_y = self.func(self.best_x)
self.T = self.T_max
self.iter_cycle = 0
self.generation_best_X, self.generation_best_Y = [self.best_x], [self.best_y]
# history reasons, will be deprecated
self.best_x_history, self.best_y_history = self.generation_best_X, self.generation_best_Y
def get_new_x(self, x):
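# Temperature-scaled random step (fast-annealing style visiting distribution):
# the perturbation magnitude shrinks as T cools down.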
u = np.random.uniform(-1, 1, size=self.n_dim)
x_new = x + 20 * np.sign(u) * self.T * ((1 + 1.0 / self.T) ** np.abs(u) - 1.0)
return x_new
def cool_down(self):
self.T = self.T * 0.7
def isclose(self, a, b, rel_tol=1e-09, abs_tol=1e-30):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def run(self):
x_current, y_current = self.best_x, self.best_y
stay_counter = 0
while True:
for i in range(self.L):
x_new = self.get_new_x(x_current)
y_new = self.func(x_new)
# Metropolis
df = y_new - y_current
if df < 0 or np.exp(-df / self.T) > np.random.rand():
x_current, y_current = x_new, y_new
if y_new < self.best_y:
self.best_x, self.best_y = x_new, y_new
self.iter_cycle += 1
self.cool_down()
self.generation_best_Y.append(self.best_y)
self.generation_best_X.append(self.best_x)
# if best_y stay for max_stay_counter times, stop iteration
if self.isclose(self.best_y_history[-1], self.best_y_history[-2]):
stay_counter += 1
else:
stay_counter = 0
if self.T < self.T_min:
stop_code = 'Cooled to final temperature'
break
if stay_counter > self.max_stay_counter:
stop_code = 'Stay unchanged in the last {stay_counter} iterations'.format(stay_counter=stay_counter)
break
return self.best_x, self.best_y
fit = run
class SimulatedAnnealingValue(SimulatedAnnealingBase):
"""
SA on real value function
"""
def __init__(self, func, x0, T_max=100, T_min=1e-7, L=300, max_stay_counter=150, **kwargs):
super().__init__(func, x0, T_max, T_min, L, max_stay_counter, **kwargs)
lb, ub = kwargs.get('lb', None), kwargs.get('ub', None)
if lb is not None and ub is not None:
self.has_bounds = True
self.lb, self.ub = np.array(lb) * np.ones(self.n_dim), np.array(ub) * np.ones(self.n_dim)
assert self.n_dim == len(self.lb) == len(self.ub), 'dim == len(lb) == len(ub) is not True'
assert np.all(self.ub > self.lb), 'upper-bound must be greater than lower-bound'
self.hop = kwargs.get('hop', self.ub - self.lb)
elif lb is None and ub is None:
self.has_bounds = False
self.hop = kwargs.get('hop', 10)
else:
raise ValueError('input parameter error: lb and ub must both be provided, or both omitted')
self.hop = self.hop * np.ones(self.n_dim)
class SAFast(SimulatedAnnealingValue):
"""
u ~ Uniform(0, 1, size = d)
y = sgn(u - 0.5) * T * ((1 + 1/T)**abs(2*u - 1) - 1.0)
xc = y * (upper - lower)
x_new = x_old + xc
c = m * exp(-n * quench)
T_new = T0 * exp(-c * k**quench)
"""
def __init__(self, func, x0, T_max=100, T_min=1e-7, L=300, max_stay_counter=150, **kwargs):
super().__init__(func, x0, T_max, T_min, L, max_stay_counter, **kwargs)
self.m, self.n, self.quench = kwargs.get('m', 1), kwargs.get('n', 1), kwargs.get('quench', 1)
self.c = self.m * np.exp(-self.n * self.quench)
def get_new_x(self, x):
r = np.random.uniform(-1, 1, size=self.n_dim)
xc = np.sign(r) * self.T * ((1 + 1.0 / self.T) ** np.abs(r) - 1.0)
x_new = x + xc * self.hop
if self.has_bounds:
return np.clip(x_new, self.lb, self.ub)
return x_new
def cool_down(self):
self.T = self.T_max * np.exp(-self.c * self.iter_cycle ** self.quench)
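# --- Usage sketch (illustrative addition, not part of the original module) ---
# Because of the relative import at the top of this module, drive the optimizer
# from user code that imports the package; the `sko.SA` import path below is an
# assumption about the package layout, everything else follows the constructors
# defined above.
#
#   from sko.SA import SAFast
#   demo_func = lambda x: x[0] ** 2 + (x[1] - 0.05) ** 2 + x[2] ** 2
#   sa_fast = SAFast(func=demo_func, x0=[1, 1, 1], T_max=1, T_min=1e-9, L=300,
#                    max_stay_counter=150, lb=[-1, 1, -1], ub=[2, 3, 4])
#   best_x, best_y = sa_fast.run()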
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Interpolation of Amplitude and Phase using the interpolant derived by <NAME>.
For details see - https://doi.org/10.1007/s10236-017-1122-8
TODO:
* Add a wrapper class
* Add type and error check
* Add case for missing value
Author: khan
"""
import numpy as np
import matplotlib.pyplot as plt
class Point(object):
'''
Point(x, y=0, a=None, p=None, isradians=False) is a point object to hold point information
including the amplitude and phase.
args:
x (float) : x position
y (float) : y position
a (float) : amplitude
p (float) : phase in degree or radians (default: degrees)
isradians (bool): if input is in degrees or radians (default: false)
returns:
An instance of Point class.
attributes:
x (float) : x position
y (float) : y position
a (float) : amplitude
p (float) : phase in radians
methods:
print() : prints the attributes
TODO:
* add typecheck and error handling
'''
def __init__(self, x, y=0, a=None, p=None, isradians=False):
self.x = float(x)
self.y = float(y)
if a is not None:
self.a = float(a)
else:
self.a = float(0)
self.isradians = isradians
if p is not None:
if self.isradians:
self.p = float(p)
else:
self.p = float(p)*np.pi/180.0
else:
self.p = float(0)
def print(self):
print(self.x, self.y, self.a, self.p)
def __lt__(self, other):
return(self.x < other.x and self.y < other.y)
class Grid(object):
'''
Grid(x, y, A=None, P=None, isradians=False) is the grid object to hold points in a meshgrid.
args:
x ([float]) : x position array in the structured grid
y ([float]) : y position array in the structured grid
A ([[float]]) : 2D array of size (x, y) containing amplitude
P ([[float]]) : 2D array of size (x, y) containing phase
isradians (bool) : if the phase in in radians
'''
def __init__(self, x, y, A=None, P=None, isradians=False):
# Initiating variables
self.x = x
self.y = y
__X, __Y = np.meshgrid(self.x, self.y, indexing='xy')
self.shape = __X.shape
self.length = len(__X.flat)
self.isradians = isradians
if A is None:
__A = np.zeros(shape=self.shape)
else:
__A = A
if P is None:
__P = np.zeros(shape=self.shape)
else:
__P = P
# Creating point meshgrid
self.points = np.array([Point(x=__X.flat[i], y=__Y.flat[i], a=__A.flat[i], p=__P.flat[i], isradians=self.isradians) for i in np.arange(self.length)])
self.points = np.reshape(self.points, self.shape)
def getpoints(self, reshaped=True):
__points = np.array([point for point in self.points.flat])
if reshaped:
return(np.reshape(self.points, self.shape))
else:
return(self.points)
def getx(self, reshaped=True):
        __X = np.array([point.x for point in self.points.flat])
        # (completion assumed: mirror getpoints above and return the x coordinates)
        if reshaped:
            return(np.reshape(__X, self.shape))
        else:
            return(__X)
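# --- Usage sketch (not from the original source) ---
# Builds a small Grid from random amplitude/phase fields; the array sizes are illustrative
# assumptions. Note that with indexing='xy' the field shape is (len(y), len(x)).
if __name__ == '__main__':
    x = np.linspace(0, 10, 5)
    y = np.linspace(0, 5, 3)
    A = np.random.rand(3, 5)
    P = np.random.rand(3, 5) * 360.0
    grid = Grid(x=x, y=y, A=A, P=P, isradians=False)
    grid.points[0, 0].print()
    print(grid.getpoints().shape)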
import numpy as np
import pandas as pd
from glob import glob
from astropy.io import fits
import matplotlib.pyplot as plt
import os
from matplotlib import cm
from scipy.interpolate import InterpolatedUnivariateSpline
def create_combined():
#read synthetic fluxes
path_of_grid = '/home/mtsantaki/oporto/gaia_synthetic_kurucz/results_005/'
spectra = glob(path_of_grid + '*11200.spec')
spectra = list(map(lambda x: x.split('/')[-1], spectra))
data = []
for i, specname in enumerate(spectra[:]):
print(i)
teff = specname.split('_')[0]
logg = specname.split('_')[1]
feh = specname.split('_')[2]
vmic = specname.split('_')[3]
vmac = specname.split('_')[4]
vsini = specname.split('_')[5]
alpha = specname.split('_')[6]
#if vsini == '3.0':
hdulist = fits.open(path_of_grid + specname)
x = hdulist[1].data
flux = x['flux']
flux = flux.tolist()
        params = np.append(flux, [teff, logg, feh, alpha, vmic, vmac, vsini])
# coding: utf-8
# # 1) Straight Line Graph : (plt.plot(X,Y))
# # 2) Scatter Graph : plt.scatter(x,y)
# # 3) Bar Graph : plt.bar(x, y)
#
# # 4) Histogram Graph : plt.hist(X, cumulative=True, bins=20)
# only X is required
# it only needs a single array of values
# # 5) Pie chart Graph : plt.pie(X)
# It only needs a single array of values (X)
#
# # 6) Fill Graph : plt.fill(X,Y)
# # 7) Histogram 2d Graph : plt.hist2d(X, Y)
# # 8) Area Plot or StackPlot Graph : plt.stackplot(X, Y)
#
# In[6]:
from matplotlib import pyplot as plt
import numpy as np
# In[43]:
plt.plot([1,2,3,4,5,6,7])
plt.xlabel("X-axis")
plt.ylabel("Y-axis")
plt.show()
# In[31]:
x = [1,2,3,4]
y = [5,6,7,2]
plt.plot(x,y,linewidth = 1.0,color = "yellow")
plt.xlabel("X-axis",color="b")
plt.ylabel("Y-axis",color="g")
plt.title("Practice",color ="r")
plt.show()
# In[32]:
x = [1,2,3,4]
y = [5,6,7,2]
plt.scatter(x,y,linewidth = 1.0,color = "yellow")
plt.xlabel("X-axis",color="b")
plt.ylabel("Y-axis",color="g")
plt.title("Practice",color ="r")
plt.show()
# In[35]:
x = [1,2,3,4]
y = [5,6,7,2]
plt.bar(x,y,linewidth = 1.0,color = "yellow")
plt.xlabel("X-axis",color="b")
plt.ylabel("Y-axis",color="g")
plt.title("Practice",color ="r")
plt.show()
# In[36]:
x = [1,2,3,4,3,6,8,9]
y = [5,6,7,2,7,2,4,1]
plt.bar(x,y,linewidth = 1.0,color = "yellow")
plt.xlabel("X-axis",color="b")
plt.ylabel("Y-axis",color="g")
plt.title("Practice",color ="r")
plt.show()
# # 1) Matplot Combine(X1,Y1 and X2,Y2)
# In[48]:
x = [1,2,3,4]
y = [5,6,7,2]
x1 = [3,6,9,8]
y1 = [5,6,7,2]
plt.plot(x,y,label="First",color = "yellow")
plt.plot(x1,y1,label="Second",color = "green")
plt.xlabel("X-axis",color="b")
plt.ylabel("Y-axis",color="g")
plt.title("Practice",color ="r")
plt.show()
# # 2) With Line
# In[36]:
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y1 = [1, 3, 5, 3, 1, 3, 5, 3, 1]
y2 = [2, 4, 6, 4, 2, 4, 6, 4, 2]
plt.plot(x, y1, label="line L")
# plt.plot(x, y2, label="line H")
plt.plot()
plt.xlabel("x axis")
plt.ylabel("y axis")
plt.title("Line Graph Example")
plt.legend()
plt.show()
# # Without Line
# In[4]:
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y1 = [1, 3, 5, 3, 1, 3, 5, 3, 1]
y2 = [2, 4, 6, 4, 2, 4, 6, 4, 2]
plt.plot(x, y1)
# plt.plot(x, y2, label="line H")
plt.plot()
plt.xlabel("x axis")
plt.ylabel("y axis")
plt.title("Line Graph Example")
plt.legend()
plt.show()
# In[9]:
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y1 = [1, 3, 5, 3, 1, 3, 5, 3, 1]
y2 = [2, 4, 6, 4, 2, 4, 6, 4, 2]
# plt.plot(x, y1, label="line L")
plt.plot(x, y2, label="it is testing line , if line label statement is large , so label will display in center")
plt.plot()
plt.xlabel("x axis")
plt.ylabel("y axis")
plt.title("Line Graph Example")
plt.legend()
plt.show()
# # 3) Bar Graph Combine(X1 , Y1 and X2, Y2)
# In[37]:
# Look at index 4 and 6, which demonstrate overlapping cases.
x1 = [1, 3, 4, 5, 6, 7, 9]
y1 = [4, 7, 2, 4, 7, 8, 3]
x2 = [2, 4, 6, 8, 10]
y2 = [5, 6, 2, 6, 2]
# Colors: https://matplotlib.org/api/colors_api.html
plt.bar(x1, y1, label="Blue Bar", color='b')
plt.bar(x2, y2, label="Green Bar", color='g')
plt.plot()
plt.xlabel("bar number")
plt.ylabel("bar height")
plt.title("Bar Chart Example")
plt.legend()
plt.show()
# # 4) Bar Graph Single Display
# In[12]:
# Look at index 4 and 6, which demonstrate overlapping cases.
x1 = [1, 3, 4, 5, 6, 7, 9]
y1 = [4, 7, 2, 4, 7, 8, 3]
plt.bar(x1, y1, label="Blue Bar", color='r')
plt.plot()
plt.xlabel("bar number")
plt.ylabel("bar height")
plt.title("Bar Chart Example")
plt.legend()
plt.show()
# In[13]:
# Look at index 4 and 6, which demonstrate overlapping cases.
x2 = [2, 4, 6, 8, 10]
y2 = [5, 6, 2, 6, 2]
plt.bar(x2, y2, label="Green Bar", color='g')
plt.plot()
plt.xlabel("bar number")
plt.ylabel("bar height")
plt.title("Bar Chart Example")
plt.legend()
plt.show()
# # 5) Histograms
# In[16]:
# Use numpy to generate a bunch of random data in a bell curve around 5.
n = 5 + np.random.randn(1000)
m = [m for m in range(len(n))]
plt.bar(m, n)
plt.title("Raw Data")
plt.show()
plt.hist(n, bins=20)
plt.title("Histogram")
plt.show()
plt.hist(n)
plt.title("Histogram for without Cumulative and bins")
plt.show()
plt.hist(n, cumulative=True, bins=20)
plt.title("Cumulative Histogram for n")
plt.show()
plt.hist(m, cumulative=True, bins=20)
plt.title("Cumulative Histogram for m")
plt.show()
plt.plot(m, n)
plt.title("Plot Graph")
plt.show()
# print("M = {}\n".format(m))
# print("\nN = \n",n)
# # 6 Scatter Plots
# In[27]:
x1 = [2, 3, 4]
y1 = [5, 5, 5]
x2 = [1, 2, 3, 4, 5]
y2 = [2, 3, 2, 3, 4]
y3 = [6, 8, 7, 8, 7]
# Markers: https://matplotlib.org/api/markers_api.html
plt.scatter(x1, y1)
plt.scatter(x2, y2, marker='v', color='r')
plt.scatter(x2, y3, marker='^', color='m')
plt.title('Scatter Plot Example')
plt.show()
# # 7 Stack Plot
# In[17]:
idxes = [ 1, 2, 3, 4, 5, 6, 7, 8, 9]
arr1 = [23, 40, 28, 43, 8, 44, 43, 18, 17]
arr2 = [17, 30, 22, 14, 17, 17, 29, 22, 30]
arr3 = [15, 31, 18, 22, 18, 19, 13, 32, 39]
# Adding legend for stack plots is tricky.
plt.plot([], [], color='r', label = 'D 1')
plt.plot([], [], color='g', label = 'D 2')
plt.plot([], [], color='b', label = 'D 3')
plt.stackplot(idxes, arr1, arr2, arr3, colors= ['r', 'g', 'b'])
plt.title('Stack Plot Example')
plt.legend()
plt.show()
# In[30]:
idxes = [ 1, 2, 3, 4, 5, 6, 7, 8, 9]
arr1 = [23, 40, 28, 43, 8, 44, 43, 18, 17]
# Adding legend for stack plots is tricky.
plt.plot([], [], color='yellow', label = 'D 1')
plt.stackplot(idxes, arr1, colors= ['r'])
plt.title('Stack Plot Example')
plt.legend()
plt.show()
# In[31]:
import sys
print('Hello, Colaboratory from Python {}!'.format(sys.version_info[0]))
# In[32]:
import tensorflow as tf
tf.test.gpu_device_name()
# # 9 Pie chart
# In[33]:
import matplotlib.pyplot as plt
labels = 'S1', 'S2', 'S3'
sections = [56, 66, 24]
colors = ['c', 'g', 'y']
plt.pie(sections, labels=labels, colors=colors,
startangle=90,
explode = (0, 0.1, 0),
autopct = '%1.2f%%')
plt.axis('equal') # Try commenting this out.
plt.title('Pie Chart Example')
plt.show()
# In[35]:
# import tensorflow as tf
# import numpy as np
# with tf.Session():
# input1 = tf.constant(1.0, shape=[2, 3])
# input2 = tf.constant(np.reshape(np.arange(1.0, 7.0, dtype=np.float32), (2, 3)))
# output = tf.add(input1, input2)
# result = output.eval()
# result
# In[7]:
x = np.arange(20)
y = [x_i + np.random.randn(1) for x_i in x]
a, b = np.polyfit(x, y, 1)
_ = plt.plot(x, y, 'o', np.arange(20), a*np.arange(20)+b, '-')
# In[33]:
# !pip install -q matplotlib-venn
# In[34]:
# Now the newly-installed library can be used anywhere else in the notebook.
# Only needs to be run once at the top of the notebook.
# !pip install -q matplotlib-venn
from matplotlib_venn import venn2
_ = venn2(subsets = (3, 2, 1))
# # 10 Fill
# You can plot multiple polygons by providing multiple x, y, [color] groups.
#
#
# In[39]:
x1 = [1, 3, 4, 5, 6, 7, 9]
y1 = [4, 7, 2, 4, 7, 8, 3]
x2 = [2, 4, 6, 8, 10]
y2 = [5, 6, 2, 6, 2]
plt.fill(x1, y1, label="Blue Bar", color='b')
plt.fill(x2, y2, label="Green Bar", color='g')
plt.plot()
plt.xlabel("bar number")
plt.ylabel("bar height")
plt.title("Bar Chart Example")
plt.legend()
plt.show()
# # 11) Fill and Bar combine
# In[40]:
x1 = [1, 3, 4, 5, 6, 7, 9]
y1 = [4, 7, 2, 4, 7, 8, 3]
x2 = [2, 4, 6, 8, 10]
y2 = [5, 6, 2, 6, 2]
plt.fill(x1, y1, label="Blue Bar", color='b')
plt.bar(x2, y2, label="Green Bar", color='g')
plt.plot()
plt.xlabel("bar number")
plt.ylabel("bar height")
plt.title("Bar Chart Example")
plt.legend()
plt.show()
# In[41]:
np.random.seed(19680801)
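# Item 7 from the list at the top (plt.hist2d) is not demonstrated above; a minimal
# sketch using seeded random data (the bin count is an arbitrary choice).
x = np.random.randn(1000)
y = np.random.randn(1000)
plt.hist2d(x, y, bins=30)
plt.title("Histogram 2D Example")
plt.show()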
import numpy as np
import scipy
import scipy.ndimage
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
import collections
from PIL import Image
import numbers
__author__ = "<NAME>"
__license__ = "GPL"
__version__ = "0.1.0"
__status__ = "Development"
def center_crop(x, center_crop_size):
assert x.ndim == 3
centerw, centerh = x.shape[1] // 2, x.shape[2] // 2
halfw, halfh = center_crop_size[0] // 2, center_crop_size[1] // 2
return x[:, centerw - halfw:centerw + halfw, centerh - halfh:centerh + halfh]
def to_tensor(x):
import torch
x = x.transpose((2, 0, 1))
return torch.from_numpy(x).float()
def random_num_generator(config, random_state=np.random):
if config[0] == 'uniform':
ret = random_state.uniform(config[1], config[2], 1)[0]
elif config[0] == 'lognormal':
ret = random_state.lognormal(config[1], config[2], 1)[0]
else:
print(config)
raise Exception('unsupported format')
return ret
def poisson_downsampling(image, peak, random_state=np.random):
if not isinstance(image, np.ndarray):
imgArr = np.array(image, dtype='float32')
else:
imgArr = image.astype('float32')
Q = imgArr.max(axis=(0, 1)) / peak
if Q[0] == 0:
return imgArr
ima_lambda = imgArr / Q
noisy_img = random_state.poisson(lam=ima_lambda)
return noisy_img.astype('float32')
def elastic_transform(image, alpha=1000, sigma=30, spline_order=1, mode='nearest', random_state=np.random):
"""Elastic deformation of image as described in [Simard2003]_.
.. [Simard2003] Simard, <NAME>, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
"""
assert image.ndim == 3
shape = image.shape[:2]
dx = gaussian_filter((random_state.rand(*shape) * 2 - 1),
sigma, mode="constant", cval=0) * alpha
dy = gaussian_filter((random_state.rand(*shape) * 2 - 1),
sigma, mode="constant", cval=0) * alpha
x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
    indices = [np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))]
    # (completion assumed: apply the displacement field channel-wise with map_coordinates)
    result = np.empty_like(image)
    for i in range(image.shape[2]):
        result[:, :, i] = map_coordinates(
            image[:, :, i], indices, order=spline_order, mode=mode).reshape(shape)
    return result
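# --- Usage sketch (not from the original source) ---
# Applies the helpers above to a random RGB image; the alpha/sigma/peak values are
# illustrative assumptions only.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    img = rng.rand(64, 64, 3).astype('float32')
    warped = elastic_transform(img, alpha=300, sigma=8)
    noisy = poisson_downsampling(img, peak=30.0, random_state=rng)
    print(warped.shape, noisy.shape)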
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 20:20:11 2018
@author: Alexandre
"""
###############################################################################
import numpy as np
###############################################################################
from pyro.dynamic import vehicle
from pyro.planning import randomtree
###############################################################################
sys = vehicle.KinematicBicyleModel()
###############################################################################
x_start = np.array([0,0,0])
x_goal = np.array([0,1,0])
import numpy as np
from PIL import Image
import sys
# 2018.05.29
# create Color R,G,B Range
Color_Range = []
Color_Diff = int(256 / 4)
for r in range(256):
for g in range(256):
for b in range(256):
if r % Color_Diff == 0 and g % Color_Diff == 0 and b % Color_Diff == 0:
Color_Range.append([r, g, b])
Color_Range = np.array(Color_Range)
print(Color_Range.shape)
# print(Color_Range)
image = Image.open("test.jpg")
W, H = image.size
image_array = np.array(image)
print("image shape:", image_array.shape)
def gcd(m, n):
return m if n == 0 else gcd(n, m % n)
def sample_crop(Bg_image_array, UL_local_tuple, LR_local_tuple):
"""
# parameter:
image the background image by numpy_array
UL_local_tuple (x1,y1) the location of Upper Left Corner
LR_local_tuple (x2,y2) the location of Lower Right Corner
"""
(x1, y1) = UL_local_tuple
(x2, y2) = LR_local_tuple
x1 = int(x1)
x2 = int(x2)
y1 = int(y1)
y2 = int(y2)
image_crop_H = y2 - y1
image_crop_W = x2 - x1
    sample_array = np.zeros((image_crop_H, image_crop_W, Bg_image_array.shape[2]), dtype=np.uint8)
"""
This is the starter code and some suggested architecture we provide you with.
But feel free to do any modifications as you wish or just completely ignore
all of them and have your own implementations.
"""
import numpy as np
import scipy.io
from scipy import stats
import random
import time
import math
####################### config ########################
MAX_DEPTH = 3
NODE_PUTITY_THRESH = 0 # 0.5 - 1.0
IG_THRESH = 0 # 0.001 - 0.9, mostly less than 0.1
#######################################################
class DecisionTree:
label_list = [0, 1]
def __init__(self, features, max_depth=3, npt=0, igt=0):
"""
TODO: initialization of a decision tree
"""
# hyper_params
self.NODE_PUTITY_THRESH = npt
self.IG_THRESH = igt
self.max_depth = max_depth
self.features = features
self.left = None
self.right = None
self.split_id = None
self.thresh = None
self.data = None
self.labels = None
self.pred = None
@staticmethod
def entropy(y):
"""
TODO: implement a method that calculates the entropy given all the labels
"""
if y.shape[0] == 0:
return 0
num = np.sum(y < 0.5)
p = num / y.shape[0]
if p < 1e-10 or 1-p < 1e-10:
return 0
res = -p * math.log(p, 2) - (1-p) * math.log(1-p,2)
return res
@staticmethod
def information_gain(X, y, thresh, total_entr):
"""
TODO: implement a method that calculates information gain given a vector of features
and a split threshold
"""
y0 = y[np.where(X < thresh)[0]]
p0 = y0.size / y.size
y1 = y[np.where(X >= thresh)[0]]
p1 = y1.size / y.size
sub_entr = p0*DecisionTree.entropy(y0) + p1*DecisionTree.entropy(y1)
return total_entr - sub_entr
@staticmethod
def gini_impurity(y):
"""
TODO: implement a method that calculates the gini impurity given all the labels
"""
if y.shape[0] == 0:
return 0
res = 1
for label in DecisionTree.label_list:
p = np.sum(y == label) / y.shape[0]
res -= p ** 2
return res
@staticmethod
def gini_purification(X, y, thresh):
"""
TODO: implement a method that calculates reduction in impurity gain given a vector of features
and a split threshold
"""
total_gini = DecisionTree.gini_impurity(y)
y0 = y[np.where(X < thresh)[0]]
p0 = y0.size / y.size
y1 = y[np.where(X >= thresh)[0]]
p1 = y1.size / y.size
sub_gini = p0 * DecisionTree.gini_impurity(y0) + p1 * DecisionTree.gini_impurity(y1)
return total_gini - sub_gini
def split(self, X, y, idx, thresh):
"""
TODO: implement a method that return a split of the dataset given an index of the feature and
a threshold for it
"""
Xi = X[:, idx]
X0 = X[np.where(Xi < thresh)[0], :]
y0 = y[np.where(Xi < thresh)[0], :]
X1 = X[np.where(Xi >= thresh)[0], :]
y1 = y[np.where(Xi >= thresh)[0], :]
return X0, y0, X1, y1
def segmenter(self, X, y):
"""
TODO: compute entropy gain for all single-dimension splits,
return the feature and the threshold for the split that
has maximum gain
"""
max_id = 0
max_thresh = 0
max_ig = 0
total_entr = DecisionTree.entropy(y)
for i in range(X.shape[1]):
Xi = X[:, i]
            for thresh in np.unique(Xi):
                # (completion assumed: keep the split with the largest information gain)
                ig = DecisionTree.information_gain(Xi, y, thresh, total_entr)
                if ig > max_ig:
                    max_ig = ig
                    max_id = i
                    max_thresh = thresh
        return max_id, max_thresh
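# --- Quick check of the impurity helpers above (not from the original source) ---
# Toy labels/features chosen for illustration; a perfectly separable split should
# recover the full entropy as information gain.
if __name__ == '__main__':
    y_toy = np.array([[0], [0], [1], [1]])
    X_toy = np.array([0.1, 0.2, 0.8, 0.9])
    total = DecisionTree.entropy(y_toy)
    print('entropy:', total, 'gini:', DecisionTree.gini_impurity(y_toy))
    print('info gain at 0.5:', DecisionTree.information_gain(X_toy, y_toy, 0.5, total))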
import unittest
from SUASSystem import *
from SDA import *
import numpy
import interop
class SDAConverterTestCase(unittest.TestCase):
def setUp(self):
self.setup_client_converter()
def setup_client_converter(self):
self.initial_coordinates = Location(38.8703041, -77.3214035, 0)
self.boundary_points = [
{
"boundary_pts" : [
{"latitude" : 38.8697558739, "longitude" : -77.3221076688, "order" : 0},
{"latitude" : 38.8708523261, "longitude" : -77.3221076743, "order" : 1},
{"latitude" : 38.8708523261, "longitude" : -77.3206993257, "order" : 2},
{"latitude" : 38.8697558739, "longitude" : -77.3206993312, "order" : 3}
]
}
]
self.sda_converter = SDAConverter(self.initial_coordinates, self.boundary_points)
def test_convert_fly_zones(self):
converted_fly_zones = self.sda_converter.convert_fly_zones(self.boundary_points)
print(converted_fly_zones)
for num_of_fly_zone in range(len(converted_fly_zones)):
for index_in_fly_zone in range(len(converted_fly_zones[num_of_fly_zone])):
location = Location(self.boundary_points[num_of_fly_zone]["boundary_pts"][index_in_fly_zone]["latitude"], self.boundary_points[num_of_fly_zone]["boundary_pts"][index_in_fly_zone]["longitude"], 0)
expected_converted_boundary_point = haversine(self.initial_coordinates, location, units="US")
self.assertEqual(converted_fly_zones[num_of_fly_zone][index_in_fly_zone][0], expected_converted_boundary_point[0])
self.assertEqual(converted_fly_zones[num_of_fly_zone][index_in_fly_zone][1], expected_converted_boundary_point[1])
def test_avoid_obstacles(self):
self.sda_converter.obstacle_map.set_drone_position(numpy.array([0,0,0]))
self.sda_converter.reset_obstacles()
self.sda_converter.obstacle_map.add_obstacle(StationaryObstacle(numpy.array([40,0,0]),5,100))
test_waypoint = Location(38.8703041, -77.3210866228, 0)
self.sda_converter.set_waypoint(test_waypoint)
self.sda_converter.avoid_obstacles()
boolean, possible_paths = self.sda_converter.obstacle_map.is_obstacle_in_path()
expected_guided_path = self.sda_converter.obstacle_map.get_min_path(possible_paths)
self.assertEqual(self.sda_converter.current_path[0][0], expected_guided_path[0][0])
self.assertEqual(self.sda_converter.current_path[0][1], expected_guided_path[0][1])
self.assertTrue(boolean)
def test_get_uav_avoid_coordinates(self):
self.sda_converter.obstacle_map.set_drone_position(numpy.array([0,0,0]))
self.sda_converter.reset_obstacles()
self.sda_converter.obstacle_map.add_obstacle(StationaryObstacle(numpy.array([40,0,0]),5,0))
test_waypoint = Location(38.8703041, -77.3210866228, 0)
self.sda_converter.set_waypoint(test_waypoint)
self.sda_converter.avoid_obstacles()
avoid_coordinates = self.sda_converter.get_uav_avoid_coordinates()
converted_avoid_coordiantes = haversine(self.initial_coordinates, avoid_coordinates, units="US")
self.assertEqual(int(converted_avoid_coordiantes[0]), self.sda_converter.current_path[0][0])
self.assertEqual(int(converted_avoid_coordiantes[1]), self.sda_converter.current_path[0][1])
avoid_coordinates.alt = avoid_coordinates.get_alt() * 3.28084
self.sda_converter.set_uav_position(avoid_coordinates)
avoid_coordinates2 = self.sda_converter.get_uav_avoid_coordinates()
converted_avoid_coordiantes2 = haversine(self.initial_coordinates, avoid_coordinates2, units="US")
self.assertEqual(int(converted_avoid_coordiantes2[0]), self.sda_converter.current_path[1][0])
self.assertEqual(int(converted_avoid_coordiantes2[1]), self.sda_converter.current_path[1][1])
def test_has_uav_reached_guided_waypoint(self):
"""
test case #1
didn't reach the point
"""
self.sda_converter.obstacle_map.set_drone_position(numpy.array([0,0,0]))
self.sda_converter.reset_obstacles()
self.sda_converter.obstacle_map.add_obstacle(StationaryObstacle(numpy.array([40,0,0]),5,100))
test_waypoint = Location(38.8703041, -77.3210866228, 0)
self.sda_converter.set_waypoint(test_waypoint)
self.sda_converter.avoid_obstacles()
self.assertEqual(False, self.sda_converter.has_uav_reached_guided_waypoint())
"""
test case #2
reached the point
"""
self.sda_converter.obstacle_map.set_drone_position(numpy.array([0,0,0]))
self.sda_converter.obstacle_map.reset_waypoints()
self.sda_converter.reset_obstacles()
self.sda_converter.obstacle_map.add_obstacle(StationaryObstacle(numpy.array([40,0,0]),5,100))
test_waypoint = Location(38.8703041, -77.3210866228, 0)
self.sda_converter.set_waypoint(test_waypoint)
self.sda_converter.avoid_obstacles()
self.sda_converter.obstacle_map.set_drone_position(numpy.array([5, -35, 0]))
self.assertTrue(self.sda_converter.has_uav_reached_guided_waypoint())
def test_does_guided_path_exist(self):
self.sda_converter.obstacle_map.set_drone_position(numpy.array([0,0,0]))
self.sda_converter.reset_obstacles()
self.sda_converter.obstacle_map.add_obstacle(StationaryObstacle(numpy.array([40,0,0]),5,100))
test_waypoint = Location(38.8703041, -77.3210866228, 0)
self.sda_converter.set_waypoint(test_waypoint)
self.sda_converter.avoid_obstacles()
self.assertTrue(self.sda_converter.does_guided_path_exist())
def test_get_distance_to_current_guided_waypoint(self):
self.sda_converter.obstacle_map.set_drone_position(numpy.array([0,0,20]))
self.sda_converter.reset_obstacles()
self.sda_converter.obstacle_map.add_obstacle(StationaryObstacle(numpy.array([50,50,0]),10,20))
#test_waypoint = Location(38.8703041, -77.3210866228, 0)
#self.sda_converter.set_waypoint(test_waypoint)
self.sda_converter.obstacle_map.add_waypoint(numpy.array([100,100,20]))
self.sda_converter.avoid_obstacles()
        distance = VectorMath.get_magnitude(numpy.array([0,0,20]))
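# Standard unittest entry point (assumed; the original test file is truncated above).
if __name__ == '__main__':
    unittest.main()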
from __future__ import print_function, division
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout
from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import RMSprop
import tensorflow.keras.backend as K
import inputs
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
class WGAN():
def __init__(self):
        # Input parameters
        self.rows = 120  # number of motion frames: 120, 120/4 = 30
        self.columns = 220  # number of motion features: 225 -> 220 (24*9+3+1), 220/4 = 55
self.channels = 1
self.img_shape = (self.rows, self.columns, self.channels)
self.latent_dim = 100
        # These hyperparameters and the optimizer follow the WGAN paper
self.n_critic = 5 # the number of iterations of the critic per generator iteration
self.clip_value = 0.01 # clipping parameter
optimizer = RMSprop(lr=0.00005) # learning rate
        # Build the critic
self.critic = self.build_critic()
self.critic.compile(loss=self.wasserstein_loss,
optimizer=optimizer,
metrics=['accuracy'])
        # Build the generator
self.generator = self.build_generator()
        # Build the combined model
z = Input(shape=(self.latent_dim,))
img = self.generator(z)
        self.critic.trainable = False  # the combined model trains only the generator (original note: not sure why this works)
valid = self.critic(img)
self.combined = Model(z, valid)
self.combined.compile(loss=self.wasserstein_loss,
optimizer=optimizer,
metrics=['accuracy'])
def wasserstein_loss(self, y_true, y_pred):
        # Loss function from the WGAN paper
return K.mean(y_true * y_pred)
def build_generator(self):
model = Sequential()
        # (None, 100) input
model.add(Dense(128 * 30 * 55, activation="relu", input_dim=self.latent_dim))
        # (None, 211200) params: 21331200
model.add(Reshape((30, 55, 128)))
# (None, 30, 55, 128)
model.add(UpSampling2D())
# (None, 60, 110, 128)
model.add(Conv2D(128, kernel_size=4, padding="same"))
        # (None, 60, 110, 128) params: 262272
        model.add(BatchNormalization(momentum=0.8))
        # (None, 60, 110, 128) params: 512
model.add(Activation("relu"))
# (None, 60, 110, 128)
model.add(UpSampling2D())
# (None, 120, 220, 128)
model.add(Conv2D(64, kernel_size=4, padding="same"))
        # (None, 120, 220, 64) params: 131136
        model.add(BatchNormalization(momentum=0.8))
        # (None, 120, 220, 64) params: 256
model.add(Activation("relu"))
# (None, 120, 220, 64)
model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
        # (None, 120, 220, 1) params: 1025
        model.add(Activation("tanh"))
        # (None, 120, 220, 1) output
model.summary()
noise = Input(shape=(self.latent_dim,))
img = model(noise)
return Model(noise, img)
def build_critic(self):
model = Sequential()
        # (None, 120, 220, 1) input
        model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        # (None, 60, 110, 16) params: 160
model.add(LeakyReLU(alpha=0.2))
# (None, 60, 110, 16)
model.add(Dropout(0.25))
# (None, 60, 110, 16)
model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        # (None, 30, 55, 32) params: 4640
model.add(ZeroPadding2D(padding=((0,0),(0,1))))
# (None, 30, 56, 32)
model.add(BatchNormalization(momentum=0.8))
        # (None, 30, 56, 32) params: 128
model.add(LeakyReLU(alpha=0.2))
# (None, 30, 56, 32)
model.add(Dropout(0.25))
# (None, 30, 56, 32)
model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        # (None, 15, 28, 64) params: 18496
        model.add(BatchNormalization(momentum=0.8))
        # (None, 15, 28, 64) params: 256
model.add(LeakyReLU(alpha=0.2))
# (None, 15, 28, 64)
model.add(Dropout(0.25))
# (None, 15, 28, 64)
model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
        # (None, 15, 28, 128) params: 73856
        model.add(BatchNormalization(momentum=0.8))
        # (None, 15, 28, 128) params: 512
model.add(LeakyReLU(alpha=0.2))
# (None, 15, 28, 128)
model.add(Dropout(0.25))
# (None, 15, 28, 128)
model.add(Flatten())
# (None, 53760)
model.add(Dense(1))
        # (None, 1) params: 53761
        # Output: -1 means valid (a real sample from the dataset), 1 means fake (a generated sample)
model.summary()
img = Input(shape=self.img_shape)
validity = model(img)
return Model(img, validity)
def train(self, epochs, batch_size=7, sample_interval=50):
train_dataset = inputs.get_dataset_fn(batch_size, "../data/tf_sstables/aist_generation_train_v2_tfrecord-*")
train_iter = tf.nest.map_structure(iter, train_dataset)
        valid = -np.ones((batch_size, 1))  # -1 labels valid (real) samples, not 1
        fake = np.ones((batch_size, 1))  # 1 labels fake samples, not 0
for epoch in range(epochs):
for _ in range(self.n_critic): # self.n_critic = 5
# ---------------------
# Train Discriminator
# ---------------------
data = next(train_iter)
valid_motions = data["motion_input"] # (batch_size, 120, 220)
valid_motions = np.expand_dims(valid_motions, axis=3) # (batch_size, 120, 220, 1)
                noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
"""
This file contains all the functions for creating an image plus projected lines of a predefined height from radar data.
The height can either be predefined or calculated from the radar elevation field of view.
This file was completely reworked on 2019-01-23 for better functionality. Some function arguments changed, so please verify your usage if you refer to this file.
"""
# Standard libraries
import os
import os.path as osp
import sys
import math
import time
# 3rd party libraries
import cv2
import json
import numpy as np
from pyquaternion import Quaternion
from PIL import Image
# Local modules
# Allow relative imports when being executed as script.
if __name__ == "__main__" and not __package__:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
import crfnet.raw_data_fusion # noqa: F401
__package__ = "crfnet.raw_data_fusion"
from nuscenes.utils.data_classes import PointCloud
from ...utils import radar
# from nuscenes.utils.geometry_utils import view_points
def _resize_image(image_data, target_shape):
"""
    Performs resizing of the image and calculates a matrix to adapt the intrinsic camera matrix
:param image_data: [np.array] with shape (height x width x 3)
:param target_shape: [tuple] with (width, height)
:return resized image: [np.array] with shape (height x width x 3)
:return resize matrix: [numpy array (3 x 3)]
"""
# print('resized', type(image_data))
stupid_confusing_cv2_size_because_width_and_height_are_in_wrong_order = (target_shape[1], target_shape[0])
resized_image = cv2.resize(image_data, stupid_confusing_cv2_size_because_width_and_height_are_in_wrong_order)
resize_matrix = np.eye(3, dtype=resized_image.dtype)
resize_matrix[1, 1] = target_shape[0]/image_data.shape[0]
resize_matrix[0, 0] = target_shape[1]/image_data.shape[1]
return resized_image, resize_matrix
def _radar_transformation(radar_data, height=None):
"""
    Transforms the given radar data (at height z = 0 and at a second input height) into the vehicle's coordinate system using the extrinsic radar matrix.
This function appends the distance to the radar point.
Parameters:
:param radar_data: [numpy array] with radar parameter (e.g. velocity) in rows and radar points for one timestep in columns
Semantics: x y z dyn_prop id rcs vx vy vx_comp vy_comp is_quality_valid ambig_state x_rms y_rms invalid_state pdh0 distance
:param radar_extrinsic: [numpy array (3x4)] that consists of the extrinsic parameters of the given radar sensor
:param height: [tuple] (min height, max height) that defines the (unknown) height of the radar points
Returns:
:returns radar_data: [numpy array (m x no of points)] that consists of the transformed radar points with z = 0
        :returns radar_xyz_endpoint: [numpy array (3 x no of points)] that consists of the transformed radar points with z = height
"""
# Field of view (global)
ELEVATION_FOV_SR = 20
ELEVATION_FOV_FR = 14
# initialization
num_points = radar_data.shape[1]
# Radar points for the endpoint
radar_xyz_endpoint = radar_data[0:3,:].copy()
# variant 1: constant height substracted by RADAR_HEIGHT
RADAR_HEIGHT = 0.5
if height:
radar_data[2, :] = np.ones((num_points,)) * (height[0] - RADAR_HEIGHT) # lower points
radar_xyz_endpoint[2, :] = np.ones((num_points,)) * (height[1] - RADAR_HEIGHT) # upper points
# variant 2: field of view
else:
dist = radar_data[-1,:]
count = 0
for d in dist:
# short range mode
if d <= 70:
radar_xyz_endpoint[2, count] = -d * np.tan(ELEVATION_FOV_SR/2)
# long range mode
else:
radar_xyz_endpoint[2, count] = -d * np.tan(ELEVATION_FOV_FR/2)
count += 1
return radar_data, radar_xyz_endpoint
def _create_line(P1, P2, img):
"""
    Produces an array that consists of the coordinates and intensities of each pixel in a line between two points
:param P1: [numpy array] that consists of the coordinate of the first point (x,y)
:param P2: [numpy array] that consists of the coordinate of the second point (x,y)
:param img: [numpy array] the image being processed
    :return itbuffer: [numpy array] that consists of the coordinates and intensities of each pixel along the line (shape: [numPixels, 2], row = [x,y])
"""
# define local variables for readability
imageH = img.shape[0]
imageW = img.shape[1]
P1X = P1[0]
P1Y = P1[1]
P2X = P2[0]
P2Y = P2[1]
# difference and absolute difference between points
# used to calculate slope and relative location between points
dX = P2X - P1X
dY = P2Y - P1Y
dXa = np.abs(dX)
dYa = np.abs(dY)
# predefine numpy array for output based on distance between points
itbuffer = np.empty(
shape=(np.maximum(int(dYa), int(dXa)), 2), dtype=np.float32)
itbuffer.fill(np.nan)
# Obtain coordinates along the line using a form of Bresenham's algorithm
negY = P1Y > P2Y
negX = P1X > P2X
if P1X == P2X: # vertical line segment
itbuffer[:, 0] = P1X
if negY:
            itbuffer[:, 1] = np.arange(P1Y - 1, P1Y - dYa - 1, -1)
import time
import joblib
import pandas as pd
import numpy as np
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from scipy.interpolate import Rbf
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
from argument import args, functions
model_basic_path = "./record/model/"
figure3D_basic_path = "./record/figure/3D/"
figure2D_basic_path = "./record/figure/2D/"
masks_1 = [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]
# masks_1 = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
masks_2 = [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]
# masks_2 = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if __name__ == "__main__":
# read training data
length = len(masks_1)
for i in range(length):
if masks_1[i]:
continue
for j in range(i, length):
if masks_2[j]:
continue
print("task: " + functions[i] + " " + functions[j])
df = pd.read_csv("./record/data/%d_%d.csv" % (i, j))
cols = list(df.columns)
# plot: only plot function with two variables
if len(cols) > 3:
continue
xx = args[i]
yy = args[j]
x_len = len(xx)
y_len = len(yy)
            # fill in the values of Z
# attention: X: f1_args, Y:f2_args
if i != j:
                Z = np.array(df[cols[-1]])
import warnings
import itertools
import pandas
import math
import sys
import numpy as np
from plotly.offline import download_plotlyjs, init_notebook_mode, plot
# init_notebook_mode(connected=True)
dataframe = pandas.read_csv('../data/realTweets/Twitter_volume_AAPL.csv')
time = dataframe[list(dataframe)[0]]
values = dataframe[list(dataframe)[1]]
training_ratio = 0.8
validation_ratio = 0.5
# Dividing the data set into training, validation, and testing parts
training_end = int(math.floor(len(values)*training_ratio))
training_set_values = np.array(values[0:training_end])
training_set_time = np.array(time[0:training_end])
validation_start = training_end + 1
validation_end = validation_start + int(math.floor(len(values)*(1-training_ratio)*validation_ratio))
validation_set_values = np.array(values[validation_start:validation_end])
validation_set_time = np.array(time[validation_start:validation_end])
testing_start = validation_end + 1
testing_end = len(values)
testing_set_values = np.array(values[testing_start:testing_end])
testing_set_time = np.array(time[testing_start:testing_end])
class ARMA:
def __init__(self, p,q):
self.p = p
self.q = q
# Setters
def set_p(self,p):
self.p=p
def set_q(self,q):
self.q=q
def set_training_data_time(self, time):
self.training_data_time = time
def set_validation_data_time(self, time):
self.validation_data_time = time
def set_testing_data_time(self, time):
self.testing_data_time = time
def set_validation_data_set(self,data):
self.validation_data_set = data
def set_testing_data_set(self,data):
self.testing_data_set = data
def set_training_data_set(self,data):
self.training_data = data
self.Z = data - np.mean(data)
self.Z.shape = (len(data),1)
# Model
def shock(self,mean,std):
return np.random.normal(mean, std, 1)
# return 0
def calculate_AR_normal_matrix_x_row(self,data,t,mean,std):
row = np.zeros((1,self.p+1))
j = 0
for i in range(t-self.p,t):
if i < 0:
row[0][j] = 0
else:
row[0][j] = data[i]
j+=1
row[0][-1] = self.shock(mean,std)
return row
def calculate_AR_weights(self):
normal_matrix = np.zeros((len(self.training_data),self.p+1))
mean = np.mean(self.Z)
std = np.std(self.Z, ddof=1)
for i in range(0,len(self.training_data)):
normal_matrix[i] = self.calculate_AR_normal_matrix_x_row(self.Z,i,mean,std)
normal_matrix_tanspose = normal_matrix.transpose()
self.AR_weights = np.dot(np.dot(np.linalg.pinv(np.dot(normal_matrix_tanspose,normal_matrix)),normal_matrix_tanspose),self.Z)
def get_AR_prediction(self,data_set):
self.calculate_AR_weights()
self.AR_prediction = np.zeros((np.max(data_set.shape),1))
mean = np.mean(data_set)
std = np.std(data_set, ddof=1)
Z = np.array(data_set)
Z.shape = (np.max(Z.shape),1)
Z = Z - mean
for i in range(0,np.max(Z.shape)):
self.AR_prediction[i] = np.dot(self.calculate_AR_normal_matrix_x_row(Z, i, mean, std), self.AR_weights)
self.AR_prediction = self.AR_prediction.transpose()[0] + mean
return self.AR_prediction
def get_previous_q_values(self,data,t):
previous_q = np.zeros(self.q)
j = 0
for i in range(t-self.q,t):
if i < 0:
previous_q[j] = 0
else:
previous_q[j] = data[i]
j+=1
return previous_q
def get_MA_prediction(self,data_set):
self.MA_prediction = np.zeros(np.max(data_set.shape))
Z = np.array(data_set)
Z.shape = (np.max(Z.shape),1)
for i in range(0,np.max(Z.shape)):
self.MA_prediction[i] = np.average(self.get_previous_q_values(Z, i))
return self.MA_prediction
def calculate_AR_MA_normal_matrix_x_row(self,t):
row = np.zeros((1,2))
row[0][0] = self.MA_prediction[t]
row[0][1] = self.AR_prediction[t]
return row
def calculate_AR_MA_weights(self):
self.get_MA_prediction(self.training_data)
self.get_AR_prediction(self.training_data)
normal_matrix = np.zeros((len(self.training_data),2))
for i in range(0,len(self.training_data)):
normal_matrix[i] = self.calculate_AR_MA_normal_matrix_x_row(i)
normal_matrix_tanspose = normal_matrix.transpose()
self.weights = np.dot(np.dot(np.linalg.pinv(np.dot(normal_matrix_tanspose,normal_matrix)),normal_matrix_tanspose),self.training_data)
# print(self.weights)
# #normalizing weigts
# total = self.weights[0] + self.weights[1]
# self.weights[0] = self.weights[0]/total
# self.weights[1] = self.weights[1]/total
# print(self.weights)
def get_prediction(self, data_set):
self.calculate_AR_MA_weights()
self.get_MA_prediction(data_set)
self.get_AR_prediction(data_set)
Z = np.array(data_set)
Z.shape = (np.max(Z.shape),1)
self.prediction = np.zeros((np.max(Z.shape),1))
for i in range(0,np.max(Z.shape)):
self.prediction[i] = np.dot(self.calculate_AR_MA_normal_matrix_x_row(i), self.weights)
self.prediction = self.prediction.transpose()[0]
return self.prediction
# Diagnostics and identification messures
def mse(self,values,pridicted):
error = 0.0
for i in range(0,len(values)):
error += (values[i] - pridicted[i])**2
return error/len(values)
def get_mse(self, data, prediction):
return self.mse(data,prediction)
def plot_autocorrelation(self, data_set, lag):
        autocorrelations = np.zeros(lag)
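# --- Usage sketch (not from the original source; the class above is truncated) ---
# Fits the ARMA helper on the training split and scores it on the validation split;
# the p and q values are illustrative assumptions.
if __name__ == '__main__':
    model = ARMA(p=4, q=4)
    model.set_training_data_set(training_set_values)
    model.set_validation_data_set(validation_set_values)
    prediction = model.get_prediction(validation_set_values)
    print('validation MSE:', model.get_mse(validation_set_values, prediction))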
"""
Coordinate-based meta-analysis estimators
"""
import warnings
import multiprocessing as mp
import numpy as np
import nibabel as nib
from scipy import ndimage, special
from nilearn.masking import apply_mask, unmask
from statsmodels.sandbox.stats.multicomp import multipletests
from .base import CBMAEstimator
from .kernel import MKDAKernel, KDAKernel
from ..base import MetaResult
from ...utils import vox2mm, null_to_p, p_to_z
from ...due import due, Doi
@due.dcite(Doi('10.1093/scan/nsm015'), description='Introduces MKDA.')
class MKDADensity(CBMAEstimator):
"""
Multilevel kernel density analysis- Density analysis
"""
def __init__(self, dataset, ids, kernel_estimator=MKDAKernel, **kwargs):
kernel_args = {k.split('kernel__')[1]: v for k, v in kwargs.items()
if k.startswith('kernel__')}
kwargs = {k: v for k, v in kwargs.items() if not k.startswith('kernel__')}
self.mask = dataset.mask
self.coordinates = dataset.coordinates.loc[dataset.coordinates['id'].isin(ids)]
self.kernel_estimator = kernel_estimator
self.kernel_arguments = kernel_args
self.ids = ids
self.voxel_thresh = None
self.clust_thresh = None
self.n_iters = None
self.results = None
def fit(self, voxel_thresh=0.01, q=0.05, n_iters=1000, n_cores=4):
null_ijk = np.vstack(np.where(self.mask.get_data())).T
self.voxel_thresh = voxel_thresh
self.clust_thresh = q
self.n_iters = n_iters
k_est = self.kernel_estimator(self.coordinates, self.mask)
ma_maps = k_est.transform(self.ids, **self.kernel_arguments)
# Weight each SCM by square root of sample size
ids_df = self.coordinates.groupby('id').first()
if 'n' in ids_df.columns and 'inference' not in ids_df.columns:
ids_n = ids_df.loc[self.ids, 'n'].astype(float).values
weight_vec = np.sqrt(ids_n)[:, None] / np.sum(np.sqrt(ids_n))
elif 'n' in ids_df.columns and 'inference' in ids_df.columns:
ids_n = ids_df.loc[self.ids, 'n'].astype(float).values
ids_inf = ids_df.loc[self.ids, 'inference'].map({'ffx': 0.75,
'rfx': 1.}).values
weight_vec = (np.sqrt(ids_n)[:, None] * ids_inf[:, None]) / \
np.sum(np.sqrt(ids_n) * ids_inf)
else:
weight_vec = np.ones((len(ma_maps), 1))
ma_maps = apply_mask(ma_maps, self.mask)
ma_maps *= weight_vec
of_map = np.sum(ma_maps, axis=0)
of_map = unmask(of_map, self.mask)
vthresh_of_map = of_map.get_data().copy()
vthresh_of_map[vthresh_of_map < voxel_thresh] = 0
rand_idx = np.random.choice(null_ijk.shape[0],
size=(self.coordinates.shape[0], n_iters))
rand_ijk = null_ijk[rand_idx, :]
iter_ijks = np.split(rand_ijk, rand_ijk.shape[1], axis=1)
iter_df = self.coordinates.copy()
conn = np.ones((3, 3, 3))
# Define parameters
iter_conn = [conn] * n_iters
iter_wv = [weight_vec] * n_iters
iter_dfs = [iter_df] * n_iters
params = zip(iter_ijks, iter_dfs, iter_wv, iter_conn)
pool = mp.Pool(n_cores)
perm_results = pool.map(self._perm, params)
pool.close()
perm_max_values, perm_clust_sizes = zip(*perm_results)
percentile = 100 * (1 - q)
# Cluster-level FWE
# Determine size of clusters in [1 - clust_thresh]th percentile (e.g.
# 95th)
clust_size_thresh = np.percentile(perm_clust_sizes, percentile)
cfwe_of_map = np.zeros(of_map.shape)
labeled_matrix = ndimage.measurements.label(vthresh_of_map, conn)[0]
clust_sizes = [np.sum(labeled_matrix == val) for val in np.unique(labeled_matrix)]
for i, clust_size in enumerate(clust_sizes):
if clust_size >= clust_size_thresh and i > 0:
clust_idx = np.where(labeled_matrix == i)
cfwe_of_map[clust_idx] = vthresh_of_map[clust_idx]
cfwe_of_map = apply_mask(nib.Nifti1Image(cfwe_of_map, of_map.affine),
self.mask)
# Voxel-level FWE
# Determine OF values in [1 - clust_thresh]th percentile (e.g. 95th)
vfwe_thresh = np.percentile(perm_max_values, percentile)
vfwe_of_map = of_map.get_data().copy()
vfwe_of_map[vfwe_of_map < vfwe_thresh] = 0.
vfwe_of_map = apply_mask(nib.Nifti1Image(vfwe_of_map, of_map.affine),
self.mask)
vthresh_of_map = apply_mask(nib.Nifti1Image(vthresh_of_map,
of_map.affine),
self.mask)
self.results = MetaResult(vthresh=vthresh_of_map, cfwe=cfwe_of_map,
vfwe=vfwe_of_map, mask=self.mask)
def _perm(self, params):
iter_ijk, iter_df, weight_vec, conn = params
iter_ijk = np.squeeze(iter_ijk)
iter_df[['i', 'j', 'k']] = iter_ijk
k_est = self.kernel_estimator(iter_df, self.mask)
iter_ma_maps = k_est.transform(self.ids, **self.kernel_arguments)
iter_ma_maps = apply_mask(iter_ma_maps, self.mask)
iter_ma_maps *= weight_vec
iter_of_map = np.sum(iter_ma_maps, axis=0)
iter_max_value = np.max(iter_of_map)
iter_of_map = unmask(iter_of_map, self.mask)
vthresh_iter_of_map = iter_of_map.get_data().copy()
vthresh_iter_of_map[vthresh_iter_of_map < self.voxel_thresh] = 0
labeled_matrix = ndimage.measurements.label(vthresh_iter_of_map, conn)[0]
clust_sizes = [np.sum(labeled_matrix == val) for val in np.unique(labeled_matrix)]
clust_sizes = clust_sizes[1:] # First cluster is zeros in matrix
if clust_sizes:
iter_max_cluster = np.max(clust_sizes)
else:
iter_max_cluster = 0
return iter_max_value, iter_max_cluster
@due.dcite(Doi('10.1093/scan/nsm015'), description='Introduces MKDA.')
class MKDAChi2(CBMAEstimator):
"""
Multilevel kernel density analysis- Chi-square analysis
"""
def __init__(self, dataset, ids, ids2=None, kernel_estimator=MKDAKernel,
**kwargs):
kernel_args = {k.split('kernel__')[1]: v for k, v in kwargs.items()
if k.startswith('kernel__')}
kwargs = {k: v for k, v in kwargs.items() if not k.startswith('kernel__')}
self.mask = dataset.mask
# Check kernel estimator (must be MKDA)
k_est = kernel_estimator(dataset.coordinates, self.mask)
assert isinstance(k_est, MKDAKernel)
self.kernel_estimator = kernel_estimator
self.kernel_arguments = kernel_args
if ids2 is None:
ids2 = list(set(dataset.coordinates['id'].values) - set(ids))
all_ids = ids + ids2
self.coordinates = dataset.coordinates.loc[dataset.coordinates['id'].isin(all_ids)]
self.ids = ids
self.ids2 = ids2
self.voxel_thresh = None
self.corr = None
self.n_iters = None
self.results = None
def fit(self, voxel_thresh=0.01, q=0.05, corr='FWE', n_iters=5000,
prior=0.5, n_cores=4):
self.voxel_thresh = voxel_thresh
self.corr = corr
self.n_iters = n_iters
k_est = self.kernel_estimator(self.coordinates, self.mask)
ma_maps1 = k_est.transform(self.ids, masked=True,
**self.kernel_arguments)
ma_maps2 = k_est.transform(self.ids2, masked=True,
**self.kernel_arguments)
# Calculate different count variables
eps = np.spacing(1)
n_selected = len(self.ids)
n_unselected = len(self.ids2)
n_mappables = n_selected + n_unselected
# Transform MA maps to 1d arrays
ma_maps_all = np.vstack((ma_maps1, ma_maps2))
n_selected_active_voxels = np.sum(ma_maps1, axis=0)
n_unselected_active_voxels = np.sum(ma_maps2, axis=0)
# Nomenclature for variables below: p = probability,
# F = feature present, g = given, U = unselected, A = activation.
# So, e.g., pAgF = p(A|F) = probability of activation
# in a voxel if we know that the feature is present in a study.
pF = (n_selected * 1.0) / n_mappables
pA = np.array(np.sum(ma_maps_all, axis=0) / n_mappables).squeeze()
# Conditional probabilities
pAgF = n_selected_active_voxels * 1.0 / n_selected
pAgU = n_unselected_active_voxels * 1.0 / n_unselected
pFgA = pAgF * pF / pA
# Recompute conditionals with uniform prior
pAgF_prior = prior * pAgF + (1 - prior) * pAgU
pFgA_prior = pAgF * prior / pAgF_prior
# One-way chi-square test for consistency of activation
pAgF_chi2_vals = self._one_way(np.squeeze(n_selected_active_voxels),
n_selected)
pAgF_p_vals = special.chdtrc(1, pAgF_chi2_vals)
pAgF_sign = np.sign(n_selected_active_voxels - np.mean(n_selected_active_voxels))
pAgF_z = p_to_z(pAgF_p_vals, tail='two') * pAgF_sign
# Two-way chi-square for specificity of activation
cells = np.squeeze(
np.array([[n_selected_active_voxels, n_unselected_active_voxels],
[n_selected - n_selected_active_voxels,
n_unselected - n_unselected_active_voxels]]).T)
pFgA_chi2_vals = self._two_way(cells)
pFgA_p_vals = special.chdtrc(1, pFgA_chi2_vals)
pFgA_p_vals[pFgA_p_vals < 1e-240] = 1e-240
pFgA_sign = np.sign(pAgF - pAgU).ravel()
pFgA_z = p_to_z(pFgA_p_vals, tail='two') * pFgA_sign
images = {
'pA': pA,
'pAgF': pAgF,
'pFgA': pFgA,
('pAgF_given_pF=%0.2f' % prior): pAgF_prior,
('pFgA_given_pF=%0.2f' % prior): pFgA_prior,
'consistency_z': pAgF_z,
'specificity_z': pFgA_z,
'consistency_chi2': pAgF_chi2_vals,
'specificity_chi2': pFgA_chi2_vals}
if corr == 'FWE':
pool = mp.Pool(n_cores)
iter_dfs = [self.coordinates.copy()] * n_iters
null_ijk = np.vstack(np.where(self.mask.get_data())).T
rand_idx = np.random.choice(null_ijk.shape[0],
size=(self.coordinates.shape[0],
n_iters))
rand_ijk = null_ijk[rand_idx, :]
iter_ijks = np.split(rand_ijk, rand_ijk.shape[1], axis=1)
params = zip(iter_dfs, iter_ijks, range(n_iters))
perm_results = pool.map(self._perm, params)
pool.close()
pAgF_null_chi2_dist, pFgA_null_chi2_dist = zip(*perm_results)
# pAgF_FWE
pAgF_null_chi2_dist = np.squeeze(pAgF_null_chi2_dist)
np.savetxt('null_dist.txt', pAgF_null_chi2_dist)
pAgF_p_FWE = np.empty_like(pAgF_chi2_vals).astype(float)
for voxel in range(pFgA_chi2_vals.shape[0]):
pAgF_p_FWE[voxel] = null_to_p(pAgF_chi2_vals[voxel],
pAgF_null_chi2_dist,
tail='upper')
# Crop p-values of 0 or 1 to nearest values that won't evaluate to
# 0 or 1. Prevents inf z-values.
pAgF_p_FWE[pAgF_p_FWE < eps] = eps
pAgF_p_FWE[pAgF_p_FWE > (1. - eps)] = 1. - eps
pAgF_z_FWE = p_to_z(pAgF_p_FWE, tail='two') * pAgF_sign
images['consistency_p_FWE'] = pAgF_p_FWE
images['consistency_z_FWE'] = pAgF_z_FWE
# pFgA_FWE
pFgA_null_chi2_dist = np.squeeze(pFgA_null_chi2_dist)
pFgA_p_FWE = np.empty_like(pFgA_chi2_vals).astype(float)
for voxel in range(pFgA_chi2_vals.shape[0]):
pFgA_p_FWE[voxel] = null_to_p(pFgA_chi2_vals[voxel],
pFgA_null_chi2_dist,
tail='upper')
# Crop p-values of 0 or 1 to nearest values that won't evaluate to
# 0 or 1. Prevents inf z-values.
pFgA_p_FWE[pFgA_p_FWE < eps] = eps
pFgA_p_FWE[pFgA_p_FWE > (1. - eps)] = 1. - eps
pFgA_z_FWE = p_to_z(pFgA_p_FWE, tail='two') * pFgA_sign
images['specificity_p_FWE'] = pFgA_p_FWE
images['specificity_z_FWE'] = pFgA_z_FWE
elif corr == 'FDR':
_, pAgF_p_FDR, _, _ = multipletests(pAgF_p_vals, alpha=0.05,
method='fdr_bh',
is_sorted=False,
returnsorted=False)
pAgF_z_FDR = p_to_z(pAgF_p_FDR, tail='two') * pAgF_sign
images['consistency_z_FDR'] = pAgF_z_FDR
_, pFgA_p_FDR, _, _ = multipletests(pFgA_p_vals, alpha=0.05,
method='fdr_bh',
is_sorted=False,
returnsorted=False)
pFgA_z_FDR = p_to_z(pFgA_p_FDR, tail='two') * pFgA_sign
images['specificity_z_FDR'] = pFgA_z_FDR
self.results = MetaResult(mask=self.mask, **images)
def _perm(self, params):
iter_df, iter_ijk, iter_ = params
if iter_ % 500 == 0:
print('Now running iteration {0}'.format(iter_))
iter_ijk = np.squeeze(iter_ijk)
iter_df[['i', 'j', 'k']] = iter_ijk
k_est = self.kernel_estimator(iter_df, self.mask)
temp_ma_maps = k_est.transform(self.ids, masked=True,
**self.kernel_arguments)
temp_ma_maps2 = k_est.transform(self.ids2, masked=True,
**self.kernel_arguments)
n_selected = temp_ma_maps.shape[0]
n_unselected = temp_ma_maps2.shape[0]
n_selected_active_voxels = np.sum(temp_ma_maps, axis=0)
n_unselected_active_voxels = np.sum(temp_ma_maps2, axis=0)
# Conditional probabilities
pAgF = n_selected_active_voxels * 1.0 / n_selected
pAgU = n_unselected_active_voxels * 1.0 / n_unselected
# One-way chi-square test for consistency of activation
pAgF_chi2_vals = self._one_way(np.squeeze(n_selected_active_voxels),
n_selected)
iter_pAgF_chi2 = np.max(pAgF_chi2_vals)
# Two-way chi-square for specificity of activation
cells = np.squeeze(
np.array([[n_selected_active_voxels, n_unselected_active_voxels],
[n_selected - n_selected_active_voxels,
n_unselected - n_unselected_active_voxels]]).T)
pFgA_chi2_vals = self._two_way(cells)
iter_pFgA_chi2 = np.max(pFgA_chi2_vals)
return iter_pAgF_chi2, iter_pFgA_chi2
def _one_way(self, data, n):
""" One-way chi-square test of independence.
Takes a 1D array as input and compares activation at each voxel to
proportion expected under a uniform distribution throughout the array.
Note that if you're testing activation with this, make sure that only
valid voxels (e.g., in-mask gray matter voxels) are included in the
array, or results won't make any sense!
"""
term = data.astype('float64')
no_term = n - term
t_exp = np.mean(term, 0)
t_exp = np.array([t_exp, ] * data.shape[0])
nt_exp = n - t_exp
t_mss = (term - t_exp) ** 2 / t_exp
nt_mss = (no_term - nt_exp) ** 2 / nt_exp
chi2 = t_mss + nt_mss
return chi2
def _two_way(self, cells):
""" Two-way chi-square test of independence.
Takes a 3D array as input: N(voxels) x 2 x 2, where the last two
        dimensions are the contingency table for each of N voxels. Returns an
        array of chi-square values.
"""
# Mute divide-by-zero warning for bad voxels since we account for that
# later
warnings.simplefilter("ignore", RuntimeWarning)
cells = cells.astype('float64') # Make sure we don't overflow
total = np.apply_over_axes(np.sum, cells, [1, 2]).ravel()
chi_sq = np.zeros(cells.shape, dtype='float64')
for i in range(2):
for j in range(2):
exp = np.sum(cells[:, i, :], 1).ravel() * \
np.sum(cells[:, :, j], 1).ravel() / total
bad_vox = np.where(exp == 0)[0]
chi_sq[:, i, j] = (cells[:, i, j] - exp) ** 2 / exp
chi_sq[bad_vox, i, j] = 1.0 # Set p-value for invalid voxels to 1
chi_sq = np.apply_over_axes(np.sum, chi_sq, [1, 2]).ravel()
return chi_sq
@due.dcite(Doi('10.1016/S1053-8119(03)00078-8'),
description='Introduces the KDA algorithm.')
@due.dcite(Doi('10.1016/j.neuroimage.2004.03.052'),
description='Also introduces the KDA algorithm.')
class KDA(CBMAEstimator):
"""
Kernel density analysis
"""
def __init__(self, dataset, ids, ids2=None, kernel_estimator=KDAKernel,
**kwargs):
kernel_args = {k.split('kernel__')[1]: v for k, v in kwargs.items()
if k.startswith('kernel__')}
kwargs = {k: v for k, v in kwargs.items() if not k.startswith('kernel__')}
self.mask = dataset.mask
self.coordinates = dataset.coordinates.loc[dataset.coordinates['id'].isin(ids)]
self.kernel_estimator = kernel_estimator
self.kernel_arguments = kernel_args
self.ids = ids
self.clust_thresh = None
self.n_iters = None
self.images = {}
def fit(self, q=0.05, n_iters=10000, n_cores=4):
null_ijk = np.vstack(np.where(self.mask.get_data())).T
self.clust_thresh = q
self.n_iters = n_iters
k_est = self.kernel_estimator(self.coordinates, self.mask)
ma_maps = k_est.transform(self.ids, masked=True, **self.kernel_arguments)
of_map = np.sum(ma_maps, axis=0)
rand_idx = np.random.choice(null_ijk.shape[0],
size=(self.coordinates.shape[0], n_iters))
rand_ijk = null_ijk[rand_idx, :]
iter_ijks = np.split(rand_ijk, rand_ijk.shape[1], axis=1)
iter_df = self.coordinates.copy()
# Define parameters
iter_dfs = [iter_df] * n_iters
params = zip(iter_ijks, iter_dfs)
pool = mp.Pool(n_cores)
perm_max_values = pool.map(self._perm, params)
pool.close()
percentile = 100 * (1 - q)
# Determine OF values in [1 - clust_thresh]th percentile (e.g. 95th)
        vfwe_thresh = np.percentile(perm_max_values, percentile)
## Script to run graph partitioning experiment on Wiki dataset
# Load packages
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib
import time
import ot
from scipy import linalg
from scipy import sparse
import gromovWassersteinAveraging as gwa
import spectralGW as sgw
from geodesicVisualization import *
import json
# Load the S-GWL code
import DataIO as DataIO
import EvaluationMeasure as Eval
import GromovWassersteinGraphToolkit as GwGt
from GromovWassersteinGraphToolkit import *
import pickle
import warnings
# Load modules for network partitioning experiments
import community
from networkx.algorithms.community import greedy_modularity_communities
from networkx.algorithms.community.asyn_fluid import asyn_fluidc
from networkx.algorithms.community.quality import performance, coverage, modularity
from sklearn import metrics
from infomap import Infomap
# Breakpoint analysis package
# import ruptures as rpt
from scipy.cluster.hierarchy import dendrogram, linkage, cut_tree
from scipy.signal import find_peaks
warnings.filterwarnings("ignore")
def graph_partition_gd2(cost_s, p_s, p_t,idx2node, ot_hyperpara, trans0=None):
"""
** May 19, 2020: Gradient descent version of graph_partition
Achieve a single graph partition via calculating Gromov-Wasserstein discrepancy
between the target graph and proposed one
Args:
cost_s: (n_s, n_s) adjacency matrix of source graph
p_s: (n_s, 1) the distribution of source nodes
p_t: (n_t, 1) the distribution of target nodes
idx2node: a dictionary {key = idx of row in cost, value = name of node}
ot_hyperpara: a dictionary of hyperparameters
Returns:
sub_costs: a dictionary {key: cluster idx,
value: sub cost matrices}
sub_probs: a dictionary {key: cluster idx,
value: sub distribution of nodes}
sub_idx2nodes: a dictionary {key: cluster idx,
value: a dictionary mapping indices to nodes' names
trans: (n_s, n_t) the optimal transport
"""
cost_t = np.diag(p_t[:, 0])
cost_s = np.asarray(cost_s)
# cost_t = 1 / (1 + cost_t)
trans, log = gwa.gromov_wasserstein_asym_fixed_initialization(cost_s, cost_t, p_s.flatten(), p_t.flatten(), trans0)
d_gw = log['gw_dist']
sub_costs, sub_probs, sub_idx2nodes = node_cluster_assignment(cost_s, trans, p_s, p_t, idx2node)
return sub_costs, sub_probs, sub_idx2nodes, trans, d_gw
def get_partition(coup):
est_idx = np.argmax(coup, axis=1)
num_clusters = np.max(est_idx)
partition = []
for j in range(num_clusters+1):
partition.append(set(np.argwhere(est_idx == j).T[0]))
return partition
# dictionaries for holding results
scores = {}
runtimes = {}
avetimes = {}
# load data
f = open('data/wikicats.p', 'rb')
database = pickle.load(f)
f.close()
dG = database['G']
labels = database['labels']
num_nodes = dG.number_of_nodes()
num_partitions = len(np.unique(labels))
idx2node = {}
for n in dG.nodes:
idx2node[n] = n
G = dG.to_undirected()
# Load precomputed noisy version
save_name = "wiki_sym_noise.txt"
with open(save_name, "rb") as fp:
nG = pickle.load(fp)
save_name = "wiki_asym_noise.txt"
with open(save_name, "rb") as fp:
ndG = pickle.load(fp)
print('---Data files loaded. Computing...\n')
def process_sgwl_wiki(cost,database,num_nodes,num_partitions,verbose=False):
p_s = np.zeros((num_nodes, 1))
p_s[:, 0] = np.sum(cost, axis=1) ** .001
p_s /= np.sum(p_s)
p_t = GwGt.estimate_target_distribution({0: p_s}, dim_t=num_partitions)
ot_dict = {'loss_type': 'L2', # the key hyperparameters of GW distance
'ot_method': 'proximal',
'beta': 2e-7,
'outer_iteration': 300,
# outer, inner iteration, error bound of optimal transport
'iter_bound': 1e-30,
'inner_iteration': 1,
'sk_bound': 1e-30,
'node_prior': 0,
'max_iter': 200, # iteration and error bound for calcuating barycenter
'cost_bound': 1e-16,
'update_p': False, # optional updates of source distribution
'lr': 0,
'alpha': 0}
sub_costs, sub_probs, sub_idx2nodes, trans, d_gw = graph_partition_gd2(cost,
p_s,
p_t,
idx2node,
ot_dict)
est_idx = np.argmax(trans, axis=1)
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
if verbose:
print('Mutual information score = {:3.3f}'.format(mutual_info))
return mutual_info, d_gw, trans
###########################################################
###########################################################
# Method: Fluid communities (symmetrized)
###########################################################
# Raw data
if not nx.is_connected(G):
#print('---Fluid community requires connected graph, skipping raw version---')
scores['fluid-symmetrized-raw'] = 'failed'
runtimes['fluid-symmetrized-raw'] = 'failed'
else:
time_s = time.time()
comp = asyn_fluidc(G.to_undirected(), k=num_partitions)
list_nodes = [frozenset(c) for c in comp]
est_idx = np.zeros((num_nodes,))
for i in range(len(list_nodes)):
for idx in list_nodes[i]:
est_idx[idx] = i
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['fluid-symmetrized-raw'] = mutual_info
runtimes['fluid-symmetrized-raw'] = runtime
# Noisy data
if not nx.is_connected(nG):
print('---Fluid community requires connected graph, skipping noisy version---')
scores['fluid-symmetrized-noisy'] = 'failed'
runtimes['fluid-symmetrized-noisy'] = 'failed'
else:
time_s = time.time()
comp = asyn_fluidc(nG.to_undirected(), k=num_partitions)
list_nodes = [frozenset(c) for c in comp]
est_idx = np.zeros((num_nodes,))
for i in range(len(list_nodes)):
for idx in list_nodes[i]:
est_idx[idx] = i
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['fluid-symmetrized-noisy'] = mutual_info
runtimes['fluid-symmetrized-noisy'] = runtime
###########################################################
###########################################################
# Method: FastGreedy (symmetrized)
###########################################################
# Raw
time_s = time.time()
list_nodes = list(greedy_modularity_communities(G))
est_idx = np.zeros((num_nodes,))
for i in range(len(list_nodes)):
for idx in list_nodes[i]:
est_idx[idx] = i
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['fastgreedy-symmetrized-raw'] = mutual_info
runtimes['fastgreedy-symmetrized-raw'] = runtime
# Noisy
time_s = time.time()
list_nodes = list(greedy_modularity_communities(nG))
est_idx = np.zeros((num_nodes,))
for i in range(len(list_nodes)):
for idx in list_nodes[i]:
est_idx[idx] = i
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['fastgreedy-symmetrized-noisy'] = mutual_info
runtimes['fastgreedy-symmetrized-noisy'] = runtime
###########################################################
###########################################################
# Method: Louvain (symmetrized)
###########################################################
# Raw
time_s = time.time()
partition = community.best_partition(G)
est_idx = np.zeros((num_nodes,))
for com in set(partition.values()):
list_nodes = [nodes for nodes in partition.keys()
if partition[nodes] == com]
for idx in list_nodes:
est_idx[idx] = com
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['louvain-symmetrized-raw'] = mutual_info
runtimes['louvain-symmetrized-raw'] = runtime
# Noisy
time_s = time.time()
partition = community.best_partition(nG)
est_idx = np.zeros((num_nodes,))
for com in set(partition.values()):
list_nodes = [nodes for nodes in partition.keys()
if partition[nodes] == com]
for idx in list_nodes:
est_idx[idx] = com
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['louvain-symmetrized-noisy'] = mutual_info
runtimes['louvain-symmetrized-noisy'] = runtime
###########################################################
###########################################################
# Method: Infomap (symmetrized)
###########################################################
# Raw
time_s = time.time()
im = Infomap()
for node in G.nodes:
im.add_node(node)
for edge in G.edges:
im.add_link(edge[0], edge[1])
im.add_link(edge[1], edge[0])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
if node.is_leaf:
est_idx[node.node_id] = node.module_id
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['infomap-symmetrized-raw'] = mutual_info
runtimes['infomap-symmetrized-raw'] = runtime
# Noisy
print('---Running Infomap with noisy data---\n')
time_s = time.time()
im = Infomap()
for node in nG.nodes:
im.add_node(node)
for edge in nG.edges:
im.add_link(edge[0], edge[1])
im.add_link(edge[1], edge[0])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
if node.is_leaf:
est_idx[node.node_id] = node.module_id
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['infomap-symmetrized-noisy'] = mutual_info
runtimes['infomap-symmetrized-noisy'] = runtime
###########################################################
###########################################################
# Method: Infomap (asymmetric)
###########################################################
# Raw
time_s = time.time()
im = Infomap()
for node in dG.nodes:
im.add_node(node)
for edge in dG.edges:
im.add_link(edge[0], edge[1])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
if node.is_leaf:
est_idx[node.node_id] = node.module_id
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['infomap-asymmetric-raw'] = mutual_info
runtimes['infomap-asymmetric-raw'] = runtime
# Noisy
print('---Running Infomap with noisy data---\n')
time_s = time.time()
im = Infomap()
for node in ndG.nodes:
im.add_node(node)
for edge in ndG.edges:
im.add_link(edge[0], edge[1])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
if node.is_leaf:
est_idx[node.node_id] = node.module_id
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['infomap-asymmetric-noisy'] = mutual_info
runtimes['infomap-asymmetric-noisy'] = runtime
###########################################################
###########################################################
# Method: GWL, symmetrized
###########################################################
# Raw
start = time.time()
cost = nx.adjacency_matrix(G).toarray()
mutual_info,_,_ = process_sgwl_wiki(cost,database,num_nodes,num_partitions);
end = time.time()
scores['gwl-symmetrized-raw'] = mutual_info
runtimes['gwl-symmetrized-raw'] = end-start
# Noisy
start = time.time()
cost = nx.adjacency_matrix(nG).toarray()
mutual_info,_,_ = process_sgwl_wiki(cost,database,num_nodes,num_partitions);
end = time.time()
scores['gwl-symmetrized-noisy'] = mutual_info
runtimes['gwl-symmetrized-noisy'] = end-start
###########################################################
###########################################################
# Method: GWL, asymmetric
###########################################################
# Raw
start = time.time()
cost = nx.adjacency_matrix(dG).toarray()
mutual_info,_,_ = process_sgwl_wiki(cost,database,num_nodes,num_partitions);
end = time.time()
scores['gwl-asymmetric-raw'] = mutual_info
runtimes['gwl-asymmetric-raw'] = end-start
# Noisy
start = time.time()
cost = nx.adjacency_matrix(ndG).toarray()
mutual_info,_,_ = process_sgwl_wiki(cost,database,num_nodes,num_partitions);
end = time.time()
scores['gwl-asymmetric-noisy'] = mutual_info
runtimes['gwl-asymmetric-noisy'] = end-start
###########################################################
###########################################################
# Method: SpecGWL
###########################################################
# Note that the GWL pipeline above takes the true number of clusters as input.
# We now show how this number is estimated in the SpecGWL pipeline for
# a bona fide unsupervised partitioning method.
def t_selection_pipeline_undirected_wiki(G,ts,num_partitions,fraction_t_to_keep=0.25):
mis = []
coups = []
d_gws = []
rt = []
for t in ts:
start = time.time()
cost = sgw.undirected_normalized_heat_kernel(G,t)
mutual_info, d_gw, coup = process_sgwl_wiki(cost,database,num_nodes,num_partitions)
mis.append(mutual_info)
coups.append(coup)
d_gws.append(d_gw)
end = time.time()
rt.append(end-start)
print('Couplings Computed')
coverages = []
for j in range(len(ts)):
coup = coups[j]
partition = get_partition(coup)
coverages.append(coverage(G,partition))
num_to_keep = int(np.round(fraction_t_to_keep*len(ts)))
good_t_max = ts[np.argsort(coverages)][-num_to_keep:]
good_t_grad = ts[np.argsort(np.abs(np.gradient(coverages)))][:num_to_keep]
return mis, coups, d_gws, good_t_max, good_t_grad, rt
def t_selection_pipeline_directed_wiki(G,ts,num_partitions,fraction_t_to_keep=0.25):
mis = []
coups = []
d_gws = []
rt = []
for t in ts:
start = time.time()
cost = sgw.directed_heat_kernel(G,t)
mutual_info, d_gw, coup = process_sgwl_wiki(cost,database,num_nodes,num_partitions)
mis.append(mutual_info)
coups.append(coup)
d_gws.append(d_gw)
end = time.time()
rt.append(end-start)
print('Couplings Computed')
coverages = []
for j in range(len(ts)):
coup = coups[j]
partition = get_partition(coup)
coverages.append(coverage(G,partition))
num_to_keep = int(np.round(fraction_t_to_keep*len(ts)))
good_t_max = ts[np.argsort(coverages)][-num_to_keep:]
good_t_grad = ts[np.argsort(np.abs(np.gradient(coverages)))][:num_to_keep]
return mis, coups, d_gws, good_t_max, good_t_grad, rt
# Keeping t fixed, do a grid search to estimate the number of clusters
num_clusts = list(range(5,30))
t = 20
cost = sgw.undirected_normalized_heat_kernel(G,t)
d_gws = []
mis = []
coverages = []
modularities = []
for j in num_clusts:
mutual_info, d_gw, coup = process_sgwl_wiki(cost,database,num_nodes,j)
partition = get_partition(coup)
mis.append(mutual_info)
d_gws.append(d_gw)
coverages.append(coverage(G,partition))
modularities.append(modularity(G,partition))
# Estimate number of clusters
estimated_clusters_raw_sym = num_clusts[np.argmax(modularities)]
print('Number of Clusters:',estimated_clusters_raw_sym)
# Now perform modularity/coverage maximizing pipeline
ts = np.linspace(5,50,20)
mis, coups, d_gws, good_t_max, good_t_grad, rt = t_selection_pipeline_undirected_wiki(G,ts,estimated_clusters_raw_sym)
coverages = []
for j in range(len(ts)):
coup = coups[j]
partition = get_partition(coup)
coverages.append(coverage(G,partition))
modularities = []
for j in range(len(ts)):
coup = coups[j]
partition = get_partition(coup)
modularities.append(modularity(G,partition))
wiki_raw_sym_ami = mis[np.argmax(coverages)]
print('AMI for WIKI, Raw, Sym:',wiki_raw_sym_ami)
print('Occurs at t-value:',ts[np.argmax(coverages)])
scores['specgwl-symmetric-raw'] = wiki_raw_sym_ami
runtimes['specgwl-symmetric-raw'] = rt[np.argmax(coverages)]
## Repeat for undirected, noisy data
num_clusts = list(range(5,30))
t = 20
cost = sgw.undirected_normalized_heat_kernel(nG,t)
d_gws = []
mis = []
coverages = []
modularities = []
for j in num_clusts:
mutual_info, d_gw, coup = process_sgwl_wiki(cost,database,num_nodes,j)
partition = get_partition(coup)
mis.append(mutual_info)
d_gws.append(d_gw)
coverages.append(coverage(nG,partition))
modularities.append(modularity(nG,partition))
estimated_clusters_noisy_sym = num_clusts[np.argmax(modularities)]
print('Number of Clusters:',estimated_clusters_noisy_sym)
ts = np.linspace(5,20,20)
mis, coups, d_gws, good_t_max, good_t_grad, rt = t_selection_pipeline_undirected_wiki(nG,ts,estimated_clusters_noisy_sym)
coverages = []
for j in range(len(ts)):
coup = coups[j]
partition = get_partition(coup)
coverages.append(coverage(nG,partition))
wiki_noisy_sym_ami = mis[np.argmax(coverages)]
print('AMI for WIKI, Noisy, Sym:',wiki_noisy_sym_ami)
print('Occurs at t-value:',ts[np.argmax(coverages)])
scores['specgwl-symmetric-noisy'] = wiki_noisy_sym_ami
runtimes['specgwl-symmetric-noisy'] = rt[np.argmax(coverages)]
## Repeat for directed, raw data
num_clusts = list(range(5,30))
t = 20
cost = sgw.directed_heat_kernel(dG,t)
d_gws = []
mis = []
coverages = []
modularities = []
for j in num_clusts:
mutual_info, d_gw, coup = process_sgwl_wiki(cost,database,num_nodes,j)
partition = get_partition(coup)
mis.append(mutual_info)
d_gws.append(d_gw)
coverages.append(coverage(dG,partition))
modularities.append(modularity(dG,partition))
estimated_clusters_raw_asym = num_clusts[np.argmax(modularities)]
print('Number of Clusters:',estimated_clusters_raw_asym)
ts = np.linspace(5,20,20)
mis, coups, d_gws, good_t_max, good_t_grad, rt = t_selection_pipeline_directed_wiki(dG,ts,estimated_clusters_raw_asym)
coverages = []
for j in range(len(ts)):
coup = coups[j]
partition = get_partition(coup)
coverages.append(coverage(dG,partition))
wiki_raw_asym_ami = mis[np.argmax(coverages)]
print('AMI for WIKI, Raw, Asym:',wiki_raw_asym_ami)
print('Occurs at t-value:',ts[np.argmax(coverages)])
#!/usr/bin/env python
"""Tests for `rawtools` package."""
import numpy as np
import pytest
from numpy import uint8, uint16
from rawtools import rawtools
DIMS = (4, 5)
@pytest.fixture
def slice_uint8():
"""Sample uint8 slice"""
return np.rint(np.arange(0, 20, dtype=uint8).reshape(DIMS))
@pytest.fixture
def slice_uint16():
"""Sample uint16 slice"""
return np.rint(np.arange(0, 20, dtype=uint16).reshape(DIMS))
@pytest.fixture
def slice_uint16_high_variance():
"""Sample uint16 slice with variable values"""
return np.array([-1, 0, 100, 1000, 5000, 14830, 50321, 65535, 65536], dtype=uint16)
def test_scale_uint8(slice_uint8):
"""Test scaling a unsigned 8-bit integer array to own bounds."""
from rawtools.convert import scale
xs = np.arange(0, 20, dtype=uint8).reshape(DIMS)
lbound = np.iinfo(uint8).min
ubound = np.iinfo(uint8).max
scaled_slice = scale(xs, lbound, ubound, lbound, ubound)
np.testing.assert_array_equal(scaled_slice, slice_uint8)
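# A minimal reference sketch of the linear rescaling these tests assume; the actual
# rawtools.convert.scale implementation may differ in its signature and edge handling.
def _reference_scale(xs, lbound, ubound, new_lbound, new_ubound):
    """Map values linearly from [lbound, ubound] onto [new_lbound, new_ubound]."""
    xs = np.asarray(xs, dtype=float)
    return (xs - lbound) / (ubound - lbound) * (new_ubound - new_lbound) + new_lbound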
def test_scale_uint16_to_uint8(slice_uint16):
"""Test scaling an unsigned 16-bit integer array to an unsigned 8-bit array's bounds."""
from rawtools.convert import scale
    lbound = np.iinfo(uint16).min
# pdf.py
import os.path as osp
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import astropy.units as au
import astropy.constants as ac
import pandas as pd
from mpl_toolkits.axes_grid1 import ImageGrid
from ..plt_tools.cmap import cmap_apply_alpha
from ..util.scp_to_pc import scp_to_pc
from ..load_sim import LoadSim
class PDF:
bins=dict(nH=np.logspace(-2,5,71),
nHI=np.logspace(-2,5,71),
nH2=np.logspace(-2,5,71),
nHII=np.logspace(-2,5,71),
T=np.logspace(0,5,51),
pok=np.logspace(0,7,71),
chi_PE_tot=np.logspace(-4,5,91),
chi_FUV_tot=np.logspace(-4,5,91),
Bmag=np.logspace(-7,-4,91),
Erad_LyC=np.logspace(-17,-8,91),
)
@LoadSim.Decorators.check_pickle
def read_pdf2d(self, num,
bin_fields=None, bins=None, prefix='pdf2d',
savdir=None, force_override=False):
if bins is not None:
self.bins = bins
bin_fields_def = [['nH', 'pok'], ['nH', 'T']]
if bin_fields is None:
bin_fields = bin_fields_def
ds = self.load_vtk(num=num)
res = dict()
for bf in bin_fields:
k = '-'.join(bf)
res[k] = dict()
dd = ds.get_field(bf)
xdat = dd[bf[0]].data.flatten()
ydat = dd[bf[1]].data.flatten()
# Volume weighted hist
weights = None
H, xe, ye = np.histogram2d(xdat, ydat, (self.bins[bf[0]], self.bins[bf[1]]),
weights=weights)
res[k]['H'] = H
res[k]['xe'] = xe
res[k]['ye'] = ye
# Density weighted hist
weights = (ds.get_field('nH'))['nH'].data.flatten()
Hw, xe, ye = np.histogram2d(xdat, ydat, (self.bins[bf[0]], self.bins[bf[1]]),
weights=weights)
res[k]['Hw'] = Hw
res['domain'] = ds.domain
return res
@LoadSim.Decorators.check_pickle
def read_pdf2d_phase(self, num, prefix='pdf2d_phase',
savdir=None, force_override=False):
"""
Read 2d pdf of density, chi_FUV, pok
"""
r = dict()
ds = self.load_vtk(num)
fields = ['nH','xH2','xHII','xHI','pok','T','Bmag','Erad_LyC']
self.logger.info('Reading fields {0:s}'.format(', '.join(fields)))
dd = self.get_chi(ds, fields=fields, freq=['LW','PE']) # see ./fields.py
#bins = (np.logspace(-2,5,71), np.logspace(-4,5,91))
# Masked array
idx_HII = dd['xHII'].data.flatten() > 0.5
idx_HI = (dd['xHI'].data.flatten() > 0.5)
idx_H2 = (dd['xH2'].data.flatten() > 0.25)
#idx_HI = ~idx_HII & ~idx_H2
dat_all = {
'nH-chi_PE_tot': (dd['nH'].data.flatten(),
(dd['chi_PE_ext'] + dd['chi_PE']).data.flatten(),
dd['nH'].data.flatten()),
'nH2-chi_PE_tot': (dd['nH'].data.flatten()[idx_H2],
(dd['chi_PE_ext'] + dd['chi_PE']).data.flatten()[idx_H2],
dd['nH'].data.flatten()[idx_H2]),
'nHI-chi_PE_tot': (dd['nH'].data.flatten()[idx_HI],
(dd['chi_PE_ext'] + dd['chi_PE']).data.flatten()[idx_HI],
dd['nH'].data.flatten()[idx_HI]),
'nHII-chi_PE_tot': (dd['nH'].data.flatten()[idx_HII],
(dd['chi_PE_ext'] + dd['chi_PE']).data.flatten()[idx_HII],
dd['nH'].data.flatten()[idx_HII]),
'nH-chi_FUV_tot': (dd['nH'].data.flatten(),
(dd['chi_FUV_ext'] + dd['chi_FUV']).data.flatten(),
dd['nH'].data.flatten()),
'nH2-chi_FUV_tot': (dd['nH'].data.flatten()[idx_H2],
(dd['chi_FUV_ext'] + dd['chi_FUV']).data.flatten()[idx_H2],
dd['nH'].data.flatten()[idx_H2]),
'nHI-chi_FUV_tot': (dd['nH'].data.flatten()[idx_HI],
(dd['chi_FUV_ext'] + dd['chi_FUV']).data.flatten()[idx_HI],
dd['nH'].data.flatten()[idx_HI]),
'nHII-chi_FUV_tot': (dd['nH'].data.flatten()[idx_HII],
(dd['chi_FUV_ext'] + dd['chi_FUV']).data.flatten()[idx_HII],
dd['nH'].data.flatten()[idx_HII]),
'nH-pok': (dd['nH'].data.flatten(),
dd['pok'].data.flatten(),
dd['nH'].data.flatten()),
'nH2-pok': (dd['nH'].data.flatten()[idx_H2],
dd['pok'].data.flatten()[idx_H2],
dd['nH'].data.flatten()[idx_H2]),
'nHI-pok': (dd['nH'].data.flatten()[idx_HI],
dd['pok'].data.flatten()[idx_HI],
dd['nH'].data.flatten()[idx_HI]),
'nHII-pok': (dd['nH'].data.flatten()[idx_HII],
dd['pok'].data.flatten()[idx_HII],
dd['nH'].data.flatten()[idx_HII]),
'nH-Bmag': (dd['nH'].data.flatten(),
dd['Bmag'].data.flatten(),
dd['nH'].data.flatten()),
'nH2-Bmag': (dd['nH'].data.flatten()[idx_H2],
dd['Bmag'].data.flatten()[idx_H2],
dd['nH'].data.flatten()[idx_H2]),
'nHI-Bmag': (dd['nH'].data.flatten()[idx_HI],
dd['Bmag'].data.flatten()[idx_HI],
dd['nH'].data.flatten()[idx_HI]),
'nHII-Bmag': (dd['nH'].data.flatten()[idx_HII],
dd['Bmag'].data.flatten()[idx_HII],
dd['nH'].data.flatten()[idx_HII]),
'nH-T': (dd['nH'].data.flatten(),
dd['T'].data.flatten(),
dd['nH'].data.flatten()),
'nH2-T': (dd['nH'].data.flatten()[idx_H2],
dd['T'].data.flatten()[idx_H2],
dd['nH'].data.flatten()[idx_H2]),
'nHI-T': (dd['nH'].data.flatten()[idx_HI],
dd['T'].data.flatten()[idx_HI],
dd['nH'].data.flatten()[idx_HI]),
'nHII-T': (dd['nH'].data.flatten()[idx_HII],
dd['T'].data.flatten()[idx_HII],
dd['nH'].data.flatten()[idx_HII]),
'nH-Erad_LyC': (dd['nH'].data.flatten(),
dd['Erad_LyC'].data.flatten(),
dd['nH'].data.flatten()),
'nHII-Erad_LyC': (dd['nH'].data.flatten()[idx_HII],
dd['Erad_LyC'].data.flatten()[idx_HII],
dd['nH'].data.flatten()[idx_HII]),
}
for k, (xdat,ydat,wdat) in dat_all.items():
r[k] = dict()
kx, ky = k.split('-')
bins = (self.bins[kx], self.bins[ky])
H, xe, ye = np.histogram2d(xdat, ydat, bins=bins, weights=None)
Hw, _, _ = np.histogram2d(xdat, ydat, bins=bins, weights=wdat)
r[k]['H'] = H
r[k]['Hw'] = Hw
r[k]['xe'] = xe
r[k]['ye'] = ye
return r
@LoadSim.Decorators.check_pickle
def read_density_pdf_all(self, prefix='density_pdf_all',
savdir=None, force_override=False):
rr = dict()
# nums = self.nums
#nums = [0,10,20]
nums = range(0, self.get_num_max_virial())
print('density_pdf_all: {0:s} nums:'.format(self.basename), nums, end=' ')
for i in nums:
print(i, end=' ')
r = self.read_density_pdf(num=i, force_override=False)
if i == 0:
for k in r.keys():
rr[k] = []
for k in r.keys():
try:
rr[k].append(r[k].value.item())
except:
rr[k].append(r[k])
rr = pd.DataFrame(rr)
return rr
@LoadSim.Decorators.check_pickle
def read_density_pdf(self, num, prefix='density_pdf',
savdir=None, force_override=False):
"""
Read 1d pdf of density
"""
bins = np.logspace(-3, 7, 101)
ds = self.load_vtk(num)
dd = ds.get_field(['nH','specific_scalar_CL','xn'])
# Select neutral cloud gas
idx = np.logical_and(dd['xn'].data > 0.5, dd['specific_scalar_CL'].data > 5e-1)
nH_cl = (dd['nH']*dd['specific_scalar_CL']).data[idx]
x = np.log(nH_cl)
res = dict()
res['time_code'] = ds.domain['time']
try:
res['nH_cl_meanV'] = np.mean(nH_cl)
res['nH_cl_meanM'] = np.average(nH_cl, weights=nH_cl)
res['muV'] = np.sum(x)/len(nH_cl)
res['muM'] = np.sum(x*nH_cl)/np.sum(nH_cl)
            res['sigmaV'] = np.std(x)
from numba.core.errors import TypingError
from sumo.modes.prepare import similarity
from sumo.utils import check_matrix_symmetry
from sumo.constants import SIMILARITY_METHODS
import numpy as np
import pytest
def _test_assumptions_no_numba(func):
with pytest.raises(AssertionError):
func(np.array([1]), np.array([1, 1]), missing=0.1)
with pytest.raises(AttributeError):
func([2], [3], missing=0.1)
def _test_assumptions_numba(func):
with pytest.raises(AssertionError):
func(np.array([1]), np.array([1, 1]), missing=0.1)
with pytest.raises(TypingError):
# running a function in nopython numba mode with incorrect attribute types raises numba specific error
func([2], [3], missing=0.1)
def _test_threshold(func):
assert np.isnan(func(np.array([np.nan, 1, 1]), np.array([1, np.nan, 2]), missing=0.5))
assert np.isnan(func(np.array([np.nan, 1]), np.array([1, np.nan]), missing=0.1))
def test_euclidean_dist():
assert np.allclose(similarity.euclidean_dist(np.array([1, 2]), np.array([1, 2]), missing=0.1), 0)
assert np.allclose(similarity.euclidean_dist(np.arange(16), np.arange(16), missing=0.1), 0)
v = np.array([1 if i != 0 else 2 for i in range(100)])
assert np.allclose(similarity.euclidean_dist(v, np.ones(100), missing=0.1), 1 / 100)
assert np.allclose(similarity.euclidean_dist(np.array([1, 1, 1]), np.array([1, 1, 2]), missing=0.1), 1 / 3)
assert np.allclose(similarity.euclidean_dist(np.array([1, np.nan, 1]), np.array([1, np.nan, 2]), missing=0.1), 0.5)
assert np.allclose(similarity.euclidean_dist(np.array([np.nan, 1, 1]), np.array([1, np.nan, 2]), missing=0.1), 1)
_test_assumptions_numba(similarity.euclidean_dist)
_test_threshold(similarity.euclidean_dist)
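# A reference sketch of the behaviour the tests above imply (not the numba-compiled sumo
# implementation): mean squared difference over co-observed positions, returning NaN when
# the fraction of co-observed positions falls below `missing` (interpretation inferred
# from the test cases).
def _reference_euclidean_dist(a, b, missing=0.1):
    observed = ~np.isnan(a) & ~np.isnan(b)
    if np.mean(observed) < missing:
        return np.nan
    return np.sum((a[observed] - b[observed]) ** 2) / np.sum(observed)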
def test_cosine_sim():
assert np.allclose(similarity.cosine_sim(np.array([1, 0, 0]), np.array([0, 1, 0]), missing=0.1), 0)
assert np.allclose(similarity.cosine_sim(np.array([1, 1, 0]), np.array([1, 1, 0]), missing=0.1), 1)
_test_assumptions_no_numba(similarity.cosine_sim)
_test_threshold(similarity.cosine_sim)
def test_correlation():
_test_assumptions_no_numba(similarity.correlation)
with pytest.raises(AssertionError):
similarity.correlation(np.array([1]), np.array([1, 1]), missing=0.1, method="method")
_test_threshold(similarity.correlation)
assert np.allclose(similarity.correlation(np.array([1, 2]), np.array([1, 2]), missing=0.1), 1)
assert np.allclose(similarity.correlation(np.arange(16), np.arange(16), missing=0.1), 1)
assert np.allclose(similarity.correlation(np.arange(16), np.arange(16), missing=0.1, method="spearman"), 1)
assert np.allclose(similarity.correlation(np.array([1, 2, 3]), np.array([1, 2, np.nan]), missing=0.1), 1)
assert np.allclose(
similarity.correlation(np.array([1, 2, 3, np.nan]), np.array([1, 2, np.nan, np.nan]), missing=0.1), 1)
def test_feature_rbf_similarity():
f = np.random.random((10, 20))
a = similarity.feature_rbf_similarity(f)
assert check_matrix_symmetry(a)
    assert np.all(np.diag(a) == 1)
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
def simulate_normal_demand(timesteps, I0, mu = 100, sigma = 5, showplot = False, figsize = (15, 10)):
Inventory = {-1 : I0, -0.5 : I0}
Demand = {}
Shortage = {}
Production = {}
    for i in np.arange(0, timesteps, 0.5):
import numpy as np
from mot.common.gaussian_density import GaussianDensity
from mot.common.state import Gaussian
from mot.configs import SensorModelConfig
from mot.measurement_models import MeasurementModel
from mot.motion_models import MotionModel
from .base_single_object_tracker import SingleObjectTracker
class NearestNeighbourTracker(SingleObjectTracker):
def __init__(
self,
gating_size: float,
meas_model: MeasurementModel,
sensor_model: SensorModelConfig,
motion_model: MotionModel,
*args,
**kwargs,
) -> None:
self.meas_model = meas_model
self.sensor_model = sensor_model
self.motion_model = motion_model
self.gating_size = gating_size
super().__init__()
@property
def name(self):
        return "Nearest Neighbour SOT"
def estimate(self, initial_state: Gaussian, measurements):
"""Tracks a single object using nearest neighbour association
        For each filter recursion iteration the following steps are implemented:
        1) gating
        2) calculate the predicted likelihood for each measurement in the gate
        3) find the nearest neighbour measurement
        4) compare the weight of the missed detection hypothesis and
           the weight of the object detection hypothesis created using
           the nearest neighbour measurement
        5) if the object detection hypothesis using the nearest neighbour
           measurement has the highest weight, perform a Kalman update
6) extract object state estimate
7) prediction
"""
prev_state = initial_state
estimations = [None for x in range(len(measurements))]
for timestep, measurements_in_scene in enumerate(measurements):
estimations[timestep] = self.estimation_step(
predicted_state=prev_state,
current_measurements=np.array(measurements_in_scene),
)
prev_state = GaussianDensity.predict(state=estimations[timestep], motion_model=self.motion_model, dt=1.0)
return tuple(estimations)
def estimation_step(self, predicted_state: Gaussian, current_measurements: np.ndarray):
# 1. Gating
(meas_in_gate, _) = GaussianDensity.ellipsoidal_gating(
state_prev=predicted_state,
z=current_measurements,
measurement_model=self.meas_model,
gating_size=self.gating_size,
)
if meas_in_gate.size == 0: # number of hypothesis
current_step_state = predicted_state
else:
# 2. Calculate the predicted likelihood for each measurement in the gate
predicted_likelihood = GaussianDensity.predict_loglikelihood(
state_pred=predicted_state,
z=meas_in_gate,
measurement_model=self.meas_model,
)
# Hypothesis evaluation
# detection
w_theta_factor = np.log(self.sensor_model.P_D / self.sensor_model.intensity_c)
w_theta_k = predicted_likelihood + w_theta_factor
# misdetection
w_theta_0 = 1 - self.sensor_model.P_D
# 3. Compare the weight of the missed detection
# hypothesis and the weight of the object detection hypothesis
# using the nearest neighbour measurement
            max_k = np.argmax(w_theta_k)
from color_trans_gradients_2 import delegator, create_undistorted_hls_image, create_sobel_image
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import pickle
# Loading camera calibration
cameraCalibration = pickle.load(open('serialized_camera_data/camera_calibration.p', 'rb'))
mtx, dist = map(cameraCalibration.get, ('mtx', 'dist'))
# Load test images.
test_images_with_names = list(map(lambda imageFileName: (imageFileName, cv2.imread(imageFileName)),
glob.glob('./test_images/*.jpg')))
original_image = test_images_with_names[1][1]
hls_image = create_undistorted_hls_image(original_image)
create_saturation_channel_images = lambda img: create_undistorted_hls_image(img)[:, :, 2]
take_sobel_in_X = lambda img: create_sobel_image(create_saturation_channel_images(img), thresh_min=10, thresh_max=160)
take_sobel_in_Y = lambda img: create_sobel_image(create_saturation_channel_images(img), direction='y', thresh_min=10,
thresh_max=160)
def combine_sobel_gradients(img):
"""
Here we calculate the sobel along x & y
"""
sobel_X = take_sobel_in_X(img)
sobel_Y = take_sobel_in_Y(img)
combined_sobel = np.zeros_like(sobel_X)
combined_sobel[((sobel_X == 1) & (sobel_Y == 1))] = 1
return combined_sobel
combined_sobel_image = delegator(test_images_with_names, combine_sobel_gradients, display_image=False)
perspective_matrix = pickle.load(open('serialized_camera_data/perspective_transform.p', 'rb'))
M, Minv = map(perspective_matrix.get, ('M', 'Minv'))
def do_perspective_transformation(image, M=M):
"""
Adjust the `image` using the transformation matrix `M`.
"""
img_size = (image.shape[1], image.shape[0])
warped = cv2.warpPerspective(image, M, img_size)
return warped
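# For reference, a hedged sketch of how transformation matrices like M/Minv above are
# typically produced with cv2.getPerspectiveTransform; the four source/destination points
# below are hypothetical and not the ones used to build the pickled matrices.
def _example_perspective_matrices():
    src = np.float32([[585, 460], [700, 460], [1090, 720], [200, 720]])
    dst = np.float32([[300, 0], [980, 0], [980, 720], [300, 720]])
    return cv2.getPerspectiveTransform(src, dst), cv2.getPerspectiveTransform(dst, src)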
do_combine_sobel_transform = lambda img: do_perspective_transformation(combine_sobel_gradients(img))
transformed_binary_images = delegator(test_images_with_names, do_combine_sobel_transform, display_image=False,
cmap='gray')
# conversions in x and y from pixels space to meters
ym_per_pix = 30 / 720 # meters per pixel in y dimension
xm_per_pix = 3.7 / 700 # meters per pixel in x dimension
def search_lanes_and_fit_polynomial(image, nwindows=9, margin=110, minpix=50):
"""
    This function searches for the lane pixels and then fits a second-order polynomial to each lane.
    Returns (left_fit, right_fit, left_fit_m, right_fit_m, left_lane_inds, right_lane_inds, out_img, nonzerox, nonzeroy)
"""
    # get a perspective-transformed image
binary_warped_image = do_combine_sobel_transform(image)
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped_image[binary_warped_image.shape[0] // 2:, :], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped_image, binary_warped_image, binary_warped_image)) * 255
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
    midpoint = int(histogram.shape[0] / 2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Set height of windows
    window_height = int(binary_warped_image.shape[0] / nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped_image.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# iterate through the windows one by one as we have to cover whole lane
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
y_low_coordinate = binary_warped_image.shape[0] - (window + 1) * window_height
y_high_coordinate = binary_warped_image.shape[0] - window * window_height
x_left_low = leftx_current - margin
x_left_high = leftx_current + margin
x_right_low = rightx_current - margin
x_right_high = rightx_current + margin
# Draw the windows on the visualization image, this draw a rectangle(window) on each iteration
cv2.rectangle(out_img, (x_left_low, y_low_coordinate), (x_left_high, y_high_coordinate), (0, 255, 0), 2)
cv2.rectangle(out_img, (x_right_low, y_low_coordinate), (x_right_high, y_high_coordinate), (0, 255, 0), 2)
# These are the coordinates which are inside our window
good_left_inds = ((nonzeroy >= y_low_coordinate) & (nonzeroy < y_high_coordinate) & (nonzerox >= x_left_low) & (
nonzerox < x_left_high)).nonzero()[0]
good_right_inds = \
((nonzeroy >= y_low_coordinate) & (nonzeroy < y_high_coordinate) & (nonzerox >= x_right_low) & (
nonzerox < x_right_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each lane, this representation is in pixels
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# this representation is for real word
# Fit a second order polynomial to each lane
left_fit_m = np.polyfit(lefty * ym_per_pix, leftx * xm_per_pix, 2)
right_fit_m = np.polyfit(righty * ym_per_pix, rightx * xm_per_pix, 2)
return (left_fit, right_fit, left_fit_m, right_fit_m, left_lane_inds, right_lane_inds, out_img, nonzerox, nonzeroy)
def draw_windows_and_fitted_lines(image, ax):
"""
    This method draws the windows and the fitted lines on an image with the help of `search_lanes_and_fit_polynomial`.
    Returns (left_fit, right_fit, left_fit_m, right_fit_m)
"""
left_fit, right_fit, left_fit_m, right_fit_m, left_lane_inds, right_lane_inds, out_img, nonzerox, nonzeroy = search_lanes_and_fit_polynomial(
image)
# Visualization
ploty = np.linspace(0, image.shape[0] - 1, image.shape[0])
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
# color left lane with red
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
# color right lane with blue
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
ax.imshow(out_img)
# plotting the fitted curve
ax.plot(left_fitx, ploty, color='yellow')
ax.plot(right_fitx, ploty, color='yellow')
return (left_fit, right_fit, left_fit_m, right_fit_m)
def draw_lane_lines_on_all_images(images, cols=2, rows=3, figsize=(15, 13)):
"""
    This method calls draw_windows_and_fitted_lines for each image and then shows the grid of output images.
"""
no_of_images = len(images)
fig, axes = plt.subplots(rows, cols, figsize=figsize)
indexes = range(cols * rows)
image_path_with_fitted_parameters = []
for ax, index in zip(axes.flat, indexes):
if index < no_of_images:
image_path, image = images[index]
left_fit, right_fit, left_fit_m, right_fit_m = draw_windows_and_fitted_lines(image, ax)
ax.set_title(image_path)
ax.axis('off')
image_path_with_fitted_parameters.append((image_path, left_fit, right_fit, left_fit_m, right_fit_m))
fig.show()
return image_path_with_fitted_parameters
# Lets do some action and draw the polygon & windows on images
images_top_view_with_curve = draw_lane_lines_on_all_images(test_images_with_names)
print('done')
# here we calculating curvature
def find_radius_of_curvature(yRange, left_fit_cr):
"""
    Returns the radius of curvature of the fitted polynomial x = A*y**2 + B*y + C, evaluated at y = yRange.
"""
return ((1 + (2 * left_fit_cr[0] * yRange * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
2 * left_fit_cr[0])
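# Worked illustration with hypothetical meter-space coefficients: for x = A*y**2 + B*y + C
# the radius of curvature is R = (1 + (2*A*y + B)**2)**1.5 / |2*A|, which is what
# find_radius_of_curvature evaluates after rescaling y by ym_per_pix.
_demo_fit_m = np.array([2e-4, -0.05, 1.5])
print('Example curvature for a hypothetical fit: {:.1f} m'.format(
    find_radius_of_curvature(719, _demo_fit_m)))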
for image_polygon in images_top_view_with_curve:
image_path, left_fit, right_fit, left_fit_m, right_fit_m = image_polygon
max_Y = 719
# converting from meters to kilometers
leftCurvature = find_radius_of_curvature(max_Y, left_fit_m) / 1000
rightCurvature = find_radius_of_curvature(max_Y, right_fit_m) / 1000
print('Image : {}, Left : {:.2f} km, Right : {:.2f} km'.format(image_path, leftCurvature, rightCurvature))
# Warp the lane boundaries on top of image
def fill_the_lane_area(img, left_fit, right_fit):
"""
    This function evaluates the fitted polynomials and fills the lane area using cv2.fillPoly.
"""
yMax = img.shape[0]
ploty = np.linspace(0, yMax - 1, yMax)
color_warp = np.zeros_like(img).astype(np.uint8)
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
# format the points for fillPoly method
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
# -*- coding: utf-8 -*-
import numpy as np
import scipy
import scipy.linalg
import scipy.optimize
import scipy.spatial
def vector(x, y, z):
""" A shortcut for creating 3D-space vectors;
in case you need a lot of manual np.array([...]) """
return np.array([x, y, z])
def deg2rad(deg):
""" Convert degrees (input) to radians """
return deg*np.pi/180.
def rad2deg(rad):
""" convert radians (input) to degrees """
return rad*180./np.pi
def norm(vector):
""" a shortcut to scipy.linalg.norm() """
return scipy.linalg.norm(vector)
def unit_vector(vector):
""" Returns a vector of magnitude 1 with the same direction"""
return vector / norm(vector)
def angle_between(v1, v2):
""" Returns the angle between vectors 'v1' and 'v2', in radians:
>>> angle_between((1, 0, 0), (0, 1, 0))
1.5707963267948966
>>> angle_between((1, 0, 0), (1, 0, 0))
0.0
>>> angle_between((1, 0, 0), (-1, 0, 0))
3.141592653589793
Kudos: https://stackoverflow.com/questions/2827393/
"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def arbitrary_rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
# Kudos to
# https://stackoverflow.com/questions/6802577/rotation-of-3d-vector
#import math
#
# axis = np.asarray(axis)
# axis = axis / math.sqrt(np.dot(axis, axis))
# a = math.cos(theta / 2.0)
# b, c, d = -axis * math.sin(theta / 2.0)
# aa, bb, cc, dd = a * a, b * b, c * c, d * d
# bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
# return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
# [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
# [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
# Also Kudos to the guy with another answer for the same question (used here): """
return scipy.linalg.expm(np.cross(np.eye(3), axis/norm(axis)*theta))
def arbitrary_rotation(point, axis, theta, origin):
""" Rotate a point around any axis given by axis by angle theta [radians] """
rotated_point = np.dot(arbitrary_rotation_matrix(axis, theta), point - origin)
return rotated_point + origin
def rotate(point, angle, axis='x'):
""" Rotate a point around a given axis by specified angle """
if axis == 'y':
axis = vector(0, 1, 0)
elif axis == 'z':
axis = vector(0, 0, 1)
elif axis == 'x':
axis = vector(1, 0, 0)
else:
raise ValueError("Rotation axis should be either 'x', 'y', or 'z' ")
return arbitrary_rotation(point, axis, angle, vector(0, 0, 0))
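def _rotate_example():
    """A small self-check (not in the original module): rotating the x-axis unit vector
    by 90 degrees about the z-axis should give the y-axis unit vector."""
    rotated = rotate(vector(1, 0, 0), deg2rad(90), axis='z')
    assert np.allclose(rotated, vector(0, 1, 0))
    return rotated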
def to_polar(point, axis='z'):
""" Convert (x, y, z) point to (radius, angle, height);
the axis of the new polar coordinate system can be chosen ('x' or 'z') """
assert axis in ['x', 'z']
if axis == 'z':
radius = (point[0]**2 + point[1]**2)**0.5
angle = np.arctan2(point[1], point[0])
height = point[2]
else: # axis == 'x'
radius = (point[1]**2 + point[2]**2)**0.5
        angle = np.arctan2(point[2], point[1])
import argparse
import os
import json
import numpy as np
import PIL.Image as Image
import xml.etree.ElementTree as ET
from simplification.cutil import simplify_coords
from skimage import measure
def convert_mask_to_polygon(
mask,
max_polygon_points=100,
score_threshold=0.5,
max_refinement_iterations=25,
edge_safety_padding=1,
):
"""Convert a numpy mask to a polygon outline in normalized coordinates.
:param mask: Pixel mask, where each pixel has an object (float) score in [0, 1], in size ([1, height, width])
:type: mask: <class 'numpy.array'>
:param max_polygon_points: Maximum number of (x, y) coordinate pairs in polygon
:type: max_polygon_points: Int
:param score_threshold: Score cutoff for considering a pixel as in object.
:type: score_threshold: Float
:param max_refinement_iterations: Maximum number of times to refine the polygon
trying to reduce the number of pixels to meet max polygon points.
:type: max_refinement_iterations: Int
:param edge_safety_padding: Number of pixels to pad the mask with
:type edge_safety_padding: Int
:return: normalized polygon coordinates
:rtype: list of list
"""
# Convert to numpy bitmask
mask = mask[0]
mask_array = np.array((mask > score_threshold), dtype=np.uint8)
image_shape = mask_array.shape
# Pad the mask to avoid errors at the edge of the mask
embedded_mask = np.zeros(
(
image_shape[0] + 2 * edge_safety_padding,
image_shape[1] + 2 * edge_safety_padding,
),
dtype=np.uint8,
)
embedded_mask[
edge_safety_padding : image_shape[0] + edge_safety_padding,
edge_safety_padding : image_shape[1] + edge_safety_padding,
] = mask_array
# Find Image Contours
contours = measure.find_contours(embedded_mask, 0.5)
simplified_contours = []
for contour in contours:
# Iteratively reduce polygon points, if necessary
if max_polygon_points is not None:
simplify_factor = 0
while (
len(contour) > max_polygon_points
and simplify_factor < max_refinement_iterations
):
contour = simplify_coords(contour, simplify_factor)
simplify_factor += 1
# Convert to [x, y, x, y, ....] coordinates and correct for padding
unwrapped_contour = [0] * (2 * len(contour))
unwrapped_contour[::2] = np.ceil(contour[:, 1]) - edge_safety_padding
        unwrapped_contour[1::2] = np.ceil(contour[:, 0]) - edge_safety_padding
import numpy as np
from numpy import exp, sqrt, cos, pi, sin
from base_function import BaseFunction
class Levi(BaseFunction):
target_E = 0.
xmin = np.array([-10.,-10.])
xmax = np.array([10.,10.])
def getEnergy(self, coords):
x, y = coords
E = sin(3.*pi*x)**2 + (x-1.)**2 * (1. + sin(3*pi*y)**2) \
+ (y-1.)**2 * (1. + sin(2*pi*y)**2)
return E
def getEnergyGradient(self, coords):
x, y = coords
E = self.getEnergy(coords)
dEdx = 2.*3.*pi* cos(3.*pi*x) * sin(3.*pi*x) + 2.*(x-1.) * (1. + sin(3*pi*y)**2)
        dEdy = (x-1.)**2 * 2.*3.*pi* cos(3.*pi*y) * sin(3.*pi*y) + 2.*(y-1.) * (1. + sin(2*pi*y)**2) + (y-1.)**2 * 2.*2.*pi* cos(2.*pi*y) * sin(2.*pi*y)
        return E, np.array([dEdx, dEdy])
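# A hedged finite-difference check of the analytic gradient above; it assumes Levi() takes
# no constructor arguments and that getEnergyGradient returns an (energy, gradient) pair.
if __name__ == "__main__":
    levi = Levi()
    x0 = np.array([0.3, -1.7])
    _, grad = levi.getEnergyGradient(x0)
    eps = 1e-6
    num_grad = np.zeros_like(x0)
    for i in range(len(x0)):
        step = np.zeros_like(x0)
        step[i] = eps
        # central finite difference approximation of dE/dx_i
        num_grad[i] = (levi.getEnergy(x0 + step) - levi.getEnergy(x0 - step)) / (2. * eps)
    assert np.allclose(grad, num_grad, atol=1e-4)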