prompt (stringlengths 19 to 879k) | completion (stringlengths 3 to 53.8k) | api (stringlengths 8 to 59) |
---|---|---|
# Subset optimization functions
#
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME> and <NAME>. "Unconstrained Salient
# Object Detection via Proposal Subset Optimization."
# CVPR, 2016.
# Code written by <NAME>, 2020
import numpy as np
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing.image import load_img
from functions.utils import do_nms, get_max_inc_float, get_iou_float, expand_roi
from functions.get_Proposals import get_proposals
def prop_opt(bboxes, bboxscore, param):
""" The main function for the subset optimization.
Args:
bboxes: Bounding boxes.
bboxscore: Scores of the bboxes.
param: parameters of the model.
Returns: res: Bounding boxes after the optimization.
stat: _:
"""
# for the special case when lambda == 0
if param['lambda'] == 0:
stat = {}
res = bboxes.copy()
stat['O'] = np.arange(bboxes.shape[1]).reshape(-1, 1)
return res, stat
stat = do_map_forward(bboxes, bboxscore.astype(float), param)
if stat['O'].size > 1:
stat = do_map_backward(bboxes, bboxscore.astype(float), param, stat)
    # We use the second output to initialize the optimization again
if param['perturb'] and len(stat['O']) > 1:
# use the second output to initialize the forward pass
statTmp = do_map_eval(bboxes, bboxscore.astype(float), param, stat['O'][1], stat['W'], stat['BGp'])
statTmp = do_map_forward(bboxes, bboxscore.astype(float), param, statTmp)
if statTmp['f'] > stat['f']:
stat = statTmp.copy()
res = np.take(bboxes, stat['O'].flatten(), axis = 1).copy()
return res, stat
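# --- Editor's illustrative sketch (not part of the original source) ---
# Minimal example of how prop_opt could be called. The parameter keys are the
# ones read elsewhere in this module ('lambda', 'perturb', 'maxnum', 'phi',
# 'gamma'); the values below are assumptions chosen only for illustration.
def _demo_prop_opt():
    demo_bboxes = np.random.rand(4, 50)   # boxes stored column-wise: 4 coordinates x 50 proposals
    demo_scores = np.random.rand(50)      # one score per proposal (exact shape expected by get_w may differ)
    demo_param = {'lambda': 0.1, 'perturb': 1, 'maxnum': 10,
                  'phi': -1.0, 'gamma': 0.1}
    return prop_opt(demo_bboxes, demo_scores, demo_param)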
def do_map_forward(B, S, param, stat = None):
if B.size == 0:
print('Empty proposal set')
stat = {}
return stat
nB = B.shape[1]
if not stat:
# initialization
stat = {}
stat['W'] = np.array([])
stat['Xp'] = np.array([]) # optimal w_{ij} given the output set
stat['X'] = np.zeros((nB, 1)) # assignment
# construct W
stat['W'], stat['Xp'] = get_w(B, S, param)
stat['BGp'] = stat['Xp'].copy()
stat['nms'] = np.zeros((B.shape[1], 1))
stat['f'] = stat['Xp'].sum()
stat['O'] = np.array([], dtype=int)
## loop
while len(stat['O']) < min(param['maxnum'], nB):
V = np.maximum(stat['W'] - stat['Xp'].reshape(-1, 1), 0)
scores = V.sum(axis = 0) + stat['nms'].flatten().T
vote = np.argmax(scores)
score = scores[vote]
if score == 0: # no supporters
break
tmpf = stat['f'] + score + param['phi']
if (tmpf > stat['f']):
mask = V[:, vote] > 0
stat['X'][mask] = vote
stat['O'] = np.append(stat['O'], vote).reshape(-1, 1)
stat['Xp'][mask] = stat['W'][mask, vote]
stat['f'] = tmpf
stat['nms'] = stat['nms'] + param['gamma'] * get_nms_penalty(B, B[:, vote]).reshape(-1, 1)
else:
break
return stat
def do_map_backward(B, S, param, stat):
while stat['O'].size != 0:
flag = False
bestStat = stat.copy()
for i in range(len(stat['O'])):
O = stat['O'].copy()
O = np.delete(O, i)
statTmp = do_map_eval(B, S, param, O, stat['W'], stat['BGp'])
if statTmp['f'] > bestStat['f']:
flag = True
bestStat = statTmp.copy()
stat = bestStat.copy()
if not flag:
break
return stat
def do_map_eval(B, S, param, O, W = None, BGp = None):
""" This function evaluate the target function
given a output window set.
Args:
B: .
S: .
param: .
O: .
W: .
BGp: .
Returns: statTmp: .
"""
statTmp = {}
statTmp['W'] = | np.array([]) | numpy.array |
from __future__ import division
import numpy as np
from numpy import pi, sqrt, exp, power, log, log10
import os
import constants as ct
import particle as pt
import tools as tl
##############################
# Preparing SKA configurations
##############################
def initialize():
"""This routine is supposed to be run only once, \
i.e. when the module is loaded, therefore\
the I/O is not optimized for speed concerns.
"""
SKA_conf = {}
# # --------------
for exper in ['low', 'mid']:
# if exper == "low":
# path = local_path + "/data/SKA1-low_accumu.csv"
# elif exper == "mid":
# path = local_path + "/data/SKA1-mid_accumu.csv"
# data_raw = np.loadtxt(path, delimiter=',')
# radius = data_raw[:, 0]
# fraction = data_raw[:, 1]
# bins_radius = np.logspace(1, 5, 20) # bin it
# hist_radius = np.interp(np.log10(bins_radius), np.log10(
# radius), fraction, left=0) # sample at the bin edges
# if exper == "low":
# # compute the x-y coordinates of all units
# x_arr, y_arr = get_telescope_coordinate(
# fraction*ct._SKALow_number_of_stations_, radius, SKA=exper)
# # save it
# SKA_conf['low radius'] = (data_raw, x_arr, y_arr, bins_radius,
# hist_radius)
# elif exper == "mid":
# x_arr, y_arr = get_telescope_coordinate(
# fraction*ct._SKA1Mid_number_of_dishes_, radius, SKA=exper)
# SKA_conf['mid radius'] = (data_raw, x_arr, y_arr, bins_radius,
# hist_radius)
# get coordinates
if exper == "low":
SKA_conf['low0'] = np.loadtxt(
local_path + "/data/SKA1_config_low0.csv", delimiter=',')
SKA_conf['low1'] = np.loadtxt(
local_path + "/data/SKA1_config_low1.csv", delimiter=',')
SKA_conf['low2'] = np.loadtxt(
local_path + "/data/SKA1_config_low2_6clusters.csv", delimiter=',')
# update clusters, it's 6 stations per cluster
new_arr = []
for xy in (SKA_conf['low2']):
for j in range(2):
for k in range(3):
x = xy[0] + j*50
y = xy[1] + (k-1)*50
new_arr.append([x, y])
new_arr = np.array(new_arr)
SKA_conf['low2'] = new_arr
# combine them
SKA_conf['low_coord'] = np.concatenate(
(SKA_conf['low0'], SKA_conf['low1'], SKA_conf['low2']))
x_arr = SKA_conf['low_coord'][:, 0]
y_arr = SKA_conf['low_coord'][:, 1]
elif exper == "mid":
SKA_conf['mid0_MeerKAT'] = np.loadtxt(
local_path + "/data/SKA1_config_mid0_MK.csv", delimiter=',')
SKA_conf['mid0_SKA'] = np.loadtxt(
local_path + "/data/SKA1_config_mid0_SKA.csv", delimiter=',')
SKA_conf['mid1_MeerKAT'] = np.loadtxt(
local_path + "/data/SKA1_config_mid1_MK.csv", delimiter=',')
SKA_conf['mid1_SKA'] = np.loadtxt(
local_path + "/data/SKA1_config_mid1_SKA.csv", delimiter=',')
SKA_conf['mid2_SKA'] = np.loadtxt(
local_path + "/data/SKA1_config_mid2_SKA.csv", delimiter=',')
# combine them
SKA_conf['mid_coord'] = np.concatenate(
(SKA_conf['mid0_MeerKAT'],
SKA_conf['mid0_SKA'],
SKA_conf['mid1_MeerKAT'],
SKA_conf['mid1_SKA'],
SKA_conf['mid2_SKA']))
# convert km to m
SKA_conf['mid_coord'][:, 0] = SKA_conf['mid_coord'][:, 0]*1.e3
SKA_conf['mid_coord'][:, 1] = SKA_conf['mid_coord'][:, 1]*1.e3
x_arr = SKA_conf['mid_coord'][:, 0]
y_arr = SKA_conf['mid_coord'][:, 1]
# get baseline distribution
baseline_arr = get_baseline(x_arr, y_arr)
hist_baseline, bins_baseline = np.histogram(
baseline_arr, bins=np.logspace(1, 5, 20000))
# correcting the over-counting of baseline pair
hist_baseline = hist_baseline/2.
hist_baseline_cumsum = np.cumsum(hist_baseline)
# save it
if exper == "low":
SKA_conf['low baseline'] = (
baseline_arr, hist_baseline, bins_baseline, hist_baseline_cumsum)
elif exper == "mid":
SKA_conf['mid baseline'] = (
baseline_arr, hist_baseline, bins_baseline, hist_baseline_cumsum)
# about effective area
if exper == "low":
path = local_path + "/data/SKA1-low_Aeff_over_Tsys.txt"
data_raw = np.loadtxt(path)
# low is given in MHz, convert to GHz
data_raw[:, 0] = data_raw[:, 0] * 1.e-3
SKA_conf['low A/T'] = data_raw
elif exper == "mid":
path = local_path + "/data/SKA1-mid_Aeff_over_Tsys.txt"
data_raw = np.loadtxt(path)
SKA_conf['mid A/T'] = data_raw
SKA_conf['A/T'] = np.concatenate((SKA_conf['low A/T'],
SKA_conf['mid A/T']))
# computing efficiency
# make a nu grid
Nsteps = 2001
nulow = np.logspace( | log10(ct._nu_min_ska_low_) | numpy.log10 |
# LSTM for sequence classification in the IMDB dataset
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM, Convolution1D, Flatten, Dropout
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.callbacks import TensorBoard
import numpy as np
# Using keras to load the dataset with the top_words
top_words = 10000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
path = '/home/santanu/Downloads/Mobile_App/'
X_train = np.load(path + "aclImdb/X_train.npy")
y_train = | np.load(path + "aclImdb/y_train.npy") | numpy.load |
import _pickle, numpy as np, itertools as it
from time import perf_counter
# from cppimport import import_hook
#
# # import cppimport
#
# # cppimport.set_quiet(False)
#
import rpxdock as rp
from rpxdock.bvh import bvh_test
from rpxdock.bvh import BVH, bvh
import rpxdock.homog as hm
def test_bvh_isect_cpp():
assert bvh_test.TEST_bvh_test_isect()
def test_bvh_isect_fixed():
# print()
mindist = 0.01
totbvh, totnaive = 0, 0
for i in range(10):
xyz1 = np.random.rand(1000, 3) + [0.9, 0.9, 0]
xyz2 = np.random.rand(1000, 3)
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
assert len(bvh1) == 1000
pos1 = hm.htrans([0.9, 0.9, 0.9])
pos2 = np.eye(4)
tbvh = perf_counter()
clash1 = bvh.bvh_isect_fixed(bvh1, bvh2, mindist)
tbvh = perf_counter() - tbvh
tn = perf_counter()
clash2 = bvh.naive_isect_fixed(bvh1, bvh2, mindist)
tn = perf_counter() - tn
assert clash1 == clash2
# print(f"{i:3} clash {clash1:1} {tn / tbvh:8.2f}, {tn:1.6f}, {tbvh:1.6f}")
totbvh += tbvh
totnaive += tn
print("total times", totbvh, totnaive / totbvh, totnaive)
def test_bvh_isect():
t = rp.Timer().start()
N1, N2 = 10, 10
N = N1 * N2
mindist = 0.04
nclash = 0
for outer in range(N1):
xyz1 = np.random.rand(1250, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(1250, 3) - [0.5, 0.5, 0.5]
pos1 = hm.rand_xform(N2, cart_sd=0.8)
pos2 = hm.rand_xform(N2, cart_sd=0.8)
t.checkpoint('init')
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
t.checkpoint('BVH')
clash = list()
for inner in range(N2):
clash1 = bvh.bvh_isect(bvh1=bvh1, bvh2=bvh2, pos1=pos1[inner], pos2=pos2[inner],
mindist=mindist)
t.checkpoint('bvh_isect')
clash2 = bvh.naive_isect(bvh1, bvh2, pos1[inner], pos2[inner], mindist)
t.checkpoint('naive_isect')
assert clash1 == clash2
clash.append(clash1)
clashvec = bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2, mindist)
t.checkpoint('bvh_isect_vec')
assert np.all(clashvec == clash)
nclash += sum(clash)
assert clashvec[1] == bvh.bvh_isect_vec(bvh1, bvh2, pos1[1], pos2[1], mindist)
bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2[1], mindist) # ?? make sure api works?
bvh.bvh_isect_vec(bvh1, bvh2, pos1[1], pos2, mindist)
print(
f"Ngeom {N1:,} Npos {N2:,} isect {nclash/N:4.2f} bvh: {int(N/t.sum.bvh_isect):,}/s",
f"bvh_vec {int(N/t.sum.bvh_isect_vec):,} fastnaive {int(N/t.sum.naive_isect):,}/s",
f"ratio {int(t.sum.naive_isect/t.sum.bvh_isect_vec):,}x",
)
def test_bvh_isect_fixed_range():
N1, N2 = 10, 10
N = N1 * N2
mindist = 0.04
nclash = 0
for outer in range(N1):
xyz1 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
bvh1_half = BVH(xyz1[250:750])
bvh2_half = BVH(xyz2[250:750])
pos1 = hm.rand_xform(N2, cart_sd=0.5)
pos2 = hm.rand_xform(N2, cart_sd=0.5)
isect1 = bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2, mindist)
isect2, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, mindist)
assert np.all(isect1 == isect2)
bounds = [250], [749], [250], [749]
isect1 = bvh.bvh_isect_vec(bvh1_half, bvh2_half, pos1, pos2, mindist)
isect2, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, mindist, *bounds)
assert np.all(isect1 == isect2)
def test_bvh_min_cpp():
assert bvh_test.TEST_bvh_test_min()
def test_bvh_min_dist_fixed():
xyz1 = np.random.rand(5000, 3) + [0.9, 0.9, 0.0]
xyz2 = np.random.rand(5000, 3)
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
tbvh = perf_counter()
d, i1, i2 = bvh.bvh_min_dist_fixed(bvh1, bvh2)
tbvh = perf_counter() - tbvh
dtest = np.linalg.norm(xyz1[i1] - xyz2[i2])
assert np.allclose(d, dtest, atol=1e-6)
# tnp = perf_counter()
# dnp = np.min(np.linalg.norm(xyz1[:, None] - xyz2[None], axis=2))
# tnp = perf_counter() - tnp
tn = perf_counter()
dn = bvh.naive_min_dist_fixed(bvh1, bvh2)
tn = perf_counter() - tn
print()
print("from bvh: ", d)
print("from naive:", dn)
assert np.allclose(dn, d, atol=1e-6)
print(f"tnaivecpp {tn:5f} tbvh {tbvh:5f} tbvhcreate {tcre:5f}")
print("bvh acceleration vs naive", tn / tbvh)
# assert tn / tbvh > 100
def test_bvh_min_dist():
xyz1 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
# print()
totbvh, totnaive = 0, 0
N = 10
pos1 = hm.rand_xform(N, cart_sd=1)
pos2 = hm.rand_xform(N, cart_sd=1)
dis = list()
for i in range(N):
tbvh = perf_counter()
d, i1, i2 = bvh.bvh_min_dist(bvh1, bvh2, pos1[i], pos2[i])
tbvh = perf_counter() - tbvh
dtest = np.linalg.norm(pos1[i] @ hm.hpoint(xyz1[i1]) - pos2[i] @ hm.hpoint(xyz2[i2]))
assert np.allclose(d, dtest, atol=1e-6)
tn = perf_counter()
dn = bvh.naive_min_dist(bvh1, bvh2, pos1[i], pos2[i])
tn = perf_counter() - tn
assert np.allclose(dn, d, atol=1e-6)
dis.append((d, i1, i2))
# print(
# f"tnaivecpp {tn:1.6f} tbvh {tbvh:1.6f} tcpp/tbvh {tn/tbvh:8.1f}",
# np.linalg.norm(pos1[:3, 3]),
# dtest - d,
# )
totnaive += tn
totbvh += tbvh
d, i1, i2 = bvh.bvh_min_dist_vec(bvh1, bvh2, pos1, pos2)
for a, b, c, x in zip(d, i1, i2, dis):
assert a == x[0]
assert b == x[1]
assert c == x[2]
print(
"total times",
totbvh / N * 1000,
"ms",
totnaive / totbvh,
totnaive,
f"tcre {tcre:2.4f}",
)
def test_bvh_min_dist_floormin():
xyz1 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
# print()
totbvh, totnaive = 0, 0
N = 10
for i in range(N):
pos1 = hm.rand_xform(cart_sd=1)
pos2 = hm.rand_xform(cart_sd=1)
tbvh = perf_counter()
d, i1, i2 = bvh.bvh_min_dist(bvh1, bvh2, pos1, pos2)
tbvh = perf_counter() - tbvh
dtest = np.linalg.norm(pos1 @ hm.hpoint(xyz1[i1]) - pos2 @ hm.hpoint(xyz2[i2]))
assert np.allclose(d, dtest, atol=1e-6)
tn = perf_counter()
dn = bvh.naive_min_dist(bvh1, bvh2, pos1, pos2)
tn = perf_counter() - tn
assert np.allclose(dn, d, atol=1e-6)
# print(
# f"tnaivecpp {tn:1.6f} tbvh {tbvh:1.6f} tcpp/tbvh {tn/tbvh:8.1f}",
# np.linalg.norm(pos1[:3, 3]),
# dtest - d,
# )
totnaive += tn
totbvh += tbvh
print(
"total times",
totbvh / N * 1000,
"ms",
totnaive / totbvh,
totnaive,
f"tcre {tcre:2.4f}",
)
def test_bvh_slide_single_inline():
bvh1 = BVH([[-10, 0, 0]])
bvh2 = BVH([[0, 0, 0]])
d = bvh.bvh_slide(bvh1, bvh2, np.eye(4), np.eye(4), rad=1.0, dirn=[1, 0, 0])
assert d == 8
# moves xyz1 to -2,0,0
# should always come in from "infinity" from -direction
bvh1 = BVH([[10, 0, 0]])
bvh2 = BVH([[0, 0, 0]])
d = bvh.bvh_slide(bvh1, bvh2, np.eye(4), np.eye(4), rad=1.0, dirn=[1, 0, 0])
assert d == -12
# also moves xyz1 to -2,0,0
for i in range(100):
np.random.seed(i)
dirn = np.array([np.random.randn(), 0, 0])
dirn /= np.linalg.norm(dirn)
rad = np.abs(np.random.randn() / 10)
xyz1 = np.array([[np.random.randn(), 0, 0]])
xyz2 = np.array([[np.random.randn(), 0, 0]])
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
d = bvh.bvh_slide(bvh1, bvh2, np.eye(4), np.eye(4), rad=rad, dirn=dirn)
xyz1 += d * dirn
assert np.allclose(np.linalg.norm(xyz1 - xyz2), 2 * rad, atol=1e-4)
def test_bvh_slide_single():
nmiss = 0
for i in range(100):
# np.random.seed(i)
dirn = np.random.randn(3)
dirn /= np.linalg.norm(dirn)
rad = np.abs(np.random.randn())
xyz1 = np.random.randn(1, 3)
xyz2 = np.random.randn(1, 3)
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
d = bvh.bvh_slide(bvh1, bvh2, np.eye(4), np.eye(4), rad=rad, dirn=dirn)
if d < 9e8:
xyz1 += d * dirn
assert np.allclose(np.linalg.norm(xyz1 - xyz2), 2 * rad, atol=1e-4)
else:
nmiss += 1
delta = xyz2 - xyz1
d0 = delta.dot(dirn)
dperp2 = np.sum(delta * delta) - d0 * d0
target_d2 = 4 * rad**2
assert target_d2 < dperp2
print("nmiss", nmiss, nmiss / 1000)
def test_bvh_slide_single_xform():
nmiss = 0
for i in range(1000):
dirn = np.random.randn(3)
dirn /= np.linalg.norm(dirn)
rad = np.abs(np.random.randn() * 2.0)
xyz1 = np.random.randn(1, 3)
xyz2 = np.random.randn(1, 3)
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1 = hm.rand_xform()
pos2 = hm.rand_xform()
d = bvh.bvh_slide(bvh1, bvh2, pos1, pos2, rad=rad, dirn=dirn)
if d < 9e8:
p1 = (pos1 @ hm.hpoint(xyz1[0]))[:3] + d * dirn
p2 = (pos2 @ hm.hpoint(xyz2[0]))[:3]
assert np.allclose(np.linalg.norm(p1 - p2), 2 * rad, atol=1e-4)
else:
nmiss += 1
p2 = pos2 @ hm.hpoint(xyz2[0])
p1 = pos1 @ hm.hpoint(xyz1[0])
delta = p2 - p1
d0 = delta[:3].dot(dirn)
dperp2 = np.sum(delta * delta) - d0 * d0
target_d2 = 4 * rad**2
assert target_d2 < dperp2
print("nmiss", nmiss, nmiss / 1000)
def test_bvh_slide_whole():
    # timings with -Ofast
# slide test 10,000 iter bvhslide float: 16,934/s double: 16,491/s bvhmin 17,968/s fracmiss: 0.0834
# np.random.seed(0)
N1, N2 = 2, 10
totbvh, totbvhf, totmin = 0, 0, 0
nmiss = 0
for j in range(N1):
xyz1 = np.random.rand(5000, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(5000, 3) - [0.5, 0.5, 0.5]
# tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
# bvh1f = BVH_32bit(xyz1)
# bvh2f = BVH_32bit(xyz2)
# tcre = perf_counter() - tcre
pos1 = hm.rand_xform(N2, cart_sd=0.5)
pos2 = hm.rand_xform(N2, cart_sd=0.5)
dirn = np.random.randn(3)
dirn /= np.linalg.norm(dirn)
radius = 0.001 + np.random.rand() / 10
slides = list()
for i in range(N2):
tbvh = perf_counter()
dslide = bvh.bvh_slide(bvh1, bvh2, pos1[i], pos2[i], radius, dirn)
tbvh = perf_counter() - tbvh
tbvhf = perf_counter()
# dslide = bvh.bvh_slide_32bit(bvh1f, bvh2f, pos1[i], pos2[i], radius, dirn)
tbvhf = perf_counter() - tbvhf
slides.append(dslide)
if dslide > 9e8:
tn = perf_counter()
dn, i, j = bvh.bvh_min_dist(bvh1, bvh2, pos1[i], pos2[i])
tn = perf_counter() - tn
assert dn > 2 * radius
nmiss += 1
else:
tmp = hm.htrans(dirn * dslide) @ pos1[i]
tn = perf_counter()
dn, i, j = bvh.bvh_min_dist(bvh1, bvh2, tmp, pos2[i])
tn = perf_counter() - tn
if not np.allclose(dn, 2 * radius, atol=1e-6):
print(dn, 2 * radius)
assert np.allclose(dn, 2 * radius, atol=1e-6)
# print(
# i,
# f"tnaivecpp {tn:1.6f} tbvh {tbvh:1.6f} tcpp/tbvh {tn/tbvh:8.1f}",
# np.linalg.norm(pos1[:3, 3]),
# dslide,
# )
totmin += tn
totbvh += tbvh
totbvhf += tbvhf
slides2 = bvh.bvh_slide_vec(bvh1, bvh2, pos1, pos2, radius, dirn)
assert np.allclose(slides, slides2)
N = N1 * N2
print(
f"slide test {N:,} iter bvhslide double: {int(N/totbvh):,}/s bvhmin {int(N/totmin):,}/s",
# f"slide test {N:,} iter bvhslide float: {int(N/totbvhf):,}/s double: {int(N/totbvh):,}/s bvhmin {int(N/totmin):,}/s",
f"fracmiss: {nmiss/N}",
)
def test_collect_pairs_simple():
print("test_collect_pairs_simple")
bufbvh = -np.ones((100, 2), dtype="i4")
bufnai = -np.ones((100, 2), dtype="i4")
bvh1 = BVH([[0, 0, 0], [0, 2, 0]])
bvh2 = BVH([[0.9, 0, 0], [0.9, 2, 0]])
assert len(bvh1) == 2
mindist = 1.0
pos1 = np.eye(4)
pos2 = np.eye(4)
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
assert not o
print(pbvh.shape)
assert len(pbvh) == 2 and nnai == 2
assert np.all(pbvh == [[0, 0], [1, 1]])
assert np.all(bufnai[:nnai] == [[0, 0], [1, 1]])
pos1 = hm.htrans([0, 2, 0])
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
assert not o
assert len(pbvh) == 1 and nnai == 1
assert np.all(pbvh == [[0, 1]])
assert np.all(bufnai[:nnai] == [[0, 1]])
pos1 = hm.htrans([0, -2, 0])
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
assert not o
assert len(pbvh) == 1 and nnai == 1
assert np.all(pbvh == [[1, 0]])
assert np.all(bufnai[:nnai] == [[1, 0]])
def test_collect_pairs_simple_selection():
print("test_collect_pairs_simple_selection")
bufbvh = -np.ones((100, 2), dtype="i4")
bufnai = -np.ones((100, 2), dtype="i4")
crd1 = [[0, 0, 0], [0, 0, 0], [0, 2, 0], [0, 0, 0]]
crd2 = [[0, 0, 0], [0.9, 0, 0], [0, 0, 0], [0.9, 2, 0]]
mask1 = [1, 0, 1, 0]
mask2 = [0, 1, 0, 1]
bvh1 = BVH(crd1, mask1)
bvh2 = BVH(crd2, mask2)
assert len(bvh1) == 2
assert np.allclose(bvh1.radius(), 1.0, atol=1e-6)
assert np.allclose(bvh1.center(), [0, 1, 0], atol=1e-6)
mindist = 1.0
pos1 = np.eye(4)
pos2 = np.eye(4)
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
assert not o
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
assert len(pbvh) == 2 and nnai == 2
assert np.all(pbvh == [[0, 1], [2, 3]])
assert np.all(bufnai[:nnai] == [[0, 1], [2, 3]])
pos1 = hm.htrans([0, 2, 0])
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
assert not o
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
assert len(pbvh) == 1 and nnai == 1
assert np.all(pbvh == [[0, 3]])
assert np.all(bufnai[:nnai] == [[0, 3]])
pos1 = hm.htrans([0, -2, 0])
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
assert not o
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
assert len(pbvh) == 1 and nnai == 1
assert np.all(pbvh == [[2, 1]])
assert np.all(bufnai[:nnai] == [[2, 1]])
def test_collect_pairs():
N1, N2 = 1, 50
N = N1 * N2
Npts = 500
totbvh, totbvhf, totmin = 0, 0, 0
totbvh, totnai, totct, ntot = 0, 0, 0, 0
bufbvh = -np.ones((Npts * Npts, 2), dtype="i4")
bufnai = -np.ones((Npts * Npts, 2), dtype="i4")
for j in range(N1):
xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1, pos2 = list(), list()
while 1:
x1 = hm.rand_xform(cart_sd=0.5)
x2 = hm.rand_xform(cart_sd=0.5)
d = np.linalg.norm(x1[:, 3] - x2[:, 3])
if 0.8 < d < 1.3:
pos1.append(x1)
pos2.append(x2)
if len(pos1) == N2:
break
pos1 = np.stack(pos1)
pos2 = np.stack(pos2)
pairs = list()
mindist = 0.002 + np.random.rand() / 10
for i in range(N2):
tbvh = perf_counter()
pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1[i], pos2[i], mindist, bufbvh)
tbvh = perf_counter() - tbvh
assert not o
tnai = perf_counter()
nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1[i], pos2[i], mindist, bufnai)
tnai = perf_counter() - tnai
tct = perf_counter()
nct = bvh.bvh_count_pairs(bvh1, bvh2, pos1[i], pos2[i], mindist)
tct = perf_counter() - tct
ntot += nct
assert nct == len(pbvh)
totnai += 1
pairs.append(pbvh.copy())
totbvh += tbvh
totnai += tnai
totct += tct
assert len(pbvh) == nnai
if len(pbvh) == 0:
continue
o = np.lexsort((pbvh[:, 1], pbvh[:, 0]))
pbvh[:] = pbvh[:][o]
o = np.lexsort((bufnai[:nnai, 1], bufnai[:nnai, 0]))
bufnai[:nnai] = bufnai[:nnai][o]
assert np.all(pbvh == bufnai[:nnai])
pair1 = pos1[i] @ hm.hpoint(xyz1[pbvh[:, 0]])[..., None]
pair2 = pos2[i] @ hm.hpoint(xyz2[pbvh[:, 1]])[..., None]
dpair = np.linalg.norm(pair2 - pair1, axis=1)
assert np.max(dpair) <= mindist
pcount = bvh.bvh_count_pairs_vec(bvh1, bvh2, pos1, pos2, mindist)
assert np.all(pcount == [len(x) for x in pairs])
pairs2, lbub = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1, pos2, mindist)
for i, p in enumerate(pairs):
lb, ub = lbub[i]
assert np.all(pairs2[lb:ub] == pairs[i])
x, y = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1[:3], pos2[0], mindist)
assert len(y) == 3
x, y = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1[0], pos2[:5], mindist)
assert len(y) == 5
print(
f"collect test {N:,} iter bvh {int(N/totbvh):,}/s naive {int(N/totnai):,}/s ratio {totnai/totbvh:7.2f} count-only {int(N/totct):,}/s avg cnt {ntot/N}"
)
def test_collect_pairs_range():
N1, N2 = 1, 500
N = N1 * N2
Npts = 1000
for j in range(N1):
xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1, pos2 = list(), list()
while 1:
x1 = hm.rand_xform(cart_sd=0.5)
x2 = hm.rand_xform(cart_sd=0.5)
d = np.linalg.norm(x1[:, 3] - x2[:, 3])
if 0.8 < d < 1.3:
pos1.append(x1)
pos2.append(x2)
if len(pos1) == N2:
break
pos1 = np.stack(pos1)
pos2 = np.stack(pos2)
pairs = list()
mindist = 0.002 + np.random.rand() / 10
pairs, lbub = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1, pos2, mindist)
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist)
assert np.all(lbub == rlbub)
assert np.all(pairs == rpairs)
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist, [250],
[750])
assert len(rlbub) == len(pos1)
assert np.all(rpairs[:, 0] >= 250)
assert np.all(rpairs[:, 0] <= 750)
filt_pairs = pairs[np.logical_and(pairs[:, 0] >= 250, pairs[:, 0] <= 750)]
# assert np.all(filt_pairs == rpairs) # sketchy???
assert np.allclose(np.unique(filt_pairs, axis=1), np.unique(rpairs, axis=1))
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist, [600],
[1000], -1, [100], [400], -1)
assert len(rlbub) == len(pos1)
assert np.all(rpairs[:, 0] >= 600)
assert np.all(rpairs[:, 0] <= 1000)
assert np.all(rpairs[:, 1] >= 100)
assert np.all(rpairs[:, 1] <= 400)
filt_pairs = pairs[(pairs[:, 0] >= 600) * (pairs[:, 0] <= 1000) * (pairs[:, 1] >= 100) *
(pairs[:, 1] <= 400)]
assert np.all(filt_pairs == rpairs) # sketchy???
assert np.allclose(np.unique(filt_pairs, axis=1), np.unique(rpairs, axis=1))
def test_collect_pairs_range_sym():
# np.random.seed(132)
N1, N2 = 5, 100
N = N1 * N2
Npts = 1000
for j in range(N1):
xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1, pos2 = list(), list()
while 1:
x1 = hm.rand_xform(cart_sd=0.5)
x2 = hm.rand_xform(cart_sd=0.5)
d = np.linalg.norm(x1[:, 3] - x2[:, 3])
if 0.8 < d < 1.3:
pos1.append(x1)
pos2.append(x2)
if len(pos1) == N2:
break
pos1 = np.stack(pos1)
pos2 = np.stack(pos2)
pairs = list()
mindist = 0.002 + np.random.rand() / 10
pairs, lbub = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1, pos2, mindist)
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist)
assert np.all(lbub == rlbub)
assert np.all(pairs == rpairs)
bounds = [100], [400], len(xyz1) // 2
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist, *bounds)
assert len(rlbub) == len(pos1)
assert np.all(
np.logical_or(np.logical_and(100 <= rpairs[:, 0], rpairs[:, 0] <= 400),
np.logical_and(600 <= rpairs[:, 0], rpairs[:, 0] <= 900)))
filt_pairs = pairs[np.logical_or(np.logical_and(100 <= pairs[:, 0], pairs[:, 0] <= 400),
np.logical_and(600 <= pairs[:, 0], pairs[:, 0] <= 900))]
assert np.allclose(np.unique(filt_pairs, axis=1), np.unique(rpairs, axis=1))
bounds = [100], [400], len(xyz1) // 2, [20], [180], len(xyz1) // 5
rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist, *bounds)
def awful(p):
return np.logical_and(
np.logical_or(np.logical_and(100 <= p[:, 0], p[:, 0] <= 400),
np.logical_and(600 <= p[:, 0], p[:, 0] <= 900)),
np.logical_or(
np.logical_and(+20 <= p[:, 1], p[:, 1] <= 180),
np.logical_or(
np.logical_and(220 <= p[:, 1], p[:, 1] <= 380),
np.logical_or(
np.logical_and(420 <= p[:, 1], p[:, 1] <= 580),
np.logical_or(np.logical_and(620 <= p[:, 1], p[:, 1] <= 780),
np.logical_and(820 <= p[:, 1], p[:, 1] <= 980))))))
assert len(rlbub) == len(pos1)
assert np.all(awful(rpairs))
filt_pairs = pairs[awful(pairs)]
assert np.all(filt_pairs == rpairs) # sketchy???
assert np.allclose(np.unique(filt_pairs, axis=1), np.unique(rpairs, axis=1))
def test_slide_collect_pairs():
    # timings with -Ofast
# slide test 10,000 iter bvhslide float: 16,934/s double: 16,491/s bvhmin 17,968/s fracmiss: 0.0834
# np.random.seed(0)
N1, N2 = 2, 50
Npts = 5000
totbvh, totbvhf, totcol, totmin = 0, 0, 0, 0
nhit = 0
buf = -np.ones((Npts * Npts, 2), dtype="i4")
for j in range(N1):
xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyzcol1 = xyz1[:int(Npts / 5)]
xyzcol2 = xyz2[:int(Npts / 5)]
# tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
bvhcol1 = BVH(xyzcol1)
bvhcol2 = BVH(xyzcol2)
# tcre = perf_counter() - tcre
for i in range(N2):
dirn = np.random.randn(3)
dirn /= np.linalg.norm(dirn)
radius = 0.001 + np.random.rand() / 10
pairdis = 3 * radius
pos1 = hm.rand_xform(cart_sd=0.5)
pos2 = hm.rand_xform(cart_sd=0.5)
tbvh = perf_counter()
dslide = bvh.bvh_slide(bvh1, bvh2, pos1, pos2, radius, dirn)
tbvh = perf_counter() - tbvh
if dslide > 9e8:
tn = perf_counter()
dn, i, j = bvh.bvh_min_dist(bvh1, bvh2, pos1, pos2)
tn = perf_counter() - tn
assert dn > 2 * radius
else:
nhit += 1
pos1 = hm.htrans(dirn * dslide) @ pos1
tn = perf_counter()
dn, i, j = bvh.bvh_min_dist(bvh1, bvh2, pos1, pos2)
tn = perf_counter() - tn
if not np.allclose(dn, 2 * radius, atol=1e-6):
print(dn, 2 * radius)
assert np.allclose(dn, 2 * radius, atol=1e-6)
tcol = perf_counter()
pair, o = bvh.bvh_collect_pairs(bvhcol1, bvhcol2, pos1, pos2, pairdis, buf)
assert not o
if len(pair) > 0:
tcol = perf_counter() - tcol
totcol += tcol
pair1 = pos1 @ hm.hpoint(xyzcol1[pair[:, 0]])[..., None]
pair2 = pos2 @ hm.hpoint(xyzcol2[pair[:, 1]])[..., None]
dpair = np.linalg.norm(pair2 - pair1, axis=1)
assert np.max(dpair) <= pairdis
totmin += tn
totbvh += tbvh
N = N1 * N2
print(
f"slide test {N:,} iter bvhslide double: {int(N/totbvh):,}/s bvhmin {int(N/totmin):,}/s",
# f"slide test {N:,} iter bvhslide float: {int(N/totbvhf):,}/s double: {int(N/totbvh):,}/s bvhmin {int(N/totmin):,}/s",
f"fracmiss: {nhit/N} collect {int(nhit/totcol):,}/s",
)
def test_bvh_accessors():
xyz = np.random.rand(10, 3) - [0.5, 0.5, 0.5]
b = BVH(xyz)
assert np.allclose(b.com()[:3], np.mean(xyz, axis=0))
p = b.centers()
dmat = np.linalg.norm(p[:, :3] - xyz[:, None], axis=2)
assert np.allclose(np.min(dmat, axis=1), 0)
def random_walk(N):
x = np.random.randn(N, 3).astype("f").cumsum(axis=0)
x -= x.mean(axis=0)
return 0.5 * x / x.std()
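# Editor's illustrative sketch (not part of the original tests): quick check of
# what random_walk produces, namely N zero-mean 3D points rescaled to std 0.5.
def _demo_random_walk():
    pts = random_walk(1000)
    assert pts.shape == (1000, 3)
    assert np.allclose(pts.mean(axis=0), 0.0, atol=1e-4)
    assert np.isclose(pts.std(), 0.5, atol=1e-4)
    return pts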
def test_bvh_isect_range(body=None, cart_sd=0.3, N2=10, mindist=0.02):
N1 = 1 if body else 2
N = N1 * N2
totbvh, totnaive, totbvh0, nhit = 0, 0, 0, 0
for ibvh in range(N1):
if body:
bvh1, bvh2 = body.bvh_bb, body.bvh_bb
else:
# xyz1 = np.random.rand(2000, 3) - [0.5, 0.5, 0.5]
# xyz2 = np.random.rand(2000, 3) - [0.5, 0.5, 0.5]
xyz1 = random_walk(1000)
xyz2 = random_walk(1000)
tcre = perf_counter()
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
tcre = perf_counter() - tcre
pos1 = hm.rand_xform(N2, cart_sd=cart_sd)
pos2 = hm.rand_xform(N2, cart_sd=cart_sd)
ranges = list()
for i in range(N2):
tbvh0 = perf_counter()
c = bvh.bvh_isect(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i], mindist=mindist)
tbvh0 = perf_counter() - tbvh0
# if not c:
# continue
if c:
nhit += 1
tbvh = perf_counter()
range1 = bvh.isect_range_single(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i],
mindist=mindist)
tbvh = perf_counter() - tbvh
tn = perf_counter()
range2 = bvh.naive_isect_range(bvh1, bvh2, pos1[i], pos2[i], mindist)
assert range1 == range2
tn = perf_counter() - tn
ranges.append(range1)
# print(f"{str(range1):=^80}")
# body.move_to(pos1).dump_pdb("test1.pdb")
# body.move_to(pos2).dump_pdb("test2.pdb")
# return
# print(f"{i:3} range {range1} {tn / tbvh:8.2f}, {tn:1.6f}, {tbvh:1.6f}")
totbvh += tbvh
totnaive += tn
totbvh0 += tbvh0
lb, ub = bvh.isect_range(bvh1, bvh2, pos1, pos2, mindist)
ranges = np.array(ranges)
assert np.all(lb == ranges[:, 0])
assert np.all(ub == ranges[:, 1])
ok = np.logical_and(lb >= 0, ub >= 0)
isect, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, mindist, lb, ub)
assert not np.any(isect[ok])
print(
f"iscet {nhit:,} hit of {N:,} iter bvh: {int(nhit/totbvh):,}/s fastnaive {int(nhit/totnaive):,}/s",
f"ratio {int(totnaive/totbvh):,}x isect-only: {totbvh/totbvh0:3.3f}x",
)
def test_bvh_isect_range_ids():
N1 = 50
N2 = 100
N = N1 * N2
# Nids = 100
cart_sd = 0.3
mindist = 0.03
Npts = 1000
factors = [1000, 500, 250, 200, 125, 100, 50, 40, 25, 20, 10, 8, 5, 4, 2, 1]
# Npts = 6
# factors = [3]
# mindist = 0.3
# N1 = 1
assert all(Npts % f == 0 for f in factors)
for ibvh in range(N1):
# for ibvh in [5]:
# np.random.seed(ibvh)
# print(ibvh)
Nids = factors[ibvh % len(factors)]
# xyz1 = np.random.rand(2000, 3) - [0.5, 0.5, 0.5]
# xyz2 = np.random.rand(2000, 3) - [0.5, 0.5, 0.5]
xyz1 = random_walk(Npts)
xyz2 = random_walk(Npts)
tcre = perf_counter()
bvh1 = BVH(xyz1, [], np.repeat(np.arange(Nids), Npts / Nids))
bvh2 = BVH(xyz2, [], np.repeat(np.arange(Nids), Npts / Nids))
tcre = perf_counter() - tcre
pos1 = hm.rand_xform(N2, cart_sd=cart_sd)
pos2 = hm.rand_xform(N2, cart_sd=cart_sd)
# pos1 = pos1[99:]
# pos2 = pos2[99:]
# print(bvh1.vol_lb())
# print(bvh1.vol_ub())
# print(bvh1.obj_id())
# assert 0
# assert bvh1.max_id() == Nids - 1
# assert bvh1.min_lb() == 0
# assert bvh1.max_ub() == Nids - 1
lb, ub = bvh.isect_range(bvh1, bvh2, pos1, pos2, mindist)
pos1 = pos1[lb != -1]
pos2 = pos2[lb != -1]
ub = ub[lb != -1]
lb = lb[lb != -1]
# print(lb, ub)
assert np.all(0 <= lb) and np.all(lb - 1 <= ub) and np.all(ub < Nids)
isectall = bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2, mindist)
assert np.all(isectall == np.logical_or(lb > 0, ub < Nids - 1))
isect, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, mindist, lb, ub)
if np.any(isect):
print(np.where(isect)[0])
print('lb', lb[isect])
print('ub', ub[isect])
print('cA', clash[isect, 0])
print('cB', clash[isect, 1])
# print('is', isect.astype('i') * 100)
# print('isectlbub', np.sum(isect), np.sum(isect) / len(isect))
assert not np.any(isect[lb <= ub])
def test_bvh_isect_range_lb_ub(body=None, cart_sd=0.3, N1=3, N2=20, mindist=0.02):
N1 = 1 if body else N1
N = N1 * N2
Npts = 1000
nhit, nrangefail = 0, 0
args = [
rp.Bunch(maxtrim=a, maxtrim_lb=b, maxtrim_ub=c) for a in (-1, 400) for b in (-1, 300)
for c in (-1, 300)
]
for ibvh, arg in it.product(range(N1), args):
if body:
bvh1, bvh2 = body.bvh_bb, body.bvh_bb
else:
# xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
# xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz1 = random_walk(Npts)
xyz2 = random_walk(Npts)
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1 = hm.rand_xform(N2, cart_sd=cart_sd)
pos2 = hm.rand_xform(N2, cart_sd=cart_sd)
ranges = list()
for i in range(N2):
c = bvh.bvh_isect(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i], mindist=mindist)
if c: nhit += 1
range1 = bvh.isect_range_single(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i],
mindist=mindist, **arg)
ranges.append(range1)
if range1[0] < 0:
nrangefail += 1
assert c
continue
assert (arg.maxtrim < 0) or (np.diff(range1) + 1 >= Npts - arg.maxtrim)
assert (arg.maxtrim_lb < 0) or (range1[0] <= arg.maxtrim_lb)
assert (arg.maxtrim_ub < 0) or (range1[1] + 1 >= Npts - arg.maxtrim_ub)
# mostly covered elsewhere, and quite slow
# range2 = bvh.naive_isect_range(bvh1, bvh2, pos1[i], pos2[i], mindist)
# assert range1 == range2
lb, ub = bvh.isect_range(bvh1, bvh2, pos1, pos2, mindist, **arg)
ranges = np.array(ranges)
assert np.all(lb == ranges[:, 0])
assert np.all(ub == ranges[:, 1])
print(f"iscet {nhit:,} hit of {N:,} iter, frangefail {nrangefail/nhit}", )
def test_bvh_pickle(tmpdir):
xyz1 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
pos1 = hm.rand_xform(cart_sd=1)
pos2 = hm.rand_xform(cart_sd=1)
tbvh = perf_counter()
d, i1, i2 = bvh.bvh_min_dist(bvh1, bvh2, pos1, pos2)
rng = bvh.isect_range_single(bvh1, bvh2, pos1, pos2, mindist=d + 0.01)
with open(tmpdir + "/1", "wb") as out:
_pickle.dump(bvh1, out)
with open(tmpdir + "/2", "wb") as out:
_pickle.dump(bvh2, out)
with open(tmpdir + "/1", "rb") as out:
bvh1b = _pickle.load(out)
with open(tmpdir + "/2", "rb") as out:
bvh2b = _pickle.load(out)
assert len(bvh1) == len(bvh1b)
assert len(bvh2) == len(bvh2b)
assert np.allclose(bvh1.com(), bvh1b.com())
assert np.allclose(bvh1.centers(), bvh1b.centers())
assert np.allclose(bvh2.com(), bvh2b.com())
assert np.allclose(bvh2.centers(), bvh2b.centers())
db, i1b, i2b = bvh.bvh_min_dist(bvh1b, bvh2b, pos1, pos2)
assert np.allclose(d, db)
assert i1 == i1b
assert i2 == i2b
rngb = bvh.isect_range_single(bvh1b, bvh2b, pos1, pos2, mindist=d + 0.01)
assert rngb == rng
def test_bvh_threading_isect_may_fail():
from concurrent.futures import ThreadPoolExecutor
from itertools import repeat
reps = 1
npos = 1000
Npts = 1000
xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
bvh1 = BVH(xyz1)
bvh2 = BVH(xyz2)
mindist = 0.1
tottmain, tottthread = 0, 0
nt = 2
exe = ThreadPoolExecutor(nt)
for i in range(reps):
pos1 = hm.rand_xform(npos, cart_sd=0.5)
pos2 = hm.rand_xform(npos, cart_sd=0.5)
buf = np.empty((Npts, 2), dtype="i4")
t = perf_counter()
_ = [bvh.bvh_isect(bvh1, bvh2, p1, p2, mindist) for p1, p2 in zip(pos1, pos2)]
isect = np.array(_)
tmain = perf_counter() - t
tottmain += tmain
t = perf_counter()
futures = exe.map(
bvh.bvh_isect_vec,
repeat(bvh1),
repeat(bvh2),
| np.split(pos1, nt) | numpy.split |
# standard imports
import numpy as np
import matplotlib.pyplot as plt
# Add parent directory to path
import sys
import os
parent_path = '..\\nistapttools'
if parent_path not in sys.path:
sys.path.append(os.path.abspath(parent_path))
# custom imports
import apt_fileio
import m2q_calib
import plotting_stuff
import initElements_P3
import histogram_functions
import peak_param_determination as ppd
from histogram_functions import bin_dat
import voltage_and_bowl
from voltage_and_bowl import do_voltage_and_bowl
from voltage_and_bowl import mod_full_vb_correction
import colorcet as cc
def create_histogram(xs, ys, x_roi=None, delta_x=0.1, y_roi=None, delta_y=0.1):
"""Create a 2d histogram of the data, specifying the bin intensity, region
of interest (on the y-axis), and the spacing of the y bins"""
# even number
num_x = int(np.ceil((x_roi[1]-x_roi[0])/delta_x))
num_y = int(np.ceil((y_roi[1]-y_roi[0])/delta_y))
return np.histogram2d(xs, ys, bins=[num_x, num_y],
range=[x_roi, y_roi],
density=False)
def _extents(f):
"""Helper function to determine axis extents based off of the bin edges"""
delta = f[1] - f[0]
return [f[0] - delta/2, f[-1] + delta/2]
def plot_2d_histo(ax, N, x_edges, y_edges, scale='log'):
    """Helper function to plot a histogram on an axis"""
    if scale == 'log':
        dat = np.log10(1 + N)
    elif scale == 'lin':
        dat = N
    ax.imshow(np.transpose(dat), aspect='auto',
              extent=_extents(x_edges) + _extents(y_edges),
              origin='lower', cmap=cc.cm.CET_L8,
              interpolation='antialiased')
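# --- Editor's illustrative sketch (not part of the original script) ---
# How the two helpers above fit together; the synthetic data, ROIs and bin
# widths are made-up demonstration values, not settings from the analysis.
def _demo_plot_2d_histo():
    xs = np.random.uniform(0, 100, 10000)
    ys = np.random.uniform(0, 500, 10000)
    N, x_edges, y_edges = create_histogram(xs, ys, x_roi=[0, 100], delta_x=1.0,
                                           y_roi=[0, 500], delta_y=5.0)
    fig, ax = plt.subplots()
    plot_2d_histo(ax, N, x_edges, y_edges, scale='log')
    return fig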
def corrhist(epos, delta=1, roi=None):
dat = epos['tof']
if roi is None:
roi = [0, 1000]
N = int(np.ceil((roi[1]-roi[0])/delta))
corrhist = np.zeros([N,N], dtype=int)
multi_idxs = np.where(epos['ipp']>1)[0]
for multi_idx in multi_idxs:
n_hits = epos['ipp'][multi_idx]
cluster = dat[multi_idx:multi_idx+n_hits]
idx1 = -1
idx2 = -1
for i in range(n_hits):
for j in range(i+1,n_hits):
idx1 = int(np.floor(cluster[i]/delta))
idx2 = int(np.floor(cluster[j]/delta))
if idx1 < N and idx1>=0 and idx2 < N and idx2>=0:
corrhist[idx1,idx2] += 1
edges = np.arange(roi[0],roi[1]+delta,delta)
assert edges.size-1 == N
return (edges, corrhist+corrhist.T-np.diag(np.diag(corrhist)))
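# --- Editor's illustrative sketch (not part of the original script) ---
# Tiny synthetic input for corrhist; the field names 'tof' and 'ipp' are taken
# from the function above, but the values are invented for demonstration.
def _demo_corrhist():
    epos = np.zeros(6, dtype=[('tof', 'f8'), ('ipp', 'i4')])
    epos['tof'] = [100.0, 110.0, 250.0, 260.0, 270.0, 500.0]
    epos['ipp'] = [2, 0, 3, 0, 0, 1]   # a double hit, a triple hit, then a single
    edges, H = corrhist(epos, delta=10, roi=[0, 600])
    return edges, H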
def calc_t0(tof,tof_vcorr_fac,tof_bcorr_fac,sigma):
BB = tof_bcorr_fac[0::2]+tof_bcorr_fac[1::2]
t0 = ((tof_bcorr_fac[0::2]*tof[0::2]+tof_bcorr_fac[1::2]*tof[1::2]) - sigma/(tof_vcorr_fac[0::2]))/BB
t0 = np.ravel(np.column_stack((t0,t0)))
return t0
def create_sigma_delta_histogram(raw_tof, tof_vcorr_fac, tof_bcorr_fac, sigmas=None, delta_range=None, delta_step=0.5):
# Must be a doubles only epos...
# scan through a range of sigmas and compute the corrected data
if sigmas is None:
sigmas = np.linspace(0,2000,2**7)
if delta_range is None:
delta_range = [0,700]
    delta_n_bins = int((delta_range[1]-delta_range[0])/delta_step)
# print('delta_n_bins = '+str(delta_n_bins))
res_dat = np.zeros((sigmas.size,delta_n_bins))
for sigma_idx in np.arange(sigmas.size):
t0 = calc_t0(raw_tof, tof_vcorr_fac, tof_bcorr_fac, sigmas[sigma_idx])
tof_corr = tof_vcorr_fac*tof_bcorr_fac*(raw_tof-t0)
dts = np.abs(tof_corr[:-1:2]-tof_corr[1::2])
N, delta_edges = np.histogram(dts, bins=delta_n_bins, range=delta_range)
res_dat[sigma_idx,:] = N
if np.mod(sigma_idx,10)==0:
print("Loop index "+str(sigma_idx+1)+" of "+str(sigmas.size))
delta_centers = 0.5*(delta_edges[:-1]+delta_edges[1:])
return (res_dat, sigmas, delta_centers)
def interleave(a,b):
return np.ravel(np.column_stack((a,b)))
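# Editor's illustrative sketch (not part of the original script): interleave
# merges two equal-length arrays element-wise, e.g. [1, 2, 3] and [10, 20, 30]
# become [1, 10, 2, 20, 3, 30].
def _demo_interleave():
    out = interleave(np.array([1, 2, 3]), np.array([10, 20, 30]))
    assert np.array_equal(out, [1, 10, 2, 20, 3, 30])
    return out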
def calc_slope_and_intercept(raw_tof, volt_coeff, bowl_coeff):
A = volt_coeff[0::2]
B_alpha = bowl_coeff[0::2]
B_beta = bowl_coeff[1::2]
tof_alpha = raw_tof[0::2]
tof_beta = raw_tof[1::2]
intercept = 2*A*B_alpha*B_beta*(tof_beta-tof_alpha)/(B_alpha+B_beta)
slope = (B_beta-B_alpha)/(B_beta+B_alpha)
return (slope, intercept)
# Note that x is sums and y is diffs
def compute_dist_to_line(slope, intercept, x, y):
return np.abs(intercept+slope*x-y)/np.sqrt(1+slope**2)
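# Editor's illustrative sketch (not part of the original script): sanity check
# of the point-to-line distance above. For the line y = x (slope 1, intercept 0)
# the point (x, y) = (0, 1) lies 1/sqrt(2) away.
def _demo_compute_dist_to_line():
    d = compute_dist_to_line(slope=1.0, intercept=0.0, x=0.0, y=1.0)
    assert np.isclose(d, 1.0 / np.sqrt(2.0))
    return d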
def calc_parametric_line(raw_tof, volt_coeff, bowl_coeff, n=2):
if n>0:
t = raw_tof.reshape(-1,n)
v = volt_coeff.reshape(-1,n)
b = bowl_coeff.reshape(-1,n)
else:
t = raw_tof
v = volt_coeff
b = bowl_coeff
r0 = v*b*(t-np.sum(b*t,axis=1)[:,np.newaxis]/np.sum(b,axis=1)[:,np.newaxis])
r1 = b/ | np.sum(b,axis=1) | numpy.sum |
import pyNeuroChem as pyc
import hdnntools as hdt
import numpy as np
import time
import math
from ase_interface import ANIENS
from ase_interface import ensemblemolecule
# ------------------------------------------------------------------------
# Class for ANI cross validation computer for multiple conformers at a time
# ------------------------------------------------------------------------
class anicrossvalidationconformer(object):
''' Constructor '''
def __init__(self,cnstfile,saefile,nnfprefix,Nnet,gpuid=[0], sinet=False):
# Number of networks
self.Nn = Nnet
gpua = [gpuid[int(np.floor(i/(Nnet/len(gpuid))))] for i in range(self.Nn)]
# Construct pyNeuroChem class
self.ncl = [pyc.conformers(cnstfile, saefile, nnfprefix+str(i)+'/networks/', gpua[i], sinet) for i in range(self.Nn)]
#self.ncl = [pync.conformers(cnstfile, saefile, nnfprefix+str(1)+'/networks/', gpuid, sinet) for i in range(self.Nn)]
''' Compute the energy and mean force of a set of conformers for the CV networks '''
def compute_energyandforce_conformations(self,X,S,ensemble=True):
Na = X.shape[0] * len(S)
X_split = np.array_split(X, math.ceil(Na/10000))
energies = np.zeros((self.Nn, X.shape[0]), dtype=np.float64)
forces = np.zeros((self.Nn, X.shape[0], X.shape[1], X.shape[2]), dtype=np.float32)
shift = 0
for j,x in enumerate(X_split):
for i, nc in enumerate(self.ncl):
nc.setConformers(confs=x,types=list(S))
E = nc.energy().copy()
F = nc.force().copy()
#print(E.shape,x.shape,energies.shape,shift)
energies[i,shift:shift+E.shape[0]] = E
forces[i,shift:shift+E.shape[0]] = F
shift += x.shape[0]
sigma = hdt.hatokcal * np.std(energies,axis=0) / np.sqrt(float(len(S)))
if ensemble:
return hdt.hatokcal*np.mean(energies,axis=0), hdt.hatokcal*np.mean(forces,axis=0), sigma#, charges
else:
return hdt.hatokcal*energies, hdt.hatokcal*forces, sigma
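# --- Editor's illustrative sketch (not part of the original source) ---
# Typical call pattern for the ensemble class above. Every file path, the
# network count and the molecule below are placeholders (assumptions), not
# values from the original project.
def _demo_anicrossvalidationconformer():
    cv = anicrossvalidationconformer(cnstfile='model.params',    # assumed path
                                     saefile='model.sae',        # assumed path
                                     nnfprefix='train',          # assumed prefix: train0/, train1/, ...
                                     Nnet=5, gpuid=[0])
    X = np.random.rand(10, 4, 3).astype(np.float32)  # 10 conformers of a 4-atom molecule
    S = ['O', 'H', 'H', 'C']                         # element symbols, one per atom
    E, F, sigma = cv.compute_energyandforce_conformations(X, S)
    return E, F, sigma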
##--------------------------------------------------------------------------------
## Class for ANI cross validation computer for a single molecule at a time
##--------------------------------------------------------------------------------
class anicrossvalidationmolecule(object):
def __init__(self, cnstfile, saefile, nnfprefix, Nnet, gpuid=0, sinet=False):
# Number of networks
self.Nn = Nnet
# Construct pyNeuroChem class
self.ncl = [pyc.molecule(cnstfile, saefile, nnfprefix + str(i) + '/networks/', gpuid, sinet) for i in
range(self.Nn)]
def set_molecule(self,X,S):
for nc in self.ncl:
nc.setMolecule(coords=X, types=list(S))
def compute_energies_and_forces_molecule(self, x, S):
Na = x.shape[0]
energy = np.zeros((self.Nn), dtype=np.float64)
forces = np.zeros((self.Nn, Na, 3), dtype=np.float32)
for i,nc in enumerate(self.ncl):
nc.setMolecule(coords=x, types=list(S))
energy[i] = nc.energy()[0]
forces[i, :, :] = nc.force()
sigmap = hdt.hatokcal * np.std(energy) / np.sqrt(Na)
energy = hdt.hatokcal * energy.mean()
forces = hdt.hatokcal * np.mean(forces, axis=0)
return energy, forces, sigmap
# ------------------------------------------------------------------------
# pyNeuroChem -- single molecule batched optimizer (one at a time)
# ------------------------------------------------------------------------
class moleculeOptimizer(anicrossvalidationmolecule):
def __init__(self, cns, sae, nnf, Nn, gpuid=0):
anicrossvalidationmolecule.__init__(self,cns, sae, nnf, Nn, gpuid)
# Gradient descent optimizer
def optimizeGradientDescent (self, X, S, alpha=0.0004, convergence=0.027, maxsteps=10000, printer=True, printstep=50):
Xf = np.zeros(X.shape,dtype=np.float32)
for i,x in enumerate(X):
print('--Optimizing conformation:',i,'--')
xn = np.array(x, np.float32)
for j in range(maxsteps):
e, f, p = self.compute_energies_and_forces_molecule(xn, S)
xn = xn + alpha*f
if printer and j%printstep==0:
print(' -',j,"{0:.3f}".format(e),
"{0:.4f}".format(np.abs(f).sum()),
"{0:.4f}".format(np.max(np.abs(f))),
"{0:.4f}".format(p))
if np.max(np.abs(f)) < convergence:
break
print('Complete')
print(' -', j, "{0:.3f}".format(e),
"{0:.4f}".format(np.abs(f).sum()),
"{0:.4f}".format(np.max(np.abs(f))),
"{0:.4f}".format(p))
Xf[i] = xn
return Xf
# Conjugate gradient optimizer
def optimizeConjugateGradient (self, X, S, alpha=0.0004, convergence=0.027, maxsteps=10000, printer=True, printstep=50):
Xf = np.zeros(X.shape,dtype=np.float32)
for i,x in enumerate(X):
print('--Optimizing conformation:',i,'--')
xn = np.array(x, np.float32)
hn = np.zeros(xn.shape, dtype=np.float32)
fn = np.zeros(xn.shape, dtype=np.float32)
for j in range(maxsteps):
e, f, p = self.compute_energies_and_forces_molecule(xn, S)
if j != 0:
gamma = np.power(np.linalg.norm(f), 2) / np.power(np.linalg.norm(fn), 2)
else:
gamma = 0
fn = f
hn = f + gamma * hn
xn = xn + alpha * hn
if printer and j%printstep==0:
print(' -',j,"{0:.3f}".format(e),
"{0:.4f}".format(np.abs(f).sum()),
"{0:.4f}".format(np.max(np.abs(f))),
"{0:.4f}".format(p))
if np.max(np.abs(f)) < convergence:
break
print('Complete')
print(' -', j, "{0:.3f}".format(e),
"{0:.4f}".format(np.abs(f).sum()),
"{0:.4f}".format(np.max(np.abs(f))),
"{0:.4f}".format(p))
Xf[i] = xn
return Xf
# In the works function for optimizing one at a time with lBFGS
def optimizelBFGS (self, X, S, alpha=0.0004, convergence=0.027, maxsteps=10000, printer=True, printstep=50):
Xf = | np.zeros(X.shape,dtype=np.float32) | numpy.zeros |
from functools import reduce
from math import exp, isclose, log, pi
from os import makedirs, path
import matplotlib.pyplot as plt
import numpy as np
from scipy import special
working_dir = path.dirname(path.abspath(__file__))
makedirs(path.join(working_dir, 'plots'), exist_ok=True)
try:
data = np.load(path.join(working_dir, 'data.npy'))
except FileNotFoundError:
data = np.load(path.join(working_dir, 'task4.npy'))
def hist(x_array, n_bins, continuous=True, normalize=True):
min_val = x_array.min()
max_val = x_array.max()
count = np.zeros(int(n_bins))
for x in x_array:
bin_number = int((n_bins - 1) * ((x - min_val) / (max_val - min_val)))
count[bin_number] += 1
# normalize the distribution
if normalize:
count /= x_array.shape[0]
if continuous:
count /= ((max_val - min_val) / n_bins)
return count, np.linspace(min_val, max_val, num=n_bins)
num_bins = 100
counts, bins = hist(data, num_bins, continuous=False, normalize=False)
plt.bar(bins, counts, width=0.5, align='edge', color='gray')
plt.xlabel('x')
plt.ylabel(r'$P\left(x\right)$')
plt.savefig(path.join(working_dir, 'plots/hist.eps'), bbox_inches='tight')
plt.close()
counts, bins = hist(data, num_bins, continuous=False, normalize=True)
plt.bar(bins, counts, width=0.5, align='edge', color='gray')
plt.xlabel('x')
plt.ylabel(r'$P\left(x\right)$')
plt.savefig(
path.join(working_dir, 'plots/hist_normalized.eps'), bbox_inches='tight'
)
def poisson_likelihood(x, lambda_):
n = x.shape[0]
lambda_x = reduce(
lambda y, z: y * z, (lambda_ ** x).tolist()
)
x_factorial = reduce(
lambda y, z: y * z, special.factorial(x, exact=True).tolist()
)
return exp(- lambda_ * n) * lambda_x / x_factorial
def poisson_log_likelihood(x, lambda_):
n = x.shape[0]
log_lambda_x = log(lambda_) * np.sum(x)
log_x_factorial = np.sum(np.log(special.factorial(x, exact=True)))
return (- lambda_ * n) + log_lambda_x - log_x_factorial
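# Editor's illustrative sketch (not part of the original script): on a small
# sample the two implementations above should agree, i.e.
# exp(poisson_log_likelihood) equals poisson_likelihood up to rounding.
def _demo_poisson_consistency(lambda_=2.0):
    x = np.array([1, 2, 3])
    assert isclose(exp(poisson_log_likelihood(x, lambda_)),
                   poisson_likelihood(x, lambda_))
    return poisson_log_likelihood(x, lambda_)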
# Poisson MLE
lambda_hat = | np.mean(data) | numpy.mean |
# Copyright (c) 2017-2020 <NAME>.
# Author: <NAME>
# Email: <EMAIL>
# Update: 2020 - 2 - 12
import numpy as np
from .Utility import to_list
def gaussian_kernel(kernel_size: (int, tuple, list), width: float):
"""generate a gaussian kernel
Args:
kernel_size: the size of generated gaussian kernel. If is a scalar, the
kernel is a square matrix, or it's a kernel of HxW.
width: the standard deviation of gaussian kernel. If width is 0, the
kernel is identity, if width reaches to +inf, the kernel becomes
averaging kernel.
"""
    kernel_size = np.asarray(to_list(kernel_size, 2), float)
half_ksize = (kernel_size - 1) / 2.0
x, y = np.mgrid[-half_ksize[0]:half_ksize[0] + 1,
-half_ksize[1]:half_ksize[1] + 1]
kernel = np.exp(-(x ** 2 + y ** 2) / (2 * width ** 2))
return kernel / (kernel.sum() + 1e-8)
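# Editor's illustrative sketch (not part of the original module): generating a
# small blur kernel with the helper above; size and width are arbitrary values.
def _demo_gaussian_kernel():
    k = gaussian_kernel(5, width=1.0)    # 5x5 kernel, sigma = 1.0
    assert k.shape == (5, 5)
    assert abs(k.sum() - 1.0) < 1e-6     # normalized (up to the 1e-8 guard term)
    return k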
def anisotropic_gaussian_kernel(kernel_size: (int, tuple, list), theta: float,
l1: float, l2: float):
"""generate anisotropic gaussian kernel
Args:
kernel_size: the size of generated gaussian kernel. If is a scalar, the
kernel is a square matrix, or it's a kernel of HxW.
theta: rotation angle (rad) of the kernel. [0, pi]
l1: scaling of eigen values on base 0. [0.1, 10]
l2: scaling of eigen values on base 1. [0.1, L1]
"""
def gmdistribution(mu, sigma):
half_k = (kernel_size - 1) / 2.0
x, y = np.mgrid[-half_k[0]:half_k[0] + 1, -half_k[1]:half_k[1] + 1]
X = np.expand_dims( | np.stack([y, x], axis=-1) | numpy.stack |
import axi
import random
import numpy as np
def horizontal_lines():
paths = []
# horizontal lines
for i in range(4):
y = 1 / 8 + i / 4
paths.append([(0, y), (1, y)])
# horizontal lines
for i in range(4):
x = 1 / 8 + i / 4
paths.append([(x, 0), (x, 1 / 8)])
paths.append([(x, 7 / 8), (x, 1)])
return axi.Drawing(paths)
def diagonal_lines():
paths = [
[(0, 1 / 8), (1 / 8, 0)],
[(0, 3 / 8), (3 / 8, 0)],
[(0, 5 / 8), (5 / 8, 0)],
[(0, 7 / 8), (7 / 8, 0)],
[(1 / 8, 1), (1, 1 / 8)],
[(3 / 8, 1), (1, 3 / 8)],
[(5 / 8, 1), (1, 5 / 8)],
[(7 / 8, 1), (1, 7 / 8)]
]
return axi.Drawing(paths)
def chevrons():
paths = [
[(0, 3 / 8), (1 / 8, 4 / 8), (0, 5 / 8)],
[(0, 1 / 8), (3 / 8, 4 / 8), (0, 7 / 8)],
[(1 / 8, 0), (5 / 8, 4 / 8), (1 / 8, 1)],
[(3 / 8, 0), (7 / 8, 4 / 8), (3 / 8, 1)],
[(5 / 8, 0), (1, 3 / 8)],
[(1, 5 / 8), (5 / 8, 1)],
[(7 / 8, 0), (1, 1 / 8)],
[(1, 7 / 8), (7 / 8, 1)]
]
return axi.Drawing(paths)
def corner_circles(samples_per_circle=32):
paths = []
# Draw the circles centered on (0, 0)
for i in range(4):
radius = 1 / 8 + i / 4
thetas = np.linspace(0, np.pi / 2, num=samples_per_circle)
x = radius * np.cos(thetas)
y = radius * np.sin(thetas)
path = list(zip(x, y))
paths.append(path)
# Draw the circles centered on (1, 1)
for i in range(4):
radius = 1 / 8 + i / 4
thetas = np.linspace(np.pi, 3 * np.pi / 2, num=samples_per_circle)
x = 1 + radius * np.cos(thetas)
y = 1 + radius * np.sin(thetas)
points = np.stack(arrays=[x, y])
path = []
for point_idx in range(samples_per_circle):
dist = np.linalg.norm(points[:, point_idx])
if dist < 7 / 8:
if len(path) > 1:
paths.append(path)
path = []
else:
path.append(tuple(points[:, point_idx]))
if len(path) > 1:
paths.append(path)
return axi.Drawing(paths)
def edge_circles(samples_per_circle=32):
paths = []
# Draw the circles centered on (0, 0.5)
for i in range(2):
radius = 1 / 8 + i / 4
thetas = np.linspace(-np.pi / 2, np.pi / 2, num=samples_per_circle)
x = radius * | np.cos(thetas) | numpy.cos |
import numpy as np
import h5py as h5
from converters import convgeo2ply
def extract_geometry(data_file, output_dir, nth_coord):
"""
Extracts the geometry of the body used in Abhiram's simulations of flow around an axisymmetric ramp body.
In his simulations, the geometry is located at [k,j,i]=[1,:,:] (non-cartesian coordinate system)
Geometry is saved to a .ply file.
:param data_file: File to extract geometry from
:param output_dir: Output directory within which to save geometry file (just directory, no filename).
:param nth_coord: Save geometry with every nth coordinate (i.e. skip n-1 coords before saving the nth one). This helps reduce unnecessary mesh complexity. Higher is less detailed.
"""
# Open file
data = h5.File(data_file, "r")
# Extract mesh coords
xpt2f = np.ndarray.flatten(data["Base"]["Zone1"]["GridCoordinates"]["CoordinateX"][" data"][1, ::nth_coord, ::nth_coord], order="C") # k,j,i
ypt2f = np.ndarray.flatten(data["Base"]["Zone1"]["GridCoordinates"]["CoordinateY"][" data"][1, ::nth_coord, ::nth_coord], order="C")
zpt2f = np.ndarray.flatten(data["Base"]["Zone1"]["GridCoordinates"]["CoordinateZ"][" data"][1, ::nth_coord, ::nth_coord], order="C")
# Get resolutions of each surface geometry dimension
ires = len(data["Base"]["Zone1"]["GridCoordinates"]["CoordinateX"][" data"][1, 1, ::nth_coord])
jres = len(data["Base"]["Zone1"]["GridCoordinates"]["CoordinateX"][" data"][1, ::nth_coord, 1])
# close data file
data.close()
# Compile list of vertices as columns
verts = np.swapaxes(np.array([xpt2f,ypt2f,zpt2f]),0,1)
# Compile list of triangles - each triangle consists of 3 vertex IDs, and these lines make each "upper left" and "lower right" triangle (which forms a quadrilateral) out of adjacent vertices
upper_lefts = np.swapaxes(np.array([range(0, ires * (jres - 1) - 1), range(1, ires * (jres - 1)), range(ires, ires * jres - 1)]), 0, 1)
lower_rights = np.swapaxes(np.array([range(ires, ires * jres - 1), range(1, ires * (jres - 1)), range(ires + 1, ires * jres)]), 0, 1)
num_tris_half = np.shape(upper_lefts)[0]
# Delete "wraparound" faces
upper_lefts = np.delete(upper_lefts, np.arange(ires - 1, num_tris_half + 1, ires), 0)
lower_rights = np.delete(lower_rights, np.arange(ires - 1, num_tris_half + 1, ires), 0)
# Concatenate triangle arrays together
tris = np.concatenate((upper_lefts, lower_rights), axis=0)
# Convert geometry to a .ply file for Blender
convgeo2ply(verts=verts, tris=tris, output_path_ply=output_dir + "/body.ply")
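# --- Editor's illustrative sketch (not part of the original module) ---
# Example call for the extractor above; the HDF5 filename, output folder and
# decimation factor are placeholders (assumptions), not values from the
# original workflow.
def _demo_extract_geometry():
    extract_geometry(data_file="flow_solution.h5", output_dir="geometry_out", nth_coord=4)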
def extract_geometry_general(data_file, output_dir, nth_coord, axis="K", level=1):
"""
Extracts geometry from any surface in ijk coordinates. An axis, and the level along that axis, is specified, where the surface lies along the other two axes at the specified level.
:param data_file: File to extract geometry from
:param output_dir: Output directory within which to save geometry file (just directory, no filename).
    :param nth_coord: Save geometry with every nth coordinate (i.e. skip n-1 coords before saving the nth one). This helps reduce unnecessary mesh complexity. Higher is less detailed.
    :param axis: Axis ("I", "J" or "K") normal to the extracted surface.
    :param level: Index along the chosen axis at which the surface is taken.
    """
# Open file
data = h5.File(data_file, "r")
# Extract mesh coords
if axis.upper()=="I":
xpt2f = np.ndarray.flatten(
data["Base"]["Zone1"]["GridCoordinates"]["CoordinateX"][" data"][::nth_coord, ::nth_coord, level],
order="C") # k,j,i
ypt2f = np.ndarray.flatten(
data["Base"]["Zone1"]["GridCoordinates"]["CoordinateY"][" data"][::nth_coord, ::nth_coord, level], order="C")
zpt2f = np.ndarray.flatten(
data["Base"]["Zone1"]["GridCoordinates"]["CoordinateZ"][" data"][::nth_coord, ::nth_coord, level], order="C")
# Get resolutions of each surface geometry dimension
xres = len(data["Base"]["Zone1"]["GridCoordinates"]["CoordinateX"][" data"][1, ::nth_coord, 1])
yres = len(data["Base"]["Zone1"]["GridCoordinates"]["CoordinateX"][" data"][::nth_coord, 1, 1])
elif axis.upper()=="J":
xpt2f = np.ndarray.flatten(
data["Base"]["Zone1"]["GridCoordinates"]["CoordinateX"][" data"][::nth_coord, level, ::nth_coord],
order="C") # k,j,i
ypt2f = np.ndarray.flatten(
data["Base"]["Zone1"]["GridCoordinates"]["CoordinateY"][" data"][::nth_coord, level, ::nth_coord],
order="C")
zpt2f = np.ndarray.flatten(
data["Base"]["Zone1"]["GridCoordinates"]["CoordinateZ"][" data"][::nth_coord, level, ::nth_coord],
order="C")
xres = len(data["Base"]["Zone1"]["GridCoordinates"]["CoordinateX"][" data"][1, 1, ::nth_coord])
yres = len(data["Base"]["Zone1"]["GridCoordinates"]["CoordinateX"][" data"][::nth_coord, 1, 1])
elif axis.upper()=="K":
xpt2f = np.ndarray.flatten(
data["Base"]["Zone1"]["GridCoordinates"]["CoordinateX"][" data"][level, ::nth_coord, ::nth_coord],
order="C") # k,j,i
ypt2f = np.ndarray.flatten(
data["Base"]["Zone1"]["GridCoordinates"]["CoordinateY"][" data"][level, ::nth_coord, ::nth_coord],
order="C")
zpt2f = np.ndarray.flatten(
data["Base"]["Zone1"]["GridCoordinates"]["CoordinateZ"][" data"][level, ::nth_coord, ::nth_coord],
order="C")
# Get resolutions of each surface geometry dimension
xres = len(data["Base"]["Zone1"]["GridCoordinates"]["CoordinateX"][" data"][1, 1, ::nth_coord])
yres = len(data["Base"]["Zone1"]["GridCoordinates"]["CoordinateX"][" data"][1, ::nth_coord, 1])
data.close()
# Compile list of vertices as columns
verts = np.swapaxes(np.array([xpt2f, ypt2f, zpt2f]), 0, 1)
# Compile list of triangles - each triangle consists of 3 vertex IDs, and these lines make each "upper left" and "lower right" triangle (which forms a quadrilateral) out of adjacent vertices
upper_lefts = np.swapaxes(
np.array([range(0, xres * (yres - 1) - 1), range(1, xres * (yres - 1)), range(xres, xres * yres - 1)]), 0, 1)
lower_rights = np.swapaxes(
np.array([range(xres, xres * yres - 1), range(1, xres * (yres - 1)), range(xres + 1, xres * yres)]), 0, 1)
num_tris_half = np.shape(upper_lefts)[0]
# Delete "wraparound" faces
upper_lefts = np.delete(upper_lefts, np.arange(xres - 1, num_tris_half + 1, xres), 0)
    lower_rights = np.delete(lower_rights, np.arange(xres - 1, num_tris_half + 1, xres), 0)
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 27 13:14:39 2018
@author: ning
"""
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import RandomForestClassifier,VotingClassifier,GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from mne.decoding import Vectorizer,LinearModel
from sklearn import metrics
import pandas as pd
import numpy as np
def make_clf(vectorized = True,hard_soft='soft',voting=True,decoding=True):
"""
vectorized: to wrap the 3D matrix to 2D
hard_soft: decision making step for voting classifier
    voting: if 'vote', the voters are SGD, SVM, KNN, naive Bayes, a dense neural network, random forest
    and a gradient boosting classifier.
    To make sure the voting classifier returns probabilistic predictions, we need to carefully define
    each of the individual 'voters'.
    If 'linear', a single linear-kernel SVM is used; otherwise the voters are SGD, SVM, random forest
    and a gradient boosting classifier.
    All steps are nested with a standardized scaler - mean centered and unit variance
"""
linear_ = SGDClassifier(max_iter=int(2e3),tol=1e-3,random_state=12345,loss='modified_huber')
svc = SVC(max_iter=int(2e3),tol=1e-3,random_state=12345,kernel='rbf',probability=True,)
rf = RandomForestClassifier(n_estimators=100,random_state=12345)
knn = KNeighborsClassifier(n_neighbors=10,)
bayes = GaussianNB(priors=(0.4,0.6))
gdb = GradientBoostingClassifier(random_state=12345)
NN = MLPClassifier(hidden_layer_sizes=(100,50,20),learning_rate='adaptive',solver='sgd',max_iter=int(1e3),
shuffle=True,random_state=12345)
clf = []
if vectorized:
clf.append(('vectorize',Vectorizer()))
clf.append(('scaler',StandardScaler()))
if voting=='vote':
clf.append(('estimator',VotingClassifier([('SGD',linear_),
('SVM',svc),
('RF',rf),
('KNN',knn),
('naive_bayes',bayes),
('DNN',NN),
('GDB',gdb)],voting=hard_soft,)))
elif voting == 'linear':
est = SVC(max_iter=-1,tol=1e-3,random_state=12345,kernel='linear',probability=True,)
if decoding:
clf.append(('estimator',LinearModel(est)))
else:
clf.append(('estimator',est))
else:
clf.append(('estimator',VotingClassifier([('SGD',linear_),
('SVM',svc),
('RF',rf),
('GDB',gdb)],voting=hard_soft,)))
clf = Pipeline(clf)
return clf
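# Example usage (illustrative only; X is assumed to be an MNE-style epochs array of shape
# (n_trials, n_channels, n_times) and y a binary label vector):
# clf = make_clf(vectorized=True, hard_soft='soft', voting='vote')
# clf.fit(X_train, y_train)
# proba = clf.predict_proba(X_test)[:, -1]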
def row_selection(row_element,idx_pos):
"""
small helper function for the next function
"""
if row_element in idx_pos:
return True
else:
return False
def prediction_pipeline(labels,images,clf,working_trial_orders,condition='load2'):
"""
    This function processes predicted labels, predicted probabilities, and true labels in
    different experimental conditions.
"""
if condition == 'load2':# just in case I figure out how to do this in load 5 condition
image1,image2 = images
positive,negative = [],[] # preallocate for the predictions within the positive probe trials and negative probe trials
idx_pos,idx_neg = [],[]# preallocate for the trial numbers
for ii,(label, image1_,image2_) in enumerate(zip(labels,image1,image2)):
# print(label,image1_.shape,image2_.shape)
if label:# "1" in label can be used as "true"
positive.append([clf.predict_proba(image1_.reshape(1,61,200))[0],# single item probabilistic prediction returns a 3D vector, thus, we take the 1st dimension out
clf.predict_proba(image2_.reshape(1,61,200))[0]])
idx_pos.append(ii)
else:
negative.append([clf.predict_proba(image1_.reshape(1,61,200))[0],
clf.predict_proba(image2_.reshape(1,61,200))[0]])
idx_neg.append(ii)
positive = np.array(positive)
negative = np.array(negative)
positive_soft_max = np.argmax(positive,axis=1)
negative_soft_max = np.argmax(negative,axis=1)
positive_prob = positive[:,:,-1]# we only care about the probability of the positive probe images (old images)
negative_prob = negative[:,:,-1]
soft_max_ = np.concatenate((positive_soft_max,negative_soft_max))# I called this "soft max", but it is not doing such thing
prob_ = np.concatenate((positive_prob,negative_prob))
soft_max_idx = np.concatenate((idx_pos,idx_neg))
        # create a data frame with two columns; each column contains the probability of whether this image will be presented again in the probe
results = pd.DataFrame(soft_max_,columns=['image1_pred','image2_pred'])
results['order'] = soft_max_idx
results['image1_pred_prob'] = prob_[:,0]
results['image2_pred_prob'] = prob_[:,1]
results = results.sort_values('order').reset_index()
results['labels'] = labels
results['image1']=np.array(working_trial_orders['probe'] == working_trial_orders['image1'],dtype=int)
results['image2']=np.array(working_trial_orders['probe'] == working_trial_orders['image2'],dtype=int)
# pred = results[['image1_pred','image2_pred']].values
pred_prob = results[['image1_pred_prob','image2_pred_prob']].values
truth = results[['image1','image2']].values
# print(metrics.classification_report(truth,pred))
# predictive ability of order
print('predictive ability of order',metrics.roc_auc_score(truth[:,0],pred_prob[:,0]),metrics.roc_auc_score(truth[:,1],pred_prob[:,1]))
# predictive ability of positive and negative stimuli
results['trial']=[row_selection(row_element,idx_pos) for row_element in results['order'].values]
positive_trials = results.iloc[results['trial'].values]
truth_ = positive_trials[['image1','image2']].values.flatten()
pred_prob_ = positive_trials[['image1_pred_prob','image2_pred_prob']].values.flatten()
print('predictive ability of positive and negative stimuli',metrics.roc_auc_score(truth_,pred_prob_))
return {'predictive ability of order':[metrics.roc_auc_score(truth[:,0],pred_prob[:,0]),
metrics.roc_auc_score(truth[:,1],pred_prob[:,1])],
'predictive ability of positive and negative stimuli':metrics.roc_auc_score(truth_,pred_prob_)}
    elif condition == 'load5':# to be continued
        image1,image2,image3,image4,image5 = images
positive,negative = [],[]
idx_pos,idx_neg = [],[]
for ii, (label,image1_,image2_,image3_,image4_,image5_) in enumerate(zip(labels,image1,image2,image3,image4,image5)):
if label:
positive.append([clf.predict_proba(image1_.reshape(1,61,200))[0],
clf.predict_proba(image2_.reshape(1,61,200))[0],
clf.predict_proba(image3_.reshape(1,61,200))[0],
clf.predict_proba(image4_.reshape(1,61,200))[0],
clf.predict_proba(image5_.reshape(1,61,200))[0]])
idx_pos.append(ii)
else:
negative.append([clf.predict_proba(image1_.reshape(1,61,200))[0],
clf.predict_proba(image2_.reshape(1,61,200))[0],
clf.predict_proba(image3_.reshape(1,61,200))[0],
clf.predict_proba(image4_.reshape(1,61,200))[0],
clf.predict_proba(image5_.reshape(1,61,200))[0]])
idx_neg.append(ii)
        positive = np.array(positive)
import numpy as np
from gensim.models.keyedvectors import KeyedVectors
import matplotlib.pyplot as plt
import pandas as pd
import json
from numpy import loadtxt
import sys, os
sys.path.append(os.path.join('..'))
if sys.version_info[0] < 3:
import io
open = io.open
plt.style.use("seaborn")
from sklearn.manifold import TSNE
def plot_words(embedding, model_alias, professions, gender_specific, bias_type, biased):
# load x-axis
x = np.loadtxt(os.path.join("..", "output", f"{model_alias}_gender_subspace.csv"), delimiter=',')
# load vector for y-axis
y_ax = np.loadtxt(os.path.join("..", "output", f"{model_alias}_neutrality.csv"), delimiter=',')
#combine
wordlist = professions+gender_specific
# choose only words that are in the embeddings
wordlist = [w for w in wordlist if w in embedding.vocab]
# retrieve vectors
vectors = [embedding[k] for k in wordlist]
# flipped
y = np.flipud(y_ax)
# normalize
x/= np.linalg.norm(x)
y/= np.linalg.norm(y)
# Get pseudo-inverse matrix
W = np.array(vectors)
B = | np.array([x,y]) | numpy.array |
import numpy as np
import random
import bisect
import environment
import pickle
from collections import deque
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import Adam
from keras.regularizers import l2
from keras import backend as K
from keras.models import load_model
import tensorflow as tf
import time
def sample_from_distribution(distribution):
total = np.sum(distribution)
cdf = []
cumsum = 0
for w in distribution:
cumsum += w
        cdf.append(cumsum / total)
x = random.random()
idx = bisect.bisect(cdf, x)
return idx
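# Sketch of the intended behaviour (unnormalized weights are fine): with
# sample_from_distribution([1, 2, 7]) the cdf becomes [0.1, 0.3, 1.0], so index 2 is
# returned about 70% of the time.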
def epsilon_greedy_selection(q, actions, epsilon=0.1):
if np.random.uniform(0, 1) < epsilon:
# exploration
return np.random.choice(actions)
else:
# exploitation
arg = np.argsort(q[actions])[::-1]
n_tied = sum(np.isclose(q[actions], q[actions][arg[0]]))
return actions[np.random.choice(arg[0:n_tied])]
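# Illustrative call: with q = np.array([0.1, 0.5, 0.5]) and actions = np.array([0, 1, 2]),
# exploitation breaks the tie between actions 1 and 2 uniformly at random; with probability
# epsilon a uniformly random action is returned instead.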
class Dumby():
def __init__(self, env, epsilon=0.3, gamma=0.75, algorithm='dqn', schedule={}):
self.state_size = env.n_states
self.action_size = env.n_actions
self.batch_size = 32
self.gamma = gamma # discount rate
self.epsilon = epsilon # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.learning_rate = 0.001
self.algorithm = algorithm
self.schedule = schedule
self.in_between_training_steps = self.batch_size
if self.algorithm=='dqn':
self.memory = deque(maxlen=2000)
self.target_model = self._build_model()
elif self.algorithm =='sarsa':
self.alpha = 0.1
self.q = np.zeros((self.state_size, self.action_size))
self.q.fill(float('-inf'))
for s in range(self.state_size):
actions = env.actions(s)
for a in actions:
self.q[s, a] = 0
def _huber_loss(self, y_true, y_pred, clip_delta=1.0):
error = y_true - y_pred
cond = K.abs(error) <= clip_delta
squared_loss = 0.5 * K.square(error)
quadratic_loss = 0.5 * K.square(clip_delta) + clip_delta * (K.abs(error) - clip_delta)
return K.mean(tf.where(cond, squared_loss, quadratic_loss))
def _build_model(self):
l2_reg = 0.00001
model = Sequential()
# model.add(Dense(10, input_dim=self.state_size, activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
# model.add(Dropout(0.1))
# model.add(Dense(16, input_dim=self.state_size, activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
# model.add(Dropout(0.1))
model.add(Dense(24, activation='relu', input_dim=self.state_size)) #, kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg), activation_regularizer=l2(l2_reg)))
model.add(Dropout(0.01))
model.add(Dense(24, activation='relu')) #, kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg), activation_regularizer=l2(l2_reg)))
model.add(Dropout(0.01))
# model.add(Dropout(0.1))
# model.add(Dense(30, activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
# model.add(Dropout(0.3))
model.add(Dense(self.action_size, activation='linear'))
model.compile(loss='mse',
optimizer=Adam(lr=self.learning_rate))
# model.compile(loss=self._huber_loss,
# optimizer=Adam(lr=self.learning_rate))
return model
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
if len(self.memory) >= self.batch_size and self.in_between_training_steps >= self.batch_size:
# print(' replay')
print('[!] Fitting model with replay')
loss = self.replay()
self.in_between_training_steps = 0
self.in_between_training_steps += 1
# def forget(self):
# del self.memory
# self.memory = deque(maxlen=2000)
def update_target_model(self):
# copy weights from model to target_model
self.target_model.set_weights(self.model.get_weights())
def act(self, state, actions):
if np.random.rand() <= self.epsilon:
return np.random.choice(actions)
# return random.randrange(self.action_size)
if self.algorithm=='dqn':
act_values = self.target_model.predict(state)
# if np.argmax(act_values[0]) not in actions:
# act_ = np.random.choice(actions)
# print('random action', act_)
# return act_
# else:
# # print(['{:.3f}'.format(si) for si in state[0,:]], ['{:.3f}'.format(si) for si in act_values[0,:]])
# print('predicted action', np.argmax(act_values[0]))
return np.argmax(act_values[0]) # returns action
elif self.algorithm == 'sarsa':
q_ = self.q[state]
arg = np.argsort(q_[actions])[::-1]
n_tied = sum(np.isclose(q_[actions], q_[actions][arg[0]]))
return actions[np.random.choice(arg[0:n_tied])]
def replay(self):
# minibatch = random.sample(self.memory, batch_size)
# for state, action, reward, next_state, done in minibatch:
# target = reward
# if not done:
# target = (reward + self.gamma * np.amax(self.target_model.predict(next_state)[0]))
# target_f = self.target_model.predict(state)
# target_f[0][action] = target
# self.target_model.fit(state, target_f, epochs=1, verbose=0)
# if self.epsilon > self.epsilon_min:
# self.epsilon *= self.epsilon_decay
#minibatch = random.sample(self.memory, batch_size)
# minibatch = self.memory
losses = []
#print(len(self.memory), len(self.memory[0]))
# minibatch = self.memory #random.sample(self.memory, batch_size)
#print(len(self.memory), self.batch_size)
minibatch = random.sample(self.memory, self.batch_size)
counter_ = 1
for state, action, reward, next_state, done in minibatch:
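            # Bellman-style DQN target: immediate reward plus the discounted maximum Q-value
            # predicted by the target network for the next state (only when not terminal).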
target = reward
if not done:
target = reward + self.gamma * np.amax(self.target_model.predict(next_state)[0])
target_f = self.target_model.predict(state)
target_f[0][action] = target
# print(state, target_f, reward, self.gamma * np.amax(self.target_model.predict(next_state)[0]), self.target_model.predict(state))
history = self.target_model.fit(state, target_f, epochs=1, verbose=0)
# target = self.target_model.predict(state)
# if done:
# target[0][action] = reward
# else:
# # a = self.target_model.predict(next_state)[0]
# t = self.target_model.predict(next_state)[0]
# target[0][action] = reward + self.gamma * np.argmax(t)
# # print('log:', action, reward, np.argmax(t), reward + self.gamma * np.argmax(t))
# # target[0][action] = reward + self.gamma * t[np.argmax(a)]
# #print(state, target)
# history = self.target_model.fit(state, target, epochs=1, verbose=0)
# print('loss:', history.history['loss'])
losses.append(history.history['loss'])
print('[-] Fitting loss instance #{} in minibatch: {}'.format(counter_, history.history['loss']))
counter_ += 1
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
        return np.mean(losses)
#This script is intended to find the top and the mid pedestal of the H mod plasma profile for the pre and post processing of the simulation
#Developed by <NAME> on 01/22/2020
import numpy as np
import matplotlib.pyplot as plt
from efittools import read_efit_file
def find_pedestal(file_name, path_name, plot):
if len(path_name)==0 and ('/' not in file_name):
path_name = './'
eqdskdata=read_efit_file(path_name+file_name)
p = eqdskdata['pressure'] #pressure read from eqdsk
x = eqdskdata['rhotor'] #radial location read from eqdsk
dp = eqdskdata['pprime'] #First order of pressure
x=x[int(len(x)*0.6):]
p=p[int(len(p)*0.6):]
dp=dp[int(len(dp)*0.6):]
    dp0 = np.gradient(p,x)
"""
Plotting
"""
# from matplotlib.colors import LogNorm
# from matplotlib.ticker import ScalarFormatter
import matplotlib.pyplot as plt
# from matplotlib.cm import ScalarMappable
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import os
from scipy import fftpack
from scipy.fftpack import fft, ifft, rfft, fftfreq
from xseis import xutil
from matplotlib.pyplot import rcParams
rcParams['figure.figsize'] = 11, 8
def sigs(d, shifts=None, labels=None, **kwargs):
if shifts is None:
shifts = np.arange(0, d.shape[0], 1) * 1.0
for i, sig in enumerate(d):
tmp = sig / np.max(np.abs(sig)) + shifts[i]
plt.plot(tmp, **kwargs)
if labels is not None:
for i, lbl in enumerate(labels):
plt.text(0, shifts[i] + 0.1, lbl, fontsize=15)
def v2color(vals):
    cnorm = plt.Normalize(vmin=np.nanmin(vals))
import numpy as np
import pandas as pd
from .io_base import DataTaker
class CAENDT57XX(DataTaker):
"""
decode CAENDT5725 or CAENDT5730 digitizer data.
Setting the model_name will set the appropriate sample_rate
Use the input_config function to set certain variables by passing
a dictionary, this will most importantly assemble the file header used
by CAEN CoMPASS to label output files.
"""
def __init__(self, *args, **kwargs):
self.id = None
self.model_name = "DT5725" # hack -- can't set the model name in the init
self.decoder_name = "caen"
self.file_header = None
self.adc_bitcount = 14
self.sample_rates = {"DT5725": 250e6, "DT5730": 500e6}
self.sample_rate = None
if self.model_name in self.sample_rates.keys():
self.sample_rate = self.sample_rates[self.model_name]
else:
raise TypeError("Unidentified digitizer type: "+str(model_name))
self.v_range = 2.0
self.e_cal = None
self.e_type = None
self.int_window = None
self.parameters = ["TIMETAG", "ENERGY", "E_SHORT", "FLAGS"]
self.decoded_values = {
"board": None,
"channel": None,
"timestamp": None,
"energy": None,
"energy_short": None,
"flags": None,
"num_samples": None,
"waveform": []
}
super().__init__(*args, **kwargs)
def input_config(self, config):
self.id = config["id"]
self.v_range = config["v_range"]
self.e_cal = config["e_cal"]
self.e_type = config["e_type"]
self.int_window = config["int_window"]
self.file_header = "CH_"+str(config["channel"])+"@"+self.model_name+"_"+str(config["id"])+"_Data_"
def get_event_size(self, t0_file):
with open(t0_file, "rb") as file:
if self.e_type == "uncalibrated":
first_event = file.read(24)
                [num_samples] = np.frombuffer(first_event[20:24], dtype=np.uint32)
return 24 + 2*num_samples
elif self.e_type == "calibrated":
first_event = file.read(30)
[num_samples] = np.frombuffer(first_event[26:30], dtype=np.uint32)
                return 30 + 2 * num_samples  # 30 header bytes + 2 bytes per sample
else:
raise TypeError("Invalid e_type! Valid e_type's: uncalibrated, calibrated")
def get_event(self, event_data_bytes):
self.decoded_values["board"] = np.frombuffer(event_data_bytes[0:2], dtype=np.uint16)[0]
self.decoded_values["channel"] = np.frombuffer(event_data_bytes[2:4], dtype=np.uint16)[0]
self.decoded_values["timestamp"] = np.frombuffer(event_data_bytes[4:12], dtype=np.uint64)[0]
if self.e_type == "uncalibrated":
self.decoded_values["energy"] = np.frombuffer(event_data_bytes[12:14], dtype=np.uint16)[0]
self.decoded_values["energy_short"] = np.frombuffer(event_data_bytes[14:16], dtype=np.uint16)[0]
self.decoded_values["flags"] = np.frombuffer(event_data_bytes[16:20], np.uint32)[0]
self.decoded_values["num_samples"] = np.frombuffer(event_data_bytes[20:24], dtype=np.uint32)[0]
self.decoded_values["waveform"] = np.frombuffer(event_data_bytes[24:], dtype=np.uint16)
elif self.e_type == "calibrated":
self.decoded_values["energy"] = np.frombuffer(event_data_bytes[12:20], dtype=np.float64)[0]
self.decoded_values["energy_short"] = np.frombuffer(event_data_bytes[20:22], dtype=np.uint16)[0]
self.decoded_values["flags"] = | np.frombuffer(event_data_bytes[22:26], np.uint32) | numpy.frombuffer |
#!/usr/bin/env python
# coding=utf-8
"""
Script to generate uncertain sets of building/building physics uncertain
parameters
"""
import math
import random as rd
import numpy as np
import matplotlib.pyplot as plt
def calc_array_mod_years_single_build(nb_samples, year_of_constr, max_year,
time_sp_force_retro=40):
"""
Calculate array of modification years for single building. Assumes
equal distribution of mod. year probability density function.
If time_sp_force_retro is set and smaller than time span between max_year
(e.g. current year) and year_of_constr (year of construction), time span
is only considered between max_year and (max_year - time_sp_force_retro).
This should guarantee, that at least on modernization happened in the span
of time_sp_force_retro.
Parameters
----------
nb_samples : int
Number of samples
year_of_constr : int
Year of construction of building
max_year : int
Last possible year of retrofit (e.g. current year)
Should be larger than year_of_constr
time_sp_force_retro : int, optional
Timespan to force retrofit (default: 40). If value is set, forces
retrofit within its time span. If set to None, time span is not
considered.
Returns
-------
array_mod_years : list (of ints)
List of modernization years.
"""
# list_mod_years = []
array_mod_years = np.zeros(nb_samples)
# # Currently unused: List with TEASER years of modernization
# list_teaser_mod_y = [1982, 1995, 2002, 2009]
# Calc min_year
if time_sp_force_retro is not None:
if max_year - year_of_constr > time_sp_force_retro:
min_year = int(max_year - time_sp_force_retro)
else:
min_year = int(year_of_constr + 1)
else:
min_year = int(year_of_constr + 1)
# Do sampling
for i in range(len(array_mod_years)):
array_mod_years[i] = rd.randint(min_year, max_year)
return array_mod_years
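# Example usage (hypothetical numbers): draw 100 retrofit years for a building erected in 1960,
# evaluated in 2017, forcing at least one retrofit within the last 40 years:
# mod_years = calc_array_mod_years_single_build(nb_samples=100, year_of_constr=1960,
#                                               max_year=2017, time_sp_force_retro=40)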
def calc_inf_samples(nb_samples, mean=0, sdev=1, max_val=2):
"""
Performs building infiltration rate sampling based on log normal
distribution.
Reset values larger than max_val to 0.26 (1/h, average value)
Parameters
----------
nb_samples : int
Number of samples
mean : float, optional
Mean of log normal distribution (default: 0)
sdev : float, optional
Standard deviation of log normal distribution (default: 1)
max_val : float, optional
Maximal allowed value for natural infiltration rate (default: 2)
Returns
-------
array_inf : list (of floats)
List of infiltration rates in 1/h
References
----------
For reference values:
Münzenberg, Uwe (2004): Der natürliche Luftwechsel in Gebäuden und seine
Bedeutung bei der Beurteilung von Schimmelpilzschäden. In: Umwelt,
Gebäude & Gesundheit: Innenraumhygiene, Raumluftqualität und
Energieeinsparung. Ergebnisse des 7, S. 263–271.
"""
    array_inf = np.random.lognormal(mean=mean, sigma=sdev, size=nb_samples)
    # Reset values larger than max_val to the average value (see docstring)
    array_inf[array_inf > max_val] = 0.26
    return array_inf
# -*- coding: utf-8 -*-
# Copyright (c) 2019 The HERA Team
# Licensed under the 2-clause BSD License
from __future__ import print_function, division, absolute_import
from time import time
import numpy as np
import tensorflow as tf
import h5py
import random
from sklearn.metrics import confusion_matrix
from scipy import ndimage
from copy import copy
def transpose(X):
"""
Transpose for use in the map functions.
"""
return X.T
def normalize(X):
"""
Normalization for the log amplitude required in the folding process.
"""
sh = np.shape(X)
absX = np.abs(X)
absX = np.where(absX <= 0.0, (1e-8) * np.random.randn(sh[0], sh[1]), absX)
LOGabsX = np.nan_to_num(np.log10(absX))
return np.nan_to_num((LOGabsX - np.nanmean(LOGabsX)) / np.nanstd(np.abs(LOGabsX)))
def normphs(X):
"""
Normalization for the phase in the folding proces.
"""
sh = np.shape(X)
return np.array(np.sin(np.angle(X)))
def tfnormalize(X):
"""
Skip connection layer normalization.
"""
sh = np.shape(X)
X_norm = tf.contrib.layers.layer_norm(X, trainable=False)
return X
def foldl(data, ch_fold=16, padding=2):
"""
Folding function for carving up a waterfall visibility flags for prediction in the FCN.
"""
sh = np.shape(data)
    _data = data.T.reshape(ch_fold, int(sh[1] / ch_fold), -1)
_DATA = np.array(map(transpose, _data))
_DATApad = np.array(
map(
np.pad,
_DATA,
len(_DATA) * [((padding + 2, padding + 2), (padding, padding))],
len(_DATA) * ["reflect"],
)
)
return _DATApad
def pad(data, padding=2):
"""
Padding function applied to folded spectral windows.
Reflection is default padding.
"""
sh = np.shape(data)
t_pad = 16
data_pad = np.pad(
data, pad_width=((t_pad + 2, t_pad + 2), (t_pad, t_pad)), mode="reflect"
)
return data_pad
def unpad(data, diff=4, padding=2):
"""
Unpadding function for recovering flag predictions.
"""
sh = np.shape(data)
t_unpad = sh[0]
return data[padding[0] : sh[0] - padding[0], padding[1] : sh[1] - padding[1]]
def store_iterator(it):
a = [x for x in it]
return np.array(a)
def fold(data, ch_fold=16, padding=2):
"""
Folding function for carving waterfall visibilities with additional normalized log
and phase channels.
Input: (Batch, Time, Frequency)
Output: (Batch*FoldFactor, Time, Reduced Frequency, Channels)
"""
sh = np.shape(data)
_data = data.T.reshape(ch_fold, int(sh[1] / ch_fold), -1)
_DATA = store_iterator(map(transpose, _data))
_DATApad = store_iterator(map(pad, _DATA))
DATA = np.stack(
(
store_iterator(map(normalize, _DATApad)),
store_iterator(map(normphs, _DATApad)),
np.mod(store_iterator(map(normphs, _DATApad)), np.pi),
),
axis=-1,
)
return DATA
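# Shape sketch (assuming a (60, 1024) waterfall and the defaults above): fold() yields 16
# spectral windows of (60, 64), each reflection-padded by pad() to (96, 96) and stacked into a
# (16, 96, 96, 3) array of normalized log-amplitude, sin(phase), and sin(phase) mod pi channels.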
def unfoldl(data_fold, ch_fold=16, padding=2):
"""
Unfolding function for recombining the carved label (flag) frequency windows back into a complete
waterfall visibility.
Input: (Batch*FoldFactor, Time, Reduced Frequency, Channels)
Output: (Batch, Time, Frequency)
"""
sh = np.shape(data_fold)
data_unpad = data_fold[
:, (padding + 2) : (sh[1] - (padding + 2)), padding : sh[2] - padding
]
ch_fold, ntimes, dfreqs = np.shape(data_unpad)
data_ = np.transpose(data_unpad, (0, 2, 1))
_data = data_.reshape(ch_fold * dfreqs, ntimes).T
return _data
def stacked_layer(
input_layer,
num_filter_layers,
kt,
kf,
activation,
stride,
pool,
bnorm=True,
name="None",
dropout=None,
maxpool=True,
mode=True,
):
"""
Creates a 3x stacked layer of convolutional layers. Each layer uses the same kernel size.
Batch normalized output is default and recommended for faster convergence, although
not every may require it (???).
Input: Tensor Variable (Batch*FoldFactor, Time, Reduced Frequency, Input Filter Layers)
Output: Tensor Variable (Batch*FoldFactor, Time/2, Reduced Frequency/2, num_filter_layers)
"""
conva = tf.layers.conv2d(
inputs=input_layer,
filters=num_filter_layers,
kernel_size=[kt, kt],
strides=[1, 1],
padding="same",
activation=activation,
)
if kt - 2 < 0:
kt = 3
if dropout is not None:
convb = tf.layers.dropout(
tf.layers.conv2d(
inputs=conva,
filters=num_filter_layers,
kernel_size=[kt, kt],
strides=[1, 1],
padding="same",
activation=activation,
),
rate=dropout,
)
else:
convb = tf.layers.conv2d(
inputs=conva,
filters=num_filter_layers,
kernel_size=[kt, kt],
strides=[1, 1],
padding="same",
activation=activation,
)
shb = convb.get_shape().as_list()
convc = tf.layers.conv2d(
inputs=convb,
filters=num_filter_layers,
kernel_size=(1, 1),
padding="same",
activation=activation,
)
if bnorm:
bnorm_conv = tf.layers.batch_normalization(
convc, scale=True, center=True, training=mode, fused=True
)
else:
bnorm_conv = convc
if maxpool:
pool = tf.layers.max_pooling2d(
inputs=bnorm_conv, pool_size=pool, strides=stride
)
elif maxpool is None:
pool = bnorm_conv
else:
pool = tf.layers.average_pooling2d(
inputs=bnorm_conv, pool_size=pool, strides=stride
)
return pool
def batch_accuracy(labels, predictions):
"""
Returns the RFI class accuracy.
"""
labels = tf.cast(labels, dtype=tf.int64)
predictions = tf.cast(predictions, dtype=tf.int64)
correct = tf.reduce_sum(
tf.cast(tf.equal(tf.add(labels, predictions), 2), dtype=tf.int64)
)
total = tf.reduce_sum(labels)
return tf.divide(correct, total)
def accuracy(labels, predictions):
"""
Numpy version of RFI class accuracy.
"""
correct = 1.0 * np.sum((labels + predictions) == 2)
total = 1.0 * np.sum(labels == 1)
print("correct", correct)
print("total", total)
try:
return correct / total
except BaseException:
return 1.0
def MCC(tp, tn, fp, fn):
"""
Calculates the Mathews Correlation Coefficient.
"""
if tp == 0 and fn == 0:
return tp * tn - fp * fn
else:
return (tp * tn - fp * fn) / np.sqrt(
(1.0 * (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
)
def f1(tp, tn, fp, fn):
"""
Calculates the F1 Score.
"""
precision = tp / (1.0 * (tp + fp))
recall = tp / (1.0 * (tp + fn))
return 2.0 * precision * recall / (precision + recall)
def SNRvsTPR(data, true_flags, flags):
"""
Calculates the signal-to-noise ratio versus true positive rate (recall).
"""
SNR = np.linspace(0.0, 4.0, 30)
snr_tprs = []
data_ = np.copy(data)
flags_ = np.copy(flags)
true_flags_ = np.copy(true_flags)
for snr_ in SNR:
snr_map = np.log10(data_ * flags_ / np.std(data_ * np.logical_not(true_flags)))
snr_inds = snr_map < snr_
confuse_mat = confusion_matrix(
true_flags_[snr_inds].astype(int).reshape(-1),
flags_[snr_inds].astype(int).reshape(-1),
)
if np.size(confuse_mat) == 1:
tp = 1e-10
tn = confuse_mat[0][0]
fp = 1e-10
fn = 1e-10
else:
try:
tn, fp, fn, tp = confuse_mat.ravel()
except BaseException:
tp = np.nan
fn = np.nan
snr_tprs.append(MCC(tp, tn, fp, fn))
data_[snr_inds] = 0.0
return snr_tprs
def hard_thresh(layer, thresh=0.5):
"""
Thresholding function for predicting based on raw FCN output.
"""
layer_sigmoid = 1.0 / (1.0 + np.exp(-layer))
return np.where(layer_sigmoid > thresh, np.ones_like(layer), np.zeros_like(layer))
def softmax(X):
return np.exp(X) / np.sum(np.exp(X), axis=-1)
def ROC_stats(ground_truth, logits):
ground_truth = np.reshape(ground_truth, [-1])
thresholds = np.linspace(-1, 4.0, 30)
FPR = []
TPR = []
MCC_arr = []
F2 = []
for thresh in thresholds:
pred_ = hard_thresh(logits, thresh=thresh).reshape(-1)
tn, fp, fn, tp = confusion_matrix(ground_truth, pred_).ravel()
recall = tp / (1.0 * (tp + fn))
precision = tp / (1.0 * (tp + fp))
TPR.append(tp / (1.0 * (tp + fn)))
FPR.append(fp / (1.0 * (fp + tn)))
MCC_arr.append(MCC(tp, tn, fp, fn))
F2.append(5.0 * recall * precision / (4.0 * precision + recall))
best_thresh = thresholds[np.nanargmax(F2)]
return FPR, TPR, MCC_arr, F2, best_thresh
def load_pipeline_dset(stage_type):
"""
Additional loading function for specific evaluation datasets.
"""
# f = h5py.File('JK_5Jan2019.h5','r')
f = h5py.File("IDR21TrainingData_Raw_vX.h5", "r")
# f = h5py.File('IDR21InitialFlags_v2.h5','r')
# f = h5py.File('IDR21TrainingData_Raw_v2.h5')
# f = h5py.File('IDR21TrainingData.h5','r')
# f = h5py.File('RealVisRFI_v5.h5','r')
# f = h5py.File('RawRealVis_v1.h5','r')
# f = h5py.File('SimVis_Blips_100.h5','r')
# f = h5py.File('SimVis_1000_v9.h5','r')
try:
if stage_type == "uv":
return f["uv"]
elif stage_type == "uvO":
return f["uvO"]
elif stage_type == "uvOC":
return f["uvOC"]
elif stage_type == "uvOCRS":
return f["uvOCRS"]
elif stage_type == "uvOCRSD":
return f["uvOCRSD"]
except BaseException:
return f
def stride(input_data, input_labels):
"""
Takes an input waterfall visibility with labels and strides across frequency,
producing (Nchan - 64)/S new waterfalls to be folded.
"""
spw_hw = 32 # spectral window half width
nchans = 1024
    fold = nchans // (2 * spw_hw)
sample_spws = random.sample(range(0, 60), fold)
x = np.array(
[
input_data[:, i - spw_hw : i + spw_hw]
            for i in range(spw_hw, 1024 - spw_hw, (nchans - 2 * spw_hw) // 60)
]
)
x_labels = np.array(
[
input_labels[:, i - spw_hw : i + spw_hw]
            for i in range(spw_hw, 1024 - spw_hw, (nchans - 2 * spw_hw) // 60)
]
)
X = np.array([x[i].T for i in sample_spws])
X_labels = np.array([x_labels[i].T for i in sample_spws])
X_ = X.reshape(-1, 60).T
X_labels = X_labels.reshape(-1, 60).T
return X_, X_labels
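# Sketch of the intended output (assuming a (60, 1024) visibility): stride() cuts ~60 overlapping
# 64-channel spectral windows, keeps 16 of them at random, and reassembles them into a
# (60, 1024) data/label pair, shuffling which windows the network sees.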
def patchwise(data, labels):
"""
A spectral window is strided over the visibility
augmenting the existing training or evaluation
datasets.
"""
strided_dp = np.array(map(stride, data, labels))
data_strided = np.copy(strided_dp[:, 0, :, :])
labels_strided = np.copy(strided_dp[:, 1, :, :].astype(int))
return data_strided, labels_strided
def expand_dataset(data, labels):
"""
Comprehensive data augmentation function. Uses reflections, patchwise, gaussian noise, and
gaussian blurring, to improve robustness of the DFCN model which increases performance
when applied to real data.
Bloat factor is how large to increase the dataset size.
"""
bloat = 5
sh = np.shape(data)
out_data = []
out_labels = []
for i in range(bloat * sh[0]):
rnd_num = np.random.rand()
rnd_data_ind = np.random.randint(0, sh[0])
order = np.random.choice(np.logspace(-4, -1, 10))
noise = np.random.randn(sh[1], sh[2]) + 1j * np.random.randn(sh[1], sh[2])
noise_data = np.copy(data[rnd_data_ind])
noise_labels = np.copy(labels[rnd_data_ind])
        noise_data[:, :, 0] += order * np.abs(noise)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from random import randint
def matplotlib_imshow(img, name, one_channel=False):
if one_channel:
img = img.mean(dim=0)
# img = img / 2 + 0.5 # unnormalize
npimg = img.cpu().numpy()
str_name = "./outputs/" + name
plt.imsave(str_name, npimg, cmap="Greys")
# plt.figure()
# if one_channel:
# plt.imshow(npimg, cmap="Greys")
# else:
# plt.imshow(np.transpose(npimg, (1, 2, 0)))
# plt.show()
def euler2R(abc):
cosabc=torch.cos(abc)
sinabc=torch.sin(abc)
R=torch.zeros((abc.shape[0],3,3), device=abc.device)
R[:,0,0] = cosabc[:,0]*cosabc[:,1]*cosabc[:,2] - sinabc[:,0]*sinabc[:,2]
R[:,0,1] = sinabc[:,0]*cosabc[:,1]*cosabc[:,2] + cosabc[:,0]*sinabc[:,2]
R[:,0,2] = -1*sinabc[:,1]*cosabc[:,2]
R[:,1,0] = -1*cosabc[:,0]*cosabc[:,1]*sinabc[:,2] - sinabc[:,0]*cosabc[:,2]
R[:,1,1] = -1*sinabc[:,0]*cosabc[:,1]*sinabc[:,2] + cosabc[:,0]*cosabc[:,2]
R[:,1,2] = sinabc[:,1]*sinabc[:,2]
R[:,2,0] = cosabc[:,0]*sinabc[:,1]
R[:,2,1] = sinabc[:,0]*sinabc[:,1]
R[:,2,2] = cosabc[:,1]
return R
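# Quick sanity check (illustrative): euler2R(torch.zeros(1, 3)) returns a batch containing the
# 3x3 identity matrix, since all three Euler angles are zero.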
def quaternion2R(qq):
R=torch.zeros((qq.shape[0],3,3), device=qq.device)
criterion = nn.Softmax(dim=1)
qq_intermediate = criterion(qq)
qqq = torch.sqrt(qq_intermediate)
# norms_squ = torch.norm(qq,dim=1)
# norms_squ = torch.unsqueeze(norms_squ, 1)
# qqq = torch.div(qq, norms_squ)
R[:,0,0] = 1 - 2*(qqq[:,2]*qqq[:,2] + qqq[:,3]*qqq[:,3])
R[:,0,1] = 2*(qqq[:,1]*qqq[:,2] - qqq[:,3]*qqq[:,0])
R[:,0,2] = 2*(qqq[:,1]*qqq[:,3] + qqq[:,2]*qqq[:,0])
R[:,1,0] = 2*(qqq[:,1]*qqq[:,2] + qqq[:,3]*qqq[:,0])
R[:,1,1] = 1 - 2*(qqq[:,1]*qqq[:,1] + qqq[:,3]*qqq[:,3])
R[:,1,2] = 2*(qqq[:,2]*qqq[:,3] - qqq[:,1]*qqq[:,0])
R[:,2,0] = 2*(qqq[:,1]*qqq[:,3] - qqq[:,2]*qqq[:,0])
R[:,2,1] = 2*(qqq[:,2]*qqq[:,3] + qqq[:,1]*qqq[:,0])
R[:,2,2] = 1 - 2*(qqq[:,1]*qqq[:,1] + qqq[:,2]*qqq[:,2])
return R
def getRbeta():
#compute the 60 rotation matrices in the coordinate system of <NAME> and <NAME>, Computers in Physics, vol. 9, no. 4, July/August 1995.
S=np.array([[np.cos(2*np.pi/5), -np.sin(2*np.pi/5), 0], [np.sin(2*np.pi/5), np.cos(2*np.pi/5), 0], [0, 0, 1]])
U=np.array([[1/np.sqrt(5), 0, 2/np.sqrt(5)], [0, 1, 0], [-2/np.sqrt(5), 0, 1/np.sqrt(5)]])
P=np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]])
T=np.dot(U,np.dot(S,np.linalg.inv(U)))
Rbeta=np.zeros((60,3,3))
Rbeta[0,:,:]=np.eye(3)
Rbeta[1,:,:]=S
Rbeta[2,:,:]=np.dot(S,S) #S^2
Rbeta[3,:,:]=np.dot(Rbeta[2,:,:],S) #S^3
Rbeta[4,:,:]=np.dot(Rbeta[3,:,:],S) #S^4
Rbeta[5,:,:]=np.dot(S,T)
    Rbeta[6,:,:]=np.dot(T,Rbeta[5,:,:])
import os, sys
import numpy as np
import torch
import open3d as o3d
from . import pcd_utils
class Colors():
red = [0.8, 0.2, 0]
green = [0, 0.7, 0.2]
blue = [0, 0, 1]
gold = [1, 0.706, 0]
greenish = [0, 0.8, 0.506]
def visualize_point_tensor(
points_list, R, t,
colors_list=None,
compute_bbox_list=None,
additional_pcds=[],
exit_after=False,
convert_to_opengl_coords=True
):
assert len(points_list) == len(colors_list) == len(compute_bbox_list)
# World frame
referece_frame = create_frame(size=1.0)
additional_pcds.append(referece_frame)
# camera frame
camera_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=1.0, origin=[0, 0, 0]
)
camera_frame.rotate(R, pcd_utils.origin)
camera_frame.translate(t, relative=True)
additional_pcds.append(camera_frame)
# Unit bbox
unit_bbox = create_unit_bbox()
additional_pcds.append(unit_bbox)
# Go over list of numpy arrays and convert them to o3d.geometry.PointClouds
# (maybe also create bboxes around them)
pcds = []
bboxes = []
for i, points in enumerate(points_list):
if torch.is_tensor(points):
points_np = points.cpu().numpy()
elif isinstance(points, type(np.empty(0))):
points_np = points
if len(points_np.shape) == 3:
# we then assume the first dimension is the batch_size
points_np = points_np.squeeze(axis=0)
if points_np.shape[1] > points_np.shape[0] and points_np.shape[0] == 3:
points_np = np.moveaxis(points_np, 0, -1) # [N, 3]
# transform to opengl coordinates
if convert_to_opengl_coords:
points_np = pcd_utils.transform_pointcloud_to_opengl_coords(points_np)
pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(points_np))
if colors_list is not None:
if colors_list[i] is not None:
color_np = colors_list[i] * np.ones_like(points_np)
pcd.colors = o3d.utility.Vector3dVector(color_np)
pcds.append(pcd)
if compute_bbox_list is not None:
if compute_bbox_list[i]:
bbox = pcd_utils.BBox(points_np)
bboxes.append(bbox.get_bbox_as_line_set())
# sphere = o3d.geometry.TriangleMesh.create_sphere(radius=0.05)
# sphere = sphere.translate(np.array([0, -1, 0]), relative=True)
# sphere.paint_uniform_color([1.0, 0.0, 0.0])
# additional_pcds.append(sphere)
# sphere = o3d.geometry.TriangleMesh.create_sphere(radius=0.05)
# sphere = sphere.translate(np.array([0, 0, 1]), relative=True)
# sphere.paint_uniform_color([1.0, 0.0, 0.0])
# additional_pcds.append(sphere)
# transform also additional_pcds if necessary
if convert_to_opengl_coords:
for additional_pcd in additional_pcds:
additional_pcd.transform(pcd_utils.T_opengl_cv_homogeneous)
o3d.visualization.draw_geometries([*additional_pcds, *pcds, *bboxes])
if exit_after:
exit()
def create_unit_bbox():
# unit bbox
unit_bbox = pcd_utils.BBox.compute_bbox_from_min_point_and_max_point(
np.array([-1, -1, -1]), np.array([1, 1, 1])
)
return unit_bbox
def create_frame(size=1.0, origin=[0, 0, 0]):
frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=size, origin=origin
)
return frame
def create_lines_from_start_and_end_points(start_points, end_points, color=[201/255, 177/255, 14/255]):
if start_points.shape[1] > start_points.shape[0] and start_points.shape[0] == 3:
start_points = start_points.transpose()
end_points = end_points.transpose()
num_pairs = start_points.shape[0]
all_points = np.concatenate((start_points, end_points), axis=0)
lines = [[i, i + num_pairs] for i in range(0, num_pairs, 1)]
line_colors = [color for i in range(num_pairs)]
line_set = o3d.geometry.LineSet(
points=o3d.utility.Vector3dVector(all_points),
lines=o3d.utility.Vector2iVector(lines),
)
line_set.colors = o3d.utility.Vector3dVector(line_colors)
return line_set
def create_lines_from_view_vectors(
view_vectors_original,
offsets_original,
dist_original,
R, t,
return_geoms=False,
convert_to_opengl_coords=False
):
view_vectors = np.copy(view_vectors_original)
offsets = np.copy(offsets_original)
dist = np.copy(dist_original)
# Move coordinates to the last axis
view_vectors = np.moveaxis(view_vectors, 0, -1) # [N, 3]
offsets = np.moveaxis(offsets, 0, -1) # [N, 3]
len_dist_shape = len(dist.shape)
if len_dist_shape == 1:
dist = dist[:, np.newaxis]
else:
dist = np.moveaxis(dist, 0, -1) # [N, 1]
N = offsets.shape[0] # number of points (and lines)
# Advance along the view_vectors by a distance of "dist"
end_points = offsets + view_vectors * dist
# Concatenate offsets and end_points into one array
points = np.concatenate((offsets, end_points), axis=0)
# Compute list of edges between offsets and end_points
lines = [[i, i + N] for i in range(0, N, 1)]
line_colors = [[201/255, 177/255, 14/255] for i in range(N)]
line_set = o3d.geometry.LineSet(
points=o3d.utility.Vector3dVector(points),
lines=o3d.utility.Vector2iVector(lines),
)
line_set.colors = o3d.utility.Vector3dVector(line_colors)
# Offsets PointCloud
offsets_pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(offsets))
offsets_pcd.paint_uniform_color(Colors.red)
# End points PointCloud
end_points_pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(end_points))
end_points_pcd.paint_uniform_color(Colors.green)
# Concatenate PointClouds
pcds = [offsets_pcd, end_points_pcd]
# Convert to opengl coordinates if necessary
if not return_geoms or convert_to_opengl_coords:
offsets_pcd.transform(pcd_utils.T_opengl_cv_homogeneous)
end_points_pcd.transform(pcd_utils.T_opengl_cv_homogeneous)
line_set.transform(pcd_utils.T_opengl_cv_homogeneous)
if return_geoms:
return line_set, pcds
else:
# camera frame
camera_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=1.0, origin=[0, 0, 0]
)
camera_frame.rotate(R, pcd_utils.origin)
camera_frame.translate(t, relative=True)
camera_frame.rotate(pcd_utils.T_opengl_cv, pcd_utils.origin) # convert to opengl coordinates for visualization
o3d.visualization.draw_geometries([camera_frame, *pcds, line_set])
exit()
def viz_and_exit(pcd_list):
o3d.visualization.draw_geometries(pcd_list)
exit()
def visualize_mesh(mesh_path):
# world frame
world_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=1.0, origin=[0, 0, 0]
)
mesh = o3d.io.read_triangle_mesh(mesh_path)
o3d.visualization.draw_geometries([world_frame, mesh])
def visualize_grid(points_list, colors=None, exit_after=True):
# world frame
world_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=1.5, origin=[0, 0, 0]
)
world_frame = pcd_utils.rotate_around_axis(world_frame, axis_name="x", angle=-np.pi)
pcds = []
for i, points in enumerate(points_list):
pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(np.moveaxis(points, 0, -1)))
pcd = pcd_utils.rotate_around_axis(pcd, "x", np.pi)
if colors:
pcd.paint_uniform_color(colors[i])
pcds.append(pcd)
o3d.visualization.draw_geometries([world_frame, *pcds])
if exit_after: exit()
def visualize_sphere():
import marching_cubes as mcubes
from utils.sdf_utils import sphere_tsdf
# Extract sphere with Marching cubes.
dim = 20
# Extract the 0-isosurface.
X, Y, Z = np.meshgrid(np.arange(-1, 1, 2.0 / dim), np.arange(-1, 1, 2.0 / dim), np.arange(-1, 1, 2.0 / dim))
sdf = sphere_tsdf(X, Y, Z)
vertices, triangles = mcubes.marching_cubes(sdf, 0)
# Convert extracted surface to o3d mesh.
mesh_sphere = o3d.geometry.TriangleMesh(o3d.utility.Vector3dVector(vertices), o3d.utility.Vector3iVector(triangles))
mesh_sphere.compute_vertex_normals()
o3d.visualization.draw_geometries([mesh_sphere])
def merge_line_sets(line_sets):
# Compute total number of vertices and faces.
num_points = 0
num_lines = 0
num_line_colors = 0
for i in range(len(line_sets)):
num_points += np.asarray(line_sets[i].points).shape[0]
num_lines += np.asarray(line_sets[i].lines).shape[0]
num_line_colors += np.asarray(line_sets[i].colors).shape[0]
# Merge points and faces.
points = np.zeros((num_points, 3), dtype=np.float64)
lines = np.zeros((num_lines, 2), dtype=np.int32)
line_colors = np.zeros((num_line_colors, 3), dtype=np.float64)
vertex_offset = 0
line_offset = 0
vertex_color_offset = 0
for i in range(len(line_sets)):
current_points = np.asarray(line_sets[i].points)
current_lines = np.asarray(line_sets[i].lines)
current_line_colors = np.asarray(line_sets[i].colors)
points[vertex_offset:vertex_offset + current_points.shape[0]] = current_points
lines[line_offset:line_offset + current_lines.shape[0]] = current_lines + vertex_offset
line_colors[vertex_color_offset:vertex_color_offset + current_line_colors.shape[0]] = current_line_colors
vertex_offset += current_points.shape[0]
line_offset += current_lines.shape[0]
vertex_color_offset += current_line_colors.shape[0]
# Create a merged line set object.
line_set = o3d.geometry.LineSet(o3d.utility.Vector3dVector(points), o3d.utility.Vector2iVector(lines))
line_set.colors = o3d.utility.Vector3dVector(line_colors)
return line_set
def merge_meshes(meshes):
# Compute total number of vertices and faces.
num_vertices = 0
num_triangles = 0
num_vertex_colors = 0
for i in range(len(meshes)):
num_vertices += np.asarray(meshes[i].vertices).shape[0]
        num_triangles += np.asarray(meshes[i].triangles).shape[0]
        num_vertex_colors += np.asarray(meshes[i].vertex_colors).shape[0]
import pandas as pd
import numpy as np
import math
# Naive Bayes classification
class NaiveBayes:
def __init__(self,dataspath):
self.model = {}
self.datas = self.loadDataSet(dataspath)
self.categorys = set(self.datas[:,-1])
self.features = len(self.datas[0])-1
def loadDataSet(self,dataspath):
'''
        :param dataspath: path to the input data, a csv-format file
        :return: numpy array with n columns; the first n-1 columns are features and the last column is the label
'''
data_set = pd.read_csv(dataspath)
data_set_train = data_set.iloc[:,0:4]
data_set_label = data_set.iloc[:,5:6]
        return np.concatenate((np.array(data_set_train), np.array(data_set_label)), axis=1)
import pytest
import numpy as np
from ardent.utilities import _validate_scalar_to_multi
from ardent.utilities import _validate_ndarray
from ardent.utilities import _validate_xyz_resolution
from ardent.utilities import _compute_axes
from ardent.utilities import _compute_coords
from ardent.utilities import _multiply_by_affine # TODO: write test for this function.
"""
Test _validate_scalar_to_multi.
"""
def test__validate_scalar_to_multi():
# Test proper use.
kwargs = dict(value=1, size=1, dtype=float)
correct_output = np.array([1], float)
assert np.array_equal(_validate_scalar_to_multi(**kwargs), correct_output)
kwargs = dict(value=1, size=0, dtype=int)
correct_output = np.array([], int)
assert np.array_equal(_validate_scalar_to_multi(**kwargs), correct_output)
kwargs = dict(value=9.5, size=4, dtype=int)
correct_output = np.full(4, 9, int)
assert np.array_equal(_validate_scalar_to_multi(**kwargs), correct_output)
kwargs = dict(value=[1, 2, 3.5], size=3, dtype=float)
correct_output = np.array([1, 2, 3.5], float)
assert np.array_equal(_validate_scalar_to_multi(**kwargs), correct_output)
kwargs = dict(value=[1, 2, 3.5], size=3, dtype=int)
correct_output = np.array([1, 2, 3], int)
assert np.array_equal(_validate_scalar_to_multi(**kwargs), correct_output)
kwargs = dict(value=(1, 2, 3), size=3, dtype=int)
correct_output = np.array([1, 2, 3], int)
assert np.array_equal(_validate_scalar_to_multi(**kwargs), correct_output)
kwargs = dict(value=np.array([1, 2, 3], float), size=3, dtype=int)
correct_output = np.array([1, 2, 3], int)
assert np.array_equal(_validate_scalar_to_multi(**kwargs), correct_output)
# Test improper use.
kwargs = dict(value=[1, 2, 3, 4], size='size: not an int', dtype=float)
expected_exception = TypeError
match = "size must be interpretable as an integer."
with pytest.raises(expected_exception, match=match):
_validate_scalar_to_multi(**kwargs)
kwargs = dict(value=[], size=-1, dtype=float)
expected_exception = ValueError
match = "size must be non-negative."
with pytest.raises(expected_exception, match=match):
_validate_scalar_to_multi(**kwargs)
kwargs = dict(value=[1, 2, 3, 4], size=3, dtype=int)
expected_exception = ValueError
match = "The length of value must either be 1 or it must match size."
with pytest.raises(expected_exception, match=match):
_validate_scalar_to_multi(**kwargs)
kwargs = dict(value=np.arange(3*4, dtype=int).reshape(3,4), size=3, dtype=float)
expected_exception = ValueError
match = "value must not have more than 1 dimension."
with pytest.raises(expected_exception, match=match):
_validate_scalar_to_multi(**kwargs)
kwargs = dict(value=[1, 2, 'c'], size=3, dtype=int)
expected_exception = ValueError
match = "value and dtype are incompatible with one another."
with pytest.raises(expected_exception, match=match):
_validate_scalar_to_multi(**kwargs)
kwargs = dict(value='c', size=3, dtype=int)
expected_exception = ValueError
match = "value and dtype are incompatible with one another."
with pytest.raises(expected_exception, match=match):
_validate_scalar_to_multi(**kwargs)
"""
Test _validate_ndarray.
"""
def test__validate_ndarray():
# Test proper use.
kwargs = dict(array=np.arange(3, dtype=int), dtype=float)
correct_output = np.arange(3, dtype=float)
assert np.array_equal(_validate_ndarray(**kwargs), correct_output)
kwargs = dict(array=[[0,1,2], [3,4,5]], dtype=float)
correct_output = np.arange(2*3, dtype=float).reshape(2,3)
assert np.array_equal(_validate_ndarray(**kwargs), correct_output)
kwargs = dict(array=np.array([0,1,2]), broadcast_to_shape=(2,3))
correct_output = np.array([[0,1,2], [0,1,2]])
assert np.array_equal(_validate_ndarray(**kwargs), correct_output)
kwargs = dict(array=np.array(7), required_ndim=1)
correct_output = np.array([7])
assert np.array_equal(_validate_ndarray(**kwargs), correct_output)
# Test improper use.
# Validate arguments.
kwargs = dict(array=np.arange(3), minimum_ndim=1.5)
expected_exception = TypeError
match = "minimum_ndim must be of type int."
with pytest.raises(expected_exception, match=match):
_validate_ndarray(**kwargs)
kwargs = dict(array=np.arange(3), minimum_ndim=-1)
expected_exception = ValueError
match = "minimum_ndim must be non-negative."
with pytest.raises(expected_exception, match=match):
_validate_ndarray(**kwargs)
kwargs = dict(array=np.arange(3), required_ndim=1.5)
expected_exception = TypeError
match = "required_ndim must be either None or of type int."
with pytest.raises(expected_exception, match=match):
_validate_ndarray(**kwargs)
kwargs = dict(array=np.arange(3), required_ndim=-1)
expected_exception = ValueError
match = "required_ndim must be non-negative."
with pytest.raises(expected_exception, match=match):
_validate_ndarray(**kwargs)
kwargs = dict(array=np.arange(3), dtype="not of type type")
expected_exception = TypeError
match = "dtype must be either None or a valid type."
with pytest.raises(expected_exception, match=match):
_validate_ndarray(**kwargs)
# Validate array.
kwargs = dict(array=np.array(print), dtype=int)
expected_exception = TypeError
match = "array is of a type that is incompatible with dtype."
with pytest.raises(expected_exception, match=match):
_validate_ndarray(**kwargs)
kwargs = dict(array=np.array('string that is not an int'), dtype=int)
expected_exception = ValueError
match = "array has a value that is incompatible with dtype."
with pytest.raises(expected_exception, match=match):
_validate_ndarray(**kwargs)
kwargs = dict(array=np.array([[], 1]), dtype=None, forbid_object_dtype=True)
expected_exception = TypeError
match = "Casting array to a np.ndarray produces an array of dtype object \nwhile forbid_object_dtype == True and dtype != object."
with pytest.raises(expected_exception, match=match):
_validate_ndarray(**kwargs)
kwargs = dict(array=np.arange(3), required_ndim=2)
expected_exception = ValueError
match = "If required_ndim is not None, array.ndim must equal it unless array.ndim == 0 and required_ndin == 1."
with pytest.raises(expected_exception, match=match):
_validate_ndarray(**kwargs)
kwargs = dict(array=np.arange(3), minimum_ndim=2)
expected_exception = ValueError
match = "array.ndim must be at least equal to minimum_ndim."
with pytest.raises(expected_exception, match=match):
_validate_ndarray(**kwargs)
"""
Test _validate_xyz_resolution.
"""
def test__validate_xyz_resolution():
# Test proper use.
kwargs = dict(ndim=1, xyz_resolution=2)
correct_output = np.full(1, 2, float)
assert np.array_equal(_validate_xyz_resolution(**kwargs), correct_output)
kwargs = dict(ndim=4, xyz_resolution=1.5)
correct_output = np.full(4, 1.5, float)
assert np.array_equal(_validate_xyz_resolution(**kwargs), correct_output)
    kwargs = dict(ndim=3, xyz_resolution=np.ones(3, int))
    correct_output = np.ones(3, float)
    assert np.array_equal(_validate_xyz_resolution(**kwargs), correct_output)
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 05 13:41:23 2018
@author: DanielM
"""
from neuron import h, gui # gui necessary for some parameters to h namespace
import numpy as np
import net_tunedrevexpdrives
from input_generator import inhom_poiss
import os
import argparse
import time
from analysis_main import time_stamps_to_signal
import pdb
import sys
import matplotlib.pyplot as plt
tsts = time_stamps_to_signal
# Handle command line inputs with argparse
parser = argparse.ArgumentParser(description='Pattern separation paradigm')
parser.add_argument('-runs',
nargs=3,
type=int,
help='start stop range for the range of runs',
default=[0, 1, 1],
dest='runs')
parser.add_argument('-savedir',
type=str,
help='complete directory where data is saved',
default=os.getcwd(),
dest='savedir')
parser.add_argument('-seed',
type=int,
help='the seed making the network reproducible',
default=1000,
dest='seed')
parser.add_argument('-pp_mod_rate',
type=int,
help='Frequency at which the input is modulated',
default=10,
dest='pp_mod_rate')
parser.add_argument('-pp_max_rate',
type=int,
help='The maximum frequency the input reaches',
default=100,
dest='pp_max_rate')
parser.add_argument('-n_cells_gcs_mcs_bcs_hcs_pps',
nargs=5,
type=int,
help='the cell numbers of the network',
default=[2000, 60, 24, 24, 40],
dest='n_cells')
parser.add_argument('-W_pp_gc',
type=float,
help='the weight of the pp to gc connection',
default=1e-3,
dest='W_pp_gc')
parser.add_argument('-W_pp_bc',
nargs=3,
type=float,
help='the weight of the pp to bc connection',
default=[0.0e-3,2e-3,3e-3],
dest='W_pp_bc')
parser.add_argument('-n_pp_gc',
type=int,
help='number of pp to gc synapses ',
default=20,
dest='n_pp_gc')
parser.add_argument('-n_pp_bc',
type=int,
help='number of pp to bc synapses',
default=20,
dest='n_pp_bc')
parser.add_argument('-W_gc_bc',
nargs=3,
type=float,
help='weight of gc to bc synapses',
default=[2.5e-2,3.5e-2,1e-3],
dest='W_gc_bc')
parser.add_argument('-W_gc_hc',
type=float,
help='number of gc to hc synapses',
default=2.5e-2,
dest='W_gc_hc')
parser.add_argument('-W_bc_gc',
type=float,
help='number of bc to gc synapses',
default=4.8e-3,
dest='W_bc_gc')
parser.add_argument('-W_hc_gc',
type=float,
help='number of hc to gc synapses',
default=6e-3,
dest='W_hc_gc')
parser.add_argument('-delta_t',
type=float,
help='number of hc to gc synapses',
default=0.0,
dest='delta_t')
parser.add_argument('-t_pp_to_bc_offset',
type=float,
help="temporal offset between pp innervation of gcs and bcs",
default=-4.5,
dest="t_pp_to_bc_offset")
parser.add_argument('-rec_cond',
type=int,
help='number of hc to gc synapses',
default=1,
dest='rec_cond')
args = parser.parse_args()
# Where to search for nrnmech.dll file. Must be adjusted for your machine.
dll_files = [("/home/daniel/repos/pyDentate/mechs_7-6_linux/x86_64/.libs/libnrnmech.so"),
("C:\\Users\\Daniel\\repos\\pyDentate\\mechs_7-6_win\\nrnmech.dll")]
for x in dll_files:
if os.path.isfile(x):
dll_dir = x
print("DLL loaded from: " + str(dll_dir))
h.nrn_load_dll(dll_dir)
# Generate temporal patterns for the 100 PP inputs
np.random.seed(args.seed)
"""
temporal_patterns_full = inhom_poiss(mod_rate=args.pp_mod_rate,
max_rate=args.pp_max_rate,
n_inputs=400)
"""
temporal_patterns_full_list_1 = [np.array([100.0+np.random.normal(0,2)]) for x in range(args.n_cells[4]-1)]
temporal_patterns_full_list_2 = [np.array([100+np.random.normal(0,2)]), 600.0]
temporal_patterns_full_list_3 = [np.array([100.0+np.random.normal(0,2)+args.delta_t]) for x in range(args.n_cells[4])]
temporal_patterns_full_list_4 = [np.array([]) for x in range(400-2*args.n_cells[4])]
temporal_patterns_full = np.array(temporal_patterns_full_list_1+temporal_patterns_full_list_2+temporal_patterns_full_list_3+temporal_patterns_full_list_4, dtype=np.object)
# Start the runs of the model
runs = range(args.runs[0], args.runs[1], args.runs[2])
pp_bc_weights = np.arange(args.W_pp_bc[0], args.W_pp_bc[1], args.W_pp_bc[2])
gc_bc_weights = np.arange(args.W_gc_bc[0], args.W_gc_bc[1], args.W_gc_bc[2])
print(pp_bc_weights)
print(gc_bc_weights)
for ff_weight in pp_bc_weights:
for fb_weight in gc_bc_weights:
for run in runs:
start_proc_t = time.perf_counter()
print("Run: " + str(run) + ". Total time: " + str(start_proc_t))
temporal_patterns = temporal_patterns_full.copy()
nw = net_tunedrevexpdrives.TunedNetwork(seed=args.seed+run,
n_gcs=args.n_cells[0],
n_mcs=args.n_cells[1],
n_bcs=args.n_cells[2],
n_hcs=args.n_cells[3],
W_pp_gc=args.W_pp_gc,
W_pp_bc=ff_weight,
n_pp_gc=args.n_pp_gc,
n_pp_bc=args.n_pp_bc,
W_gc_bc=fb_weight,
W_gc_hc=args.W_gc_hc,
W_bc_gc=args.W_bc_gc,
W_hc_gc=args.W_hc_gc,
ff_t_offset=args.t_pp_to_bc_offset,
temporal_patterns=temporal_patterns,
rec_cond=bool(args.rec_cond))
print("Done setting up nw")
nw.populations[0].voltage_recording(range(2000))
nw.populations[1].voltage_recording(range(60))
nw.populations[2].voltage_recording(range(24))
nw.populations[3].voltage_recording(range(24))
# Run the model
"""Initialization for -2000 to -100"""
h.cvode.active(0)
dt = 0.1
h.steps_per_ms = 1.0/dt
h.finitialize(-60)
h.t = -2000
h.secondorder = 0
h.dt = 10
while h.t < -100:
h.fadvance()
h.secondorder = 2
h.t = 0
h.dt = 0.1
"""Setup run control for -100 to 1500"""
h.frecord_init() # Necessary after changing t to restart the vectors
while h.t < 300:
h.fadvance()
end_proc_t = time.perf_counter()
print("Done Running at " + str(end_proc_t) + " after " + str((end_proc_t - start_proc_t)/60) + " minutes")
save_data_name = (f"{str(nw)}_"
f"{nw.seed:06d}_"
f"{run:03d}_"
f"{nw.populations[0].get_cell_number():05d}_"
f"{nw.populations[1].get_cell_number():05d}_"
f"{nw.populations[2].get_cell_number():05d}_"
f"{nw.populations[3].get_cell_number():05d}_"
f"{args.n_cells[4]:05d}_"
f"{args.n_pp_gc:04d}_"
f"{args.n_pp_bc:04d}_"
f"{args.W_pp_gc:08.5f}_"
f"{ff_weight:08.5f}_"
f"{args.pp_mod_rate:04d}_"
f"{args.pp_max_rate:04d}_"
f"{fb_weight:08.5f}_"
f"{fb_weight:08.5f}_"
f"{args.W_bc_gc:08.5f}_"
f"{args.W_hc_gc:08.5f}_"
f"{args.t_pp_to_bc_offset}_"
f"{args.delta_t:08.5f}")
if run == 0:
fig = nw.plot_aps(time=200)
tuned_fig_file_name =save_data_name
nw.save_ap_fig(fig, args.savedir, tuned_fig_file_name)
pp_lines = np.empty(400, dtype = np.object)
pp_lines[0+run:args.n_cells[4]+run] = temporal_patterns[0+run:args.n_cells[4]+run]
curr_pp_ts = np.array(tsts(pp_lines, dt_signal=0.1, t_start=0, t_stop=300), dtype = np.bool)
curr_gc_ts = np.array(tsts(nw.populations[0].get_properties()['ap_time_stamps'], dt_signal=0.1, t_start=0, t_stop=300), dtype = np.bool)
curr_mc_ts = np.array(tsts(nw.populations[1].get_properties()['ap_time_stamps'], dt_signal=0.1, t_start=0, t_stop=300), dtype = np.bool)
curr_hc_ts = np.array(tsts(nw.populations[2].get_properties()['ap_time_stamps'], dt_signal=0.1, t_start=0, t_stop=300), dtype = np.bool)
curr_bc_ts = np.array(tsts(nw.populations[3].get_properties()['ap_time_stamps'], dt_signal=0.1, t_start=0, t_stop=300), dtype = np.bool)
np.savez(args.savedir + os.path.sep + "time-stamps_" + save_data_name,
pp_ts = np.array(curr_pp_ts),
gc_ts = np.array(curr_gc_ts),
mc_ts = np.array(curr_mc_ts),
                     bc_ts = np.array(curr_bc_ts))
import numpy as np
import pandas as pd
import halfspace.projections as hsp
import halfspace.stress_comps_vectorized as scv
rho = 2700
g = 9.81
def cat_t_priors(num_pts, n_trials, s1_range, s3_range, theta_range, first_iter):
t_priors = sample_T_priors(n_trials, s1_range, s3_range, theta_range)
run_ind = np.arange(n_trials) + first_iter
t_priors = np.hstack(( t_priors, run_ind.reshape([n_trials, 1]) ))
t_priors = np.repeat(t_priors, num_pts, axis=0)
t_prior_df = pd.DataFrame(t_priors, columns=['txx', 'tyy', 'txy', 'iter'])
return t_prior_df
def sample_T_priors(n_trials, s1_range, s3_range, theta_range):
s1s = np.random.uniform(s1_range[0], s1_range[1], n_trials)
s3s = np.random.uniform(s3_range[0], s3_range[1], n_trials) * s1s
thetas = np.random.uniform(theta_range[0], theta_range[1], n_trials)
xxs = scv.xx_stress_from_s1_s3_theta(s1s, s3s, thetas)
yys = scv.yy_stress_from_s1_s3_theta(s1s, s3s, thetas)
xys = scv.xy_stress_from_s1_s3_theta(s1s, s3s, thetas)
del s1s, s3s, thetas # save some RAM (important for large n_trials)
xxs = xxs.reshape([n_trials, 1])
yys = yys.reshape([n_trials, 1])
xys = xys.reshape([n_trials, 1])
t_priors = np.concatenate((xxs, yys, xys), axis=1)
return t_priors
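# Minimal usage sketch (illustration only, not part of the original module): draw
# five random horizontal stress tensors; each returned row is (t_xx, t_yy, t_xy).
#
#   priors = sample_T_priors(n_trials=5, s1_range=(0, 2), s3_range=(-1, 1),
#                            theta_range=(0, np.pi))
#   priors.shape   # -> (5, 3)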
def make_mc_df(in_df, n_trials=1, s1_range=(0,2), s3_range=(-1,1),
theta_range=(0, np.pi), first_iter=0):
num_pts = len(in_df.index)
important_cols = ['strike', 'dip', 'rake', 'depth', 'slip_m', 'xx_stress',
'yy_stress', 'zz_stress', 'xy_stress', 'xz_stress',
'yz_stress']
mc_df = pd.DataFrame( np.tile(in_df[important_cols].values, [n_trials, 1]),
columns=important_cols)
t_prior_df = cat_t_priors(num_pts, n_trials, s1_range, s3_range,
theta_range, first_iter)
mc_df = pd.concat((mc_df, t_prior_df), axis=1)
del t_prior_df
mc_df.rename(columns={'xx_stress':'mxx', 'yy_stress':'myy',
'zz_stress':'mzz', 'xy_stress':'mxy',
'xz_stress':'mxz', 'yz_stress':'myz'}, inplace=True)
return mc_df
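# Note on shape: the returned frame has num_pts * n_trials rows -- the fault-patch
# columns are tiled n_trials times while each sampled stress prior is repeated
# num_pts times, so every trial is paired with every patch exactly once.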
def get_total_stresses(mc_df, rho, g):
mc_df['tau_s'] = scv.strike_shear(strike=mc_df.strike,
dip=mc_df.dip, rho=rho, g=g,
mxx=mc_df.mxx*1e6,
myy=mc_df.myy*1e6,
mzz=mc_df.mzz*1e6,
mxy=mc_df.mxy*1e6,
mxz=mc_df.mxz*1e6,
myz=mc_df.myz*1e6,
txx=mc_df.txx,
tyy=mc_df.tyy,
txy=mc_df.txy,
depth=mc_df.depth*-1)
mc_df['tau_d'] = scv.dip_shear(strike=mc_df.strike,
dip=mc_df.dip, rho=rho, g=g,
mxx=mc_df.mxx*1e6,
myy=mc_df.myy*1e6,
mzz=mc_df.mzz*1e6,
mxy=mc_df.mxy*1e6,
mxz=mc_df.mxz*1e6,
myz=mc_df.myz*1e6,
txx=mc_df.txx,
tyy=mc_df.tyy,
txy=mc_df.txy,
depth=mc_df.depth*-1)
mc_df['tau_rake'] = hsp.get_rake_from_shear_components(
strike_shear=mc_df.tau_s,
dip_shear=mc_df.tau_d)
mc_df['rake_misfit_rad'] = np.radians(hsp.angle_difference(mc_df.rake,
mc_df.tau_rake,
return_abs=True))
return mc_df
def get_litho_tect_stresses(mc_df, rho, g):
mc_df['tau_s'] = scv.strike_shear(strike=mc_df.strike,
dip=mc_df.dip, rho=rho, g=g,
mxx=0.,
myy=0.,
mzz=0.,
mxy=0.,
mxz=0.,
myz=0.,
txx=mc_df.txx,
tyy=mc_df.tyy,
txy=mc_df.txy,
depth=mc_df.depth*-1)
mc_df['tau_d'] = scv.dip_shear(strike=mc_df.strike,
dip=mc_df.dip, rho=rho, g=g,
mxx=0.,
myy=0.,
mzz=0.,
mxy=0.,
mxz=0.,
myz=0.,
txx=mc_df.txx,
tyy=mc_df.tyy,
txy=mc_df.txy,
depth=mc_df.depth*-1)
mc_df['tau_rake'] = hsp.get_rake_from_shear_components(
strike_shear=mc_df.tau_s,
dip_shear=mc_df.tau_d)
mc_df['rake_misfit_rad'] = np.radians(hsp.angle_difference(mc_df.rake,
mc_df.tau_rake,
return_abs=True))
return mc_df
def do_stress_calcs(in_df, n_trials=1, s1_range=(0,3), s3_range=(-1,1),
theta_range=(0, np.pi), topo_stress=True, first_iter=0,
rho=2700, g=9.81, l_norm=1):
mc_df = make_mc_df(in_df, n_trials=n_trials, s1_range=s1_range,
s3_range=s3_range, theta_range=theta_range,
first_iter=first_iter)
if topo_stress == True:
mc_df = get_total_stresses(mc_df, rho, g)
else:
mc_df = get_litho_tect_stresses(mc_df, rho, g)
#calculate misfits
max_slip = in_df.slip_m.max()
sum_weights = np.sum(in_df.slip_m)
    mean_weights = np.sum(max_slip / in_df.slip_m)
import numpy as np
from common.dataset.pre_process.norm_data import norm_to_pixel
from common.transformation.cam_utils import normalize_screen_coordinates
def load_mpi_test(file_path, seq, norm):
"""
Usage: Load a section once
:param dataset_root: root path
:param section: There are six sequences in this (seq=0,1,2,3,4,5). And 2935 poses in a unique set(seq==7).
If you want to evaluate by scene setting, you can use the sequencewise evaluation
to convert to these numbers by doing
#1:Studio with Green Screen (TS1*603 + TS2 *540)/ (603+540)
#2:Studio without Green Screen (TS3*505+TS4*553)/(505+553)
#3:Outdoor (TS5*276+TS6*452)/(276+452)
:return: Normalized 2d/3d pose, normalization params and camera intrinics. All types: List
"""
    info = np.load(file_path, allow_pickle=True)
"""
Script plots trends of 2 m temperature over the WACC period. Subplot compares
all six experiments with ERA-Interim.
Notes
-----
Author : <NAME>
Date : 20 February 2019
"""
### Import modules
import datetime
import numpy as np
import matplotlib.pyplot as plt
import cmocean
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import read_MonthlyData as MOM
import read_Reanalysis as MOR
import calc_Utilities as UT
### Define directories
directoryfigure = '/home/zlabe/Desktop/'
### Define time
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
titletime = currentmn + '/' + currentdy + '/' + currentyr
print('\n' '----Plotting WACC T2M Trends - %s----' % titletime)
### Allot time series
year1 = 1978
year2 = 2016
years = np.arange(year1,year2+1,1)
### Add parameters
ensembles = 10
varnames = ['T2M']
runnames = [r'CSST',r'CSIC',r'AMIP',r'AMQ',r'AMS',r'AMQS']
### Call function to read in ERA-Interim
lat,lon,time,lev,era = MOR.readDataR('T2M','surface',False,True)
### Call functions to read in WACCM data
models = np.empty((len(runnames),ensembles,era.shape[0],era.shape[1],
era.shape[2],era.shape[3]))
for i in range(len(runnames)):
lat,lon,time,lev,models[i] = MOM.readDataM('T2M',runnames[i],
'surface',False,True)
### Retrieve time period of interest
modq = np.empty((len(runnames),ensembles,era.shape[0]-1,era.shape[2],
era.shape[3]))
for i in range(len(runnames)):
for j in range(ensembles):
modq[i,j,:,:,:] = UT.calcDecJanFeb(models[i,j,:,:,:],
lat,lon,'surface',1)
eraq = UT.calcDecJanFeb(era,lat,lon,'surface',1)
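### modq/eraq now hold DJF (Dec-Jan-Feb) means per year for the experiments and
### ERA-Interim; there is one year less than the raw record since DJF spans the
### year boundary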
def detrendData(datavar,years,level,yearmn,yearmx):
"""
Function removes linear trend
Parameters
----------
datavar : 4d numpy array or 5d numpy array
[ensemble,year,lat,lon] or [ensemble,year,level,lat,lon]
years : 1d numpy array
[years]
level : string
Height of variable (surface or profile)
yearmn : integer
First year
yearmx : integer
Last year
Returns
-------
datavardt : 4d numpy array or 5d numpy array
[ensemble,year,lat,lon] or [ensemble,year,level,lat,lon]
Usage
-----
datavardt = detrendData(datavar,years,level,yearmn,yearmx)
"""
print('\n>>> Using detrendData function! \n')
###########################################################################
###########################################################################
###########################################################################
### Import modules
import numpy as np
import scipy.stats as sts
### Slice time period
sliceq = np.where((years >= yearmn) & (years <= yearmx))[0]
datavar = datavar[:,sliceq,:,:]
### Detrend data array
if level == 'surface':
x = np.arange(datavar.shape[1])
slopes = np.empty((datavar.shape[0],datavar.shape[2],datavar.shape[3]))
intercepts = np.empty((datavar.shape[0],datavar.shape[2],
datavar.shape[3]))
for ens in range(datavar.shape[0]):
print('-- Detrended data for ensemble member -- #%s!' % (ens+1))
for i in range(datavar.shape[2]):
for j in range(datavar.shape[3]):
                    mask = np.isfinite(datavar[ens,:,i,j])
'''---------------------------------------
Import Statements
---------------------------------------'''
import csv
import cv2
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D
from keras.layers.convolutional import Convolution2D
import matplotlib.pyplot as plt
from keras.callbacks import ModelCheckpoint
import random
from tempfile import TemporaryFile
correction = 0.25
num_bins = 23
colorConversion = cv2.COLOR_BGR2LAB
'''---------------------------------------
Read data from File
---------------------------------------'''
def read_data_from_file(fileName, lineArray):
with open(fileName) as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lineArray.append(line)
'''---------------------------------------
Extract images and Measurements
---------------------------------------'''
def get_images_and_measurements(lineArray, splitToken, imagePath, imageArray, measurementArray):
for line in lineArray:
for i in range(3):
source_path = line[i]
tokens = source_path.split(splitToken)
filename = tokens[-1]
local_path = imagePath + filename
image = cv2.imread(local_path)
imageArray.append(image)
measurement = float(line[3])
measurementArray.append(measurement)
measurementArray.append(measurement + correction)
measurementArray.append(measurement - correction)
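# Hedged usage sketch (file names and paths here are hypothetical, not taken from
# the original project):
#
#   lines, images, measurements = [], [], []
#   read_data_from_file('driving_log.csv', lines)
#   get_images_and_measurements(lines, '/', 'IMG/', images, measurements)
#
# Each CSV row yields three images (typically center/left/right camera) and three
# steering values: the recorded angle, angle + correction and angle - correction.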
'''---------------------------------------
Print Histogram of Data
---------------------------------------'''
def print_histogram(measurement_array, show, title = ''):
avg_samples_per_bin = len(measurement_array)/num_bins
hist, bins = np.histogram(measurement_array, num_bins)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.plot((np.min(measurement_array), np.max(measurement_array)), (avg_samples_per_bin, avg_samples_per_bin), 'k-')
if show:
plt.title(title)
plt.show()
'''---------------------------------------
Flip each image and measurement
---------------------------------------'''
def flip_image_and_measurement(imageArray, measurementArray, augmented_images, augmented_measurements):
for image, measurement in zip(imageArray, measurementArray):
augmented_images.append(image)
augmented_measurements.append(measurement)
flipped_image = cv2.flip(image, 1)
flipped_measurement = measurement * -1.0
augmented_images.append(flipped_image)
augmented_measurements.append(flipped_measurement)
'''---------------------------------------
Get Transform
---------------------------------------'''
def get_transform(img,
x_bottom = 1136,
x_top = 267,
depth = 83,
hood_depth = 33,
dst_offset = 271,
cal1_offset = 27,
cal2_offset = 30):
img_size = (img.shape[1], img.shape[0])
# src = (x1, y1) , (x2, y2), (x3, y3), (x4, y4)
x1 = int((img_size[0] - x_top) / 2)
x2 = int((img_size[0] + x_top) / 2)
y1 = y2 = int((img_size[1] - depth))
x3 = int((img_size[0] - x_bottom) / 2)
x4 = int((img_size[0] + x_bottom) / 2)
y3 = y4 = (img_size[1] - hood_depth)
# dst = (j1, k1), (j2, k2), (j3, k3), (j4, k4)
j1 = j3 = (img_size[0] / 2) - dst_offset
j2 = j4 = (img_size[0] / 2) + dst_offset
k1 = k2 = 0
k3 = k4 = img_size[1]
src = np.float32([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
    dst = np.float32([[j1, k1], [j2, k2], [j3, k3], [j4, k4]])
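    # A plausible continuation (an assumption -- the source ends here): derive the
    # perspective warp matrices from the src/dst quadrilaterals, e.g.
    #
    #   M = cv2.getPerspectiveTransform(src, dst)
    #   Minv = cv2.getPerspectiveTransform(dst, src)
    #   return M, Minv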
#!/usr/bin/env python
"""
MagPy-General: Standard pymag package containing the following classes:
Written by <NAME>, <NAME> 2011/2012/2013/2014
Written by <NAME>, <NAME>, <NAME> 2015/2016
Version 0.3 (starting May 2016)
License:
https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import logging
import os
import sys
import tempfile
# ----------------------------------------------------------------------------
# Part 1: Import routines for packages
# ----------------------------------------------------------------------------
logpygen = '' # temporary logger variable
badimports = [] # List of missing packages
nasacdfdir = "c:\CDF Distribution\cdf33_1-dist\lib"
# Logging
# ---------
# Select the user's home directory (platform independent) or environment path
if "MAGPY_LOG_PATH" in os.environ:
path_to_log = os.environ["MAGPY_LOG_PATH"]
if not os.path.exists(path_to_log):
os.makedirs(path_to_log)
else:
path_to_log = tempfile.gettempdir()
def setup_logger(name, warninglevel=logging.WARNING, logfilepath=path_to_log,
logformat='%(asctime)s %(levelname)s - %(name)-6s - %(message)s'):
"""Basic setup function to create a standard logging config. Default output
is to file in /tmp/dir."""
logfile=os.path.join(logfilepath,'magpy.log')
    # Check file permission/existence
if not os.path.isfile(logfile):
pass
else:
if os.access(logfile, os.W_OK):
pass
else:
for count in range (1,100):
logfile=os.path.join(logfilepath,'magpy{:02}.log'.format(count))
value = os.access(logfile, os.W_OK)
if value or not os.path.isfile(logfile):
count = 100
break
try:
logging.basicConfig(filename=logfile,
filemode='w',
format=logformat,
level=logging.INFO)
except:
logging.basicConfig(format=logformat,
level=logging.INFO)
logger = logging.getLogger(name)
# Define a Handler which writes "setLevel" messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(warninglevel)
logger.addHandler(console)
return logger
# Package loggers to identify info/problem source
logger = setup_logger(__name__)
# DEPRECATED: replaced by individual module loggers, delete these when sure they're no longer needed:
loggerabs = logging.getLogger('abs')
loggertransfer = logging.getLogger('transf')
loggerdatabase = logging.getLogger('db')
loggerstream = logging.getLogger('stream')
loggerlib = logging.getLogger('lib')
loggerplot = logging.getLogger('plot')
# Special loggers for event notification
stormlogger = logging.getLogger('stream')
logger.info("Initiating MagPy...")
from magpy.version import __version__
logger.info("MagPy version "+str(__version__))
magpyversion = __version__
# Standard packages
# -----------------
try:
import csv
import pickle
import types
import struct
import re
import time, string, os, shutil
#import locale
import copy as cp
import fnmatch
import dateutil.parser as dparser
from tempfile import NamedTemporaryFile
import warnings
from glob import glob, iglob, has_magic
from itertools import groupby
import operator # used for stereoplot legend
from operator import itemgetter
# The following packages are not identically available for python3
try: # python2
import copy_reg as copyreg
except ImportError: # python3
import copyreg as copyreg
# Python 2 and 3: alternative 4
try:
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request, ProxyHandler, install_opener, build_opener
from urllib.error import HTTPError
except ImportError:
from urlparse import urlparse
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError, ProxyHandler, install_opener, build_opener
"""
try: # python2
import urllib2
except ImportError: # python3
import urllib.request
"""
try: # python2
import thread
except ImportError: # python3
import _thread
try: # python2
from StringIO import StringIO
pyvers = 2
except ImportError: # python 3
from io import StringIO
pyvers = 3
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError: standard packages.\n"
badimports.append(e)
# operating system
try:
PLATFORM = sys.platform
logger.info("Running on platform: {}".format(PLATFORM))
except:
    PLATFORM = 'unknown'
# Matplotlib
# ----------
try:
import matplotlib
gui_env = ['TKAgg','GTKAgg','Qt4Agg','WXAgg','Agg']
try:
if not os.isatty(sys.stdout.fileno()): # checks if stdout is connected to a terminal (if not, cron is starting the job)
logger.info("No terminal connected - assuming cron job and using Agg for matplotlib")
gui_env = ['Agg','TKAgg','GTKAgg','Qt4Agg','WXAgg']
matplotlib.use('Agg') # For using cron
except:
logger.warning("Problems with identfying cron job - windows system?")
pass
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError: problem with matplotlib.\n"
badimports.append(e)
try:
version = matplotlib.__version__.replace('svn', '')
try:
version = map(int, version.replace("rc","").split("."))
MATPLOTLIB_VERSION = list(version)
except:
version = version.strip("rc")
MATPLOTLIB_VERSION = version
logger.info("Loaded Matplotlib - Version %s" % str(MATPLOTLIB_VERSION))
for gui in gui_env:
try:
logger.info("Testing backend {}".format(gui))
try: # will be important from matplotlib3.3 onwards
matplotlib.use(gui, force=True)
except:
matplotlib.use(gui, warn=False, force=True)
from matplotlib import pyplot as plt
break
except:
continue
logger.info("Using backend: {}".format(matplotlib.get_backend()))
from matplotlib.colors import Normalize
from matplotlib.widgets import RectangleSelector, RadioButtons
#from matplotlib.colorbar import ColorbarBase
from matplotlib import mlab
from matplotlib.dates import date2num, num2date
import matplotlib.cm as cm
from pylab import *
from datetime import datetime, timedelta
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError with matplotlib package. Please install to proceed.\n"
logpygen += " ... if installed please check the permissions on .matplotlib in your homedirectory.\n"
badimports.append(e)
# Numpy & SciPy
# -------------
try:
logger.info("Loading Numpy and SciPy...")
import numpy as np
import scipy as sp
from scipy import interpolate
from scipy import stats
from scipy import signal
from scipy.interpolate import UnivariateSpline
from scipy.ndimage import filters
import scipy.optimize as op
import math
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError: Python numpy-scipy required - please install to proceed.\n"
badimports.append(e)
# NetCDF
# ------
try:
#print("Loading Netcdf4 support ...")
from netCDF4 import Dataset
except ImportError as e:
#logpygen += "MagPy initiation ImportError: NetCDF not available.\n"
#logpygen += "... if you want to use NetCDF format support please install a current version.\n"
#badimports.append(e)
pass
# NASACDF - SpacePy
# -----------------
def findpath(name, path):
for root, dirs, files in os.walk(path):
if name in files:
return root
try:
logger.info("Loading SpacePy package cdf support ...")
try:
# check for windows
        nasacdfdir = findpath('libcdf.dll',r'C:\CDF_Distribution') ## new path since nasaCDF3.6
        if not nasacdfdir:
            nasacdfdir = findpath('libcdf.dll',r'C:\CDF Distribution')
if nasacdfdir:
os.environ["CDF_LIB"] =str(nasacdfdir)
logger.info("Using CDF lib in %s" % nasacdfdir)
try:
import spacepy.pycdf as cdf
logger.info("... success")
except KeyError as e:
# Probably running at boot time - spacepy HOMEDRIVE cannot be detected
badimports.append(e)
except:
logger.info("... Could not import spacepy")
pass
else:
# create exception and try linux
x=1/0
except:
os.putenv("CDF_LIB", "/usr/local/cdf/lib")
logger.info("using CDF lib in /usr/local/cdf")
    ### If files (with tt_2000) have been generated with an outdated leap second table
    ### an exception will occur - to prevent that:
    ### 1. make sure to use an up-to-date leapsecond table - update cdf regularly
    ### 2. temporarily set cdf_validate environment variable to no
# This is how option 2 is included TODO -- add this to initialization options
# as an update of cdf is the way to go and not just deactivating the error message
os.putenv("CDF_VALIDATE", "no")
logger.info("... deactivating cdf validation")
try:
import spacepy.pycdf as cdf
logger.info("... success")
except KeyError as e:
# Probably running at boot time - spacepy HOMEDRIVE cannot be detected
badimports.append(e)
except:
logger.info("... Could not import spacepy")
pass
except ImportError as e:
logpygen += "MagPy initiation ImportError: NASA cdf not available.\n"
logpygen += "... if you want to use NASA CDF format support please install a current version.\n"
badimports.append(e)
if logpygen == '':
logpygen = "OK"
else:
logger.info(logpygen)
logger.info("Missing packages:")
for item in badimports:
logger.info(item)
logger.info("Moving on anyway...")
### Some Python3/2 compatibility code
### taken from http://www.rfk.id.au/blog/entry/preparing-pyenchant-for-python-3/
try:
unicode = unicode
# 'unicode' exists, must be Python 2
str = str
unicode = unicode
bytes = str
basestring = basestring
except NameError:
# 'unicode' is undefined, must be Python 3
str = str
unicode = str
bytes = bytes
basestring = (str,bytes)
# Storing function - http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods#edit2155350
# by <NAME>
# Used here to pickle baseline functions from header and store it in a cdf key.
# Not really a transparent method but working nicely. Underlying functional parameters to reconstruct the fit
# are stored as well but would require a link to the absolute data.
def _pickle_method(method):
func_name = method.__func__.__name__
obj = method.__self__
cls = method.__self__.__class__
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method)
# ----------------------------------------------------------------------------
# Part 2: Define Dictionaries
# ----------------------------------------------------------------------------
# Keys available in DataStream Object:
KEYLIST = [ 'time', # Timestamp (date2num object)
'x', # X or I component of magnetic field (float)
'y', # Y or D component of magnetic field (float)
'z', # Z component of magnetic field (float)
'f', # Magnetic field strength (float)
't1', # Temperature variable (e.g. ambient temp) (float)
't2', # Secondary temperature variable (e.g. sensor temp) (float)
'var1', # Extra variable #1 (float)
'var2', # Extra variable #2 (float)
'var3', # Extra variable #3 (float)
'var4', # Extra variable #4 (float)
'var5', # Extra variable #5 (float)
'dx', # Errors in X (float)
'dy', # Errors in Y (float)
'dz', # Errors in Z (float)
'df', # Errors in F (float)
'str1', # Extra string variable #1 (str)
'str2', # Extra string variable #2 (str)
'str3', # Extra string variable #3 (str)
'str4', # Extra string variable #4 (str)
'flag', # Variable for flags. (str='0000000000000000-')
'comment', # Space for comments on flags (str)
'typ', # Type of data (str='xyzf')
'sectime' # Secondary time variable (date2num)
]
NUMKEYLIST = KEYLIST[1:16]
# Empty key values at initiation of stream:
KEYINITDICT = {'time':0,'x':float('nan'),'y':float('nan'),'z':float('nan'),'f':float('nan'),
't1':float('nan'),'t2':float('nan'),'var1':float('nan'),'var2':float('nan'),
'var3':float('nan'),'var4':float('nan'),'var5':float('nan'),'dx':float('nan'),
'dy':float('nan'),'dz':float('nan'),'df':float('nan'),'str1':'-','str2':'-',
'str3':'-','str4':'-','flag':'0000000000000000-','comment':'-','typ':'xyzf',
'sectime':float('nan')}
FLAGKEYLIST = KEYLIST[:16]
# KEYLIST[:8] # only primary values with time
# KEYLIST[1:8] # only primary values without time
# Formats supported by MagPy read function:
PYMAG_SUPPORTED_FORMATS = {
'IAGA':['rw','IAGA 2002 text format'],
'WDC':['rw','World Data Centre format'],
'IMF':['rw', 'Intermagnet Format'],
'IAF':['rw', 'Intermagnet archive Format'],
'BLV':['rw','Baseline format Intermagnet'],
'IYFV':['rw','Yearly mean format Intermagnet'],
'DKA':['rw', 'K value format Intermagnet'],
'DIDD':['rw','Output format from MinGeo DIDD'],
'GSM19':['r', 'Output format from GSM19 magnetometer'],
'COVJSON':['rw', 'Coverage JSON'],
'JSON':['rw', 'JavaScript Object Notation'],
'LEMIHF':['r', 'LEMI text format data'],
'LEMIBIN':['r','Current LEMI binary data format'],
'LEMIBIN1':['r','Deprecated LEMI binary format at WIC'],
'OPT':['r', 'Optical hourly data from WIK'],
'PMAG1':['r','Deprecated ELSEC from WIK'],
'PMAG2':['r', 'Current ELSEC from WIK'],
'GDASA1':['r', 'GDAS binary format'],
'GDASB1':['r', 'GDAS text format'],
'RMRCS':['r', 'RCS data output from Richards perl scripts'],
'RCS':['r', 'RCS raw output'],
'METEO':['r', 'Winklbauer METEO files'],
'NEIC':['r', 'WGET data from USGS - NEIC'],
'LNM':['r', 'Thies Laser-Disdrometer'],
'IWT':['r', 'IWT Tiltmeter data'],
'LIPPGRAV':['r', 'Lippmann Tiltmeter data'],
'GRAVSG':['r', 'GWR TSF data'],
'CR800':['r', 'CR800 datalogger'],
'IONO':['r', 'IM806 Ionometer'],
'RADON':['r', 'single channel analyser gamma data'],
'USBLOG':['r', 'USB temperature logger'],
#'SERSIN':['r', '?'],
#'SERMUL':['r', '?'],
'PYSTR':['rw', 'MagPy full ascii'],
            'AUTODIF':['r', 'Deprecated - AutoDIF output data'],
'AUTODIF_FREAD':['r', 'Deprecated - Special format for AutoDIF read-in'],
'PYBIN':['r', 'MagPy own binary format'],
'PYASCII':['rw', 'MagPy basic ASCII'],
'POS1TXT':['r', 'POS-1 text format output data'],
'POS1':['r', 'POS-1 binary output at WIC'],
'PMB':['r', 'POS pmb file'],
'QSPIN':['r', 'QSPIN ascii output'],
#'PYNC':['r', 'MagPy NetCDF variant (too be developed)'],
#'DTU1':['r', 'ASCII Data from the DTUs FGE systems'],
#'BDV1':['r', 'Budkov GDAS data variant'],
'GFZTMP':['r', 'GeoForschungsZentrum ascii format'],
'GFZKP':['r', 'GeoForschungsZentrum KP-Index format'],
'PHA':['r', 'Potentially Hazardous Asteroids (PHAs) from the International Astronomical Unions Minor Planet Center, (json, incomplete)'],
'PREDSTORM':['r','PREDSTORM space weather prediction data format'],
'CSV':['rw','comma-separated CSV data'],
'IMAGCDF':['rw','Intermagnet CDF Format'],
'PYCDF':['rw', 'MagPy CDF variant'],
'NOAAACE':['r', 'NOAA ACE satellite data format'],
'NETCDF':['r', 'NetCDF4 format, NOAA DSCOVR satellite data archive format'],
'LATEX':['w','LateX data'],
'CS':['r','Cesium G823'],
#'SFDMI':['r', 'San Fernando variometer'],
#'SFGSM':['r', 'San Fernando GSM90'],
'UNKOWN':['-','Unknown']
}
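# Quick illustration (not part of the library itself): the first element of each
# entry encodes read/write support, so writable formats can be listed with e.g.
#
#   writable = [fmt for fmt, val in PYMAG_SUPPORTED_FORMATS.items() if 'w' in val[0]]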
"""
PYMAG_SUPPORTED_FORMATS = {
'IAGA':'rw', # IAGA 2002 text format
'WDC':'rw', # World Data Centre format
'IMF':'rw', # Intermagnet Format
'IAF':'rw', # Intermagnet archive Format
'IMAGCDF', # Intermagnet CDF Format
'BLV', # Baseline format Intermagnet
'IYFV', # Yearly mean format Intermagnet
'DKA', # K value format Intermagnet
'DIDD', # Output format from DIDD
'GSM19', # Output format from GSM19 magnetometer
'COVJSON', # Coverage JavaScript Object Notation
'JSON', # JavaScript Object Notation
'LEMIHF', # LEMI text format data
'LEMIBIN', # Current LEMI binary data format at WIC
'LEMIBIN1', # Deprecated LEMI binary format at WIC
'OPT', # Optical hourly data from WIK
'PMAG1', # Deprecated ELSEC from WIK
'PMAG2', # Current ELSEC from WIK
'GDASA1', # ?
'GDASB1', # ?
'RMRCS', # RCS data output from Richards perl scripts
'RCS', # RCS data output from Richards perl scripts
'METEO', # RCS data output in METEO files
'NEIC', # WGET data from USGS - NEIC
'LNM', # LaserNiederschlagsMonitor files
'IWT', # Tiltmeter data files at cobs
'LIPPGRAV', # Lippmann Tiltmeter data files at cobs
'CR800', # Data from the CR800 datalogger
'IONO', # Data from IM806 Ionometer
'RADON', # ?
'USBLOG', # ?
'SERSIN', # ?
'SERMUL', # ?
'PYSTR', # MagPy full ascii
'AUTODIF', # AutoDIF ouput data
'AUTODIF_FREAD',# Special format for AutoDIF read-in
'PYCDF', # MagPy CDF variant
'PYBIN', # MagPy own format
'PYASCII', # MagPy basic ASCII
'POS1TXT', # POS-1 text format output data
'POS1', # POS-1 binary output at WIC
'PMB', # POS pmb output
'QSPIN', # QSpin output
'PYNC', # MagPy NetCDF variant (too be developed)
'DTU1', # ASCII Data from the DTU's FGE systems
'SFDMI', # ?
'SFGSM', # ?
'BDV1', # ?
'GFZKP', # GeoForschungsZentrum KP-Index format
'NOAAACE', # NOAA ACE satellite data format
'PREDSTORM' # PREDSTORM space weather prediction data format
'CSV', # comma-separated CSV data with isoformat date in first column
'LATEX', # LateX data
'CS', # ?
'UNKOWN' # 'Unknown'?
}
"""
# ----------------------------------------------------------------------------
# Part 3: Example files for easy access and tests
# ----------------------------------------------------------------------------
from pkg_resources import resource_filename
example1 = resource_filename('magpy', 'examples/example1.zip') #Zip compressed IAGA02
example2 = resource_filename('magpy', 'examples/example2.cdf') #MagPy CDF with F
example3 = resource_filename('magpy', 'examples/example3.txt') #PyStr Baseline
example4 = resource_filename('magpy', 'examples/example4.cdf') #MagPy CDF
example5 = resource_filename('magpy', 'examples/example5.sec') #Imag CDF
example6a = resource_filename('magpy', 'examples/example6a.txt') #DI file
example6b = resource_filename('magpy', 'examples/example6b.txt') #DI file
# ----------------------------------------------------------------------------
# Part 4: Main classes -- DataStream, LineStruct and
# PyMagLog (To be removed)
# ----------------------------------------------------------------------------
class DataStream(object):
"""
    Creates a list object from input files / URL data.
    Data is organized in columns.
keys are column identifier:
key in keys: see KEYLIST
A note on headers:
ALWAYS INITIATE STREAM WITH >>> stream = DataStream([],{}).
All available methods:
----------------------------
- stream.ext(self, columnstructure): # new version of extend function for column operations
- stream.add(self, datlst):
- stream.clear_header(self):
- stream.extend(self,datlst,header):
- stream.union(self,column):
- stream.findtime(self,time):
- stream._find_t_limits(self):
- stream._print_key_headers(self):
- stream._get_key_headers(self,**kwargs):
- stream.sorting(self):
- stream._get_line(self, key, value):
- stream._remove_lines(self, key, value):
- stream._remove_columns(self, keys):
- stream._get_column(self, key):
- stream._put_column(self, column, key, **kwargs):
- stream._move_column(self, key, put2key):
- stream._clear_column(self, key):
- stream._reduce_stream(self, pointlimit=100000):
- stream._aic(self, signal, k, debugmode=None):
- stream._get_k(self, **kwargs):
- stream._get_k_float(self, value, **kwargs):
- stream._get_max(self, key, returntime=False):
- stream._get_min(self, key, returntime=False):
- stream._gf(self, t, tau):
- stream._hf(self, p, x):
- stream._residual_func(self, func, y):
- stream._tau(self, period):
- stream._convertstream(self, coordinate, **kwargs):
- stream._det_trange(self, period):
- stream._is_number(self, s):
- stream._normalize(self, column):
- stream._testtime(self, time):
- stream._drop_nans(self, key):
- stream.aic_calc(self, key, **kwargs):
- stream.baseline(self, absolutestream, **kwargs):
- stream.bindetector(self,key,text=None,**kwargs):
- stream.calc_f(self, **kwargs):
- stream.cut(self,length,kind=0,order=0):
- stream.dailymeans(self):
- stream.date_offset(self, offset):
- stream.delta_f(self, **kwargs):
- stream.dict2stream(self,dictkey='DataBaseValues')
- stream.differentiate(self, **kwargs):
- stream.eventlogger(self, key, values, compare=None, stringvalues=None, addcomment=None, debugmode=None):
- stream.extract(self, key, value, compare=None, debugmode=None):
- stream.extrapolate(self, start, end):
- stream.filter(self, **kwargs):
- stream.fit(self, keys, **kwargs):
- stream.flag_outlier(self, **kwargs):
- stream.flag_stream(self, key, flag, comment, startdate, enddate=None, samplingrate):
- stream.func2stream(self,function,**kwargs):
- stream.func_add(self,function,**kwargs):
- stream.func_subtract(self,function,**kwargs):
- stream.get_gaps(self, **kwargs):
- stream.get_sampling_period(self):
- stream.samplingrate(self, **kwargs):
- stream.integrate(self, **kwargs):
- stream.interpol(self, keys, **kwargs):
- stream.k_fmi(self, **kwargs):
- stream.mean(self, key, **kwargs):
- stream.multiply(self, factors):
- stream.offset(self, offsets):
- stream.randomdrop(self, percentage=None, fixed_indicies=None):
- stream.remove(self, starttime=starttime, endtime=endtime):
- stream.remove_flagged(self, **kwargs):
- stream.resample(self, keys, **kwargs):
- stream.rotation(self,**kwargs):
- stream.scale_correction(self, keys, scales, **kwargs):
- stream.smooth(self, keys, **kwargs):
- stream.steadyrise(self, key, timewindow, **kwargs):
- stream.stream2dict(self,dictkey='DataBaseValues')
- stream.stream2flaglist(self, userange=True, flagnumber=None, keystoflag=None, sensorid=None, comment=None)
- stream.trim(self, starttime=None, endtime=None, newway=False):
- stream.variometercorrection(self, variopath, thedate, **kwargs):
- stream.write(self, filepath, **kwargs):
Application methods:
----------------------------
- stream.aic_calc(key) -- returns stream (with !var2! filled with aic values)
- stream.baseline() -- calculates baseline correction for input stream (datastream)
    - stream.dailymeans() -- for DI stream - obtains variometer corrected means of basevalues
    - stream.date_offset() -- Corrects the time column of the selected stream by the offset
- stream.delta_f() -- Calculates the difference of x+y+z to f
- stream.differentiate() -- returns stream (with !dx!,!dy!,!dz!,!df! filled by derivatives)
- stream.extrapolate() -- read absolute stream and extrapolate the data
- stream.fit(keys) -- returns function
- stream.filter() -- returns stream (changes sampling_period; in case of fmi ...)
- stream.find_offset(stream_a, stream_b) -- Finds offset of two data streams. (Not optimised.)
- stream.flag_stream() -- Add flags to specific times or time ranges
- stream.func2stream() -- Combine stream and function (add, subtract, etc)
- stream.func_add() -- Add a function to the selected values of the data stream
- stream.func_subtract() -- Subtract a function from the selected values of the data stream
- stream.get_gaps() -- Takes the dominant sample frequency and fills non-existing time steps
- stream.get_sampling_period() -- returns the dominant sampling frequency in unit ! days !
- stream.integrate() -- returns stream (integrated vals at !dx!,!dy!,!dz!,!df!)
- stream.interpol(keys) -- returns function
- stream.k_fmi() -- Calculating k values following the fmi approach
- stream.linestruct2ndarray() -- converts linestrcut data to ndarray. should be avoided
    - stream.mean() -- Calculates mean values for the specified key; NaN values are taken into account
- stream.offset() -- Apply constant offsets to elements of the datastream
- stream.plot() -- plot keys from stream
- stream.powerspectrum() -- Calculating the power spectrum following the numpy fft example
- stream.remove_flagged() -- returns stream (removes data from stream according to flags)
- stream.resample(period) -- Resample stream to given sampling period.
- stream.rotation() -- Rotation matrix for rotating x,y,z to new coordinate system xs,ys,zs
- stream.selectkeys(keys) -- ndarray: remove all data except for provided keys (and flag/comment)
- stream.smooth(key) -- smooth the data using a window with requested size
- stream.spectrogram() -- Creates a spectrogram plot of selected keys
- stream.stream2flaglist() -- make flaglist out of stream
- stream.trim() -- returns stream within new time frame
- stream.use_sectime() -- Swap between primary and secondary time (if sectime is available)
- stream.variometercorrection() -- Obtain average DI values at certain timestep(s)
- stream.write() -- Writing Stream to a file
Supporting INTERNAL methods:
----------------------------
A. Standard functions and overrides for list like objects
- self.clear_header(self) -- Clears headers
- self.extend(self,datlst,header) -- Extends stream object
- self.sorting(self) -- Sorts object
B. Internal Methods I: Line & column functions
- self._get_column(key) -- returns a numpy array of selected columns from Stream
- self._put_column(key) -- adds a column to a Stream
- self._move_column(key, put2key) -- moves one column to another key
- self._clear_column(key) -- clears a column to a Stream
    - self._get_line(self, key, value) -- returns a LineStruct element corresponding to the first occurrence of value within the selected key
- self._reduce_stream(self) -- Reduces stream below a certain limit.
- self._remove_lines(self, key, value) -- removes lines with value within the selected key
- self.findtime(self,time) -- returns index and line for which time equals self.time
B. Internal Methods II: Data manipulation functions
    - self._aic(self, signal, k, debugmode=None) -- returns float -- determines Akaike Information Criterion for a specific index k
- self._get_k(self, **kwargs) -- Calculates the k value according to the Bartels scale
- self._get_k_float(self, value, **kwargs) -- Like _get_k, but for testing single values and not full stream keys (used in filtered function)
- self._gf(self, t, tau): -- Gauss function
- self._hf(self, p, x) -- Harmonic function
- self._residual_func(self, func, y) -- residual of the harmonic function
- self._tau(self, period) -- low pass filter with -3db point at period in sec (e.g. 120 sec)
B. Internal Methods III: General utility & NaN handlers
- self._convertstream(self, coordinate, **kwargs) -- Convert coordinates of x,y,z columns in stream
- self._det_trange(self, period) -- starting with coefficients above 1%
- self._find_t_limits(self) -- return times of first and last stream data points
- self._testtime(time) -- returns datetime object
- self._get_min(key) -- returns float
- self._get_max(key) -- returns float
- self._normalize(column) -- returns list,float,float -- normalizes selected column to range 0,1
- nan_helper(self, y) -- Helper to handle indices and logical indices of NaNs
- self._print_key_headers(self) -- Prints keys in datastream with variable and unit.
- self._get_key_headers(self) -- Returns keys in datastream.
- self._drop_nans(self, key) -- Helper to drop lines with NaNs in any of the selected keys.
- self._is_number(self, s) -- ?
Supporting EXTERNAL methods:
----------------------------
Useful functions:
- array2stream -- returns a data stream -- converts a list of arrays to a datastream
    - linestruct2ndarray -- returns a data ndarray -- converts an old linestruct format
- denormalize -- returns list -- (column,startvalue,endvalue) denormalizes selected column from range 0,1 ro sv,ev
- find_nearest(array, value) -- find point in array closest to value
- maskNAN(column) -- Tests for NAN values in array and usually masks them
- nearestPow2(x) -- Find power of two nearest to x
*********************************************************************
Standard function description format:
DEFINITION:
Description of function purpose and usage.
PARAMETERS:
Variables:
- variable: (type) Description.
Kwargs:
- variable: (type) Description.
RETURNS:
- variable: (type) Description.
EXAMPLE:
>>> alldata = mergeStreams(pos_stream, lemi_stream, keys=['<KEY>'])
APPLICATION:
Code for simple application.
*********************************************************************
Standard file description format:
Path: *path* (magpy.acquisition.pos1protocol)
Part of package: *package* (acquisition)
Type: *type* (type of file/package)
PURPOSE:
Description...
CONTAINS:
*ThisClass: (Class)
What is this class for?
thisFunction: (Func) Description
DEPENDENCIES:
List all non-standard packages required for file.
+ paths of all MagPy package dependencies.
CALLED BY:
Path to magpy packages that call this part, e.g. magpy.bin.acquisition
*********************************************************************
"""
KEYLIST = [ 'time', # Timestamp (date2num object)
'x', # X or I component of magnetic field (float)
'y', # Y or D component of magnetic field (float)
'z', # Z component of magnetic field (float)
'f', # Magnetic field strength (float)
't1', # Temperature variable (e.g. ambient temp) (float)
't2', # Secondary temperature variable (e.g. sensor temp) (float)
'var1', # Extra variable #1 (float)
'var2', # Extra variable #2 (float)
'var3', # Extra variable #3 (float)
'var4', # Extra variable #4 (float)
'var5', # Extra variable #5 (float)
'dx', # Errors in X (float)
'dy', # Errors in Y (float)
'dz', # Errors in Z (float)
'df', # Errors in F (float)
'str1', # Extra string variable #1 (str)
'str2', # Extra string variable #2 (str)
'str3', # Extra string variable #3 (str)
'str4', # Extra string variable #4 (str)
'flag', # Variable for flags. (str='0000000000000000-')
'comment', # Space for comments on flags (str)
'typ', # Type of data (str='xyzf')
'sectime' # Secondary time variable (date2num)
]
NUMKEYLIST = KEYLIST[1:16]
def __init__(self, container=None, header={},ndarray=None):
if container is None:
container = []
self.container = container
if ndarray is None:
ndarray = np.array([np.asarray([]) for elem in KEYLIST])
self.ndarray = ndarray ## Test this! -> for better memory efficiency
#if header is None:
# header = {'Test':'Well, it works'}
#header = {}
self.header = header
#for key in KEYLIST:
# setattr(self,key,np.asarray([]))
#self.header = {'Test':'Well, it works'}
self.progress = 0
# ------------------------------------------------------------------------
# A. Standard functions and overrides for list like objects
# ------------------------------------------------------------------------
def ext(self, columnstructure): # new version of extend function for column operations
"""
the extend and add functions must be replaced in case of
speed optimization
"""
for key in KEYLIST:
self.container.key = np.append(self.container.key, columnstructure.key, 1)
def add(self, datlst):
#try:
assert isinstance(self.container, (list, tuple))
self.container.append(datlst)
#except:
# print list(self.container).append(datlst)
def length(self):
#try:
if len(self.ndarray[0]) > 0:
ll = [len(elem) for elem in self.ndarray]
return ll
else:
try: ## might fail if LineStruct is empty (no time)
if len(self) == 1 and np.isnan(self[0].time):
return [0]
else:
return [len(self)]
except:
return [0]
def replace(self, datlst):
# Replace in stream
# - replace value with existing data
# Method was used by K calc - replaced by internal method there
newself = DataStream()
assert isinstance(self.container, (list, tuple))
ti = list(self._get_column('time'))
try:
ind = ti.index(datlst.time)
except ValueError:
self = self.add(datlst)
return self
except:
return self
li = [elem for elem in self]
del li[ind]
del ti[ind]
li.append(datlst)
return DataStream(li,self.header)
def copy(self):
"""
DESCRIPTION:
method for copying content of a stream to a new stream
APPLICATION:
for non-destructive methods
"""
#print self.container
#assert isinstance(self.container, (list, tuple))
co = DataStream()
#co.header = self.header
newheader = {}
for el in self.header:
newheader[el] = self.header[el]
array = [[] for el in KEYLIST]
if len(self.ndarray[0])> 0:
for ind, key in enumerate(KEYLIST):
liste = []
for val in self.ndarray[ind]: ## This is necessary to really copy the content
liste.append(val)
array[ind] = np.asarray(liste)
co.container = [LineStruct()]
else:
for el in self:
li = LineStruct()
for key in KEYLIST:
if key == 'time':
li.time = el.time
else:
#exec('li.'+key+' = el.'+key)
elkey = getattr(el,key)
setattr(li, key, elkey)
co.add(li)
return DataStream(co.container,newheader,np.asarray(array, dtype=object))
def __str__(self):
return str(self.container)
def __repr__(self):
return str(self.container)
def __getitem__(self, var):
try:
if var in NUMKEYLIST:
return self.ndarray[self.KEYLIST.index(var)].astype(np.float64)
else:
return self.ndarray[self.KEYLIST.index(var)]
except:
return self.container.__getitem__(var)
def __setitem__(self, var, value):
self.ndarray[self.KEYLIST.index(var)] = value
def __len__(self):
return len(self.container)
def clear_header(self):
"""
Remove header information
"""
self.header = {}
def extend(self,datlst,header,ndarray):
array = [[] for key in KEYLIST]
self.container.extend(datlst)
self.header = header
# Some initial check if any data set except timecolumn is contained
datalength = len(ndarray)
#t1 = datetime.utcnow()
if pyvers and pyvers == 2:
ch1 = '-'.encode('utf-8') # not working with py3
ch2 = ''.encode('utf-8')
else:
ch1 = '-'
ch2 = ''
try:
test = []
for col in ndarray:
col = np.array(list(col))
#print (np.array(list(col)).dtype)
if col.dtype in ['float64','float32','int32','int64']:
try:
x = np.asarray(col)[~np.isnan(col)]
except: # fallback 1 -> should not needed any more
#print ("Fallback1")
x = np.asarray([elem for elem in col if not np.isnan(elem)])
else:
#y = np.asarray(col)[col!='-']
#x = np.asarray(y)[y!='']
y = np.asarray(col)[col!=ch1]
x = np.asarray(y)[y!=ch2]
test.append(x)
test = np.asarray(test,dtype=object)
except:
# print ("Fallback -- pretty slowly")
#print ("Fallback2")
test = [[elem for elem in col if not elem in [ch1,ch2]] for col in ndarray]
#t2 = datetime.utcnow()
#print (t2-t1)
emptycnt = [len(el) for el in test if len(el) > 0]
if self.ndarray.size == 0:
self.ndarray = ndarray
elif len(emptycnt) == 1:
print("Tyring to extend with empty data set")
#self.ndarray = np.asarray((list(self.ndarray)).extend(list(ndarray)))
else:
for idx,elem in enumerate(self.ndarray):
if len(ndarray[idx]) > 0:
if len(self.ndarray[idx]) > 0 and len(self.ndarray[0]) > 0:
array[idx] = np.append(self.ndarray[idx], ndarray[idx]).astype(object)
#array[idx] = np.append(self.ndarray[idx], ndarray[idx],1).astype(object)
elif len(self.ndarray[0]) > 0: # only time axis present so far but no data within this elem
fill = ['-']
key = KEYLIST[idx]
if key in NUMKEYLIST or key=='sectime':
fill = [float('nan')]
nullvals = np.asarray(fill * len(self.ndarray[0]))
#array[idx] = np.append(nullvals, ndarray[idx],1).astype(object)
array[idx] = np.append(nullvals, ndarray[idx]).astype(object)
else:
array[idx] = ndarray[idx].astype(object)
self.ndarray = np.asarray(array, dtype=object)
def union(self,column):
seen = set()
seen_add = seen.add
return [ x for x in column if not (x in seen or seen_add(x))]
def removeduplicates(self):
"""
DESCRIPTION:
            Identify duplicate time stamps and remove the duplicated lines.
            Lines with the first occurrence are kept.
"""
# get duplicates in time column
def list_duplicates(seq):
seen = set()
seen_add = seen.add
return [idx for idx,item in enumerate(seq) if item in seen or seen_add(item)]
if not len(self.ndarray[0]) > 0:
print ("removeduplicates: works only with ndarrays")
return
duplicateindicies = list_duplicates(self.ndarray[0])
array = [[] for key in KEYLIST]
for idx, elem in enumerate(self.ndarray):
if len(elem) > 0:
newelem = np.delete(elem, duplicateindicies)
array[idx] = newelem
return DataStream(self, self.header, np.asarray(array,dtype=object))
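    # Usage note (sketch, not from the original docstring): removeduplicates()
    # returns a new DataStream rather than modifying the stream in place, e.g.
    #
    #   deduped = stream.removeduplicates()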
def start(self, dateformt=None):
st,et = self._find_t_limits()
return st
def end(self, dateformt=None):
st,et = self._find_t_limits()
return et
def findtime(self,time,**kwargs):
"""
DEFINITION:
Find a line within the container which contains the selected time step
or the first line following this timestep (since 0.3.99 using mode 'argmax')
VARIABLES:
startidx (int) index to start search with (speeding up)
endidx (int) index to end search with (speeding up)
mode (string) define search mode (fastest would be 'argmax')
RETURNS:
The index position of the line and the line itself
"""
startidx = kwargs.get('startidx')
endidx = kwargs.get('endidx')
mode = kwargs.get('mode')
#try:
# from bisect import bisect
#except ImportError:
# print("Import error")
st = date2num(self._testtime(time))
if len(self.ndarray[0]) > 0:
if startidx and endidx:
ticol = self.ndarray[0][startidx:endidx]
elif startidx:
ticol = self.ndarray[0][startidx:]
elif endidx:
ticol = self.ndarray[0][:endidx]
else:
ticol = self.ndarray[0]
try:
if mode =='argmax':
## much faster since 0.3.99 (used in flag_stream)
indexes = [np.argmax(ticol>=st)]
else:
## the following method is used until 0.3.98
indexes = [i for i,x in enumerate(ticol) if x == st] ### FASTER
# Other methods
# #############
#indexes = [i for i,x in enumerate(ticol) if np.allclose(x,st,rtol=1e-14,atol=1e-17)] # if the two time equal within about 0.7 milliseconds
#indexes = [bisect(ticol, st)] ## SELECTS ONLY INDEX WHERE VALUE SHOULD BE inserted
#indexes = [ticol.index(st)]
#print("findtime", indexes)
if not len(indexes) == 0:
if startidx:
retindex = indexes[0] + startidx
else:
retindex = indexes[0]
#print("Findtime index:",retindex)
return retindex, LineStruct()
else:
return 0, []
#return list(self.ndarray[0]).index(st), LineStruct()
except:
logger.warning("findtime: Didn't find selected time - returning 0")
return 0, []
for index, line in enumerate(self):
if line.time == st:
return index, line
logger.warning("findtime: Didn't find selected time - returning 0")
return 0, []
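    # Example (time value is hypothetical):
    #
    #   idx, line = stream.findtime("2018-06-01T12:00:00", mode='argmax')
    #
    # With mode='argmax', the index of the first time step at or after the given
    # time is returned; (0, []) signals that nothing was found.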
def _find_t_limits(self):
"""
DEFINITION:
Find start and end times in stream.
RETURNS:
Two datetime objects, start and end.
"""
if len(self.ndarray[0]) > 0:
t_start = num2date(np.min(self.ndarray[0].astype(float))).replace(tzinfo=None)
t_end = num2date(np.max(self.ndarray[0].astype(float))).replace(tzinfo=None)
else:
try: # old type
t_start = num2date(self[0].time).replace(tzinfo=None)
t_end = num2date(self[-1].time).replace(tzinfo=None)
except: # empty
t_start,t_end = None,None
return t_start, t_end
def _print_key_headers(self):
print("%10s : %22s : %28s" % ("MAGPY KEY", "VARIABLE", "UNIT"))
for key in FLAGKEYLIST[1:]:
try:
header = self.header['col-'+key]
except:
header = None
try:
unit = self.header['unit-col-'+key]
except:
unit = None
print("%10s : %22s : %28s" % (key, header, unit))
def _get_key_headers(self,**kwargs):
"""
DEFINITION:
get a list of existing numerical keys in stream.
PARAMETERS:
kwargs:
            - limit: (int) limit the length of the list
- numerical: (bool) if True, select only numerical keys
RETURNS:
- keylist: (list) a list like ['x','y','z']
EXAMPLE:
>>> data_stream._get_key_headers(limit=1)
"""
limit = kwargs.get('limit')
numerical = kwargs.get('numerical')
if numerical:
TESTLIST = FLAGKEYLIST
else:
TESTLIST = KEYLIST
keylist = []
"""
for key in FLAGKEYLIST[1:]:
try:
header = self.header['col-'+key]
try:
unit = self.header['unit-col-'+key]
except:
unit = None
keylist.append(key)
except:
header = None
"""
if not len(keylist) > 0: # e.g. Testing ndarray
for ind,elem in enumerate(self.ndarray): # use the long way
if len(elem) > 0 and ind < len(TESTLIST):
if not TESTLIST[ind] == 'time':
keylist.append(TESTLIST[ind])
if not len(keylist) > 0: # e.g. header col-? does not contain any info
#for key in FLAGKEYLIST[1:]: # use the long way
for key in TESTLIST[1:]: # use the long way
col = self._get_column(key)
if len(col) > 0:
#if not len(col) == 1 and not ( # maybe add something to prevent reading empty LineStructs)
if len(col) == 1:
if col[0] in ['-',float(nan),'']:
pass
else:
keylist.append(key)
if limit and len(keylist) > limit:
keylist = keylist[:limit]
return keylist
def _get_key_names(self):
"""
DESCRIPTION:
get the variable names for each key
APPLICATION:
keydict = self._get_key_names()
"""
keydict = {}
for key in KEYLIST:
kname = self.header.get('col-'+key)
keydict[kname] = key
return keydict
def dropempty(self):
"""
DESCRIPTION:
Drop empty arrays from ndarray and store their positions
"""
if not len(self.ndarray[0]) > 0:
return self.ndarray, np.asarray([])
newndarray = []
indexarray = []
for ind,elem in enumerate(self.ndarray):
if len(elem) > 0:
newndarray.append(np.asarray(elem).astype(object))
indexarray.append(ind)
keylist = [el for ind,el in enumerate(KEYLIST) if ind in indexarray]
return np.asarray(newndarray), keylist
def fillempty(self, ndarray, keylist):
"""
DESCRIPTION:
            Fills empty arrays into ndarray at all positions of KEYLIST not provided in keylist
"""
if not len(ndarray[0]) > 0:
return self
        if len(self.ndarray) == len(KEYLIST):
return self
lst = list(ndarray)
for i,key in enumerate(KEYLIST):
if not key in keylist:
lst.insert(i,[])
newndarray = np.asarray(lst,dtype=object)
return newndarray
def sorting(self):
"""
Sorting data according to time (maybe generalize that to some key)
"""
try: # old LineStruct part
liste = sorted(self.container, key=lambda tmp: tmp.time)
except:
pass
if len(self.ndarray[0]) > 0:
self.ndarray, keylst = self.dropempty()
#self.ndarray = self.ndarray[:, np.argsort(self.ndarray[0])] # does not work if some rows have a different length)
ind = np.argsort(self.ndarray[0])
for i,el in enumerate(self.ndarray):
if len(el) == len(ind):
self.ndarray[i] = el[ind]
else:
#print("Sorting: key %s has the wrong length - replacing row with NaNs" % KEYLIST[i])
logger.warning("Sorting: key %s has the wrong length - replacing row with NaNs" % KEYLIST[i])
logger.warning("len(t-axis)=%d len(%s)=%d" % (len(self.ndarray[0]), KEYLIST[i], len(self.ndarray[i])))
self.ndarray[i] = np.empty(len(self.ndarray[0])) * np.nan
self.ndarray = self.fillempty(self.ndarray,keylst)
for idx,el in enumerate(self.ndarray):
self.ndarray[idx] = np.asarray(self.ndarray[idx]).astype(object)
else:
self.ndarray = self.ndarray
return DataStream(liste, self.header, self.ndarray)
# ------------------------------------------------------------------------
# B. Internal Methods: Line & column functions
# ------------------------------------------------------------------------
def _get_line(self, key, value):
"""
        returns a LineStruct element corresponding to the first occurrence of value within the selected key
e.g.
st = st._get_line('time',734555.3442) will return the line with time 7...
"""
if not key in KEYLIST:
raise ValueError("Column key not valid")
lines = [elem for elem in self if eval('elem.'+key) == value]
return lines[0]
def _take_columns(self, keys):
"""
DEFINITION:
            extract selected columns of the given keys (Old LineStruct format - deprecated)
"""
resultstream = DataStream()
for elem in self:
line = LineStruct()
line.time = elem.time
resultstream.add(line)
resultstream.header = {}
for key in keys:
if not key in KEYLIST:
pass
elif not key == 'time':
col = self._get_column(key)
#print key, len(col)
try:
resultstream.header['col-'+key] = self.header['col-'+key]
except:
pass
try:
resultstream.header['unit-col-'+key] = self.header['unit-col-'+key]
except:
pass
resultstream = resultstream._put_column(col,key)
return resultstream
def _remove_lines(self, key, value):
"""
removes lines with value within the selected key
e.g.
        st = st._remove_lines('time',734555.3442) will remove the line(s) with time 7...
"""
if not key in KEYLIST:
raise ValueError("Column key not valid")
lst = [elem for elem in self if not eval('elem.'+key) == value]
return DataStream(lst, self.header)
def _get_column(self, key):
"""
Returns a numpy array of selected column from Stream
Example:
columnx = datastream._get_column('x')
"""
if not key in KEYLIST:
raise ValueError("Column key not valid")
# Speeded up this technique:
ind = KEYLIST.index(key)
if len(self.ndarray[0]) > 0:
try:
col = self[key]
except:
col = self.ndarray[ind]
return col
# Check for initialization value
#testval = self[0][ind]
# if testval == KEYINITDICT[key] or isnan(testval):
# return np.asarray([])
try:
col = np.asarray([row[ind] for row in self])
#get the first ten elements and test whether nan is there -- why ??
"""
try: # in case of string....
novalfound = True
for ele in col[:10]:
if not isnan(ele):
novalfound = False
if novalfound:
return np.asarray([])
except:
return col
"""
return col
except:
return np.asarray([])
def _put_column(self, column, key, **kwargs):
"""
DEFINITION:
adds a column to a Stream
PARAMETERS:
column: (array) single list with data with equal length as stream
key: (key) key to which the data is written
Kwargs:
columnname: (string) define a name
columnunit: (string) define a unit
RETURNS:
- DataStream object
EXAMPLE:
>>> stream = stream._put_column(res, 't2', columnname='Rain',columnunit='mm in 1h')
"""
#init = kwargs.get('init')
#if init>0:
# for i in range init:
# self.add(float('NaN'))
columnname = kwargs.get('columnname')
columnunit = kwargs.get('columnunit')
if not key in KEYLIST:
raise ValueError("Column key not valid")
if len(self.ndarray[0]) > 0:
ind = KEYLIST.index(key)
self.ndarray[ind] = np.asarray(column)
else:
if not len(column) == len(self):
raise ValueError("Column length does not fit Datastream")
for idx, elem in enumerate(self):
setattr(elem, key, column[idx])
if not columnname:
try: # TODO correct that
if eval('self.header["col-%s"]' % key) == '':
exec('self.header["col-%s"] = "%s"' % (key, key))
except:
pass
else:
exec('self.header["col-%s"] = "%s"' % (key, columnname))
if not columnunit:
try: # TODO correct that
if eval('self.header["unit-col-%s"]' % key) == '':
exec('self.header["unit-col-%s"] = "arb"' % (key))
except:
pass
else:
exec('self.header["unit-col-%s"] = "%s"' % (key, columnunit))
return self
def _move_column(self, key, put2key):
'''
DEFINITION:
Move column of key "key" to key "put2key".
Simples.
PARAMETERS:
Variables:
- key: (str) Key to be moved.
- put2key: (str) Key for 'key' to be moved to.
RETURNS:
- stream: (DataStream) DataStream object.
EXAMPLE:
>>> data_stream._move_column('f', 'var1')
'''
if not key in KEYLIST:
logger.error("_move_column: Column key %s not valid!" % key)
if key == 'time':
logger.error("_move_column: Cannot move time column!")
if not put2key in KEYLIST:
logger.error("_move_column: Column key %s (to move %s to) is not valid!" % (put2key,key))
if len(self.ndarray[0]) > 0:
col = self._get_column(key)
self =self._put_column(col,put2key)
return self
try:
for i, elem in enumerate(self):
exec('elem.'+put2key+' = '+'elem.'+key)
if key in NUMKEYLIST:
setattr(elem, key, float("NaN"))
#exec('elem.'+key+' = float("NaN")')
else:
setattr(elem, key, "-")
#exec('elem.'+key+' = "-"')
try:
exec('self.header["col-%s"] = self.header["col-%s"]' % (put2key, key))
exec('self.header["unit-col-%s"] = self.header["unit-col-%s"]' % (put2key, key))
exec('self.header["col-%s"] = None' % (key))
exec('self.header["unit-col-%s"] = None' % (key))
except:
logger.error("_move_column: Error updating headers.")
logger.info("_move_column: Column %s moved to column %s." % (key, put2key))
except:
logger.error("_move_column: It's an error.")
return self
def _drop_column(self,key):
"""
remove a column of a Stream
"""
ind = KEYLIST.index(key)
if len(self.ndarray[0]) > 0:
try:
self.ndarray[ind] = np.asarray([])
except:
# Some array don't allow that, shape error e.g. PYSTRING -> then use this
array = [np.asarray(el) if idx is not ind else np.asarray([]) for idx,el in enumerate(self.ndarray)]
self.ndarray = np.asarray(array,dtype=object)
colkey = "col-%s" % key
colunitkey = "unit-col-%s" % key
try:
self.header.pop(colkey, None)
self.header.pop(colunitkey, None)
except:
print("_drop_column: Error while dropping header info")
else:
print("No data available or LineStruct type (not supported)")
return self
def _clear_column(self, key):
"""
remove a column to a Stream
"""
#init = kwargs.get('init')
#if init>0:
# for i in range init:
# self.add(float('NaN'))
if not key in KEYLIST:
raise ValueError("Column key not valid")
for idx, elem in enumerate(self):
if key in NUMKEYLIST:
setattr(elem, key, float("NaN"))
#exec('elem.'+key+' = float("NaN")')
else:
setattr(elem, key, "-")
#exec('elem.'+key+' = "-"')
return self
def _reduce_stream(self, pointlimit=100000):
"""
DEFINITION:
Reduces size of stream by picking for plotting methods to save memory
when plotting large data sets.
Does NOT filter or smooth!
This function purely removes data points (rows) in a
periodic fashion until size is <100000 data points.
(Point limit can also be defined.)
PARAMETERS:
Kwargs:
- pointlimit: (int) Max number of points to include in stream. Default is 100000.
RETURNS:
- DataStream: (DataStream) New stream reduced to below pointlimit.
EXAMPLE:
>>> lessdata = ten_Hz_data._reduce_stream(pointlimit=500000)
"""
size = len(self)
div = size/pointlimit
divisor = math.ceil(div)
count = 0.
lst = []
if divisor > 1.:
for elem in self:
if count%divisor == 0.:
lst.append(elem)
count += 1.
else:
logger.warning("_reduce_stream: Stream size (%s) is already below pointlimit (%s)." % (size,pointlimit))
return self
logger.info("_reduce_stream: Stream size reduced from %s to %s points." % (size,len(lst)))
return DataStream(lst, self.header)
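    # Sketch of the decimation arithmetic above (illustrative only): a 1 Hz day
    # file (86400 rows) with the default pointlimit of 100000 gives
    # divisor = ceil(0.864) = 1, so nothing is dropped; a 10 Hz day file
    # (864000 rows) gives ceil(8.64) = 9, so only every 9th row is kept
    # (~96000 rows remain).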
def _remove_nancolumns(self):
"""
DEFINITION:
            Remove any columns solely filled with nan values
APPLICATION:
called by plot methods in mpplot
RETURNS:
            - DataStream: (DataStream) New stream with NaN-only columns removed.
"""
array = [[] for key in KEYLIST]
if len(self.ndarray[0]) > 0:
for idx, elem in enumerate(self.ndarray):
if len(self.ndarray[idx]) > 0 and KEYLIST[idx] in NUMKEYLIST:
lst = list(self.ndarray[idx])
#print KEYLIST[idx],lst[0]
if lst[1:] == lst[:-1] and np.isnan(float(lst[0])):
array[idx] = np.asarray([])
else:
array[idx] = self.ndarray[idx]
else:
array[idx] = self.ndarray[idx]
else:
pass
return DataStream(self,self.header,np.asarray(array,dtype=object))
# ------------------------------------------------------------------------
# B. Internal Methods: Data manipulation functions
# ------------------------------------------------------------------------
def _aic(self, signal, k, debugmode=None):
try:
aicval = (k-1)* np.log(np.var(signal[:k]))+(len(signal)-k-1)*np.log(np.var(signal[k:]))
except:
if debugmode:
logger.debug('_AIC: could not evaluate AIC at index position %i' % (k))
pass
return aicval
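    # A minimal sketch of the AIC split computed above, on a plain numpy array
    # (hypothetical data, not part of the library):
    #   sig = np.concatenate([np.random.normal(0, 1, 500), np.random.normal(5, 1, 500)])
    #   aics = [(k-1)*np.log(np.var(sig[:k])) + (len(sig)-k-1)*np.log(np.var(sig[k:]))
    #           for k in range(2, len(sig)-1)]
    #   onset = int(np.argmin(aics)) + 2   # the AIC minimum marks the most likely change point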
def harmfit(self,nt, val, fitdegree):
        # method for harmonic fit according to Phil McFadden's fortran program
"""
DEFINITION:
            Method for harmonic fit according to Phil McFadden's fortran program
Used by k-value determination
PARAMETERS:
Kwargs:
- nt: (list) Normalized time array.
- val: (list) Value list.
            - fitdegree:    (int) harmonic degree, default is 5.
RETURNS:
- newval: (array) an array with fitted values of length(val).
EXAMPLE:
>>> f_fit = self.harmfit(nt,val, 5)
"""
N = len(nt)
coeff = (val[-1]-val[0]) /(nt[-1]-nt[0])
newval = [elem-coeff*(nt[i]-nt[0]) for i, elem in enumerate(val)]
ReVal = []
ImVal = []
for h in range(0,fitdegree):
ReVal.append(newval[0])
ImVal.append(0.0)
angle = -h*(2.0*np.pi/N)
for i in range(1,len(newval)):
si = np.sin(i*angle)
co = np.cos(i*angle)
ReVal[h] = ReVal[h] + newval[i]*co
ImVal[h] = ImVal[h] + newval[i]*si
#print "Parameter:", len(newval)
#print len(ReVal), ReVal
angle = 2.0*np.pi*(float(N-1)/float(N))/(nt[-1]-nt[0])
harmval = []
for i,elem in enumerate(newval):
harmval.append(ReVal[0])
angle2 = (nt[i]-nt[0])*angle
for h in range(1,fitdegree):
si = np.sin(h*angle2)
co = np.cos(h*angle2)
harmval[i] = harmval[i]+(2.0*(ReVal[h]*co-ImVal[h]*si))
harmval[i] = harmval[i]/float(N)+coeff*(nt[i]-nt[0])
return np.asarray(harmval)
def _get_max(self, key, returntime=False):
if not key in KEYLIST[:16]:
raise ValueError("Column key not valid")
key_ind = KEYLIST.index(key)
t_ind = KEYLIST.index('time')
if len(self.ndarray[0]) > 0:
result = np.nanmax(self.ndarray[key_ind].astype(float))
ind = np.nanargmax(self.ndarray[key_ind].astype(float))
tresult = self.ndarray[t_ind][ind]
else:
elem = max(self, key=lambda tmp: eval('tmp.'+key))
result = eval('elem.'+key)
tresult = elem.time
if returntime:
return result, tresult
else:
return result
def _get_min(self, key, returntime=False):
if not key in KEYLIST[:16]:
raise ValueError("Column key not valid")
key_ind = KEYLIST.index(key)
t_ind = KEYLIST.index('time')
if len(self.ndarray[0]) > 0:
result = np.nanmin(self.ndarray[key_ind].astype(float))
ind = np.nanargmin(self.ndarray[key_ind].astype(float))
tresult = self.ndarray[t_ind][ind]
else:
elem = min(self, key=lambda tmp: eval('tmp.'+key))
result = eval('elem.'+key)
tresult = elem.time
if returntime:
return result, tresult
else:
return result
def _get_variance(self, key):
if not key in KEYLIST[:16]:
raise ValueError("Column key not valid")
key_ind = KEYLIST.index(key)
if len(self.ndarray[0]) > 0:
result = np.nanvar(self.ndarray[key_ind].astype(float))
return result
def amplitude(self,key):
"""
DESCRIPTION:
            calculates the maximum-minimum difference of the key's timeseries
REQUIRES:
_get_column()
RETURNS:
            float: difference between maximum and minimum value in time range
APPLICATION
amp = stream.amplitude('x')
"""
ts = self._get_column(key).astype(float)
ts = ts[~np.isnan(ts)]
maxts = np.max(ts)
mints = np.min(ts)
return maxts-mints
def _gf(self, t, tau):
"""
Gauss function
"""
return np.exp(-((t/tau)*(t/tau))/2)
def _hf(self, p, x):
"""
Harmonic function
"""
        hf = p[0]*np.cos(2*np.pi/p[1]*x+p[2]) + p[3]*x + p[4] # Target function
return hf
def _residual_func(self, func, y):
"""
residual of the harmonic function
"""
return y - func
def _tau(self, period, fac=0.83255461):
"""
low pass filter with -3db point at period in sec (e.g. 120 sec)
1. convert period from seconds to days as used in daytime
2. return tau (in unit "day")
- The value of 0.83255461 is obtained for -3db (see IAGA Guide)
"""
per = period/(3600*24)
return fac*per/(2*np.pi)
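    # Worked example for the constant above (illustrative only): a -3db period
    # of 120 s corresponds to
    #   per = 120/(3600*24)               # ~1.3889e-3 days
    #   tau = 0.83255461*per/(2*np.pi)    # ~1.84e-4 days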
# ------------------------------------------------------------------------
# B. Internal Methods: General utility & NaN handlers
# ------------------------------------------------------------------------
def _convertstream(self, coordinate, **kwargs):
"""
DESCRIPTION:
Convert coordinates of x,y,z columns in other
coordinate system:
- xyz2hdz
- xyz2idf
- hdz2xyz
- idf2xyz
Helper method which call the tranformation routines
APPLICATION:
used by k_fmi, variocorrection
"""
ext = ''
if len(self.ndarray[4]) > 0:
ext = 'F'
if len(self.ndarray[KEYLIST.index('df')]) > 0:
ext = 'G'
if len(self.ndarray[0]) > 0:
if coordinate == 'xyz2hdz':
self = self.xyz2hdz()
self.header['DataComponents'] = 'HDZ'+ext
elif coordinate == 'xyz2idf':
self = self.xyz2idf()
self.header['DataComponents'] = 'IDF'+ext
elif coordinate == 'hdz2xyz':
self = self.hdz2xyz()
self.header['DataComponents'] = 'XYZ'+ext
elif coordinate == 'idf2xyz':
self = self.idf2xyz()
self.header['DataComponents'] = 'XYZ'+ext
elif coordinate == 'idf2hdz':
self = self.idf2xyz()
self = self.xyz2hdz()
self.header['DataComponents'] = 'HDZ'+ext
elif coordinate == 'hdz2idf':
self = self.hdz2xyz()
self = self.xyz2idf()
self.header['DataComponents'] = 'IDF'+ext
else:
                print("_convertstream: unknown coordinate transform")
return self
keep_header = kwargs.get('keep_header')
outstream = DataStream()
for elem in self:
row=LineStruct()
exec('row = elem.'+coordinate+'(unit="deg")')
row.typ = ''.join((list(coordinate))[4:])+'f'
outstream.add(row)
if not keep_header:
outstream.header['col-x'] = (list(coordinate))[4]
outstream.header['col-y'] = (list(coordinate))[5]
outstream.header['col-z'] = (list(coordinate))[6]
if (list(coordinate))[4] in ['i','d']:
outstream.header['unit-col-x'] = 'deg'
else:
outstream.header['unit-col-x'] = 'nT'
if (list(coordinate))[5] in ['i','d']:
outstream.header['unit-col-y'] = 'deg'
else:
outstream.header['unit-col-y'] = 'nT'
if (list(coordinate))[6] in ['i','d']:
outstream.header['unit-col-z'] = 'deg'
else:
outstream.header['unit-col-z'] = 'nT'
return DataStream(outstream,outstream.header)
def _delete(self,index):
"""
DESCRIPTION:
            Helper method to delete all values at a specific index or range of indices
            from the ndarray
        APPLICATION:
            Used by k_fmi with individual indices
"""
for i,array in enumerate(self.ndarray):
if isinstance( index, (int) ): # removed long (not necessary for python3, error in win)
if len(array) > index:
self.ndarray[i] = np.delete(self.ndarray[i],index)
else:
self.ndarray[i] = np.delete(self.ndarray[i],index)
return self
def _append(self,stream):
"""
DESCRIPTION:
Helper method to append values from another stream to
a ndarray. Append only to columns already filled in self.
        APPLICATION:
Used by k_fmi
"""
        for i,array in enumerate(self.ndarray):
if len(array) > 0:
self.ndarray[i] = np.append(self.ndarray[i],stream.ndarray[i])
return self
def _det_trange(self, period):
"""
starting with coefficients above 1%
is now returning a timedelta object
"""
return np.sqrt(-np.log(0.01)*2)*self._tau(period)
def _is_number(self, s):
"""
Test whether s is a number
"""
if str(s) in ['','None',None]:
return False
try:
float(s)
return True
except ValueError:
return False
def _normalize(self, column):
"""
normalizes the given column to range [0:1]
"""
normcol = []
column = column.astype(float)
maxval = np.max(column)
minval = np.min(column)
for elem in column:
normcol.append((elem-minval)/(maxval-minval))
return normcol, minval, maxval
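    # A short sketch of the scaling above, assuming a plain numeric column:
    #   column = np.array([2., 4., 6.])
    #   normcol, minval, maxval = stream._normalize(column)
    #   # -> ([0.0, 0.5, 1.0], 2.0, 6.0), i.e. (elem-minval)/(maxval-minval)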
def _testtime(self, time):
"""
Check the date/time input and returns a datetime object if valid:
! Use UTC times !
- accepted are the following inputs:
1) absolute time: as provided by date2num
2) strings: 2011-11-22 or 2011-11-22T11:11:00
3) datetime objects by datetime.datetime e.g. (datetime(2011,11,22,11,11,00)
"""
if isinstance(time, float) or isinstance(time, int):
try:
timeobj = num2date(time).replace(tzinfo=None)
except:
raise TypeError
elif isinstance(time, str): # test for str only in Python 3 should be basestring for 2.x
try:
timeobj = datetime.strptime(time,"%Y-%m-%d")
except:
try:
timeobj = datetime.strptime(time,"%Y-%m-%dT%H:%M:%S")
except:
try:
timeobj = datetime.strptime(time,"%Y-%m-%d %H:%M:%S.%f")
except:
try:
timeobj = datetime.strptime(time,"%Y-%m-%dT%H:%M:%S.%f")
except:
try:
timeobj = datetime.strptime(time,"%Y-%m-%d %H:%M:%S")
except:
try:
# Not happy with that but necessary to deal
# with old 1000000 micro second bug
timearray = time.split('.')
if timearray[1] == '1000000':
timeobj = datetime.strptime(timearray[0],"%Y-%m-%d %H:%M:%S")+timedelta(seconds=1)
else:
# This would be wrong but leads always to a TypeError
timeobj = datetime.strptime(timearray[0],"%Y-%m-%d %H:%M:%S")
except:
try:
timeobj = num2date(float(time)).replace(tzinfo=None)
except:
raise TypeError
elif not isinstance(time, datetime):
raise TypeError
else:
timeobj = time
return timeobj
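    # Accepted inputs for _testtime, as a sketch (all interpreted as UTC):
    #   stream._testtime(date2num(datetime(2016,1,1,12)))    # absolute number from date2num
    #   stream._testtime('2016-01-01T12:00:00')              # date/time string
    #   stream._testtime(datetime(2016,1,1,12,0,0))          # datetime object
    # each of these returns datetime(2016, 1, 1, 12, 0, 0).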
def _drop_nans(self, key):
"""
DEFINITION:
Helper to drop all lines when NaNs or INFs are found within the selected key
RETURNS:
            - DataStream:   (DataStream object) a new data stream object without the identified lines.
EXAMPLE:
>>> newstream = stream._drop_nans('x')
APPLICATION:
used for plotting and fitting of data
"""
array = [np.asarray([]) for elem in KEYLIST]
if len(self.ndarray[0]) > 0 and key in NUMKEYLIST:
ind = KEYLIST.index(key)
#indicieslst = [i for i,el in enumerate(self.ndarray[ind].astype(float)) if np.isnan(el) or np.isinf(el)]
ar = np.asarray(self.ndarray[ind]).astype(float)
indicieslst = []
for i,el in enumerate(ar):
if np.isnan(el) or np.isinf(el):
indicieslst.append(i)
searchlist = ['time']
searchlist.extend(NUMKEYLIST)
for index,tkey in enumerate(searchlist):
if len(self.ndarray[index])>0: # Time column !!! -> index+1
array[index] = np.delete(self.ndarray[index], indicieslst)
#elif len(self.ndarray[index+1])>0:
# array[index+1] = self.ndarray[index+1]
newst = [LineStruct()]
else:
newst = [elem for elem in self if not isnan(eval('elem.'+key)) and not isinf(eval('elem.'+key))]
return DataStream(newst,self.header,np.asarray(array,dtype=object))
def _select_keys(self, keys):
"""
DESCRIPTION
Non-destructive method to select provided keys from Data stream.
APPLICATION:
streamxy = streamyxzf._select_keys(['x','y'])
"""
result = self.copy()
try:
if not len(keys) > 0:
return self
except:
return self
"""
print ("sel", keys)
if not 'time' in keys:
keys.append('time')
print ("sel", keys)
"""
ndarray = [[] for key in KEYLIST]
ndarray = np.asarray([np.asarray(elem) if KEYLIST[idx] in keys or KEYLIST[idx] == 'time' else np.asarray([]) for idx,elem in enumerate(result.ndarray)])
return DataStream([LineStruct()],result.header,ndarray)
def _select_timerange(self, starttime=None, endtime=None, maxidx=-1):
"""
DESCRIPTION
Non-destructive method to select a certain time range from a stream.
Similar to trim, leaving the original stream unchanged however.
APPLICATION:
Used by write
"""
ndarray = [[] for key in KEYLIST]
# Use a different technique
# copy all data to array and then delete everything below and above
#t1 = datetime.utcnow()
#ndarray = self.ndarray
startindices = []
endindices = []
if starttime:
starttime = self._testtime(starttime)
if self.ndarray[0].size > 0: # time column present
if maxidx > 0:
idx = (np.abs(self.ndarray[0][:maxidx]-date2num(starttime))).argmin()
else:
idx = (np.abs(self.ndarray[0]-date2num(starttime))).argmin()
# Trim should start at point >= starttime, so check:
if self.ndarray[0][idx] < date2num(starttime):
idx += 1
startindices = list(range(0,idx))
if endtime:
endtime = self._testtime(endtime)
if self.ndarray[0].size > 0: # time column present
#print "select timerange", maxidx
if maxidx > 0: # truncate the ndarray
#print maxidx
#tr = self.ndarray[0][:maxidx].astype(float)
idx = 1 + (np.abs(self.ndarray[0][:maxidx].astype(float)-date2num(endtime))).argmin() # get the nearest index to endtime and add 1 (to get lenghts correctly)
else:
idx = 1 + (np.abs(self.ndarray[0].astype(float)-date2num(endtime))).argmin() # get the nearest index to endtime and add 1 (to get lenghts correctly)
if idx >= len(self.ndarray[0]): ## prevent too large idx values
idx = len(self.ndarray[0]) # - 1
try: # using try so that this test is passed in case of idx == len(self.ndarray)
endnum = date2num(endtime)
#print ("Value now", idx, self.ndarray[0][idx], date2num(endtime))
if self.ndarray[0][idx] > endnum and self.ndarray[0][idx-1] < endnum:
# case 1: value at idx is larger, value at idx-1 is smaller -> use idx
pass
elif self.ndarray[0][idx] == endnum:
# case 2: value at idx is endnum -> use idx
pass
elif not self.ndarray[0][idx] <= endnum:
# case 3: value at idx-1 equals endnum -> use idx-1
idx -= 1
#print ("Value now b", idx, self.ndarray[0][idx], date2num(endtime))
#if not self.ndarray[0][idx] <= date2num(endtime):
# # Make sure that last value is either identical to endtime (if existing or one index larger)
# # This is important as from this index on, data is removed
# idx -= 1
# print ("Value now", idx, self.ndarray[0][idx], date2num(endtime))
# print ("Value now", idx, self.ndarray[0][idx+1], date2num(endtime))
except:
pass
endindices = list(range(idx,len(self.ndarray[0])))
indices = startindices + endindices
#t2 = datetime.utcnow()
#print "_select_timerange - getting t range needed:", t2-t1
if len(startindices) > 0:
st = startindices[-1]+1
else:
st = 0
if len(endindices) > 0:
ed = endindices[0]
else:
ed = len(self.ndarray[0])
for i in range(len(self.ndarray)):
ndarray[i] = self.ndarray[i][st:ed] ## This is the correct length
#t3 = datetime.utcnow()
#print "_select_timerange - deleting :", t3-t2
return np.asarray(ndarray,dtype=object)
# ------------------------------------------------------------------------
# C. Application methods
# (in alphabetical order)
# ------------------------------------------------------------------------
def aic_calc(self, key, **kwargs):
"""
DEFINITION:
Picking storm onsets using the Akaike Information Criterion (AIC) picker
- extract one dimensional array from DataStream (e.g. H) -> signal
- take the first k values of the signal and calculates variance and log
- plus the rest of the signal (variance and log)
NOTE: Best results come from evaluating two data series - one with original
data, one of same data with AIC timerange offset by timerange/2 to cover
any signals that may occur at the points between evaluations.
PARAMETERS:
Variables:
- key: (str) Key to check. Needs to be an element of KEYLIST.
Kwargs:
- timerange: (timedelta object) defines the length of the time window
examined by the aic iteration. (default: timedelta(hours=1).)
- aic2key: (str) defines the key of the column where to save the aic values
(default = var2).
- aicmin2key: (str) defines the key of the column where to save the aic minimum val
(default: key = var1.)
- aicminstack: (bool) if true, aicmin values are added to previously present column values.
RETURNS:
- self: (DataStream object) Stream with results in default var1 + var2 keys.
EXAMPLE:
>>> stream = stream.aic_calc('x',timerange=timedelta(hours=0.5))
APPLICATION:
from magpy.stream import read
stream = read(datapath)
stream = stream.aic_calc('x',timerange=timedelta(hours=0.5))
stream = stream.differentiate(keys=['var2'],put2keys=['var3'])
stream_filt = stream.extract('var1',200,'>')
        stream_new = stream_filt.eventlogger('var3',[30,40,60],'>',addcomment=True)
stream = mergeStreams(stream,stream_new,key='comment')
"""
timerange = kwargs.get('timerange')
aic2key = kwargs.get('aic2key')
aicmin2key = kwargs.get('aicmin2key')
aicminstack = kwargs.get('aicminstack')
if not timerange:
timerange = timedelta(hours=1)
if not aic2key:
aic2key = 'var2'
if not aicmin2key:
aicmin2key = 'var1'
t = self._get_column('time')
signal = self._get_column(key)
#Clear the projected results column
array = []
aic2ind = KEYLIST.index(aic2key)
self = self._clear_column(aic2key)
if len(self.ndarray[0]) > 0.:
self.ndarray[aic2ind] = np.empty((len(self.ndarray[0],)))
self.ndarray[aic2ind][:] = np.NAN
# get sampling interval for normalization - need seconds data to test that
sp = self.get_sampling_period()*24*60
        # correct approach
iprev = 0
iend = 0
while iend < len(t)-1:
istart = iprev
ta, iend = find_nearest(np.asarray(t), date2num(num2date(t[istart]).replace(tzinfo=None) + timerange))
if iend == istart:
iend += 60 # approx for minute files and 1 hour timedelta (used when no data available in time range) should be valid for any other time range as well
else:
currsequence = signal[istart:iend]
aicarray = []
for idx, el in enumerate(currsequence):
if idx > 1 and idx < len(currsequence):
# CALCULATE AIC
aicval = self._aic(currsequence, idx)/timerange.seconds*3600 # *sp Normalize to sampling rate and timerange
if len(self.ndarray[0]) > 0:
self.ndarray[aic2ind][idx+istart] = aicval
else:
exec('self[idx+istart].'+ aic2key +' = aicval')
if not isnan(aicval):
aicarray.append(aicval)
# store start value - aic: is a measure for the significance of information change
#if idx == 2:
# aicstart = aicval
#self[idx+istart].var5 = aicstart-aicval
maxaic = np.max(aicarray)
# determine the relative amplitude as well
cnt = 0
for idx, el in enumerate(currsequence):
if idx > 1 and idx < len(currsequence):
# TODO: this does not yet work with ndarrays
try:
if aicminstack:
if not eval('isnan(self[idx+istart].'+aicmin2key+')'):
exec('self[idx+istart].'+ aicmin2key +' += (-aicarray[cnt] + maxaic)')
else:
exec('self[idx+istart].'+ aicmin2key +' = (-aicarray[cnt] + maxaic)')
else:
exec('self[idx+istart].'+ aicmin2key +' = (-aicarray[cnt] + maxaic)')
exec('self[idx+istart].'+ aicmin2key +' = maxaic')
cnt = cnt+1
except:
msg = "number of counts does not fit usually because of nans"
iprev = iend
self.header['col-var2'] = 'aic'
return self
def baseline(self, absolutedata, **kwargs):
"""
DESCRIPTION:
calculates baseline correction for input stream (datastream)
Uses available baseline values from the provided absolute file
Special cases:
1) Absolute data covers the full time range of the stream:
-> Absolute data is extrapolated by duplicating the last and first entry at "extradays" offset
-> desired function is calculated
2) No Absolute data for the end of the stream:
-> like 1: Absolute data is extrapolated by duplicating the last entry at "extradays" offset or end of stream
                 -> an info message is created; if the time difference exceeds the "extradays" arg, a warning will be sent
            3) No Absolute data for the beginning of the stream:
                 -> like 2: Absolute data is extrapolated by duplicating the first entry at "extradays" offset or beginning of stream
                 -> an info message is created; if the time difference exceeds the "extradays" arg, a warning will be sent
VARIABLES:
required:
            absolutedata    (DataStream) containing DI data - usually obtained by absolutes.absoluteAnalysis()
keywords:
            plotbaseline    (bool/string) will plot a baseline plot (if a valid path is provided,
                            to file, otherwise to screen) - requires mpplot
            extradays       (int) days by which the absolutedata is extended prior to and after start and endtime
##plotfilename (string) if plotbaseline is selected, the outputplot is send to this file
fitfunc (string) see fit
fitdegree (int) see fit
knotstep (int) see fit
keys (list) keys which contain the basevalues (default) is ['dx','dy','dz']
APPLICATION:
func = data.baseline(didata,knotstep=0.1,plotbaseline=True)
# fixed time range
func = data.baseline(didata,startabs='2015-02-01',endabs='2015-08-24',extradays=0)
OR:
funclist = []
funclist.append(rawdata.baseline(basevalues, extradays=0, fitfunc='poly',
fitdegree=1,startabs='2009-01-01',endabs='2009-03-22'))
funclist.append(rawdata.baseline(basevalues, extradays=0, fitfunc='poly',
fitdegree=1,startabs='2009-03-22',endabs='2009-06-27'))
funclist.append(rawdata.baseline(basevalues, extradays=0, fitfunc='spline',
knotstep=0.2,startabs='2009-06-27',endabs='2010-02-01'))
stabilitytest (bool)
"""
keys = kwargs.get('keys')
fitfunc = kwargs.get('fitfunc')
fitdegree = kwargs.get('fitdegree')
knotstep = kwargs.get('knotstep')
extradays = kwargs.get('extradays',15)
plotbaseline = kwargs.get('plotbaseline')
plotfilename = kwargs.get('plotfilename')
startabs = kwargs.get('startabs')
endabs = kwargs.get('endabs')
orgstartabs = None
orgendabs = None
#if not extradays:
# extradays = 15
if not fitfunc:
fitfunc = self.header.get('DataAbsFunc')
if not fitfunc:
fitfunc = 'spline'
if not fitdegree:
fitdegree = self.header.get('DataAbsDegree')
if not fitdegree:
fitdegree = 5
if not knotstep:
knotstep = self.header.get('DataAbsKnots')
if not knotstep:
knotstep = 0.3
if not keys:
            keys = ['dx','dy','dz']
if len(self.ndarray[0]) > 0:
ndtype = True
starttime = np.min(self.ndarray[0])
endtime = np.max(self.ndarray[0])
else:
starttime = self[0].time
endtime = self[-1].time
fixstart,fixend = False,False
if startabs:
startabs = date2num(self._testtime(startabs))
orgstartabs = startabs
fixstart = True
if endabs:
endabs = date2num(self._testtime(endabs))
orgendabs = endabs
fixend = True
pierlong = absolutedata.header.get('DataAcquisitionLongitude','')
pierlat = absolutedata.header.get('DataAcquisitionLatitude','')
pierel = absolutedata.header.get('DataElevation','')
pierlocref = absolutedata.header.get('DataAcquisitionReference','')
pierelref = absolutedata.header.get('DataElevationRef','')
#self.header['DataAbsFunc'] = fitfunc
#self.header['DataAbsDegree'] = fitdegree
#self.header['DataAbsKnots'] = knotstep
#self.header['DataAbsDate'] = datetime.strftime(datetime.utcnow(),'%Y-%m-%d %H:%M:%S')
usestepinbetween = False # for better extrapolation
logger.info(' --- Start baseline-correction at %s' % str(datetime.now()))
absolutestream = absolutedata.copy()
#print("Baseline", absolutestream.length())
absolutestream = absolutestream.remove_flagged()
#print("Baseline", absolutestream.length())
#print("Baseline", absolutestream.ndarray[0])
absndtype = False
if len(absolutestream.ndarray[0]) > 0:
#print ("HERE1: adopting time range absolutes - before {} {}".format(startabs, endabs))
absolutestream.ndarray[0] = absolutestream.ndarray[0].astype(float)
absndtype = True
if not np.min(absolutestream.ndarray[0]) < endtime:
logger.warning("Baseline: Last measurement prior to beginning of absolute measurements ")
abst = absolutestream.ndarray[0]
if not startabs or startabs < np.min(absolutestream.ndarray[0]):
startabs = np.min(absolutestream.ndarray[0])
if not endabs or endabs > np.max(absolutestream.ndarray[0]):
endabs = np.max(absolutestream.ndarray[0])
else:
# 1) test whether absolutes are in the selected absolute data stream
if absolutestream[0].time == 0 or absolutestream[0].time == float('nan'):
raise ValueError ("Baseline: Input stream needs to contain absolute data ")
# 2) check whether enddate is within abs time range or larger:
if not absolutestream[0].time-1 < endtime:
logger.warning("Baseline: Last measurement prior to beginning of absolute measurements ")
abst = absolutestream._get_column('time')
startabs = absolutestream[0].time
endabs = absolutestream[-1].time
        # Initialize orgstartabs and orgendabs if not yet provided: orgabs values will be added to DataAbsInfo
if not orgstartabs:
orgstartabs = startabs
if not orgendabs:
orgendabs = endabs
#print ("HERE2a: Time range absolutes - {} {} {} {}".format(startabs, endabs, num2date(startabs), num2date(endabs)))
#print ("HERE2b: Time range datastream - {} {}".format(starttime, endtime))
# 3) check time ranges of stream and absolute values:
if startabs > starttime:
#print ('HERE2c: First absolute value measured after beginning of stream')
#logger.warning('Baseline: First absolute value measured after beginning of stream - duplicating first abs value at beginning of time series')
#if fixstart:
#
#absolutestream.add(absolutestream[0])
#absolutestream[-1].time = starttime
#absolutestream.sorting()
logger.info('Baseline: %d days without absolutes at the beginning of the stream' % int(np.floor(np.min(abst)-starttime)))
if endabs < endtime:
logger.info("Baseline: Last absolute measurement before end of stream - extrapolating baseline")
if num2date(endabs).replace(tzinfo=None) + timedelta(days=extradays) < num2date(endtime).replace(tzinfo=None):
usestepinbetween = True
if not fixend:
logger.warning("Baseline: Well... thats an adventurous extrapolation, but as you wish...")
starttime = num2date(starttime).replace(tzinfo=None)
endtime = num2date(endtime).replace(tzinfo=None)
# 4) get standard time rang of one year and extradays at start and end
# test whether absstream covers this time range including extradays
# ###########
# get boundaries
# ###########
extrapolate = False
# upper
if fixend:
#absolutestream = absolutestream.trim(endtime=endabs) # should I trim here already - leon ??
# time range long enough
baseendtime = endabs+extradays
if baseendtime < orgendabs:
baseendtime = orgendabs
extrapolate = True
else:
baseendtime = date2num(endtime+timedelta(days=1))
extrapolate = True
#if endabs >= date2num(endtime)+extradays:
# # time range long enough
# baseendtime = date2num(endtime)+extradays
# lower
if fixstart:
#absolutestream = absolutestream.trim(starttime=startabs) # should I trim here already - leon ??
basestarttime = startabs-extradays
if basestarttime > orgstartabs:
basestarttime = orgstartabs
extrapolate = True
else:
# not long enough
#basestarttime = date2num(starttime)
basestarttime = startabs-extradays
extrapolate = True
if baseendtime - (366.+2*extradays) > startabs:
# time range long enough
basestarttime = baseendtime-(366.+2*extradays)
baseendtime = num2date(baseendtime).replace(tzinfo=None)
basestarttime = num2date(basestarttime).replace(tzinfo=None)
#print ("HERE3a: basestart and end", basestarttime, baseendtime)
# Don't use trim here
#bas = absolutestream.trim(starttime=basestarttime,endtime=baseendtime)
basarray = absolutestream._select_timerange(starttime=basestarttime,endtime=baseendtime)
bas = DataStream([LineStruct()],absolutestream.header,basarray)
#print ("HERE3b: length of selected absolutes: ", bas.length()[0])
if extrapolate: # and not extradays == 0:
bas = bas.extrapolate(basestarttime,baseendtime)
        #keys = ['dx','dy','dz']
try:
print ("Fitting Baseline between: {a} and {b}".format(a=str(num2date(np.min(bas.ndarray[0]))),b=str(num2date(np.max(bas.ndarray[0])))))
print (keys, fitfunc, fitdegree, knotstep)
logger.info("Fitting Baseline between: {a} and {b}".format(a=str(num2date(np.min(bas.ndarray[0]))),b=str(num2date(np.max(bas.ndarray[0])))))
#print ("Baseline", bas.length(), keys)
#for elem in bas.ndarray:
# print elem
func = bas.fit(keys,fitfunc=fitfunc,fitdegree=fitdegree,knotstep=knotstep)
except:
            print ("Baseline: Error when determining fit - Not enough data points to satisfy fit complexity?")
logger.error("Baseline: Error when determining fit - Not enough data point to satisfy fit complexity? N = {}".format(bas.length()))
return None
#if len(keys) == 3:
# ix = KEYLIST.index(keys[0])
# iy = KEYLIST.index(keys[1])
# iz = KEYLIST.index(keys[2])
# get the function in some readable equation
#self.header['DataAbsDataT'] = bas.ndarray[0],bas.ndarray[ix],bas.ndarray[iy],bas.ndarray[iz]]
if plotbaseline:
#check whether plotbaseline is valid path or bool
try:
try:
import magpy.mpplot as mp
except ImportError:
print ("baseline: Could not load package mpplot")
if plotfilename:
mp.plot(bas,variables=['dx','dy','dz'],padding = [5,0.005,5], symbollist = ['o','o','o'],function=func,plottitle='Absolute data',outfile=plotfilename)
else:
mp.plot(bas,variables=['dx','dy','dz'],padding = [5,0.005,5], symbollist = ['o','o','o'],function=func,plottitle='Absolute data')
except:
print("using the internal plotting routine requires mpplot to be imported as mp")
keystr = '_'.join(keys)
pierlong = absolutedata.header.get('DataAcquisitionLongitude','')
pierlat = absolutedata.header.get('DataAcquisitionLatitude','')
pierel = absolutedata.header.get('DataElevation','')
pierlocref = absolutedata.header.get('DataLocationReference','')
pierelref = absolutedata.header.get('DataElevationRef','')
if not pierlong == '' and not pierlat == '' and not pierel == '':
absinfostring = '_'.join(map(str,[orgstartabs,orgendabs,extradays,fitfunc,fitdegree,knotstep,keystr,pierlong,pierlat,pierlocref,pierel,pierelref]))
else:
absinfostring = '_'.join(map(str,[orgstartabs,orgendabs,extradays,fitfunc,fitdegree,knotstep,keystr]))
existingabsinfo = self.header.get('DataAbsInfo','').replace(', EPSG',' EPSG').split(',')
if not existingabsinfo[0] == '':
existingabsinfo.append(absinfostring)
else:
existingabsinfo = [absinfostring]
# Get minimum and maximum times out of existing absinfostream
minstarttime=100000000.0
maxendtime=0.0
for el in existingabsinfo:
ele = el.split('_')
mintime = float(ele[0])
maxtime = float(ele[1])
if minstarttime > mintime:
minstarttime = mintime
if maxendtime < maxtime:
maxendtime = maxtime
exabsstring = ','.join(existingabsinfo)
self.header['DataAbsInfo'] = exabsstring # 735582.0_735978.0_0_spline_5_0.3_dx_dy_dz
#print ("HERE5a:", minstarttime, maxendtime, absolutestream.length()[0])
bas2save = absolutestream.trim(starttime=minstarttime,endtime=maxendtime)
tmpdict = bas2save.stream2dict()
#print ("HERE5b:", bas2save.length()[0])
self.header['DataBaseValues'] = tmpdict['DataBaseValues']
# Get column heads of dx,dy and dz
# default is H-base[nT],D-base[deg],Z-base[nT]
basecomp = "HDZ"
try:
basecomp = "{}{}{}".format(absolutestream.header.get('col-dx')[0],absolutestream.header.get('col-dy')[0],absolutestream.header.get('col-dz')[0])
except:
pass
if not basecomp == "HDZ":
print (" -> basevalues correspond to components {}".format(basecomp))
self.header['DataBaseComponents'] = basecomp
#self.header['DataAbsMinTime'] = func[1] #num2date(func[1]).replace(tzinfo=None)
#self.header['DataAbsMaxTime'] = func[2] #num2date(func[2]).replace(tzinfo=None)
#self.header['DataAbsFunctionObject'] = func
logger.info(' --- Finished baseline-correction at %s' % str(datetime.now()))
return func
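    # The DataAbsInfo entry assembled above is one underscore-separated record
    # per baseline segment, several segments joined by commas, e.g.
    #   735582.0_735978.0_0_spline_5_0.3_dx_dy_dz
    # i.e. startabs_endabs_extradays_fitfunc_fitdegree_knotstep_keys, optionally
    # followed by pier longitude, latitude, location reference, elevation and
    # elevation reference. A sketch of splitting it back into parameters (as
    # done in bc() below):
    #   for absinfo in data.header.get('DataAbsInfo','').split(','):
    #       parameter = absinfo.split('_')
    #       startabs, endabs = float(parameter[0]), float(parameter[1])
    #       funckeys = parameter[6:9]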
def stream2dict(self, keys=['dx','dy','dz'], dictkey='DataBaseValues'):
"""
DESCRIPTION:
Method to convert stream contents into a list and assign this to a dictionary.
You can use this method to directly store magnetic basevalues along with
data time series (e.g. using NasaCDF). Multilayer storage as supported by NetCDF
might provide better options to combine both data sets in one file.
PARAMETERS:
stream (DataStream) data containing e.g. basevalues
keys (list of keys) keys which are going to be stored
dictkey (string) name of the dictionaries key
RETURNS:
dict (dictionary) with name dictkey
APPLICATION:
>>> d = absdata.stream2dict(['dx','dy','dz'],'DataBaseValues')
>>> d = neicdata.stream2dict(['f','str3'],'Earthquakes')
"""
if not self.length()[0] > 0:
return {}
if not len(keys) > 0:
return {}
d = {}
keylst = ['time']
keylst.extend(keys)
array,headline,addline = [],[],[]
for key in keylst:
try:
pos = KEYLIST.index(key)
except ValueError:
pos = -1
if pos in range(0,len(KEYLIST)):
headline.append(key)
if not key == 'time':
addline.append(self.header.get('col-'+key))
else:
addline.append(self.header.get('DataID'))
column = self.ndarray[pos]
array.append(column)
rowlst = np.transpose(np.asarray(array)).astype(object)
fulllst = np.insert(rowlst,0,np.asarray(addline).astype(object),axis=0) ##could be used to store column names and id in time column
fulllst = np.insert(fulllst,0,np.asarray(headline).astype(object),axis=0)
d[dictkey] = fulllst
return d
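    # Layout of the list stored under dictkey (values illustrative): the first
    # row holds the key names, the second the column names (with the DataID in
    # the time slot), and the remaining rows the data, e.g. for keys ['dx','dy','dz']:
    #   [['time',        'dx',     'dy',     'dz'    ],
    #    ['DIDATA_0001',  'H-base', 'D-base', 'Z-base'],
    #    [736330.4,       21045.2,  3.552,    43997.1 ],
    #    ...]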
def dict2stream(self,dictkey='DataBaseValues'):
"""
DESCRIPTION:
Method to convert the list stored in stream.header['DataBaseValue']
to an absolute stream.
PARAMETERS:
stream (DataStream) stream with variation data
dictkey (string) ususally 'DataBaseValues'
RETURNS:
stream (DataStream) containing values of header info
APPLICATION:
>>> absstream = stream.dict2stream(header['DataBaseValues'])
"""
lst = self.header.get(dictkey)
if not type(lst) in (list,tuple,np.ndarray):
print("dict2stream: no list,tuple,array found in provided header key")
return DataStream()
if len(lst) == 0:
print("dict2stream: list is empty")
return DataStream()
array = [[] for el in KEYLIST]
headerinfo = lst[0]
addinfo = lst[1]
data = lst[2:]
#print(headerinfo,addinfo)
collst = np.transpose(np.asarray(data)).astype(object)
#print(collst)
for idx,key in enumerate(headerinfo):
pos = KEYLIST.index(key)
array[pos] = collst[idx]
return DataStream([LineStruct()], {}, np.asarray(array,dtype=object))
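    # Round-trip sketch, assuming basevalues were previously stored with stream2dict:
    #   variostream.header['DataBaseValues'] = absdata.stream2dict(['dx','dy','dz'])['DataBaseValues']
    #   absstream = variostream.dict2stream('DataBaseValues')
    # absstream then carries the time, dx, dy and dz columns of the stored list.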
def baselineAdvanced(self, absdata, baselist, **kwargs):
"""
DESCRIPTION:
reads stream, didata and baseline list
-> save separate monthly cdf's for each baseline input
-> Filename contains date of baseline jump
RETURNS:
list of header and ndarray -> this is necessary for datastreams
"""
sensid = kwargs.get('sensorid')
plotbaseline = kwargs.get('plotbaseline')
data = self.copy()
# Get start and endtime of stream
ts,te = data._find_t_limits()
# Get start and endtime of di data
tabss,tabse = absdata._find_t_limits()
# Some checks
if tabss > te or tabse < ts:
print ("baselineAdvanced: No DI data for selected stream available -aborting")
return False
if tabss > ts:
print ("baselineAdvanced: DI data does not cover the time range of stream - trimming stream")
data = data.trim(starttime=tabss)
if tabse < te:
print ("baselineAdvanced: DI data does not cover the time range of stream - trimming stream")
data = data.trim(endtime=tabse)
# Getting relevant baseline info
sensid = self.header.get('SensorID','')
if sensid == '':
print ("baselineAdvanced: No SensorID in header info - provide by option sensorid='XXX'")
return False
indlist = [ind for ind, elem in enumerate(baselist[0]) if elem == sensid]
#print "writeBC", indlist
senslist = [[el for idx,el in enumerate(elem) if idx in indlist] for elem in baselist]
#print "writeBC", senslist
#print "writeBC", senslist[1]
if not len(senslist) > 0:
print ("baselineAdvanced: Did not find any valid baseline parameters for selected sensor")
return False
# get index of starttime closest before
beforeinds = [[ind,np.abs(date2num(ts)-elem)] for ind, elem in enumerate(senslist[1]) if elem < date2num(ts)]
#print "writeBC", beforeinds
minl = [el[1] for el in beforeinds]
#print "writeBC minl", minl
startind = beforeinds[minl.index(np.min(minl))][0]
#print "writeBC", startind
vallist = [[el for idx,el in enumerate(elem) if idx == startind] for elem in senslist]
#print vallist
validinds = [ind for ind, elem in enumerate(senslist[1]) if elem >= date2num(ts) and elem <= date2num(te)]
#print "writeBC inds", validinds
vallist2 = [[el for idx,el in enumerate(elem) if idx in validinds] for elem in senslist]
#print vallist2
if len(vallist2[0]) > 0:
resultlist = []
for idx, elem in enumerate(vallist):
addelem = vallist2[idx]
print(elem, addelem)
elem.extend(addelem)
resultlist.append(elem)
else:
resultlist = vallist
print("baselineAdvanced: inds", resultlist)
# Select appropriate time ranges from stream
if not len(resultlist[0]) > 0:
print ("baselineAdvanced: Did not find any valid baseline parameters for selected sensor")
return False
streamlist = []
dictlist = []
resultlist = np.asarray(resultlist)
vals = resultlist.transpose()
for idx, elem in enumerate(vals):
#print "writeBC running", elem
mintime = float(elem[1])
maxtime = float(elem[2])
array = data._select_timerange(starttime=mintime, endtime=maxtime)
stream = DataStream(data,data.header,array)
baselinefunc = stream.baseline(absdata,startabs=mintime,endabs=maxtime, fitfunc=elem[3],fitdegree=int(elem[4]),knotstep=float(elem[5]),plotbaseline=plotbaseline)
#stream = stream.bc()
#exec('stream'+str(idx)+'= DataStream(stream,stream.header,stream.ndarray)')
dicthead = stream.header
#dictlist.append(dicthead.copy()) # Note: append just adds a pointer to content - use copy
#streamlist.append([dicthead.copy(),stream.ndarray])
streamlist.append([DataStream([LineStruct()],dicthead.copy(),stream.ndarray),baselinefunc])
#print "Streamlist", streamlist
#print len(dicthead),dictlist
return streamlist
def bc(self, function=None, ctype=None, alpha=0.0,level='preliminary'):
"""
DEFINITION:
Method to obtain baseline corrected data. By default flagged data is removed
before baseline correction.
Requires DataAbs values in the datastreams header.
The function object is transferred to keys x,y,z, please note that the baseline function
is stored in HDZ format (H:nT, D:0.0000 deg, Z: nT).
By default the bc method requires HDZ oriented variometer data. If XYZ data is provided,
or any other orientation, please provided rotation angles to transform this data into HDZ.
Example: For XYZ data please add the option alpha=DeclinationAtYourSite in a
float format of 0.00000 deg
PARAMETERS:
function (function object) provide the function directly - not from header
ctype (string) one of 'fff', 'fdf', 'ddf' - denoting nT components 'f' and degree 'd'
alpha/beta (floats) provide rotation angles for the variometer data to be applied
before correction - data is rotated back after correction
"""
logger.debug("BC: Performing baseline correction: Requires HEZ data.")
logger.debug(" H magnetic North, E magnetic East, Z vertical downwards, all in nT.")
pierdata = False
absinfostring = self.header.get('DataAbsInfo')
absvalues = self.header.get('DataBaseValues')
func = self.header.get('DataAbsFunctionObject')
datatype = self.header.get('DataType')
basecomp = self.header.get('DataBaseComponents')
if datatype == 'BC':
print ("BC: dataset is already baseline corrected - returning")
return self
bcdata = self.copy()
logger.debug("BC: Components of stream: {}".format(self.header.get('DataComponents')))
logger.debug("BC: baseline adoption information: {}".format(absinfostring))
if absinfostring and type(absvalues) in [list,np.ndarray,tuple]:
#print("BC: Found baseline adoption information in meta data - correcting")
absinfostring = absinfostring.replace(', EPSG',' EPSG')
absinfostring = absinfostring.replace(',EPSG',' EPSG')
absinfostring = absinfostring.replace(', epsg',' EPSG')
absinfostring = absinfostring.replace(',epsg',' EPSG')
absinfolist = absinfostring.split(',')
funclist = []
for absinfo in absinfolist:
#print("BC: TODO repeat correction several times and check header info")
# extract baseline data
absstream = bcdata.dict2stream()
#print("BC: abstream length", absstream.length()[0])
parameter = absinfo.split('_')
#print("BC:", parameter, len(parameter))
funckeys = parameter[6:9]
if len(parameter) >= 14:
#extract pier information
pierdata = True
pierlon = float(parameter[9])
pierlat = float(parameter[10])
pierlocref = parameter[11]
pierel = float(parameter[12])
pierelref = parameter[13]
#print("BC", num2date(float(parameter[0])))
#print("BC", num2date(float(parameter[1])))
if not funckeys == ['df']:
func = bcdata.baseline(absstream, startabs=float(parameter[0]), endabs=float(parameter[1]), extradays=int(float(parameter[2])), fitfunc=parameter[3], fitdegree=int(float(parameter[4])), knotstep=float(parameter[5]), keys=funckeys)
if 'dx' in funckeys:
func[0]['fx'] = func[0]['fdx']
func[0]['fy'] = func[0]['fdy']
func[0]['fz'] = func[0]['fdz']
func[0].pop('fdx', None)
func[0].pop('fdy', None)
func[0].pop('fdz', None)
keys = ['x','y','z']
elif 'x' in funckeys:
keys = ['x','y','z']
else:
print("BC: could not interpret BaseLineFunctionObject - returning")
return self
funclist.append(func)
#TODO addbaseline
#if AbsData contain xyz use mode='add'
datacomp = bcdata.header.get('DataComponents','')
if basecomp in ['xyz','XYZ']:
bcdata = bcdata.func2stream(funclist,mode='add',keys=keys)
bcdata.header['col-x'] = 'X'
bcdata.header['unit-col-x'] = 'nT'
bcdata.header['col-y'] = 'Y'
bcdata.header['unit-col-y'] = 'nT'
if len(datacomp) == 4:
bcdata.header['DataComponents'] = 'XYZ'+datacomp[3]
else:
bcdata.header['DataComponents'] = 'XYZ'
else:
#print ("BC: Found a list of functions:", funclist)
bcdata = bcdata.func2stream(funclist,mode='addbaseline',keys=keys)
bcdata.header['col-x'] = 'H'
bcdata.header['unit-col-x'] = 'nT'
bcdata.header['col-y'] = 'D'
bcdata.header['unit-col-y'] = 'deg'
datacomp = bcdata.header.get('DataComponents','')
if len(datacomp) == 4:
bcdata.header['DataComponents'] = 'HDZ'+datacomp[3]
else:
bcdata.header['DataComponents'] = 'HDZ'
# Add BC mark to datatype - data is baseline corrected
bcdata.header['DataType'] = 'BC'
# Update location data from absinfo
if pierdata:
self.header['DataAcquisitionLongitude'] = pierlon
self.header['DataAcquisitionLatitude'] = pierlat
self.header['DataLocationReference'] = pierlocref
self.header['DataElevation'] = pierel
self.header['DataElevationRef'] = pierelref
return bcdata
elif func:
# 1.) move content of basevalue function to columns 'x','y','z'?
try:
func[0]['fx'] = func[0]['fdx']
func[0]['fy'] = func[0]['fdy']
func[0]['fz'] = func[0]['fdz']
func[0].pop('fdx', None)
func[0].pop('fdy', None)
func[0].pop('fdz', None)
                keys = ['x','y','z']
except:
print("BC: could not interpret BaseLineFunctionObject - returning")
return self
# 2.) eventually transform self - check header['DataComponents']
if ctype == 'fff':
pass
elif ctype == 'ddf':
pass
else:
pass
#eventually use other information like absolute path, and function parameter
#for key in self.header:
# if key.startswith('DataAbs'):
# print key, self.header[key]
# drop all lines with nan values in either x or y and if x=0 add some 0.00001 because of arctan(y/x)
#print len(self.ndarray[0])
#for elem in self.ndarray[1]:
# if np.isnan(elem) or elem == 0.0:
# print "Found", elem
#self = self._drop_nans('x')
#self = self._drop_nans('y')
#print len(self.ndarray[0])
bcdata = bcdata.func2stream(func,mode='addbaseline',keys=['x','y','z'])
bcdata.header['col-x'] = 'H'
bcdata.header['unit-col-x'] = 'nT'
bcdata.header['col-y'] = 'D'
bcdata.header['unit-col-y'] = 'deg'
bcdata.header['DataComponents'] = 'HDZ'
return bcdata
else:
print("BC: No data for correction available - header needs to contain DataAbsFunctionObject")
return self
def bindetector(self,key,flagnum=1,keystoflag=['x'],sensorid=None,text=None,**kwargs):
"""
DEFINITION:
Function to detect changes between 0 and 1 and create a flaglist for zero or one states
PARAMETERS:
key: (key) key to investigate
            flagnum:        (int) integer between 0 and 4, default is 1
keystoflag: (list) list of keys to be flagged
sensorid: (string) sensorid for flaglist, default is sensorid of self
text: (string) text to be added to comments/stdout,
will be extended by on/off
Kwargs:
markallon: (BOOL) add comment to all ons
markalloff: (BOOL) add comment to all offs
            onvalue:        (float) critical value to determine the on state (default = 0.99)
RETURNS:
- flaglist
EXAMPLE:
            >>> flaglist = stream.bindetector('z',0,['x'],SensorID,'Maintenance switch for rain bucket',markallon=True)
"""
markallon = kwargs.get('markallon')
markalloff = kwargs.get('markalloff')
onvalue = kwargs.get('onvalue')
if not markallon and not markalloff:
markallon = True
if not onvalue:
onvalue = 0.99
if not sensorid:
sensorid = self.header.get('SensorID')
if not len(self.ndarray[0]) > 0:
print ("bindetector: No ndarray data found - aborting")
return self
moddate = datetime.utcnow()
ind = KEYLIST.index(key)
startstate = self.ndarray[ind][0]
flaglist=[]
# Find switching states (Joe Kington: http://stackoverflow.com/questions/4494404/find-large-number-of-consecutive-values-fulfilling-condition-in-a-numpy-array)
d = np.diff(self.ndarray[ind])
idx, = d.nonzero()
idx += 1
if markallon:
if not text:
text = 'on'
if self.ndarray[ind][0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if self.ndarray[ind][-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, self.ndarray[ind].size] # Edit
# Reshape the result into two columns
#print("Bindetector", idx, idx.size)
idx.shape = (-1,2)
for start,stop in idx:
stop = stop-1
for elem in keystoflag:
flagline = [num2date(self.ndarray[0][start]).replace(tzinfo=None),num2date(self.ndarray[0][stop]).replace(tzinfo=None),elem,int(flagnum),text,sensorid,moddate]
flaglist.append(flagline)
if markalloff:
if not text:
text = 'off'
if not self.ndarray[ind][0]:
                # If the start of condition is False prepend a 0
idx = np.r_[0, idx]
if not self.ndarray[ind][-1]:
                # If the end of condition is False, append the length of the array
idx = np.r_[idx, self.ndarray[ind].size] # Edit
# Reshape the result into two columns
idx.shape = (-1,2)
for start,stop in idx:
stop = stop-1
for elem in keystoflag:
                    flagline = [num2date(self.ndarray[0][start]).replace(tzinfo=None),num2date(self.ndarray[0][stop]).replace(tzinfo=None),elem,int(flagnum),text,sensorid,moddate]
flaglist.append(flagline)
return flaglist
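    # Sketch of the edge detection used above, on a plain 0/1 array (illustrative):
    #   state = np.array([0, 0, 1, 1, 1, 0, 0, 1])
    #   d = np.diff(state); idx, = d.nonzero(); idx += 1    # -> array([2, 5, 7])
    # prepending 0 / appending len(state) for open intervals and reshaping to
    # (-1, 2) gives the [start, stop) pairs of the "on" sections: [[2, 5], [7, 8]].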
def calc_f(self, **kwargs):
"""
DEFINITION:
        Calculates f from sqrt(x^2+y^2+z^2). If delta F is present, then by default
this value is added as well
PARAMETERS:
Kwargs:
- offset: (array) containing three elements [xoffset,yoffset,zoffset],
        - skipdelta   (bool) if selected then an existing delta f is not accounted for
RETURNS:
- DataStream with f and, if given, offset corrected xyz values
EXAMPLES:
>>> fstream = stream.calc_f()
>>> fstream = stream.calc_f(offset=[20000,0,43000])
"""
# Take care: if there is only 0.1 nT accuracy then there will be a similar noise in the deltaF signal
offset = kwargs.get('offset')
skipdelta = kwargs.get('skipdelta')
if not offset:
offset = [0,0,0]
else:
if not len(offset) == 3:
logger.error('calc_f: offset with wrong dimension given - needs to contain a three dim array like [a,b,c] - returning stream without changes')
return self
ndtype = False
try:
if len(self.ndarray[0]) > 0:
ndtype = True
elif len(self) > 1:
ndtype = False
else:
logger.error('calc_f: empty stream - aborting')
return self
except:
            logger.error('calc_f: inappropriate data provided - aborting')
return self
logger.info('calc_f: --- Calculating f started at %s ' % str(datetime.now()))
if ndtype:
inddf = KEYLIST.index('df')
indf = KEYLIST.index('f')
indx = KEYLIST.index('x')
indy = KEYLIST.index('y')
indz = KEYLIST.index('z')
if len(self.ndarray[inddf]) > 0 and not skipdelta:
df = self.ndarray[inddf].astype(float)
else:
df = np.asarray([0.0]*len(self.ndarray[indx]))
x2 = ((self.ndarray[indx]+offset[0])**2).astype(float)
y2 = ((self.ndarray[indy]+offset[1])**2).astype(float)
z2 = ((self.ndarray[indz]+offset[2])**2).astype(float)
self.ndarray[indf] = np.sqrt(x2+y2+z2) + df
else:
for elem in self:
elem.f = np.sqrt((elem.x+offset[0])**2+(elem.y+offset[1])**2+(elem.z+offset[2])**2)
self.header['col-f'] = 'f'
self.header['unit-col-f'] = 'nT'
logger.info('calc_f: --- Calculating f finished at %s ' % str(datetime.now()))
return self
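    # Worked example of the scalar value written to f above (values illustrative):
    #   x, y, z, df = 21000.0, 1700.0, 44000.0, 1.5
    #   f = np.sqrt(x**2 + y**2 + z**2) + df    # ~48785.6 nT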
def compensation(self, **kwargs):
"""
DEFINITION:
Method for magnetic variometer data:
Applies eventually present compensation field values in the header
to the vector x,y,z.
Compensation fields are provided in mirco Tesla (according to LEMI data).
Please note that any additional provided "DataDeltaValues" are also applied
by default (to avoid use option skipdelta=True).
Calculation:
            This method uses the header information data.header['DataCompensationX'],
            data.header['DataCompensationY'] and data.header['DataCompensationZ'].
            After successful application data.header['DataDeltaValuesApplied']
            is set to 1.
PARAMETERS:
Kwargs:
- skipdelta (bool) if True then DataDeltaValues are ignored
RETURNS:
            - DataStream with compensation values applied to xyz values
- original dataStream if no compensation values are found
EXAMPLES:
>>> compstream = stream.compensation()
"""
skipdelta = kwargs.get('skipdelta')
if not self.length()[0] > 0:
return self
stream = self.copy()
logger.info("compensation: applying compensation field values to variometer data ...")
deltas = stream.header.get('DataDeltaValues','')
if not skipdelta and not deltas=='':
logger.info("compensation: applying delta values from header['DataDeltaValues'] first")
stream = stream.offset(deltas)
stream.header['DataDeltaValuesApplied'] = 1
offdict = {}
xcomp = stream.header.get('DataCompensationX','0')
ycomp = stream.header.get('DataCompensationY','0')
zcomp = stream.header.get('DataCompensationZ','0')
if not float(xcomp)==0.:
offdict['x'] = -1*float(xcomp)*1000.
if not float(ycomp)==0.:
offdict['y'] = -1*float(ycomp)*1000.
if not float(zcomp)==0.:
offdict['z'] = -1*float(zcomp)*1000.
logger.info(' -- applying compensation fields: x={}, y={}, z={}'.format(xcomp,ycomp,zcomp))
if len(offdict) > 0:
stream = stream.offset(offdict)
stream.header['DataDeltaValuesApplied'] = 1
return stream
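    # Sketch of the unit handling above: compensation fields are stored in
    # micro Tesla, so e.g. DataCompensationX = 21.3 becomes an offset of
    #   offdict['x'] = -1*21.3*1000.   # = -21300.0 nT applied to the x column
    # before the offset() call.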
def cut(self,length,kind=0,order=0):
"""
DEFINITION:
cut returns the selected amount of lines from datastreams
PARAMETER:
stream : datastream
length : provide the amount of lines to be returned (default: percent of stream length)
kind : define the kind of length parameter
= 0 (default): length is given in percent
= 1: length is given in number of lines
order : define from which side
= 0 (default): the last amount of lines are returned
= 1: lines are counted from the beginning
VERSION:
added in MagPy 0.4.6
APPLICATION:
# length of stream: 86400
cutstream = stream.cut(50)
# length of cutstream: 43200
"""
stream = self.copy()
if length <= 0:
            print ("cut: length needs to be > 0")
return stream
if kind == 0:
if length > 100:
length = 100
amount = int(stream.length()[0]*length/100.)
else:
if length > stream.length()[0]:
return stream
else:
amount = length
for idx,el in enumerate(stream.ndarray):
if len(el) >= amount:
if order == 0:
nel = el[-amount:]
else:
nel = el[:amount]
stream.ndarray[idx] = nel
return stream
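    # Two short usage sketches for the length/kind parameters:
    #   lasthalf = stream.cut(50)                     # kind=0: last 50 percent of all lines
    #   first100 = stream.cut(100, kind=1, order=1)   # kind=1: first 100 lines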
    def dailymeans(self, keys=['x','y','z','f'], offset = 0.5, keepposition=False, **kwargs):
"""
DEFINITION:
Calculates daily means of xyz components and their standard deviations. By default
numpy's mean and std methods are applied even if only two data sets are available.
TODO ---
            If less than three data sets are provided, twice the difference between two values
            is used as a conservative proxy of uncertainty. If only one value is available, then
            the maximum uncertainty of the collection is assumed. This behavior can be changed
            by keyword arguments.
            TODO ---
            An outputstream is generated which contains basevalues in columns
            x,y,z and uncertainty values in dx,dy,dz.
            If only a single value is available, dx,dy,dz contain the average uncertainties
            of the full data set.
            The time column contains the average time of the measurement.
PARAMETERS:
Variables
- keys: (list) provide up to four keys which are used in columns x,y,z
            - offset:  (float) offset in time unit days (0 to 0.999), default is 0.5; some tests might use 0
Kwargs:
- none
RETURNS:
- stream: (DataStream object) with daily means and standard deviation
EXAMPLE:
>>> means = didata.dailymeans(keys=['dx','dy','dz'])
APPLICATION:
>>> means = didata.dailymeans(keys=['dx','dy','dz'])
>>> mp.plot(means,['x','y','z'],errorbars=True, symbollist=['o','o','o'])
"""
percentage = 90
keys = keys[:4]
poslst,deltaposlst = [],[]
deltakeys = ['dx','dy','dz','df']
for key in keys:
poslst.append(KEYLIST.index(key))
for idx,pos in enumerate(poslst):
deltaposlst.append(KEYLIST.index(deltakeys[idx]))
if not len(self.ndarray[0]) > 0:
return self
array = [[] for el in KEYLIST]
data = self.copy()
data = data.removeduplicates()
timecol = np.floor(data.ndarray[0])
tmpdatelst = np.asarray(list(set(list(timecol))))
for day in tmpdatelst:
sel = data._select_timerange(starttime=day,endtime=day+1)
"""
#for idx,day in enumerate(daylst):
#sel = final._select_timerange(starttime=np.round(day), endtime=np.round(day)+1)
"""
#print (len(sel))
sttmp = DataStream([LineStruct()],{},sel)
array[0].append(day+offset)
for idx, pos in enumerate(poslst):
#if len(sttmp.ndarray[idx+1]) > 0:
if not keepposition:
array[idx+1].append(sttmp.mean(KEYLIST[pos],percentage=percentage))
else:
array[pos].append(sttmp.mean(KEYLIST[pos],percentage=percentage))
#print ("Check", KEYLIST[pos], idx+1, len(sttmp._get_column(KEYLIST[pos])),sttmp._get_column(KEYLIST[pos]),sttmp.mean(KEYLIST[pos],percentage=percentage))
"""
#array[0].append(day+0.5)
#for idx,pos in enumerate(poslst):
array[idx+1].append(np.mean(sel[pos],percentage=percentage))
"""
data.header['col-'+KEYLIST[idx+1]] = '{}'.format(self.header.get('col-'+KEYLIST[pos]))
data.header['unit-col-'+KEYLIST[idx+1]] = '{}'.format(self.header.get('unit-col-'+KEYLIST[pos]))
diff = pos-idx
if not keepposition:
for idx,dpos in enumerate(deltaposlst):
#if len(sttmp.ndarray[idx]) > 0:
me,std = sttmp.mean(KEYLIST[idx+diff],percentage=percentage, std=True)
array[dpos].append(std)
#array[dpos].append(np.std(sel[idx+diff]))
data.header['col-'+KEYLIST[dpos]] = 'sigma {}'.format(self.header.get('col-'+KEYLIST[idx+diff]))
data.header['unit-col-'+KEYLIST[dpos]] = '{}'.format(self.header.get('unit-col-'+KEYLIST[idx+diff]))
data.header['DataFormat'] = 'MagPyDailyMean'
array = [np.asarray(el) for el in array]
retstream = DataStream([LineStruct()],data.header,np.asarray(array))
retstream = retstream.sorting()
return retstream
def date_offset(self, offset):
"""
IMPORTANT:
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
THIS METHOD IS NOT SUPPORTED ANY MORE. PLEASE USE
self.offset({'time':timedelta(seconds=1000)}) INSTEAD
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
DEFINITION:
            Corrects the time column of the selected stream by the offset
offset is a timedelta object (e.g. timedelta(hours=1))
PARAMETERS:
Variables:
- offset: (timedelta object) Offset to apply to stream.
Kwargs:
- None
RETURNS:
- stream: (DataStream object) Stream with offset applied.
EXAMPLE:
>>> data = data.offset(timedelta(minutes=3))
APPLICATION:
"""
header = self.header
newstream = DataStream()
array = self.ndarray
        if len(array[0]) > 0:
            ndtype = True
            secsperday = 3600*24
            array[0] = array[0] + offset.total_seconds()/secsperday
for elem in self:
newtime = num2date(elem.time).replace(tzinfo=None) + offset
elem.sectime = elem.time
elem.time = date2num(newtime)
newstream.add(elem)
logger.info('date_offset: Corrected time column by %s sec' % str(offset.total_seconds()))
return DataStream(newstream,header,array)
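# date_offset is deprecated. A minimal sketch of the recommended replacement, as
# stated in the docstring above (assumes 'data' is a DataStream):
#   from datetime import timedelta
#   data = data.offset({'time': timedelta(seconds=1000)})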
def delta_f(self, **kwargs):
"""
DESCRIPTION:
Calculates the difference between the vector sum sqrt(x**2+y**2+z**2) and f and puts the result into the df column
PARAMETER:
keywords:
:type offset: float
:param offset: constant offset to f values
:type digits: int
:param digits: number of digits to be rounded (should equal the input precision)
"""
# Take care: if there is only 0.1 nT accuracy then there will be a similar noise in the deltaF signal
offset = kwargs.get('offset')
digits = kwargs.get('digits')
if not offset:
offset = 0
if not digits:
digits = 8
logger.info('--- Calculating delta f started at %s ' % str(datetime.now()))
try:
syst = self.header['DataComponents']
except:
syst = None
ind = KEYLIST.index("df")
indx = KEYLIST.index("x")
indy = KEYLIST.index("y")
indz = KEYLIST.index("z")
indf = KEYLIST.index("f")
if len(self.ndarray[0])>0 and len(self.ndarray[indx])>0 and len(self.ndarray[indy])>0 and len(self.ndarray[indz])>0 and len(self.ndarray[indf])>0:
# requires x,y,z and f
arx = self.ndarray[indx]**2
ary = self.ndarray[indy]**2
arz = self.ndarray[indz]**2
if syst in ['HDZ','hdz','HDZF','hdzf','HDZS','hdzs','HDZG','hdzg']:
print("deltaF: found HDZ orientation")
ary = np.asarray([0]*len(self.ndarray[indy]))
sumar = list(arx+ary+arz)
sqr = np.sqrt(np.asarray(sumar))
self.ndarray[ind] = sqr - (self.ndarray[indf] + offset)
else:
for elem in self:
elem.df = round(np.sqrt(elem.x**2+elem.y**2+elem.z**2),digits) - (elem.f + offset)
self.header['col-df'] = 'delta f'
self.header['unit-col-df'] = 'nT'
logger.info('--- Calculating delta f finished at %s ' % str(datetime.now()))
return self
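# Usage sketch for delta_f (illustrative; assumes 'data' holds x,y,z and f columns):
#   data = data.delta_f(offset=0.0)
#   # the df column now contains sqrt(x**2+y**2+z**2) - (f + offset) in nT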
def f_from_df(self, **kwargs):
"""
DESCRIPTION:
Calculates f from the vector sum sqrt(x**2+y**2+z**2) and the df column
PARAMETER:
keywords:
:type offset: float
:param offset: constant offset to f values
:type digits: int
:param digits: number of digits to be rounded (should equal the input precision)
"""
# Take care: if there is only 0.1 nT accuracy then there will be a similar noise in the deltaF signal
offset = kwargs.get('offset')
digits = kwargs.get('digits')
if not offset:
offset = 0.
if not digits:
digits = 8
logger.info('--- Calculating f started at %s ' % str(datetime.now()))
try:
syst = self.header['DataComponents']
except:
syst = None
ind = KEYLIST.index("df")
indx = KEYLIST.index("x")
indy = KEYLIST.index("y")
indz = KEYLIST.index("z")
indf = KEYLIST.index("f")
if len(self.ndarray[0])>0 and len(self.ndarray[indx])>0 and len(self.ndarray[indy])>0 and len(self.ndarray[indz])>0 and len(self.ndarray[ind])>0:
# requires x,y,z and f
arx = self.ndarray[indx]**2
ary = self.ndarray[indy]**2
arz = self.ndarray[indz]**2
if syst in ['HDZ','hdz','HDZF','hdzf','HDZS','hdzs','HDZG','hdzg']:
print("deltaF: found HDZ orientation")
ary = np.asarray([0]*len(self.ndarray[indy]))
sumar = list(arx+ary+arz)
sqr = np.sqrt(np.asarray(sumar))
self.ndarray[indf] = sqr - (self.ndarray[ind] + offset)
else:
for elem in self:
elem.f = round(np.sqrt(elem.x**2+elem.y**2+elem.z**2),digits) - (elem.df + offset)
self.header['col-f'] = 'f'
self.header['unit-col-f'] = 'nT'
logger.info('--- Calculating f finished at %s ' % str(datetime.now()))
return self
def differentiate(self, **kwargs):
"""
DEFINITION:
Method to differentiate all columns with respect to time.
-- Using successive gradients
PARAMETERS:
Variables:
keys: (list - default ['x','y','z','f'] provide limited key-list
put2key
- keys: (list) Provide limited key-list. default = ['x','y','z','f']
- put2keys: (type) Provide keys to put differentiated keys to.
Default = ['dx','dy','dz','df']
Kwargs:
RETURNS:
- stream: (DataStream) Differentiated data stream, x values in dx, etc..
EXAMPLE:
>>> stream = stream.differentiate(keys=['f'],put2keys=['df'])
APPLICATION:
"""
logger.info('differentiate: Calculating derivative started.')
keys = kwargs.get('keys')
put2keys = kwargs.get('put2keys')
if not keys:
keys = ['x','y','z','f']
if not put2keys:
put2keys = ['dx','dy','dz','df']
if len(keys) != len(put2keys):
logger.error('Amount of columns read must be equal to outputcolumns')
return self
stream = self.copy()
ndtype = False
if len(stream.ndarray[0]) > 0:
t = stream.ndarray[0].astype(float)
ndtype = True
else:
t = stream._get_column('time')
for i, key in enumerate(keys):
if ndtype:
ind = KEYLIST.index(key)
val = stream.ndarray[ind].astype(float)
else:
val = stream._get_column(key)
dval = np.gradient(np.asarray(val))
stream._put_column(dval, put2keys[i])
stream.header['col-'+put2keys[i]] = r"d%s vs dt" % (key)
logger.info('--- derivative obtained at %s ' % str(datetime.now()))
return stream
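# Usage sketch for differentiate (illustrative; assumes 'stream' contains an f column):
#   deriv = stream.differentiate(keys=['f'], put2keys=['df'])
#   # df now holds the time derivative of f obtained via numpy.gradient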
def DWT_calc(self,key='x',wavelet='db4',level=3,plot=False,outfile=None,
window=5):
"""
DEFINITION:
Discrete wavelet transform (DWT) method of analysing a magnetic signal
to pick out SSCs. This method was taken from Hafez (2013): "Systematic examination
of the geomagnetic storm sudden commencement using multi resolution analysis."
(NOTE: PyWavelets package must be installed for this method. It should be applied
to 1s data - otherwise the sample window should be changed.)
METHOD:
1. Use the 4th-order Daubechies wavelet filter to calculate the 1st to 3rd details
(D1, D2, D3) of the geomagnetic signal. This is applied to a sliding window of
five samples.
2. The 3rd detail (D3) samples are squared to evaluate the magnitude.
3. The sample window (5) is averaged to avoid ripple effects. (This means the
returned stream will have ~1/5 the size of the original.)
PARAMETERS:
Variables:
- key: (str) Apply DWT to this key. Default 'x' due to SSCs dominating
the horizontal component.
- wavelet: (str) Type of filter to use. Default 'db4' (4th-order Daubechies
wavelet filter) according to Hafez (2013).
- level: (int) Decomposition level. Will calculate details down to this level.
Default 3, also Hafez (2013).
- plot: (bool) If True, will display a plot of A3, D1, D2 and D3.
- outfile: (str) If given, the plot will be saved to 'outfile' path.
- window: (int) Length of sample window. Default 5, i.e. 5s with second data.
RETURNS:
- DWT_stream: (DataStream object) A stream containing the following:
'x': A_n (approximation function)
'var1': D1 (first detail)
'var2': D2 (second detail)
'var3': D3 (third detail)
... will have to be changed if higher details are required.
EXAMPLE:
>>> DWT_stream = stream.DWT_calc(plot=True)
APPLICATION:
# Storm detection using detail 3 (D3 = var3):
from magpy.stream import *
stream = read('LEMI_1s_Data_2014-02-15.cdf') # 2014-02-15 is a good storm example
DWT_stream = stream.DWT_calc(plot=True)
Da_min = 0.0005 # nT^2 (minimum amplitude of D3 for storm detection)
Dp_min = 40 # seconds (minimum period of Da > Da_min for storm detection)
detection = False
for row in DWT_stream:
if row.var3 >= Da_min and detection == False:
timepin = row.time
detection = True
elif row.var3 < Da_min and detection == True:
duration = (num2date(row.time) - num2date(timepin)).seconds
if duration >= Dp_min:
print "Storm detected!"
print duration, num2date(timepin)
detection = False
"""
# Import required package PyWavelets:
# http://www.pybytes.com/pywavelets/index.html
import pywt
# 1a. Grab array from stream
data = self._get_column(key)
t_ind = KEYLIST.index('time')
#DWT_stream = DataStream([],{})
DWT_stream = DataStream()
headers = DWT_stream.header
array = [[] for key in KEYLIST]
x_ind = KEYLIST.index('x')
dx_ind = KEYLIST.index('dx')
var1_ind = KEYLIST.index('var1')
var2_ind = KEYLIST.index('var2')
var3_ind = KEYLIST.index('var3')
i = 0
logger.info("DWT_calc: Starting Discrete Wavelet Transform of key %s." % key)
# 1b. Loop for sliding window
while True:
if i >= (len(data)-window):
break
#row = LineStruct()
# Take the values in the middle of the window (not exact but changes are
# not extreme over standard 5s window)
#row.time = self[i+window/2].time
array[t_ind].append(self.ndarray[t_ind][i+int(window/2)])
data_cut = data[i:i+window]
#row.x = sum(data_cut)/float(window)
array[x_ind].append(sum(data_cut)/float(window))
# 1c. Calculate wavelet transform coefficients
# Wavedec produces results in form: [cA_n, cD_n, cD_n-1, ..., cD2, cD1]
# (cA_n is a list of coefficients for an approximation for the nth order.
# All cD_n are coefficients for details n --> 1.)
coeffs = pywt.wavedec(data_cut, wavelet, level=level)
# 1d. Calculate approximation and detail functions from coefficients
take = len(data_cut) # (Length of fn from coeffs = length of original data)
functions = []
approx = True
for item in coeffs:
if approx:
part = 'a' # Calculate approximation function
else:
part = 'd' # Calculate detail function
function = pywt.upcoef(part, item, wavelet, level=level, take=take)
functions.append(function)
approx = False
# 2. Square the results
fin_fns = []
for item in functions:
item_sq = [j**2 for j in item]
# 3. Average over the window
val = sum(item_sq)/window
fin_fns.append(val)
# TODO: This is hard-wired for level=3.
#row.dx, row.var1, row.var2, row.var3 = fin_fns
array[dx_ind].append(fin_fns[0])
array[var1_ind].append(fin_fns[3])
array[var2_ind].append(fin_fns[2])
array[var3_ind].append(fin_fns[1])
#DWT_stream.add(row)
i += window
logger.info("DWT_calc: Finished DWT.")
DWT_stream.header['col-x'] = 'A3'
DWT_stream.header['unit-col-x'] = 'nT^2'
DWT_stream.header['col-var1'] = 'D1'
DWT_stream.header['unit-col-var1'] = 'nT^2'
DWT_stream.header['col-var2'] = 'D2'
DWT_stream.header['unit-col-var2'] = 'nT^2'
DWT_stream.header['col-var3'] = 'D3'
DWT_stream.header['unit-col-var3'] = 'nT^2'
# Plot stream:
if plot == True:
date = datetime.strftime(num2date(self.ndarray[0][0]),'%Y-%m-%d')
logger.info('DWT_calc: Plotting data...')
if outfile:
DWT_stream.plot(['x','var1','var2','var3'],
plottitle="DWT Decomposition of %s (%s)" % (key,date),
outfile=outfile)
else:
DWT_stream.plot(['x','var1','var2','var3'],
plottitle="DWT Decomposition of %s (%s)" % (key,date))
#return DWT_stream
return DataStream([LineStruct()], headers, np.asarray([np.asarray(a) for a in array]))
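# Usage sketch for DWT_calc (illustrative; requires the PyWavelets package and
# one-second data, otherwise the 'window' argument should be adapted):
#   dwt = sec_stream.DWT_calc(key='x', wavelet='db4', level=3)
#   d3 = dwt.ndarray[KEYLIST.index('var3')]   # squared third detail (D3) used for SSC detection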
def eventlogger(self, key, values, compare=None, stringvalues=None, addcomment=None, debugmode=None):
"""
read stream and log data of which key meets the criteria
maybe combine with extract
Required:
:type key: string
:param key: provide the key to be examined
:type values: list
:param values: provide a list of three values
Optional:
:type compare: string
:param compare: ">, <, ==, !="
:type stringvalues: list
:param stringvalues: provide a list of exactly the same length as values with the respective comments
:type addcomment: bool
:param addcomment: if true add the stringvalues to the comment line of the datastream
:type debugmode: bool
:param debugmode: provide more information
example:
compare is string like ">, <, ==, !="
st.eventlogger('var3',[15,20,30],'>')
"""
assert type(values) == list
if not compare:
compare = '=='
if not compare in ['<','>','<=','>=','==','!=']:
logger.warning('Eventlogger: wrong value for compare: needs to be among <,>,<=,>=,==,!=')
return self
if not stringvalues:
stringvalues = ['Minor storm onset','Moderate storm onset','Major storm onset']
else:
assert type(stringvalues) == list
if not len(stringvalues) == len(values):
logger.warning('Eventlogger: Provided comments do not match amount of values')
return self
for elem in self:
#evaluationstring = 'elem.' + key + ' ' + compare + ' ' + str(values[0])
if eval('elem.'+key+' '+compare+' '+str(values[2])):
stormlogger.warning('%s at %s' % (stringvalues[2],num2date(elem.time).replace(tzinfo=None)))
if addcomment:
if elem.comment == '-':
elem.comment = stringvalues[2]
else:
elem.comment += ', ' + stringvalues[2]
elif eval('elem.'+key+' '+compare+' '+str(values[1])):
stormlogger.warning('%s at %s' % (stringvalues[1],num2date(elem.time).replace(tzinfo=None)))
if addcomment:
if elem.comment == '-':
elem.comment = stringvalues[1]
else:
elem.comment += ', ' + stringvalues[1]
elif eval('elem.'+key+' '+compare+' '+str(values[0])):
stormlogger.warning('%s at %s' % (stringvalues[0],num2date(elem.time).replace(tzinfo=None)))
if addcomment:
if elem.comment == '-':
elem.comment = stringvalues[0]
else:
elem.comment += ', ' + stringvalues[0]
return self
def extract(self, key, value, compare=None, debugmode=None):
"""
DEFINITION:
Read stream and extract data of the selected key which meets the chosen criteria
PARAMETERS:
Variables:
- key: (str) streams key e.g. 'x'.
- value: (str/float/int) any selected input which should be tested for
special note: if value is in brackets, then the term is evaluated
e.g. value="('int(elem.time)')" selects all points at 0:00
Important: this only works for compare = '=='
Kwargs:
- compare: (str) criteria, one out of ">=", "<=",">", "<", "==", "!=", default is '=='
- debugmode:(bool) if true several additional outputs will be created
RETURNS:
- DataStream with selected values only
EXAMPLES:
>>> extractedstream = stream.extract('x',20000,'>')
>>> extractedstream = stream.extract('str1','Berger')
"""
if not compare:
compare = '=='
if not compare in [">=", "<=",">", "<", "==", "!=", 'like']:
logger.info('--- Extract: Please provide proper compare parameter ">=", "<=",">", "<", "==", "like" or "!=" ')
return self
if value in ['',None]:
return self
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype = True
ind = KEYLIST.index(key)
stream = self.copy()
if not self._is_number(value):
if value.startswith('(') and value.endswith(')') and compare == '==':
logger.info("extract: Selected special functional type -equality defined by difference less then 10 exp-6")
if ndtype:
val = eval(value[1:-1])
indexar = np.where((np.abs(stream.ndarray[ind]-val)) < 0.000001)[0]
else:
val = value[1:-1]
liste = []
for elem in self:
if abs(eval('elem.'+key) - eval(val)) < 0.000001:
liste.append(elem)
return DataStream(liste,self.header)
else:
#print "Found String", ndtype
too = '"' + str(value) + '"'
if ndtype:
if compare == 'like':
indexar = np.asarray([i for i, s in enumerate(stream.ndarray[ind]) if str(value) in s])
else:
#print stream.ndarray[ind]
searchclause = 'stream.ndarray[ind] '+ compare + ' ' + too
#print searchclause, ind, key
indexar = eval('np.where('+searchclause+')[0]')
#print indexar, len(indexar)
else:
too = str(value)
if ndtype:
searchclause = 'stream.ndarray[ind].astype(float) '+ compare + ' ' + too
with np.errstate(invalid='ignore'):
indexar = eval('np.where('+searchclause+')[0]')
if ndtype:
for ind,el in enumerate(stream.ndarray):
if len(stream.ndarray[ind]) > 0:
ar = [stream.ndarray[ind][i] for i in indexar]
stream.ndarray[ind] = np.asarray(ar).astype(object)
return stream
else:
liste = [elem for elem in self if eval('elem.'+key+' '+ compare + ' ' + too)]
return DataStream(liste,self.header,self.ndarray)
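# Usage sketches for extract (illustrative; 'GSM' is an arbitrary example string):
#   lowfield = stream.extract('x', 20000, '<')          # numerical threshold
#   matching = stream.extract('str1', 'GSM', 'like')    # substring match on a text column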
def extract2(self, keys, get='>', func=None, debugmode=None):
"""
DEFINITION:
Read stream and extract data of the selected keys which meet the chosen criteria
PARAMETERS:
Variables:
- keys: (list) keylist like ['x','f'].
- func: a function object
Kwargs:
- get: (str) criteria, one out of ">=", "<=",">", "<", "==", "!=", default is '=='
- debugmode:(bool) if true several additional outputs will be created
RETURNS:
- DataStream with selected values only
EXAMPLES:
>>> extractedstream = stream.extract2(['x','y'], get='>')
"""
if not get:
get = '=='
if not get in [">=", "<=",">", "<", "==", "!=", 'like']:
print ('--- Extract: Please provide proper compare parameter ">=", "<=",">", "<", "==", "like" or "!=" ')
return self
stream = self.copy()
def func(x):
y = 1/(0.2*exp(0.06/(x/10000.))) + 2.5
return y
xpos = KEYLIST.index(keys[0])
ypos = KEYLIST.index(keys[1])
x = stream.ndarray[xpos].astype(float)
y = stream.ndarray[ypos].astype(float)
idxlist = []
for idx,val in enumerate(x):
ythreshold = func(val)
test = eval('y[idx] '+ get + ' ' + str(ythreshold))
#print (val, 'y[idx] '+ get + ' ' + str(ythreshold))
if test:
idxlist.append(idx)
array = [[] for key in KEYLIST]
for i,key in enumerate(KEYLIST):
for idx in idxlist:
if len(stream.ndarray[i]) > 0:
array[i].append(stream.ndarray[i][idx])
array[i] = np.asarray(array[i])
print ("Length of list", len(idxlist))
return DataStream([LineStruct()], stream.header,np.asarray(array))
def extrapolate(self, start, end):
"""
DESCRIPTION:
Reads stream output of absolute analysis and extrapolates the data.
Current method (to be improved if necessary):
- repeat the first and last inputs with baseline values at the desired start and end times
Hereby a functional fit (e.g. spline or polynomial) is forced towards a quasi-stable baseline evolution.
The principal assumption of this technique is that the base values are constant on average.
APPLICATION:
is used by stream.baseline
"""
ltime = date2num(end) # + timedelta(days=1))
ftime = date2num(start) # - timedelta(days=1))
array = [[] for key in KEYLIST]
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype = True
firsttime = np.min(self.ndarray[0])
lasttime = np.max(self.ndarray[0])
# Find the last element with baseline values - assuming a sorted array
inddx = KEYLIST.index('dx')
lastind=len(self.ndarray[0])-1
#print("Extrapolate", self.ndarray,len(self.ndarray[inddx]), self.ndarray[inddx], self.ndarray[inddx][lastind])
while np.isnan(float(self.ndarray[inddx][lastind])):
lastind = lastind-1
firstind=0
while np.isnan(float(self.ndarray[inddx][firstind])):
firstind = firstind+1
#print "extrapolate", num2date(ftime), num2date(ltime), ftime, ltime
for idx,elem in enumerate(self.ndarray):
if len(elem) > 0:
array[idx] = self.ndarray[idx]
if idx == 0:
array[idx] = np.append(array[idx],ftime)
array[idx] = np.append(array[idx],ltime)
#array[idx] = np.append(self.ndarray[idx],ftime)
#array[idx] = np.append(self.ndarray[idx],ltime)
else:
array[idx] = np.append(array[idx],array[idx][firstind])
array[idx] = np.append(array[idx],array[idx][lastind])
#array[idx] = np.append(self.ndarray[idx],self.ndarray[idx][firstind])
#array[idx] = np.append(self.ndarray[idx],self.ndarray[idx][lastind])
indar = np.argsort(array[0])
array = [el[indar].astype(object) if len(el)>0 else np.asarray([]) for el in array]
else:
if self.length()[0] < 2:
return self
firstelem = self[0]
lastelem = self[-1]
# Find the last element with baseline values
i = 1
while isnan(lastelem.dx):
lastelem = self[-i]
i = i +1
line = LineStruct()
for key in KEYLIST:
if key == 'time':
line.time = ftime
else:
exec('line.'+key+' = firstelem.'+key)
self.add(line)
line = LineStruct()
for key in KEYLIST:
if key == 'time':
line.time = ltime
else:
exec('line.'+key+' = lastelem.'+key)
self.add(line)
stream = DataStream(self,self.header,np.asarray(array,dtype=object))
#print "extra", stream.ndarray
#print "extra", stream.length()
#stream = stream.sorting()
return stream
#return DataStream(self,self.header,self.ndarray)
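# Usage sketch for extrapolate (illustrative; typically called internally by
# stream.baseline; 'absstream' is assumed to hold absolute/baseline results):
#   from datetime import datetime
#   absstream = absstream.extrapolate(datetime(2020, 1, 1), datetime(2021, 1, 1))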
def filter(self,**kwargs):
"""
DEFINITION:
Uses a selected window to filter the datastream - similar to the smooth function.
(take a look at the Scipy Cookbook/Signal Smooth)
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) at both ends so that transient parts are minimized
at the beginning and end of the output signal.
This function is approximately twice as fast as the previous version.
Difference: Gaps of the stream are filled by time steps with NaNs in the data columns
By default missing values are interpolated if more than 90 percent of data is present
within the window range. This is used to comply with INTERMAGNET rules. Set option
conservative to False to avoid this.
PARAMETERS:
Kwargs:
- keys: (list) List of keys to smooth
- filter_type: (string) name of the window. One of
'flat','barthann','bartlett','blackman','blackmanharris','bohman',
'boxcar','cosine','flattop','hamming','hann','nuttall',
'parzen','triang','gaussian','wiener','spline','butterworth'
See http://docs.scipy.org/doc/scipy/reference/signal.html
- filter_width: (timedelta) window width of the filter
- resample_period: (int) resampling interval in seconds (e.g. 1 for one second data)
leave blank for standard filters as it will be automatically selected
- noresample: (bool) if True the data set is resampled at filter_width positions
- missingdata: (string) define how to deal with missing data
'conservative' (default): no filtering
'interpolate': interpolate if less than 10% are missing
'mean': use mean if less than 10% are missing
- conservative: (bool) if True than no interpolation is performed
- autofill: (list) of keys: provide a keylist for which nan values are linearly interpolated before filtering - use with care, might be useful if you have low resolution parameters associated with main values (e.g. humidity)
- resampleoffset: (timedelta) if provided the offset will be added to resamples starttime
- resamplemode: (string) if 'fast' then fast resampling is used
- testplot: (bool) provides a plot of unfiltered and filtered data for each key if true
- dontfillgaps: (bool) if true, get_gaps will not be conducted - much faster but requires the absence of data gaps (including time step)
RETURNS:
- self: (DataStream) containing the filtered signal within the selected columns
EXAMPLE:
>>> nice_data = bad_data.filter(keys=['x','y','z'])
or
>>> nice_data = bad_data.filter(filter_type='gaussian',filter_width=timedelta(hours=1))
APPLICATION:
TODO:
!!A proper and correct treatment of gaps within the dataset to be filtered is missing!!
"""
# ########################
# Kwargs and definitions
# ########################
filterlist = ['flat','barthann','bartlett','blackman','blackmanharris','bohman',
'boxcar','cosine','flattop','hamming','hann','nuttall','parzen','triang',
'gaussian','wiener','spline','butterworth']
# To be added
#kaiser(M, beta[, sym]) Return a Kaiser window.
#slepian(M, width[, sym]) Return a digital Slepian (DPSS) window.
#chebwin(M, at[, sym]) Return a Dolph-Chebyshev window.
# see http://docs.scipy.org/doc/scipy/reference/signal.html
keys = kwargs.get('keys')
filter_type = kwargs.get('filter_type')
filter_width = kwargs.get('filter_width')
resample_period = kwargs.get('resample_period')
filter_offset = kwargs.get('filter_offset')
noresample = kwargs.get('noresample')
resamplemode = kwargs.get('resamplemode')
resamplestart = kwargs.get('resamplestart')
resampleoffset = kwargs.get('resampleoffset')
testplot = kwargs.get('testplot')
autofill = kwargs.get('autofill')
dontfillgaps = kwargs.get('dontfillgaps')
fillgaps = kwargs.get('fillgaps')
debugmode = kwargs.get('debugmode')
conservative = kwargs.get('conservative')
missingdata = kwargs.get('missingdata')
sr = self.samplingrate()
if not keys:
keys = self._get_key_headers(numerical=True)
if not filter_width and not resample_period:
if sr < 0.5: # use 1 second filter with 0.3 Hz cut off as default
filter_width = timedelta(seconds=3.33333333)
resample_period = 1.0
else: # use 1 minute filter with 0.008 Hz cut off as default
filter_width = timedelta(minutes=2)
resample_period = 60.0
if not filter_width: # resample_period obviously provided - use nyquist
filter_width = timedelta(seconds=2*resample_period)
if not resample_period: # filter_width obviously provided... use filter_width as period
resample_period = filter_width.total_seconds()
# Fall back for old data
if filter_width == timedelta(seconds=1):
filter_width = timedelta(seconds=3.3)
resample_period = 1.0
if not noresample:
resample = True
else:
resample = False
if not autofill:
autofill = []
else:
if not isinstance(autofill, (list, tuple)):
print("Autofill need to be a keylist")
return
if not resamplemode:
resamplefast = False
else:
if resamplemode == 'fast':
resamplefast = True
else:
resamplefast = False
if not debugmode:
debugmode = None
if not filter_type:
filter_type = 'gaussian'
if resamplestart:
print("############## Warning ##############")
print("option RESAMPLESTART is not used any more. Switch to resampleoffset for modifying time steps")
if not missingdata:
missingdata = 'conservative'
ndtype = False
# ########################
# Basic validity checks and window size definitions
# ########################
if not filter_type in filterlist:
logger.error("smooth: Window is none of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman', etc")
logger.debug("smooth: You entered non-existing filter type - %s - " % filter_type)
return self
logger.info("filter: Filtering with {} window".format(filter_type))
#print self.length()[0]
if not self.length()[0] > 1:
logger.error("Filter: stream needs to contain data - returning.")
return self
if debugmode:
print("Starting length:", self.length())
#if not dontfillgaps: ### changed--- now using dont fill gaps as default
if fillgaps:
self = self.get_gaps()
if debugmode:
print("length after getting gaps:", len(self))
window_period = filter_width.total_seconds()
si = timedelta(seconds=self.get_sampling_period()*24*3600)
sampling_period = si.days*24*3600 + si.seconds + np.round(si.microseconds/1000000.0,2)
if debugmode:
print("Timedelta and sampling period:", si, sampling_period)
# window_len defines the window size in data points assuming the major sampling period to be valid for the dataset
if filter_type == 'gaussian':
# For a gaussian fit
window_len = np.round((window_period/sampling_period))
#print (window_period,sampling_period,window_len)
# Window length needs to be odd number:
if window_len % 2 == 0:
window_len = window_len +1
std = 0.83255461*window_len/(2*np.pi)
trangetmp = self._det_trange(window_period)*24*3600
if trangetmp < 1:
trange = np.round(trangetmp,3)
else:
trange = timedelta(seconds=(self._det_trange(window_period)*24*3600)).seconds
if debugmode:
print("Window character: ", window_len, std, trange)
else:
window_len = np.round(window_period/sampling_period)
if window_len % 2:
window_len = window_len+1
trange = window_period/2
if sampling_period >= window_period:
logger.warning("Filter: Sampling period is equal or larger then projected filter window - returning.")
return self
# ########################
# Reading data of each selected column in stream
# ########################
if len(self.ndarray[0])>0:
t = self.ndarray[0]
ndtype = True
else:
t = self._get_column('time')
if debugmode:
print("Length time column:", len(t))
window_len = int(window_len)
for key in keys:
if debugmode:
print ("Start filtering for", key)
if not key in KEYLIST:
logger.error("Column key %s not valid." % key)
keyindex = KEYLIST.index(key)
if len(self.ndarray[keyindex])>0:
v = self.ndarray[keyindex]
else:
v = self._get_column(key)
# INTERMAGNET 90 percent rule: interpolate missing values if less than 10 percent are missing
#if not conservative or missingdata in ['interpolate','mean']:
if missingdata in ['interpolate','mean']:
fill = 'mean'
try:
if missingdata == 'interpolate':
fill = missingdata
else:
fill = 'mean'
except:
fill = 'mean'
v = self.missingvalue(v,np.round(window_period/sampling_period),fill=fill) # using ratio here and not _len
if key in autofill:
logger.warning("Filter: key %s has been selected for linear interpolation before filtering." % key)
logger.warning("Filter: I guess you know what you are doing...")
nans, x= nan_helper(v)
v[nans]= interp(x(nans), x(~nans), v[~nans])
# Make sure that we are dealing with numbers
v = np.array(list(map(float, v)))
if v.ndim != 1:
logger.error("Filter: Only accepts 1 dimensional arrays.")
if window_len<3:
logger.error("Filter: Window lenght defined by filter_width needs to cover at least three data points")
if debugmode:
print("Treating k:", key, v.size)
if v.size >= window_len:
#print ("Check:", v, len(v), window_len)
s=np.r_[v[int(window_len)-1:0:-1],v,v[-1:-int(window_len):-1]]
if filter_type == 'gaussian':
w = signal.gaussian(window_len, std=std)
y=np.convolve(w/w.sum(),s,mode='valid')
res = y[(int(window_len/2)):(len(v)+int(window_len/2))]
elif filter_type == 'wiener':
res = signal.wiener(v, int(window_len), noise=0.5)
elif filter_type == 'butterworth':
dt = 800./float(len(v))
nyf = 0.5/dt
b, a = signal.butter(4, 1.5/nyf)
res = signal.filtfilt(b, a, v)
elif filter_type == 'spline':
res = UnivariateSpline(t, v, s=240)(t)  # evaluate the fitted spline at the original time steps
elif filter_type == 'flat':
w=np.ones(int(window_len),'d')
s = np.ma.masked_invalid(s)
y=np.convolve(w/w.sum(),s,mode='valid') #'valid')
res = y[(int(window_len/2)-1):(len(v)+int(window_len/2)-1)]
else:
w = eval('signal.'+filter_type+'(window_len)')
y=np.convolve(w/w.sum(),s,mode='valid')
res = y[(int(window_len/2)):(len(v)+int(window_len/2))]
if testplot == True:
fig, ax1 = plt.subplots(1,1, figsize=(10,4))
ax1.plot(t, v, 'b.-', linewidth=2, label = 'raw data')
ax1.plot(t, res, 'r.-', linewidth=2, label = filter_type)
plt.show()
if ndtype:
self.ndarray[keyindex] = res
else:
self._put_column(res,key)
if resample:
if debugmode:
print("Resampling: ", keys)
self = self.resample(keys,period=resample_period,fast=resamplefast,offset=resampleoffset)
self.header['DataSamplingRate'] = str(resample_period) + ' sec'
# ########################
# Update header information
# ########################
passband = filter_width.total_seconds()
#print ("passband", 1/passband)
#self.header['DataSamplingFilter'] = filter_type + ' - ' + str(trange) + ' sec'
self.header['DataSamplingFilter'] = filter_type + ' - ' + str(1.0/float(passband)) + ' Hz'
return self
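# Usage sketch for filter (illustrative; assumes 'data' is one-second data that
# should be reduced to one-minute values):
#   from datetime import timedelta
#   onemin = data.filter(keys=['x','y','z'], filter_type='gaussian',
#                        filter_width=timedelta(minutes=2), resample_period=60)
#   # the DataSamplingFilter and DataSamplingRate header entries are updated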
def nfilter(self, **kwargs):
"""
DEFINITION:
Code for simple application, filtering function.
Returns stream with filtered data with sampling period of
filter_width.
PARAMETERS:
Variables:
- variable: (type) Description.
Kwargs:
- filter_type: (str) Options: gaussian, linear or special. Default = gaussian.
- filter_width: (timedelta object) Default = timedelta(minutes=1)
- filter_offset: (timedelta object) Default=0
- gauss_win: (int) Default = 1.86506 (corresponds to +/-45 sec in case of min or 45 min in case of hour).
- fmi_initial_data: (DataStream containing dH values (dx)) Default=[].
RETURNS:
- stream: (DataStream object) Stream containing filtered data.
EXAMPLE:
>>> stream_filtered = stream.filter(filter_width=timedelta(minutes=3))
APPLICATION:
"""
return self.filter(**kwargs)
def fit(self, keys, **kwargs):
"""
DEFINITION:
Code for fitting data. Please note: if nans are present in any of the selected keys
the whole line is dropped before fitting.
PARAMETERS:
Variables:
- keys: (list) Provide a list of keys to be fitted (e.g. ['x','y','z'].
Kwargs:
- fitfunc: (str) Options: 'poly', 'harmonic', 'least-squares', 'spline', 'none', default='spline'
- timerange: (timedelta object) Default = timedelta(hours=1)
- fitdegree: (float) Default=5
- knotstep: (float < 0.5) determines the amount of knots: amount = 1/knotstep ---> VERY smooth 0.1 | NOT VERY SMOOTH 0.001
- flag: (bool).
RETURNS:
- function object: (list) func = [functionkeylist, sv, ev]
EXAMPLE:
>>> func = stream.fit(['x'])
APPLICATION:
"""
# Defaults:
fitfunc = kwargs.get('fitfunc')
fitdegree = kwargs.get('fitdegree')
knotstep = kwargs.get('knotstep')
starttime = kwargs.get('starttime')
endtime = kwargs.get('endtime')
if not fitfunc:
fitfunc = 'spline'
if not fitdegree:
fitdegree = 5
if not knotstep:
knotstep = 0.01
defaulttime = 0
if not starttime:
starttime = self._find_t_limits()[0]
if not endtime:
endtime = self._find_t_limits()[1]
if starttime == self._find_t_limits()[0]:
defaulttime += 1
if endtime == self._find_t_limits()[1]:
defaulttime += 1
if knotstep >= 0.5:
raise ValueError("Knotstep needs to be smaller than 0.5")
functionkeylist = {}
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype=True
#tok = True
fitstream = self.copy()
if not defaulttime == 2: # TODO if applied to full stream, one point at the end is missing
fitstream = fitstream.trim(starttime=starttime, endtime=endtime)
sv = 0
ev = 0
for key in keys:
tmpst = fitstream._drop_nans(key)
#print ("Length", tmpst.length())
if ndtype:
t = tmpst.ndarray[0]
else:
t = tmpst._get_column('time')
if len(t) < 1:
#tok = False
print ("Column {} does not contain valid values".format(key))
continue
nt,sv,ev = fitstream._normalize(t)
sp = fitstream.get_sampling_period()
if sp == 0: ## if no dominant sampling period can be identified then use minutes
sp = 0.0177083333256
if not key in KEYLIST[1:16]:
raise ValueError("Column key not valid")
if ndtype:
ind = KEYLIST.index(key)
val = tmpst.ndarray[ind]
else:
val = tmpst._get_column(key)
# interpolate NaN values
# normalized sampling rate
sp = sp/(ev-sv) # should be the best?
#sp = (ev-sv)/len(val) # does not work
x = arange(np.min(nt),np.max(nt),sp)
#print len(x)
if len(val)<=1:
logger.warning('Fit: No valid data for key {}'.format(key))
break
elif fitfunc == 'spline':
try:
#logger.error('Interpolation: Testing knots (knotsteps = {}), (len(val) = {}'.format(knotstep, len(val)))
knots = np.array(arange(np.min(nt)+knotstep,np.max(nt)-knotstep,knotstep))
if len(knots) > len(val):
knotstep = knotstep*4
knots = np.array(arange(np.min(nt)+knotstep,np.max(nt)-knotstep,knotstep))
logger.warning('Too many knots in spline for available data. Please check amount of fitted data in time range. Trying to reduce resolution ...')
ti = interpolate.splrep(nt, val, k=3, s=0, t=knots)
except:
logger.error('Value error in fit function - likely reason: no valid numbers or too few numbers for fit: len(knots)={} > len(val)={}? '.format(len(knots),len(val)))
print ("Checking", key, len(val), val, sp, knotstep, len(knots))
raise ValueError("Value error in fit function - not enough data or invalid numbers")
return
#print nt, val, len(knots), knots
#ti = interpolate.interp1d(nt, val, kind='cubic')
#print "X", x, np.min(nt),np.max(nt),sp
#print "TI", ti
f_fit = interpolate.splev(x,ti)
elif fitfunc == 'poly':
logger.debug('Selected polynomial fit - amount of data: %d, time steps: %d, degree of fit: %d' % (len(nt), len(val), fitdegree))
ti = polyfit(nt, val, fitdegree)
f_fit = polyval(ti,x)
elif fitfunc == 'mean':
logger.debug('Selected mean fit - amount of data: {}, time steps: {}'.format(len(nt), len(val)))
meanvalue = np.nanmean(val)
meanval = np.asarray([meanvalue for el in val])
ti = polyfit(nt, meanval, 1)
f_fit = polyval(ti,x)
elif fitfunc == 'harmonic':
logger.debug('Selected harmonic fit - using inverse fourier transform')
f_fit = self.harmfit(nt, val, fitdegree)
# Don't use resampled list for harmonic time series
x = nt
elif fitfunc == 'least-squares':
logger.debug('Selected linear least-squares fit')
A = np.vstack([nt, np.ones(len(nt))]).T
m, c, = np.linalg.lstsq(A, val)[0]
f_fit = m * x + c
elif fitfunc == 'none':
logger.debug('Selected no fit')
return
else:
logger.warning('Fit: function not valid')
return
exec('f'+key+' = interpolate.interp1d(x, f_fit, bounds_error=False)')
exec('functionkeylist["f'+key+'"] = f'+key)
#if tok:
func = [functionkeylist, sv, ev]
#else:
# func = [functionkeylist, 0, 0]
return func
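# Usage sketch for fit (illustrative; assumes 'stream' holds valid x,y,z data):
#   func = stream.fit(['x','y','z'], fitfunc='spline', knotstep=0.02)
#   # func = [functionkeylist, sv, ev] as described in the docstring above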
def extractflags(self, debug=False):
"""
DEFINITION:
Extracts flags associated with the provided DataStream object
(as obtained by flaggedstream = stream.flag_outlier())
PARAMETERS:
Variables:
None
RETURNS:
- flaglist: (list) a flaglist of type [st,et,key,flagnumber,commentarray[idx],sensorid,now]
EXAMPLE:
>>> flaglist = stream.extractflags()
"""
sensorid = self.header.get('SensorID','')
now = datetime.utcnow()
flaglist = []
flpos = KEYLIST.index('flag')
compos = KEYLIST.index('comment')
flags = self.ndarray[flpos]
comments = self.ndarray[compos]
if not len(flags) > 0 or not len(comments) > 0:
return flaglist
uniqueflags = self.union(flags)
uniquecomments = self.union(comments)
# 1. Extract relevant keys from uniqueflags
if debug:
print ("extractflags: Unique Flags -", uniqueflags)
print ("extractflags: Unique Comments -", uniquecomments)
# zeroflag = ''
keylist = []
for elem in uniqueflags:
if not elem in ['','-']:
#print (elem)
for idx,el in enumerate(elem):
if not el == '-' and el in ['0','1','2','3','4','5','6']:
keylist.append(NUMKEYLIST[idx-1])
# 2. Cycle through keys and extract comments
if not len(keylist) > 0:
return flaglist
keylist = self.union(np.asarray(keylist))
for key in keylist:
indexflag = KEYLIST.index(key)
for comment in uniquecomments:
flagindicies = []
for idx, elem in enumerate(comments):
if not elem == '' and elem == comment:
#print ("ELEM", elem)
flagindicies.append(idx)
# 2. get consecutive groups
for k, g in groupby(enumerate(flagindicies), lambda ix: ix[0] - ix[1]):
try:
consecutives = list(map(itemgetter(1), g))
st = num2date(self.ndarray[0][consecutives[0]]).replace(tzinfo=None)
et = num2date(self.ndarray[0][consecutives[-1]]).replace(tzinfo=None)
flagnumber = flags[consecutives[0]][indexflag]
if not flagnumber in ['-',None]:
flaglist.append([st,et,key,int(flagnumber),comment,sensorid,now])
except:
print ("extractflags: error when extracting flaglist")
return flaglist
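# Usage sketch for extractflags (illustrative; assumes 'flagged' was produced by
# flag_outlier or flag):
#   fl = flagged.extractflags()
#   # each entry: [starttime, endtime, key, flagnumber, comment, sensorid, moddate]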
def flagfast(self,indexarray,flag, comment,keys=None):
"""
DEFINITION:
Add a flag to specific indicies of the streams ndarray.
PARAMETERS:
Variables:
- keys: (list) Optional: list of keys to mark ['x','y','z']
- flag: (int) 0 ok, 1 remove, 2 force ok, 3 force remove,
4 merged from other instrument
- comment: (str) The reason for flag
- indexarray: (array) indices of the datapoint(s) to mark
RETURNS:
- DataStream: Input stream with flags and comments.
EXAMPLE:
>>> data = data.flagfast([155],'3','Lawnmower',['x','y','z'])
APPLICATION:
"""
print("Adding flags .... ")
# Define Defaultflag
flagls = [str('-') for elem in FLAGKEYLIST]
defaultflag = ''
# Get new flag
newflagls = []
if not keys:
for idx,key in enumerate(FLAGKEYLIST): # Flag all existing data
if len(self.ndarray[idx]) > 0:
newflagls.append(str(flag))
else:
newflagls.append('-')
newflag = ''.join(newflagls)
else:
for idx,key in enumerate(FLAGKEYLIST): # Only key column
if len(self.ndarray[idx]) > 0 and FLAGKEYLIST[idx] in keys:
newflagls.append(str(flag))
else:
newflagls.append('-')
newflag = ''.join(newflagls)
flagarray, commentarray = [],[]
flagindex = KEYLIST.index('flag')
commentindex = KEYLIST.index('comment')
# create a predefined list
# ########################
# a) get existing flags and comments or create empty lists
if len(self.ndarray[flagindex]) > 0:
flagarray = self.ndarray[flagindex].astype(object)
else:
flagarray = [''] * len(self.ndarray[0])
if len(self.ndarray[commentindex]) > 0:
commentarray = self.ndarray[commentindex].astype(object)
else:
commentarray = [''] * len(self.ndarray[0])
# b) insert new info
for i in indexarray:
flagarray[i] = newflag
commentarray[i] = comment
commentarray = np.asarray(commentarray, dtype='object')
flagarray = np.asarray(flagarray, dtype='object')
flagnum = KEYLIST.index('flag')
commentnum = KEYLIST.index('comment')
self.ndarray[flagnum] = flagarray
self.ndarray[commentnum] = commentarray
#print "... finished"
return self
def flag_range(self, **kwargs):
"""
DEFINITION:
Flags data within time range or data exceeding a certain threshold
Coding : 0 take, 1 remove, 2 force take, 3 force remove
PARAMETERS:
Variables:
- None.
Kwargs:
- keys: (list) List of keys to check for criteria. Default = all numerical
please note: for using above and below criteria only one element
needs to be provided (e.g. ['x'])
- text (string) comment
- flagnum (int) Flagid
- keystoflag: (list) List of keys to flag. Default = all numerical
- below: (float) flag data of key below this numerical value.
- above: (float) flag data of key exceeding this numerical value.
- starttime: (datetime Object)
- endtime: (datetime Object)
RETURNS:
- flaglist: (list) flagging information - use stream.flag(flaglist) to add to stream
EXAMPLE:
>>> fllist = stream.flag_range(keys=['x'], above=80)
APPLICATION:
"""
keys = kwargs.get('keys')
above = kwargs.get('above')
below = kwargs.get('below')
starttime = kwargs.get('starttime')
endtime = kwargs.get('endtime')
text = kwargs.get('text')
flagnum = kwargs.get('flagnum')
keystoflag = kwargs.get('keystoflag')
numuncert = 0.0000000001 # numerical uncertainty on different machines when using date2num()
sensorid = self.header.get('SensorID')
moddate = datetime.utcnow()
flaglist=[]
if not keystoflag:
keystoflag = self._get_key_headers(numerical=True)
if not flagnum:
flagnum = 0
if not len(self.ndarray[0]) > 0:
print ("flag_range: No data available - aborting")
return flaglist
if not len(keys) == 1:
if above or below:
print ("flag_range: for using thresholds above and below only a single key needs to be provided")
print (" -- ignoring given above and below values")
below = False
above = False
# test validity of starttime and endtime
trimmedstream = self.copy()
if starttime and endtime:
trimmedstream = self._select_timerange(starttime=starttime,endtime=endtime)
trimmedstream = DataStream([LineStruct()],self.header,trimmedstream)
elif starttime:
trimmedstream = self._select_timerange(starttime=starttime)
trimmedstream = DataStream([LineStruct()],self.header,trimmedstream)
elif endtime:
trimmedstream = self._select_timerange(endtime=endtime)
trimmedstream = DataStream([LineStruct()],self.header,trimmedstream)
if not above and not below:
# return flags for all data in trimmed stream
for elem in keystoflag:
flagline = [num2date(trimmedstream.ndarray[0][0]-numuncert).replace(tzinfo=None),num2date(trimmedstream.ndarray[0][-1]-numuncert).replace(tzinfo=None),elem,int(flagnum),text,sensorid,moddate]
flaglist.append(flagline)
return flaglist
if above and below:
# TODO create True/False list and then follow the bin detector example
ind = KEYLIST.index(keys[0])
trueindicies = (trimmedstream.ndarray[ind] > above) & (trimmedstream.ndarray[ind] < below)
d = np.diff(trueindicies)
idx, = d.nonzero()
idx += 1
if not text:
text = 'outside of range {} to {}'.format(below,above)
if trueindicies[0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if trueindicies[-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, trimmedstream.ndarray[ind].size] # Edit
# Reshape the result into two columns
idx.shape = (-1,2)
for start,stop in idx:
stop = stop-1
for elem in keystoflag:
# numerical uncertainty is subtracted from both time steps, as the flagging procedure (findtime) links
# flags to the exact time stamp or, if not found, due to numerical diffs, to the next timestamp
flagline = [num2date(trimmedstream.ndarray[0][start]-numuncert).replace(tzinfo=None),num2date(trimmedstream.ndarray[0][stop]-numuncert).replace(tzinfo=None),elem,int(flagnum),text,sensorid,moddate]
flaglist.append(flagline)
elif above:
# TODO create True/False list and then follow the bin detector example
ind = KEYLIST.index(keys[0])
trueindicies = trimmedstream.ndarray[ind] > above
d = np.diff(trueindicies)
idx, = d.nonzero()
idx += 1
if not text:
text = 'exceeding {}'.format(above)
if trueindicies[0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if trueindicies[-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, trimmedstream.ndarray[ind].size] # Edit
# Reshape the result into two columns
idx.shape = (-1,2)
for start,stop in idx:
stop = stop-1
for elem in keystoflag:
flagline = [num2date(trimmedstream.ndarray[0][start]-numuncert).replace(tzinfo=None),num2date(trimmedstream.ndarray[0][stop]-numuncert).replace(tzinfo=None),elem,int(flagnum),text,sensorid,moddate]
flaglist.append(flagline)
elif below:
# TODO create True/False the other way round
ind = KEYLIST.index(keys[0])
truefalse = trimmedstream.ndarray[ind] < below
d = np.diff(truefalse)
idx, = d.nonzero()
idx += 1
if not text:
text = 'below {}'.format(below)
if truefalse[0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if truefalse[-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, trimmedstream.ndarray[ind].size] # Edit
# Reshape the result into two columns
idx.shape = (-1,2)
for start,stop in idx:
stop = stop-1
for elem in keystoflag:
flagline = [num2date(trimmedstream.ndarray[0][start]-numuncert).replace(tzinfo=None),num2date(trimmedstream.ndarray[0][stop]-numuncert).replace(tzinfo=None),elem,int(flagnum),str(text),sensorid,moddate]
flaglist.append(flagline)
return flaglist
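# Usage sketch for flag_range (illustrative; the threshold value is arbitrary):
#   fl = stream.flag_range(keys=['f'], above=55000, text='above threshold', flagnum=3)
#   stream = stream.flag(fl)   # apply the returned flaglist to the stream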
def flag_outlier(self, **kwargs):
"""
DEFINITION:
Flags outliers in data, using quartiles.
Coding : 0 take, 1 remove, 2 force take, 3 force remove
Example:
0000000, 0001000, etc
012 = take f, automatically removed v, and force use of other
300 = force remove f, take v, and take other
PARAMETERS:
Variables:
- None.
Kwargs:
- keys: (list) List of keys to evaluate. Default = all numerical
- threshold: (float) Determines threshold for outliers.
1.5 = standard
5 = weak condition, keeps storm onsets in (default)
4 = a useful compromise to be used in automatic analysis.
- timerange: (timedelta Object) Time range. Default = samplingrate(sec)*600
- stdout: prints removed values to stdout
- returnflaglist (bool) if True, a flaglist is returned instead of stream
- markall (bool) default is False. If True, all components (provided keys)
are flagged even if outlier is only detected in one. Useful for
vectorial data
RETURNS:
- stream: (DataStream Object) Stream with flagged data.
EXAMPLE:
>>> stream.flag_outlier(keys=['x','y','z'], threshold=2)
APPLICATION:
"""
# Defaults:
timerange = kwargs.get('timerange')
threshold = kwargs.get('threshold')
keys = kwargs.get('keys')
markall = kwargs.get('markall')
stdout = kwargs.get('stdout')
returnflaglist = kwargs.get('returnflaglist')
sr = self.samplingrate()
flagtimeprev = 0
startflagtime = 0
numuncert = 0.0000000001 # numerical uncertainty on different machines when using date2num()
if not timerange:
sr = self.samplingrate()
timerange = timedelta(seconds=sr*600)
if not keys:
keys = self._get_key_headers(numerical=True)
if not threshold:
threshold = 5.0
cdate = datetime.utcnow().replace(tzinfo=None)
sensorid = self.header.get('SensorID','')
flaglist = []
# Position of flag in flagstring
# f (intensity): pos 0
# x,y,z (vector): pos 1
# other (vector): pos 2
if not len(self.ndarray[0]) > 0:
logger.info('flag_outlier: No ndarray - starting old remove_outlier method.')
self = self.remove_outlier(keys=keys,threshold=threshold,timerange=timerange,stdout=stdout,markall=markall)
return self
logger.info('flag_outlier: Starting outlier identification...')
flagidx = KEYLIST.index('flag')
commentidx = KEYLIST.index('comment')
if not len(self.ndarray[flagidx]) > 0:
self.ndarray[flagidx] = [''] * len(self.ndarray[0])
else:
self.ndarray[flagidx] = self.ndarray[flagidx].astype(object)
if not len(self.ndarray[commentidx]) > 0:
self.ndarray[commentidx] = [''] * len(self.ndarray[0])
else:
self.ndarray[commentidx] = self.ndarray[commentidx].astype(object)
# get a poslist of all keys - used for markall
flagposls = [FLAGKEYLIST.index(key) for key in keys]
# Start here with for key in keys:
for key in keys:
flagpos = FLAGKEYLIST.index(key)
if not len(self.ndarray[flagpos]) > 0:
print("Flag_outlier: No data for key %s - skipping" % key)
break
print ("-------------------------")
print ("Dealing with key:", key)
st = 0
et = len(self.ndarray[0])
incrt = int(timerange.total_seconds()/sr)
if incrt == 0:
print("Flag_outlier: check timerange ... seems to be smaller as sampling rate")
break
at = incrt
while st < et:
idxst = st
idxat = at
st = at
at += incrt
if idxat > et:
idxat = et
#print key, idxst, idxat
selcol = self.ndarray[flagpos][idxst:idxat].astype(float)
selcol = selcol[~np.isnan(selcol)]
if len(selcol) > 0:
try:
q1 = stats.scoreatpercentile(selcol,16)
q3 = stats.scoreatpercentile(selcol,84)
iqd = q3-q1
md = np.median(selcol)
if iqd == 0:
iqd = 0.000001
whisker = threshold*iqd
#print key, md, iqd, whisker
except:
try:
md = np.median(selcol)
whisker = md*0.005
except:
logger.warning("remove_outlier: Eliminate outliers produced a problem: please check.")
pass
#print md, whisker, np.asarray(selcol)
for elem in range(idxst,idxat):
#print flagpos, elem
if not md-whisker < self.ndarray[flagpos][elem] < md+whisker and not np.isnan(self.ndarray[flagpos][elem]):
#print "Found:", key, self.ndarray[flagpos][elem]
#if key == 'df':
# x = 1/0
try:
if not self.ndarray[flagidx][elem] == '':
#print "Got here", self.ndarray[flagidx][elem]
newflagls = list(self.ndarray[flagidx][elem])
#print newflagls
if newflagls[flagpos] == '-':
newflagls[flagpos] = 0
if not int(newflagls[flagpos]) > 1:
newflagls[flagpos] = '1'
if markall:
for p in flagposls:
if not newflagls[p] > 1:
newflagls[p] = '1'
newflag = ''.join(newflagls)
else:
x=1/0 # Force except
except:
newflagls = []
for idx,el in enumerate(FLAGKEYLIST): # Only key column
if idx == flagpos:
newflagls.append('1')
else:
newflagls.append('-')
if markall:
for p in flagposls:
newflagls[p] = '1'
newflag = ''.join(newflagls)
self.ndarray[flagidx][elem] = newflag
#print self.ndarray[flagidx][elem]
commline = "aof - threshold: {a}, window: {b} sec".format(a=str(threshold), b=str(timerange.total_seconds()))
self.ndarray[commentidx][elem] = commline
infoline = "flag_outlier: at {a} - removed {b} (= {c})".format(a=str(self.ndarray[0][elem]), b=key, c=self.ndarray[flagpos][elem])
logger.info(infoline)
#[starttime,endtime,key,flagid,flagcomment]
flagtime = self.ndarray[0][elem]
if markall:
# if not flagtime and key and commline in flaglist
for fkey in keys:
ls = [flagtime,flagtime,fkey,1,commline]
if not ls in flaglist:
flaglist.append(ls)
else:
flaglist.append([flagtime,flagtime,key,1,commline])
if stdout:
print(infoline)
else:
try:
if not self.ndarray[flagidx][elem] == '':
pass
else:
x=1/0 # Not elegant but working
except:
self.ndarray[flagidx][elem] = ''
self.ndarray[commentidx][elem] = ''
self.ndarray[flagidx] = np.asarray(self.ndarray[flagidx])
self.ndarray[commentidx] = np.asarray(self.ndarray[commentidx])
logger.info('flag_outlier: Outlier flagging finished.')
## METHOD WHICH SORTS/COMBINES THE FLAGLIST
#print("flag_outlier",flaglist)
# Combine subsequent time steps with identical flags to one flag range
newlist = []
srday = sr/(3600.*24.)
# Keep it simple - no cleaning here - just produce new format
if len(flaglist)>0:
#flaglist = sorted(flaglist, key=lambda x: x[0])
for line in flaglist:
newlist.append([num2date(line[0]-numuncert).replace(tzinfo=None),num2date(line[1]-numuncert).replace(tzinfo=None),line[2],line[3],line[4],sensorid,cdate])
else:
newlist = []
#newlist = self.flaglistclean(newlist)
"""
# requires a sorted list
if len(flaglist)>0:
# Different keys are not regarded for here (until 0.4.6)
# 1. Extract all flag for individual keys first
for key in keys:
templist = [l for l in flaglist if l[2] == key]
fllist = sorted(templist, key=lambda x: x[0])
#flaglist = sorted(flaglist, key=lambda x: x[0])
# Startvalue of endtime is firsttime
etprev = fllist[0][1]
prevline = fllist[0]
for line in fllist:
st = line[0]
et = line[1]
diff1 = (et-etprev) # end time diff between current flag and last flag
diff2 = (st-etprev) # diff between current start and last end
srunc = srday+0.01*srday # sampling rate with uncertainty
if diff1 < srunc or diff2 < srunc:
# subsequent time step found -> changing et in line
prevline[1] = et
else:
newlist.append([num2date(prevline[0]).replace(tzinfo=None),num2date(prevline[1]).replace(tzinfo=None),prevline[2],prevline[3],prevline[4],sensorid,cdate])
prevline = line
etprev = et
#save current content of prevline with new et
newlist.append([num2date(prevline[0]).replace(tzinfo=None),num2date(prevline[1]).replace(tzinfo=None),prevline[2],prevline[3],prevline[4],sensorid,cdate])
else:
newlist = []
"""
if returnflaglist:
return newlist
return self
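# Usage sketch for flag_outlier (illustrative; threshold and window are arbitrary):
#   from datetime import timedelta
#   fl = data.flag_outlier(keys=['x','y','z'], threshold=4,
#                          timerange=timedelta(minutes=10), returnflaglist=True)
#   data = data.flag(fl)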
def flag(self, flaglist, removeduplicates=False, debug=False):
"""
DEFINITION:
Apply flaglist to stream. A flaglist typically looks like:
[starttime,endtime,key,flagid,flagcomment]
starttime and endtime are provided as datetime objects
key exists in KEYLIST
flagid is a integer number between 0 and 4
comment is a string of less then 100 characters
PARAMETERS:
- flaglist: (list) as obtained by mpplots plotFlag, database db2flaglist
RETURNS:
- DataStream: flagged version of stream.
EXAMPLE:
>>> flaglist = db.db2flaglist(db,sensorid_data)
>>> data = data.flag(flaglist)
"""
self.progress = 0
# get time range of stream:
st,et = self._find_t_limits()
st = date2num(st)
et = date2num(et)
lenfl = len(flaglist)
logger.info("Flag: Found flaglist of length {}".format(lenfl))
flaglist = [line for line in flaglist if date2num(self._testtime(line[1])) >= st]
flaglist = [line for line in flaglist if date2num(self._testtime(line[0])) <= et]
# Sort flaglist according to startdate (used to speed up flagging procedure)
# BETTER: Sort with input date - otherwise later data might not overwrite earlier...
flaglist = sorted(flaglist, key=lambda x: x[-1])
#flaglist.sort()
## Cleanup flaglist -- remove all inputs with duplicate start and endtime
## (use only last input)
#print("1",flaglist)
def flagclean(flaglist):
## Cleanup flaglist -- remove all inputs with duplicate start and endtime
## (use only last input)
indicies = []
for line in flaglist:
inds = [ind for ind,elem in enumerate(flaglist) if elem[0] == line[0] and elem[1] == line[1] and elem[2] == line[2]]
if len(inds) > 0:
index = inds[-1]
indicies.append(index)
uniqueidx = (list(set(indicies)))
uniqueidx.sort()
#print(uniqueidx)
flaglist = [elem for idx, elem in enumerate(flaglist) if idx in uniqueidx]
return flaglist
if removeduplicates:
flaglist = flagclean(flaglist)
lenfl = len(flaglist)
logger.info("Flag: Relevant flags: {}".format(lenfl))
## Determinig sampling rate for nearby flagging
sr = self.samplingrate()
if lenfl > 0:
for i in range(lenfl):
self.progress = (float(i)/float(lenfl)*100.)
if removeduplicates or debug or lenfl > 100:
if i == int(lenfl/5.):
print("Flag: 20 percent done")
if i == int(lenfl/5.*2.):
print("Flag: 40 percent done")
if i == int(lenfl/5.*3.):
print("Flag: 60 percent done")
if i == int(lenfl/5.*4.):
print("Flag: 80 percent done")
fs = date2num(self._testtime(flaglist[i][0]))
fe = date2num(self._testtime(flaglist[i][1]))
if st < fs and et < fs and st < fe and et < fe:
pass
elif st > fs and et > fs and st > fe and et > fe:
pass
else:
valid_chars='-_.() abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
flaglist[i][4] = ''.join([e for e in list(flaglist[i][4]) if e in list(valid_chars)])
keys = flaglist[i][2].split('_')
for key in keys:
self = self.flag_stream(key,int(flaglist[i][3]),flaglist[i][4],flaglist[i][0],flaglist[i][1],samplingrate = sr,debug=debug)
return self
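# Usage sketch for flag (illustrative; assumes a database connection 'db' providing
# flag information via db2flaglist, as in the docstring example):
#   flaglist = db.db2flaglist(db, data.header.get('SensorID'))
#   data = data.flag(flaglist, removeduplicates=True)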
def flagliststats(self,flaglist, intensive=False, output='stdout'):
"""
DESCRIPTION:
Provides some information on flag statistics
PARAMETER:
flaglist (list) flaglist to be investigated
APPLICATION:
flaglist = db2flaglist(db,'all')
self.flagliststats(flaglist)
"""
amountlist = []
outputt = '##########################################\n'
outputt += ' Flaglist statistics \n'
outputt += '##########################################\n'
outputt += '\n'
outputt += 'A) Total contents: {}\n'.format(len(flaglist))
outputt += '\n'
outputt += 'B) Content for each ID:\n'
#print (flaglist[0], len(flaglist[0]))
if len(flaglist[0]) > 6:
ids = [el[5] for el in flaglist]
uniquenames = list(set(ids))
for name in uniquenames:
amount = len([el[0] for el in flaglist if el[5] == name])
amountlist.append([name,amount])
if intensive:
flagli = [el for el in flaglist if el[5] == name]
index = [el[3] for el in flagli]
uniqueindicies = list(set(index))
reasons = [el[4] for el in flagli]
uniquereasons = list(set(reasons))
intensiveinfo = []
for reason in uniquereasons:
num = len([el for el in flagli if reason == el[4]])
intensiveinfo.append([reason,num])
intensiveinfo = sorted(intensiveinfo,key=lambda x: x[1])
intensiveinfo = ["{} : {}\n".format(e[0],e[1]) for e in intensiveinfo]
amountlist[-1].append(intensiveinfo)
amountlist = sorted(amountlist,key=lambda x: x[1])
for el in amountlist:
outputt += "Dataset: {} \t Amount: {}\n".format(el[0],el[1])
if intensive:
for ele in el[2]:
outputt += " {}".format(ele)
if output=='stdout':
print (outputt)
return outputt
def flaglistclean(self,flaglist,progress=False):
"""
DESCRIPTION:
identify and remove duplicates from flaglist, only the latest inputs are used
start, endtime and key are used to identify duplicates
PARAMETER:
flaglist (list) flaglist to be investigated
APPLICATION:
stream = DataStream()
flaglist = db2flaglist(db,'all')
flaglistwithoutduplicates = stream.flaglistclean(flaglist)
"""
# first step - remove all duplicates
testflaglist = ['____'.join([str(date2num(elem[0])),str(date2num(elem[1])),str(elem[2]),str(elem[3]),str(elem[4]),str(elem[5]),str(date2num(elem[6]))]) for elem in flaglist]
uniques,indi = np.unique(testflaglist,return_index=True)
flaglist = [flaglist[idx] for idx in indi]
# second step - remove all inputs without components
flaglist = [elem for elem in flaglist if not elem[2] == '']
## Cleanup flaglist -- remove all inputs with duplicate start and endtime
## (use only last input)
indicies = []
for ti, line in enumerate(flaglist):
if progress and ti/1000. == np.round(ti/1000.):
print ("Current state: {} percent".format(ti/len(flaglist)*100))
if len(line) > 5:
inds = [ind for ind,elem in enumerate(flaglist) if elem[0] == line[0] and elem[1] == line[1] and elem[2] == line[2] and elem[5] == line[5]]
else:
inds = [ind for ind,elem in enumerate(flaglist) if elem[0] == line[0] and elem[1] == line[1] and elem[2] == line[2]]
if len(inds) > 1:
# get inputs dates for all duplicates and select the latest
dates = [[flaglist[dupind][-1], dupind] for dupind in inds]
indicies.append(sorted(dates)[-1][1])
else:
index = inds[-1]
indicies.append(index)
uniqueidx = (list(set(indicies)))
print ("flaglistclean: found {} unique inputs".format(len(uniqueidx)))
uniqueidx.sort()
flaglist = [flaglist[idx] for idx in uniqueidx]
return flaglist
def stream2flaglist(self, userange=True, flagnumber=None, keystoflag=None, sensorid=None, comment=None):
"""
DESCRIPTION:
Constructs a flaglist input dependent on the content of stream
PARAMETER:
comment (key or string) if a key (or comma separated list of keys) is
found, then the content of this column is used (the first entry if
userange is True, otherwise one comment per stream line)
flagnumber (int) integer number between 0 and 4
userange (bool) if False, each stream line results in a flag,
if True the full time range is marked
"""
### identify any given gaps and flag time ranges regarding gaps
if not comment:
print("stream2flag: you need to provide either a key or a text comment. (e.g. 'str1,str2' or 'Flagged'")
return []
if not flagnumber:
flagnumber = 0
if not keystoflag:
print("stream2flag: you need to provide a list of keys to which you apply the flags (e.g. ['x','z']")
return []
if not sensorid:
print("stream2flag: you need to provide a sensorid")
return []
commentarray = np.asarray([])
uselist = False
if comment in KEYLIST:
pos = KEYLIST.index(comment)
if userange:
comment = self.ndarray[pos][0]
else:
uselist = True
commentarray = self.ndarray[pos]
else:
lst,poslst = [],[]
commentlist = comment.split(',')
try:
for commkey in commentlist:
if commkey in KEYLIST:
#print(commkey)
pos = KEYLIST.index(commkey)
if userange:
lst.append(str(self.ndarray[pos][0]))
else:
poslst.append(pos)
else:
# commkey not in KEYLIST - raise to reach the except branch below
raise ValueError("stream2flaglist: comment key not found")
if userange:
comment = ' : '.join(lst)
else:
uselist = True
resultarray = []
for pos in poslst:
resultarray.append(self.ndarray[pos])
resultarray = np.transpose(np.asarray(resultarray))
commentarray = [''.join(str(lst)) for lst in resultarray]
except:
#comment remains unchanged
pass
now = datetime.utcnow()
res = []
if userange:
st = np.min(self.ndarray[0])
et = np.max(self.ndarray[0])
st = num2date(float(st)).replace(tzinfo=None)
et = num2date(float(et)).replace(tzinfo=None)
for key in keystoflag:
res.append([st,et,key,flagnumber,comment,sensorid,now])
else:
for idx,st in enumerate(self.ndarray[0]):
for key in keystoflag:
st = num2date(float(st)).replace(tzinfo=None)
if uselist:
res.append([st,st,key,flagnumber,commentarray[idx],sensorid,now])
else:
res.append([st,st,key,flagnumber,comment,sensorid,now])
return res
def flaglistmod(self, mode='select', flaglist=[], parameter='key', value=None, newvalue=None, starttime=None, endtime=None):
"""
DEFINITION:
Select/Replace/Delete information in flaglist
parameters are key, flagnumber, comment, startdate, enddate=None
mode delete: if only starttime and endtime are provided then all data in between is removed;
if parameter and value are provided this data is removed, eventually
only between starttime and endtime
APPLICATION
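Usage sketch (hypothetical values):
flaglist = stream.flaglistmod('select', flaglist, parameter='key', value='x')
flaglist = stream.flaglistmod('replace', flaglist, parameter='comment', value='lightning', newvalue='thunderstorm')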
"""
num = 0
# convert start and end to correct format
if parameter == 'key':
num = 2
elif parameter == 'flagnumber':
num = 3
elif parameter == 'comment':
num = 4
elif parameter == 'sensorid':
num = 5
if mode in ['select','replace'] or (mode=='delete' and value):
if starttime:
starttime = self._testtime(starttime)
flaglist = [elem for elem in flaglist if elem[1] > starttime]
if endtime:
endtime = self._testtime(endtime)
flaglist = [elem for elem in flaglist if elem[0] < endtime]
elif mode == 'delete' and not value:
print ("Only deleting")
flaglist1, flaglist2 = [],[]
if starttime:
starttime = self._testtime(starttime)
flaglist1 = [elem for elem in flaglist if elem[1] < starttime]
if endtime:
endtime = self._testtime(endtime)
flaglist2 = [elem for elem in flaglist if elem[0] > endtime]
flaglist1.extend(flaglist2)
flaglist = flaglist1
if mode == 'select':
if num>0 and value:
if num == 4:
flaglist = [elem for elem in flaglist if elem[num].find(value) >= 0]
elif num == 3:
flaglist = [elem for elem in flaglist if elem[num] == int(value)]
else:
flaglist = [elem for elem in flaglist if elem[num] == value]
elif mode == 'replace':
if num>0 and value:
for idx, elem in enumerate(flaglist):
if num == 4:
if elem[num].find(value) >= 0:
flaglist[idx][num] = newvalue
elif num == 3:
if elem[num] == int(value):
flaglist[idx][num] = int(newvalue)
else:
if elem[num] == value:
flaglist[idx][num] = newvalue
elif mode == 'delete':
if num>0 and value:
if num == 4:
flaglist = [elem for elem in flaglist if elem[num].find(value) < 0]
elif num == 3:
flaglist = [elem for elem in flaglist if not elem[num] == int(value)]
else:
flaglist = [elem for elem in flaglist if not elem[num] == value]
return flaglist
def flaglistadd(self, flaglist, sensorid, keys, flagnumber, comment, startdate, enddate=None):
"""
DEFINITION:
Add a specific input to a flaglist
Flaglist elements look like
[st,et,key,flagnumber,comment,sensorid,now]
APPLICATION:
newflaglist = stream.flaglistadd(oldflaglist,sensorid, keys, flagnumber, comment, startdate, enddate)
"""
# convert start and end to correct format
st = self._testtime(startdate)
if enddate:
et = self._testtime(enddate)
else:
et = st
now = datetime.utcnow()
if keys in ['all','All','ALL']:
keys = KEYLIST
for key in keys:
flagelem = [st,et,key,flagnumber,comment,sensorid,now]
exists = [elem for elem in flaglist if elem[:5] == flagelem[:5]]
if len(exists) == 0:
flaglist.append(flagelem)
else:
print ("flaglistadd: Flag already exists")
return flaglist
def flag_stream(self, key, flag, comment, startdate, enddate=None, samplingrate=0., debug=False):
"""
DEFINITION:
Add flags to specific times or time ranges (if enddate is provided).
PARAMETERS:
Variables:
- key: (str) Column to apply flag to, e.g. 'x'
- flag: (int) 0 ok, 1 remove, 2 force ok, 3 force remove,
4 merged from other instrument
- comment: (str) The reason for flag
- startdate: (datetime object) the date of the (first) datapoint to remove
Kwargs:
- enddate: (datetime object) the enddate of a time range to be flagged
- samplingrate: (float) in seconds, needs to be provided for effective nearby search
RETURNS:
- DataStream: Input stream with flags and comments.
EXAMPLE:
>>> data = data.flag_stream('x',0,'Lawnmower',flag1,flag1_end)
APPLICATION:
"""
# TODO:
# make flag_stream to accept keylists -> much faser for multiple column data
sr = samplingrate
if not key in KEYLIST:
logger.error("flag_stream: %s is not a valid key." % key)
return self
if not flag in [0,1,2,3,4]:
logger.error("flag_stream: %s is not a valid flag." % flag)
return self
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype = True
elif not len(self) > 0:
return DataStream()
startdate = self._testtime(startdate)
if not enddate:
# Set enddate to startdate
# Hereby flag nearest might be used later
enddate = startdate
"""
start = date2num(startdate)
check_startdate, val = self.findtime(start)
if check_startdate == 0:
logger.info("flag_stream: No data at given date for flag. Finding nearest data point.")
if ndtype:
time = self.ndarray[0]
else:
time = self._get_column('time')
#print start, len(time)
new_endtime, index = find_nearest(time, start)
if new_endtime > start:
startdate = num2date(start)
enddate = num2date(new_endtime)
else:
startdate = num2date(new_endtime)
enddate = num2date(start)
else:
enddate = startdate
"""
else:
enddate = self._testtime(enddate)
### ######## IF STARTDATE == ENDDATE
### MODIFYED TO STARTDATE-Samplingrate/3, ENDDATE + Samplingrate/3
### Taking 1/3 is arbitrary.
### This helps to apply flagging info to any higher resolution record
### which does not contain the exact time stamp.
### You will likely exclude more data than necessary.
### Flag the high resolution data set to avoid that.
def rangeExtend(startdate,enddate,samplingrate,divisor=3):
if startdate == enddate:
startdate = startdate-timedelta(seconds=samplingrate/divisor)
enddate = enddate+timedelta(seconds=samplingrate/divisor)
start = date2num(startdate)
end = date2num(enddate)
return start,end
else:
start = date2num(startdate)
end = date2num(enddate)
return start,end
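# Worked example (sketch): for a one-minute record (samplingrate=60) flagged at a single
# timestamp, the range is extended by +/- 20 s (60/3), so a matching higher resolution
# record is still caught even if its time stamps differ slightly.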
pos = FLAGKEYLIST.index(key)
if debug:
print("flag_stream: Flag",startdate, enddate)
start = date2num(startdate)
end = date2num(enddate)
mint = np.min(self.ndarray[0])
maxt = np.max(self.ndarray[0])
if start < mint and end < mint:
st = 0
ed = 0
elif start > maxt and end > maxt:
st = 0
ed = 0
else:
### Modified to use nearest value to be flagged if flagtimes
### overlap with streams timerange
### find_nearest is probably very slowly...
### Using startidx values to speed up the process at least for later data
# Get start and end indicies:
if debug:
ti1 = datetime.utcnow()
st, ls = self.findtime(startdate,mode='argmax')
# st is the index of the start time, ls the corresponding value -- TODO: allow key lists here for faster multi-column flagging
if debug:
ti2 = datetime.utcnow()
print ("flag_stream: findtime duration", ti2-ti1)
#if debug:
# ti1 = datetime.utcnow()
# testls = nonzero(self.ndarray[0]==startdate)
# ti2 = datetime.utcnow()
# print ("Findtime duration -alternative", ti2-ti1)
if st == 0:
#print("Flag_stream: slowly start",st)
if not sr == 0:
# Determine sampling rate if not done yet
start,end = rangeExtend(startdate,enddate,sr)
ls,st = find_nearest(self.ndarray[0],start)
sti = st-2
if sti < 0:
sti = 0
ed, le = self.findtime(enddate,startidx=sti,mode='argmax')
if ed == 0:
#print("Flag_stream: slowly end",ed)
if not sr == 0:
# Determine sampling rate if not done yet
start,end = rangeExtend(startdate,enddate,sr)
le, ed = find_nearest(self.ndarray[0],end) ### TODO use startundex here as well
if ed == len(self.ndarray[0]):
ed = ed-1
# Create a defaultflag
defaultflag = ['-' for el in FLAGKEYLIST]
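# The flag column stores one character per FLAGKEYLIST position (e.g. '0-0-1...'),
# with '-' marking columns without flag information; flagls below is such a list of characters.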
if debug:
ti3 = datetime.utcnow()
print ("Full Findtime duration", ti3-ti1)
print("flagging", st, ed)
if ndtype:
array = [[] for el in KEYLIST]
flagind = KEYLIST.index('flag')
commentind = KEYLIST.index('comment')
# Check whether flag and comment are existing - if not create empty
if not len(self.ndarray[flagind]) > 0:
array[flagind] = [''] * len(self.ndarray[0])
else:
array[flagind] = list(self.ndarray[flagind])
if not len(self.ndarray[commentind]) > 0:
array[commentind] = [''] * len(self.ndarray[0])
else:
array[commentind] = list(self.ndarray[commentind])
# Now either modify existing or add new flag
if st==0 and ed==0:
pass
else:
t3a = datetime.utcnow()
for i in range(st,ed+1):
#if self.ndarray[flagind][i] == '' or self.ndarray[flagind][i] == '-':
if array[flagind][i] == '' or array[flagind][i] == '-':
flagls = defaultflag
else:
flagls = list(array[flagind][i])
# if the existing flag string is shorter, because new columns were added later to ndarray
if len(flagls) < pos:
flagls.extend(['-' for j in range(pos+1-len(flagls))])
flagls[pos] = str(flag)
array[flagind][i] = ''.join(flagls)
array[commentind][i] = comment
self.ndarray[flagind] = np.array(array[flagind], dtype=object)
self.ndarray[commentind] = np.array(array[commentind], dtype=object)
# up to 0.3.98 the following code was used (~10 times slower)
# further significant speed up requires some structural changes:
# 1. use keylist here
#self.ndarray[flagind] = np.asarray(array[flagind]).astype(object)
#self.ndarray[commentind] = np.asarray(array[commentind]).astype(object)
else:
for elem in self:
if elem.time >= start and elem.time <= end:
fllist = list(elem.flag)
if not len(fllist) > 1:
fllist = defaultflag
fllist[pos] = str(flag)
elem.flag=''.join(fllist)
elem.comment = comment
if (flag == 1 or flag == 3) and debug:
if enddate:
#print ("flag_stream: Flagged data from %s to %s -> (%s)" % (startdate.isoformat(),enddate.isoformat(),comment))
try:
logger.info("flag_stream: Flagged data from %s to %s -> (%s)" % (startdate.isoformat().encode('ascii','ignore'),enddate.isoformat().encode('ascii','ignore'),comment.encode('ascii','ignore')))
except:
pass
else:
try:
logger.info("flag_stream: Flagged data at %s -> (%s)" % (startdate.isoformat().encode('ascii','ignore'),comment.encode('ascii','ignore')))
except:
pass
return self
def simplebasevalue2stream(self,basevalue,**kwargs):
"""
DESCRIPTION:
simple baselvalue correction using a simple basevalue list
PARAMETERS:
basevalue (list): [baseH,baseD,baseZ]
keys (list): default = 'x','y','z'
APPLICATION:
used by stream.baseline
"""
mode = kwargs.get('mode')
keys = ['x','y','z']
# Changed that - 49 sec before, now less than 2 secs
if not len(self.ndarray[0]) > 0:
print("simplebasevalue2stream: requires ndarray")
return self
#1. calculate function value for each data time step
array = [[] for key in KEYLIST]
array[0] = self.ndarray[0]
# get x array for baseline
#indx = KEYLIST.index('x')
for key in KEYLIST:
ind = KEYLIST.index(key)
if key in keys: # new
#print keys.index(key)
ar = self.ndarray[ind].astype(float)
if key == 'y':
#indx = KEYLIST.index('x')
#Hv + Hb; Db + atan2(y,H_corr) Zb + Zv
#print type(self.ndarray[ind]), key, self.ndarray[ind]
array[ind] = np.arctan2(np.asarray(list(ar)),np.asarray(list(arrayx)))*180./np.pi + basevalue[keys.index(key)]
self.header['col-y'] = 'd'
self.header['unit-col-y'] = 'deg'
else:
array[ind] = ar + basevalue[keys.index(key)]
if key == 'x': # remember this for correct y determination
arrayx = array[ind]
else: # new
if len(self.ndarray[ind]) > 0:
array[ind] = self.ndarray[ind].astype(object)
self.header['DataComponents'] = 'HDZ'
return DataStream(self,self.header,np.asarray(array))
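# Usage sketch (hypothetical basevalues [baseH, baseD, baseZ] in nT/deg):
# hdzstream = variostream.simplebasevalue2stream([20000.0, 0.0, 43000.0])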
def func2stream(self,funclist,**kwargs):
"""
DESCRIPTION:
combine data stream and functions obtained by fitting and interpolation. Possible combination
modes are 'add' (default), subtract 'sub', divide 'div' and 'multiply'. Furthermore, the
function values can replace the original values at the given timesteps of the stream
PARAMETERS:
funclist (list of functions): required - each function is an output of stream.fit or stream.interpol
#function (function): required - output of stream.fit or stream.interpol
keys (list): default = ['x','y','z']
mode (string): one of 'add','sub','div','multiply','values' - default = 'add'
APPLICATION:
used by stream.baseline
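EXAMPLE (sketch):
func = stream.fit(['x','y','z'], fitfunc='harmonic')
residuals = stream.func2stream(func, mode='sub')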
"""
keys = kwargs.get('keys')
fkeys = kwargs.get('fkeys')
mode = kwargs.get('mode')
if not keys:
keys = ['x','y','z']
if not mode:
mode = 'add'
if fkeys and not len(fkeys) == len(keys):
fkeys=None
logger.warning("func2stream: provided fkeys do not match keys")
if isinstance(funclist[0], dict):
funct = [funclist]
else:
funct = funclist # TODO: cycle through list
totalarray = [[] for key in KEYLIST]
posstr = KEYLIST.index('str1')
testx = []
for function in funct:
#print ("Testing", function)
if not function:
return self
# Changed that - 49 sec before, now less than 2 secs
if not len(self.ndarray[0]) > 0:
print("func2stream: requires ndarray - trying old LineStruct functions")
if mode == 'add':
return self.func_add(function, keys=keys)
elif mode == 'sub':
return self.func_subtract(function, keys=keys)
else:
return self
#1. calculate function value for each data time step
array = [[] for key in KEYLIST]
array[0] = self.ndarray[0]
dis_done = False
# get x array for baseline
#indx = KEYLIST.index('x')
#arrayx = self.ndarray[indx].astype(float)
functimearray = (self.ndarray[0].astype(float)-function[1])/(function[2]-function[1])
for key in KEYLIST:
validkey = False
ind = KEYLIST.index(key)
if key in keys: # new
#print ("DEALING: ", key)
keyind = keys.index(key)
if fkeys:
fkey = fkeys[keyind]
else:
fkey = key
ar = np.asarray(self.ndarray[ind]).astype(float)
try:
test = function[0]['f'+fkey](functimearray)
validkey = True
except:
pass
if mode == 'add' and validkey:
print ("here", ar, function[0]['f'+fkey](functimearray))
array[ind] = ar + function[0]['f'+fkey](functimearray)
elif mode == 'addbaseline' and validkey:
if key == 'y':
#indx = KEYLIST.index('x')
#Hv + Hb; Db + atan2(y,H_corr) Zb + Zv
#print type(self.ndarray[ind]), key, self.ndarray[ind]
array[ind] = np.arctan2(np.asarray(list(ar)),np.asarray(list(arrayx)))*180./np.pi + function[0]['f'+fkey](functimearray)
self.header['col-y'] = 'd'
self.header['unit-col-y'] = 'deg'
else:
#print("func2stream", function, function[0], function[0]['f'+key],functimearray)
array[ind] = ar + function[0]['f'+fkey](functimearray)
if len(array[posstr]) == 0:
#print ("Assigned values to str1: function {}".format(function[1]))
array[posstr] = ['c']*len(ar)
if len(testx) > 0 and not dis_done:
# identify change from number to nan
# add discontinuity marker there
#print ("Here", testx)
prevel = np.nan
for idx, el in enumerate(testx):
if not np.isnan(prevel) and np.isnan(el):
array[posstr][idx] = 'd'
#print ("Modified str1 at {}".format(idx))
break
prevel = el
dis_done = True
if key == 'x': # remember this for correct y determination
arrayx = array[ind]
testx = function[0]['f'+fkey](functimearray)
if key == 'dx': # use this column to test if delta values are already provided
testx = function[0]['f'+fkey](functimearray)
elif mode in ['sub','subtract'] and validkey:
array[ind] = ar - function[0]['f'+fkey](functimearray)
elif mode == 'values' and validkey:
array[ind] = function[0]['f'+fkey](functimearray)
elif mode == 'div' and validkey:
array[ind] = ar / function[0]['f'+fkey](functimearray)
elif mode == 'multiply' and validkey:
array[ind] = ar * function[0]['f'+fkey](functimearray)
elif validkey:
print("func2stream: mode not recognized")
else: # new
if len(self.ndarray[ind]) > 0:
array[ind] = np.asarray(self.ndarray[ind]).astype(object)
for idx, col in enumerate(array):
if len(totalarray[idx]) > 0 and not idx == 0:
totalcol = totalarray[idx]
for j,el in enumerate(col):
if idx < len(NUMKEYLIST)+1 and not np.isnan(el) and np.isnan(totalcol[j]):
totalarray[idx][j] = array[idx][j]
if idx > len(NUMKEYLIST) and not el == 'c' and totalcol[j] == 'c':
totalarray[idx][j] = 'd'
else:
totalarray[idx] = array[idx]
return DataStream(self,self.header,np.asarray(totalarray,dtype=object))
def func_add(self,funclist,**kwargs):
"""
Add a function to the selected values of the data stream -> e.g. get baseline
Optional:
keys (default = 'x','y','z')
"""
keys = kwargs.get('keys')
mode = kwargs.get('mode')
if not keys:
keys = ['x','y','z']
if not mode:
mode = 'add'
if isinstance(funclist[0], dict):
funct = [funclist]
else:
funct = funclist
function = funct[0] # Direct call of old version only accepts single function
# Changed that - 49 sec before, now less than 2 secs
if len(self.ndarray[0]) > 0:
#1. calculate function value for each data time step
array = [[] for key in KEYLIST]
array[0] = self.ndarray[0]
functimearray = (self.ndarray[0].astype(float)-function[1])/(function[2]-function[1])
#print functimearray
for key in keys:
ind = KEYLIST.index(key)
if mode == 'add':
array[ind] = self.ndarray[ind] + function[0]['f'+key](functimearray)
elif mode == 'sub':
array[ind] = self.ndarray[ind] - function[0]['f'+key](functimearray)
elif mode == 'values':
array[ind] = function[0]['f'+key](functimearray)
elif mode == 'div':
array[ind] = self.ndarray[ind] / function[0]['f'+key](functimearray)
elif mode == 'multiply':
array[ind] = self.ndarray[ind] * function[0]['f'+key](functimearray)
else:
print("func2stream: mode not recognized")
return DataStream(self,self.header,np.asarray(array,dtype=object))
for elem in self:
# check whether time step is in function range
if function[1] <= elem.time <= function[2]:
functime = (elem.time-function[1])/(function[2]-function[1])
for key in keys:
if not key in KEYLIST[1:16]:
raise ValueError("Column key not valid")
fkey = 'f'+key
exec('keyval = elem.'+key)
if fkey in function[0] and not isnan(keyval):
try:
newval = keyval + function[0][fkey](functime)
except:
newval = float('nan')
exec('elem.'+key+' = newval')
else:
pass
else:
pass
return self
def func_subtract(self,funclist,**kwargs):
"""
Subtract a function from the selected values of the data stream -> e.g. obtain Residuals
Optional:
keys (default = ['x','y','z'])
:type order int
:param order : 0 -> stream - function; 1 -> function - stream
"""
keys = kwargs.get('keys')
order = kwargs.get('order')
st = DataStream()
st = self.copy()
if isinstance(funclist[0], dict):
funct = [funclist]
else:
funct = funclist
function = funct[0] # Direct call of old version only accepts single function
"""
for el in self:
li = LineStruct()
li.time = el.time
li.x = el.x
li.y = el.y
li.z = el.z
st.add(li)
"""
if not order:
order = 0
if not keys:
keys = ['x','y','z']
for elem in st:
# check whether time step is in function range
if function[1] <= elem.time <= function[2]:
functime = (elem.time-function[1])/(function[2]-function[1])
for key in keys:
if not key in KEYLIST[1:16]:
raise ValueError("Column key not valid")
fkey = 'f'+key
exec('keyval = elem.'+key)
if fkey in function[0] and not isnan(keyval):
try:
if order == 0:
newval = keyval - function[0][fkey](functime)
else:
newval = function[0][fkey](functime) - keyval
except:
newval = float('nan')
exec('elem.'+key+' = newval')
else:
pass
else:
pass
return st
def func2header(self,funclist,debug=False):
"""
DESCRIPTION
Add a list of functions into the data header
"""
if isinstance(funclist[0], dict):
funct = [funclist]
else:
funct = funclist
self.header['DataFunctionObject'] = funct
return self
def GetKeyName(self,key):
"""
DESCRIPTION
get the content name of a specific key
will scan header information until successful:
(1) col-"key" names
(2) ColumnContent header info
(3) SensorElements header info
if no Name for the key is found, then the key itself is returned
APPLICATION:
element = datastream.GetKeyName('var1')
"""
if not key in KEYLIST:
print ("key not in KEYLIST - aborting")
return ''
element = ''
# One
try:
element = self.header.get("col-{}".format(key))
if not element == '':
return element
except:
pass
# Two
try:
element = self.header.get('ColumnContents','').split(',')[KEYLIST.index(key)]
if not element == '':
return element
except:
pass
# Three
try:
idx = self.header.get('SensorKeys','').split(',').index(key)
element = self.header.get('SensorElements','').split(',')[idx]
if not element == '':
return element
except:
pass
return key
def GetKeyUnit(self,key):
"""
DESCRIPTION
get the content name of a specific key
will scan header information until successful:
(1) unit-col-"key" names
(2) ColumnUnit header info
if no unit for the key is found, then an empty string is returned
APPLICATION:
unit = datastream.GetKeyUnit('var1')
"""
if not key in KEYLIST:
print ("key not in KEYLIST - aborting")
return ''
unit = ''
# One
try:
unit = self.header.get("unit-col-{}".format(key))
if not unit == '':
return unit
except:
pass
# Two
try:
unit = self.header.get('ColumnUnits','').split(',')[KEYLIST.index(key)]
if not unit == '':
return unit
except:
pass
return unit
def get_gaps(self, **kwargs):
"""
DEFINITION:
Takes the dominant sample frequency and fills nan into non-existing time steps:
This function provides the basis for discontinuous plots and gap analysis and proper filtering.
PARAMETERS:
Variables:
---
Kwargs:
- accuracy: (float) time relative to a day - default 1 sec
- gapvariable: (string) - referring to stream column - default='var5' - This column
is overwritten with 0 (data) and 1 (no data).
- key: (string) - referring to a data column e.g. key='x'. If given then all NaN values with existing time steps are also marked by '1' in the gapvariable line for this key
RETURNS:
- stream: (Datastream)
EXAMPLE:
>>> stream_with_gaps_filled = stream_with_gaps.get_gaps(key='f')
APPLICATION:
used by nfilter() for correct filtering
CHANGES:
Last updated and tested with nfilter function by leon 2014-07-22
"""
accuracy = kwargs.get('accuracy')
key = kwargs.get('key')
gapvariable = kwargs.get('gapvariable')
debug = kwargs.get('debug')
if key in KEYLIST:
gapvariable = True
if not gapvariable:
gapvariable = 'var5'
if not self.length()[0] > 1:
print ("get_gaps: Stream does not contain data - aborting")
return self
# Better use get_sampling period as samplingrate is rounded
#spr = self.get_sampling_period()
#newsps = newsp*3600.0*24.0
newsps = self.samplingrate()
newsp = newsps/3600.0/24.0
if not accuracy:
accuracy = 0.9/(3600.0*24.0) # one second relative to day
accuracy = 0.05*newsp # 5 percent of samplingrate
if newsps < 0.9 and not accuracy:
accuracy = (newsps-(newsps*0.1))/(3600.0*24.0)
logger.info('--- Starting filling gaps with NANs at %s ' % (str(datetime.now())))
stream = self.copy()
prevtime = 0
ndtype = False
if len(stream.ndarray[0]) > 0:
maxtime = stream.ndarray[0][-1]
mintime = stream.ndarray[0][0]
length = len(stream.ndarray[0])
sourcetime = stream.ndarray[0]
ndtype = True
else:
mintime = self[0].time
maxtime = self[-1].time
if debug:
print("Time range:", mintime, maxtime)
print("Length, samp_per and accuracy:", self.length()[0], newsps, accuracy)
shift = 0
if ndtype:
# Get time diff and expected count
timediff = maxtime - mintime
expN = int(round(timediff/newsp))+1
if debug:
print("Expected length vs actual length:", expN, length)
if expN == len(sourcetime):
# Found the expected amount of time steps - no gaps
logger.info("get_gaps: No gaps found - Returning")
return stream
else:
# correct way (will be used by default) - does not use any accuracy value
#projtime = np.linspace(mintime, maxtime, num=expN, endpoint=True)
#print("proj:", projtime, len(projtime))
# find values or projtime, which are not in sourcetime
#dif = setdiff1d(projtime,sourcetime, assume_unique=True)
#print (dif, len(dif))
#print (len(dif),len(sourcetime),len(projtime))
diff = sourcetime[1:] - sourcetime[:-1]
num_fills = np.round(diff / newsp) - 1
getdiffids = np.where(diff > newsp+accuracy)[0]
logger.info("get_gaps: Found gaps - Filling nans to them")
if debug:
print ("Here", diff, num_fills, newsp, getdiffids)
missingt = []
# Get critical differences and number of missing steps
for i in getdiffids:
#print (i, sourcetime[i-1], sourcetime[i], sourcetime[i+1])
nf = num_fills[i]
# if nf is larger than zero then append the missing time steps to the missingt list
if nf > 0:
for n in range(int(nf)): # add n+1 * samplingrate for each missing value
missingt.append(sourcetime[i]+(n+1)*newsp)
print ("Filling {} gaps".format(len(missingt)))
# Cycle through stream and append nans to each column for missing time steps
nans = [np.nan] * len(missingt)
empts = [''] * len(missingt)
gaps = [0.0] * len(missingt)
for idx,elem in enumerate(stream.ndarray):
if idx == 0:
# append missingt list to array element
elem = list(elem)
lenelem = len(elem)
elem.extend(missingt)
stream.ndarray[idx] = np.asarray(elem).astype(object)
elif len(elem) > 0:
# append nans list to array element
elem = list(elem)
if KEYLIST[idx] in NUMKEYLIST or KEYLIST[idx] == 'sectime':
elem.extend(nans)
else:
elem.extend(empts)
stream.ndarray[idx] = np.asarray(elem).astype(object)
elif KEYLIST[idx] == gapvariable:
# append nans list to array element
elem = [1.0]*lenelem
elem.extend(gaps)
stream.ndarray[idx] = np.asarray(elem).astype(object)
return stream.sorting()
else:
stream = DataStream()
for elem in self:
if abs((prevtime+newsp) - elem.time) > accuracy and not prevtime == 0:
currtime = num2date(prevtime)+timedelta(seconds=newsps)
while currtime <= num2date(elem.time):
newline = LineStruct()
exec('newline.'+gapvariable+' = 1.0')
newline.time = date2num(currtime)
stream.add(newline)
currtime += timedelta(seconds=newsps)
else:
exec('elem.'+gapvariable+' = 0.0')
if key in KEYLIST:
if isnan(eval('elem.'+key)):
exec('elem.'+gapvariable+' = 1.0')
stream.add(elem)
prevtime = elem.time
logger.info('--- Filling gaps finished at %s ' % (str(datetime.now())))
if debug:
print("Ending:", stream[0].time, stream[-1].time)
return stream.sorting()
def get_rotationangle(self, xcompensation=0,keys=['x','y','z'],**kwargs):
"""
DESCRIPTION:
"Estimating" the rotation angle towards a magnetic coordinate system
assuming z to be vertical down. Please note: You need to provide a
complete horizontal vector including either the x compensation field
or if not available an annual estimate of the vector. This method can be used
to determine reorientation characteristics in order to accurately apply
HDZ optimized basevalue calculations.
RETURNS:
rotangle (float) The estimated rotation angle in degree
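EXAMPLE (sketch, hypothetical values):
rotangle = variostream.get_rotationangle(xcompensation=21000)
For resulting mean values meanx = 20500 nT and meany = -700 nT the method
returns arctan2(700, 20500)*180/pi, i.e. roughly 2 degrees.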
"""
annualmeans = kwargs.get('annualmeans')
#1. get vector from data
# x = y*tan(dec)
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
logger.error('get_rotation: provided keylist need to have three components.')
return self
logger.info('get_rotation: Determining rotation angle towards a magnetic coordinate system assuming z to be vertical down.')
ind1 = KEYLIST.index(keys[0])
ind2 = KEYLIST.index(keys[1])
ind3 = KEYLIST.index(keys[2])
if len(self.ndarray[0]) > 0:
if len(self.ndarray[ind1]) > 0 and len(self.ndarray[ind2]) > 0 and len(self.ndarray[ind3]) > 0:
# get mean disregarding nans
xl = [el for el in self.ndarray[ind1] if not np.isnan(el)]
yl = [el for el in self.ndarray[ind2] if not np.isnan(el)]
if annualmeans:
meanx = annualmeans[0]
else:
meanx = np.mean(xl)+xcompensation
meany = np.mean(yl)
# get rotation angle so that meany == 0
#print ("Rotation",meanx, meany)
#zeroy = meanx*np.sin(ra)+meany*np.cos(ra)
#-meany/meanx = np.tan(ra)
rotangle = np.arctan2(-meany,meanx) * (180.) / np.pi
logger.info('getrotation: Rotation angle determined: {} deg'.format(rotangle))
return rotangle
def get_sampling_period(self):
"""
returns the dominant sampling period in units of days
(median of the time differences between consecutive records)
"""
# For proper application - duplicates are removed
self = self.removeduplicates()
if len(self.ndarray[0]) > 0:
timecol = self.ndarray[0].astype(float)
else:
timecol= self._get_column('time')
# New way:
if len(timecol) > 1:
diffs = np.asarray(timecol[1:]-timecol[:-1])
diffs = diffs[~np.isnan(diffs)]
me = np.median(diffs)
st = np.std(diffs)
diffs = [el for el in diffs if el <= me+2*st and el >= me-2*st]
return np.median(diffs)
else:
return 0.0
"""
timedifflist = [[0,0]]
timediff = 0
if len(timecol) <= 1000:
testrange = len(timecol)
else:
testrange = 1000
print "Get_sampling_rate", np.asarray(timecol[1:]-timecol[:-1])
print "Get_sampling_rate", np.median(np.asarray(timecol[1:]-timecol[:-1]))*3600.*24.
for idx, val in enumerate(timecol[:testrange]):
if idx > 1 and not isnan(val):
timediff = np.round((val-timeprev),7)
found = 0
for tel in timedifflist:
if tel[1] == timediff:
tel[0] = tel[0]+1
found = 1
if found == 0:
timedifflist.append([1,timediff])
timeprev = val
#print self
if not len(timedifflist) == 0:
timedifflist.sort(key=lambda x: int(x[0]))
# get the most often found timediff
domtd = timedifflist[-1][1]
else:
logger.error("get_sampling_period: unkown problem - returning 0")
domtd = 0
if not domtd == 0:
return domtd
else:
try:
return timedifflist[-2][1]
except:
logger.error("get_sampling_period: could not identify dominant sampling rate")
return 0
"""
def samplingrate(self, **kwargs):
"""
DEFINITION:
returns a rounded value of the sampling rate
in seconds
and updates the header information
"""
# XXX include that in the stream reading process....
digits = kwargs.get('digits')
notrounded = kwargs.get('notrounded')
if not digits:
digits = 1
if not self.length()[0] > 1:
return 0.0
sr = self.get_sampling_period()*24*3600
unit = ' sec'
val = sr
# Create a suitable rounding function:
# Use simple rounds if sr > 60 secs
# Check accuracy for sr < 10 secs using three additional digits:
# if abs(sr*multi - floor(sr*multi))*1000 is small, round down (e.g. 1.002 -> 1.0), otherwise keep more digits (e.g. 0.998 -> 0.998)
if sr < 0.05:
for i in range(0,5):
multi = 10**i
srfloor = np.floor(sr*multi)
if srfloor >= 1:
# found multiplicator
# now determine significance taking into account three more digits
digs = np.floor(np.abs(sr*multi-srfloor)*1000)
if digs<5: # round to zero
val = np.round(srfloor/multi,1)
else:
val = np.round(sr,5)
break
elif sr < 59:
for i in range(0,3):
multi = 10**i
srfloor = np.floor(sr*multi)
if srfloor >= 1:
# found multiplicator
# now determine significance taking into account three more digits
digs = np.floor(np.abs(sr*multi-srfloor)*1000)
if digs<5: # round to zero
val = np.round(srfloor/multi,1)
else:
val = np.round(sr,3)
break
else:
val = np.round(sr,1)
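# Examples of the rounding behaviour (sketch): a raw value of 1.0002 s is reported
# as 1.0 s, 0.998 s is kept as 0.998 s, and values above 59 s are rounded to one decimal.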
"""
if np.round(sr*10.,0) == 0:
val = np.round(sr,2)
#unit = ' Hz'
elif np.round(sr,0) == 0:
if 0.09 < sr < 0.11:
val = np.round(sr,digits)
else:
val = np.round(sr,2)
#unit = ' Hz'
else:
val = np.round(sr,0)
"""
if notrounded:
val = sr
self.header['DataSamplingRate'] = str(val) + unit
return val
def integrate(self, **kwargs):
"""
DESCRIPTION:
Method to integrate selected columns respect to time.
-- Using scipy.integrate.cumtrapz
VARIABLES:
optional:
keys: (list) default = ['x','y','z'] - provide a limited key list
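EXAMPLE (sketch):
integrated = stream.integrate(keys=['x'])   # result is written to column 'dx'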
"""
logger.info('--- Integrating started at %s ' % str(datetime.now()))
keys = kwargs.get('keys')
if not keys:
keys = ['x','y','z']
array = [[] for key in KEYLIST]
ndtype = False
if len(self.ndarray[0])>0:
ndtype = True
t = self.ndarray[0]
array[0] = t
else:
t = self._get_column('time')
for key in keys:
if ndtype:
ind = KEYLIST.index(key)
val = self.ndarray[ind]
array[ind] = np.asarray(val)
else:
val = self._get_column(key)
dval = sp.integrate.cumtrapz(np.asarray(val),t)
dval = np.insert(dval, 0, 0) # Prepend 0 to maintain original length
if ndtype:
ind = KEYLIST.index('d'+key)
array[ind] = np.asarray(dval)
else:
self._put_column(dval, 'd'+key)
self.ndarray = np.asarray(array)
logger.info('--- integration finished at %s ' % str(datetime.now()))
return self
def interpol(self, keys, **kwargs):
"""
DEFINITION:
Uses scipy.interpolate.interp1d to interpolate streams.
PARAMETERS:
Variables:
- keys: (list) List of keys to interpolate.
Kwargs:
- kind: (str) type of interpolation. Options:
linear = linear - Default
slinear = spline (first order)
quadratic = spline (second order)
cubic = spline (third order)
nearest = snap to the nearest data point
zero = zeroth-order spline
(see scipy.interpolate.interp1d for details)
- timerange: (timedelta object) default=timedelta(hours=1).
- fitdegree: (float) default=4.
- knotstep: (float < 0.5) determines the amount of knots:
amount = 1/knotstep ---> VERY smooth 0.1 | NOT VERY SMOOTH 0.001
RETURNS:
- func: (list) Contains the following:
list[0]: (dict) {'f+key': interpolate function}
list[1]: (float) date2num value of minimum timestamp
list[2]: (float) date2num value of maximum timestamp
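Note (sketch): to evaluate the returned functions at an arbitrary time t (as a
date2num value), normalize it first, e.g. value = func[0]['fx']((t-func[1])/(func[2]-func[1]))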
EXAMPLE:
>>> int_data = pos_data.interpol(['f'])
APPLICATION:
"""
kind = kwargs.get('kind')
if not kind:
kind = 'linear'
if kind not in ['linear','slinear','quadratic','cubic','nearest','zero']:
logger.warning("interpol: Interpolation kind %s not valid. Using linear interpolation instead." % kind)
kind = 'linear'
ndtype = False
if len(self.ndarray[0]) > 0:
t = self.ndarray[0]
ndtype = True
else:
t = self._get_column('time')
nt,sv,ev = self._normalize(t)
sp = self.get_sampling_period()
functionkeylist = {}
logger.info("interpol: Interpolating stream with %s interpolation." % kind)
for key in keys:
if not key in NUMKEYLIST:
logger.error("interpol: Column key not valid!")
if ndtype:
ind = KEYLIST.index(key)
val = self.ndarray[ind].astype(float)
else:
val = self._get_column(key)
# interpolate NaN values
nans, xxx= nan_helper(val)
try: # Try to interpolate nan values
val[nans]= np.interp(xxx(nans), xxx(~nans), val[~nans])
except:
#val[nans]=int(nan)
pass
if len(val)>1:
functionkeylist['f'+key] = interpolate.interp1d(nt, val, kind=kind)
else:
logger.warning("interpol: interpolation of zero length data set - wont work.")
pass
logger.info("interpol: Interpolation complete.")
func = [functionkeylist, sv, ev]
return func
def interpolate_nans(self, keys):
""""
DEFINITION:
Provides a simple linear nan interpolator that returns the interpolated
data in the stream. Uses method that is already present elsewhere, e.g.
in filter, for easy and quick access.
PARAMETERS:
- keys: List of keys to interpolate.
RETURNS:
- stream: Original stream with nans replaced by linear interpolation.
"""
for key in keys:
if key not in NUMKEYLIST:
logger.error("interpolate_nans: {} is an invalid key! Cannot interpolate.".format(key))
y = self._get_column(key)
nans, x = nan_helper(y)
y[nans] = np.interp(x(nans), x(~nans), y[~nans])
self._put_column(y, key)
logger.info("interpolate_nans: Replaced nans in {} with linearly interpolated values.".format(key))
return self
def k_extend(self, **kwargs):
"""
DESCRIPTION:
Extending the k_scale from 9 to 28 values as used for the GFZ kp value
"""
k9_level = kwargs.get('k9_level')
if not k9_level:
if 'StationK9' in self.header:
# 1. Check header info
k9_level = self.header['StationK9']
else:
# 2. Set Potsdam default
k9_level = 500
fortscale = [0,7.5,15,30,60,105,180,300,495,750]
k_scale = [float(k9_level)*elem/750.0 for elem in fortscale]
newlst = []
klst = [0.,0.33,0.66,1.,1.33,1.66,2.,2.33,2.66,3.,3.33,3.66,4.,4.33,4.66,5.,5.33,5.66,6.,6.33,6.66,7.,7.33,7.66,8.,8.33,8.66,9.]
for idx,elem in enumerate(k_scale):
if idx > 0:
diff = elem - k_scale[idx-1]
newlst.append(elem-2*diff/3)
newlst.append(elem-diff/3)
newlst.append(elem)
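# Example (sketch): with a K9 level of 500 nT the 40-70 nT step of the original scale
# gains the intermediate thresholds 50 and 60 nT, so that thirds of K values can be assigned.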
indvar1 = KEYLIST.index('var1')
indvar2 = KEYLIST.index('var2')
ar = []
for elem in self.ndarray[indvar2]:
for count,val in enumerate(newlst):
if elem > val:
k = klst[count]
ar.append(k)
self.ndarray[indvar1] = np.asarray(ar)
return self
def k_fmi(self, **kwargs):
"""
DESCRIPTION:
Calculating k values following the fmi approach. The method uses three major steps:
Firstly, the record is eventually filtered to minute data, outliers are removed
(using default options) and gaps are interpolated. Ideally, these steps have been
conducted before, which allows for complete control of these steps.
Secondly, the last 27 hours are investigated. Starting from the last record, the last
three hour segment is taken and the fmi approach is applied. Finally, the provided
stream is analyzed from the beginning. Definite values are thus produced for the
previous day after 3:00 am (depending on n - see below).
The FMI method:
The provided data stream is checked and converted to xyz data. Investigated are the
horizontal components. In a first run k values are calculated by simply determining
the max/min difference of the minute variation data within the three hour segments.
This is done for both horizontal components and the maximum difference is selected.
Using the transformation table related to the Niemegk scale the k values are calculated.
Based on these k values, a first estimate of the quiet daily variation (Sr) is obtained.
Hourly means with extended time ranges (30min + m + n) are obtained for each x.5 hour.
m refers to 120 minutes (0-3a.m., 21-24p.m.), 60 minutes (3-6, 18-21) or 0 minutes.
n is determined by k**3.3.
xyz within the code always refers to the coordinate system of the sensor and not to any geomagnetic reference.
By default it is assumed that the provided stream comes from a hdz oriented instrument.
For xyz (or any other) orientation use the option checky=True to investigate both horizontal components.
If the stream contains absolute data, the option hcomp = True transforms the stream to hdz.
The following steps are performed:
1. Asserts: Signal covers at least 24 hours, sampling rate minute or second
2. Produce filtered minute signal, check for gaps, eventually interpolate (done by filter/sm algorithm) - needs some improvements
3. from the last value contained get 3 hour segments and calculate max, min and max-min
kwargs support the following keywords:
- k9_level (float) the value for which k9 is defined, all other values are linearly approximated
- magnetic latitude (float) another way to define the k scale
- timerange (timedelta object) default=timedelta(hours=1)
- fitdegree (float) default=5
- knotstep (float < 0.5) determines the amount of knots: amount = 1/knotstep ---> VERY smooth 0.1 | NOT VERY SMOOTH 0.001
- flag
PARAMETER:
k9_level (int) define the Observatories K9 Level. If not provided then firstly
the header information is scanned for a 'StationK9' input. If not
successful a K9 of 500 nT is assumed.
"""
plot = kwargs.get('plot')
debug = kwargs.get('debug')
hcomp = kwargs.get('hcomp')
fitdegree = kwargs.get('fitdegree')
fitfunc=kwargs.get('fitfunc')
magnetic_latitude = kwargs.get('magnetic_latitude')
k9_level = kwargs.get('k9_level')
checky = kwargs.get('checky') # used for xyz data if True then the y component is checked as well
if not fitfunc:
fitfunc = 'harmonic'
if not fitdegree:
fitdegree = 5
if not k9_level:
if 'StationK9' in self.header:
# 1. Check header info
k9_level = self.header['StationK9']
else:
# 2. Set Potsdam default
k9_level = 500
# Some basics:
startinghours = [0,3,6,9,12,15,18,21]
mlist = [120,60,0,0,0,0,60,120]
#ngkscale = [0,5,10,20,40,70,120,200,330,500]
fortscale = [0,7.5,15,30,60,105,180,300,495,750]
k_scale = [float(k9_level)*elem/750.0 for elem in fortscale]
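# Example: with the default k9_level of 500 nT this yields
# k_scale = [0, 5, 10, 20, 40, 70, 120, 200, 330, 500] (the Niemegk scale noted above).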
# calculate local scale from magnetic latitude (inclination):
# important: how to do that - what is the latitudinal relationship, how to transfer the scale,
# it is frequently mentioned to be quasi-log but it is not a simple Log scale
# func can be fitted reasonably well by
# func[a_] := Exp[0.8308663199145958 + 0.7894060396483681 k - 0.021250627459823503 k^2]
kstream = DataStream()
logger.info('--- Starting k value calculation: %s ' % (str(datetime.now())))
# Non destructive - using a copy of the supplied stream
stream = self.copy()
# ############################################
# ## Step 1 ##############
# ## ------------------------ ##############
# ## preparing data: ##############
# ## - check sampling/length ##############
# ## - check type (xyz etc) ##############
# ## - check removing outliers ##############
# ## - eventually filter ##############
# ## - interpolate/fill gaps ##############
# ############################################
# removing outliers
if debug:
print("Removing outliers")
stream = stream.flag_outlier(keys=['x','y','z'],threshold=6.) # Weak conditions
stream = stream.remove_flagged()
sr = stream.samplingrate()
if debug:
print("Sampling rate", sr)
if sr > 65:
print("Algorythm requires minute or higher resolution - aborting")
return DataStream()
if sr <= 0.9:
print("Data appears to be below 1 second resolution - filtering to seconds first")
stream = stream.nfilter(filter_width=timedelta(seconds=1))
sr = stream.samplingrate()
if 0.9 < sr < 55:
print("Data appears to be below 1 minute resolution - filtering to minutes")
stream = stream.nfilter(filter_width=timedelta(minutes=1))
else:
pass
# get_gaps - put nans to missing data
# then replace nans with interpolated values
#nans, x= nan_helper(v)
# v[nans]= interp(x(nans), x(~nans), v[~nans])
ndtype = True
if len(stream.ndarray[0]) > 0:
ndtype = True
timediff = np.max(stream.ndarray[0]) - np.min(stream.ndarray[0])
indtyp = KEYLIST.index('typ')
try:
gettyp = stream.ndarray[indtyp][0]
except:
gettyp = 'xyzf'
print("ndtype - Timeseries ending at:", num2date(np.max(stream.ndarray[0])))
else:
timediff = stream[-1].time - stream[0].time
gettyp = stream[0].typ
print("LineStruct - Timeseries ending at:", num2date(stream[-1].time))
print("Coverage in days:", timediff)
if timediff < 1.1: # 1 corresponds to 24 hours
print("not enough time covered - aborting")
return
if debug:
print("Typ:", gettyp)
# Transform the coordinate system to XYZ, assuming an hdz orientation.
fmistream = stream
if gettyp == 'idff':
fmistream = stream._convertstream('idf2xyz',keep_header=True)
elif gettyp == 'hdzf':
fmistream = stream._convertstream('hdz2xyz',keep_header=True)
elif not gettyp == 'xyzf':
print("Unkown type of data - please provide xyzf, idff, hdzf -aborting")
return
# By default use H for determination
if debug:
print("converting data to hdz - only analyze h")
print("This is applicable in case of baselinecorrected data")
# TODO Important currently we are only using x (or x and y)
if hcomp:
print("Please note: H comp requires that columns xyz contain baseline corrected values")
fmistream = fmistream._convertstream('xyz2hdz',keep_header=True)
elif 'DataAbsFunctionObject' in fmistream.header:
print("Found Baseline function")
pass # TODO: apply a baseline (BC) correction here
checky = True
else:
# If variation data use maximum from x and y
checky = True
# ############################################
# ## Step 2 ##############
# ## ------------------------ ##############
# ## some functions ##############
# ############################################
def klist2stream(klist, kvalstream=DataStream() ,ndtype=True):
"""
Internal method to convert a k value list to a stream
"""
#emptystream = DataStream()
if len(kvalstream.ndarray[0]) > 0:
kexists = True
#ti = list(li.ndarray[0])
#print "Previous k", li.ndarray
elif len(kvalstream) > 0:
kexists = True
#li = [elem for elem in kvalstream]
#ti = [elem.time for elem in kvalstream]
else:
kexists = False
array = [[] for key in KEYLIST]
#li = DataStream()
indvar1 = KEYLIST.index('var1')
indvar2 = KEYLIST.index('var2')
indvar3 = KEYLIST.index('var3')
if ndtype:
#array = [[] for key in KEYLIST]
for kline in klist:
time = kline[0]
if kexists:
try:
ind = list(kvalstream.ndarray[0]).index(time)
#print "Found time at index", ind
#if kvalstream.ndarray[indvar3][ind] < quality lower
kvalstream = kvalstream._delete(ind)
except:
pass
kvalstream.ndarray[0] = np.append(kvalstream.ndarray[0],kline[0])
kvalstream.ndarray[indvar1] = np.append(kvalstream.ndarray[indvar1],kline[1])
kvalstream.ndarray[indvar2] = np.append(kvalstream.ndarray[indvar2],kline[2])
kvalstream.ndarray[indvar3] = np.append(kvalstream.ndarray[indvar3],kline[3])
else:
# put data to kvalstream
array[0].append(kline[0])
array[indvar1].append(kline[1])
array[indvar2].append(kline[2])
array[indvar3].append(kline[3]) # Quality parameter - containg time coverage
# High quality replaces low quality
if not kexists:
array[0] = np.asarray(array[0])
array[indvar1] = np.asarray(array[indvar1])
array[indvar2] = np.asarray(array[indvar2])
kvalstream.ndarray = np.asarray(array)
return kvalstream
def maxmink(datastream, cdlist, index, k_scale, ndtype=True, **kwargs):
# function returns 3 hour k values for a 24 hour minute time series
# The following function is used several times on different !!!!! 24h !!!!!!! timeseries
# (with and without removal of daily-quiet signals)
checky = kwargs.get('checky')
xmaxval = 0
xminval = 0
ymaxval = 0
yminval = 0
deltaday = 0
klist = []
for j in range(0,8):
if debug:
print("Loop Test", j, index, num2date(cdlist[index])-timedelta(days=deltaday))
#t7 = datetime.utcnow()
#threehours = datastream.extract("time", date2num(num2date(cdlist[index])-timedelta(days=deltaday)), "<")
et = date2num(num2date(cdlist[index])-timedelta(days=deltaday))
index = index - 1
if index < 0:
index = 7
deltaday += 1
if debug:
print("Start", num2date(cdlist[index])-timedelta(days=deltaday))
#threehours = threehours.extract("time", date2num(num2date(cdlist[index])-timedelta(days=deltaday)), ">=")
st = date2num(num2date(cdlist[index])-timedelta(days=deltaday))
ar = datastream._select_timerange(starttime=st, endtime=et)
threehours = DataStream([LineStruct()],{},ar)
#print("ET",st,et)
#t8 = datetime.utcnow()
#print("Extracting time needed:", t8-t7)
if ndtype:
len3hours = len(threehours.ndarray[0])
else:
len3hours = len(threehours)
if debug:
print("Length of three hour segment", len3hours)
if len3hours > 0:
if ndtype:
indx = KEYLIST.index('x')
indy = KEYLIST.index('y')
colx = threehours.ndarray[indx]
else:
colx = threehours._get_column('x')
colx = [elem for elem in colx if not isnan(elem)]
if len(colx) > 0:
xmaxval = max(colx)
xminval = min(colx)
else:
xmaxval = 0.0
xminval = 0.0
if checky:
if ndtype:
coly = threehours.ndarray[indy]
else:
coly = threehours._get_column('y')
coly = [elem for elem in coly if not isnan(elem)]
ymaxval = max(coly)
yminval = min(coly)
else:
ymaxval = 0.0
yminval = 0.0
maxmindiff = max([xmaxval-xminval, ymaxval-yminval])
k = np.nan
for count,val in enumerate(k_scale):
if maxmindiff > val:
k = count
if np.isnan(k):
maxmindiff = np.nan
if debug:
print("Extrema", k, maxmindiff, xmaxval, xminval, ymaxval, yminval)
# create a k-value list
else:
k = np.nan
maxmindiff = np.nan
ti = date2num(num2date(cdlist[index])-timedelta(days=deltaday)+timedelta(minutes=90))
klist.append([ti,k,maxmindiff,1])
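# each klist entry (sketch): [centre time of the 3h segment, K index, max-min amplitude in nT,
# quality parameter (1 = regular determination)]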
return klist
def fmimeans(datastream, laststep, kvalstream, ndtype=True):
# function returns 3 hour k values for a 24 hour minute time series
deltaday = 0
hmlist = []
meanstream = DataStream()
lasthour = num2date(laststep).replace(minute=0, second=0, microsecond=0)
for j in range(0,24):
#if debug:
# print "Loop Test", j
# last hour
index = lasthour.hour
index = index - 1
if index < 0:
index = 23
#if debug:
#print index
meanat = lasthour - timedelta(minutes=30)
#get m (using index)
#if debug:
#print int(np.floor(index/3.))
m = mlist[int(np.floor(index/3.))]
#if debug:
#print "m:", m
#get n
# test: find nearest kval from kvalstream
idx = (np.abs(kvalstream.ndarray[0].astype(float)-date2num(meanat))).argmin()
kval = kvalstream.ndarray[KEYLIST.index('var1')][idx]
if not np.isnan(kval):
n = kval**3.3
else:
n = 0
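# Example: a preliminary K of 5 gives n = 5**3.3, i.e. roughly 200 minutes, so the hourly
# mean below is taken over meanat +/- (30 + m + n) minutes.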
# extract meanat +/- (30+m+n)
valrange = datastream.extract("time", date2num(meanat+timedelta(minutes=30)+timedelta(minutes=m)+timedelta(minutes=n)), "<")
valrange = valrange.extract("time", date2num(meanat-timedelta(minutes=30)-timedelta(minutes=m)-timedelta(minutes=n)), ">=")
#if debug:
#print "Length of Sequence", len(valrange), num2date(valrange[0].time), num2date(valrange[-1].time)
if ndtype:
firsttime = np.min(datastream.ndarray[0])
else:
firsttime = datastream[0].time
if not firsttime < date2num(meanat-timedelta(minutes=30)-timedelta(minutes=m)-timedelta(minutes=n)):
print("##############################################")
print(" careful - datastream not long enough for correct k determination")
print("##############################################")
print("Hourly means not correctly determinable for day", meanat)
print("as the extended time range is not reached")
print("----------------------------------------------")
kvalstream.ndarray[KEYLIST.index('var3')][idx] = 0.5
#return meanstream
# Now get the means
meanx = valrange.mean('x')
meany = valrange.mean('y')
meanz = valrange.mean('z')
hmlist.append([date2num(meanat),meanx,meany,meanz])
# Describe why we are duplicating values at the end and the beginning!!
# Was that necessary for the polyfit??
if j == 0:
hmlist.append([date2num(meanat+timedelta(minutes=30)+timedelta(minutes=m)+timedelta(minutes=n)),meanx,meany,meanz])
if j == 23:
hmlist.append([date2num(meanat-timedelta(minutes=30)-timedelta(minutes=m)-timedelta(minutes=n)),meanx,meany,meanz])
lasthour = lasthour - timedelta(hours=1)
if ndtype:
array = [[] for key in KEYLIST]
indx = KEYLIST.index('x')
indy = KEYLIST.index('y')
indz = KEYLIST.index('z')
array[0] = np.asarray([elem[0] for elem in hmlist])
array[indx] = np.asarray([elem[1] for elem in hmlist])
array[indy] = np.asarray([elem[2] for elem in hmlist])
array[indz] = np.asarray([elem[3] for elem in hmlist])
meanstream.ndarray = np.asarray(array)
else:
for elem in sorted(hmlist):
line = LineStruct()
line.time = elem[0]
line.x = elem[1]
line.y = elem[2]
line.z = elem[3]
meanstream.add(line)
#print klist
return meanstream.sorting()
# ############################################
# ## Step 2 ##############
# ## ------------------------ ##############
# ## analyze last 24 h: ##############
# ## - get last day ##############
# ## - get last 3hour segment ##############
# ## - run backwards ##############
# ## - calc fmi: ##############
# ## - 1. get max/min deviation ###########
# ## - 2. use this k to get sr ###########
# ## - 3. calc k with sr reduced ##########
# ## - 4. recalc sr ##########
# ## - 5. final k ##########
# ############################################
if ndtype:
currentdate = num2date(np.max(fmistream.ndarray[0])).replace(tzinfo=None)
lastdate = currentdate
d = currentdate.date()
currentdate = datetime.combine(d, datetime.min.time())
else:
currentdate = num2date(fmistream[-1].time).replace(tzinfo=None)
lastdate = currentdate
d = currentdate.date()
currentdate = datetime.combine(d, datetime.min.time())
print("Last effective time series ending at day", currentdate)
print(" -----------------------------------------------------")
print(" ------------- Starting backward analysis ------------")
print(" --------------- beginning at last time --------------")
# selecting reduced time range!!!
t1 = datetime.utcnow()
array = fmistream._select_timerange(starttime=currentdate-timedelta(days=2))
fmitstream = DataStream([LineStruct()],fmistream.header,array)
cdlist = [date2num(currentdate.replace(hour=elem)) for elem in startinghours]
#print("Daily list", cdlist, currentdate)
t2 = datetime.utcnow()
print("Step0 needed:", t2-t1)
#ta, i = find_nearest(np.asarray(cdlist), date2num(lastdate-timedelta(minutes=90)))
ta, i = find_nearest(np.asarray(cdlist), date2num(lastdate))
if i < 7:
i=i+1
else:
i=0
cdlist = [el+1 for el in cdlist]
#print("Nearest three hour mark", num2date(ta), i, np.asarray(cdlist))
if plot:
import magpy.mpplot as mp
fmistream.plot(noshow=True, plottitle="0")
# 1. get a backward 24 hour calculation from the last record
klist = maxmink(fmitstream,cdlist,i,k_scale)
#print(klist, i)
kstream = klist2stream(klist, kstream)
t3 = datetime.utcnow()
print("Step1 needed:", t3-t2)
# 2. a) now get the hourly means with extended time ranges (sr function)
hmean = fmimeans(fmitstream,date2num(lastdate),kstream)
func = hmean.fit(['x','y','z'],fitfunc='harmonic',fitdegree=5)
if plot:
hmean.plot(function=func,noshow=True, plottitle="1: SR function")
# 2. b) subtract sr from original record
#redfmi = fmistream.func_subtract(func)
redfmi = fmistream.func2stream(func,mode='sub')
if plot:
redfmi.plot(noshow=True, plottitle="1: reduced")
fmistream.plot(noshow=True, plottitle="1")
t4 = datetime.utcnow()
print("Step2 needed:", t4-t3)
# 3. recalc k
klist = maxmink(redfmi,cdlist,i,k_scale)
kstream = klist2stream(klist, kstream)
#print ("3.", num2date(kstream.ndarray[0]))
t5 = datetime.utcnow()
print("Step3 needed:", t5-t4)
# 4. recalc sr and subtract
finalhmean = fmimeans(fmitstream,date2num(lastdate),kstream)
finalfunc = finalhmean.fit(['x','y','z'],fitfunc='harmonic',fitdegree=5)
firedfmi = fmistream.func2stream(finalfunc,mode='sub')
if plot:
mp.plot(finalhmean,['x','y','z'],function=finalfunc,noshow=True, plottitle="2: SR function")
#finalhmean.plot(['x','y','z'],function=finalfunc,noshow=True, plottitle="2: SR function")
firedfmi.plot(['x','y','z'],noshow=True, plottitle="2: reduced")
fmitstream.plot(['x','y','z'],plottitle="2")
t6 = datetime.utcnow()
print("Step4 needed:", t6-t5)
# 5. final k
klist = maxmink(firedfmi,cdlist,i,k_scale)
kstream = klist2stream(klist, kstream)
#print ("Last", num2date(kstream.ndarray[0]))
t7 = datetime.utcnow()
print("Step5 needed:", t7-t6)
# ############################################
# ## Step 3 ##############
# ## ------------------------ ##############
# ## analyze from beginning: ##############
# ## - get first record ##############
# ## - from day to day ##############
# ## - run backwards ##############
# ## - calc fmi: ##############
# ## - 1. get max/min deviation ###########
# ## - 2. use this k to get sr ###########
# ## - 3. calc k with sr reduced ##########
# ## - 4. recalc sr ##########
# ## - 5. final k ##########
# ############################################
print(" -----------------------------------------------------")
print(" ------------- Starting forward analysis -------------")
print(" ----------------- from first date ------------------")
if ndtype:
st = np.min(fmistream.ndarray[0])
else:
st = fmistream[0].time
startday = int(np.floor(st))
for daynum in range(1,int(timediff)+1):
currentdate = num2date(startday+daynum)
print("Running daily chunks forward until ", currentdate)
# selecting reduced time range!!!
array = fmistream._select_timerange(starttime=currentdate-timedelta(days=3),endtime=currentdate+timedelta(days=1))
fmitstream = DataStream([LineStruct()],fmistream.header,array)
cdlist = [date2num(currentdate.replace(hour=elem)) for elem in startinghours]
#print "Daily list", cdlist
# 1. get a backward 24 hour calculation from the last record
klist = maxmink(fmitstream,cdlist,0,k_scale)
#print("forward", klist)
kstream = klist2stream(klist, kstream)
# 2. a) now get the hourly means with extended time ranges (sr function)
hmean = fmimeans(fmitstream,startday+daynum,kstream)
if ndtype:
lenhmean = len(hmean.ndarray[0])
else:
lenhmean = len(hmean)
if not lenhmean == 0: # Length 0 if not enough data for full extended mean value calc
func = hmean.fit(['x','y','z'],fitfunc='harmonic',fitdegree=5)
#hmean.plot(function=func,noshow=True)
if not func[0] == {}:
if plot:
fmistream.plot(noshow=True)
# 2. b) subtract sr from original record
redfmi = fmitstream.func2stream(func,mode='sub')
# 3. recalc k
klist = maxmink(redfmi,cdlist,0,k_scale)
kstream = klist2stream(klist, kstream)
#print klist
# 4. recalc sr and subtract
finalhmean = fmimeans(fmitstream,startday+daynum,kstream)
finalfunc = finalhmean.fit(['x','y','z'],fitfunc='harmonic',fitdegree=5)
firedfmi = fmistream.func2stream(finalfunc,mode='sub')
if plot:
finalhmean.plot(['x','y','z'],noshow=True, function=finalfunc, plottitle="2")
firedfmi.plot(['x','y','z'],noshow=True, plottitle="2: reduced")
fmitstream.plot(['x','y','z'], plottitle="2: fmistream")
# 5. final k
klist = maxmink(firedfmi,cdlist,0,k_scale)
kstream = klist2stream(klist, kstream)
#print "Final", klist
#print kstream.ndarray, klist
kstream = kstream.sorting()
kstream.header['col-var1'] = 'K'
kstream.header['col-var2'] = 'C'
kstream.header['col-var3'] = 'Quality'
#print ("Test",kstream.ndarray)
return DataStream([LineStruct()],kstream.header,kstream.ndarray)
"""
outstream = DataStream()
lst = [[elem.time,elem.var1,elem.var2] for elem in kstream]
for el in sorted(lst):
line = LineStruct()
line.time = el[0]
line.var1 = el[1]
line.var2 = el[2]
outstream.add(line)
return outstream
"""
def linestruct2ndarray(self):
"""
DEFINITION:
Converts linestruct data to ndarray.
RETURNS:
- self with ndarray filled
EXAMPLE:
>>> data = data.linestruct2ndarray()
APPLICATION:
"""
def checkEqual3(lst):
return lst[1:] == lst[:-1]
array = [np.asarray([]) for elem in KEYLIST]
keys = self._get_key_headers()
t = np.asarray(self._get_column('time'))
array[0] = t
for key in keys:
ind = KEYLIST.index(key)
col = self._get_column(key)
if len(col) > 0:
if not False in checkEqual3(col) and str(col[0]) == str('-'):
col = np.asarray([])
array[ind] = col
else:
array[ind] = []
array = np.asarray(array,dtype=object)
stream = [LineStruct()]
return DataStream(stream,self.header,array)
def mean(self, key, **kwargs):
"""
DEFINITION:
Calculates the mean value for the specified key; NaN values are taken into account.
The mean is only calculated if more than 'percentage' percent of the values are non-NaN.
Returns a float if successful, otherwise NaN.
PARAMETERS:
Variables:
- key: (KEYLIST) element of Keylist like 'x' .
Kwargs:
- percentage: (int) Define required percentage of non-nan values, if not
met that nan will be returned. Default is 95 (%)
- meanfunction: (string) accepts 'mean' and 'median'. Default is 'mean'
- std: (bool) if true, the standard deviation is returned as well
RETURNS:
- mean/median(, std) (float)
EXAMPLE:
>>> meanx = datastream.mean('x',meanfunction='median',percentage=90)
APPLICATION:
stream = read(datapath)
mean = stream.mean('f')
median = stream.mean('f',meanfunction='median')
stddev = stream.mean('f',std=True)
"""
percentage = kwargs.get('percentage')
meanfunction = kwargs.get('meanfunction')
std = kwargs.get('std')
if not meanfunction:
meanfunction = 'mean'
if not percentage:
percentage = 95
if not std:
std = False
ndtype = False
if len(self.ndarray[0])>0:
ndtype = True
elif len(self) > 0:
pass
else:
logger.error('mean: empty stream - aborting')
if std:
return float("NaN"), float("NaN")
else:
return float("NaN")
try: #python2
if not isinstance( percentage, (int,long)):
logger.error("mean: Percentage needs to be an integer!")
except:
if not isinstance( percentage, (int)):
logger.error("mean: Percentage needs to be an integer!")
if not key in KEYLIST[:16]:
logger.error("mean: Column key not valid!")
if ndtype:
ind = KEYLIST.index(key)
length = len(self.ndarray[0])
self.ndarray[ind] = np.asarray(self.ndarray[ind])
ar = self.ndarray[ind].astype(float)
ar = ar[~np.isnan(ar)]
else:
ar = [getattr(elem,key) for elem in self if not isnan(getattr(elem,key))]
length = float(len(self))
div = float(len(ar))/length*100.0
if div >= percentage:
if std:
return eval('np.'+meanfunction+'(ar)'), np.std(ar)
else:
return eval('np.'+meanfunction+'(ar)')
else:
logger.info('mean: Too many nans in column {}, exceeding {} percent'.format(key,percentage))
if std:
return float("NaN"), float("NaN")
else:
return float("NaN")
def missingvalue(self,v,window_len,threshold=0.9,fill='mean'):
"""
DESCRIPTION
fills missing values either with means or interpolated values
PARAMETER:
v: (np.array) single column of ndarray
window_len: (int) length of window to check threshold
threshold: (float) minimum fraction of available data, e.g. 0.9 for 90 percent
fill: (string) 'mean' or 'interpolation'
RETURNS:
ndarray - single column
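EXAMPLE:
(a minimal sketch; the key, window length and fill mode are assumptions for illustration)
>>> ind = KEYLIST.index('x')
>>> stream.ndarray[ind] = stream.missingvalue(stream.ndarray[ind], 3600, threshold=0.9, fill='interpolation')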
"""
try:
v_rest = np.array([])
v = v.astype(float)
n_split = len(v)/float(window_len)
if not n_split == int(n_split):
el = int(int(n_split)*window_len)
v_rest = v[el:]
v = v[:el]
spli = np.split(v,int(len(v)/window_len))
if len(v_rest) > 0:
spli.append(v_rest)
newar = np.array([])
for idx,ar in enumerate(spli):
nans, x = nan_helper(ar)
if len(ar[~nans]) >= threshold*len(ar):
if fill == 'mean':
ar[nans]= np.nanmean(ar)
else:
ar[nans]= interp(x(nans), x(~nans), ar[~nans])
newar = np.concatenate((newar,ar))
v = newar
except:
print ("Filter: could not split stream in equal parts for interpolation - switching to conservative mode")
return v
def MODWT_calc(self,key='x',wavelet='haar',level=1,plot=False,outfile=None,
window=5):
"""
DEFINITION:
Multiple Overlap Discrete wavelet transform (MODWT) method of analysing a magnetic signal
to pick out SSCs. This method was taken from Hafez (2013b): "Geomagnetic Sudden
Commencement Automatic Detection via MODWT"
(NOTE: PyWavelets package must be installed for this method. It should be applied
to 1s data - otherwise the sample window and detection levels should be changed.)
METHOD:
1. Use the Haar wavelet filter to calculate the 1st and 2nd details
of the geomagnetic signal.
2. The 1st detail (D1) samples are squared to evaluate the magnitude.
3. The sample window (5) is averaged to avoid ripple effects. (This means the
returned stream will have ~1/5 the size of the original.)
PARAMETERS:
Variables:
- key: (str) Apply MODWT to this key. Default 'x' due to SSCs dominating
the horizontal component.
- wavelet: (str) Type of filter to use. Default 'haar'; Hafez (2013) uses 'db4'
(4th-order Daubechies wavelet filter).
- level: (int) Decomposition level. Will calculate details down to this level.
Default 1; Hafez (2013) uses level 3.
- plot: (bool) If True, will display a plot of A3, D1, D2 and D3.
- outfile: (str) If given, will plot will be saved to 'outfile' path.
- window: (int) Length of sample window. Default 5, i.e. 5s with second data.
RETURNS:
- MODWT_stream: (DataStream object) A stream containing the following:
'x': A_n (approximation function)
'var1': D1 (first detail)
'var2': D2 (second detail)
...
'var3': D3 (third detail)
...
EXAMPLE:
>>> MODWT_stream = stream.MODWT_calc(plot=True)
APPLICATION:
# Storm detection using detail 3 (D3 = var3):
from magpy.stream import *
stream = read('LEMI_1s_Data_2014-02-15.cdf') # 2014-02-15 is a good storm example
MODWT_stream = stream.MODWT_calc(plot=True)
Da_min = 0.0005 # nT^2 (minimum amplitude of D3 for storm detection)
Dp_min = 40 # seconds (minimum period of Da > Da_min for storm detection)
detection = False
for row in MODWT_stream:
if row.var3 >= Da_min and detection == False:
timepin = row.time
detection = True
elif row.var3 < Da_min and detection == True:
duration = (num2date(row.time) - num2date(timepin)).seconds
if duration >= Dp_min:
print "Storm detected!"
print duration, num2date(timepin)
detection = False
"""
# Import required package PyWavelets:
# http://www.pybytes.com/pywavelets/index.html
import pywt
# 1a. Grab array from stream
data = self._get_column(key)
t_ind = KEYLIST.index('time')
#MODWT_stream = DataStream([],{})
MODWT_stream = DataStream()
headers = MODWT_stream.header
array = [[] for key in KEYLIST]
x_ind = KEYLIST.index('x')
dx_ind = KEYLIST.index('dx')
var1_ind = KEYLIST.index('var1')
var2_ind = KEYLIST.index('var2')
var3_ind = KEYLIST.index('var3')
var4_ind = KEYLIST.index('var4')
var5_ind = KEYLIST.index('var5')
dy_ind = KEYLIST.index('dy')
i = 0
logger.info("MODWT_calc: Starting Discrete Wavelet Transform of key %s." % key)
if len(data) % 2 == 1:
data = data[0:-1]
# Results have format:
# (cAn, cDn), ..., (cA2, cD2), (cA1, cD1)
coeffs = pywt.swt(data, wavelet, level)
acoeffs, dcoeffs = [], []
for i in range(level):
(a, d) = coeffs[i]
acoeffs.append(a)
dcoeffs.append(d)
for i, item in enumerate(dcoeffs):
dcoeffs[i] = [j**2 for j in item]
# 1b. Loop for sliding window (reset the index that was reused by the loops above)
i = 0
while True:
if i >= (len(data)-window):
break
# Take the values in the middle of the window (not exact but changes are
# not extreme over standard 5s window)
array[t_ind].append(self.ndarray[t_ind][i+int(window/2)])
data_cut = data[i:i+window]
array[x_ind].append(sum(data_cut)/float(window))
a_cut = acoeffs[0][i:i+window]
array[dx_ind].append(sum(a_cut)/float(window))
for j in range(level):
d_cut = dcoeffs[-(j+1)][i:i+window]
if j <= 5:
key = 'var'+str(j+1)
array[KEYLIST.index(key)].append(sum(d_cut)/float(window))
elif 5 < j <= 7:
if j == 6:
key = 'dy'
elif j == 7:
key = 'dz'
array[KEYLIST.index(key)].append(sum(d_cut)/float(window))
i += window
logger.info("MODWT_calc: Finished MODWT.")
MODWT_stream.header['col-x'] = 'A3'
MODWT_stream.header['unit-col-x'] = 'nT^2'
MODWT_stream.header['col-var1'] = 'D1'
MODWT_stream.header['unit-col-var1'] = 'nT^2'
MODWT_stream.header['col-var2'] = 'D2'
MODWT_stream.header['unit-col-var2'] = 'nT^2'
MODWT_stream.header['col-var3'] = 'D3'
MODWT_stream.header['unit-col-var3'] = 'nT^2'
MODWT_stream.header['col-var4'] = 'D4'
MODWT_stream.header['unit-col-var4'] = 'nT^2'
MODWT_stream.header['col-var5'] = 'D5'
MODWT_stream.header['unit-col-var5'] = 'nT^2'
MODWT_stream.header['col-dy'] = 'D6'
MODWT_stream.header['unit-col-dy'] = 'nT^2'
# Plot stream:
if plot == True:
date = datetime.strftime(num2date(self.ndarray[0][0]),'%Y-%m-%d')
logger.info('MODWT_calc: Plotting data...')
if outfile:
MODWT_stream.plot(['x','var1','var2','var3'],
plottitle="MODWT Decomposition of %s (%s)" % (key,date),
outfile=outfile)
else:
MODWT_stream.plot(['x','var1','var2','var3'],
plottitle="MODWT Decomposition of %s (%s)" % (key,date))
for key in KEYLIST:
array[KEYLIST.index(key)] = np.asarray(array[KEYLIST.index(key)])
return DataStream([LineStruct()], headers, np.asarray(array,dtype=object))
def multiply(self, factors, square=False):
"""
DEFINITION:
A function to multiply the datastream, should one ever have the need to.
Scale value correction for example.
PARAMETERS:
Variables:
- factors: (dict) Dictionary of multiplication factors with keys to apply to
e.g. {'x': -1, 'f': 2}
Kwargs:
- square: (bool) If True, key will be squared by the factor.
RETURNS:
- self: (DataStream) Multiplied datastream.
EXAMPLE:
>>> data.multiply({'x':-1})
APPLICATION:
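(a minimal sketch, assuming 'data' holds populated 'x' and 'y' columns)
>>> data = data.multiply({'y': 0.988})
>>> squared = data.multiply({'x': 2}, square=True)   # with square=True the column is raised to the power of 2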
"""
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype = True
sel = self.copy()
for key in factors:
if key in KEYLIST:
if ndtype:
ind = KEYLIST.index(key)
val = sel.ndarray[ind]
else:
val = sel._get_column(key)
if key == 'time':
logger.error("factor: Multiplying time? That's just plain silly.")
else:
if square == False:
newval = [elem * factors[key] for elem in val]
logger.info('factor: Multiplied column %s by %s.' % (key, factors[key]))
else:
newval = [elem ** factors[key] for elem in val]
logger.info('factor: Multiplied column %s by %s.' % (key, factors[key]))
if ndtype:
sel.ndarray[ind] = np.asarray(newval)
else:
sel = sel._put_column(newval, key)
else:
logger.warning("factor: Key '%s' not in keylist." % key)
return sel
def obspyspectrogram(self, data, samp_rate, per_lap=0.9, wlen=None, log=False,
outfile=None, fmt=None, axes=None, dbscale=False,
mult=8.0, cmap=None, zorder=None, title=None, show=True,
sphinx=False, clip=[0.0, 1.0]):
#TODO: Discuss with Ramon which kind of window should be used (cos^2(2*pi (t/T)))
"""
Function taken from ObsPy
Computes and plots spectrogram of the input data.
:param data: Input data
:type samp_rate: float
:param samp_rate: Samplerate in Hz
:type per_lap: float
:param per_lap: Percentage of overlap of sliding window, ranging from 0
to 1. High overlaps take a long time to compute.
:type wlen: int or float
:param wlen: Window length for fft in seconds. If this parameter is too
small, the calculation will take forever.
:type log: bool
:param log: Logarithmic frequency axis if True, linear frequency axis
otherwise.
:type outfile: String
:param outfile: String for the filename of output file, if None
interactive plotting is activated.
:type fmt: String
:param fmt: Format of image to save
:type axes: :class:`matplotlib.axes.Axes`
:param axes: Plot into given axes, this deactivates the fmt and
outfile option.
:type dbscale: bool
:param dbscale: If True 10 * log10 of color values is taken, if False the
sqrt is taken.
:type mult: float
:param mult: Pad zeros to length mult * wlen. This will make the spectrogram
smoother. Available for matplotlib > 0.99.0.
:type cmap: :class:`matplotlib.colors.Colormap`
:param cmap: Specify a custom colormap instance
:type zorder: float
:param zorder: Specify the zorder of the plot. Only of importance if other
plots in the same axes are executed.
:type title: String
:param title: Set the plot title
:type show: bool
:param show: Do not call `plt.show()` at end of routine. That way, further
modifications can be done to the figure before showing it.
:type sphinx: bool
:param sphinx: Internal flag used for API doc generation, default False
:type clip: [float, float]
:param clip: adjust colormap to clip at lower and/or upper end. The given
percentages of the amplitude range (linear or logarithmic depending
on option `dbscale`) are clipped.
"""
# enforce float for samp_rate
samp_rate = float(samp_rate)
# set wlen from samp_rate if not specified otherwise
if not wlen:
wlen = samp_rate / 100.
npts = len(data)
# nfft needs to be an integer, otherwise a deprecation will be raised
#XXX add condition for too many windows => calculation takes for ever
nfft = int(nearestPow2(wlen * samp_rate))
if nfft > npts:
nfft = int(nearestPow2(npts / 8.0))
if mult != None:
mult = int(nearestPow2(mult))
mult = mult * nfft
nlap = int(nfft * float(per_lap))
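# Illustration with assumed values (not taken from the surrounding code): with
# samp_rate = 100 Hz and wlen = 2 s, nfft = nearestPow2(2 * 100) = 256; with the
# default per_lap = 0.9 successive windows then overlap by nlap = int(256 * 0.9) = 230 samples.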
data = data - data.mean()
end = npts / samp_rate
# Here we call not plt.specgram as this already produces a plot
# matplotlib.mlab.specgram should be faster as it computes only the
# arrays
# XXX mlab.specgram uses fft, would be better and faster use rfft
if MATPLOTLIB_VERSION >= [0, 99, 0]:
specgram, freq, time = mlab.specgram(data, Fs=samp_rate, NFFT=nfft,
pad_to=mult, noverlap=nlap)
else:
specgram, freq, time = mlab.specgram(data, Fs=samp_rate,
NFFT=nfft, noverlap=nlap)
# db scale and remove zero/offset for amplitude
if dbscale:
specgram = 10 * np.log10(specgram[1:, :])
else:
specgram = np.sqrt(specgram[1:, :])
freq = freq[1:]
vmin, vmax = clip
if vmin < 0 or vmax > 1 or vmin >= vmax:
msg = "Invalid parameters for clip option."
raise ValueError(msg)
_range = float(specgram.max() - specgram.min())
vmin = specgram.min() + vmin * _range
vmax = specgram.min() + vmax * _range
norm = Normalize(vmin, vmax, clip=True)
if not axes:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes
# calculate half bin width
halfbin_time = (time[1] - time[0]) / 2.0
halfbin_freq = (freq[1] - freq[0]) / 2.0
if log:
# pcolor expects one bin more at the right end
freq = np.concatenate((freq, [freq[-1] + 2 * halfbin_freq]))
time = np.concatenate((time, [time[-1] + 2 * halfbin_time]))
# center bin
time -= halfbin_time
freq -= halfbin_freq
# pcolormesh issue was fixed in matplotlib r5716 (2008-07-07)
# inbetween tags 0.98.2 and 0.98.3
# see:
# - http://matplotlib.svn.sourceforge.net/viewvc/...
# matplotlib?revision=5716&view=revision
# - http://matplotlib.sourceforge.net/_static/CHANGELOG
if MATPLOTLIB_VERSION >= [0, 98, 3]:
# Log scaling for frequency values (y-axis)
ax.set_yscale('log')
# Plot times
ax.pcolormesh(time, freq, specgram, cmap=cmap, zorder=zorder,
norm=norm)
else:
X, Y = np.meshgrid(time, freq)
ax.pcolor(X, Y, specgram, cmap=cmap, zorder=zorder, norm=norm)
ax.semilogy()
else:
# this method is much much faster!
specgram = np.flipud(specgram)
# center bin
extent = (time[0] - halfbin_time, time[-1] + halfbin_time,
freq[0] - halfbin_freq, freq[-1] + halfbin_freq)
ax.imshow(specgram, interpolation="nearest", extent=extent,
cmap=cmap, zorder=zorder)
# set correct way of axis, whitespace before and after with window
# length
ax.axis('tight')
ax.set_xlim(0, end)
ax.grid(False)
if axes:
return ax
ax.set_xlabel('Time [s]')
ax.set_ylabel('Frequency [Hz]')
if title:
ax.set_title(title)
if not sphinx:
# ignoring all NumPy warnings during plot
temp = np.geterr()
np.seterr(all='ignore')
plt.draw()
np.seterr(**temp)
if outfile:
if fmt:
fig.savefig(outfile, format=fmt)
else:
fig.savefig(outfile)
elif show:
plt.show()
else:
return fig
def offset(self, offsets, **kwargs):
"""
DEFINITION:
Apply constant offsets to elements of the datastream
PARAMETERS:
Variables:
- offsets: (dict) Dictionary of offsets with keys to apply to
e.g. {'time': timedelta(hours=1), 'x': 4.2, 'f': -1.34242}
Important: Time offsets have to be timedelta objects
Kwargs:
- starttime: (Datetime object) Start time to apply offsets
- endtime : (Datetime object) End time to apply offsets
RETURNS:
- variable: (type) Description.
EXAMPLE:
>>> data.offset({'x':7.5})
or
>>> data.offset({'x':7.5},starttime='2015-11-21 13:33:00',endtime='2015-11-23 12:22:00')
APPLICATION:
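(a minimal sketch; the timedelta import is assumed to be available)
>>> data = data.offset({'time': timedelta(seconds=2.3), 'f': -1.34})
>>> data = data.offset({'x': 0.42}, starttime='2015-11-21 13:33:00', endtime='2015-11-23 12:22:00')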
"""
endtime = kwargs.get('endtime')
starttime = kwargs.get('starttime')
comment = kwargs.get('comment')
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype =True
tcol = self.ndarray[0]
else:
tcol = self._get_column('time')
if not len(tcol) > 0:
logger.error("offset: No data found - aborting")
return self
stidx = 0
edidx = len(tcol)
if starttime:
st = date2num(self._testtime(starttime))
# get index number of first element >= starttime in timecol
stidxlst = np.where(tcol >= st)[0]
if not len(stidxlst) > 0:
return self ## stream ends before starttime
stidx = stidxlst[0]
if endtime:
ed = date2num(self._testtime(endtime))
# get index number of last element <= endtime in timecol
edidxlst = np.where(tcol <= ed)[0]
if not len(edidxlst) > 0:
return self ## stream begins after endtime
edidx = (edidxlst[-1]) + 1
if comment and not comment == '':
if len(self.ndarray[0]) > 0:
commpos = KEYLIST.index('comment')
flagpos = KEYLIST.index('flag')
commcol = self.ndarray[commpos]
else:
commcol = self._get_column('comment')
if not len(commcol) == len(tcol):
commcol = [''] * len(tcol)
if not len(self.ndarray[flagpos]) == len(tcol):
fllist = ['0' for el in FLAGKEYLIST]
fllist.append('-')
fl = ''.join(fllist)
self.ndarray[flagpos] = [fl] * len(tcol)
for idx,el in enumerate(commcol):
if idx >= stidx and idx <= edidx:
if not el == '':
commcol[idx] = comment + ', ' + el
else:
commcol[idx] = comment
else:
commcol[idx] = el
print("offset", len(commcol), len(tcol))
self.ndarray[commpos] = commcol
for key in offsets:
if key in KEYLIST:
if ndtype:
ind = KEYLIST.index(key)
val = self.ndarray[ind]
else:
val = self._get_column(key)
val = val[stidx:edidx]
if key == 'time':
secperday = 24*3600
try:
os = offsets[key].total_seconds()/secperday
except:
try:
exec('os = '+offsets[key]+'.total_seconds()/secperday')
except:
print("offset: error with time offset - check provided timedelta")
break
val = val + os
#print num2date(val[0]).replace(tzinfo=None)
#print num2date(val[0]).replace(tzinfo=None) + offsets[key]
#newval = [date2num(num2date(elem).replace(tzinfo=None) + offsets[key]) for elem in val]
logger.info('offset: Corrected time column by %s sec' % str(offsets[key]))
else:
val = val + offsets[key]
#newval = [elem + offsets[key] for elem in val]
logger.info('offset: Corrected column %s by %.3f' % (key, offsets[key]))
if ndtype:
self.ndarray[ind][stidx:edidx] = val
else:
nval = self._get_column(key) # repeated extraction of column - could be optimized, but usage of LineStruct will not be supported in future
nval[stidx:edidx] = val
self = self._put_column(nval, key)
else:
logger.error("offset: Key '%s' not in keylist." % key)
return self
def plot(self, keys=None, debugmode=None, **kwargs):
"""
DEFINITION:
Code for plotting one dataset. Consult mpplot.plot() and .plotStreams() for more
details.
EXAMPLE:
>>> cs1_data.plot(['f'],
outfile = 'frequenz.png',
specialdict = {'f':[44184.8,44185.8]},
plottitle = 'Station Graz - Feldstaerke 05.08.2013',
bgcolor='white')
"""
import magpy.mpplot as mp
if keys == None:
keys = []
mp.plot(self, variables=keys, **kwargs)
def powerspectrum(self, key, debugmode=None, outfile=None, fmt=None, axes=None, title=None,**kwargs):
"""
DEFINITION:
Calculating the power spectrum
following the numpy fft example
PARAMETERS:
Variables:
- key: (str) Key to analyse
Kwargs:
- axes: (?) ?
- debugmode: (bool) Variable to show steps
- fmt: (str) Format of outfile, e.g. "png"
- outfile: (str) Filename to save plot to
- title: (str) Title to display on plot
- marks: (dict) add some text to the plot
- returndata: (bool) return freq and asd
- freqlevel: (float) print noise level at that frequency
RETURNS:
- plot: (matplotlib plot) A plot of the powerspectrum
EXAMPLE:
>>> data_stream.powerspectrum('x')
APPLICATION:
>>> from magpy.stream import read
1. Requires DataStream object:
>>> data_path = '/usr/lib/python2.7/magpy/examples/*'
>>> data = read(path_or_url=data_path,
starttime='2013-06-10 00:00:00',
endtime='2013-06-11 00:00:00')
2. Call for data stream:
>>> data.powerspectrum('f',
title='PSD of f', marks={'day':0.000011574},
outfile='ps.png')
"""
if debugmode:
print("Start powerspectrum at %s" % datetime.utcnow())
noshow = kwargs.get('noshow')
returndata = kwargs.get('returndata')
marks = kwargs.get('marks')
freqlevel = kwargs.get('freqlevel')
if noshow:
show = False
else:
show = True
dt = self.get_sampling_period()*24*3600
if not len(self) > 0:
logger.error("Powerspectrum: Stream of zero length -- aborting")
raise Exception("Can't analyse stream of zero length!")
t = np.asarray(self._get_column('time'))
val = np.asarray(self._get_column(key))
mint = np.min(t)
tnew, valnew = [],[]
nfft = int(nearestPow2(len(t)))
#print "NFFT:", nfft
if nfft > len(t):
nfft = int(nearestPow2(len(t) / 2.0))
#print "NFFT now:", nfft
for idx, elem in enumerate(val):
if not isnan(elem):
tnew.append((t[idx]-mint)*24*3600)
valnew.append(elem)
tnew = np.asarray(tnew)
valnew = np.asarray(valnew)
if debugmode:
print("Extracted data for powerspectrum at %s" % datetime.utcnow())
#freq = np.fft.fftfreq(tnew.shape[-1],dt)
#freq = freq[range(len(tnew)/2)] # one side frequency range
#freq = freq[1:]
#print "Maximum frequency:", max(freq)
#s = np.fft.fft(valnew)
#s = s[range(len(valnew)/2)] # one side data range
#s = s[1:]
#ps = np.real(s*np.conjugate(s))
if not axes:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes
psdm = mlab.psd(valnew, nfft, 1/dt)
asdm = np.sqrt(psdm[0])
freqm = psdm[1]
ax.loglog(freqm, asdm,'b-')
#print "Maximum frequency:", max(freqm)
if freqlevel:
val, idx = find_nearest(freqm, freqlevel)
print("Maximum Noise Level at %s Hz: %s" % (val,asdm[idx]))
if not marks:
pass
else:
for elem in marks:
ax.annotate(elem, xy=(marks[elem],min(asdm)),
xytext=(marks[elem],max(asdm)-(max(asdm)-min(asdm))*0.3),
bbox=dict(boxstyle="round", fc="0.95", alpha=0.6),
arrowprops=dict(arrowstyle="->",
shrinkA=0, shrinkB=1,
connectionstyle="angle,angleA=0,angleB=90,rad=10"))
try:
unit = self.header['unit-col-'+key]
except:
unit = 'unit'
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel(('Amplitude spectral density [%s/sqrt(Hz)]') % unit)
if title:
ax.set_title(title)
if debugmode:
print("Finished powerspectrum at %s" % datetime.utcnow())
if outfile:
if fmt:
fig.savefig(outfile, format=fmt)
else:
fig.savefig(outfile)
elif returndata:
return freqm, asdm
elif show:
plt.show()
else:
return fig
def randomdrop(self,percentage=None,fixed_indicies=None):
"""
DESCRIPTION:
Method to randomly drop one line from data. If percentage is
given, then lines according to this percentage are dropped.
This corresponds to a jackknife and d-jackknife respectively.
PARAMETER:
percentage (float) provide a percentage value to be dropped (1-99)
fixed_indicies (list) e.g. [0,1] provide a list of indices
which will not be dropped
RETURNS:
DataStream
APPLICATION:
>>> newstream = stream.randomdrop(percentage=10,fixed_indicies=[0,len(means.ndarray[0])-1])
"""
import random
def makeDrippingBucket(lst):
bucket = lst
if len(bucket) == 0:
return []
else:
random_index = random.randrange(0,len(bucket))
del bucket[random_index]
return bucket
if len(self.ndarray[0]) < 1:
return self
if percentage:
if percentage > 99:
percentage = 99
if percentage < 1:
percentage = 1
ns = self.copy()
if fixed_indicies:
# TODO assert list
pass
if not percentage:
newlen = len(ns.ndarray[0]) -1
else:
newlen = int(np.round(len(ns.ndarray[0])-len(ns.ndarray[0])*percentage/100.,0))
# Index list of stream
indexlst = [idx for idx, el in enumerate(ns.ndarray[0])]
#print len(indexlst), newlen
while len(indexlst) > newlen:
indexlst = makeDrippingBucket(indexlst)
if fixed_indicies:
for el in fixed_indicies:
if not el in indexlst:
indexlst.append(el)
#print "Here", len(indexlst)
for idx,ar in enumerate(ns.ndarray):
if len(ar) > 0:
#print ar, indexlst
newar = ar[indexlst]
ns.ndarray[idx] = newar
return ns
def remove(self, starttime=None, endtime=None):
"""
DEFINITION:
Removing dates inside of range between start- and endtime.
(Does the exact opposite of self.trim().)
PARAMETERS:
Variables:
- starttime: (datetime/str) Start of period to trim with
- endtime: (datetime/str) End of period to trim to
RETURNS:
- stream: (DataStream object) Stream with data between
starttime and endtime removed.
EXAMPLE:
>>> data = data.remove(starttime, endtime)
APPLICATION:
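(a minimal sketch, assuming a known maintenance interval should be cut out of the record)
>>> data = data.remove(starttime='2013-06-10 12:00:00', endtime='2013-06-10 14:00:00')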
"""
if starttime and endtime:
if self._testtime(starttime) > self._testtime(endtime):
logger.error('Trim: Starttime (%s) is larger than endtime (%s).' % (starttime,endtime))
raise ValueError("Starttime is larger than endtime.")
logger.info('Remove: Started from %s to %s' % (starttime,endtime))
cutstream = DataStream()
cutstream.header = self.header
cutstream.ndarray = self.ndarray
starttime = self._testtime(starttime)
endtime = self._testtime(endtime)
stval = 0
if len(cutstream.ndarray[0]) > 0:
timearray = self.ndarray[0]
st = (np.abs(timearray.astype(float)-date2num(starttime))).argmin() - 1
ed = (np.abs(timearray.astype(float)-date2num(endtime))).argmin() + 1
if starttime < num2date(cutstream.ndarray[0][0]):
st = 0
if endtime > num2date(cutstream.ndarray[0][-1]):
ed = len(cutstream.ndarray[0])
dropind = [i for i in range(st,ed)]
for index,key in enumerate(KEYLIST):
if len(cutstream.ndarray[index])>0:
cutstream.ndarray[index] = np.delete(cutstream.ndarray[index], dropind)
else:
for idx, elem in enumerate(self):
newline = LineStruct()
if not isnan(elem.time):
newline.time = elem.time
if elem.time <= date2num(starttime) or elem.time > date2num(endtime):
for key in KEYLIST:
exec('newline.'+key+' = elem.'+key)
cutstream.add(newline)
return cutstream
def remove_flagged(self, **kwargs):
"""
DEFINITION:
remove flagged data from stream:
Flagged values are replaced by NAN values. Therefore the stream's length is not changed.
Flags are defined by integers (0 normal, 1 automatically marked, 2 to be kept,
3 to be removed, 4 special)
PARAMETERS:
Kwargs:
- keys: (list) keys (string list e.g. 'f') default=FLAGKEYLIST
- flaglist: (list) default=[1,3] defines integer codes to be removed
RETURNS:
- stream: (DataStream Object) Stream with flagged data replaced by NAN.
EXAMPLE:
>>> newstream = stream.remove_flagged()
APPLICATION:
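(a minimal sketch: replace automatically marked (1) and force-removed (3) values by NaN)
>>> newstream = stream.remove_flagged(keys=['x','y','z'], flaglist=[1,3])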
"""
# Defaults:
flaglist = kwargs.get('flaglist')
keys = kwargs.get('keys')
if not flaglist:
flaglist = [1,3]
if not keys:
keys = FLAGKEYLIST
# Converting elements of flaglist to strings
flaglist = [str(fl) for fl in flaglist]
array = self.ndarray
ndtype = False
if len(self.ndarray[0]) > 0:
flagind = KEYLIST.index('flag')
commind = KEYLIST.index('comment')
ndtype = True
for key in keys:
pos = KEYLIST.index(key)
liste = []
emptyelem = LineStruct()
if ndtype:
# get indicies of all non-empty flag contents
indlst = [i for i,el in enumerate(self.ndarray[flagind]) if not el in ['','-']]
for i in indlst:
try:
#if len(array[pos]) > 0:
flagls = list(self.ndarray[flagind][i])
flag = flagls[pos]
if flag in flaglist:
array[pos][i] = float("nan")
except:
#print("stream remove_flagged: index error: indlst {}, pos {}, length flag colum {}".format(len(indlst), pos, len(self.ndarray[flagind])))
pass
liste = [LineStruct()]
else:
for elem in self:
fllst = list(elem.flag)
try: # test whether useful flag is present: flaglst length changed during the program development
flag = int(fllst[pos])
except:
flag = 0
if not flag in flaglist:
liste.append(elem)
else:
setattr(elem, key, float("nan"))
#exec('elem.'+key+' = float("nan")')
liste.append(elem)
#liste = [elem for elem in self if not elem.flag[pos] in flaglist]
if ndtype:
#-> Necessary to consider shape (e.g.BLV data)
newar = [np.asarray([]) for el in KEYLIST]
for idx,el in enumerate(array):
if idx == flagind:
pass
elif idx == commind:
pass
else:
newar[idx] = array[idx]
else:
newar = list(self.ndarray)
# Drop contents of flag and comment column -> didn't work for BLV data because of shape
# changed for 0.3.99
#array[flagind] = np.asarray([])
#array[commind] = np.asarray([])
return DataStream(liste, self.header,np.asarray(newar,dtype=object))
def remove_outlier(self, **kwargs):
"""
DEFINITION:
Flags outliers in data, uses quartiles.
Notes: Position of flag in flagstring:
f (intensity): pos 0
x,y,z (vector): pos 1
other (vector): pos 2
Position of flag in flagstring
x : pos 0
y : pos 1
z : pos 2
f : pos 3
t1 : pos 4
t2 : pos 5
var1 : pos 6
var2: pos 7
Coding : 0 take, 1 remove, 2 force take, 3 force remove
Example:
0000000, 0001000, etc
012 = take f, automatically removed v, and force use of other
300 = force remove f, take v, and take other
PARAMETERS:
Variables:
- None.
Kwargs:
- keys: (list) List of keys to evaluate. Default=['f']
- threshold: (float) Determines threshold for outliers.
1.5 = standard
5 = keeps storm onsets in
4 = default, as a compromise.
- timerange: (timedelta Object) Time range. Default = timedelta(hours=1)
- markall : marks all data except forcing has already been applied
- stdout: prints removed values to stdout
RETURNS:
- stream: (DataStream Object) Stream with flagged data.
EXAMPLE:
>>> stream.remove_outlier(keys=['x','y','z'], threshold=2)
APPLICATION:
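(a minimal sketch; a larger threshold keeps storm onsets in, the timedelta import is assumed)
>>> flagged = stream.remove_outlier(keys=['f'], threshold=5, timerange=timedelta(hours=1))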
"""
# Defaults:
timerange = kwargs.get('timerange')
threshold = kwargs.get('threshold')
keys = kwargs.get('keys')
markall = kwargs.get('markall')
stdout = kwargs.get('stdout')
if not timerange:
timerange = timedelta(hours=1)
if not keys:
keys = ['f']
if not threshold:
threshold = 4.0
if not stdout:
stdout = False
# Position of flag in flagstring
# f (intensity): pos 0
# x,y,z (vector): pos 1
# other (vector): pos 2
logger.info('remove_outlier: Starting outlier removal.')
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype = True
arraytime = self.ndarray[0]
flagind = KEYLIST.index('flag')
commentind = KEYLIST.index('comment')
print ("Found ndarray - using flag_outlier instead")
return self.flag_outlier(**kwargs)
elif len(self) > 1:
arraytime = self._get_column('time')
else:
logger.warning('remove_outlier: No data - Stopping outlier removal.')
return self
# Working non-destructive
restream = self.copy()
# Start here with for key in keys:
for key in keys:
flagpos = FLAGKEYLIST.index(key)
st,et = self._find_t_limits()
st = date2num(st)
et = date2num(et)
at = date2num((num2date(st).replace(tzinfo=None)) + timerange)
incrt = at-st
newst = DataStream()
while st < et:
tmpar, idxst = find_nearest(arraytime,st)
tmpar, idxat = find_nearest(arraytime,at)
if idxat == len(arraytime)-1:
idxat = len(arraytime)
st = at
at += incrt
if ndtype:
ind = KEYLIST.index(key)
lstpart = self.ndarray[ind][idxst:idxat].astype(float)
print(lstpart)
print(np.isnan(lstpart))
selcol = lstpart[~np.isnan(lstpart)]
else:
lstpart = self[idxst:idxat]
# changed at 28.08.2014
#selcol = [eval('row.'+key) for row in lstpart]
selcol = [eval('row.'+key) for row in lstpart if not isnan(eval('row.'+key))]
try:
q1 = stats.scoreatpercentile(selcol,25)
q3 = stats.scoreatpercentile(selcol,75)
iqd = q3-q1
md = np.median(selcol)
whisker = threshold*iqd
except:
try:
md = np.median(selcol)
whisker = md*0.005
except:
logger.warning("remove_outlier: Eliminate outliers produced a problem: please check.")
pass
if ndtype:
# XXX DOES NOT WORK, TODO
for i in range(idxst,idxat):
if row.flag == '' or row.flag == '0000000000000000-' or row.flag == '-' or row.flag == '-0000000000000000':
row.flag = '-' * len(FLAGKEYLIST)
if row.comment == '-':
row.comment = ''
else:
for elem in lstpart:
row = LineStruct()
row = elem
if row.flag == '' or row.flag == '0000000000000000-' or row.flag == '-' or row.flag == '-0000000000000000':
#row.flag = '0000000000000000-'
row.flag = '-----------------'
if row.comment == '-':
row.comment = ''
if isNumber(row.flag): # if somehow the flag has been transfered to a number - create a string again
num = str(int(row.flag))[:-1]
row.flag = num+'-'
if not md-whisker < eval('elem.'+key) < md+whisker:
fllist = list(row.flag)
#print "Found", key
if len(fllist) >= flagpos:
fllist = np.asarray(fllist, dtype=object)
if not fllist[flagpos] in [1,2,3,4] :
if markall:
#print "mark"
fl = []
for j,f in enumerate(FLAGKEYLIST):
if f in keys:
fl.append('1')
else:
fl.append('-')
for idx, el in enumerate(fllist):
if el in [1,2,3,4]:
fl[idx] = el
fllist = fl
fllist[flagpos] = '1'
row.flag=''.join(fllist)
row.comment = "aof - threshold: %s, window: %s sec" % (str(threshold), str(timerange.total_seconds()))
#print row.flag, key
if not isnan(eval('elem.'+key)):
infoline = "remove_outlier: at %s - removed %s (= %f)" % (str(num2date(elem.time)),key, eval('elem.'+key))
logger.info(infoline)
if stdout:
print(infoline)
else:
fllist = list(row.flag)
if len(fllist) >= flagpos:
if row.flag == '':
pass
elif fllist[flagpos] == '-':
testlst = [el for el in fllist if el in ['0','1','2','3','4']]
if not len(testlst) > 0:
row.flag = ''
else:
pass
newst.add(row)
logger.info('remove_outlier: Outlier removal finished.')
if ndtype:
return restream
else:
return DataStream(newst, self.header, self.ndarray)
def resample(self, keys, debugmode=False,**kwargs):
"""
DEFINITION:
Uses interpolate.interp1d (scipy) to resample the stream to the requested period.
Two methods:
fast: only valid if the time stamps at which resampling is conducted are part of the
original time series, e.g. original = seconds (58,59,0,1,2) resampled at 0.
slow: general method used if the resampling time stamps are not contained in the
original series (e.g. 58.23, 59.24, 0.23, ...), resampled at 0.
PARAMETERS:
Variables:
- keys: (list) keys to be resampled.
Kwargs:
- period: (float) sampling period in seconds, e.g. 5s (0.2 Hz).
- fast: (bool) use fast approximation
- offset: (timedelta) shifts the start of the resampling grid, e.g. timedelta(seconds=30)
RETURNS:
- stream: (DataStream object) Stream containing resampled data.
EXAMPLE:
>>> resampled_stream = pos_data.resample(['f'],period=1)
APPLICATION:
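(a minimal sketch: resample one-second data to one-minute values; the fast path is only
valid if the minute marks are contained in the original time column)
>>> minutedata = secdata.resample(['x','y','z','f'], period=60, fast=True)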
"""
period = kwargs.get('period')
fast = kwargs.get('fast')
offset = kwargs.get('offset')
if not period:
period = 60.
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype = True
sp = self.samplingrate()
logger.info("resample: Resampling stream of sampling period %s to period %s." % (sp,period))
logger.info("resample: Resampling keys %s " % (','.join(keys)))
# Determine the minimum time
t_min,t_max = self._find_t_limits()
t_start = t_min
if offset:
t_min = ceil_dt(t_min,period)
if t_min - offset > t_start:
t_min = t_min -offset
else:
t_min = t_min +offset
startperiod, line = self.findtime(t_min)
else:
t_min = ceil_dt(t_min,period)
startperiod, line = self.findtime(t_min)
if fast: # To be done if timesteps are at period timesteps
try:
logger.info("resample: Using fast algorithm.")
si = timedelta(seconds=sp)
sampling_period = si.seconds
if period <= sampling_period:
logger.warning("resample: Resampling period must be larger or equal than original sampling period.")
return self
if debugmode:
print ("Trying fast algorythm")
print ("Projected period and Sampling period:", period, sampling_period)
if not line == [] or ndtype: # or (ndtype and not line == []):
xx = int(np.round(period/sampling_period))
if ndtype:
newstream = DataStream([LineStruct()],{},np.asarray([]))
newstream.header = self.header
lst = []
for ind,elem in enumerate(self.ndarray):
if debugmode:
print ("dealing with column", ind, elem)
if len(elem) > 0:
lst.append(np.asarray(elem[startperiod::xx]))
else:
lst.append(np.asarray([]))
newstream.ndarray = np.asarray(lst)
else:
newstream = DataStream([],{},np.asarray([[] for el in KEYLIST]))
newstream.header = self.header
for line in self[startperiod::xx]:
newstream.add(line)
newstream.header['DataSamplingRate'] = str(period) + ' sec'
return newstream
logger.warning("resample: Fast resampling failed - switching to slow mode")
except:
logger.warning("resample: Fast resampling failed - switching to slow mode")
pass
# This is done if timesteps are not at period intervals
# -----------------------------------------------------
if debugmode:
print ("General -slow- resampling")
# Create a list containing time steps
#t_max = num2date(self._get_max('time'))
t_list = []
time = t_min
while time <= t_max:
t_list.append(date2num(time))
time = time + timedelta(seconds=period)
# Compare the length of the new time list with the original time list:
# the multiplicator maps an index of the resampled series to the approximate index in the
# original data and is used to check whether the original value at that position is NaN
# (an approximate but sufficiently accurate missing-value treatment)
if not len(t_list) > 0:
return DataStream()
multiplicator = float(self.length()[0])/float(len(t_list))
logger.info("resample a: {},{},{}".format(float(self.length()[0]), float(len(t_list)),startperiod))
#print ("Times:", self.ndarray[0][0],self.ndarray[0][-1],t_list[0],t_list[-1])
stwithnan = self.copy()
# What is this good for (leon 17.04.2019)???
tmp = self.trim(starttime=736011.58337400458,endtime=736011.59721099539)
logger.info("resample test: {}".format(tmp.ndarray))
#tcol = stwithnan.ndarray[0]
res_stream = DataStream()
res_stream.header = self.header
array=[np.asarray([]) for elem in KEYLIST]
if ndtype:
array[0] = np.asarray(t_list)
res_stream.add(LineStruct())
else:
for item in t_list:
row = LineStruct()
row.time = item
res_stream.add(row)
for key in keys:
if debugmode:
print ("Resampling:", key)
if key not in KEYLIST[1:16]:
logger.warning("resample: Key %s not supported!" % key)
index = KEYLIST.index(key)
try:
#print (len(self._get_column(key)), multiplicator)
int_data = self.interpol([key],kind='linear')#'cubic')
int_func = int_data[0]['f'+key]
int_min = int_data[1]
int_max = int_data[2]
key_list = []
for ind, item in enumerate(t_list):
# normalized time range between 0 and 1
functime = (item - int_min)/(int_max - int_min)
# check whether original value is np.nan (as interpol method does not account for that)
# exact but slowly: idx = np.abs(tcol-item).argmin()
# orgval = stwithnan.ndarray[index][idx]
# reduce the index range as below
if ndtype:
if int(ind*multiplicator) <= len(self.ndarray[index]):
#orgval = self.ndarray[index][int(ind*multiplicator)]
estimate = False
# Please note: here a two techniques (exact and estimate)
# Speeddiff (example data set (500000 data points)
# Exact: 7.55 sec (including one minute filter)
# Estimate: 7.15 sec
if estimate:
orgval = stwithnan.ndarray[index][int(ind*multiplicator+startperiod)] # + offset
else:
# Exact solution:
mv = int(ind*multiplicator+startperiod)
stv = mv-int(20*multiplicator)
if stv < 0:
stv = 0
etv = mv+int(20*multiplicator)
if etv >= len(self.ndarray[index]):
etv = len(self.ndarray[index])
subar = stwithnan.ndarray[0][stv:etv]
idx = (np.abs(subar-item)).argmin()
#subar = stwithnan.ndarray[index][stv:etv]
orgval = stwithnan.ndarray[index][stv+idx] # + offset
#if item > 736011.58337400458 and item < 736011.59721099539:
# print ("Found", item, stv+idx, idx, orgval)
#if np.isnan(orgval):
# print (stv+idx, stv, etv)
else:
print("Check Resampling method")
orgval = 1.0
else:
orgval = getattr(stwithnan[int(ind*multiplicator+startperiod)],key)
tempval = np.nan
# Not a safe fix, but appears to cover decimal leftover problems
# (e.g. functime = 1.0000000014, which raises an error)
if functime > 1.0:
functime = 1.0
if not isnan(orgval):
tempval = int_func(functime)
key_list.append(float(tempval))
if ndtype:
array[index] = np.asarray(key_list)
else:
res_stream._put_column(key_list,key)
except:
logger.error("resample: Error interpolating stream. Stream either too large or no data for selected key")
res_stream.ndarray = np.asarray(array,dtype=object)
logger.info("resample: Data resampling complete.")
#return DataStream(res_stream,self.headers)
res_stream.header['DataSamplingRate'] = str(period) + ' sec'
return res_stream
def rotation(self,**kwargs):
"""
DEFINITION:
Rotation matrix for rotating x,y,z to new coordinate system xs,ys,zs using angles alpha and beta
PARAMETERS:
Variables:
Kwargs:
- alpha: (float) The horizontal rotation in degrees
- beta: (float) The vertical rotation in degrees
- keys: (list) provide an alternative vector to rotate - default is ['x','y','z']
keys are only supported from 1.0 onwards (ndarray)
RETURNS:
- self: (DataStream) The rotated stream
EXAMPLE:
>>> data.rotation(alpha=2.74)
APPLICATION:
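(a minimal sketch; the keys option is only supported for ndarray-based streams)
>>> data = data.rotation(alpha=2.74, beta=-1.2, keys=['x','y','z'])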
"""
unit = kwargs.get('unit')
alpha = kwargs.get('alpha')
beta = kwargs.get('beta')
keys = kwargs.get('keys')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
if not alpha:
alpha = 0.
if not beta:
beta = 0.
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
logger.error('rotation: provided keylist need to have three components.')
return self
logger.info('rotation: Applying rotation matrix.')
"""
a[0][0] = cos(p)*cos(b);
a[0][1] = -sin(b);
a[0][2] = sin(p)*cos(b);
a[1][0] = cos(p)*sin(b);
a[1][1] = cos(b);
a[1][2] = sin(p)*sin(b);
a[2][0] = -sin(p);
a[2][1] = 0.0;
a[2][2] = cos(p);
xyz.l = ortho.l*a[0][0]+ortho.m*a[0][1]+ortho.n*a[0][2];
xyz.m = ortho.l*a[1][0]+ortho.m*a[1][1]+ortho.n*a[1][2];
xyz.n = ortho.l*a[2][0]+ortho.m*a[2][1]+ortho.n*a[2][2];
"""
ind1 = KEYLIST.index(keys[0])
ind2 = KEYLIST.index(keys[1])
ind3 = KEYLIST.index(keys[2])
if len(self.ndarray[0]) > 0:
if len(self.ndarray[ind1]) > 0 and len(self.ndarray[ind2]) > 0 and len(self.ndarray[ind3]) > 0:
ra = np.pi*alpha/(180.*ang_fac)
rb = np.pi*beta/(180.*ang_fac)
xar = self.ndarray[ind1].astype(float)*np.cos(rb)*np.cos(ra)-self.ndarray[ind2].astype(float)*np.sin(ra)+self.ndarray[ind3].astype(float)*np.sin(rb)*np.cos(ra)
yar = self.ndarray[ind1].astype(float)*np.cos(rb)*np.sin(ra)+self.ndarray[ind2].astype(float)*np.cos(ra)+self.ndarray[ind3].astype(float)*np.sin(rb)*np.sin(ra)
zar = -self.ndarray[ind1].astype(float)*np.sin(rb)+self.ndarray[ind3].astype(float)*np.cos(rb)
self.ndarray[ind1] = xar
self.ndarray[ind2] = yar
self.ndarray[ind3] = zar
"""
for elem in self:
ra = np.pi*alpha/(180.*ang_fac)
rb = np.pi*beta/(180.*ang_fac)
# Testing the conservation of f ##### Error corrected in May 2014 by leon
#fbefore = sqrt(elem.x**2+elem.y**2+elem.z**2)
xs = elem.x*np.cos(rb)*np.cos(ra)-elem.y*np.sin(ra)+elem.z*np.sin(rb)*np.cos(ra)
ys = elem.x*np.cos(rb)*np.sin(ra)+elem.y*np.cos(ra)+elem.z*np.sin(rb)*np.sin(ra)
zs = -elem.x*np.sin(rb)+elem.z*np.cos(rb)
#fafter = sqrt(xs**2+ys**2+zs**2)
#print "f:", fbefore,fafter,fbefore-fafter
elem.x = xs
elem.y = ys
elem.z = zs
"""
logger.info('rotation: Finished reorientation.')
return self
def scale_correction(self, keys, scales, **kwargs):
"""
DEFINITION:
multiplies the selected keys by the given scale values
PARAMETERS:
Kwargs:
- offset: (array) containing constant offsets for the given keys
RETURNS:
- DataStream
EXAMPLES:
>>> stream = stream.scale_correction(['x','y','z'],[1,0.988,1])
"""
print("Function will be removed - use e.g. self.multiply({'y': 0.988}) instead")
# Take care: if there is only 0.1 nT accurracy then there will be a similar noise in the deltaF signal
offset = kwargs.get('offset')
if not offset:
offset = [0]*len(keys)
else:
if not len(offset) == len(keys):
logger.error('scale_correction: offset with wrong dimension given - needs to have the same length as given keys - returning stream without changes')
return self
try:
assert len(self) > 0
except:
logger.error('scale_correction: empty stream - aborting')
return self
offsetlst = []
for key in KEYLIST:
if key in keys:
pos = keys.index(key)
offsetlst.append(offset[pos])
else:
offsetlst.append(0.0)
logger.info('scale_correction: --- Scale correction started at %s ' % str(datetime.now()))
for elem in self:
for i,key in enumerate(keys):
exec('elem.'+key+' = (elem.'+key+'+offset[i]) * scales[i]')
scalelst = []
for key in KEYLIST:
if key in keys:
pos = keys.index(key)
scalelst.append(scales[pos])
else:
scalelst.append(1.)
#print '_'.join(map(str,offsetlst)), scalelst
self.header['DataScaleValues'] = '_'.join(map(str,scalelst))
self.header['DataOffsets'] = '_'.join(map(str,offsetlst))
logger.info('scale_correction: --- Scale correction finished at %s ' % str(datetime.now()))
return self
def selectkeys(self, keys, **kwargs):
"""
DEFINITION:
Take data stream and remove all except the provided keys from ndarray
RETURNS:
- self: (DataStream) with ndarray limited to keys
EXAMPLE:
>>> keydata = fulldata.selectkeys(['x','y','z'])
APPLICATION:
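(a minimal sketch; noflags=True additionally drops the flag and comment columns)
>>> keydata = fulldata.selectkeys(['x','y','z'], noflags=True)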
"""
noflags = kwargs.get('noflags')
stream = self.copy()
if not 'time' in keys:
ti = ['time']
ti.extend(keys)
keys = ti
if len(stream.ndarray[0]) > 0:
# Check for flagging and comment column
if not noflags:
flagidx = KEYLIST.index('flag')
commentidx = KEYLIST.index('comment')
if len(stream.ndarray[flagidx]) > 0:
keys.append('flag')
if len(stream.ndarray[commentidx]) > 0:
keys.append('comment')
# Remove all missing
for idx, elem in enumerate(stream.ndarray):
if not KEYLIST[idx] in keys:
stream.ndarray[idx] = np.asarray([])
return stream
else:
return stream
def smooth(self, keys=None, **kwargs):
"""
DEFINITION:
Smooth the data using a window with requested size.
(taken from Cookbook/Signal Smooth)
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the begining and end part of the output signal.
PARAMETERS:
Variables:
- keys: (list) List of keys to smooth
Kwargs:
- window_len: (int,odd) dimension of the smoothing window
- window: (str) the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'. A flat window will produce a moving average smoothing.
(See also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter)
RETURNS:
- self: (DataStream) The smoothed signal
EXAMPLE:
>>> nice_data = bad_data.smooth(['x','y','z'])
or (standalone cookbook version of the smoothing function, for illustration):
>>> t=linspace(-2,2,50)
>>> x=sin(t)+randn(len(t))*0.1
>>> y=smooth(x)
APPLICATION:
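(a minimal sketch: a 30-point flat window corresponds to a running mean over 30 samples)
>>> smoothed = stream.smooth(['x','y','z'], window='flat', window_len=30)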
TODO:
the window parameter could be the window itself if an array instead of a string
"""
# Defaults:
window_len = kwargs.get('window_len')
window = kwargs.get('window')
if not window_len:
window_len = 11
if not window:
window='hanning'
if not keys:
keys=self._get_key_headers(numerical=True)
window_len = int(window_len)
ndtype = False
if len(self.ndarray[0])>0:
ndtype = True
logger.info('smooth: Start smoothing (%s window, width %d) at %s' % (window, window_len, str(datetime.now())))
for key in keys:
if key in NUMKEYLIST:
if ndtype:
ind = KEYLIST.index(key)
x = self.ndarray[ind]
else:
x = self._get_column(key)
x = maskNAN(x)
if x.ndim != 1:
logger.error("smooth: Only accepts 1 dimensional arrays.")
if x.size < window_len:
print(x.size, window_len)
logger.error("smooth: Input vector needs to be bigger than window size.")
if window_len<3:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
logger.error("smooth: Window is none of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
logger.debug("smooth: You entered string %s as a window." % window)
s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w=eval('np.'+window+'(window_len)')
y=np.convolve(w/w.sum(),s,mode='valid')
if ndtype:
self.ndarray[ind] = np.asarray(y[(int(window_len/2)):(len(x)+int(window_len/2))])
else:
self._put_column(y[(int(window_len/2)):(len(x)+int(window_len/2))],key)
else:
logger.error("Column key %s not valid." % key)
logger.info('smooth: Finished smoothing at %s' % (str(datetime.now())))
return self
def spectrogram(self, keys, per_lap=0.9, wlen=None, log=False,
outfile=None, fmt=None, axes=None, dbscale=False,
mult=8.0, cmap=None, zorder=None, title=None, show=True,
sphinx=False, clip=[0.0, 1.0], **kwargs):
"""
Creates a spectrogram plot of selected keys.
Parameter description at function obspyspectrogram
keywords:
samp_rate_multiplicator: factor applied to the sampling period (given in days) to derive the
frequency scale; default is 24*3600, i.e. the sampling rate is interpreted in Hz
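example (a minimal sketch; one figure is produced per key, parameter values are illustrative):
>>> stream.spectrogram(['x'], wlen=300, dbscale=True, title='Spectrogram of x')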
"""
samp_rate_multiplicator = kwargs.get('samp_rate_multiplicator')
if not samp_rate_multiplicator:
samp_rate_multiplicator = 24*3600
t = self._get_column('time')
if not len(t) > 0:
logger.error('Spectrogram: stream of zero length -- aborting')
return
for key in keys:
val = self._get_column(key)
val = maskNAN(val)
dt = self.get_sampling_period()*(samp_rate_multiplicator)
Fs = float(1.0/dt)
self.obspyspectrogram(val,Fs, per_lap=per_lap, wlen=wlen, log=log,
outfile=outfile, fmt=fmt, axes=axes, dbscale=dbscale,
mult=mult, cmap=cmap, zorder=zorder, title=title, show=show,
sphinx=sphinx, clip=clip)
def steadyrise(self, key, timewindow, **kwargs):
"""
DEFINITION:
Method determines the absolute increase within a data column
and a selected time window
neglecting any resets and decreasing trends
- used for analyzing some rain senors
PARAMETERS:
key: (key) column on which the process is performed
timewindow: (timedelta) define the window e.g. timedelta(minutes=15)
Kwargs:
sensitivitylevel: (float) define a difference which two successive
points need to exceed to be used
(useful if you have some numeric noise)
RETURNS:
- column: (array) column with length of th stream
containing timewindow blocks of stacked data.
EXAMPLE:
>>> col = stream.steadyrise('t1', timedelta(minutes=60),sensitivitylevel=0.002)
"""
sensitivitylevel = kwargs.get('sensitivitylevel')
prevval = 9999999999999.0
stacked = 0.0
count = 0
rescol = []
testcol = []
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype = True
ind = KEYLIST.index(key)
if ndtype and len(self.ndarray[ind]) > 0:
startt = num2date(np.min(self.ndarray[0]))
for idx,val in enumerate(self.ndarray[ind]):
if num2date(self.ndarray[0][idx]) < startt+timewindow:
if prevval < val:
diff = val-prevval
if not sensitivitylevel:
stacked += val-prevval
elif diff > sensitivitylevel:
stacked += val-prevval
count += 1
else:
for i in range(count+1):
rescol.append(stacked)
count = 0
# now put that results back to a column
startt = startt+timewindow
stacked = 0.0
prevval = val
elif not ndtype:
startt = num2date(self[0].time)
for elem in self:
testcol.append(elem)
if num2date(elem.time) < startt+timewindow:
val = eval('elem.'+key)
if prevval < val:
diff = val-prevval
if not sensitivitylevel:
stacked += val-prevval
elif diff > sensitivitylevel:
stacked += val-prevval
count += 1
else:
for i in range(count+1):
rescol.append(stacked)
count = 0
# now put that results back to a column
startt = startt+timewindow
val = eval('elem.'+key)
stacked = 0.0
prevval = val
else:
print("steadyrise: no data found in selected column %s" % key)
return np.asarray([])
# Finally fill the end
for i in range(count):
rescol.append(stacked)
if not len(rescol) == len(self) and not len(rescol) == len(self.ndarray[0]) :
logger.error('steadyrise: An error leading to unequal lengths has been encountered')
return []
return np.asarray(rescol)
def stereoplot(self, **kwargs):
"""
DEFINITION:
plots a dec and inc values in stereographic projection
will abort if no idff typ is provided
full circles denote positive inclinations, open negative
PARAMETERS:
variable:
- stream (DataStream) a magpy datastream object
kwargs:
- focus: (string) defines the plot area - can be either:
all - -90 to 90 deg inc, 360 deg dec (default)
q1 - first quadrant
q2 - first quadrant
q3 - first quadrant
q4 - first quadrant
data - focus on data (if angular spread is less then 10 deg
- groups (KEY) - key of keylist which defines color of points
(e.g. ('str2') in absolutes to select
different colors for different instruments
- legend (bool) - draws legend only if groups is given - default True
- legendposition (string) - draws the legend at chosen position (e.g. "upper right", "lower center") - default is "lower left"
- labellimit (integer)- maximum length of label in legend
- noshow: (bool) don't call show at the end, just returns figure handle
- outfile: (string) to save the figure, if path is not existing it will be created
- gridcolor: (string) Define grid color e.g. '0.5' greyscale, 'r' red, etc
- savedpi: (integer) resolution
- figure: (bool) True for GUI
REQUIRES:
- package operator for color selection
RETURNS:
- plot
ToDo:
- add alpha 95 calc
EXAMPLE:
>>> stream.stereoplot(focus='data',groups='str2')
"""
focus = kwargs.get('focus')
groups = kwargs.get('groups')
bgcolor = kwargs.get('bgcolor')
colorlist = kwargs.get('colorlist')
outfile = kwargs.get('outfile')
savedpi = kwargs.get('savedpi')
gridinccolor = kwargs.get('gridinccolor')
griddeccolor = kwargs.get('griddeccolor')
noshow = kwargs.get('noshow')
legend = kwargs.get('legend')
legendposition = kwargs.get('legendposition')
labellimit = kwargs.get('labellimit')
figure = kwargs.get('figure')
fmt = kwargs.get('fmt')
if not colorlist:
colorlist = ['b','r','g','c','m','y','k']
if not bgcolor:
bgcolor = '#d5de9c'
if not griddeccolor:
griddeccolor = '#316931'
if not gridinccolor:
gridinccolor = '#316931'
if not savedpi:
savedpi = 80
if not focus:
focus = 'all'
if not legend:
legend = 'True'
if not labellimit:
labellimit = 11
if not legendposition:
legendposition = "lower left"
if not self[0].typ == 'idff':
logger.error('Stereoplot: you need to provide idf data')
return
inc = self._get_column('x')
dec = self._get_column('y')
col = ['']
if groups:
sel = self._get_column(groups)
col = list(set(list(sel)))
if len(col) > 7:
col = col[:7]
if not len(dec) == len(inc):
logger.error('Stereoplot: check you data file - unequal inc and dec data?')
return
if not figure:
fig = plt.figure()
else:
fig = figure
ax = plt.gca()
ax.cla() # clear things for fresh plot
ax.set_aspect('equal')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xticks([])
ax.set_yticks([])
# Define koordinates:
basic1=plt.Circle((0,0),90,color=bgcolor,fill=True)
basic1a=plt.Circle((0,0),90,color=gridinccolor,fill=False)
basic2=plt.Circle((0,0),30,color=gridinccolor,fill=False,linestyle='dotted')
basic3=plt.Circle((0,0),60,color=gridinccolor,fill=False,linestyle='dotted')
basic4=plt.Line2D([0,0],[-90,90],color=griddeccolor,linestyle='dashed')
basic5=plt.Line2D([-90,90],[0,0],color=griddeccolor,linestyle='dashed')
fig.gca().add_artist(basic1)
fig.gca().add_artist(basic1a)
fig.gca().add_artist(basic2)
fig.gca().add_artist(basic3)
fig.gca().add_artist(basic4)
fig.gca().add_artist(basic5)
for j in range(len(col)):
color = colorlist[j]
xpos,ypos,xneg,yneg,xabs,y = [],[],[],[],[],[]
for i,el in enumerate(inc):
if groups:
if sel[i] == col[j]:
coinc = 90-np.abs(el)
sindec = np.sin(np.pi/180*dec[i])
cosdec = np.cos(np.pi/180*dec[i])
xabs.append(coinc*sindec)
y.append(coinc*cosdec)
if el < 0:
xneg.append(coinc*sindec)
yneg.append(coinc*cosdec)
else:
xpos.append(coinc*sindec)
ypos.append(coinc*cosdec)
else:
coinc = 90-np.abs(el)
sindec = np.sin(np.pi/180*dec[i])
cosdec = np.cos(np.pi/180*dec[i])
xabs.append(coinc*sindec)
y.append(coinc*cosdec)
if el < 0:
xneg.append(coinc*sindec)
yneg.append(coinc*cosdec)
else:
xpos.append(coinc*sindec)
ypos.append(coinc*cosdec)
xmax = np.ceil(max(xabs))
xmin = np.floor(min(xabs))
xdif = xmax-xmin
ymax = np.ceil(max(y))
ymin = np.floor(min(y))
ydif = ymax-ymin
maxdif = max([xdif,ydif])
mindec = np.floor(min(dec))
maxdec = np.ceil(max(dec))
mininc = np.floor(min(np.abs(inc)))
maxinc = np.ceil(max(np.abs(inc)))
if focus == 'data' and maxdif <= 10:
# decs
startdec = mindec
decline,inclst = [],[]
startinc = mininc
incline = []
while startdec <= maxdec:
xl = 90*np.sin(np.pi/180*startdec)
yl = 90*np.cos(np.pi/180*startdec)
decline.append([xl,yl,startdec])
startdec = startdec+1
while startinc <= maxinc:
inclst.append(90-np.abs(startinc))
startinc = startinc+1
if focus == 'all':
ax.set_xlim((-90,90))
ax.set_ylim((-90,90))
if focus == 'q1':
ax.set_xlim((0,90))
ax.set_ylim((0,90))
if focus == 'q2':
ax.set_xlim((-90,0))
ax.set_ylim((0,90))
if focus == 'q3':
ax.set_xlim((-90,0))
ax.set_ylim((-90,0))
if focus == 'q4':
ax.set_xlim((0,90))
ax.set_ylim((-90,0))
if focus == 'data':
ax.set_xlim((xmin,xmax))
ax.set_ylim((ymin,ymax))
#ax.annotate('Test', xy=(1.2, 25.2))
ax.plot(xpos,ypos,'o',color=color, label=col[j][:labellimit])
ax.plot(xneg,yneg,'o',color='white')
ax.annotate('60', xy=(0, 30))
ax.annotate('30', xy=(0, 60))
ax.annotate('0', xy=(0, 90))
ax.annotate('90', xy=(90, 0))
ax.annotate('180', xy=(0, -90))
ax.annotate('270', xy=(-90, 0))
if focus == 'data' and maxdif <= 10:
for elem in decline:
pline = plt.Line2D([0,elem[0]],[0,elem[1]],color=griddeccolor,linestyle='dotted')
xa = elem[0]/elem[1]*((ymax - ymin)/2+ymin)
ya = (ymax - ymin)/2 + ymin
annotext = "D:%i" % int(elem[2])
ax.annotate(annotext, xy=(xa,ya))
fig.gca().add_artist(pline)
for elem in inclst:
pcirc = plt.Circle((0,0),elem,color=gridinccolor,fill=False,linestyle='dotted')
xa = (xmax-xmin)/2 + xmin
ya = sqrt((elem*elem)-(xa*xa))
annotext = "I:%i" % int(90-elem)
ax.annotate(annotext, xy=(xa,ya))
fig.gca().add_artist(pcirc)
if groups and legend:
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels),key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=legendposition)
# 5. SAVE TO FILE (or show)
if figure:
return ax
if outfile:
path = os.path.split(outfile)[0]
if not path == '':
if not os.path.exists(path):
os.makedirs(path)
if fmt:
fig.savefig(outfile, format=fmt, dpi=savedpi)
else:
fig.savefig(outfile, dpi=savedpi)
elif noshow:
return fig
else:
plt.show()
def trim(self, starttime=None, endtime=None, newway=False):
"""
DEFINITION:
Removing dates outside of range between start- and endtime.
Returned stream has range starttime <= range < endtime.
PARAMETERS:
Variables:
- starttime: (datetime/str) Start of period to trim with
- endtime: (datetime/str) End of period to trim to
Kwargs:
- newway: (bool) Testing method for non-destructive trimming
RETURNS:
- stream: (DataStream object) Trimmed stream
EXAMPLE:
>>> data = data.trim(starttime, endtime)
APPLICATION:
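For example (illustrative only, with arbitrary dates):
>>> daydata = data.trim(starttime='2013-01-01', endtime='2013-01-02')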
"""
if starttime and endtime:
if self._testtime(starttime) > self._testtime(endtime):
logger.error('Trim: Starttime (%s) is larger than endtime (%s).' % (starttime,endtime))
raise ValueError("Starttime is larger than endtime.")
logger.info('Trim: Started from %s to %s' % (starttime,endtime))
ndtype = False
if self.ndarray[0].size > 0:
ndtype = True
self.container = [LineStruct()]
#-ndarrray---------------------------------------
if not newway:
newarray = list(self.ndarray) # Converting array to list - better for append and other item functions (because it is not type sensitive)
else:
newstream = self.copy()
newarray = list(newstream.ndarray)
if starttime:
starttime = self._testtime(starttime)
if newarray[0].size > 0: # time column present
idx = (np.abs(newarray[0].astype(float)-date2num(starttime))).argmin()
# Trim should start at point >= starttime, so check:
if newarray[0][idx] < date2num(starttime):
idx += 1
for i in range(len(newarray)):
if len(newarray[i]) >= idx:
newarray[i] = newarray[i][idx:]
if endtime:
endtime = self._testtime(endtime)
if newarray[0].size > 0: # time column present
idx = 1 + (np.abs(newarray[0].astype(float)-date2num(endtime))).argmin() # get the nearest index to endtime and add 1 (to get lengths correctly)
#idx = 1+ (np.abs(self.ndarray[0]-date2num(endtime))).argmin() # get the nearest index to endtime
if idx >= len(newarray[0]): ## prevent too large idx values
idx = len(newarray[0]) - 1
while True:
if not float(newarray[0][idx]) < date2num(endtime) and idx != 0: # Make sure that last value is smaller than endtime
idx -= 1
else:
break
#self.ndarray = list(self.ndarray)
for i in range(len(newarray)):
length = len(newarray[i])
if length >= idx:
newarray[i] = newarray[i][:idx+1]
newarray = np.asarray(newarray,dtype=object)
#-ndarrray---------------------------------------
#--------------------------------------------------
if newway and not ndtype:
# Non-destructive trimming of stream
trimmedstream = DataStream()
trimmedstream.header = self.header
starttime = self._testtime(starttime)
endtime = self._testtime(endtime)
stval = 0
for idx, elem in enumerate(self):
newline = LineStruct()
if not isnan(elem.time):
if elem.time >= date2num(starttime) and elem.time < date2num(endtime):
newline.time = elem.time
for key in KEYLIST:
exec('newline.'+key+' = elem.'+key)
trimmedstream.add(newline)
return trimmedstream
#--------------------------------------------------
if not ndtype:
stream = DataStream()
if starttime:
# check starttime input
starttime = self._testtime(starttime)
stval = 0
for idx, elem in enumerate(self):
if not isnan(elem.time):
if num2date(elem.time).replace(tzinfo=None) > starttime.replace(tzinfo=None):
#stval = idx-1 # changed because of latex output
stval = idx
break
if stval < 0:
stval = 0
self.container = self.container[stval:]
# remove data prior to endtime input
if endtime:
# check endtime input
endtime = self._testtime(endtime)
edval = len(self)
for idx, elem in enumerate(self):
if not isnan(elem.time):
if num2date(elem.time).replace(tzinfo=None) > endtime.replace(tzinfo=None):
edval = idx
#edval = idx-1
break
self.container = self.container[:edval]
if ndtype:
return DataStream(self.container,self.header,newarray)
else:
return DataStream(self.container,self.header,self.ndarray)
def use_sectime(self, swap=False):
"""
DEFINITION:
Drop primary time stamp and replace by secondary time stamp if available.
If swap is True, then primary time stamp is moved to secondary column (and
not dropped).
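EXAMPLE (illustrative):
>>> stream = stream.use_sectime()          # secondary time replaces the primary time column
>>> stream = stream.use_sectime(swap=True) # original primary time is kept in the 'sectime' column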
"""
if not 'sectime' in self._get_key_headers():
logger.warning("use_sectime: did not find secondary time column in the streams keylist - returning unmodified timeseries")
return self
# Non destructive
stream = self.copy()
pos = KEYLIST.index('sectime')
tcol = stream.ndarray[0]
stream = stream._move_column('sectime','time')
if swap:
stream = stream._put_column(tcol,'sectime')
else:
stream = stream._drop_column('sectime')
return stream
def variometercorrection(self, variopath, thedate, **kwargs):
"""
DEFINITION:
##### THIS METHOD IS USELESS....
##### Either select a certain time in absolute calculation (TODO)
##### or calculate daily means of basevalues which are already corrected for
##### variation --- leon 2016-03
Function to perform a variometercorrection of an absresult stream
towards the given datetime using the given variometer stream.
Returns a new absresult object with new datetime and corrected values
APPLICATION:
Useful to compare various absolute measurements, e.g. from one day, and analyse their
differences after correcting them to a single spot in time.
PARAMETERS:
Variables:
- variodata: (DataStream) data to be used for reduction
- endtime: (datetime/str) End of period to trim to
Kwargs:
- funckeys: (list) keys of the variometerfile which are interpolated and used
- nomagorient: (bool) indicates that variometerdata is NOT in magnetic
coordinates (hez) - Method will then use header info
in DataRotationAlpha and Beta
RETURNS:
- stream: (DataStream object) absolute stream - corrected
EXAMPLE:
>>> newabsdata = absdata.variometercorrection(starttime, endtime)
APPLICATION:
"""
funckeys = kwargs.get('funckeys')
offset = kwargs.get('offset')
nomagorient = kwargs.get('nomagorient')
if not offset:
offset = 0.0
dateform = "%Y-%m-%d"
def getfuncvals(variofunc,day):
# Put the following to a function
functime = (date2num(day)-variofunc[1])/(variofunc[2]-variofunc[1])
#print(functime, day, date2num(day),variofunc[1],variofunc[2])
refval = []
for key in funckeys:
if key in ['x','y','z']:
refval.append(variofunc[0]['f'+key](functime))
return refval
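# Note: functime normalizes the requested date onto the [0, 1] parameter range of the
# interpolation function (variofunc[1] and variofunc[2] hold its start and end times),
# so the same function can be evaluated at any instant within its validity interval.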
# Return results within a new streamobject containing only
# the average values and its uncertainties
resultstream = DataStream()
# Check for ndtype:
ndtype = False
if len(self.ndarray[0]) > 0:
timecol = self.ndarray[0]
ndtype = True
typus = self.header.get('DataComponents')
try:
typus = typus.lower()[:3]
except:
typus = ''
else:
timecol = self._get_column('time')
try:
typus = self[0].typ[:3]
except:
typus = ''
# 1 Convert absresult - idff to xyz ---- NOT NECESSARY
# test stream type (xyz, idf or hdz?)
# TODO add the end check whether streams are modified!!!!!!!!!!
#print("Variometercorrection", typus)
absstream = self.copy()
absstream = absstream.removeduplicates()
# 2 Convert datetime to number
# check whether thedate is a time (then use this time every day)
# or a full date
datelist = []
try:
# Check whether provided thedate is a date with time
datelist = [self._testtime(thedate)]
print("Variometercorrection: using correction to single provided datetime", datelist[0])
except:
try:
# Check whether provided thedate is only time
tmpdatelst = [datetime.date(num2date(elem)) for elem in timecol]
tmpdatelst = list(set(tmpdatelst))
dummydatedt = self._testtime('2016-11-22T'+thedate)
datelist = [datetime.combine(elem, datetime.time(dummydatedt)) for elem in tmpdatelst]
except:
print("Variometercorrection: Could not interpret the provided date/time - aborting - used dateformat should be either 12:00:00 or 2016-11-22 12:00:00 - provided:", thedate)
return self
if len(datelist) == 1:
print("Variometercorrection: Transforming all provided absolute data towards", datelist[0])
elif len(datelist) > 1:
print("Variometercorrection: Correcting all absolute data of individual days towards time", datetime.strftime(datelist[0],"%H:%M:%S"))
else:
print("Variometercorrection: No correction date found - aborting")
return self
for day in datelist:
print("Variocorrection: dealing with {}".format(day))
# 1. Select the appropriate values from self
if len(datelist) == 1:
usedabsdata = absstream
st, et = absstream._find_t_limits()
else:
st = str(datetime.date(day))
et = str(datetime.date(day+timedelta(days=1)))
usedndarray = absstream._select_timerange(starttime=st, endtime=et)
usedabsdata = DataStream([LineStruct()],self.header,usedndarray)
#print(date, num2date(usedabsdata.ndarray[0]))
# 2. Read variation data for respective date
vario = read(variopath, starttime=st, endtime=et)
print("Variocorrection: loaded {} data points".format(vario.length()[0]))
#print("Variocorrection: Please note - we are assuming that the provided variometerdata records the field in magnetic coordinates in nT (e.g. HEZ). In case of geographic xyz records one can activate a kwarg: takes provided rotation angle or (if not existing) the declination value of abs data")
# 3. Check DataComponents: we need pure variation data
comps = vario.header.get('DataComponents')
try:
comps = comps.lower()[:3]
except:
comps = ''
if comps in ['xyz','idf','hdz']:
# Data is already in geographic coordinates
# Rotate back
if not comps == 'xyz':
vario = vario._convertstream(comps+'2xyz')
nomagorient = True
else:
nomagorient = False
# 4. TODO TEST! Eventually rotate the data to hez
if nomagorient:
rotaangle = vario.header.get('DataRotationAlpha')
rotbangle = vario.header.get('DataRotationBeta')
#print("Angles", rotaangle, rotbangle)
try:
rotaangle = float(rotaangle)
rotbangle = float(rotbangle)
except:
pass
if rotaangle in [None,np.nan,0.0]:
print("Variocorrection: Did not find DataRotationAlpha in header assuming xyz and rotation by minus declination")
rotaangle = -np.mean(usedabsdata.ndarray[2])
else:
try:
rotaangle = float(rotaangle)
except:
rotaangle = 0.
if not rotbangle in [None,'Null',np.nan,0.0]:
try:
rotbangle = float(rotbangle)
except:
rotbangle = 0.
print("Variocorrection: Rotating data by {a} and {b}".format(a=rotaangle,b=rotbangle))
vario = vario.rotation(alpha=rotaangle,beta=rotbangle)
if vario.length()[0] > 1 and len(usedabsdata.ndarray[0]) > 0:
variost, varioet = vario._find_t_limits()
# 4. Interpolating variation data
if not funckeys:
funckeys = []
keys = vario._get_key_headers(numerical=True)
for key in keys:
if key in ['x','y','z','f']:
funckeys.append(key)
variofunc = vario.interpol(funckeys)
refvals = getfuncvals(variofunc,day)
for idx,abstime in enumerate(usedabsdata.ndarray[0]):
variovalsatabstime = getfuncvals(variofunc,num2date(abstime))
diffs= np.asarray(refvals)-np.asarray(variovalsatabstime)
"""
if key == 'y':
#refy = np.arctan2(np.asarray(list(ar)),np.asarray(list(arrayx)))*180./np.pi + function[0]['f'+key](functime)
pass
elif key in ['x','z']:
pass
else:
pass
#refvals = funcattime(variofunc,date)
# 5. Get variofunc data for selected date and each usedabsdata
#for abstime in usedabsdata.ndarray[0]:
# if variost
#absst, abset = usedabsdata._find_t_limits()
"""
"""
if key == 'y':
#indx = KEYLIST.index('x')
#Hv + Hb; Db + atan2(y,H_corr) Zb + Zv
#print type(self.ndarray[ind]), key, self.ndarray[ind]
array[ind] = np.arctan2(np.asarray(list(ar)),np.asarray(list(arrayx)))*180./np.pi + function[0]['f'+key](functimearray)
self.header['col-y'] = 'd'
self.header['unit-col-y'] = 'deg'
else:
print("func2stream", function, function[0], function[0]['f'+key],functimearray)
array[ind] = ar + function[0]['f'+key](functimearray)
if key == 'x': # remember this for correct y determination
arrayx = array[ind]
"""
"""
for date in datelist:
newvallists=[]
for elem in absstream:
# if elem.time == date:
# if value existis in function:
# calnewvalues and append to lists
# calc means from lists
# append means to new stream
# 4 Test whether variostream covers the timerange between the abstream value(s) and the datetime
if function[1] <= elem.time <= function[2] and function[1] <= newdate <= function[2]:
valatorgtime = (elem.time-function[1])/(function[2]-function[1])
valatnewtime = (newdate-function[1])/(function[2]-function[1])
elem.time = newdate
for key in funckeys:
if not key in KEYLIST[1:15]:
raise ValueError, "Column key not valid"
fkey = 'f'+key
if fkey in function[0]:
try:
orgval = float(function[0][fkey](valatorgtime))
newval = float(function[0][fkey](valatnewtime))
diff = orgval - newval
except:
logger.error("variometercorrection: error in assigning new values")
return
exec('elem.'+key+' = elem.'+key+' - diff')
else:
pass
else:
logger.warning("variometercorrection: Variometer stream does not cover the projected time range")
pass
# 5 Convert absresult - xyzf to idff
absstream = absstream._convertstream('xyz2idf')
return absstream
"""
def _write_format(self, format_type, filenamebegins, filenameends, coverage, dateformat,year):
"""
DEFINITION:
Helper method to determine suggested write filenames.
Reads format_type and header info of self -> returns specifications
RETURNS:
filenamebegins
filenameends
coverage
dateformat
"""
# Preconfigure some fileformats - can be overwritten by keywords
if format_type == 'IMF':
dateformat = '%b%d%y'
try:
extension = (self.header.get('StationID','')).lower()
except:
extension = 'txt'
filenameends = '.'+extension
coverage = 'day'
if format_type == 'IAF':
try:
filenamebegins = (self.header.get('StationIAGAcode','')).upper()
except:
filenamebegins = 'XXX'
dateformat = '%y%b'
extension = 'BIN'
coverage = 'month'
filenameends = '.'+extension
if format_type == 'IYFV':
if not filenameends or filenameends=='.cdf':
head = self.header
code = head.get('StationIAGAcode','')
if not code == '':
filenameends = '.'+code.upper()
else:
filenameends = '.XXX'
if not filenamebegins:
filenamebegins = 'YEARMEAN'
dateformat = 'None'
coverage = 'year'
if format_type == 'IAGA':
dateformat = '%Y%m%d'
if not coverage == 'all':
coverage = 'day'
head = self.header
if not filenamebegins:
code = head.get('StationIAGAcode','')
if code == '':
code = head.get('StationID','')
if not code == '':
filenamebegins = code.lower()[:3]
if not filenameends or filenameends=='.cdf':
samprate = float(str(head.get('DataSamplingRate','0')).replace('sec','').strip())
plevel = head.get('DataPublicationLevel',0)
if int(samprate) == 1:
middle = 'sec'
elif int(samprate) == 60:
middle = 'min'
elif int(samprate) == 3600:
middle = 'hou'
else:
middle = 'lol'
if plevel == 4:
fed = 'd'+middle+'.'+middle
elif plevel == 3:
fed = 'q'+middle+'.'+middle
elif plevel == 2:
fed = 'p'+middle+'.'+middle
else:
fed = 'v'+middle+'.'+middle
filenameends = fed
if format_type == 'CSV':
if not filenameends:
filenameends = '.csv'
if format_type == 'IMAGCDF':
begin = (self.header.get('StationIAGAcode','')).lower()
if begin == '':
begin = (self.header.get('StationID','XYZ')).lower()
publevel = str(self.header.get('DataPublicationLevel',0))
samprate = float(str(self.header.get('DataSamplingRate','0')).replace('sec','').strip())
if coverage == 'year':
dfor = '%Y'
elif coverage == 'month':
dfor = '%Y%m'
else:
dfor = '%Y%m%d'
if int(samprate) == 1:
dateformat = dfor
middle = '_000000_PT1S_'
elif int(samprate) == 60:
dateformat = dfor
middle = '_0000_PT1M_'
elif int(samprate) == 3600:
dateformat = dfor
middle = '_00_PT1H_'
elif int(samprate) == 86400:
dateformat = dfor
middle = '_PT1D_'
elif int(samprate) > 30000000:
dateformat = '%Y'
middle = '_PT1Y_'
elif int(samprate) > 2400000:
dateformat = '%Y%m'
middle = '_PT1M_'
else:
dateformat = '%Y%m%d'
middle = 'unknown'
filenamebegins = begin+'_'
filenameends = middle+publevel+'.cdf'
if format_type == 'BLV':
if len(self.ndarray[0]) > 0:
lt = max(self.ndarray[0].astype(float))
else:
lt = self[-1].time
if year:
blvyear = str(year)
else:
blvyear = datetime.strftime(num2date(lt).replace(tzinfo=None),'%Y')
try:
filenamebegins = (self.header['StationID']).upper()+blvyear
except:
filenamebegins = 'XXX'+blvyear
filenameends = '.blv'
coverage = 'all'
if not format_type:
format_type = 'PYCDF'
if not dateformat:
dateformat = '%Y-%m-%d' # or %Y-%m-%dT%H or %Y-%m or %Y or %Y
if not coverage:
coverage = 'day' #timedelta(days=1)
if not filenamebegins:
filenamebegins = ''
if not filenameends and not filenameends == '':
# Extension for cdf files is automatically attached
if format_type in ['PYCDF','IMAGCDF']:
filenameends = ''
else:
filenameends = '.txt'
return format_type, filenamebegins, filenameends, coverage, dateformat
def write(self, filepath, compression=5, **kwargs):
"""
DEFINITION:
Code for simple application: write Stream to a file.
PARAMETERS:
Variables:
- filepath: (str) Providing path/filename for saving.
Kwargs:
- coverage: (str/timedelta) day files or hour or month or year or all - default day.
'month','year','all',etc., otherwise timedelta object
- dateformat: (str) outformat of date in filename (e.g. "%Y-%m-%d" -> "2011-11-22".
- filenamebegins: (str) providing the begin of savename (e.g. "WIK_").
- filenameends: (str) providing the end of savename (e.g. ".min").
- format_type: (str) Which format - default pystr.
Current supported formats: PYSTR, PYCDF, IAGA, WDC, DIDD,
PMAG1, PMAG2, DTU1, GDASA1, RMRCS, AUTODIF_FREAD,
USBLOG, CR800, LATEX
- keys: (list) Keys to write to file.
- mode: (str) Mode for handling existing files/data in files.
Options: append, overwrite, replace, skip
[- period: (str) Supports hour, day, month, year, all - default day.]
[--> Where is this?]
- wformat: (str) outputformat.
SPECIFIC FORMAT INSTRUCTIONS:
format_type='IAGA'
------------------
*General:
The meta information provided within the header of each IAGA file is automatically
generated from the header information provided along with the following keys
(define by stream.header[key]):
- Obligatory: StationInstitution, StationName, StationIAGAcode (or StationID),
DataElevation, DataSensorOrientation, DataDigitalSampling
- Optional: SensorID, DataPublicationDate, DataComments, DataConversion, StationK9,
SecondarySensorID (F sensor), StationMeans (used for 'Approx H')
- Header input "IntervalType": can either be provided by using key 'DataIntervalType'
or is automatically created from DataSamplingRate.
Filter details as contained in DataSamplingFilter are added to the
commentary part
- Header input "Geodetic Longitude and Latitude":
- defined with keys 'DataAcquisitionLatitude','DataAcquisitionLongitude'
- if an EPSG code is provided in key 'DataLocationReference'
this code is used to convert Lat and Long into the WGS84 system
e.g. stream.header['DataLocationReference'] = 'M34, EPSG: '
*Specific parameters:
- useg (Bool) if F is available, and G not yet calculated: calculate G (deltaF) and
use it within the IAGA output file
*Example:
format_type='IMF'
------------------
*Specific parameters:
- version (str) file version
- gin (gin) information node code
- datatype (str) R: reported, A: adjusted, Q: quasi-definit, D: definite
- kvals (Datastream) contains K value for iaf storage
- comment (string) some comment, currently used in IYFV
- kind (string) one of 'A' (all), 'Q' quiet days, 'D' disturbed days,
currently used in IYFV
format_type='IMAGCDF'
------------------
*General:
- Header input "Geodetic Longitude and Latitude": see format_type='IAGA'
*Specific parameters:
- addflags (BOOL) add flags to IMAGCDF output if True
format_type='BLV'
------------------
*Specific parameters:
- absinfo (str) parameter of DataAbsInfo
- fitfunc (str) fit function for baselinefit
- fitdegree
- knotstep
- extradays
- year (int) year
- meanh (float) annual mean of H component
- meanf (float) annual mean of F component
- deltaF (float) given deltaF value between pier and f position
- diff (DataStream) diff (deltaF) between vario and scalar
RETURNS:
- ... (bool) True if successful.
EXAMPLE:
>>> stream.write('/home/user/data',
format_type='IAGA')
>>> stringio = stream.write('StringIO',
format_type='IAGA')
APPLICATION:
"""
format_type = kwargs.get('format_type')
filenamebegins = kwargs.get('filenamebegins')
filenameends = kwargs.get('filenameends')
dateformat = kwargs.get('dateformat')
coverage = kwargs.get('coverage')
mode = kwargs.get('mode')
#period = kwargs.get('period') # TODO
#offsets = kwargs.get('offsets') # retired? TODO
keys = kwargs.get('keys')
absinfo = kwargs.get('absinfo')
fitfunc = kwargs.get('fitfunc')
fitdegree = kwargs.get('fitdegree')
knotstep = kwargs.get('knotstep')
extradays = kwargs.get('extradays')
year = kwargs.get('year')
meanh = kwargs.get('meanh')
meanf = kwargs.get('meanf')
deltaF = kwargs.get('deltaF')
diff = kwargs.get('diff')
baseparam = kwargs.get('baseparam')
version = kwargs.get('version')
gin = kwargs.get('gin')
datatype = kwargs.get('datatype')
kvals = kwargs.get('kvals')
kind = kwargs.get('kind')
comment = kwargs.get('comment')
useg = kwargs.get('useg')
skipcompression = kwargs.get('skipcompression')
debug = kwargs.get('debug')
addflags = kwargs.get('addflags')
headonly = kwargs.get('headonly')
success = True
#compression: provide compression factor for CDF data: 0 no compression, 9 high compression
t1 = datetime.utcnow()
if not format_type in PYMAG_SUPPORTED_FORMATS:
if not format_type:
format_type = 'PYSTR'
else:
logger.warning('write: Output format not supported.')
return False
else:
if not 'w' in PYMAG_SUPPORTED_FORMATS[format_type][0]:
logger.warning('write: Selected format does not support write methods.')
return False
format_type, filenamebegins, filenameends, coverage, dateformat = self._write_format(format_type, filenamebegins, filenameends, coverage, dateformat, year)
if not mode:
mode= 'overwrite'
if len(self) < 1 and len(self.ndarray[0]) < 1:
logger.error('write: Stream is empty!')
raise Exception("Can't write an empty stream to file!")
ndtype = False
if len(self.ndarray[0]) > 0:
self.ndarray[0] = self.ndarray[0].astype(float)
# remove all data from array where time is not numeric
#1. get indices of nonnumerics in ndarray[0]
nonnumlist = np.asarray([idx for idx,elem in enumerate(self.ndarray[0]) if np.isnan(elem)])
#2. delete them
if len(nonnumlist) > 0:
print("write: Found NaNs in time column - deleting them", nonnumlist)
print(self.ndarray[0])
for idx, elem in enumerate(self.ndarray):
self.ndarray[idx] = np.delete(self.ndarray[idx],nonnumlist)
starttime = datetime.strptime(datetime.strftime(num2date(float(self.ndarray[0][0])).replace(tzinfo=None),'%Y-%m-%d'),'%Y-%m-%d')
try:
lasttime = num2date(float(self.ndarray[0][-1])).replace(tzinfo=None)
except:
lasttime = num2date(float(self.ndarray[0][-2])).replace(tzinfo=None)
ndtype = True
else:
starttime = datetime.strptime(datetime.strftime(num2date(self[0].time).replace(tzinfo=None),'%Y-%m-%d'),'%Y-%m-%d')
lasttime = num2date(self[-1].time).replace(tzinfo=None)
t2 = datetime.utcnow()
# divide stream in parts according to coverage and save them
newst = DataStream()
if coverage == 'month':
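# Write one file per calendar month: endtime is set to the first day of the
# following month; the loop below then advances both boundaries until lasttime is reached.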
#starttime = datetime.strptime(datetime.strftime(num2date(self[0].time).replace(tzinfo=None),'%Y-%m-%d'),'%Y-%m-%d')
cmonth = int(datetime.strftime(starttime,'%m')) + 1
cyear = int(datetime.strftime(starttime,'%Y'))
if cmonth == 13:
cmonth = 1
cyear = cyear + 1
monthstr = str(cyear) + '-' + str(cmonth) + '-' + '1T00:00:00'
endtime = datetime.strptime(monthstr,'%Y-%m-%dT%H:%M:%S')
while starttime < lasttime:
if ndtype:
lst = []
ndarray=self._select_timerange(starttime=starttime, endtime=endtime)
else:
lst = [elem for elem in self if starttime <= num2date(elem.time).replace(tzinfo=None) < endtime]
ndarray = np.asarray([])
newst = DataStream(lst,self.header,ndarray)
filename = filenamebegins + datetime.strftime(starttime,dateformat) + filenameends
# remove any eventually existing null byte
filename = filename.replace('\x00','')
if len(lst) > 0 or (len(ndarray) > 0 and len(ndarray[0]) > 0):  # avoid indexing an empty array
success = writeFormat(newst, os.path.join(filepath,filename),format_type,mode=mode,keys=keys,kvals=kvals,skipcompression=skipcompression,compression=compression, addflags=addflags)
starttime = endtime
# get next endtime
cmonth = int(datetime.strftime(starttime,'%m')) + 1
cyear = int(datetime.strftime(starttime,'%Y'))
if cmonth == 13:
cmonth = 1
cyear = cyear + 1
monthstr = str(cyear) + '-' + str(cmonth) + '-' + '1T00:00:00'
endtime = datetime.strptime(monthstr,'%Y-%m-%dT%H:%M:%S')
elif coverage == 'year':
#print ("write: Saving yearly data")
cyear = int(datetime.strftime(starttime,'%Y'))
cyear = cyear + 1
yearstr = str(cyear) + '-01-01T00:00:00'
endtime = datetime.strptime(yearstr,'%Y-%m-%dT%H:%M:%S')
while starttime < lasttime:
ndarray=self._select_timerange(starttime=starttime, endtime=endtime)
newst = DataStream([LineStruct()],self.header,ndarray)
if not dateformat == 'None':
dat = datetime.strftime(starttime,dateformat)
else:
dat = ''
filename = filenamebegins + dat + filenameends
# remove any eventually existing null byte
filename = filename.replace('\x00','')
if len(ndarray[0]) > 0:
success = writeFormat(newst, os.path.join(filepath,filename),format_type,mode=mode,keys=keys,kvals=kvals,kind=kind,comment=comment,skipcompression=skipcompression,compression=compression, addflags=addflags)
# get next endtime
starttime = endtime
cyear = cyear + 1
yearstr = str(cyear) + '-01-01T00:00:00'
endtime = datetime.strptime(yearstr,'%Y-%m-%dT%H:%M:%S')
elif not coverage == 'all':
#starttime = datetime.strptime(datetime.strftime(num2date(self[0].time).replace(tzinfo=None),'%Y-%m-%d'),'%Y-%m-%d')
if coverage == 'hour':
cov = timedelta(hours=1)
else:
cov = timedelta(days=1)
dailystream = self.copy()
maxidx = -1
endtime = starttime + cov
while starttime < lasttime:
#lst = [elem for elem in self if starttime <= num2date(elem.time).replace(tzinfo=None) < endtime]
#newst = DataStream(lst,self.header)
t3 = datetime.utcnow()
#print "write - writing day:", t3
if ndtype:
lst = []
# non-destructive
#print "write: start and end", starttime, endtime
#print "write", dailystream.length()
#ndarray=self._select_timerange(starttime=starttime, endtime=endtime)
#print starttime, endtime, coverage
#print "Maxidx", maxidx
ndarray=dailystream._select_timerange(starttime=starttime, endtime=endtime, maxidx=maxidx)
#print "write", len(ndarray), len(ndarray[0])
if len(ndarray[0]) > 0:
#maxidx = len(ndarray[0])*2 ## That does not work for few seconds of first day and full coverage of all other days
dailystream.ndarray = np.asarray([array[(len(ndarray[0])-1):] for array in dailystream.ndarray])
#print dailystream.length()
#print len(ndarray), len(ndarray[0]), len(ndarray[1]), len(ndarray[3])
else:
lst = [elem for elem in self if starttime <= num2date(elem.time).replace(tzinfo=None) < endtime]
ndarray = np.asarray([np.asarray([]) for key in KEYLIST])
t4 = datetime.utcnow()
#print "write - selecting time range needs:", t4-t3
newst = DataStream(lst,self.header,ndarray)
filename = str(filenamebegins) + str(datetime.strftime(starttime,dateformat)) + str(filenameends)
# remove any eventually existing null byte
filename = filename.replace('\x00','')
if format_type == 'IMF':
filename = filename.upper()
if debug:
print ("Writing data:", os.path.join(filepath,filename))
if len(lst) > 0 or ndtype:
if len(newst.ndarray[0]) > 0 or len(newst) > 1:
logger.info('write: writing %s' % filename)
#print("Here", num2date(newst.ndarray[0][0]), newst.ndarray)
success = writeFormat(newst, os.path.join(filepath,filename),format_type,mode=mode,keys=keys,version=version,gin=gin,datatype=datatype, useg=useg,skipcompression=skipcompression,compression=compression, addflags=addflags,headonly=headonly,kind=kind)
starttime = endtime
endtime = endtime + cov
t5 = datetime.utcnow()
#print "write - written:", t5-t3
#print "write - End:", t5
else:
filename = filenamebegins + filenameends
# remove any eventually existing null byte
filename = filename.replace('\x00','')
if debug:
print ("Writing file:", filename)
success = writeFormat(self, os.path.join(filepath,filename),format_type,mode=mode,keys=keys,absinfo=absinfo,fitfunc=fitfunc,fitdegree=fitdegree, knotstep=knotstep,meanh=meanh,meanf=meanf,deltaF=deltaF,diff=diff,baseparam=baseparam, year=year,extradays=extradays,skipcompression=skipcompression,compression=compression, addflags=addflags,headonly=headonly,kind=kind)
return success
def idf2xyz(self,**kwargs):
"""
DEFINITION:
Converts inclination, declination, intensity (idf) data to xyz (i,d in 0.00000 deg (or gon)), f in nT
Working only for ndarrays
PARAMETERS:
optional keywords:
unit (string) can be deg or gon
"""
unit = kwargs.get('unit')
keys = kwargs.get('keys')
if not len(self.ndarray[0]) > 0:
print("idf2xyz: no data found")
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
print("idf2xyz: invalid keys provided")
indx = KEYLIST.index(keys[0])
indy = KEYLIST.index(keys[1])
indz = KEYLIST.index(keys[2])
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
dc = self.ndarray[indy].astype(float)*np.pi/(180.*ang_fac)
ic = self.ndarray[indx].astype(float)*np.pi/(180.*ang_fac)
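# dc and ic are declination and inclination in radians; the z column currently holds F:
#   X = F*cos(D)*cos(I),  Y = F*sin(D)*cos(I),  Z = F*sin(I)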
self.ndarray[indx] = self.ndarray[indz].astype(float)*np.cos(dc)*np.cos(ic)
self.ndarray[indy] = self.ndarray[indz].astype(float)*np.sin(dc)*np.cos(ic)
self.ndarray[indz] = self.ndarray[indz].astype(float)*np.sin(ic)
self.header['col-x'] = 'X'
self.header['col-y'] = 'Y'
self.header['col-z'] = 'Z'
self.header['unit-col-x'] = 'nT'
self.header['unit-col-y'] = 'nT'
self.header['unit-col-z'] = 'nT'
self.header['DataComponents'] = self.header['DataComponents'].replace('IDF','XYZ')
return self
def xyz2idf(self,**kwargs):
"""
DEFINITION:
Converts x,y,z (all in nT) to inclination, declination, intensity (idf)
(i,d in 0.00000 deg (or gon)), f in nT
Working only for ndarrays
PARAMETERS:
optional keywords:
unit (string) can be deg or gon
"""
keys = kwargs.get('keys')
if not len(self.ndarray[0]) > 0:
print("xyz2idf: no data found")
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
print("xyz2idf: invalid keys provided")
indx = KEYLIST.index(keys[0])
indy = KEYLIST.index(keys[1])
indz = KEYLIST.index(keys[2])
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
h = np.sqrt(self.ndarray[indx].astype(float)**2 + self.ndarray[indy].astype(float)**2)
i = (180.*ang_fac)/np.pi * np.arctan2(self.ndarray[indz].astype(float), h)
d = (180.*ang_fac)/np.pi * np.arctan2(self.ndarray[indy].astype(float), self.ndarray[indx].astype(float))
f = np.sqrt(self.ndarray[indx].astype(float)**2+self.ndarray[indy].astype(float)**2+self.ndarray[indz].astype(float)**2)
self.ndarray[indx] = i
self.ndarray[indy] = d
self.ndarray[indz] = f
self.header['col-x'] = 'I'
self.header['col-y'] = 'D'
self.header['col-z'] = 'F'
self.header['unit-col-x'] = 'deg'
self.header['unit-col-y'] = 'deg'
self.header['unit-col-z'] = 'nT'
self.header['DataComponents'] = self.header['DataComponents'].replace('XYZ','IDF')
return self
def xyz2hdz(self,**kwargs):
"""
DEFINITION:
Converts x,y,z (all in nT) to horizontal, declination, z (hdz)
(d in 0.00000 deg (or gon)), h,z in nT
Working only for ndarrays
PARAMETERS:
optional keywords:
unit (string) can be deg or gon
"""
keys = kwargs.get('keys')
if not len(self.ndarray[0]) > 0:
print("xyz2hdz: no data found")
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
print("xyz2hdz: invalid keys provided")
indx = KEYLIST.index(keys[0])
indy = KEYLIST.index(keys[1])
indz = KEYLIST.index(keys[2])
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
h = np.sqrt(self.ndarray[indx].astype(float)**2 + self.ndarray[indy].astype(float)**2)
d = (180.*ang_fac) / np.pi * np.arctan2(self.ndarray[indy].astype(float), self.ndarray[indx].astype(float))
self.ndarray[indx] = h
self.ndarray[indy] = d
#dH = dX*X/sqrt(X^2 + Y^2) + dY*Y/sqrt(X^2 + Y^2)
#dD = 180/Pi*(dY*X/(X^2 + Y^2) - dX*Y/(X^2 + Y^2))
self.header['col-x'] = 'H'
self.header['col-y'] = 'D'
self.header['unit-col-x'] = 'nT'
self.header['unit-col-y'] = 'deg'
self.header['DataComponents'] = self.header['DataComponents'].replace('XYZ','HDZ')
return self
def hdz2xyz(self,**kwargs):
"""
DEFINITION:
Converts h,d,z (h,z in nT, d in deg) to xyz
Working only for ndarrays
PARAMETERS:
optional keywords:
unit (string) can be deg or gon
keys (list) list of three keys which hold h,d,z values
"""
keys = kwargs.get('keys')
if not len(self.ndarray[0]) > 0:
print("hdz2xyz: no data found")
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
print("hdz2xyz: invalid keys provided")
indx = KEYLIST.index(keys[0])
indy = KEYLIST.index(keys[1])
indz = KEYLIST.index(keys[2])
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
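# the x column holds H and the y column holds D (converted to radians below):
#   X = H*cos(D),  Y = H*sin(D),  Z stays unchanged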
dc = self.ndarray[indy].astype(float)*np.pi/(180.*ang_fac)
prevxcol = self.ndarray[indx].astype(float)
self.ndarray[indx] = prevxcol * (np.cos(dc))
self.ndarray[indy] = prevxcol * (np.sin(dc))
#self.ndarray[indx] = self.ndarray[indx].astype(float) /np.sqrt((np.tan(dc))**2 + 1)
#self.ndarray[indy] = np.sqrt(self.ndarray[indx].astype(float)**2 - xtmp**2)
#print self.ndarray[indy]
#self.ndarray[indx] = xtmp
self.header['col-x'] = 'X'
self.header['col-y'] = 'Y'
self.header['col-z'] = 'Z'
self.header['unit-col-x'] = 'nT'
self.header['unit-col-y'] = 'nT'
self.header['unit-col-z'] = 'nT'
self.header['DataComponents'] = self.header['DataComponents'].replace('HDZ','XYZ')
return DataStream(self,self.header,self.ndarray)
class PyMagLog(object):
"""
Logging class for warning messages and analysis steps.
logger and warnings are lists of strings.
They contain full text information for file and screen output
"""
def __init__(self, logger=None, warnings=None, process=None, proc_count=0):
# use None defaults to avoid mutable default arguments shared between instances
self.logger = logger if logger is not None else []
self.warnings = warnings if warnings is not None else []
self.process = process if process is not None else []
self.proc_count = proc_count
def __getitem__(self, key):
return getattr(self, key)
def addwarn(self, warnmsg):
self.warnings.append(warnmsg)
def addlog(self, logmsg):
self.logger.append(logmsg)
def addpro(self, promsg):
self.process.append(promsg)
def clearpro(self):
self.process = []
def clearlog(self):
self.logger = []
def clearwarn(self):
self.warnings = []
def addcount(self, num, maxnum):
"""
creates an integer number relative to maxnum ranging from 0 to 100
assuming num starting at zero
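e.g. (illustrative) addcount(25, 50) sets proc_count to 50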
"""
self.proc_count = int(np.round(num*100/maxnum))
def clearcount(self):
self.proc_count = 0
def _removeduplicates(self,content):
return list(set(content))
"""
def sendLogByMail(self,loglist,**kwargs):
smtpserver = kwargs.get('smtpserver')
sender = kwargs.get('sender')
user = kwargs.get('user')
pwd = <PASSWORD>('<PASSWORD>')
destination = kwargs.get('destination')
subject = kwargs.get('subject')
if not smtpserver:
smtpserver = 'smtp.internet.at'
if not sender:
sender = '<EMAIL>'
if not destination:
destination = ['<EMAIL>']
if not user:
user = "FrauMusterfrau"
if not pwd:
pwd = "<PASSWORD>"
if not subject:
subject= 'MagPy Log from %s' % datetime.utcnow()
# typical values for text_subtype are plain, html, xml
text_subtype = 'plain'
content = '\n'.join(''.join(line) for line in loglist)
try:
msg = MIMEText(content, text_subtype)
msg['Subject']= subject
msg['From'] = sender # some SMTP servers will do this automatically, not all
smtp = SMTP()
smtp.set_debuglevel(False)
smtp.connect(smtpserver, 587)
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(user, pwd)
try:
smtp.sendmail(sender, destination, msg.as_string())
finally:
smtp.close()
except Exception as exc:
raise ValueError( "mail failed; %s" % str(exc) ) # give a error message
"""
def combineWarnLog(self,warning,log):
comlst = ['Warning:']
comlst.extend(self._removeduplicates(warning))
comlst.extend(['Non-critical info:'])
comlst.extend(self._removeduplicates(log))
return comlst
class LineStruct(object):
def __init__(self, time=float('nan'), x=float('nan'), y=float('nan'), z=float('nan'), f=float('nan'), dx=float('nan'), dy=float('nan'), dz=float('nan'), df=float('nan'), t1=float('nan'), t2=float('nan'), var1=float('nan'), var2=float('nan'), var3=float('nan'), var4=float('nan'), var5=float('nan'), str1='-', str2='-', str3='-', str4='-', flag='0000000000000000-', comment='-', typ="xyzf", sectime=float('nan')):
#def __init__(self):
#- at the end of flag is important to be recognized as string
"""
self.time=float('nan')
self.x=float('nan')
self.y=float('nan')
self.z=float('nan')
self.f=float('nan')
self.dx=float('nan')
self.dy=float('nan')
self.dz=float('nan')
self.df=float('nan')
self.t1=float('nan')
self.t2=float('nan')
self.var1=float('nan')
self.var2=float('nan')
self.var3=float('nan')
self.var4=float('nan')
self.var5=float('nan')
self.str1=''
self.str2=''
self.str3=''
self.str4=''
self.flag='0000000000000000-'
self.comment='-'
self.typ="xyzf"
self.sectime=float('nan')
"""
self.time = time
self.x = x
self.y = y
self.z = z
self.f = f
self.dx = dx
self.dy = dy
self.dz = dz
self.df = df
self.t1 = t1
self.t2 = t2
self.var1 = var1
self.var2 = var2
self.var3 = var3
self.var4 = var4
self.var5 = var5
self.str1 = str1
self.str2 = str2
self.str3 = str3
self.str4 = str4
self.flag = flag
self.comment = comment
self.typ = typ
self.sectime = sectime
def __repr__(self):
return repr((self.time, self.x, self.y, self.z, self.f, self.dx, self.dy, self.dz, self.df, self.t1, self.t2, self.var1, self.var2, self.var3, self.var4, self.var5, self.str1, self.str2, self.str3, self.str4, self.flag, self.comment, self.typ))
def __getitem__(self, index):
key = KEYLIST[index]
return getattr(self, key)
def __setitem__(self, index, value):
key = KEYLIST[index]
setattr(self, key.lower(), value)
def idf2xyz(self,**kwargs):
"""
keyword:
unit: (string) can be deg or gon
"""
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
dc = self.y*np.pi/(180.*ang_fac)
ic = self.x*np.pi/(180.*ang_fac)
self.x = self.z*np.cos(dc)*np.cos(ic)
self.y = self.z*np.sin(dc)*np.cos(ic)
self.z = self.z*np.sin(ic)
return self
def xyz2idf(self,**kwargs):
"""
keyword:
unit: (string) can be deg or gon
"""
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
h = np.sqrt(self.x**2 + self.y**2)
i = (180.*ang_fac)/np.pi * math.atan2(self.z, h)
d = (180.*ang_fac)/np.pi * math.atan2(self.y, self.x)
f = np.sqrt(self.x**2+self.y**2+self.z**2)
self.x = i
self.y = d
self.z = f
return self
def xyz2hdz(self,**kwargs):
"""
keyword:
unit: (string) can be deg or gon
"""
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
h = np.sqrt(self.x**2 + self.y**2)
d = (180.*ang_fac) / np.pi * math.atan2(self.y, self.x)
self.x = h
self.y = d
#dH = dX*X/sqrt(X^2 + Y^2) + dY*Y/sqrt(X^2 + Y^2)
#dD = 180/Pi*(dY*X/(X^2 + Y^2) - dX*Y/(X^2 + Y^2))
return self
def hdz2xyz(self,**kwargs):
"""
keyword:
unit: (string) can be deg or gon
"""
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
dc = self.y*np.pi/(180.*ang_fac)
xtmp = self.x /np.sqrt((np.tan(dc))**2 + 1)
self.y = np.sqrt(self.x**2 - xtmp**2)
self.x = xtmp
return self
def rotation(self,alpha=None,beta=None,**kwargs):
"""
Rotation matrix for rotating x,y,z to a new coordinate system xs,ys,zs using angles alpha and beta
alpha is the horizontal rotation in degree, beta the vertical
"""
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
xval = self.x
yval = self.y
zval = self.z
ra = np.pi*alpha/(180.*ang_fac)
rb = np.pi*beta/(180.*ang_fac)
xs = self.x*np.cos(rb)*np.cos(ra)-self.y*np.sin(ra)+self.z*np.sin(rb)*np.cos(ra)
ys = self.x*np.cos(rb)*np.sin(ra)+self.y*np.cos(ra)+self.z*np.sin(rb)*np.sin(ra)
zs = self.x*np.sin(rb)+self.z*np.cos(rb)
xs2 = xval*np.cos(rb)*np.cos(ra)-yval*np.sin(ra)+zval*np.sin(rb)*np.cos(ra)
ys2 = xval*np.cos(rb)*np.sin(ra)+yval*np.cos(ra)+zval*np.sin(rb)*np.sin(ra)
zs2 = xval*np.sin(rb)+zval*np.cos(rb)
self.x = xs
self.y = ys
self.z = zs
return self
# Unused classes
"""
class ColStruct(object):
def __init__(self,length, time=float('nan'), x=float('nan'), y=float('nan'), z=float('nan'), f=float('nan'), dx=float('nan'), dy=float('nan'), dz=float('nan'), df=float('nan'), t1=float('nan'), t2=float('nan'), var1=float('nan'), var2=float('nan'), var3=float('nan'), var4=float('nan'), var5=float('nan'), str1='-', str2='-', str3='-', str4='-', flag='0000000000000000-', comment='-', typ="xyzf", sectime=float('nan')):
#""
Not used so far. Maybe useful for
Speed optimization:
Change the whole thing to column operations
- at the end of flag is important to be recognized as string
for column initialization use a length parameter and "length*[float('nan')]" or "length*['-']" to initialize nan-values
#""
self.length = length
self.time = length*[time]
self.x = length*[x]
self.y = length*[y]
self.z = length*[z]
self.f = length*[f]
self.dx = length*[dx]
self.dy = length*[dy]
self.dz = length*[dz]
self.df = length*[df]
self.t1 = length*[t1]
self.t2 = length*[t2]
self.var1 = length*[var1]
self.var2 = length*[var2]
self.var3 = length*[var3]
self.var4 = length*[var4]
self.var5 = length*[var5]
self.str1 = length*[str1]
self.str2 = length*[str2]
self.str3 = length*[str3]
self.str4 = length*[str4]
self.flag = length*[flag]
self.comment = length*[comment]
self.typ = length*[typ]
self.sectime = length*[sectime]
def __repr__(self):
return repr((self.time, self.x, self.y, self.z, self.f, self.dx, self.dy, self.dz, self.df, self.t1, self.t2, self.var1, self.var2, self.var3, self.var4, self.var5, self.str1, self.str2, self.str3, self.str4, self.flag, self.comment, self.typ, self.sectime))
"""
# -------------------
# Global functions of the stream file
# -------------------
def coordinatetransform(u,v,w,kind):
"""
DESCRIPTION:
Transforms given values and returns [d,i,h,x,y,z,f] if successful, False if not.
Parameter "kind" defines the type of provided values
APPLICATION:
list = coordinatetransform(meanx,meany,meanz,'xyz')
"""
if not kind in ['xyz','hdz','dhz','idf']:
return [0]*7
if kind == 'xyz':
h = np.sqrt(u**2 + v**2)
i = (180.)/np.pi * np.arctan2(w, h)
d = (180.)/np.pi * np.arctan2(v, u)
f = np.sqrt(u**2+v**2+w**2)
return [d,i,h,u,v,w,f]
elif kind == 'hdz':
dc = v*np.pi/(180.)
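# kind 'hdz': u holds H, v holds D, w holds Z; xtmp below equals H*cos(D) and y equals H*sin(D)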
xtmp = u /np.sqrt((np.tan(dc))**2 + 1)
y = np.sqrt(u**2 - xtmp**2)
x = xtmp
f = np.sqrt(x**2+y**2+w**2)
i = (180.)/np.pi * np.arctan2(w, u)
return [v,i,u,x,y,w,f]
elif kind == 'dhz':
dc = u*np.pi/(180.)
xtmp = v /np.sqrt((np.tan(dc))**2 + 1)
y = np.sqrt(v**2 - xtmp**2)
x = xtmp
f = np.sqrt(v**2+w**2)    # F from horizontal intensity (v) and vertical component (w)
i = (180.)/np.pi * np.arctan2(w, v)
return [u,i,v,x,y,w,f]
return [0]*7
def isNumber(s):
"""
Test whether s is a number
"""
try:
float(s)
return True
except ValueError:
return False
def find_nearest(array,value):
"""
Find the nearest element within an array
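Example (illustrative): find_nearest(np.array([1.0, 2.5, 4.0]), 3.0) returns (2.5, 1)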
"""
# Possibly faster solution (minimal)
#idx = np.searchsorted(array, value, side="left")
#if math.fabs(value - array[idx-1]) < math.fabs(value - array[idx]):
# return array[idx-1], idx-1
#else:
# return array[idx], idx
idx = (np.abs(array-value)).argmin()
return array[idx], idx
def ceil_dt(dt,seconds):
"""
DESCRIPTION:
Function to round time to the next time step as given by its seconds
minute: 60 sec
quater hour: 900 sec
hour: 3600 sec
PARAMETER:
dt: (datetime object)
seconds: (integer)
USAGE:
>>>print ceil_dt(datetime(2014,01,01,14,12,04),60)
>>>2014-01-01 14:13:00
>>>print ceil_dt(datetime(2014,01,01,14,12,04),3600)
>>>2014-01-01 15:00:00
>>>print ceil_dt(datetime(2014,01,01,14,7,0),60)
>>>2014-01-01 14:07:00
"""
#how many secs have passed this hour
nsecs = dt.minute*60+dt.second+dt.microsecond*1e-6
if nsecs % seconds:
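# delta = seconds remaining until the next multiple of 'seconds'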
delta = (nsecs//seconds)*seconds+seconds-nsecs
return dt + timedelta(seconds=delta)
else:
return dt
# ##################
# read/write functions
# ##################
def read(path_or_url=None, dataformat=None, headonly=False, **kwargs):
"""
DEFINITION:
The read functions tries to open the selected files. Calls on
function _read() for help.
PARAMETERS:
Variables:
- path_or_url: (str) Path to data files in form:
a) c:\my\data\*
b) c:\my\data\thefile.txt
c) /home/data/*
d) /home/data/thefile.txt
e) ftp://server/directory/
f) ftp://server/directory/thefile.txt
g) http://www.thepage.at/file.tab
- headonly: (?) ???
Kwargs:
- dataformat: (str) Format of data file. Works as auto-detection.
- disableproxy: (bool) If True, will use urllib2.install_opener()
- endtime: (str/datetime object) Description.
- starttime: (str/datetime object) Description.
Format specific kwargs:
IAF:
- resolution: (str) can be either 'day','hour','minute'(default) or 'k'
RETURNS:
- stream: (DataStream object) Stream containing data in file
under path_or_url.
EXAMPLE:
>>> stream = read('/srv/archive/WIC/LEMI025/LEMI025_2014-05-05.bin')
OR
>>> stream = read('http://www.swpc.noaa.gov/ftpdir/lists/ace/20140507_ace_sis_5m.txt')
APPLICATION:
"""
starttime = kwargs.get('starttime')
endtime = kwargs.get('endtime')
debugmode = kwargs.get('debugmode')
disableproxy = kwargs.get('disableproxy')
skipsorting = kwargs.get('skipsorting')
keylist = kwargs.get('keylist') # for PYBIN
debug = kwargs.get('debug')
if disableproxy:
proxy_handler = ProxyHandler( {} )
opener = build_opener(proxy_handler)
# install this opener
install_opener(opener)
# 1. No path
if not path_or_url:
logger.error("read: File not specified.")
raise Exception("No path given for data in read function!")
# 2. Create DataStream
st = DataStream([],{},np.array([[] for ke in KEYLIST]))
# 3. Read data
if not isinstance(path_or_url, basestring):
# not a string - we assume a file-like object
pass
"""
elif path_or_url.startswith("DB:"):
# a database table
if
logger.error("read: File not specified.")
raise Exception("No path given for data in read function!")
pathname = path_or_url
for file in iglob(pathname):
stp = DataStream([],{},np.array([[] for ke in KEYLIST]))
stp = _read(file, dataformat, headonly, **kwargs) glob
"""
elif "://" in path_or_url:
# some URL
# extract extension if any
logger.info("read: Found URL to read at {}".format(path_or_url))
content = urlopen(path_or_url).read()
content = content.decode('utf-8')
if content.find('<pre>') > -1:
"""
check whether content is coming with some html tags
"""
def get_between(s,first,last):
start = s.index(first) + len(first)
end = s.index(last, start )
return s[start:end]
content_t = get_between(content, '<pre>', '</pre>')
cleanr = re.compile('<.*?>')
content = re.sub(cleanr, '', content_t)
#print ("HERE", path_or_url)
if debugmode:
print(urlopen(path_or_url).info())
if path_or_url[-1] == '/':
# directory
string = content   # content has already been decoded to a string above
for line in string.split("\n"):
if len(line) > 1:
filename = (line.strip().split()[-1])
if debugmode:
print(filename)
content = urlopen(path_or_url+filename).read()
suffix = '.'+os.path.basename(path_or_url).partition('.')[2] or '.tmp'
#date = os.path.basename(path_or_url).partition('.')[0][-8:]
#date = re.findall(r'\d+',os.path.basename(path_or_url).partition('.')[0])
date = os.path.basename(path_or_url).partition('.')[0] # append the full filename to the temporary file
fname = date+suffix
fname = fname.strip('?').strip(':') ## Necessary for windows
#fh = NamedTemporaryFile(suffix=date+suffix,delete=False)
fh = NamedTemporaryFile(suffix=fname,delete=False)
print (fh.name, suffix)
fh.write(content)
fh.close()
stp = _read(fh.name, dataformat, headonly, **kwargs)
if len(stp) > 0: # important - otherwise header is going to be deleted
st.extend(stp.container,stp.header,stp.ndarray)
os.remove(fh.name)
else:
# TODO !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# check whether content is a single file or e.g. a ftp-directory
# currently only single files are supported
# ToDo !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
suffix = '.'+os.path.basename(path_or_url).partition('.')[2] or '.tmp'
#date = os.path.basename(path_or_url).partition('.')[0][-8:]
#date = re.findall(r'\d+',os.path.basename(path_or_url).partition('.')[0])[0]
date = os.path.basename(path_or_url).partition('.')[0] # append the full filename to the temporary file
fname = date+suffix
fname = fname.replace('?','').replace(':','') ## Necessary for windows
fh = NamedTemporaryFile(suffix=fname,delete=False,mode='w+')
fh.write(content)
fh.close()
st = _read(fh.name, dataformat, headonly, **kwargs)
os.remove(fh.name)
else:
# some file name
pathname = path_or_url
for filename in iglob(pathname):
getfile = True
theday = extractDateFromString(filename)
#print (" Extracted date:", theday) # Doesnt work for IAF files
try:
if starttime:
if not theday[-1] >= datetime.date(st._testtime(starttime)):
getfile = False
if endtime:
if not theday[0] <= datetime.date(st._testtime(endtime)):
getfile = False
except:
# Date format not recognised. Read all files
logger.info("read: Unable to detect date string in filename. Reading all files...")
#logger.warning("read: filename: {}, theday: {}".format(filename,theday))
getfile = True
if getfile:
if filename.endswith('.gz') or filename.endswith('.GZ'):
## Added gz support to read IMO compressed data directly - future option might include tarfiles
import gzip
print ("Found zipped file (gz) ... unpacking")
fname = os.path.split(filename)[1]
fname = fname[:-3]   # remove the trailing '.gz' / '.GZ'
with NamedTemporaryFile(suffix=fname,delete=False) as fh:
shutil.copyfileobj(gzip.open(filename), fh)
filename = fh.name
if filename.endswith('.zip') or filename.endswith('.ZIP'):
## Added gz support to read IMO compressed data directly - future option might include tarfiles
from zipfile import ZipFile
print ("Found zipped file (zip) ... unpacking")
with ZipFile(filename) as myzip:
fname = myzip.namelist()[0]
with NamedTemporaryFile(suffix=fname,delete=False) as fh:
shutil.copyfileobj(myzip.open(fname), fh)
filename = fh.name
stp = DataStream([],{},np.array([[] for ke in KEYLIST]))
try:
stp = _read(filename, dataformat, headonly, **kwargs)
except:
stp = DataStream([],{},np.array([[] for ke in KEYLIST]))
logger.warning("read: File {} could not be read. Skipping ...".format(filename))
if (len(stp) > 0 and not np.isnan(stp[0].time)) or len(stp.ndarray[0]) > 0: # important - otherwise header is going to be deleted
st.extend(stp.container,stp.header,stp.ndarray)
#del stp
if st.length()[0] == 0:
# try to give more specific information why the stream is empty
if has_magic(pathname) and not glob(pathname):
logger.error("read: No file matching file pattern: %s" % pathname)
raise Exception("Cannot read non-existent file!")
elif not has_magic(pathname) and not os.path.isfile(pathname):
logger.error("read: No such file or directory: %s" % pathname)
raise Exception("Cannot read non-existent file!")
# Only raise error if no starttime/endtime has been set. This
# will return an empty stream if the user chose a time window with
# no data in it.
# XXX: Might cause problems if the data is faulty and the user
# set starttime/endtime. Not sure what to do in this case.
elif not 'starttime' in kwargs and not 'endtime' in kwargs:
logger.error("read: Cannot open file/files: %s" % pathname)
elif 'starttime' in kwargs or 'endtime' in kwargs:
logger.error("read: Cannot read data. Probably no data available in the time range provided!")
raise Exception("No data available in time range")
else:
logger.error("read: Unknown error occurred. No data in stream!")
raise Exception("Unknown error occurred during reading. No data in stream!")
if headonly and (starttime or endtime):
msg = "read: Keyword headonly cannot be combined with starttime or endtime."
logger.error(msg)
# Sort the input data regarding time
if not skipsorting:
st = st.sorting()
# eventually trim data
if starttime:
st = st.trim(starttime=starttime)
if endtime:
st = st.trim(endtime=endtime)
### Define some general header information TODO - This is done already in some format libs - clean up
st.header['DataSamplingRate'] = float("{0:.2f}".format(st.samplingrate()))
return st
#@uncompressFile
def _read(filename, dataformat=None, headonly=False, **kwargs):
"""
Reads a single file into a MagPy DataStream object.
Internal function only.
"""
debug = kwargs.get('debug')
stream = DataStream([],{})
format_type = None
foundapproptiate = False
if not dataformat:
# auto detect format - go through all known formats in given sort order
for format_type in PYMAG_SUPPORTED_FORMATS:
# check format
if debug:
print("_read: Testing format: {} ...".format(format_type))
if debug:
logger.info("_read: Testing format: {} ...".format(format_type))
#try:
# readsucc = isFormat(filename, format_type)
#except:
# readsucc = False
if isFormat(filename, format_type):
if debug:
logger.info(" -- found: {}".format(format_type))
print (" -- found: {}".format(format_type))
foundapproptiate = True
break
if not foundapproptiate:
temp = open(filename, 'rt').readline()
if temp.startswith('# MagPy Absolutes'):
logger.warning("_read: You apparently tried to open a DI object - please use the absoluteAnalysis method")
else:
logger.error("_read: Could not identify a suitable data format")
return DataStream([LineStruct()],{},np.asarray([[] for el in KEYLIST]))
else:
# format given via argument
dataformat = dataformat.upper()
try:
formats = [el for el in PYMAG_SUPPORTED_FORMATS if el == dataformat]
format_type = formats[0]
except IndexError:
msg = "Format \"%s\" is not supported. Supported types: %s"
logger.error(msg % (dataformat, ', '.join(PYMAG_SUPPORTED_FORMATS)))
raise TypeError(msg % (dataformat, ', '.join(PYMAG_SUPPORTED_FORMATS)))
"""
try:
# search readFormat for given entry point
readFormat = load_entry_point(format_ep.dist.key,
'obspy.plugin.waveform.%s' % (format_ep.name), 'readFormat')
except ImportError:
msg = "Format \"%s\" is not supported. Supported types: %s"
raise TypeError(msg % (format_ep.name,
', '.join(WAVEFORM_ENTRY_POINTS)))
"""
stream = readFormat(filename, format_type, headonly=headonly, **kwargs)
return stream
def saveflags(mylist=None,path=None, overwrite=False):
"""
DEFINITION:
Save list e.g. flaglist to file using pickle.
PARAMETERS:
Variables:
- path: (str) Path to data files in form:
RETURNS:
- True if successful otherwise False
EXAMPLE:
>>> saveflags(flaglist,'/my/path/myfile.pkl')
"""
print("Saving flaglist ...")
if not mylist:
print("error 1")
return False
if not path:
path = 'myfile.pkl'
if not overwrite:
existflag = loadflags(path)
existflag.extend(mylist)
mylist = existflag
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if path.endswith('.json'):
print(" -- using json format ")
try:
import json
def dateconv(d):
# Converter to serialize datetime objects in json
if isinstance(d,datetime):
return d.__str__()
# Convert mylist to a dictionary
mydic = {}
# get a list of unique sensorid
sid = [elem[5] for elem in mylist]
sid = list(set(sid))
for s in sid:
slist = [elem[0:5]+elem[6:] for elem in mylist if elem[5] == s]
mydic[s] = slist
## Dictionary looks like {SensorID:[[t1,t2,xxx,xxx,],[x...]]}
with open(path,'w',encoding='utf-8') as file:
file.write(unicode(json.dumps(mydic,default=dateconv)))
print("saveflags: list saved to a json file: {}".format(path))
return True
except:
return False
else:
print(" -- using pickle")
try:
# TODO: check whether package is already loaded
from pickle import dump
dump(mylist,open(path,'wb'))
print("saveflags: list saved to {}".format(path))
return True
except:
return False
def loadflags(path=None,sensorid=None,begin=None, end=None):
"""
DEFINITION:
Load list e.g. flaglist from file using pickle.
PARAMETERS:
Variables:
- path: (str) Path to data files in form:
- begin: (datetime)
- end: (datetime)
RETURNS:
- list (e.g. flaglist)
EXAMPLE:
>>> loadflags('/my/path/myfile.pkl')
"""
if not path:
return []
if path.endswith('.json'):
try:
import json
print ("Reading a json style flaglist...")
def dateparser(dct):
# Convert dates in dictionary to datetime objects
for (key,value) in dct.items():
for i,line in enumerate(value):
for j,elem in enumerate(line):
if str(elem).count('-') + str(elem).count(':') == 4:
try:
try:
value[i][j] = datetime.strptime(elem,"%Y-%m-%d %H:%M:%S.%f")
except:
value[i][j] = datetime.strptime(elem,"%Y-%m-%d %H:%M:%S")
except:
pass
dct[key] = value
return dct
if os.path.isfile(path):
with open(path,'r') as file:
mydic = json.load(file,object_hook=dateparser)
if sensorid:
mylist = mydic.get(sensorid,'')
do = [el.insert(5,sensorid) for el in mylist]
else:
mylist = []
for s in mydic:
ml = mydic[s]
do = [el.insert(5,s) for el in ml]
mylist.extend(mydic[s])
if begin:
mylist = [el for el in mylist if el[1] > begin]
if end:
mylist = [el for el in mylist if el[0] < end]
return mylist
else:
print ("Flagfile not yet existing ...")
return []
except:
return []
else:
try:
from pickle import load as pklload
mylist = pklload(open(path,"rb"))
print("loadflags: list {a} successfully loaded, found {b} inputs".format(a=path,b=len(mylist)))
if sensorid:
print(" - extracting data for sensor {}".format(sensorid))
mylist = [el for el in mylist if el[5] == sensorid]
if begin:
mylist = [el for el in mylist if el[1] > begin]
if end:
mylist = [el for el in mylist if el[0] < end]
#print(" -> remaining flags: {b}".format(b=len(mylist)))
return mylist
except:
return []
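
# --- Illustrative usage of saveflags()/loadflags() ---------------------------
# A minimal sketch only: the entry layout assumed below ([starttime, endtime,
# key, flagid, comment, sensorid, modificationtime]) and the file path are
# example assumptions, not a definitive specification of the flaglist format.
def _example_flag_roundtrip(path='/tmp/exampleflags.pkl'):
    from datetime import datetime, timedelta
    st = datetime(2018, 5, 30, 12, 0, 0)
    flaglist = [[st, st + timedelta(minutes=1), 'f', 3,
                 'example spike', 'Example_0001_0001', datetime.utcnow()]]
    # write the list, then read back only the entries of the given sensor
    saveflags(flaglist, path, overwrite=True)
    return loadflags(path, sensorid='Example_0001_0001')
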
def joinStreams(stream_a,stream_b, **kwargs):
"""
DEFINITION:
        Copy two streams together, replacing already existing time steps where they overlap.
Data of stream_a will replace data of stream_b
APPLICATION
combinedstream = joinStreams(stream_a,stream_b)
"""
logger.info('joinStreams: Start joining at %s.' % str(datetime.now()))
# Check stream type and eventually convert them to ndarrays
# --------------------------------------
ndtype = False
if len(stream_a.ndarray[0]) > 0:
# Using ndarray and eventually convert stream_b to ndarray as well
ndtype = True
if not len(stream_b.ndarray[0]) > 0:
stream_b = stream_b.linestruct2ndarray()
if not len(stream_b.ndarray[0]) > 0:
return stream_a
elif len(stream_b.ndarray[0]) > 0:
ndtype = True
stream_a = stream_a.linestruct2ndarray()
if not len(stream_a.ndarray[0]) > 0:
return stream_b
else:
ndtype = True
stream_a = stream_a.linestruct2ndarray()
stream_b = stream_b.linestruct2ndarray()
if not len(stream_a.ndarray[0]) > 0 and not len(stream_b.ndarray[0]) > 0:
            logger.error('joinStreams: stream(s) empty - aborting join.')
return stream_a
# non-destructive
# --------------------------------------
sa = stream_a.copy()
sb = stream_b.copy()
    # Get indices of timesteps of stream_b for which identical times exist in stream_a -> delete those lines
# --------------------------------------
# IMPORTANT: If two streams with different keys should be combined then "merge" is the method of choice
# NEW: shape problems when removing data -> now use removeduplicates at the end
    # SHOULD WORK (already tested) as removeduplicates will keep the last value and drop earlier occurrences
#indofb = np.nonzero(np.in1d(sb.ndarray[0], sa.ndarray[0]))[0]
#for idx,elem in enumerate(sb.ndarray):
# if len(sb.ndarray[idx]) > 0:
# sb.ndarray[idx] = np.delete(sb.ndarray[idx],indofb)
# Now add stream_a to stream_b - regard for eventually missing column data
# --------------------------------------
array = [[] for key in KEYLIST]
for idx,elem in enumerate(sb.ndarray):
if len(sa.ndarray[idx]) > 0 and len(sb.ndarray[idx]) > 0:
array[idx] = np.concatenate((sa.ndarray[idx],sb.ndarray[idx]))
elif not len(sa.ndarray[idx]) > 0 and len(sb.ndarray[idx]) > 0:
if idx < len(NUMKEYLIST):
fill = float('nan')
else:
fill = '-'
arraya = np.asarray([fill]*len(sa.ndarray[0]))
array[idx] = np.concatenate((arraya,sb.ndarray[idx]))
elif len(sa.ndarray[idx]) > 0 and not len(sb.ndarray[idx]) > 0:
if idx < len(NUMKEYLIST):
fill = float('nan')
else:
fill = '-'
arrayb = np.asarray([fill]*len(sb.ndarray[0]))
array[idx] = np.concatenate((sa.ndarray[idx],arrayb))
else:
array[idx] = np.asarray([])
stream = DataStream([LineStruct()],sa.header,np.asarray(array,dtype=object))
stream = stream.removeduplicates()
return stream.sorting()
def appendStreams(streamlist):
"""
DESCRIPTION:
Appends contents of streamlist and returns a single new stream.
Duplicates are removed and the new stream is sorted.
"""
array = [[] for key in KEYLIST]
for idx,key in enumerate(KEYLIST):
# Get tuple of array
arlist = []
for stream in streamlist:
if len(stream.ndarray[idx]) > 0:
array[idx].extend(stream.ndarray[idx])
stream = DataStream([LineStruct()],streamlist[0].header,np.asarray(array).astype(object))
if len(stream.ndarray[0]) > 0:
stream = stream.removeduplicates()
stream = stream.sorting()
return stream
else:
return DataStream([LineStruct()],streamlist[0].header,np.asarray([np.asarray([]) for key in KEYLIST]))
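
# --- Illustrative usage of appendStreams() -----------------------------------
# A minimal sketch only: the file pattern and day numbers below are
# hypothetical; any iterable of DataStream objects obtained with read() works.
def _example_append_days(filepattern='/tmp/exampledata_2018-05-%02d.cdf',
                         days=(1, 2, 3)):
    streamlist = [read(filepattern % day) for day in days]
    return appendStreams(streamlist)
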
def mergeStreams(stream_a, stream_b, **kwargs):
"""
DEFINITION:
        Combine the contents of two data streams relative to stream_a.
Basically three modes are possible:
1. Insert data from stream_b into stream_a based on timesteps of stream_a
- if keys are provided only these specific columns are inserted into a
- default: if data is existing in stream_a only nans are replaced
here flags (4) can be set and a comment "inserted from SensorID" is added
            - optionally use get_gaps to identify missing timesteps in stream_a beforehand
2. Replace
- same as insert but here all existing time series data is replaced by
corresponding data from stream_b
3. Drop
- drops the whole column from stream_a and fills it with stream_b data
        The streams need to overlap; the base stream is stream_a, whose time range
        is not modified. If you want to extend this stream by new data use the extend
method.
1. replace data from specific columns of stream_a with data from stream_b.
- requires keys
2. fill gaps in stream_a data with stream_b data without replacing any data.
- extend = True
PARAMETERS:
Variables:
- stream_a (DataStream object) main stream
- stream_b (DataStream object) this stream is merged into stream_a
Kwargs:
- addall: (bool) Add all elements from stream_b
- extend: (bool) Time range of stream b is eventually added to stream a.
Default False.
If extend = true => any existing date which is not present in stream_a
will be filled by stream_b
- mode: (string) 'insert' or 'replace' or 'drop'. drop removes stream_a column, replace will change values no matter what, insert will only replace nan's (default)
- keys: (list) List of keys to add from stream_b into stream_a.
- flag: (bool) if true, a flag will be added to each merged line (default: flagid = 4, comment = "keys ... added from sensorid b").
- comment: (str) Define comment to stream_b data in stream_a.
- replace: (bool) Allows existing stream_a values to be replaced by stream_b ones.
RETURNS:
- Datastream(stream_a): (DataStream) DataStream object.
EXAMPLE:
>>> # Joining two datasets together:
>>> alldata = mergeStreams(lemidata, gsmdata, keys=['f'])
# f of gsm will be added to lemi
# inserting missing values from another stream
>>> new_gsm = mergeStreams(gsm1, gsm2, keys=['f'], mode='insert')
# all missing values (nans) of gsm1 will be filled by gsm2 values (if existing)
APPLICATION:
"""
# old (LineStruct) too be removed
addall = kwargs.get('addall')
replace = kwargs.get('replace')
extend = kwargs.get('extend')
# new
mode = kwargs.get('mode')
flag = kwargs.get('flag')
keys = kwargs.get('keys')
comment = kwargs.get('comment')
flagid = kwargs.get('flagid')
if not mode:
mode = 'insert' # other possibilities: replace, ...
if not keys:
keys = stream_b._get_key_headers()
# Defining default comment
# --------------------------------------
headera = stream_a.header
headerb = stream_b.header
try:
sensidb = headerb['SensorID']
except:
sensidb = 'stream_b'
# Better: create a flaglist and apply stream.flag(flaglist) with flag 4
if not comment:
comment = 'keys %s added from %s' % (','.join(keys), sensidb)
if not flagid:
flagid = 4
fllst = [] # flaglist
logger.info('mergeStreams: Start mergings at %s.' % str(datetime.now()))
# Check stream type and eventually convert them to ndarrays
# --------------------------------------
ndtype = False
if len(stream_a.ndarray[0]) > 0:
# Using ndarray and eventually convert stream_b to ndarray as well
ndtype = True
if not len(stream_b.ndarray[0]) > 0:
stream_b = stream_b.linestruct2ndarray()
elif len(stream_b.ndarray[0]) > 0:
ndtype = True
stream_a = stream_a.linestruct2ndarray()
else:
ndtype = True
stream_a = stream_a.linestruct2ndarray()
stream_b = stream_b.linestruct2ndarray()
        if not len(stream_a.ndarray[0]) > 0 and not len(stream_b.ndarray[0]) > 0:
            logger.error('mergeStreams: stream(s) empty - aborting merge.')
return stream_a
# non-destructive
# --------------------------------------
sa = stream_a.copy()
sb = stream_b.copy()
sa = sa.removeduplicates()
sb = sb.removeduplicates()
# Sampling rates
# --------------------------------------
sampratea = sa.samplingrate()
samprateb = sb.samplingrate()
minsamprate = min(sampratea,samprateb)
if ndtype:
timea = sa.ndarray[0]
else:
timea = sa._get_column('time')
# truncate b to time range of a
# --------------------------------------
try:
sb = sb.trim(starttime=num2date(timea[0]).replace(tzinfo=None), endtime=num2date(timea[-1]).replace(tzinfo=None)+timedelta(seconds=samprateb),newway=True)
except:
print("mergeStreams: stream_a and stream_b are apparently not overlapping - returning stream_a")
return stream_a
if ndtype:
timeb = sb.ndarray[0]
else:
timeb = sb._get_column('time')
# keeping a - changed by leon 10/2015
"""
# truncate a to range of b
# --------------------------------------
try:
sa = sa.trim(starttime=num2date(timeb[0]).replace(tzinfo=None), endtime=num2date(timeb[-1]).replace(tzinfo=None)+timedelta(seconds=sampratea),newway=True)
except:
print "mergeStreams: stream_a and stream_b are apparently not overlapping - returning stream_a"
return stream_a
# redo timea calc after trimming
# --------------------------------------
if ndtype:
timea = sa.ndarray[0]
else:
timea = sa._get_column('time')
"""
# testing overlapp
# --------------------------------------
if not len(sb) > 0:
print("subtractStreams: stream_a and stream_b are not overlapping - returning stream_a")
return stream_a
timea = maskNAN(timea)
timeb = maskNAN(timeb)
orgkeys = stream_a._get_key_headers()
# master header
# --------------------------------------
header = sa.header
# just add the merged sensorid
header['SecondarySensorID'] = sensidb
## Speed up of unequal timesteps - limit search range
# - search range small (fracratio high) if t_limits are similar and data is periodic
    # - search range large (fracratio small) if t_limits differ and/or the data is non-periodic
# - fracratio = 1 means that the full stream_b data set is searched
    # - fracratio = 20 means that +-5 percent of stream_b is searched around the expected index
#print("mergeStream", sa.length(), sb.length(), sa._find_t_limits(), sb._find_t_limits())
fracratio = 2 # modify if start and endtime are different
speedup = True
if speedup and ndtype:
ast, aet = sa._find_t_limits()
bst, bet = sb._find_t_limits()
uncert = (date2num(aet)-date2num(ast))*0.01
#print ("Merge speedup", uncert, ast, aet, bst, bet)
if not bst < ast+timedelta(minutes=uncert*24*60):
print ("Merge: Starttime of stream_b too large")
for indx,key in enumerate(KEYLIST):
if key == 'time':
### Changes from 2019-01-15: modified axis - originally working fine, however except for saggitarius
#sb.ndarray[0] = np.append(np.asarray([date2num(ast)]), sb.ndarray[0],1)
sb.ndarray[0] = np.append(np.asarray([date2num(ast)]), sb.ndarray[0])
elif key == 'sectime' or key in NUMKEYLIST:
if not len(sb.ndarray[indx]) == 0:
#sb.ndarray[indx] = np.append(np.asarray([np.nan]),sb.ndarray[indx],1)
sb.ndarray[indx] = np.append(np.asarray([np.nan]),sb.ndarray[indx])
else:
if not len(sb.ndarray[indx]) == 0:
#sb.ndarray[indx] = np.append(np.asarray(['']),sb.ndarray[indx],1)
sb.ndarray[indx] = np.append(np.asarray(['']),sb.ndarray[indx])
if not bet > aet-timedelta(minutes=uncert*24*60):
print ("Merge: Endtime of stream_b too small") ### Move that to merge??
for indx,key in enumerate(KEYLIST):
if key == 'time':
#sb.ndarray[0] = np.append(sb.ndarray[0], np.asarray([date2num(aet)]),1)
sb.ndarray[0] = np.append(sb.ndarray[0], np.asarray([date2num(aet)]))
elif key == 'sectime' or key in NUMKEYLIST:
if not len(sb.ndarray[indx]) == 0:
#sb.ndarray[indx] = np.append(sb.ndarray[indx], np.asarray([np.nan]),1)
sb.ndarray[indx] = np.append(sb.ndarray[indx], np.asarray([np.nan]))
else:
if not len(sb.ndarray[indx]) == 0:
#sb.ndarray[indx] = np.append(sb.ndarray[indx], np.asarray(['']),1)
sb.ndarray[indx] = np.append(sb.ndarray[indx], np.asarray(['']))
#st,et = sb._find_t_limits()
#print ("Merge", st, et, sb.length())
sb = sb.get_gaps()
fracratio = 40 # modify if start and endtime are different
timeb = sb.ndarray[0]
timeb = maskNAN(timeb)
abratio = len(timea)/float(len(timeb))
dcnt = int(len(timeb)/fracratio)
#print ("Merge:", abratio, dcnt, len(timeb))
timea = np.round(timea, decimals=9)
timeb = np.round(timeb, decimals=9)
if ndtype:
array = [[] for key in KEYLIST]
# Init array with keys from stream_a
for key in orgkeys:
keyind = KEYLIST.index(key)
array[keyind] = sa.ndarray[keyind]
indtib = np.nonzero(np.in1d(timeb,timea))[0]
# If equal elements occur in time columns
if len(indtib) > int(0.5*len(timeb)):
print("mergeStreams: Found identical timesteps - using simple merge")
            # get tb times for all matching indices
#print("merge", indtib, len(indtib), len(timea), len(timeb), np.argsort(timea), np.argsort(timeb))
tb = np.asarray([timeb[ind] for ind in indtib])
            # Get indices of stream_a whose times are present in the matching tbs
indtia = np.nonzero(np.in1d(timea,tb))[0]
#print("mergeStreams", tb, indtib, indtia, timea,timeb, len(indtib), len(indtia))
if len(indtia) == len(indtib):
nanind = []
for key in keys:
keyind = KEYLIST.index(key)
#array[keyind] = sa.ndarray[keyind]
vala, valb = [], []
if len(sb.ndarray[keyind]) > 0: # stream_b values are existing
#print("Found sb values", key)
valb = [sb.ndarray[keyind][ind] for ind in indtib]
                    if len(sa.ndarray[keyind]) > 0: # stream_a values are existing
vala = [sa.ndarray[keyind][ind] for ind in indtia]
### Change by leon in 10/2015
if len(array[keyind]) > 0 and not mode=='drop': # values are present
pass
else:
if key in NUMKEYLIST:
array[keyind] = np.asarray([np.nan] *len(timea))
else:
array[keyind] = np.asarray([''] *len(timea))
try:
header['col-'+key] = sb.header['col-'+key]
header['unit-col-'+key] = sb.header['unit-col-'+key]
except:
print ("mergeStreams: warning when assigning header values to column %s - missing head" % key)
if len(sb.ndarray[keyind]) > 0: # stream_b values are existing
for i,ind in enumerate(indtia):
if key in NUMKEYLIST:
tester = np.isnan(array[keyind][ind])
else:
tester = False
if array[keyind][ind] == '':
tester = True
#print ("Merge3", tester)
if mode == 'insert':
if tester:
array[keyind][ind] = valb[i]
else:
if len(vala) > 0:
array[keyind][ind] = vala[i]
elif mode == 'replace':
if not np.isnan(valb[i]):
array[keyind][ind] = valb[i]
else:
if len(vala) > 0:
array[keyind][ind] = vala[i]
else:
array[keyind][ind] = valb[i]
if flag:
ttt = num2date(array[0][ind])
fllst.append([ttt,ttt,key,flagid,comment])
array[0] = np.asarray(sa.ndarray[0])
array = np.asarray(array)
else:
print("mergeStreams: Did not find identical timesteps - linearily interpolating stream b...")
print("- Please note: this method needs considerably longer.")
print("- Only data within 1/2 the sampling rate distance of stream_a timesteps is used.")
print("- Put in the larger (higher resolution) stream as stream_a,")
print("- otherwise you might wait an endless amount of time.")
# interpolate b
# TODO here it is necessary to limit the stream to numerical keys
#sb.ndarray = np.asarray([col for idx,col in enumerate(sb.ndarray) if KEYLIST[idx] in NUMKEYLIST])
print(" a) starting interpolation of stream_b")
mst = datetime.utcnow()
function = sb.interpol(keys)
met = datetime.utcnow()
print(" -> needed {}".format(met-mst))
            # Get a list of indices for which timeb values are
            # in the vicinity of a (within half of the sampling rate)
dti = (minsamprate/24./3600.)
print(" b) getting indicies of stream_a with stream_b values in the vicinity")
mst = datetime.utcnow()
            #indtia = [idx for idx, el in enumerate(timea) if np.min(np.abs(timeb-el))/dti <= 1.] # This selection requires most of the time
indtia = [] ### New and faster way by limiting the search range in stream_b by a factor of 10
check = [int(len(timea)*(100-el)/100.) for el in range(99,1,-10)]
lentimeb = len(timeb)
for idx, el in enumerate(timea):
cst = int(idx/abratio-dcnt)
if cst<=0:
cst = 0
cet = int(idx/abratio+dcnt)
if cet>=lentimeb:
cet=lentimeb
if np.min(np.abs(timeb[cst:cet]-el)/(dti)) <= 0.5:
indtia.append(idx)
if idx in check:
print (" -> finished {} percent".format(idx/float(len(timea))*100.))
indtia = np.asarray(indtia)
met = datetime.utcnow()
print(" -> needed {}".format(met-mst))
# limit time range to valued covered by the interpolation function
#print len(indtia), len(timeb), np.asarray(indtia)
indtia = [elem for elem in indtia if function[1] < timea[elem] < function[2]]
#t2temp = datetime.utcnow()
#print "Timediff %s" % str(t2temp-t1temp)
#print len(indtia), len(timeb), np.asarray(indtia)
#print function[1], sa.ndarray[0][indtia[0]], sa.ndarray[0][indtia[-1]], function[2]
print(" c) extracting interpolated values of stream_b")
mst = datetime.utcnow()
if len(function) > 0:
for key in keys:
keyind = KEYLIST.index(key)
#print key, keyind
#print len(sa.ndarray[keyind]),len(sb.ndarray[keyind]), np.asarray(indtia)
vala, valb = [], []
if len(sb.ndarray[keyind]) > 0: # and key in function:
valb = [float(function[0]['f'+key]((sa.ndarray[0][ind]-function[1])/(function[2]-function[1]))) for ind in indtia]
if len(sa.ndarray[keyind]) > 0: # and key in function:
vala = [sa.ndarray[keyind][ind] for ind in indtia]
if len(array[keyind]) > 0 and not mode=='drop': # values are present
pass
else:
if key in NUMKEYLIST:
array[keyind] = np.asarray([np.nan] *len(timea))
else:
array[keyind] = np.asarray([''] *len(timea))
try:
header['col-'+key] = sb.header['col-'+key]
header['unit-col-'+key] = sb.header['unit-col-'+key]
except:
print ("mergeStreams: warning when assigning header values to column %s- missing head" % key)
for i,ind in enumerate(indtia):
if key in NUMKEYLIST:
tester = isnan(array[keyind][ind])
else:
tester = False
if array[keyind][ind] == '':
tester = True
if mode == 'insert':
if tester:
array[keyind][ind] = valb[i]
else:
if len(vala) > 0:
array[keyind][ind] = vala[i]
elif mode == 'replace':
if not np.isnan(valb[i]):
array[keyind][ind] = valb[i]
else:
if len(vala) > 0:
array[keyind][ind] = vala[i]
else:
array[keyind][ind] = valb[i]
"""
if mode == 'insert' and tester:
array[keyind][ind] = valb[i]
elif mode == 'replace':
array[keyind][ind] = valb[i]
"""
if flag:
ttt = num2date(array[0][ind])
fllst.append([ttt,ttt,key,flagid,comment])
met = datetime.utcnow()
print(" -> needed {} for {}".format(met-mst,key))
array[0] = np.asarray(sa.ndarray[0])
array = np.asarray(array)
#try:
# header['SensorID'] = sa.header['SensorID']+'-'+sb.header['SensorID']
#except:
# pass
return DataStream([LineStruct()],header,array)
sta = list(stream_a)
stb = list(stream_b)
if addall:
logger.info('mergeStreams: Adding streams together not regarding for timeconstraints of data.')
if ndtype:
for idx,elem in enumerate(stream_a.ndarray):
ndarray = stream_a.ndarray
if len(elem) == 0 and len(stream_b.ndarray[idx]) > 0:
# print add nan's of len_a to stream a
# then append stream b
pass
elif len(elem) > 0 and len(stream_b.ndarray[idx]) == 0:
# print add nan's of len_b to stream a
pass
elif len(elem) == 0 and len(stream_b.ndarray[idx]) == 0:
# do nothing
pass
else: #len(elem) > 0 and len(stream_b.ndarray[idx]) > 0:
# append b to a
pass
newsta = DataStream(sta, headera, ndarray)
else:
for elem in stream_b:
sta.append(elem)
newsta = DataStream(sta, headera, stream_a.ndarray)
for elem in headerb:
try:
headera[elem]
ha = True
except:
ha = False
if headerb[elem] and not ha:
newsta.header[elem] = headerb[elem]
elif headerb[elem] and ha:
logger.warning("mergeStreams: headers both have keys for %s. Headers may be incorrect." % elem)
newsta.sorting()
return newsta
elif extend:
logger.info('mergeStreams: Extending stream a with data from b.')
for elem in stream_b:
if not elem.time in timea:
sta.append(elem)
newsta = DataStream(sta, headera)
for elem in headerb:
try:
headera[elem]
ha = True
except:
ha = False
if headerb[elem] and not ha:
newsta.header[elem] = headerb[elem]
elif headerb[elem] and ha:
logger.warning("mergeStreams: headers both have keys for %s. Headers may be incorrect." % elem)
newsta.sorting()
return newsta
else:
# interpolate stream_b
# changed the following trim section to prevent removal of first input in trim method
if stream_b[0].time == np.min(timea):
sb = stream_b.trim(endtime=np.max(timea))
else:
sb = stream_b.trim(starttime=np.min(timea), endtime=np.max(timea))
timeb = sb._get_column('time')
timeb = maskNAN(timeb)
function = sb.interpol(keys)
taprev = 0
for elem in sb:
foundina = find_nearest(timea,elem.time)
pos = foundina[1]
ta = foundina[0]
if (ta > taprev) and (np.min(timeb) <= ta <= np.max(timeb)):
taprev = ta
functime = (ta-function[1])/(function[2]-function[1])
for key in keys:
if not key in KEYLIST[1:16]:
logger.error('mergeStreams: Column key (%s) not valid.' % key)
#keyval = getattr(stream_a[pos], key)# should be much better
exec('keyval = stream_a[pos].'+key)
fkey = 'f'+key
if fkey in function[0] and (isnan(keyval) or not stream_a._is_number(keyval)):
newval = function[0][fkey](functime)
exec('stream_a['+str(pos)+'].'+key+' = float(newval) + offset')
exec('stream_a['+str(pos)+'].comment = comment')
## Put flag 4 into the merged data if keyposition <= 8
flagposlst = [i for i,el in enumerate(FLAGKEYLIST) if el == key]
try:
flagpos = flagposlst[0]
fllist = list(stream_a[pos].flag)
fllist[flagpos] = '4'
stream_a[pos].flag=''.join(fllist)
except:
pass
elif fkey in function[0] and not isnan(keyval) and replace == True:
newval = function[0][fkey](functime)
exec('stream_a['+str(pos)+'].'+key+' = float(newval) + offset')
exec('stream_a['+str(pos)+'].comment = comment')
## Put flag 4 into the merged data if keyposition <= 8
flagposlst = [i for i,el in enumerate(FLAGKEYLIST) if el == key]
try:
flagpos = flagposlst[0]
fllist = list(stream_a[pos].flag)
fllist[flagpos] = '4'
stream_a[pos].flag=''.join(fllist)
except:
pass
logger.info('mergeStreams: Mergings finished at %s ' % str(datetime.now()))
return DataStream(stream_a, headera)
def dms2d(dms):
"""
DESCRIPTION:
converts a string with degree:minutes:seconds to degree.decimals
VARIBALES:
dms (string) like -0:37:23 or 23:23
"""
# 1. get sign
sign = dms[0]
multi = 1
if sign == '-':
multi = -1
dms = dms[1:]
dmsar = dms.split(':')
if len(dmsar) > 3:
print("Could not interpret dms")
return 0.0
val=[]
for i in range(0,3):
try:
val.append(float(dmsar[i]))
except:
val.append(0.0)
d = multi*(val[0]+val[1]/60.+val[2]/3600.)
return d
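
# --- Illustrative usage of dms2d() -------------------------------------------
# A minimal sanity-check sketch (expected values rounded for readability).
def _example_dms2d():
    assert abs(dms2d('-0:37:23') - (-0.6231)) < 1e-3
    assert abs(dms2d('23:23') - 23.3833) < 1e-3
    return True
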
def find_offset(stream1, stream2, guess_low=-60., guess_high=60.,
deltat_step=0.1,log_chi=False,**kwargs):
'''
DEFINITION:
Uses least-squares method for a rough estimate of the offset in the time
axis of two different streams. Both streams must contain the same key, e.g. 'f'.
GENTLE WARNING: This method is FAR FROM OPTIMISED.
Interpolation brings in errors, *however* does allow for
a more exact result.
PARAMETERS:
Variables:
- stream1: (DataStream object) First stream to compare.
- stream2: (DataStream object) Second stream to compare.
Kwargs:
- deltat_step: (float) Time value in s to iterate over. Accuracy is higher with
smaller values.
- guess_low: (float) Low guess for offset in s. Function will iterate from here.
- guess_high: (float) High guess for offset in s. Function will iterate till here.
- log_chi: (bool) If True, log chi values.
- plot: (bool) Filename of plot to save chi-sq values to, e.g. "chisq.png"
RETURNS:
- t_offset: (float) The offset (in seconds) calculated by least-squares method
of stream_b.
EXAMPLE:
        >>> offset = find_offset(gdas_data, pos_data, guess_low=-30., guess_high=30., deltat_step=0.1)
APPLICATION:
Challenge in this function:
        --> Needs to be able to compare two non-harmonic signals with different sampling
rates and a presumed time offset. The time offset may be smaller than the
sampling rate itself.
How to go about it:
1. Take arrays of key to compare
2. Resample arrays to same sampling period (or interpolate)
3. Determine offset between two arrays
"""
'''
# 1. Define starting parameters:
N_iter = 0.
# Interpolate the function with the smaller sample period.
# Should hopefully lower error factors.
sp1 = stream1.get_sampling_period()
sp2 = stream2.get_sampling_period()
#if sp1 > sp2:
if sp1 < sp2:
stream_a = stream1
stream_b = stream2
main_a = True
#elif sp1 < sp2:
elif sp1 > sp2:
stream_a = stream2
stream_b = stream1
main_a = False
else:
stream_a = stream1
stream_b = stream2
main_a = True
# Important for least-squares method. Streams must have same length.
timeb = stream_b._get_column('time')
stime = np.min(timeb)
etime = np.max(timeb)
timespan = guess_high-guess_low
# TODO: Remove this trim function. It's destructive.
stream_a = stream_a.trim(starttime=num2date(stime).replace(tzinfo=None)+timedelta(seconds=timespan*2),
endtime=num2date(etime).replace(tzinfo=None)+timedelta(seconds=-timespan*2))
mean_a = stream_a.mean('f')
mean_b = stream_b.mean('f')
difference = mean_a - mean_b
# Interpolate one stream:
    # Note: errors increase with lower interpolation degree. Use the highest degree possible; linear interpolation performs worst.
try:
int_data = stream_b.interpol(['f'],kind='cubic')
except:
try:
logger.warning("find_offset: Not enough memory for cubic spline. Attempting quadratic...")
int_data = stream_b.interpol(['f'],kind='quadratic')
except:
logger.error("find_offset: Too much data! Cannot interpolate function with high enough accuracy.")
return "nan"
int_func = int_data[0]['ff']
int_min = date2num(num2date(int_data[1])+timedelta(milliseconds=guess_low*1000.))
int_max = date2num(num2date(int_data[2])+timedelta(milliseconds=guess_low*1000.))
timea = stream_a._get_column('f')
datarray_base = np.zeros((len(stream_a)))
count = 0
    # 2. Create array of delta-f with offset times:
for elem in stream_a:
time = stream_a[count].time
if time > int_min and time < int_max:
functime = (time - int_min)/(int_max - int_min)
tempval = stream_a[count].f - int_func(functime)
datarray_base[count] += tempval
count = count+1
# 3. From data array calculate chi-squared array of null-offset as a base comparison:
chisq_ = 0.
for item in datarray_base:
chisq_ = chisq_ + (item)**2.
#chisq_ = chisq_ + (item-difference)**2. # Correction may be needed for reasonable values.
deltat = guess_low
# (Write data to file for logging purposes.)
if log_chi:
newfile = open('chisq.txt','a')
writestring = str(deltat)+' '+str(chisq_)+' '+str(chisq_)+' '+str(len(datarray_base))+'\n'
newfile.write(writestring)
newfile.close()
# 4. Start iteration to find best chi-squared minimisation:
logger.info("find_offset: Starting chi-squared iterations...")
chi_lst = []
time_lst = []
min_lst = []
max_lst = []
results = []
while True:
deltat = deltat + deltat_step
if deltat > guess_high: break
N_iter = N_iter + 1.
        flag = 0.
datarray = np.zeros((len(stream_a)))
count = 0
newc = 0
int_min = float(date2num(num2date(int_data[1]) + timedelta(milliseconds=deltat*1000.)))
int_max = float(date2num(num2date(int_data[2]) + timedelta(milliseconds=deltat*1000.)))
for elem in stream_a:
time = stream_a[count].time
if time > int_min and time < int_max:
functime = (time - int_min)/(int_max - int_min)
tempval = stream_a[count].f - int_func(functime)
datarray[count] += tempval
count = count+1
chisq = 0.
for item in datarray:
chisq = chisq + (item-difference)**2.
if log_chi:
newfile = open('chisq.txt','a')
writestring = str(deltat)+' '+str(chisq)+' '+str(chisq_)+' '+str(len(datarray))+'\n'
newfile.write(writestring)
newfile.close()
# Catch minimum:
if chisq < chisq_:
chisq_ = chisq
t_offset = deltat
chi_lst.append(chisq)
time_lst.append(deltat)
    if kwargs.get('plot'):
plt.plot(time_lst,chi_lst,'-')
plt.show()
if not main_a:
t_offset = t_offset * (-1)
logger.info("find_offset: Found an offset of stream_a of %s seconds." % t_offset)
# RESULTS
return t_offset
def diffStreams(stream_a, stream_b, **kwargs):
"""
DESCRIPTION:
obtain and return the differences of two stream:
"""
ndtype_a = False
if len(stream_a.ndarray[0]) > 0:
ndtype_a = True
if not ndtype_a or not len(stream_a) > 0:
logger.error('diffStreams: stream_a empty - aborting.')
return stream_a
ndtype_b = False
if len(stream_b.ndarray[0]) > 0:
ndtype_b = True
# 1. Amount of columns
#if ndtype
# 2. Line contents
# --- amount of lines
# --- differences of lines
def subtractStreams(stream_a, stream_b, **kwargs):
'''
DEFINITION:
Default function will subtract stream_b from stream_a. If timesteps are different
stream_b will be interpolated
PARAMETERS:
Variables:
- stream_a: (DataStream) First stream
- stream_b: (DataStream) Second stream, which is subtracted from a
Optional:
- keys: (list) key list for subtraction - default: all keys present in both streams
RETURNS:
- difference: (DataStream) Description.
EXAMPLE:
>>> diff = subtractStreams(gsm_stream, pos_stream)
APPLICATION:
'''
keys = kwargs.get('keys')
newway = kwargs.get('newway')
getmeans = kwargs.get('getmeans')
debug = kwargs.get('debug')
if not keys:
keys = stream_a._get_key_headers(numerical=True)
keysb = stream_b._get_key_headers(numerical=True)
keys = list(set(keys)&set(keysb))
if not len(keys) > 0:
print("subtractStreams: No common keys found - aborting")
return DataStream()
ndtype = False
if len(stream_a.ndarray[0]) > 0:
# Using ndarray and eventually convert stream_b to ndarray as well
ndtype = True
newway = True
if not len(stream_b.ndarray[0]) > 0:
stream_b = stream_b.linestruct2ndarray()
elif len(stream_b.ndarray[0]) > 0:
ndtype = True
stream_a = stream_a.linestruct2ndarray()
else:
try:
assert len(stream_a) > 0
except:
logger.error('subtractStreams: stream_a empty - aborting subtraction.')
return stream_a
logger.info('subtractStreams: Start subtracting streams.')
headera = stream_a.header
headerb = stream_b.header
# non-destructive
#print ("SA:", stream_a.length())
#print ("SB:", stream_b.length())
sa = stream_a.copy()
sb = stream_b.copy()
# Sampling rates
sampratea = sa.samplingrate()
samprateb = sb.samplingrate()
minsamprate = min(sampratea,samprateb)
if ndtype:
timea = sa.ndarray[0]
timea = timea.astype(float)
else:
timea = sa._get_column('time')
# truncate b to time range of a
try:
sb = sb.trim(starttime=num2date(np.min(timea)).replace(tzinfo=None), endtime=num2date(np.max(timea)).replace(tzinfo=None)+timedelta(seconds=samprateb),newway=True)
#sb = sb.trim(starttime=num2date(np.min(timea)).replace(tzinfo=None), endtime=num2date(np.max(timea)).replace(tzinfo=None),newway=True)
except:
print("subtractStreams: stream_a and stream_b are apparently not overlapping - returning stream_a")
return stream_a
if ndtype:
timeb = sb.ndarray[0]
else:
timeb = sb._get_column('time')
# truncate a to range of b
try:
sa = sa.trim(starttime=num2date(np.min(timeb.astype(float))).replace(tzinfo=None), endtime=num2date(np.max(timeb.astype(float))).replace(tzinfo=None)+timedelta(seconds=sampratea),newway=True)
#sa = sa.trim(starttime=num2date(np.min(timeb.astype(float))).replace(tzinfo=None), endtime=num2date(np.max(timeb.astype(float))).replace(tzinfo=None),newway=True)
except:
print("subtractStreams: stream_a and stream_b are apparently not overlapping - returning stream_a")
return stream_a
if ndtype:
timea = sa.ndarray[0]
timea = timea.astype(float)
else:
timea = sa._get_column('time')
# testing overlapp
if not len(sb) > 0:
print("subtractStreams: stream_a and stream_b are not overlapping - returning stream_a")
return stream_a
timea = maskNAN(timea)
timeb = maskNAN(timeb)
#print "subtractStreams: timea", timea
#print "subtractStreams: timeb", timeb
# Check for the following cases:
# 1- No overlap of a and b
# 2- a high resolution and b low resolution (tested)
# 3- a low resolution and b high resolution (tested)
# 4- a shorter and fully covered by b (tested)
# 5- b shorter and fully covered by a
if ndtype:
logger.info('subtractStreams: Running ndtype subtraction')
# Assuming similar time steps
#t1s = datetime.utcnow()
        # Get indices of stream_b whose times are present in stream_a
array = [[] for key in KEYLIST]
"""
try: # TODO Find a better solution here! Roman 2017
# The try clause is not correct as searchsorted just finds
# positions independet of agreement (works well if data is similar)
idxB = np.argsort(timeb)
sortedB = timeb[idxB]
idxA = np.searchsorted(sortedB, timea)
#print timea, timeb,len(idxA), len(idxB)
indtib = idxB[idxA]
print ("solution1")
except:
indtib = np.nonzero(np.in1d(timeb, timea))[0]
print ("solution2")
"""
indtib = np.nonzero(np.in1d(timeb, timea))[0]
#print timeb[pos]
#print ("Here", timea)
# If equal elements occur in time columns
if len(indtib) > int(0.5*len(timeb)):
logger.info('subtractStreams: Found identical timesteps - using simple subtraction')
            # get tb times for all matching indices
tb = np.asarray([timeb[ind] for ind in indtib])
            # Get indices of stream_a whose times are present in the matching tbs
try:
idxA = np.argsort(timea)
sortedA = timea[idxA]
idxB = np.searchsorted(sortedA, tb)
#
indtia = idxA[idxB]
except:
indtia = np.nonzero(np.in1d(tb, timea))[0]
#print ("subtractStreams", len(timea),len(timeb),idxA,idxB, indtia, indtib)
#print (np.nonzero(np.in1d(timea,tb))[0])
#idxB = np.argsort(tb)
#sortedB = tb[idxB]
#idxA = np.searchsorted(sortedB, timea)
#indtia = idxB[idxA]
if len(indtia) == len(indtib):
nanind = []
for key in keys:
foundnan = False
keyind = KEYLIST.index(key)
#print key, keyind, len(sa.ndarray[keyind]), len(sb.ndarray[keyind])
#print indtia, indtib,len(indtia), len(indtib)
if len(sa.ndarray[keyind]) > 0 and len(sb.ndarray[keyind]) > 0:
for ind in indtia:
try:
tmp = sa.ndarray[keyind][ind]
except:
print(ind, keyind, len(indtia), len(sa.ndarray[keyind]))
vala = [sa.ndarray[keyind][ind] for ind in indtia]
valb = [sb.ndarray[keyind][ind] for ind in indtib]
diff = np.asarray(vala).astype(float) - np.asarray(valb).astype(float)
if isnan(diff).any():
foundnan = True
if foundnan:
nankeys = [ind for ind,el in enumerate(diff) if isnan(el)]
nanind.extend(nankeys)
array[keyind] = diff
nanind = np.unique(np.asarray(nanind))
array[0] = np.asarray([sa.ndarray[0][ind] for ind in indtia])
if foundnan:
for ind,elem in enumerate(array):
if len(elem) > 0:
array[ind] = np.delete(np.asarray(elem), nanind)
array = np.asarray(array)
else:
if debug:
print("Did not find identical timesteps - linearily interpolating stream b")
print("- please note... this needs considerably longer")
print("- put in the larger (higher resolution) stream as stream_a")
print("- otherwise you might wait endless")
# interpolate b
function = sb.interpol(keys)
#print function, len(function), keys, sa.ndarray, sb.ndarray
            # Get a list of indices for which timeb values are
            # in the vicinity of a (within half of the sampling rate)
            indtia = [idx for idx, el in enumerate(timea) if np.min(np.abs(timeb-el))/(minsamprate/24./3600.)*2 <= 1.] # This selection requires most of the time
# limit time range to valued covered by the interpolation function
#print len(indtia), len(timeb), np.asarray(indtia)
indtia = [elem for elem in indtia if function[1] < timea[elem] < function[2]]
#t2temp = datetime.utcnow()
#print "Timediff %s" % str(t2temp-t1temp)
#print len(indtia), len(timeb), np.asarray(indtia)
#print function[1], sa.ndarray[0][indtia[0]], sa.ndarray[0][indtia[-1]], function[2]
if len(function) > 0:
nanind = []
sa.ndarray[0] = sa.ndarray[0].astype(float)
for key in keys:
foundnan = False
keyind = KEYLIST.index(key)
#print key, keyind
#print len(sa.ndarray[keyind]),len(sb.ndarray[keyind]), np.asarray(indtia)
if len(sa.ndarray[keyind]) > 0 and len(sb.ndarray[keyind]) > 0 and key in NUMKEYLIST: # and key in function:
#check lengths of sa.ndarray and last value of indtia
indtia = list(np.asarray(indtia)[np.asarray(indtia)<len(sa.ndarray[0])])
#print keyind, len(indtia), len(sa.ndarray[keyind]), indtia[0], indtia[-1]
# Convert array to float just in case
sa.ndarray[keyind] = sa.ndarray[keyind].astype(float)
#print sa.ndarray[4][indtia[-2]]
vala = [sa.ndarray[keyind][ind] for ind in indtia]
#print "VALA", np.asarray(vala)
valb = [float(function[0]['f'+key]((sa.ndarray[0][ind]-function[1])/(function[2]-function[1]))) for ind in indtia]
#print "VALB", np.asarray(valb)
diff = np.asarray(vala) - np.asarray(valb)
if isnan(diff).any():
foundnan = True
if foundnan:
nankeys = [ind for ind,el in enumerate(diff) if isnan(el)]
nanind.extend(nankeys)
array[keyind] = diff
nanind = np.unique(np.asarray(nanind))
array[0] = np.asarray([sa.ndarray[0][ind] for ind in indtia])
if foundnan:
for ind,elem in enumerate(array):
if len(elem) > 0:
array[ind] = np.delete(np.asarray(elem), nanind)
array = np.asarray(array)
#t2e = datetime.utcnow()
#print "Total Timediff %s" % str(t2e-t1s)
#print array, len(array), len(array[0])
for key in keys:
try:
sa.header['col-'+key] = 'delta '+key
except:
pass
try:
sa.header['unit-col-'+key] = sa.header['unit-col-'+key]
except:
pass
try:
sa.header['SensorID'] = sa.header['SensorID']+'-'+sb.header['SensorID']
except:
pass
#subtractedstream = DataStream([LineStruct()],sa.header,np.asarray(array))
#for key in keys:
# subtractedstream = subtractedstream._drop_nans(key)
return DataStream([LineStruct()],sa.header,np.asarray(array,dtype=object))
if np.min(timeb) < np.min(timea):
stime = np.min(timea)
else:
stime = np.min(timeb)
if np.max(timeb) > np.max(timea):
etime = np.max(timea)
else:
etime = np.max(timeb)
# if stream_b is longer than stream_a use one step after and one step before e and stime
    if etime < np.max(timeb):
# -*- coding: utf-8 -*-
"""
Created on Wed May 30 14:47:20 2018
@author: Greydon
"""
import os
import re
import numpy as np
import pandas as pd
from scipy.signal import welch, hanning, butter, lfilter, resample
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import matplotlib.ticker as mticker
import pywt
import tables
import subprocess
import scipy.io as spio
import h5py
import json
##############################################################################
# HELPER FUNCTIONS #
##############################################################################
def sorted_nicely(data, reverse = False):
"""
Sorts the given iterable in the way that is expected.
Parameters
----------
data: array-like
The iterable to be sorted.
Returns
-------
The sorted list.
"""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(data, key = alphanum_key, reverse=reverse)
def downsample(data, oldFS, newFS):
"""
Resample data from oldFS to newFS using the scipy 'resample' function.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
oldFS: int
The sampling frequency of the data.
newFS: int
The new sampling frequency.
Returns
-------
newData: array-like
The downsampled dataset.
"""
newNumSamples = int((len(data) / oldFS) * newFS)
newData = np.array(resample(data, newNumSamples))
return newData
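
# --- Illustrative usage of downsample() --------------------------------------
# A minimal sketch only: the sampling rates and the 20 Hz test tone are
# example values.
def _example_downsample(oldFS=24000, newFS=1000):
    t = np.arange(0, 1.0, 1.0 / oldFS)
    x = np.sin(2 * np.pi * 20 * t)      # 1 s of a 20 Hz test tone
    y = downsample(x, oldFS, newFS)
    return y.shape                       # (1000,) for the values above
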
##############################################################################
# FILTERS #
##############################################################################
def butter_bandpass(lowcut, highcut, fs, order):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butterBandpass(d, lowcut, highcut, fs, order):
b, a = butter_bandpass(lowcut, highcut, fs, order)
y = lfilter(b, a, d)
return y
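
# --- Illustrative usage of butterBandpass() ----------------------------------
# A minimal sketch only: band edges, filter order and sampling rate are
# example values, not the settings used for real recordings.
def _example_bandpass(Fs=24000):
    t = np.arange(0, 1.0, 1.0 / Fs)
    x = np.sin(2 * np.pi * 300 * t) + np.sin(2 * np.pi * 5000 * t)
    # keep the 300 Hz component, attenuate the 5 kHz component
    return butterBandpass(x, 200, 400, Fs, order=4)
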
##############################################################################
# TIME DOMAIN FEATURES #
##############################################################################
def MAV(data):
"""
Mean absolute value: the average of the absolute value of the signal.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
MAVData: 1D numpy array containing average absolute value
Reference
---------
<NAME>., <NAME>., & <NAME>. (1993). A new strategy
for multifunction myoelectric control. IEEE Transactions on
Bio-Medical Engineering, 40(1), 82–94.
"""
MAVData = sum(abs(data))/len(data)
return MAVData
def MAVS(data1, data2):
"""
Mean Absolute Value Slope: the difference between MAVs in adjacent
segments.
Parameters
----------
data1: array-like
2D matrix of shape (time, data)
data2: array-like
2D matrix of shape (time, data) of subsequent segment to x1
Returns
-------
MAVSlope: 1D numpy array containing MAV for adjacent signals
Reference
---------
<NAME>., <NAME>., & <NAME>. (1993). A new strategy
for multifunction myoelectric control. IEEE Transactions on
Bio-Medical Engineering, 40(1), 82–94.
"""
MAV1Data = sum(abs(data1))/len(data1)
MAV2Data = sum(abs(data2))/len(data2)
MAVSlope = MAV2Data - MAV1Data
return MAVSlope
def MMAV1(data):
"""
Modified Mean Absolute Value 1: an extension of MAV using a weighting
window function on data below 25% and above 75%.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
MMAV1Data: 1D numpy array containing modified MAV for given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2009).
A Novel Feature Extraction for Robust EMG Pattern Recognition. Journal
of Medical Engineering and Technology, 40(4), 149–154.
"""
w1 = 0.5
segment = int(len(data)*0.25)
start = abs(data[0:segment,])*w1
middle = abs(data[segment:(len(data)-segment),])
end = abs(data[(len(data)-segment):,])*w1
combined = np.concatenate((start, middle, end))
MMAV1Data = sum(abs(combined))/len(combined)
return MMAV1Data
def MMAV2(data):
"""
Modified Mean Absolute Value 2: the smooth window is improved by using
a continuous weighting window function on data below 25% and above 75%.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
MMAV2Data: 1D numpy array containg modified MAV for signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2009).
A Novel Feature Extraction for Robust EMG Pattern Recognition. Journal
of Medical Engineering and Technology, 40(4), 149–154.
"""
segment = int(len(data)*0.25)
a = []
b = []
for i in range(segment):
endIdx = (len(data)-segment)+i
a.append((4*i)/len(data))
b.append((4*(len(data)-endIdx))/len(data))
start = abs(data[0:segment,])*a
middle = abs(data[segment:(len(data)-segment),])
end = abs(data[(len(data)-segment):,])*b
combined = np.concatenate((start,middle,end))
MMAV2Data = sum(abs(combined))/len(combined)
return MMAV2Data
def RMS(data):
"""
Root mean square: the root mean square of a given recording.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
RMSData: 1D numpy array containing root mean square of the signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2009).
A Novel Feature Extraction for Robust EMG Pattern Recognition. Journal
of Medical Engineering and Technology, 40(4), 149–154.
"""
RMSData = (sum(data*data)/len(data))**0.5
return RMSData
def VAR(data):
"""
    Variance: deviation of the signal from its mean.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
varianceData: 1D numpy array containg the signal variance
Reference
---------
<NAME>., & <NAME>. (2000). DSP-based controller for a
multi-degree prosthetic hand. Robotics and Automation, 2000. …,
2(April), 1378–1383.
"""
meanData = sum(data)/len(data)
varianceData = sum((data-meanData)*(data-meanData))/len(data)
return varianceData
def curveLen(data):
"""
Curve length: the cumulative length of the waveform over the time segment.
This feature is related to the waveform amplitude, frequency and time.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
curveLenData: 1D numpy array containing the average curve length for
given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (1993). A new strategy
for multifunction myoelectric control. IEEE Transactions on
Bio-Medical Engineering, 40(1), 82–94.
"""
data1 = data[1:]
data2 = data[:-1]
curveLenData = sum(abs(data2-data1))/(len(data)-1)
return curveLenData
def zeroCross(data, threshold):
"""
Zero crossings: Calculates the number of times the signal amplitude
crosses the zero y-axis.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
zeroCrossData: 1D numpy array containing total number of zero crossings
in the given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (1993). A new strategy
for multifunction myoelectric control. IEEE Transactions on
Bio-Medical Engineering, 40(1), 82–94.
"""
sign = lambda z: (1, -1)[z < 0]
i = abs(np.array([sign(x) for x in data[1:]]) - np.array([sign(x) for x in data[:-1]]))
zeroCrossData = sum(i)/(len(data))
return zeroCrossData
def slopeSign(data):
"""
Slope Sign Change: The number of changes between positive and negative
slope among three consecutive segments are performed
with the threshold function.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
slopeSignData: 1D numpy array containing the total slope sign changes
for a given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (1993). A new strategy
for multifunction myoelectric control. IEEE Transactions on
Bio-Medical Engineering, 40(1), 82–94.
"""
i = (data[1:-1]-data[:-2])
j = (data[1:-1]-data[2:])
slopeSignData = len(np.where((i*j) > 10)[0])
return slopeSignData
def threshold(data):
"""
Threshold: measure of how scattered the sign is (deviation).
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
thresholdData: 1D numpy array containing the total threshold value for a
given signal
Reference
---------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2011).
Characterization of subcortical structures during deep brain stimulation utilizing
support vector machines. Conference Proceedings: Annual International Conference of
the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine
and Biology Society. Annual Conference, 2011, 7949–7952.
"""
i = data-(sum(data)/len(data))
j = sum(i*i)
thresholdData = (3*(j**(1/2)))/(len(data)-1)
return thresholdData
def WAMP(data, threshold):
"""
Willison Amplitude: the number of times that the difference between signal
amplitude among two adjacent segments that exceeds a predefined
threshold to reduce noise effects.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
threshold: int
threshold level in uV (generally use 10 microvolts)
Returns
-------
WAMPData: 1D numpy array containing total number of times derivative
was above threshold in a given signal
Reference
---------
<NAME>., & <NAME>. (2000). DSP-based controller for a
multi-degree prosthetic hand. Robotics and Automation, 2000. …,
2(April), 1378–1383.
"""
i = abs(data[:-1]-data[1:])
j = i[i > threshold]
WAMPData = len(j)
return WAMPData
def SSI(data):
"""
Simple Square Integral: uses the energy of signal as a feature.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
SSIData: 1D numpy array containing the summed absolute square of the
given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2009).
A Novel Feature Extraction for Robust EMG Pattern Recognition. Journal
of Medical Engineering and Technology, 40(4), 149–154.
"""
SSIData = sum(abs(data*data))
return SSIData
def powerAVG(data):
"""
Average power: the amount of work done, amount energy transferred per
unit time.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
powerAvgData: 1D numpy array containing average power in a given signal
"""
powerAvgData = sum(data*data)/len(data)
return powerAvgData
def peaksNegPos(data):
"""
Peaks: the number of positive peaks in the data window per unit time.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
peaksNegPosData: 1D numpy array containing total number of peaks in given
signal
"""
sign = lambda z: (1, -1)[z < 0]
i = [sign(z) for z in (data[2:]-data[1:-1])]
j = [sign(z) for z in (data[1:-1]-data[:-2])]
k = [a_i - b_i for a_i, b_i in zip(j, i)]
peaksNegPosData = [max([0,z]) for z in k]
peaksNegPosData = sum(peaksNegPosData)/(len(data)-2)
return peaksNegPosData
def peaksPos(data):
"""
Peak Density: calculates the density of peaks within the current locality.
A peak is defined as a point higher in amplitude than the two points
to its left and right side.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
peaksPosData: 1D numpy array containing the average number of peaks
in a given signal
"""
data1 = data[1:-1]
data2 = data[0:-2]
data3 = data[2:]
data4 = data1 - data2
data5 = data1 - data3
peakcount = 0
for i in range(len(data)-2):
if data4[i] > 0 and data5[i]>0:
peakcount += 1
peaksPosData = peakcount/(len(data)-2)
return peaksPosData
def tkeoTwo(data):
"""
Teager-Kaiser Energy Operator: is analogous to the total
(kinetic and potential) energy of a signal. This variation uses
the second derivative.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
tkeoTwoData: 1D numpy array containing total teager energy of a given
signal using two samples
Reference
---------
1. <NAME>. (1990). On a simple algorithm to calculate the
“energy” of a signal. In International Conference on Acoustics,
Speech, and Signal Processing (Vol. 2, pp. 381–384). IEEE.
2. <NAME>., <NAME>., & <NAME>. (2007). Teager-Kaiser energy
operation of surface EMG improves muscle activity onset detection.
Annals of Biomedical Engineering, 35(9), 1532–8.
"""
i = data[1:-1]*data[1:-1]
j = data[2:]*data[:-2]
tkeoTwoData = sum(i-j)/(len(data)-2)
return tkeoTwoData
def tkeoFour(data):
"""
Teager-Kaiser Energy Operator: is analogous to the total
(kinetic and potential) energy of a signal. This variation uses
the 4th order derivative.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
tkeoFourData: 1D numpy array containing total teager energy of a given
signal using 4 samples
Reference
---------
1. <NAME>. (1990). On a simple algorithm to calculate the
“energy” of a signal. In International Conference on Acoustics,
Speech, and Signal Processing (Vol. 2, pp. 381–384). IEEE.
2. <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>., … <NAME>. (2008). Automated
neonatal seizure detection mimicking a human observer reading EEG.
Clinical Neurophysiology : Official Journal of the International
Federation of Clinical Neurophysiology, 119(11), 2447–54.
"""
l = 1
p = 2
q = 0
s = 3
tkeoFourData = sum(data[l:-p]*data[p:-l]-data[q:-s]*data[s:])/(len(data)-3)
return tkeoFourData
def KUR(data):
"""
Kurtosis: calculates the degree to which the signal has 'tails'. Heavy-tail
would mean many outliers. A normal distribution kurtosis value is 3.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
kurtosisData: 1D numpy array containing the total kurtosis for a given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2000). Flexible
Independent Component Analysis. Journal of VLSI Signal Processing
Systems for Signal, Image and Video Technology, 26(1), 25–38.
"""
meanX = sum(data)/len(data)
diff = [z - meanX for z in data]
sq_differences = [d**2 for d in diff]
var = sum(sq_differences)/len(data)
stdData = var**0.5
i = sum((data-meanX)**4)
j = (len(data)-1)*(stdData)**4
kurtosisData = i/j
return kurtosisData
def SKW(data):
"""
Skewness: measures symmetry in the signal, the data is symmetric if it
looks the same to the left and right of the center point. A skewness
of 0 would indicate absolutely no skew.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
skewnessData: 1D numpy array containing the total skewness for a given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2011).
Rolling element bearing fault detection in industrial environments
based on a K-means clustering approach. Expert Systems with
Applications, 38(3), 2888–2911.
"""
meanX = sum(data)/len(data)
diff = [z - meanX for z in data]
sq_differences = [d**2 for d in diff]
var = sum(sq_differences)/len(data)
stdX = var**0.5
i = sum((data-meanX)**3)
j = (len(data)-1)*(stdX)**3
skewnessData = i/j
return skewnessData
def crestF(data):
"""
Crest factor: the relation between the peak amplitude and the RMS of the
signal.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
crestFactorData: 1D numpy array containing the total crest factor for a given
signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2011).
Rolling element bearing fault detection in industrial environments
based on a K-means clustering approach. Expert Systems with
Applications, 38(3), 2888–2911.
"""
DC_remove = data - (sum(data)/len(data))
peakAmp = max(abs(DC_remove))
RMS = (sum(DC_remove*DC_remove)/len(DC_remove))**0.5
crestFactorData = peakAmp/RMS
return crestFactorData
def entropy(data):
"""
Entropy: is an indicator of disorder or unpredictability. The entropy is
smaller inside STN region because of its more rhythmic firing compared
to the mostly noisy background activity in adjacent regions.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
entropyData: 1D numpy array containing the total entropy for a given
signal
Reference
---------
<NAME>., & <NAME>. (2004). Entropy And Entropy-based
Features In Signal Processing. Laboratory of Intelligent Communication
Systems, Dept. of Computer Science and Engineering, University of West
Bohemia, Plzen, Czech Republic, 1–2.
"""
ent = 0
m = np.mean(data)
for i in range(len(data)):
quo = abs(data[i] - m)
ent = ent + (quo* np.log10(quo))
entropyData = -ent
return entropyData
def shapeFactor(data):
"""
    Shape Factor: a value affected by the object's shape but independent of its
dimensions.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
shapeFactorData: 1D numpy array containing shape factor value for a
given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2011).
Rolling element bearing fault detection in industrial environments
based on a K-means clustering approach. Expert Systems with
Applications, 38(3), 2888–2911.
"""
RMS = (sum(data*data)/len(data))**0.5
shapeFactorData = RMS/(sum(abs(data))/len(data))
return shapeFactorData
def impulseFactor(data):
"""
    Impulse Factor: the ratio of the peak amplitude to the mean absolute value
    of the signal.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
impulseFactorData: 1D numpy array containing impulse factor value for a
given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2011).
Rolling element bearing fault detection in industrial environments
based on a K-means clustering approach. Expert Systems with
Applications, 38(3), 2888–2911.
"""
impulseFactorData = max(abs(data))/(sum(abs(data))/len(data))
return impulseFactorData
def clearanceFactor(data):
"""
    Clearance Factor: the ratio of the peak amplitude to the squared mean of
    the square roots of the absolute amplitudes.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Returns
-------
clearanceFactorData: 1D numpy array containing impulse factor value for a
given signal
Reference
---------
<NAME>., <NAME>., & <NAME>. (2011).
Rolling element bearing fault detection in industrial environments
based on a K-means clustering approach. Expert Systems with
Applications, 38(3), 2888–2911.
"""
clearanceFactorData = max(abs(data))/((sum(abs(data)**0.5)/len(data))**2)
return clearanceFactorData
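
# --- Illustrative usage of the time-domain features ---------------------------
# A minimal sketch only: the synthetic 1 s noise segment and the WAMP threshold
# of 10 are example assumptions, not the settings used for real recordings.
def _example_time_domain_features(Fs=24000):
    rng = np.random.RandomState(0)
    segment = rng.randn(Fs)              # one 1 s segment of synthetic noise
    return {
        'MAV': MAV(segment),
        'RMS': RMS(segment),
        'VAR': VAR(segment),
        'curveLen': curveLen(segment),
        'zeroCross': zeroCross(segment, 0),
        'WAMP': WAMP(segment, 10),
        'KUR': KUR(segment),
        'SKW': SKW(segment),
        'crestF': crestF(segment),
    }
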
##############################################################################
# FREQUENCY DOMAIN #
##############################################################################
def computeFFT(data, Fs, normalize=False):
"""
Compute the FFT of `data` and return. Also returns the axis in Hz for
further plot.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Fs: int
Sampling frequency in Hz.
Returns
-------
fAx: array-like
Axis in Hz to plot the FFT.
fftData: array-like
Value of the fft.
"""
N = data.shape[0]
fAx = np.arange(N/2) * Fs/N
if normalize:
Y = np.fft.fft(data)/int(len(data))
fftData = abs(Y[range(int(len(data)/2))])
else:
Y = np.abs(np.fft.fft(data))
fftData = 2.0/N * np.abs(Y[0:N//2])
return fAx, fftData
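
# --- Illustrative usage of computeFFT() ---------------------------------------
# A minimal sketch only: a pure 50 Hz tone sampled at 1 kHz should peak at the
# frequency bin nearest 50 Hz.
def _example_fft(Fs=1000):
    t = np.arange(0, 1.0, 1.0 / Fs)
    x = np.sin(2 * np.pi * 50 * t)
    fAx, fftData = computeFFT(x, Fs)
    return fAx[np.argmax(fftData)]       # 50.0 for the values above
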
def wrcoef(data, coef_type, coeffs, wavename, level):
N = np.array(data).size
a, ds = coeffs[0], list(reversed(coeffs[1:]))
if coef_type =='a':
return pywt.upcoef('a', a, wavename, level=level)[:N]
elif coef_type == 'd':
return pywt.upcoef('d', ds[level-1], wavename, level=level)[:N]
else:
raise ValueError("Invalid coefficient type: {}".format(coef_type))
def wavlet(data, nLevels, waveletName, timewindow, windowSize, Fs):
"""
Wavelet Transform: captures both frequency and time information.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
nLevels: int
Number of levels for the wavlet convolution
waveletName: str
Name of the wavelet to be used
timewindow: boolean
Option to split the given signal into discrete time bins
windowSize: int
If timewindow is TRUE then provide the size of the time
window
Fs: int
If timewindow is TRUE then provide the sampling rate of the given
signal
Returns
-------
waveletData: 1D numpy array containing the standard deviation of the
wavelet convolution for a given signal
"""
if timewindow == True:
windowsize = windowSize*Fs
n = int(len(data))
windown=int(np.floor(n/windowsize))
waveletData=[]
for i in range(windown-1):
xSeg = data[windowsize*i:windowsize*(i+1)]
coeffs = pywt.wavedec(xSeg, waveletName, level=nLevels)
waveletData.append(np.std(wrcoef(xSeg, 'd', coeffs, waveletName, nLevels)))
else:
coeffs = pywt.wavedec(data, waveletName, level=nLevels)
waveletData = np.std(wrcoef(data, 'd', coeffs, waveletName, nLevels))
return waveletData
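# --- Added illustrative usage: a minimal sketch. It assumes `pywt` is imported at
# the top of this module, as wrcoef()/wavlet() require; the wavelet name and level
# are arbitrary demonstration choices.
def _demo_wavelet_feature(Fs=1000):
    t = np.arange(0, 2, 1.0 / Fs)
    x = np.sin(2 * np.pi * 20 * t)
    # timewindow=False computes a single value over the whole signal, so the
    # windowSize argument is ignored in this branch.
    return wavlet(x, nLevels=4, waveletName='db4', timewindow=False, windowSize=1, Fs=Fs)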
def computeAvgDFFT(data, Fs, windowLength = 256, windowOverlapPrcnt = 50, Low=500, High=5000):
"""
    Average band power: computes a Welch power spectral density estimate and
    averages it within the defined passband.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Fs: int
Sampling rate of the given signal
Low: int
The highpass frequency cutoff
High: int
The lowpass frequency cutoff
Returns
-------
averagePxxWelch: average power in defined passband
"""
# Defining hanning window
win = hanning(windowLength, True)
welchNoverlap = int(windowLength*windowOverlapPrcnt/100.0)
f, Pxxf = welch(data, Fs, window=win, noverlap=welchNoverlap, nfft=windowLength, return_onesided=True)
indexLow = np.where(f == min(f, key=lambda x:abs(x-Low)))[0][0]
indexHigh = np.where(f == min(f, key=lambda x:abs(x-High)))[0][0]
averagePxxWelch = np.mean(Pxxf[indexLow:indexHigh])
return averagePxxWelch
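# --- Added illustrative usage: a minimal sketch. It assumes `welch` and `hanning`
# are imported from scipy.signal at module level, as computeAvgDFFT() requires.
# Returns the average Welch power between 500 Hz and 5 kHz for a two-tone signal.
def _demo_band_power(Fs=24000):
    t = np.arange(0, 1, 1.0 / Fs)
    x = np.sin(2 * np.pi * 1000 * t) + 0.5 * np.sin(2 * np.pi * 4000 * t)
    return computeAvgDFFT(x, Fs, windowLength=256, windowOverlapPrcnt=50, Low=500, High=5000)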
def meanFrq(data, Fs):
"""
    Mean Frequency: calculated as the sum of the product of the power spectral
    density and the frequency, divided by the total sum of the power spectral
    density.
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Fs: int
Sampling rate of the given signal
Returns
-------
meanFrqData: 1D numpy array containing the mean frequency of a given
signal
Reference
---------
<NAME>., & <NAME>. (2006). GA-based Feature Subset
Selection for Myoelectric Classification. In 2006 IEEE International
Conference on Robotics and Biomimetics (pp. 1465–1470). IEEE.
"""
win = 4 * Fs
freqs, psd = welch(data, Fs, nperseg=win, scaling='density')
meanFrqData = sum(freqs*psd)/sum(psd)
return meanFrqData
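# --- Added illustrative usage: a minimal sketch assuming scipy.signal.welch is
# available to meanFrq(). For a pure tone the mean frequency should sit close to the
# tone frequency, which makes this an easy self-check.
def _demo_mean_frequency(Fs=1000, tone=100.0):
    t = np.arange(0, 4, 1.0 / Fs)
    x = np.sin(2 * np.pi * tone * t)
    return meanFrq(x, Fs)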
def freqRatio(data, Fs):
"""
Frequency Ratio: ratio between power in lower frequencies and power in
higher frequencies
Parameters
----------
data: array-like
2D matrix of shape (time, data)
Fs: int
Sampling rate of the given signal
Returns
-------
    freqRatioData: element-wise ratio of spectral power in the lower half of the
        spectrum to that in the upper half
Reference
---------
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., &
<NAME>. (2000). New EMG pattern recognition based on soft computing
techniques and its application to control of a rehabilitation robotic
arm. Proc. of 6th International Conference on Soft Computing
(IIZUKA2000), 890–897.
"""
win = 4 * Fs
freqs, psd = welch(data, Fs, nperseg=win, scaling='density')
freqRatioData = abs(psd[:int(len(freqs)/2)])/abs(psd[int(len(freqs)/2):-1])
return freqRatioData
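# --- Added illustrative usage: a minimal sketch assuming scipy.signal.welch is
# available to freqRatio(). Note that freqRatio() returns an element-wise ratio of
# the lower and upper halves of the Welch spectrum rather than a single scalar.
def _demo_freq_ratio(Fs=1000):
    t = np.arange(0, 4, 1.0 / Fs)
    x = np.sin(2 * np.pi * 50 * t) + 0.2 * np.sin(2 * np.pi * 400 * t)
    return freqRatio(x, Fs)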
def meanAmpFreq(data, windowSize, Fs):
"""
Mean Frequency Amplitude:
Parameters
----------
data: array-like
2D matrix of shape (time, data)
windowSize: int
Size of the window
Fs: int
Sampling rate of the given signal
Returns
-------
    meanAmpFreqData: list containing the median FFT amplitude of each time window
"""
window = windowSize*Fs
n = int(len(data))
windown=int(np.floor(n/window))
meanAmpFreqData=[]
for i in range(windown-1):
xSeg = data[window*i:window*(i+1)]
meanAmpFreqData.append(np.median(abs(np.fft.fft(xSeg))))
return meanAmpFreqData
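# --- Added illustrative usage: a minimal sketch. meanAmpFreq() yields one median
# FFT amplitude per time window, so a 10 s signal split into 1 s windows returns
# nine values (the loop drops the final window).
def _demo_mean_amp_freq(Fs=1000):
    t = np.arange(0, 10, 1.0 / Fs)
    x = np.sin(2 * np.pi * 50 * t)
    return meanAmpFreq(x, windowSize=1, Fs=Fs)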
##############################################################################
# VISUALIZATION #
##############################################################################
channelLabels = {1:"Center", 2:"Anterior", 3:"Posterior", 4:"Medial", 5:"Lateral"}
class MathTextSciFormatter(mticker.Formatter):
def __init__(self, fmt="%1.2e"):
self.fmt = fmt
def __call__(self, x, pos=None):
s = self.fmt % x
decimal_point = '.'
positive_sign = '+'
tup = s.split('e')
significand = tup[0].rstrip(decimal_point)
sign = tup[1][0].replace(positive_sign, '')
exponent = tup[1][1:].lstrip('0')
if exponent:
exponent = '10^{%s%s}' % (sign, exponent)
if significand and exponent:
s = r'\bf %s{\times}%s' % (significand, exponent)
else:
s = r'\bf %s%s' % (significand, exponent)
return "${}$".format(s)
def axFormat(a):
a.yaxis.set_major_formatter(MathTextSciFormatter("%1.2e"))
a.xaxis.set_major_formatter(FormatStrFormatter('%.02f'))
for tick in a.xaxis.get_major_ticks():
tick.label1.set_fontweight('bold')
# for tick in a.yaxis.get_major_ticks():
# tick.label1.set_fontweight('bold')
def axFormaty(a):
a.yaxis.set_major_formatter(FormatStrFormatter('%.02f'))
a.xaxis.set_major_formatter(FormatStrFormatter('%.02f'))
for tick in a.yaxis.get_major_ticks():
tick.label1.set_fontweight('bold')
def plotting(x, showOnly, timeWindow, processedFolder):
featureLabels = pd.DataFrame([{'mav': 'Mean Absolute Value',
'mavSlope': 'Mean Absolute Value Slope',
'variance': 'Variance',
'mmav1': 'Mean Absolute Value 1',
'mmav2': 'Mean Absolute Value 2',
'rms': 'Root Mean Square',
'curveLength': 'Curve Length',
'zeroCross': 'Zero Crossings',
'slopeSign': 'Slope Sign',
'threshold': 'Threshold',
'wamp': 'Willison Amplitude',
'ssi': 'Simple Square Integral',
'power': 'Power',
'peaksNegPos': 'Peaks - Negative and Positive',
'peaksPos': 'Peaks - Positive',
'tkeoTwo': 'Teager-Kaiser Energy Operator - Two Samples',
'tkeoFour': 'Teager-Kaiser Energy Operator - Four Samples',
'kurtosis': 'Kurtosis',
'skew': 'Skewness',
'crestF': 'Crest Factor',
'meanF': 'Mean Frequency',
'binData': 'Raw Data',
'AvgPowerMU': 'Bandpass Power (500-1000Hz)',
'AvgPowerSU': 'Bandpass Power (1000-3000Hz)',
'entropy': 'Signal Entropy',
                                   'waveletStd': 'STD of Wavelet Convolution',
'spikeISI': 'Inter-Spike Interval',
'meanISI': 'Mean of ISI',
'stdISI': 'STD of ISI',
'burstIndex': 'Burst Index',
'pauseIndex': 'Pause Index',
'pauseRatio': 'Pause Ratio',
'spikeDensity': 'Spike Density'}])
subList = np.unique(x['subject'])
for isub in range(len(subList)):
if timeWindow==True:
outputDir = processedFolder + '/sub-' + str(subList[isub]) + '/timeWindow/'
if not os.path.exists(outputDir):
os.makedirs(outputDir)
else:
outputDir = processedFolder + '/sub-' + str(subList[isub]) + '/depthWindow/'
if not os.path.exists(outputDir):
os.makedirs(outputDir)
numSides = np.unique(x[(x['subject']==subList[isub])]['side'])
for iside in range(len(numSides)):
numChans = np.unique(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside])]['channel'])
numFeatures = list(x.drop(['subject','side','channel','depth','labels', 'chanChosen'], axis=1))
if np.isnan(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside])]['chanChosen']).any():
chanSel = np.nan
else:
chanSel = np.unique(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside])]['chanChosen'])
for ifeatures in range(len(numFeatures)):
if 'binData' in numFeatures[ifeatures]:
fileName = 'sub-' + str(subList[isub]) + '_side-' + numSides[iside] + '_' + featureLabels[numFeatures[ifeatures]].values[0].replace(" ", "")
plotRaw(x,subList[isub],numSides[iside], numChans, chanSel, fileName, outputDir, 24000)
print('Finished subject', str(subList[isub]), numSides[iside], 'side', 'feature:', featureLabels[numFeatures[ifeatures]].values[0])
elif 'spikeISI' in numFeatures[ifeatures]:
nothing = []
elif numFeatures[ifeatures] in {'PositiveSpikes','PositiveTimes','NegativeSpikes','NegativeTimes'}:
nothing = []
else:
fig, axs = plt.subplots(len(numChans),1, sharex=True, sharey=False)
fig.subplots_adjust(hspace=0.1, wspace=0)
titleLab = 'Sub-' + str(subList[isub]) + ' ' + numSides[iside] + ' Side: ' + featureLabels[numFeatures[ifeatures]].values[0]
fileName = 'sub-' + str(subList[isub]) + '_side-' + numSides[iside] + '_' + featureLabels[numFeatures[ifeatures]].values[0].replace(" ", "")
for ichan in range(len(numChans)):
feature = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])][numFeatures[ifeatures]])
depths = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])]['depth'])
labels = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])]['labels'])
channel = channelLabels.get(numChans[ichan])
muA = np.mean(feature)
if timeWindow==False:
if len(numChans) ==1:
axs.plot(depths, feature)
axs.set_xlim(depths[0,],depths[-1])
else:
axs[ichan].plot(depths, feature)
axs[ichan].set_xlim(depths[0,],depths[-1])
else:
if len(numChans) ==1:
axs.plot(np.arange(0,x.shape[1],1), feature)
axs.set_xlim(0,(feature.shape[1]))
else:
axs[ichan].plot(np.arange(0,x.shape[1],1), feature)
axs[ichan].set_xlim(0,(feature.shape[1]))
if len(numChans) ==1:
axs.plot(axs.get_xlim(), [muA,muA], ls= 'dashed', c='black')
if ~np.isnan(chanSel):
if numChans[ichan] == chanSel:
axs.annotate(channel, xy=(1.01,0.5),xycoords='axes fraction', fontsize=12, fontweight='bold', color='red')
else:
axs.annotate(channel, xy=(1.01,0.5),xycoords='axes fraction', fontsize=12, fontweight='bold')
else:
axs.annotate(channel, xy=(1.01,0.5),xycoords='axes fraction', fontsize=12, fontweight='bold')
if timeWindow==False:
xticlabs = np.arange(depths[0],depths[-1],1)
axs.xaxis.set_ticks(xticlabs)
axs.xaxis.set_ticklabels(xticlabs, rotation = 45)
else:
xticlabs = np.arange(0,len(feature),5)
axs.xaxis.set_ticks(xticlabs)
axs.xaxis.set_ticklabels((xticlabs*2).astype(int), rotation = 45)
axFormat(axs)
if np.size(np.where(labels==1)) != 0:
inDepth = depths[np.min(np.where(labels==1))]
outDepth = depths[np.max(np.where(labels==1))]
axs.axvspan(inDepth, outDepth, color='purple', alpha=0.2)
for xc in depths:
axs.axvline(x=xc, color='k', linestyle='--', alpha=0.2)
else:
axs[ichan].plot(axs[ichan].get_xlim(), [muA,muA], ls= 'dashed', c='black')
if ~np.isnan(chanSel):
if numChans[ichan] == chanSel:
axs[ichan].annotate(channel, xy=(1.01,0.5),xycoords='axes fraction', fontsize=12, fontweight='bold', color='red')
else:
axs[ichan].annotate(channel, xy=(1.01,0.5),xycoords='axes fraction', fontsize=12, fontweight='bold')
else:
axs[ichan].annotate(channel, xy=(1.01,0.5),xycoords='axes fraction', fontsize=12, fontweight='bold')
if timeWindow==False:
xticlabs = np.arange(depths[0],depths[-1],1)
axs[ichan].xaxis.set_ticks(xticlabs)
axs[ichan].xaxis.set_ticklabels(xticlabs, rotation = 45)
else:
xticlabs = np.arange(0,len(feature),5)
axs[ichan].xaxis.set_ticks(xticlabs)
axs[ichan].xaxis.set_ticklabels((xticlabs*2).astype(int), rotation = 45)
axFormat(axs[ichan])
if np.size(np.where(labels==1)) != 0:
inDepth = depths[np.min(np.where(labels==1))]
outDepth = depths[np.max(np.where(labels==1))]
axs[ichan].axvspan(inDepth, outDepth, color='purple', alpha=0.2)
for xc in depths:
axs[ichan].axvline(x=xc, color='k', linestyle='--', alpha=0.2)
plt.suptitle(titleLab, y=0.96,x=0.51, size=16, fontweight='bold')
fig.text(0.51, 0.03, 'Depth (mm)', ha='center', size=14, fontweight='bold')
fig.text(0.035, 0.5, featureLabels[numFeatures[ifeatures]].values[0], va='center', rotation='vertical', size=14, fontweight='bold')
if showOnly == True:
plt.show()
else:
figure = plt.gcf() # get current figure
figure.set_size_inches(12, 8)
if timeWindow==True:
filepath = outputDir + fileName + '.png'
else:
filepath = outputDir + fileName + '.png'
plt.savefig(filepath, dpi=100) # save the figure to file
plt.close('all')
print('Finished subject', str(subList[isub]), numSides[iside], 'side', 'feature:', featureLabels[numFeatures[ifeatures]].values[0])
def extract_raw_nwbFile(file_name, trimData, FilterData):
patientDF = pd.DataFrame([])
subject = int("".join([x for x in h5py.File(file_name, 'r+').get('/identifier').value.split('_')[0] if x.isdigit()]))
chans = list(set(h5py.File(file_name, 'r+').get('/intervals/trials/channel').value))
with open(file_name.replace('.nwb', '.json')) as side_file:
sidecar = json.load(side_file)
Fs = sidecar['SamplingFrequency']
for ichan in chans:
channelIdx = h5py.File(file_name, 'r+').get('/intervals/trials/channel').value == ichan
startTime = h5py.File(file_name, 'r+').get('/intervals/trials/start_time').value[channelIdx]
endTime = h5py.File(file_name, 'r+').get('/intervals/trials/stop_time').value[channelIdx]
depths = [float(x) for x in h5py.File(file_name, 'r+').get('/intervals/trials/depth').value[channelIdx]]
dataset = h5py.File(file_name, 'r+').get('/acquisition/'+ ichan +'/data').value
for idx, idepth in enumerate(depths):
tempData = dataset[int(startTime[idx]):int(endTime[idx])]
if FilterData:
tempData = butterBandpass(tempData, lowcut = 400, highcut = 6000, fs = Fs, order = 4)
rowDF = [{'subject': subject, 'side': h5py.File(file_name, 'r+').get('/session_description').value.split('_')[0],
'channel': ichan, 'chanChosen': np.nan, 'depth': idepth, 'rawData': tempData}]
patientDF = pd.concat([patientDF, pd.DataFrame(rowDF)], axis = 0)
if trimData == True:
datasetLength = int(5*np.floor(float(min([len(x) for x in patientDF['rawData']])/Fs)/5))*Fs
patientDF['rawData'] = [x[:int(datasetLength)] for x in patientDF['rawData']]
return patientDF
#x = filen
#isub = 0
#iside = 0
#ichan = 0
def plotRaw(x, showOnly, processedFolder, Fs, trimData, FilterData):
channelLabels = {1:"Center", 2:"Anterior", 3:"Posterior", 4:"Medial", 5:"Lateral"}
if not isinstance(x, pd.DataFrame):
if x.endswith('.nwb'):
x = extract_raw_nwbFile(x, trimData, FilterData)
subList = np.unique(x['subject'])
else:
subList = np.unique(x['subject'])
for isub in range(len(subList)):
numSides = np.unique(x[(x['subject']==subList[isub])]['side'])
for iside in range(len(numSides)):
outputDir = '\\'.join([processedFolder, 'sub-P' + str(subList[isub]).zfill(3), 'rawData', numSides[iside]])
if not os.path.exists(outputDir):
os.makedirs(outputDir)
numChans = np.unique(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside])]['channel'])
colnames = x.columns.values.tolist()
if np.isnan(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside])]['chanChosen']).any():
chanSel = np.nan
else:
chanSel = np.unique(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside])]['chanChosen'])
for ichan in range(len(numChans)):
if 'labels' in colnames:
labelsPresent = True
labels = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])]['labels'])
else:
labelsPresent = False
if labelsPresent:
rawData = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])]['rawData'])
feature = np.empty((0, len(np.frombuffer(rawData[1,]))))
else:
rawData = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])]['rawData'])
feature = np.empty((0, len(rawData[1,])))
for idepth in range(len(rawData)):
if labelsPresent:
tempdat = np.frombuffer(rawData[idepth,])
tempdat = butterBandpass(tempdat, lowcut = 500, highcut = 5000, fs = Fs, order = 5)
feature = np.append(feature, [np.transpose(tempdat)], axis=0)
else:
tempdat = rawData[idepth,]
tempdat = butterBandpass(tempdat, lowcut = 500, highcut = 5000, fs = Fs, order = 5)
feature = np.append(feature, [np.transpose(tempdat)], axis=0)
nDepths = len(feature)
yshift = 120
depths = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])]['depth'])
fig, ax = plt.subplots()
ax.plot(feature.T + yshift * np.arange(0,nDepths,1), color='black', linewidth=0.2)
ax.yaxis.set_ticks(yshift * np.arange(0,nDepths,1))
ax.yaxis.set_ticklabels(['{:.2f}'.format(x) for x in depths])
ax.xaxis.set_ticks(np.arange(0,len(feature.T)+1,(len(feature.T)/5)))
start, end = ax.get_xlim()
xTickLabs = np.arange(0, len(feature.T)+1, len(feature.T)/5)/Fs
ax.xaxis.set_ticklabels(['{:.2f}'.format(x) for x in xTickLabs])
ax.set_ylim(-yshift,(nDepths*yshift))
ax.set_xlim(0,len(feature.T))
if labelsPresent:
if np.size(np.where(labels==1)) != 0:
inDepth = np.min(np.where(labels==1))*yshift
outDepth = np.max(np.where(labels==1))*yshift
ax.axhline(inDepth, color='green', linewidth=2)
ax.axhline(outDepth, color='red', linewidth=2)
plt.gca().invert_yaxis()
if isinstance(numChans[ichan], str):
channel = numChans[ichan]
else:
channel = channelLabels.get(numChans[ichan])
if numChans[ichan] == chanSel:
plt.title('Sub-' + str(subList[isub]).zfill(3) + ' ' + numSides[iside] + ' Side: ' + channel + " Trajectory", size=14, fontweight="bold", color = 'red')
else:
plt.title('Sub-' + str(subList[isub]).zfill(3) + ' ' + numSides[iside] + ' Side: ' + channel + " Trajectory", size=14, fontweight="bold")
plt.xlabel("Time (sec)", size=14, fontweight='bold')
plt.ylabel("Depth (mm)", size=14, fontweight='bold')
fileName = 'sub-P' + str(subList[isub]).zfill(3) + '_side-' + numSides[iside] + '_channel-' + channel + '-rawData'
figure = plt.gcf() # get current figure
figure.set_size_inches(20, 12)
filepath = os.path.join(outputDir, fileName + '.png')
plt.savefig(filepath, dpi=100) # save the figure to file
plt.close()
print('Finished subject', str(subList[isub]), numSides[iside], 'side', 'Raw Data', 'for channel', str(numChans[ichan]))
def plotRawBenGun(x, showOnly, processedFolder, Fs, trimData, FilterData):
channelLabels = {1:"Center", 2:"Anterior", 3:"Posterior", 4:"Medial", 5:"Lateral"}
if not isinstance(x, pd.DataFrame):
if x.endswith('.nwb'):
x = extract_raw_nwbFile(x, trimData, FilterData)
subList = np.unique(x['subject'])
else:
subList = np.unique(x['subject'])
for isub in range(len(subList)):
numSides = np.unique(x[(x['subject']==subList[isub])]['side'])
for iside in range(len(numSides)):
outputDir = '\\'.join([processedFolder, 'sub-P' + str(subList[isub]).zfill(3), 'rawData', numSides[iside]])
if not os.path.exists(outputDir):
os.makedirs(outputDir)
numChans = np.unique(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside])]['channel'])
colnames = x.columns.values.tolist()
if np.isnan(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside])]['chanChosen']).any():
chanSel = np.nan
else:
chanSel = np.unique(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside])]['chanChosen'])
if numSides[iside] == 'left':
axPosition = {1:['5'], 2:['2'], 3:['8'], 4:['6'], 5:['4']}
else:
axPosition = {1:['5'], 2:['2'], 3:['8'], 4:['4'], 5:['6']}
            titleLab = 'Sub-' + str(subList[isub]).zfill(3) + ' ' + numSides[iside] + " Side: Ben's Gun"
fig = plt.figure()
for ichan in range(len(numChans)):
if 'labels' in colnames:
labelsPresent = True
labels = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])]['labels'])
else:
labelsPresent = False
if labelsPresent:
rawData = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])]['rawData'])
feature = np.empty((0, len(np.frombuffer(rawData[1,]))))
else:
rawData = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])]['rawData'])
feature = np.empty((0, len(rawData[1,])))
for idepth in range(len(rawData)):
if labelsPresent:
tempdat = np.frombuffer(rawData[idepth,])
tempdat = butterBandpass(tempdat, lowcut = 500, highcut = 5000, fs = Fs, order = 5)
feature = np.append(feature, [np.transpose(tempdat)], axis=0)
else:
tempdat = rawData[idepth,]
tempdat = butterBandpass(tempdat, lowcut = 500, highcut = 5000, fs = Fs, order = 5)
feature = np.append(feature, [np.transpose(tempdat)], axis=0)
if isinstance(numChans[ichan],str):
chanPosition = [x[0] for x in list(channelLabels.items()) if numChans[ichan] in x[1]][0]
channel = numChans[ichan]
else:
chanPosition = numChans[ichan]
channel = channelLabels.get(numChans[ichan])
subPosi = [int(x) for x in axPosition.get(chanPosition)][0]
nDepths = len(feature)
yshift = 120
depths = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])]['depth'])
ax = plt.subplot(3, 3, subPosi)
ax.plot(feature.T + yshift * np.arange(0,nDepths,1), color='black', linewidth=0.2)
ax.yaxis.set_ticks(yshift * np.arange(0,nDepths,1))
ax.yaxis.set_ticklabels(['{:.2f}'.format(x) for x in depths])
ax.xaxis.set_ticks(np.arange(0,len(feature.T)+1,(len(feature.T)/5)))
xTickLabs = np.arange(0, len(feature.T)+1, len(feature.T)/5)/Fs
ax.xaxis.set_ticklabels(['{:.2f}'.format(x) for x in xTickLabs])
ax.set_ylim(-yshift,(nDepths*yshift))
ax.set_xlim(0,len(feature.T))
for label in ax.yaxis.get_ticklabels()[::2]:
label.set_visible(False)
plt.gca().invert_yaxis()
if numChans[ichan] == chanSel:
ax.annotate(channel, xy=(0.42,1.01), xycoords='axes fraction', fontsize=10, fontweight='bold', color = 'red')
else:
ax.annotate(channel, xy=(0.42,1.01), xycoords='axes fraction', fontsize=10, fontweight='bold')
if labelsPresent:
if np.size(np.where(labels==1)) != 0:
inDepth = np.min(np.where(labels==1))*yshift
outDepth = np.max(np.where(labels==1))*yshift
ax.axhline(inDepth, color='green', linewidth=2)
ax.axhline(outDepth, color='red', linewidth=2)
# Set common labels
fig.text(0.51, 0.06, 'Time (sec)', ha='center', va='center', size=12, fontweight="bold")
fig.text(0.08, 0.5, 'Depth (mm)', ha='center', va='center', rotation='vertical', size=12, fontweight="bold")
plt.suptitle(titleLab, y=0.94,x=0.51, size=16, fontweight='bold')
fileName = 'sub-P' + str(subList[isub]).zfill(3) + '_side-' + numSides[iside] + '-BensGun'
figure = plt.gcf() # get current figure
figure.set_size_inches(20, 12, forward=True)
filepath = os.path.join(outputDir,fileName + '.png')
plt.savefig(filepath, dpi=100) # save the figure to file
plt.close()
print('Finished subject', str(subList[isub]), numSides[iside], 'side', 'Bens Gun.')
def spikeRaster(spikeTimesFin, patient, side, depths, channel, channelChosen, labels):
fig = plt.figure()
ax = plt.subplot(1,1,1)
spikeTimeClean = []
for trial in range(len(spikeTimesFin)):
spikeTime = np.where(spikeTimesFin[trial] > 0)[1]
spikeTime = spikeTime[np.where(np.diff(spikeTime)>1000)]
plt.vlines(spikeTime,trial,trial+1)
spikeTimeClean.append(spikeTime)
ax.yaxis.set_ticks([x+0.5 for x in range(len(depths))])
ax.yaxis.set_ticklabels(depths)
ax.xaxis.set_ticks(np.arange(0,spikeTimesFin[0].shape[1]+1,(spikeTimesFin[0].shape[1])/5))
start, end = ax.get_xlim()
ax.xaxis.set_ticklabels(np.arange(0, spikeTimesFin[0].shape[1]+1, spikeTimesFin[0].shape[1]/5)/24000)
ax.set_xlim(0,spikeTimesFin[0].shape[1])
plt.gca().invert_yaxis()
plt.xlabel("Time (sec)")
plt.ylabel("Depth (mm)")
if channel == channelChosen:
plt.title('DBS-' + str(patient) + ' ' + side + ' Side: ' + channelLabels.get(channel) + " Trajectory", fontweight='bold', color = 'red')
else:
plt.title('DBS-' + str(patient) + ' ' + side + ' Side: ' + channelLabels.get(channel) + " Trajectory", fontweight='bold')
if any(labels==1)==True:
plt.axhline(np.min(np.where(labels==1))+0.5, color='g', linestyle='-', linewidth=2)
plt.axhline(np.max(np.where(labels==1))+0.5, color='r', linestyle='-', linewidth=2)
return spikeTimeClean
def prep_nwbFile(file_name):
with h5py.File(file_name, "r") as f:
data = f['/processing']
df = {}
for item in list(data.items()):
df[item[0]] = f['/processing/'+item[0]].value.flatten()
subject = int("".join([x for x in h5py.File(file_name, 'r+').get('/identifier').value.split('_')[0] if x.isdigit()]))
df['channel'] = h5py.File(file_name, 'r+').get('/intervals/trials/channel').value
df['depth'] = [float(x) for x in h5py.File(file_name, 'r+').get('/intervals/trials/depth').value]
df['subject'] = np.repeat(subject,len(df['channel']))
df['side'] = np.repeat(h5py.File(file_name, 'r+').get('/session_description').value.split('_')[0], len(df['channel']))
df['chanChosen'] = np.repeat(np.nan,len(df['channel']))
return pd.DataFrame(df)
def plotFeatureMaps(x, showOnly, verticalPlots, reducedFeatures, processedFolder, nSubplots):
channelLabels = {1:"Center", 2:"Anterior", 3:"Posterior", 4:"Medial", 5:"Lateral"}
if reducedFeatures == True:
timeLabels = pd.DataFrame([{'mav': 'Mean Absolute \nValue',
'variance': 'Variance',
'rms': 'Root Mean Square',
'curveLength': 'Curve Length',
'ssi': 'Simple Square \nIntegral',
'power': 'Power',
'entropy': 'Signal Entropy',
'tkeoFour': 'Teager-Kaiser \nEnergy - Four'}])
frequencyLabels = pd.DataFrame([{'meanF': 'Mean Frequency',
'freqRatio': 'Frequency Ratio',
'AvgPowerMU': 'Bandpass Power \n(500-1000Hz)',
'AvgPowerSU': 'Bandpass Power \n(1000-3000Hz)',
                                        'waveletStd': 'STD of Wavelet \nConvolution'}])
spikeLabels = pd.DataFrame([])
else:
timeLabels = pd.DataFrame([{'mav': 'Mean Absolute \nValue',
'mavSlope': 'Mean Absolute \nValue Slope',
'variance': 'Variance',
'mmav1': 'Mean Absolute \nValue 1',
'mmav2': 'Mean Absolute \nValue 2',
'rms': 'Root Mean Square',
'curveLength': 'Curve Length',
'zeroCross': 'Zero Crossings',
'threshold': 'Threshold',
'wamp': 'Willison Amplitude',
'ssi': 'Simple Square \nIntegral',
'power': 'Power',
'entropy': 'Signal Entropy',
'peaks': 'Peaks - \nNeg and Pos',
'tkeoTwo': 'Teager-Kaiser \nEnergy - Two',
'tkeoFour': 'Teager-Kaiser \nEnergy - Four',
'shapeF': 'Shape Factor',
'kurtosis': 'Kurtosis',
'skew': 'Skewness',
'crestF': 'Crest Factor'}])
frequencyLabels = pd.DataFrame([{'meanF': 'Mean Frequency',
'freqRatio': 'Frequency Ratio',
'AvgPowerMU': 'Bandpass Power \n(500-1000Hz)',
'AvgPowerSU': 'Bandpass Power \n(1000-3000Hz)',
                                        'waveletStd': 'STD of Wavelet \nConvolution'}])
spikeLabels = pd.DataFrame([])
# spikeLabels = pd.DataFrame([{'spikeISI': 'Inter-Spike Interval',
# 'meanISI': 'Mean of ISI',
# 'stdISI': 'STD of ISI',
# 'burstIndex': 'Burst Index',
# 'pauseIndex': 'Pause Index',
# 'pauseRatio': 'Pause Ratio',
# 'spikeDensity': 'Spike Density'}])
if not isinstance(x, pd.DataFrame):
if x.endswith('.nwb'):
x = prep_nwbFile(x)
subList = np.unique(x['subject'])
else:
subList = np.unique(x['subject'])
for isub in range(len(subList)):
numSides = np.unique(x[(x['subject']==subList[isub])]['side'])
for iside in range(len(numSides)):
if verticalPlots == True:
if reducedFeatures == True:
outputDir = '\\'.join([processedFolder, 'sub-P' + str(subList[isub]).zfill(3), 'activityMaps-VerticalReduced', numSides[iside]])
else:
outputDir = '\\'.join([processedFolder, 'sub-P' + str(subList[isub]).zfill(3), 'activityMaps-Vertical', numSides[iside]])
else:
if reducedFeatures == True:
outputDir = '\\'.join([processedFolder, 'sub-P' + str(subList[isub]).zfill(3), 'activityMaps-Reduced', numSides[iside]])
else:
outputDir = '\\'.join([processedFolder, 'sub-P' + str(subList[isub]).zfill(3), 'activityMaps', numSides[iside]])
if not os.path.exists(outputDir):
os.makedirs(outputDir)
numChans = np.unique(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside])]['channel'])
colnames = x.columns.values.tolist()
if 'labels' in colnames:
labelsPresent = True
numFeatures = list(x.drop(['subject','side','channel','depth','labels', 'chanChosen'], axis=1))
else:
labelsPresent = False
numFeatures = list(x.drop(['subject','side','channel','depth', 'chanChosen'], axis=1))
numTime = list(set(list(timeLabels)).intersection(numFeatures))
numFreq = list(set(list(frequencyLabels)).intersection(numFeatures))
numSpike = list(set(list(spikeLabels)).intersection(numFeatures))
featureDomains = {'Time': numTime, 'Frequency': numFreq,'Spike': numSpike}
featureDomains.setdefault('Time', []).append(timeLabels)
featureDomains.setdefault('Frequency', []).append(frequencyLabels)
featureDomains.setdefault('Spike', []).append(spikeLabels)
for ichan in range(len(numChans)):
depths = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])]['depth'])
if labelsPresent:
labels = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])]['labels'])
if isinstance(numChans[ichan],str):
channel = numChans[ichan]
else:
channel = channelLabels.get(numChans[ichan])
for iDomain in range(3):
domainName = list(featureDomains.keys())[iDomain]
numDomain = list(featureDomains.values())[iDomain][:-1]
featureLabel = list(featureDomains.values())[iDomain][-1]
if len(numDomain)>0:
numFigs = int(np.floor(len(numDomain)/nSubplots))
nSubplotsReal = [nSubplots] * numFigs
if len(numDomain)%nSubplots !=0:
numFigs += 1
if not nSubplotsReal:
nSubplotsReal = [len(numDomain)%nSubplots]
else:
nSubplotsReal.append(len(numDomain)%nSubplots)
nStart = 0
for iplot in range(numFigs):
if verticalPlots == True:
fig, axs = plt.subplots(1,nSubplotsReal[iplot], sharex=False, sharey=True)
fig.subplots_adjust(hspace=0, wspace=0.1)
else:
fig, axs = plt.subplots(nSubplotsReal[iplot],1, sharex=True, sharey=False)
fig.subplots_adjust(hspace=0.1, wspace=0)
titleLab = 'Sub-' + str(subList[isub]).zfill(3) + ' ' + numSides[iside] + ' Side: ' + channel + ' Channel - ' + domainName + ' Features #' + str(iplot+1)
fileName = 'sub-P' + str(subList[isub]).zfill(3) + '_side-' + numSides[iside] + '_channel-' + channel + '-' + domainName + 'Features' + str(iplot+1)
axCount = 0
nEnd = nStart + nSubplotsReal[iplot]
for ifeatures in range(nStart, nEnd):
feature = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])][numDomain[ifeatures]])
feature = (feature - min(feature))/(max(feature)-min(feature))
muA = np.mean(feature)
if verticalPlots == True:
axs[axCount].plot(feature, depths)
axs[axCount].set_ylim(depths[0,],depths[-1])
axs[axCount].set_xlabel(featureLabel[numDomain[ifeatures]].values[0], fontsize=10, fontweight='bold')
axs[axCount].plot([muA,muA], axs[axCount].get_ylim(), ls= 'dashed', c='black')
else:
axs[axCount].plot(depths, feature)
axs[axCount].set_xlim(depths[0,],depths[-1])
axs[axCount].annotate(featureLabel[numDomain[ifeatures]].values[0], xy=(1.01,0.5), xycoords='axes fraction', fontsize=10, fontweight='bold')
axs[axCount].plot(axs[axCount].get_xlim(), [muA,muA], ls= 'dashed', c='black')
if labelsPresent:
if np.size(np.where(labels==1)) != 0:
inDepth = depths[np.min(np.where(labels==1))]
outDepth = depths[np.max(np.where(labels==1))]
axs[axCount].axvspan(inDepth, outDepth, color='purple', alpha=0.2)
for xc in depths:
if verticalPlots == True:
axs[axCount].axhline(y=xc, color='k', linestyle='--', alpha=0.2)
else:
axs[axCount].axvline(x=xc, color='k', linestyle='--', alpha=0.2)
axs[axCount].invert_yaxis()
if verticalPlots == True and axCount == 0:
axs[axCount].set_ylabel('Depth (mm)', size=14, fontweight='bold')
if verticalPlots == True and axCount == (int(np.ceil(nSubplotsReal[iplot]/2))-1):
if nSubplotsReal[iplot]%2 !=0:
axs[axCount].annotate('Normalized Units', xy=(0,-.2), xycoords='axes fraction', fontsize=14, fontweight='bold')
else:
axs[axCount].annotate('Normalized Units', xy=(0.5,-.2), xycoords='axes fraction', fontsize=14, fontweight='bold')
if verticalPlots == False and axCount == (int(np.ceil(nSubplotsReal[iplot]/2))-1):
if nSubplotsReal[iplot]%2 !=0:
axs[axCount].set_ylabel('Normalized Units', size=14, fontweight='bold')
else:
axs[axCount].set_ylabel('Normalized Units', size=14, fontweight='bold')
axs[axCount].yaxis.set_label_coords(-.05,0)
axCount +=1
if verticalPlots == True:
axs[(axCount-1)].yaxis.set_ticks(depths)
axFormaty(axs[(axCount-1)])
plt.suptitle(titleLab, y=0.94,x=0.51, size=16, fontweight='bold')
plt.subplots_adjust(bottom=0.20)
if nSubplotsReal[iplot] == 2:
plt.subplots_adjust(left=0.35)
plt.subplots_adjust(right=0.65)
elif nSubplotsReal[iplot] == 3:
plt.subplots_adjust(left=0.27)
plt.subplots_adjust(right=0.73)
elif nSubplotsReal[iplot] == 4:
plt.subplots_adjust(left=0.19)
plt.subplots_adjust(right=0.81)
else:
start, end = axs[axCount-1].get_xlim()
axs[axCount-1].xaxis.set_ticks(np.linspace(depths[0], depths[-1], len(depths)))
axs[axCount-1].xaxis.set_ticklabels(['{:.2f}'.format(x) for x in depths], rotation=45)
plt.subplots_adjust(right=0.80)
if nSubplotsReal[iplot] == 2:
plt.subplots_adjust(bottom=0.57)
elif nSubplotsReal[iplot] == 3:
plt.subplots_adjust(bottom=0.415)
elif nSubplotsReal[iplot] == 4:
plt.subplots_adjust(bottom=0.265)
plt.suptitle(titleLab, y=0.96,x=0.46, size=16, fontweight='bold')
plt.xlabel('Depth (mm)', size=14, fontweight='bold')
nStart += nSubplotsReal[iplot]
if showOnly == True:
plt.show()
else:
figure = plt.gcf() # get current figure
figure.set_size_inches(12, 8)
filepath = os.path.join(outputDir , fileName + '.png')
plt.savefig(filepath, dpi=100) # save the figure to file
plt.close('all')
print('Finished subject', str(subList[isub]), numSides[iside], 'side', 'channel', numChans[ichan])
def plotFeatureMaps_gui(x, verticalPlots, processedFolder, nSubplots):
channelLabels = {1:"Center", 2:"Anterior", 3:"Posterior", 4:"Medial", 5:"Lateral"}
timeLabels = pd.DataFrame([{'mav': 'Mean Absolute \nValue',
'rms': 'Root Mean Square',
'curveLength': 'Curve Length',
'power': 'Power',
'entropy': 'Signal Entropy',
'tkeoFour': 'Teager-Kaiser \nEnergy - Four'}])
frequencyLabels = pd.DataFrame([])
spikeLabels = pd.DataFrame([])
# spikeLabels = pd.DataFrame([{'spikeISI': 'Inter-Spike Interval',
# 'meanISI': 'Mean of ISI',
# 'stdISI': 'STD of ISI',
# 'burstIndex': 'Burst Index',
# 'pauseIndex': 'Pause Index',
# 'pauseRatio': 'Pause Ratio',
# 'spikeDensity': 'Spike Density'}])
subList = np.unique(x['subject'])
rowFinal = []
plotFinal = []
for isub in range(len(subList)):
plots = {}
if verticalPlots == True:
plots['outputDir'] = processedFolder + '/sub-' + str(subList[isub]) + '/activityMaps-Vertical/'
else:
plots['outputDir'] = processedFolder + '/sub-' + str(subList[isub]) + '/activityMaps/'
numSides = np.unique(x[(x['subject']==subList[isub])]['side'])
plotFinal.append(plots)
for iside in range(len(numSides)):
numChans = np.unique(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside])]['channel'])
colnames = x.columns.values.tolist()
if 'labels' in colnames:
labelsPresent = True
numFeatures = list(x.drop(['subject','side','channel','depth','labels', 'chanChosen'], axis=1))
else:
labelsPresent = False
numFeatures = list(x.drop(['subject','side','channel','depth', 'chanChosen'], axis=1))
numTime = list(set(list(timeLabels)).intersection(numFeatures))
numFreq = list(set(list(frequencyLabels)).intersection(numFeatures))
numSpike = list(set(list(spikeLabels)).intersection(numFeatures))
featureDomains = {'Time': numTime, 'Frequency': numFreq,'Spike': numSpike}
featureDomains.setdefault('Time', []).append(timeLabels)
featureDomains.setdefault('Frequency', []).append(frequencyLabels)
featureDomains.setdefault('Spike', []).append(spikeLabels)
for ichan in range(len(numChans)):
depths = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])]['depth'])
if labelsPresent:
labels = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])]['labels'])
channel = channelLabels.get(numChans[ichan])
for iDomain in range(3):
domainName = list(featureDomains.keys())[iDomain]
numDomain = list(featureDomains.values())[iDomain][:-1]
featureLabel = list(featureDomains.values())[iDomain][-1]
if len(numDomain)>0:
numFigs = int(np.floor(len(numDomain)/nSubplots))
nSubplotsReal = [nSubplots] * numFigs
if len(numDomain)%nSubplots !=0:
numFigs += 1
if not nSubplotsReal:
nSubplotsReal = [len(numDomain)%nSubplots]
else:
nSubplotsReal.append(len(numDomain)%nSubplots)
nStart = 0
for iplot in range(numFigs):
# if verticalPlots == True:
# fig, axs = plt.subplots(1,nSubplotsReal[iplot], sharex=False, sharey=True)
# fig.subplots_adjust(hspace=0, wspace=0.1)
# else:
# fig, axs = plt.subplots(nSubplotsReal[iplot],1, sharex=True, sharey=False)
# fig.subplots_adjust(hspace=0.1, wspace=0)
titleLab = 'Sub-' + str(subList[isub]) + ' ' + numSides[iside] + ' Side: ' + channel + ' Channel - ' + domainName + ' Features #' + str(iplot+1)
fileName = 'sub-' + str(subList[isub]) + '_side-' + numSides[iside] + '_channel-' + channel + '-' + domainName + 'Features' + str(iplot+1)
axCount = 0
nEnd = nStart + nSubplotsReal[iplot]
for ifeatures in range(nStart, nEnd):
row = {}
row['subject'] = str(subList[isub])
row['side'] = numSides[iside]
row['channel'] = channel
row['domain'] = domainName
row['plotTitle'] = titleLab
row['fileName'] = fileName
feature = np.array(x[(x['subject']==subList[isub]) & (x['side'] == numSides[iside]) & (x['channel'] == numChans[ichan])][numDomain[ifeatures]])
feature = (feature - min(feature))/(max(feature)-min(feature))
featureMean = np.mean(feature)
if verticalPlots == True:
row['plot'] = ['plot',feature, depths]
row['featureMean'] = ['plot', [featureMean,featureMean], 'get_ylim()', 'dashed', 'black']
row['depthLim'] = ['set_ylim', [depths[0,],depths[-1]]]
row['featureLabel'] = ['set_xlabel', featureLabel[numDomain[ifeatures]].values[0], 10, 'bold']
else:
row['plot'] = ['plot',depths, feature]
row['featureMean'] = ['plot', 'get_xlim()', [featureMean,featureMean], 'dashed', 'black']
row['depthLim'] = ['set_xlim', [depths[0,],depths[-1]]]
row['featureLabel'] = ['annotate', featureLabel[numDomain[ifeatures]].values[0], [1.01,0.5], 'axes fraction', 10, 'bold']
if labelsPresent:
if np.size(np.where(labels==1)) != 0:
inDepth = depths[np.min(np.where(labels==1))]
outDepth = depths[np.max(np.where(labels==1))]
row['labels'] = [inDepth, outDepth]
if verticalPlots == True:
row['labelsType'] = ['axhspan', [inDepth, outDepth], 'purple', 0.2]
else:
row['labelsType'] = ['axvspan', [inDepth, outDepth], 'purple', 0.2]
for xc in depths:
if verticalPlots == True:
row['depthMark'] = ['axhline', 'y', 'k', 0.2, '--']
else:
row['depthMark'] = ['axvline', 'x', 'k', 0.2, '--']
if verticalPlots == True and axCount == 0:
row['yLabel'] = ['set_ylabel', 'Depth (mm)', 14, 'bold']
if verticalPlots == True and axCount == (int(np.ceil(nSubplotsReal[iplot]/2))-1):
if nSubplotsReal[iplot]%2 !=0:
row['yLabel'] = ['annotate', 'Normalized Units', [0,-.2], 'axes fraction', 14, 'bold']
else:
row['yLabel'] = ['annotate', 'Normalized Units', [0.5,-.2], 'axes fraction', 14, 'bold']
if verticalPlots == False and axCount == (int(np.ceil(nSubplotsReal[iplot]/2))-1):
if nSubplotsReal[iplot]%2 !=0:
row['yLabel'] = ['set_ylabel', 'Normalized Units', 14, 'bold']
else:
row['yLabel'] = ['set_ylabel', 'Normalized Units', [-.05,0], 'yaxis.set_label_coords', 14, 'bold']
rowFinal.append(dict(zip(row.keys(), row.values())))
axCount +=1
# if verticalPlots == True:
# axs[(axCount-1)].yaxis.set_ticks(depths)
# axFormaty(axs[(axCount-1)])
# plt.suptitle(titleLab, y=0.94,x=0.51, size=16, fontweight='bold')
# plt.subplots_adjust(bottom=0.20)
# if nSubplotsReal[iplot] == 2:
# plt.subplots_adjust(left=0.35)
# plt.subplots_adjust(right=0.65)
# elif nSubplotsReal[iplot] == 3:
# plt.subplots_adjust(left=0.27)
# plt.subplots_adjust(right=0.73)
# elif nSubplotsReal[iplot] == 4:
# plt.subplots_adjust(left=0.19)
# plt.subplots_adjust(right=0.81)
#
# else:
# start, end = axs[axCount-1].get_xlim()
# axs[axCount-1].xaxis.set_ticks(np.linspace(depths[0], depths[-1], len(depths)))
# axs[axCount-1].xaxis.set_ticklabels(['{:.2f}'.format(x) for x in depths], rotation=45)
# plt.subplots_adjust(right=0.80)
#
# if nSubplotsReal[iplot] == 2:
# plt.subplots_adjust(bottom=0.57)
# elif nSubplotsReal[iplot] == 3:
# plt.subplots_adjust(bottom=0.415)
# elif nSubplotsReal[iplot] == 4:
# plt.subplots_adjust(bottom=0.265)
#
# plt.suptitle(titleLab, y=0.96,x=0.46, size=16, fontweight='bold')
# plt.xlabel('Depth (mm)', size=14, fontweight='bold')
nStart += nSubplotsReal[iplot]
#
print('Finished subject', str(subList[isub]), numSides[iside], 'side', 'channel', numChans[ichan])
return rowFinal
def plotFFT(data, Fs, facet=False, freqMin=1, freqMax=5000, yMin=None, yMax=None):
"""
Create the x-axis and plot the FFT of data.
Parameters
----------
data: array-like
Data containing the frequency series to plot. Each column is an
electrode.
facet: bool, default to False
If True, each electrode will be plotted on a different facet.
freqMin: float, default to None
Minimum frequency (x-axis) to show on the plot.
freqMax: float, default to None
Maximum frequency (x-axis) to show on the plot.
yMin: float, default to None
Minimum value (y-axis) to show on the plot.
yMax: float, default to None
Maximum value (y-axis) to show on the plot.
    Fs: float
        Sampling frequency of data in Hz.
Returns
-------
fig: instance of matplotlib.figure.Figure
The figure of the FFT.
"""
tf, fftData = computeFFT(data, Fs)
    yMax = np.mean(fftData)
"""
BSD 3-Clause License
Copyright (c) 2017, <NAME>
Copyright (c) 2020, enhuiz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
def nms(dets, thresh):
if 0 == len(dets):
return []
x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1, yy1 = np.maximum(x1[i], x1[order[1:]]), np.maximum(y1[i], y1[order[1:]])
        xx2, yy2 = np.minimum(x2[i], x2[order[1:]]), np.minimum(y2[i], y2[order[1:]])
        # The original snippet is truncated at this point; the remainder below is the
        # standard greedy-NMS continuation, added so the function runs end to end.
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        inds = np.where(ovr <= thresh)[0]
        order = order[inds + 1]
    return keep
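# --- Added illustrative usage: a minimal sketch of the function above. Two heavily
# overlapping boxes and one separate box; with an IoU threshold of 0.5, greedy NMS
# keeps the first (highest-scoring) and the third detection.
if __name__ == "__main__":
    demo_dets = np.array([
        [10.0, 10.0, 50.0, 50.0, 0.9],
        [12.0, 12.0, 52.0, 52.0, 0.8],      # suppressed: IoU with the first box > 0.5
        [100.0, 100.0, 150.0, 150.0, 0.7],
    ])
    print(nms(demo_dets, thresh=0.5))       # expected: [0, 2]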
# SPDX-License-Identifier: Apache-2.0
"""Unit Tests for TFLite_Detection_PostProcess op"""
import os
import struct
import numpy as np
import flatbuffers
from common import * # pylint: disable=wildcard-import,unused-wildcard-import
from backend_test_base import Tf2OnnxBackendTestBase
from tf2onnxnightly import utils
from tf2onnxnightly.tfonnx import process_tf_graph
from tf2onnxnightly import optimizer
from tf2onnxnightly.tflite import Model, OperatorCode, SubGraph, Operator, Tensor, Buffer
from tf2onnxnightly.tflite.BuiltinOperator import BuiltinOperator
from tf2onnxnightly.tflite.TensorType import TensorType
from tf2onnxnightly.tflite.CustomOptionsFormat import CustomOptionsFormat
# pylint: disable=missing-docstring
class TFLiteDetectionPostProcessTests(Tf2OnnxBackendTestBase):
@requires_tflite("TFLite_Detection_PostProcess")
@check_opset_min_version(11, "Pad")
def test_postprocess_model1(self):
self._test_postprocess(num_classes=5, num_boxes=100, detections_per_class=2, max_detections=20)
@requires_tflite("TFLite_Detection_PostProcess")
@check_opset_min_version(11, "Pad")
def test_postprocess_model2(self):
self._test_postprocess(num_classes=5, num_boxes=100, detections_per_class=7, max_detections=20)
@requires_tflite("TFLite_Detection_PostProcess")
@check_opset_min_version(11, "Pad")
def test_postprocess_model3(self):
self._test_postprocess(num_classes=5, num_boxes=3, detections_per_class=7, max_detections=20)
@requires_tflite("TFLite_Detection_PostProcess")
@check_opset_min_version(11, "Pad")
def test_postprocess_model4(self):
self._test_postprocess(num_classes=5, num_boxes=99, detections_per_class=2, max_detections=20, extra_class=True)
@requires_tflite("TFLite_Detection_PostProcess")
@check_opset_min_version(11, "Pad")
def test_postprocess_model5(self):
self._test_postprocess(num_classes=1, num_boxes=100, detections_per_class=0,
max_detections=50, use_regular_nms=False)
def _test_postprocess(self, num_classes, num_boxes, detections_per_class,
max_detections, extra_class=False, use_regular_nms=True):
model = self.make_postprocess_model(num_classes=num_classes, detections_per_class=detections_per_class,
max_detections=max_detections, x_scale=11.0, w_scale=6.0,
use_regular_nms=use_regular_nms)
np.random.seed(42)
box_encodings_val = np.random.random_sample([1, num_boxes, 4]).astype(np.float32)
if extra_class:
num_classes += 1
        class_predictions_val = np.random.random_sample([1, num_boxes, num_classes])
# _*_ coding:utf-8 _*_
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
import caffe
import yaml
import numpy as np
import numpy.random as npr
from fast_rcnn.config import cfg
from fast_rcnn.bbox_transform import bbox_transform
from utils.cython_bbox import bbox_overlaps
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
#import matplotlib
#matplotlib.use('Agg')
DEBUG = False
'''
layer {
name: 'roi-data'
type: 'Python'
bottom: 'rpn_rois'
bottom: 'gt_boxes'
bottom: 'data'
top: 'rois_p2'
top: 'rois_p3'
top: 'labels_p2'
top: 'labels_p3'
top: 'bbox_targets_p2'
top: 'bbox_targets_p3'
top: 'bbox_inside_weights_p2'
top: 'bbox_inside_weights_p3'
top: 'bbox_outside_weights_p2'
top: 'bbox_outside_weights_p3'
python_param {
module: 'rpn.proposal_target_layer'
layer: 'ProposalTargetLayer'
param_str: "'num_classes': 2"
}
}
target:
layer {
name: 'roi-data'
type: 'Python'
bottom: 'rpn_rois_p2'
bottom: 'rpn_rois_p3'
bottom: 'gt_boxes'
bottom: 'data'
top: 'rois_p2'
top: 'rois_p3'
top: 'labels_p2'
top: 'labels_p3'
top: 'bbox_targets_p2'
top: 'bbox_targets_p3'
top: 'bbox_inside_weights_p2'
top: 'bbox_inside_weights_p3'
top: 'bbox_outside_weights_p2'
top: 'bbox_outside_weights_p3'
python_param {
module: 'rpn.proposal_target_layer'
layer: 'ProposalTargetLayer'
param_str: "'num_classes': 2"
}
}
'''
class ProposalTargetLayer(caffe.Layer):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
"""
def setup(self, bottom, top):
layer_params = yaml.load(self.param_str)
self._num_classes = layer_params['num_classes']
self._batch_rois = 256 #cfg.TRAIN.BATCH_SIZE
# sampled rois (0, x1, y1, x2, y2)
top[0].reshape(1, 5, 1, 1)
top[1].reshape(1, 5, 1, 1)
# labels_1
top[2].reshape(1, 1, 1, 1)
# labels_2
top[3].reshape(1, 1, 1, 1)
# bbox_targets_1
top[4].reshape(1, self._num_classes * 4, 1, 1)
# bbox_targets_2
top[5].reshape(1, self._num_classes * 4, 1, 1)
# bbox_inside_weights_1
top[6].reshape(1, self._num_classes * 4, 1, 1)
# bbox_inside_weights_2
top[7].reshape(1, self._num_classes * 4, 1, 1)
# bbox_outside_weights_1
top[8].reshape(1, self._num_classes * 4, 1, 1)
# bbox_outside_weights_2
top[9].reshape(1, self._num_classes * 4, 1, 1)
def forward(self, bottom, top):
# Proposal ROIs (0, x1, y1, x2, y2) coming from RPN
# (i.e., rpn.proposal_layer.ProposalLayer), or any other source
#branch 1 2
rois_list = []
branch_num = 2
for i in xrange(branch_num):
rois_list.append(bottom[i].data) # 300 [ 0. 70.29284668 0. 105.74542236 49.81745911]
#--debug
# for i in rois_list:
# print(i[0:2])
# input()
gt_boxes = bottom[2].data
gt_boxes = gt_boxes.reshape(gt_boxes.shape[0], gt_boxes.shape[1])
w = (gt_boxes[:, 2] - gt_boxes[:, 0])
h = (gt_boxes[:, 3] - gt_boxes[:, 1])
g_s = w * h
g_s[g_s <= 0] = 1e-6
gt_index = g_s.copy()
#### alter ####
gt_index_list = []
gt_index[g_s >= 2000] = 1
gt_index_list.append(gt_index.copy())
gt_index[g_s >= 3000] = 2
gt_index_list.append(gt_index.copy())
rois_list_res = []
labels_list = []
bbox_targets_list = []
bbox_inside_weights_list = []
branch_num = 2
for i in xrange(branch_num):
gt_index = gt_index_list[i]
g_index = (gt_index == (i+1))
num_g = sum(g_index)
# get gt_bbox
start = 0
end_g = num_g
index_range = range(start, end_g)
if num_g == 0:
num_g = 1
each_gt_box = np.zeros((num_g, 5), dtype=np.float32)
else:
each_gt_box = np.zeros((num_g, 5), dtype=np.float32)
each_gt_box[index_range, :] = gt_boxes[g_index, :]
zeros = np.zeros((each_gt_box.shape[0], 1), dtype=each_gt_box.dtype)
rois_per_image = np.inf if cfg.TRAIN.BATCH_SIZE == -1 else cfg.TRAIN.BATCH_SIZE
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
rois_list[i] = np.vstack(
(rois_list[i], np.hstack((zeros, each_gt_box[:, :-1])))
)
labels, rois, bbox_targets, bbox_inside_weights = _sample_rois(
rois_list[i], each_gt_box, fg_rois_per_image,
rois_per_image, self._num_classes)
rois_list_res.append(rois)
labels_list.append(labels)
bbox_targets_list.append(bbox_targets)
bbox_inside_weights_list.append(bbox_inside_weights)
#--debug
#print(each_gt_box)
im = bottom[3].data
rois_part1 = rois_list_res[0]
rois_part2 = rois_list_res[1]
rois_part1 = rois_part1.reshape((rois_part1.shape[0], rois_part1.shape[1], 1, 1))
top[0].reshape(*rois_part1.shape)
top[0].data[...] = rois_part1
rois_part2 = rois_part2.reshape((rois_part2.shape[0], rois_part2.shape[1], 1, 1))
top[1].reshape(*rois_part2.shape)
top[1].data[...] = rois_part2
# classification labels
# modified by ywxiong
labels_1 = labels_list[0]
labels_2 = labels_list[1]
labels_1 = labels_1.reshape((labels_1.shape[0], 1, 1, 1))
top[2].reshape(*labels_1.shape)
top[2].data[...] = labels_1
labels_2 = labels_2.reshape((labels_2.shape[0], 1, 1, 1))
top[3].reshape(*labels_2.shape)
top[3].data[...] = labels_2
# bbox_targets
# modified by ywxiong
bbox_targets_1 = bbox_targets_list[0]
bbox_targets_2 = bbox_targets_list[1]
bbox_targets_1 = bbox_targets_1.reshape((bbox_targets_1.shape[0], bbox_targets_1.shape[1], 1, 1))
top[4].reshape(*bbox_targets_1.shape)
top[4].data[...] = bbox_targets_1
bbox_targets_2 = bbox_targets_2.reshape((bbox_targets_2.shape[0], bbox_targets_2.shape[1], 1, 1))
top[5].reshape(*bbox_targets_2.shape)
top[5].data[...] = bbox_targets_2
# bbox_inside_weights
# modified by ywxiong
bbox_inside_weights_1 = bbox_inside_weights_list[0]
bbox_inside_weights_2 = bbox_inside_weights_list[1]
bbox_inside_weights_1 = bbox_inside_weights_1.reshape(
(bbox_inside_weights_1.shape[0], bbox_inside_weights_1.shape[1], 1, 1))
top[6].reshape(*bbox_inside_weights_1.shape)
top[6].data[...] = bbox_inside_weights_1
bbox_inside_weights_2 = bbox_inside_weights_2.reshape(
(bbox_inside_weights_2.shape[0], bbox_inside_weights_2.shape[1], 1, 1))
top[7].reshape(*bbox_inside_weights_2.shape)
top[7].data[...] = bbox_inside_weights_2
# bbox_outside_weights
# modified by ywxiong
bbox_inside_weights_1 = bbox_inside_weights_list[0]
bbox_inside_weights_2 = bbox_inside_weights_list[1]
bbox_inside_weights_1 = bbox_inside_weights_1.reshape(
(bbox_inside_weights_1.shape[0], bbox_inside_weights_1.shape[1], 1, 1))
top[8].reshape(*bbox_inside_weights_1.shape)
top[8].data[...] = np.array(bbox_inside_weights_1 > 0).astype(np.float32)
bbox_inside_weights_2 = bbox_inside_weights_2.reshape(
(bbox_inside_weights_2.shape[0], bbox_inside_weights_2.shape[1], 1, 1))
top[9].reshape(*bbox_inside_weights_2.shape)
top[9].data[...] = np.array(bbox_inside_weights_2 > 0).astype(np.float32)
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
###### **************** #######
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets (bbox_target_data) are stored in a
compact form N x (class, tx, ty, tw, th)
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets).
Returns:
bbox_target (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
# for ind in inds:
# cls = clss[ind]
# start = 4 * cls
# end = start + 4
# bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
# bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
#return bbox_targets, bbox_inside_weights
if cfg.TRAIN.AGNOSTIC:
for ind in inds:
cls = clss[ind]
start = 4 * (1 if cls > 0 else 0)
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
else:
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
def _compute_targets(ex_rois, gt_rois, labels):
"""Compute bounding-box regression targets for an image."""
assert ex_rois.shape[0] == gt_rois.shape[0]
assert ex_rois.shape[1] == 4
assert gt_rois.shape[1] == 4
targets = bbox_transform(ex_rois, gt_rois)
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS))
/ np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))
return np.hstack(
(labels[:, np.newaxis], targets)).astype(np.float32, copy=False)
def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# overlaps: (rois x gt_boxes)
overlaps = bbox_overlaps(
np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),
np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))
gt_assignment = overlaps.argmax(axis=1)
max_overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
        fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False)
import sys, os
sys.path.append(os.path.abspath(__file__).split('test')[0])
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as io
from pyml.supervised.NeuralNetwork import NeuralNetwork as NN
"""
------------------------------------------------------------------------------------------------------------------------
PLOT DATOS
------------------------------------------------------------------------------------------------------------------------
"""
def displayData(X):
m, n = X.shape
example_width = int(np.round(np.sqrt(n)))
fig, ax_array = plt.subplots(10, 10, figsize=(10, 10))
fig.subplots_adjust(wspace=0.025, hspace=0.025)
ax_array = ax_array.ravel()
for i, ax in enumerate(ax_array):
ax.imshow(X[i].reshape(example_width, example_width, order='F'),
cmap='Greys', extent=[0, 1, 0, 1])
ax.axis('off')
plt.show()
"""
------------------------------------------------------------------------------------------------------------------------
DE LISTA A ARRAY
------------------------------------------------------------------------------------------------------------------------
"""
def unroll_input(input):
theta_ravel = [] # Creamos la lista que almacenará las matrices tras flatten
for theta_element in input:
theta_ravel.append(np.ravel(theta_element)) # Hacer que la matriz sea un vector y almacenarlo en lista temporal
return np.concatenate(theta_ravel) # Hacer que la lista temporal sea un solo vector
"""
------------------------------------------------------------------------------------------------------------------------
EJEMPLO NUMEROS
------------------------------------------------------------------------------------------------------------------------
"""
data = io.loadmat('../../../data/ex3weights.mat')
theta1 = data['Theta1']
theta2 = data['Theta2']
print("Dimensiones theta 1:", theta1.shape)
print("Dimensiones theta 2:", theta2.shape)
data = io.loadmat('../../../data/ex3data1.mat')
data = pd.DataFrame(np.hstack((data['X'], data['y'])))
print(data.info())
print(data.head())
print(data.describe())
#-----------------------------------------------------------------------------------------------------------------------
#---------------------------------------------- OBTENCION DATOS --------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
m, n = data.shape
X = np.array(data.iloc[:, 0:n-1]).T
y = np.array(data.iloc[:, -1], ndmin=2)
rand_indices = np.random.choice(m, 100, replace=False)
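# --- Added note: the original snippet is truncated at this point. A typical next
# step would be to preview the 100 randomly selected training examples, e.g.:
#   displayData(X[:, rand_indices].T)   # hypothetical call; X holds one example per column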
# Connectome-based CNN-RNN
# 2021.03.16 <NAME>
###### import ######################
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import load_model
import scipy.stats as measures
from pymatreader import read_mat
####################################
### Functions and Initializations ##
tf.config.experimental.list_physical_devices('GPU')
def getLayerIndexByName(model, layername):
for idx, layer in enumerate(model.layers):
if layer.name == layername:
return idx
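# --- Added illustrative usage (comment only): look up a layer index by name in the
# loaded model; 'conv2d' is a hypothetical layer name, not one known to exist in
# connectome_model_CNNRNN_v3.
#   layer_idx = getLayerIndexByName(connectome_cnn, 'conv2d')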
####################################
###### Dataset preparation #########
# Load DAVIS test data
test_set = ['rollerblade', 'scooter-black','scooter-gray', 'soapbox', 'soccerball',
'stroller', 'surf', 'swing', 'tennis', 'train']
data = read_mat('.\\data\\DAVIS_CNNRNN_data.mat')
print(data.keys())
pos_x = np.array([]); pos_y = np.array([]); pos_z = np.array([])
delta_x = np.array([]); delta_y = np.array([]); delta_z = np.array([]); fr_timed = []
check = 0
for i in range(len(data['training_data'])):
if any(ele in data['training_data'][i]['label'] for ele in test_set)==True:
if check==0:
input_frames = data['training_data'][i]['images']
check += 1
else:
input_frames = np.concatenate((input_frames,data['training_data'][i]['images']), axis=0)
for j in range(data['training_data'][i]['images'].shape[0]-10):
fr_timed.append(data['training_data'][i]['images'][j:j+10,:,:])
pos_x = np.append(pos_x,[data['training_data'][i]['x'][0:-1-9]])
pos_y = np.append(pos_y,[data['training_data'][i]['y'][0:-1-9]])
pos_z = np.append(pos_z, [data['training_data'][i]['z'][0:-1-9]])
delta_x = np.append(delta_x, [data['training_data'][i]['delta_x'][0:-1-9]])
delta_y = np.append(delta_y, [data['training_data'][i]['delta_y'][0:-1-9]])
delta_z = np.append(delta_z, [data['training_data'][i]['delta_z'][0:-1-9]])
timed_fr = np.array(fr_timed)
print('Frames with time dimension', timed_fr.shape)
print('size of frames', input_frames.shape,
'size of x', pos_x.shape, 'size of y', pos_y.shape, 'size of delta_x',
delta_x.shape, 'size of delta_y', delta_y.shape)
y_true = np.stack((pos_x, pos_y, pos_z), axis=1); print('Array of true outputs', y_true.shape)
####################################
###### Load model ##################
connectome_cnn = load_model('connectome_model_CNNRNN_v3')
print(connectome_cnn.summary())
####################################
###### Predict DAVIS test data #####
pred_davis = connectome_cnn.predict(timed_fr); print('Shape of prediction', pred_davis.shape)
# PERFORMANCE RMSE and Pearson's r
RMSE_x = np.sqrt(np.mean((pos_x[300:-1] - pred_davis[300:-1,0])**2)); print('RMSE_x', RMSE_x)
RMSE_y = np.sqrt(np.mean((pos_y[300:-1] - pred_davis[300:-1,1])**2)); print('RMSE_y', RMSE_y)
RMSE_z = np.sqrt(np.mean((pos_z[300:-1] - pred_davis[300:-1,2])**2)); print('RMSE_z', RMSE_z)
x_pearson_corr = measures.pearsonr(pred_davis[300:-1,0], pos_x[300:-1])[0]; print('x_Pearson', x_pearson_corr)
y_pearson_corr = measures.pearsonr(pred_davis[300:-1,1], pos_y[300:-1])[0]; print('y_Pearson', y_pearson_corr)
z_pearson_corr = measures.pearsonr(pred_davis[300:-1,2], pos_z[300:-1])[0]; print('z_Pearson', z_pearson_corr)
# Plot prediction against ground truth
fig2, ax2 = plt.subplots(3); idd_l = np.array([0,0,0,1,1,1]); idd_r = np.array([0,1,2,0,1,2])
labels = ['x','y','z','delta_x','delta_y','delta_z']
labels2 = ['x','y','z']
fig2.suptitle('Test data prediction')
for i in range(pred_davis.shape[1]):
ax2[i].plot(np.arange(0,293), y_true[300:-1,i], linewidth=1, color='black', alpha=0.7)
ax2[i].plot(np.arange(0,293), pred_davis[300:-1,i], linewidth=1, color='blue')
ax2[i].legend(['ground truth', 'prediction'], loc='upper right', frameon=False)
ax2[i].set_title('{bar_dir}'.format(bar_dir=labels[i]))
ax2[i].set_ylabel('Distance (a.u.)'); ax2[i].set_xlabel('Time (a.u.)')
ax2[i].set_xlim(0, 300+1); ax2[i].set_xticks(np.arange(0,300+1,150))
ax2[0].set_ylim(0, 30+1); ax2[0].set_yticks(np.arange(0,30+1,10))
ax2[1].set_ylim(0, 15+1); ax2[1].set_yticks(np.arange(0,15+1,5))
ax2[2].set_ylim(0, 15+1); ax2[2].set_yticks(np.arange(0,15+1,5))
fig_b, ax_b = plt.subplots(); x_l = np.arange(len(labels2)); vec = [RMSE_x,RMSE_y,RMSE_z]
bb = ax_b.bar(x_l, vec); colo = ['y', 'g', 'b']
ax_b.set_xticks(x_l); ax_b.set_xticklabels(labels2); ax_b.set_ylabel('RMSE'); ax_b.set_ylim(0,5)
for index, value in enumerate(vec):
ax_b.text(x=index, y=value, s = str("{:.2f}".format(value))); bb[index].set_color(colo[index])
fig_c, ax_c = plt.subplots(); vec2 = [x_pearson_corr,y_pearson_corr,z_pearson_corr]
bb2 = ax_c.bar(x_l, vec2)
ax_c.set_xticks(x_l); ax_c.set_xticklabels(labels2); ax_c.set_ylabel('Pearson r'); ax_c.set_ylim(0,1)
for index, value in enumerate(vec2):
ax_c.text(x=index, y=value, s = str("{:.2f}".format(value))); bb2[index].set_color(colo[index])
plt.show()
####################################
###### Plot learned filters ########
show_filt = []; show_special = []; all_filters = []
layer_names = ['L1R', 'L2R', 'L3R', 'L5L1', 'L5L2',
'Mi1L1', 'Mi1L5', 'Tm3L1', 'Tm3L5', 'Mi9L3', 'Mi4L5', 'C3L1',
'Tm1L2', 'Tm2L2', 'Tm4L2', 'Tm9L3', 'Tm9Mi4',
'T4aMi1', 'T4aTm3', 'T4aMi9', 'T4aMi4', 'T4aC3',
'T4bMi1', 'T4bTm3', 'T4bMi9', 'T4bMi4', 'T4bC3',
'T4cMi1', 'T4cTm3', 'T4cMi9', 'T4cMi4', 'T4cC3',
'T4dMi1', 'T4dTm3', 'T4dMi9', 'T4dMi4', 'T4dC3',
'T5aTm1', 'T5aTm2', 'T5aTm4', 'T5aTm9',
'T5bTm1', 'T5bTm2', 'T5bTm4', 'T5bTm9',
'T5cTm1', 'T5cTm2', 'T5cTm4', 'T5cTm9',
'T5dTm1', 'T5dTm2', 'T5dTm4', 'T5dTm9',
'LPLC2T4a', 'LPLC2T4b', 'LPLC2T4c', 'LPLC2T4d',
'LPLC2T5a', 'LPLC2T5b', 'LPLC2T5c', 'LPLC2T5d']
for ele in layer_names:
ind_layer = getLayerIndexByName(connectome_cnn, ele)
filters = connectome_cnn.layers[ind_layer].get_weights()[0]
all_filters.append(np.squeeze(filters, axis=(2,3)))
if ele == 'T4aMi9' or ele == 'T4bMi9' or ele == 'T5aTm4' or ele == 'T5bTm4':
show_special.append(filters)
print(ele, filters.shape)
else:
show_filt.append(filters)
print(ele, filters.shape)
show_kernel = np.array(show_filt); show_kernel = np.squeeze(show_kernel, axis=(3,4))
show_kernel_sp = np.array(show_special); show_kernel_sp = np.squeeze(show_kernel_sp, axis=(3,4))
print('All filters', len(all_filters))
print('3x3 filters', show_kernel.shape); print('5x5 filters', show_kernel_sp.shape)
#### Lamina
fig_lam, ax_lam = plt.subplots(2,3)
im_lam = []
im_lam.append(ax_lam[0,0].imshow(all_filters[0], cmap='RdYlBu', vmin=-1, vmax=2)); ax_lam[0,0].set_title('L1')
im_lam.append(ax_lam[0,1].imshow(all_filters[1], cmap='RdYlBu', vmin=-1, vmax=2)); ax_lam[0,1].set_title('L2')
im_lam.append(ax_lam[0,2].imshow(all_filters[2], cmap='RdYlBu', vmin=-1, vmax=2)); ax_lam[0,2].set_title('L3')
im_lam.append(ax_lam[1,0].imshow(all_filters[3], cmap='RdYlBu', vmin=-1, vmax=2)); ax_lam[1,0].set_title('L5L1')
im_lam.append(ax_lam[1,1].imshow(all_filters[4], cmap='RdYlBu', vmin=-1, vmax=2)); ax_lam[1,1].set_title('L5L2')
fig_lam.suptitle('LAMINA trained'); fig_lam.colorbar(im_lam[0], ax=ax_lam, label='a.u.')
fig_lam.delaxes(ax = ax_lam[1,2])
for i in range(ax_lam.shape[0]):
for j in range(ax_lam.shape[1]):
ax_lam[i,j].set_axis_off()
#### Outer medulla
fig_med, ax_med = plt.subplots(4,4)
im_med = []
# ON PATHWAY
im_med.append(ax_med[0,0].imshow(all_filters[5], cmap='YlGnBu', vmin=-1, vmax=2)); ax_med[0,0].set_title('Mi1L1')
im_med.append(ax_med[0,1].imshow(all_filters[6], cmap='YlGnBu', vmin=-1, vmax=2)); ax_med[0,1].set_title('Mi1L5')
im_med.append(ax_med[0,2].imshow(all_filters[7], cmap='YlGnBu', vmin=-1, vmax=2)); ax_med[0,2].set_title('Tm3L1')
im_med.append(ax_med[0,3].imshow(all_filters[8], cmap='YlGnBu', vmin=-1, vmax=2)); ax_med[0,3].set_title('Tm3L5')
im_med.append(ax_med[1,0].imshow(all_filters[9], cmap='YlGnBu', vmin=-1, vmax=2)); ax_med[1,0].set_title('Mi9L3')
im_med.append(ax_med[1,1].imshow(all_filters[10], cmap='YlGnBu', vmin=-1, vmax=2)); ax_med[1,1].set_title('Mi4L5')
im_med.append(ax_med[1,2].imshow(all_filters[11], cmap='YlGnBu', vmin=-1, vmax=2)); ax_med[1,2].set_title('C3L1')
# OFF PATHWAY
im_med.append(ax_med[2,0].imshow(all_filters[12], cmap='YlGnBu', vmin=-1, vmax=2)); ax_med[2,0].set_title('Tm1L2')
im_med.append(ax_med[2,1].imshow(all_filters[13], cmap='YlGnBu', vmin=-1, vmax=2)); ax_med[2,1].set_title('Tm2L2')
im_med.append(ax_med[2,2].imshow(all_filters[14], cmap='YlGnBu', vmin=-1, vmax=2)); ax_med[2,2].set_title('Tm4L2')
im_med.append(ax_med[3,0].imshow(all_filters[15], cmap='YlGnBu', vmin=-1, vmax=2)); ax_med[3,0].set_title('Tm9L3')
im_med.append(ax_med[3,1].imshow(all_filters[16], cmap='YlGnBu', vmin=-1, vmax=2)); ax_med[3,1].set_title('Tm9Mi4')
###
fig_med.suptitle('Outer MEDULLA trained'); fig_med.colorbar(im_med[0], ax=ax_med, label='a.u.')
#fig_med.delaxes(ax = ax_med[0,2])
for i in range(ax_med.shape[0]):
for j in range(ax_med.shape[1]):
ax_med[i,j].set_axis_off()
### Inner medulla (T4)
fig_lp, ax_lp = plt.subplots(4,5)
im_lp = []
im_lp.append(ax_lp[0,0].imshow(all_filters[17], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[0,0].set_title('T4aMi1')
im_lp.append(ax_lp[0,1].imshow(all_filters[18], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[0,1].set_title('T4aTm3')
im_lp.append(ax_lp[0,2].imshow(all_filters[19], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[0,2].set_title('T4aMi9 (5x5)')
im_lp.append(ax_lp[0,3].imshow(all_filters[20], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[0,3].set_title('T4aMi4')
im_lp.append(ax_lp[0,4].imshow(all_filters[21], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[0,4].set_title('T4aC3')
###
im_lp.append(ax_lp[1,0].imshow(all_filters[22], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[1,0].set_title('T4bMi1')
im_lp.append(ax_lp[1,1].imshow(all_filters[23], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[1,1].set_title('T4bTm3')
im_lp.append(ax_lp[1,2].imshow(all_filters[24], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[1,2].set_title('T4bMi9 (5x5)')
im_lp.append(ax_lp[1,3].imshow(all_filters[25], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[1,3].set_title('T4bMi4')
im_lp.append(ax_lp[1,4].imshow(all_filters[26], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[1,4].set_title('T4bC3')
###
im_lp.append(ax_lp[2,0].imshow(all_filters[27], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[2,0].set_title('T4cMi1')
im_lp.append(ax_lp[2,1].imshow(all_filters[28], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[2,1].set_title('T4cTm3')
im_lp.append(ax_lp[2,2].imshow(all_filters[29], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[2,2].set_title('T4cMi9')
im_lp.append(ax_lp[2,3].imshow(all_filters[30], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[2,3].set_title('T4cMi4')
im_lp.append(ax_lp[2,4].imshow(all_filters[31], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[2,4].set_title('T4cC3')
###
im_lp.append(ax_lp[3,0].imshow(all_filters[32], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[3,0].set_title('T4dMi1')
im_lp.append(ax_lp[3,1].imshow(all_filters[33], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[3,1].set_title('T4dTm3')
im_lp.append(ax_lp[3,2].imshow(all_filters[34], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[3,2].set_title('T4dMi9')
im_lp.append(ax_lp[3,3].imshow(all_filters[35], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[3,3].set_title('T4dMi4')
im_lp.append(ax_lp[3,4].imshow(all_filters[36], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lp[3,4].set_title('T4dC3')
###
for i in range(ax_lp.shape[0]):
for j in range(ax_lp.shape[1]):
ax_lp[i,j].set_axis_off()
fig_lp.suptitle('Inner MEDULLA trained'); fig_lp.colorbar(im_lp[1], ax=ax_lp, label='a.u.')
### LOBULA
fig_lo, ax_lo = plt.subplots(4,4)
im_lo = []
im_lo.append(ax_lo[0,0].imshow(all_filters[37], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lo[0,0].set_title('T5aTm1')
im_lo.append(ax_lo[0,1].imshow(all_filters[38], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lo[0,1].set_title('T5aTm2')
im_lo.append(ax_lo[0,2].imshow(all_filters[39], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lo[0,2].set_title('T5aTm4 (5x5)')
im_lo.append(ax_lo[0,3].imshow(all_filters[40], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lo[0,3].set_title('T5aTm9')
###
im_lo.append(ax_lo[1,0].imshow(all_filters[41], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lo[1,0].set_title('T5bTm1')
im_lo.append(ax_lo[1,1].imshow(all_filters[42], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lo[1,1].set_title('T5bTm2')
im_lo.append(ax_lo[1,2].imshow(all_filters[43], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lo[1,2].set_title('T5bTm4 (5x5)')
im_lo.append(ax_lo[1,3].imshow(all_filters[44], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lo[1,3].set_title('T5bTm9')
###
im_lo.append(ax_lo[2,0].imshow(all_filters[45], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lo[2,0].set_title('T5cTm1')
im_lo.append(ax_lo[2,1].imshow(all_filters[46], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lo[2,1].set_title('T5cTm2')
im_lo.append(ax_lo[2,2].imshow(all_filters[47], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lo[2,2].set_title('T5cTm4')
im_lo.append(ax_lo[2,3].imshow(all_filters[48], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lo[2,3].set_title('T5cTm9')
###
im_lo.append(ax_lo[3,0].imshow(all_filters[49], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lo[3,0].set_title('T5dTm1')
im_lo.append(ax_lo[3,1].imshow(all_filters[50], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lo[3,1].set_title('T5dTm2')
im_lo.append(ax_lo[3,2].imshow(all_filters[51], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lo[3,2].set_title('T5dTm4')
im_lo.append(ax_lo[3,3].imshow(all_filters[52], cmap='jet', vmin=-0.5, vmax=0.5)); ax_lo[3,3].set_title('T5dTm9')
###
for i in range(ax_lo.shape[0]):
for j in range(ax_lo.shape[1]):
ax_lo[i,j].set_axis_off()
fig_lo.suptitle('LOBULA trained'); fig_lo.colorbar(im_lo[1], ax=ax_lo, label='a.u.')
### OPTIC GLOMERULI
fig_op, ax_op = plt.subplots(2,4)
im_op = []
im_op.append(ax_op[0,0].imshow(all_filters[53], cmap='RdYlBu', vmin=-0.5, vmax=0.5)); ax_op[0,0].set_title('LPLC2T4a')
im_op.append(ax_op[0,1].imshow(all_filters[54], cmap='RdYlBu', vmin=-0.5, vmax=0.5)); ax_op[0,1].set_title('LPLC2T4b')
im_op.append(ax_op[0,2].imshow(all_filters[55], cmap='RdYlBu', vmin=-0.5, vmax=0.5)); ax_op[0,2].set_title('LPLC2T4c')
im_op.append(ax_op[0,3].imshow(all_filters[56], cmap='RdYlBu', vmin=-0.5, vmax=0.5)); ax_op[0,3].set_title('LPLC2T4d')
###
im_op.append(ax_op[1,0].imshow(all_filters[57], cmap='RdYlBu', vmin=-0.5, vmax=0.5)); ax_op[1,0].set_title('LPLC2T5a')
im_op.append(ax_op[1,1].imshow(all_filters[58], cmap='RdYlBu', vmin=-0.5, vmax=0.5)); ax_op[1,1].set_title('LPLC2T5b')
im_op.append(ax_op[1,2].imshow(all_filters[59], cmap='RdYlBu', vmin=-0.5, vmax=0.5)); ax_op[1,2].set_title('LPLC2T5c')
im_op.append(ax_op[1,3].imshow(all_filters[60], cmap='RdYlBu', vmin=-0.5, vmax=0.5)); ax_op[1,3].set_title('LPLC2T5d')
###
for i in range(ax_op.shape[0]):
for j in range(ax_op.shape[1]):
ax_op[i,j].set_axis_off()
fig_op.suptitle('OPTIC GLOMERULI trained'); fig_op.colorbar(im_op[1], ax=ax_op, label='a.u.')
####################################
########INITIAL WEIGHTS#############
scale = 1/75
# LAMINA #
L1R = scale*np.array([[0,0, 0],[0, -35, 0],[0, 0, 0]])
L2R = scale*np.array([[0,0, 0],[0, -45, 0],[0, 0, 0]])
L3R = scale*np.array([[0,0, 0],[0, -10, 0],[0, 0, 0]])
L5L1 = scale*np.array([[0,0, 0],[0, 120, 0],[0, 0, 0]])
L5L2 = scale*np.array([[0,0, 0],[0, 60, 0],[0, 0, 0]])
# Outer MEDULLA #
Mi1L1 = scale*np.array([[0,0, 0],[0, 140, 0],[0, 0, 0]]) # excit
Mi1L5 = scale*np.array([[0,0, 0],[0, 50, 0],[0, 0, 0]])
Tm1L2 = scale*np.array([[0,0, 0],[0, 180, 0],[0, 0, 0]])
Tm2L2 = scale*np.array([[0,0, 0],[0, 160, 0],[0, 0, 0]])
Tm3L1 = scale*np.array([[50,50, 50],[50, 110, 50],[50, 50, 50]])
import numpy as np
import cv2
import math
##############################
# #
# ### RBox gt ### #
# #
##############################
def rbox2poly(rboxes):
ctr_x = rboxes[:, 0:1]
ctr_y = rboxes[:, 1:2]
width = rboxes[:, 2:3]
height = rboxes[:, 3:4]
angle = rboxes[:, 4:]
# struct = np.zeros_like(rboxes[:, 0])
l = (- width / 2.0)
r = (width / 2.0)
t = (- height / 2.0)
b = (height / 2.0)
# anti-clockwise [n, 1, 1]
cosA = np.cos(-angle / 180 * np.pi)[..., np.newaxis]
sinA = np.sin(-angle / 180 * np.pi)[..., np.newaxis]
polys = np.concatenate([l, t, r, t, r, b, l, b], axis=1).reshape(-1, 4, 2)
# [n, 4, 1]
x_poly, y_poly = polys[..., 0:1], polys[..., 1:2]
x_poly_new = x_poly * cosA - y_poly * sinA + ctr_x[..., np.newaxis]
y_poly_new = x_poly * sinA + y_poly * cosA + ctr_y[..., np.newaxis]
return np.concatenate([x_poly_new, y_poly_new], axis=-1).reshape(-1, 8)
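# Illustrative sanity check (added as a sketch): for a zero-angle box the polygon is
# just the axis-aligned corners in (lt, rt, rb, lb) order. The box values below are
# demo assumptions only.
_demo_rbox = np.array([[10., 20., 8., 4., 0.]])  # (ctr_x, ctr_y, width, height, angle)
assert np.allclose(rbox2poly(_demo_rbox), [[6., 18., 14., 18., 14., 22., 6., 22.]])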
def pyramid_targets_rbox(imshape, scale_stack, area_thres, rboxes, dense_ratio=0.7, areas=None):
p_heatmap_stack = []
p_angle_stack = []
p_target_stack = []
# area_thres = [32 ** 2, 64 ** 2]
scaled_qboxes = []
scaled_rboxes = []
box_in_4pts = rbox2poly(rboxes)
if not areas is None:
# small = areas < area_thres[0]
medium = (areas >= area_thres[0]) & (areas < area_thres[1])
# large = areas >= area_thres[1]
# print('pyshape:', small.shape, box_in_4pts.shape)
# scaled_qboxes.append(box_in_4pts)
scaled_qboxes.append(box_in_4pts[medium])
# scaled_qboxes.append(box_in_4pts)
# scaled_rboxes.append(rboxes) # [small]
scaled_rboxes.append(rboxes[medium]) # [medium]
# scaled_rboxes.append(rboxes) # [large]
for i in range(len(scale_stack)):
scale = scale_stack[i]
# if not areas is None:
box_in_4pts = scaled_qboxes[i]
rbox = scaled_rboxes[i]
# print("scaled_qboxes:", scaled_qboxes[i], rbox)
heatmap, target = make_target_rbox(imshape, scale, box_in_4pts, rbox, dense_ratio=dense_ratio)
p_heatmap_stack.append(heatmap)
p_angle_stack.append([])
p_target_stack.append(target)
p_heatmap = np.concatenate(p_heatmap_stack, axis=0)
p_target = np.concatenate(p_target_stack, axis=0)
return p_heatmap, p_target
def make_target_rbox(imshape, scale, box_in_4pts, rboxes, dense_ratio=0.7):
# heatmap_stack: [nbox, H, W]
# target_stack: [nbox, 8, H, W]
# print("box_in_4pts make:", box_in_4pts, rboxes)
heatmap_stack, angle_cls, target_stack, angle_reg = compute_target_rbox(box_in_4pts, scale,
get_heatmap_rbox(
box_in_4pts,
imshape,
float(1 / scale)
),
rboxes[:, -1])
# print("angle_cls:", angle_cls.shape)
# print("angle_reg:", angle_reg.shape)
# heatmap: [H*W]
# target: [H*W, 4+6]
target_stack = np.transpose(target_stack, (0, 2, 3, 1))
# angle_cls = np.sum(angle_cls, axis=1)[:, np.newaxis, :, :]
# print("angle_cls:", angle_cls.shape)
# angle_cls = np.transpose(angle_cls, (0, 2, 3, 1))
angle_reg = np.transpose(angle_reg, (0, 2, 3, 1))
# print("heatmap_stack:", heatmap_stack.shape)
heatmap = np.sum(heatmap_stack, axis=0).reshape(-1)
# print("heatmap:", heatmap.shape, np.unique(heatmap))
heatmap[heatmap > 1] = 0
# 4 edges
target = np.sum(target_stack, axis=0).reshape(-1, 4)
# print("make_target:", np.where(target[:, 0] != 0), target[target[:, 0] != 0])
angle_cls_map = angle_cls # np.sum(angle_cls, axis=0).reshape(-1)
# remove overlapping labels
# angle_cls_map[angle_cls_map > 6] = 0
# print("angle_reg_map:", angle_reg.shape)
angle_reg_map = np.sum(angle_reg, axis=0).reshape(-1, 1)
# print("[heatmap, angle_cls_map]", heatmap.shape, angle_cls_map.shape)
# 1 channel for textness, 6 for angle cls, [b, 1, h, w]
# heatmap = np.concatenate([heatmap, angle_cls_map], axis=0)
# 4 channels for coods reg, 6 for angle reg, [b, 4, h, w]
# print("[target, angle_reg_map]", target.shape, angle_reg_map.shape)
target = np.concatenate([target, angle_reg_map], axis=1)
# print("make_target:", heatmap.shape, target.shape)
target = target * heatmap[..., np.newaxis]
return heatmap, target
def make_target_rbox_mc(imshape, scale, box_in_4pts, rboxes, seq_labels, dense_ratio=0.7):
# heatmap_stack: [nbox, H, W]
# target_stack: [nbox, 8, H, W]
# print("box_in_4pts make:", box_in_4pts, rboxes)
# gt_boxes, imshape, stride, gt_rboxes, seq_labels
heatmap, classmap = get_heatmap_rbox_multiclass(
box_in_4pts,
imshape,
float(1 / scale),
rboxes,
seq_labels
)
heatmap_stack, angle_cls, target_stack, angle_reg = compute_target_rbox(box_in_4pts,
scale,
heatmap,
rboxes[:, -1])
# print("angle_cls:", angle_cls.shape)
# print("angle_reg:", angle_reg.shape)
# heatmap: [H*W]
# classmap: [H*W]
# target: [H*W, 4+6]
target_stack = np.transpose(target_stack, (0, 2, 3, 1))
# angle_cls = np.sum(angle_cls, axis=1)[:, np.newaxis, :, :]
# print("angle_cls:", angle_cls.shape)
# angle_cls = np.transpose(angle_cls, (0, 2, 3, 1))
angle_reg = np.transpose(angle_reg, (0, 2, 3, 1))
# print("heatmap_stack:", heatmap_stack.shape)
heatmap = np.sum(heatmap_stack, axis=0).reshape(-1)
biclsmap = np.sum((classmap > 0).astype(np.float32), axis=0).reshape(-1)
classmap = np.sum(classmap, axis=0).reshape(-1)
# print("heatmap:", heatmap.shape, np.unique(heatmap))
heatmap[heatmap > 1] = 0
classmap[biclsmap > 1] = 0
# 4 edges
target = np.sum(target_stack, axis=0).reshape(-1, 4)
# print("make_target:", np.where(target[:, 0] != 0), target[target[:, 0] != 0])
angle_cls_map = angle_cls # np.sum(angle_cls, axis=0).reshape(-1)
# remove overlapping labels
# angle_cls_map[angle_cls_map > 6] = 0
# print("angle_reg_map:", angle_reg.shape)
angle_reg_map = np.sum(angle_reg, axis=0).reshape(-1, 1)
# print("[heatmap, angle_cls_map]", heatmap.shape, angle_cls_map.shape)
# 1 channel for textness, 6 for angle cls, [b, 1, h, w]
# heatmap = np.concatenate([heatmap, angle_cls_map], axis=0)
# 4 channels for coods reg, 6 for angle reg, [b, 4, h, w]
# print("[target, angle_reg_map]", target.shape, angle_reg_map.shape)
target = np.concatenate([target, angle_reg_map], axis=1)
# print("make_target:", heatmap.shape, target.shape)
target = target * heatmap[..., np.newaxis]
return heatmap, target, classmap
def get_heatmap_rbox(gt_boxes, imshape, stride, proportion=0.7):
# gt_boxes_4pts:[n, (lt, rt, rb, lb) * (x, y)] gt box pts in anti-clock order within 8 channels
cls_num = 1
fill_mask_ori = np.zeros((int(math.ceil(imshape[0])), int(math.ceil(imshape[1]))))
# print("fill_mask_ori:", fill_mask_ori.shape)
mask_stack = []
if len(gt_boxes) < 1:
mask_stack.append(fill_mask_ori[np.newaxis, ...])
for i in range(len(gt_boxes)):
fill_mask = fill_mask_ori.copy()
coods = np.array(gt_boxes[i], np.int32).reshape(4, 2)
pt1 = coods[0]
pt2 = coods[1]
pt3 = coods[2]
pt4 = coods[3]
ctr = (((pt1 + pt3) / 2 + (pt2 + pt4) / 2) / 2).reshape(-1, 2)
rescale_coods = np.array((coods - ctr) * proportion + ctr, np.int32)
fill_mask = cv2.fillPoly(fill_mask, np.array(np.array([rescale_coods]) / stride, np.int32), cls_num)
# print("rescale_coods:", np.array(np.array([rescale_coods]) / stride, np.int32), fill_mask.shape)
mask_stack.append(fill_mask[np.newaxis, ...])
return np.concatenate(mask_stack, axis=0)
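# Illustrative usage (added as a sketch): one quadrilateral box on a 32x32 image at
# stride 1 should yield a single binary mask of the image size. All values below are
# demo assumptions only.
_demo_boxes = np.array([[8., 8., 40., 8., 40., 24., 8., 24.]])
_demo_heat = get_heatmap_rbox(_demo_boxes, (32, 32), 1.0)
assert _demo_heat.shape == (1, 32, 32) and _demo_heat.max() == 1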
def get_heatmap_rbox_multiclass(gt_boxes, imshape, stride, gt_rboxes, seq_labels, proportion=0.7):
# gt_boxes_4pts:[n, (lt, rt, rb, lb) * (x, y)] gt box pts in anti-clock order within 8 channels
cls_num = 1
fill_mask_ori = np.zeros((math.ceil(imshape[0]), math.ceil(imshape[1])))
char_mask_ori = np.zeros((math.ceil(imshape[0]), math.ceil(imshape[1])))
# print("fill_mask_ori:", fill_mask_ori.shape)
mask_stack = []
char_map_stack = []
if len(gt_boxes) < 1:
mask_stack.append(fill_mask_ori[np.newaxis, ...])
char_map_stack.append(char_mask_ori[np.newaxis, ...])
label_len = np.array([len(seq) for seq in seq_labels])
char_poses = []
for i in range(len(label_len)):
l = label_len[i]
rbox = gt_rboxes[i]
x, y, w, h, a = rbox
arc = -a * np.pi / 180.0
w *= proportion
h *= proportion
# radius in [1, w / 2]
char_r = min(max(rbox[2] / float(2 * l + 1e-10), 1), w / 3.) / stride
xs = [x - (w / 2 - w / (2 * l) - nth * (w / l)) * np.abs(np.cos(arc)) for nth in range(l)]
ys = [y - (w / 2 - w / (2 * l) - nth * (w / l)) * np.abs(np.sin(arc)) for nth in range(l)]
# [x, y, r, label]
char_poses.append([[xs[n] / stride, ys[n] / stride, char_r, seq_labels[i][n]] for n in range(l)])
for i in range(len(gt_boxes)):
fill_mask = fill_mask_ori.copy()
char_mask = char_mask_ori.copy()
coods = np.array(gt_boxes[i], np.int32).reshape(4, 2)
pt1 = coods[0]
pt2 = coods[1]
pt3 = coods[2]
pt4 = coods[3]
ctr = (((pt1 + pt3) / 2 + (pt2 + pt4) / 2) / 2).reshape(-1, 2)
rescale_coods = np.array((coods - ctr) * proportion + ctr, np.int32)
fill_mask = cv2.fillPoly(fill_mask, np.array(np.array([rescale_coods]) / stride, np.int32), cls_num)
char_pos = char_poses[i]
for n in range(label_len[i]):
char_mask = cv2.circle(
char_mask,
(int(char_pos[n][0]), int(char_pos[n][1])),
int(char_pos[n][2]),
int(char_pos[n][3]),
-1
)
# print("char_mask:", np.unique(char_mask))
mask_stack.append(fill_mask[np.newaxis, ...])
char_map_stack.append(char_mask[np.newaxis, ...])
return np.concatenate(mask_stack, axis=0), np.concatenate(char_map_stack, axis=0)
def compute_target_rbox(gt_boxes_4pts, scale, heatmap, angles, base_angle=30.):
# gt_boxes_4pts: qbox in clock-wise
h, w = heatmap.shape[1:]
if gt_boxes_4pts.shape[0] < 1:
return heatmap[np.newaxis, ...], [], np.zeros((1, 4, h, w)), np.zeros((1, 1, h, w))
# p_grid in [x, y] shape
p_grid = (np.mgrid[:h, :w][np.newaxis, ...].reshape(2, -1).T + 0.5) * float(1. / scale)
p_grid = np.concatenate([p_grid[:, 1:2], p_grid[:, 0:1]], axis=-1)
gt_boxes_4pts = np.array(gt_boxes_4pts).reshape(-1, 4, 2).astype(np.float32)
pj_dis_coll = []
for i in range(gt_boxes_4pts.shape[0]):
A = gt_boxes_4pts[i]
B = np.concatenate([gt_boxes_4pts[i][1:], gt_boxes_4pts[i][0:1]], axis=0)
# AB: [4, 2]
AB = B - A
# AP: [line, grid, cood] -> [4, h * w, 2]
AP = p_grid[np.newaxis, :, :] - gt_boxes_4pts[i][:, np.newaxis, :]
AB_norm = np.sqrt(np.sum(AB ** 2, axis=-1))[..., np.newaxis]
# AP_norm = np.sqrt(np.sum(AP ** 2, axis=-1))[..., np.newaxis]
'''
# print("AP_norm * sin_BAP:", AB.shape, AP.shape, AB_norm.shape, AP_norm.shape, np.tile(AB, (AP.shape[0], 1)).shape)
cos_BAP = np.abs(np.sum(AB[:, np.newaxis, :] * AP, axis=-1))
BAP_fraction = (AB_norm[:, np.newaxis, :] * AP_norm)
# print("BAP_fraction:", cos_BAP.shape, BAP_fraction.shape)
cos_BAP = cos_BAP[:, :, np.newaxis] / (BAP_fraction + 1e-10)
sin_BAP = np.sqrt(1 - cos_BAP ** 2)
# print("AP_norm * sin_BAP:", AP_norm.shape, sin_BAP.shape)
# norm for each level by scale
pj_dis = AP_norm * sin_BAP # * scale * (0.5 ** 3)
'''
# [4, 1]
X1, Y1 = AB[..., 0:1], AB[..., 1:2]
# [4, h * w, 1]
X2, Y2 = AP[..., 0:1], AP[..., 1:2]
dis_numerator = np.abs(X1[:, np.newaxis, :] * Y2 - X2 * Y1[:, np.newaxis, :])
pj_dis = dis_numerator / (AB_norm[:, np.newaxis, :] + 1e-10)
pj_dis_coll.append(pj_dis.reshape(AB.shape[0], h, w)[np.newaxis, ...])
for i in range(heatmap.shape[0]):
pj_dis_coll[i] *= heatmap[i]
# Angle Map
angles = np.array(angles)
"""
Copyright 2021 Biomedical Computer Vision Group, Heidelberg University.
Author: <NAME> (<EMAIL>)
Distributed under the MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
"""
import argparse
import numpy as np
import pandas as pd
import skimage.util
def disk_mask(imsz, ir, ic, nbpx):
ys, xs = np.ogrid[-nbpx:nbpx + 1, -nbpx:nbpx + 1]
se = xs ** 2 + ys ** 2 <= nbpx ** 2
mask = np.zeros(imsz, dtype=int)
if ir - nbpx < 0 or ic - nbpx < 0 or ir + nbpx + 1 > imsz[0] or ic + nbpx + 1 > imsz[1]:
mask = skimage.util.pad(mask, nbpx)
mask[ir:ir + 2 * nbpx + 1, ic:ic + 2 * nbpx + 1] = se
mask = skimage.util.crop(mask, nbpx)
else:
mask[ir - nbpx:ir + nbpx + 1, ic - nbpx:ic + nbpx + 1] = se
return mask
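# Illustrative check (added as a sketch): a radius-2 disk on an integer grid covers
# 13 pixels. Image size and centre below are demo assumptions only.
_demo_disk = disk_mask((7, 7), 3, 3, 2)
assert _demo_disk.sum() == 13 and _demo_disk[3, 3] == 1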
def find_nn(cim, icy, icx, nim, nbpx):
mask = disk_mask(cim.shape, icy, icx, nbpx)
iys_nim, ixs_nim = np.where(nim * mask)
if iys_nim.size == 0:
return np.NaN, np.NaN
d2 = (icy - iys_nim) ** 2 + (icx - ixs_nim) ** 2
I1 = np.argsort(d2)
iy_nim = iys_nim[I1[0]]
ix_nim = ixs_nim[I1[0]]
mask = disk_mask(cim.shape, iy_nim, ix_nim, nbpx)
iys_cim, ixs_cim = np.where(cim * mask)
d2 = (iy_nim - iys_cim) ** 2 + (ix_nim - ixs_cim) ** 2
I2 = np.argsort(d2)
if not iys_cim[I2[0]] == icy or not ixs_cim[I2[0]] == icx:
return np.NaN, np.NaN
return iy_nim, ix_nim
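# Illustrative check (added as a sketch): a spot at (4, 4) in the current frame and a
# spot at (5, 5) in the next frame are mutual nearest neighbours within radius 3, so
# find_nn should link them. The toy frames below are demo values only.
_cur = np.zeros((9, 9))
_cur[4, 4] = 1
_nxt = np.zeros((9, 9))
_nxt[5, 5] = 1
assert find_nn(_cur, 4, 4, _nxt, 3) == (5, 5)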
def points_linking(fn_in, fn_out, nbpx=6, th=25, minlen=50):
data = pd.read_csv(fn_in, delimiter="\t")
all_data = np.array(data)
assert all_data.shape[1] in [3, 4], 'unknow collum(s) in input data!'
coords = all_data[:, :3].astype('int64')
frame_1st = np.min(coords[:, 0])
frame_end = np.max(coords[:, 0])
assert set([i for i in range(frame_1st, frame_end + 1)]).issubset(set(coords[:, 0].tolist())), "spots missing at some time point!"
nSlices = frame_end
stack_h = np.max(coords[:, 2]) + nbpx
stack_w = np.max(coords[:, 1]) + nbpx
stack = np.zeros((stack_h, stack_w, nSlices), dtype='int8')
stack_r = np.zeros((stack_h, stack_w, nSlices), dtype='float64')
for i in range(all_data.shape[0]):
iyxz = tuple(coords[i, ::-1] - 1)
stack[iyxz] = 1
if all_data.shape[1] == 4:
stack_r[iyxz] = all_data[i, -1]
else:
stack_r[iyxz] = 1
tracks_all = np.array([], dtype=float).reshape(0, nSlices, 4)
maxv = np.max(stack_r)
br_max = maxv
idx_max = np.argmax(stack_r)
while 1:
iyxz = np.unravel_index(idx_max, stack.shape)
spot_br = np.empty((nSlices, 1))
track = np.empty((nSlices, 3))
for i in range(nSlices):
spot_br[i] = np.NaN
track[i, :] = np.array((np.NaN, np.NaN, np.NaN))
spot_br[iyxz[2]] = maxv
track[iyxz[2], :] = np.array(iyxz[::-1]) + 1
# forward
icy = iyxz[0]
icx = iyxz[1]
for inz in range(iyxz[2] + 1, nSlices):
iny, inx = find_nn(stack[:, :, inz - 1], icy, icx, stack[:, :, inz], nbpx)
if np.isnan(iny) and not inz == nSlices - 1:
iny, inx = find_nn(stack[:, :, inz - 1], icy, icx, stack[:, :, inz + 1], nbpx)
if np.isnan(iny):
break
else:
iny = icy
inx = icx
stack[iny, inx, inz] = 1
stack_r[iny, inx, inz] = stack_r[iny, inx, inz - 1]
elif np.isnan(iny) and inz == nSlices - 1:
break
track[inz, :] = np.array((inz, inx, iny)) + 1
spot_br[inz] = stack_r[iny, inx, inz]
icy = iny
icx = inx
# backward
icy = iyxz[0]
icx = iyxz[1]
for inz in range(iyxz[2] - 1, -1, -1):
iny, inx = find_nn(stack[:, :, inz + 1], icy, icx, stack[:, :, inz], nbpx)
if np.isnan(iny) and not inz == 0:
iny, inx = find_nn(stack[:, :, inz + 1], icy, icx, stack[:, :, inz - 1], nbpx)
if np.isnan(iny):
break
else:
iny = icy
inx = icx
stack[iny, inx, inz] = 1
stack_r[iny, inx, inz] = stack_r[iny, inx, inz + 1]
elif np.isnan(iny) and inz == 0:
break
track[inz, :] = np.array((inz, inx, iny)) + 1
spot_br[inz] = stack_r[iny, inx, inz]
icy = iny
icx = inx
for iz in range(nSlices):
if not np.isnan(track[iz, 0]):
stack[track[iz, 2].astype(int) - 1, track[iz, 1].astype(int) - 1, iz] = 0
stack_r[track[iz, 2].astype(int) - 1, track[iz, 1].astype(int) - 1, iz] = 0
# discard short trajectories
if np.count_nonzero(~np.isnan(spot_br)) > np.max((1, minlen * (frame_end - frame_1st) / 100)):
import numpy as np
import scipy as scp
from scipy import special
import pandas as pd
import os
import shutil
class ddm_data_simulator():
def __init__(self):
self.model_params = dict({'mu': 0,
'sigma_2': 1,
'theta': 1})
self.bernoulli_p = 'will be initiated upon a call to the make_data() function'
self.sample_params = dict({'n_samples': 10000})
self.mu = (self.model_params['mu'] * self.model_params['theta']) / self.model_params['sigma_2']
self.mu_tilde = 1
self.t_tilde_large_mu = 2.5
self.t_tilde_small_mu = self.compute_t_tilde_small_mu()
self.a = self.compute_a()
self.C_f_1_s = self.compute_C_f_1_s()
self.C_f_1_l = self.compute_C_f_1_l()
self.F_1_inf = self.compute_F_1_inf()
self.F_1_l_tilde_small_mu = self.compute_F_1_l_t(self.t_tilde_small_mu)
self.F_1_s_tilde_small_mu = self.compute_F_1_s_t(self.t_tilde_small_mu)
def acceptt(self,
t_star = [],
f_t_star = [],
c_2 = []):
#print('f_t_star: ', f_t_star)
z = np.random.uniform(low = 0, high = f_t_star, size = 1)
b = np.exp(- c_2)
k_tilde = 3
#print('z: ', z)
#print('b: ', b)
while True:
if z > b:
return 0
b = b - (k_tilde * np.exp(- c_2 * np.power(k_tilde, 2)))
#print('b: ', b)
if z <= b:
return 1
k_tilde = k_tilde + 2
b = b + (k_tilde * np.exp(- c_2 * np.power(k_tilde, 2)))
#print('b: ', b)
k_tilde = k_tilde + 2
if k_tilde > 10:
return 1
def sample_small_mu(self):
# supply a, C_f_1_s, C_f_2_s, F_1_s(t_tilde), F_1(inf)
while True:
P = np.random.uniform(low = 0, high = self.F_1_inf)
#print('in small sample mu, P: ', P)
if P <= (self.C_f_1_s * self.F_1_s_tilde_small_mu):
t_star = self.compute_F_1_s_t_inv(P / self.C_f_1_s)
#print('in sample small mu, t_star: ', t_star)
if self.acceptt(t_star = t_star,
f_t_star = np.exp( - ( 1 / (2 * self.a * t_star)) - np.sqrt(((self.a - 1) * np.power(self.mu, 2)) / self.a) + (np.power(self.mu, 2) * t_star) / 2),
c_2 = (1 / (2 * t_star))
):
return t_star
else:
t_star = self.compute_F_1_l_t_inv(((P - self.C_f_1_s * self.compute_F_1_s_t(self.t_tilde_small_mu)) / self.C_f_1_l) + self.compute_F_1_l_t(self.t_tilde_small_mu))
#print('in sample small mu, t_star: ', t_star)
if self.acceptt(t_star = t_star,
f_t_star = np.exp((- np.power(np.pi, 2) * t_star) / 8),
c_2 = (np.power(np.pi, 2) * t_star) / 8
):
return t_star
def sample_large_mu(self):
t_tilde = self.t_tilde_large_mu
if t_tilde >= 0.63662:
C_s = 0
C_l = - np.log(np.pi / 4) - (0.5 * np.log(2 * np.pi))
else:
C_l = - ((np.power(np.pi, 2) * t_tilde) / 8) + (1.5 * np.log(t_tilde) + (1 / (2 * t_tilde)))
C_s = C_l + (0.5 * np.log(2 * np.pi)) + np.log(np.pi / 4)
while True:
t_star = np.random.wald(mean = (1/np.abs(self.mu)), scale = 1)
if t_star <= t_tilde:
if self.acceptt(t_star = t_star,
f_t_star = np.exp(C_s - (1/(2 * t_star))),
c_2 = (1 / (2 * t_star))
):
return t_star
else:
if self.acceptt(t_star = t_star,
f_t_star = np.exp(C_l - (1 / (2 * t_star)) - (1.5 * np.log(t_star))),
c_2 = (np.power(np.pi, 2) * t_star) / 8
):
return t_star
def sample_wfpt(self):
if self.mu <= self.mu_tilde:
t_star = self.sample_small_mu()
else:
t_star = self.sample_large_mu()
return ((t_star * np.power(self.model_params['theta'], 2)) / self.model_params['sigma_2']), np.random.choice([1, -1], p = [self.bernoulli_p, 1 - self.bernoulli_p])
def make_data(self):
self.bernoulli_p = 1 / (1 + np.exp(-2 * self.mu))
data = np.zeros((self.sample_params['n_samples'],2))
for i in range(0, self.sample_params['n_samples'], 1):
data[i, 0], data[i, 1] = self.sample_wfpt()
if i % 1000 == 0:
print(i, ' data points sampled')
return data
def compute_t_tilde_small_mu(self):
return 0.12 + 0.5 * np.exp(- self.mu/3)
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020, University of Southampton
All rights reserved.
Licensed under the BSD 3-Clause License.
See LICENSE.md file in the project root for full license information.
"""
import math
import unittest
from unittest.mock import patch
import numpy as np
from auv_cal.cone_fitting import CircularCone, build_matrix, rotation_matrix
cone_points = np.array(
[
[-1.33048143698340549, -10.9167707649920445, 16.0510645154023663],
[-1.35444014094203435, -9.85047375230570488, 14.4832746889369801],
[-1.37413509610432372, -8.97394348476293047, 13.1945012799404964],
[-1.39061151089872914, -8.24066080532580258, 12.1163465903305951],
[-1.40459877249746512, -7.61816283388455684, 11.2010800416958851],
[-1.41662135489108421, -7.08310677445542147, 10.4143804293164877],
[-1.4270660565611577, -6.6182773637618908, 9.73093593640615495],
[-1.43622439354878639, -6.21070008021096864, 9.13166996168519063],
[-1.4443202368208774, -5.85041108585671843, 8.60193254967370535],
[-1.45152835738756214, -5.52963183766690403, 8.13028681131961761],
[-1.45798718750993861, -5.24220112363124002, 7.70767383958905761],
[-1.46380779856235255, -4.98317548446758263, 7.32682520450624164],
[-1.4690803425334733, -4.74854252132031807, 6.98184142587877954],
[-1.47387875555397518, -4.53501155555069246, 6.66788417777062747],
[-1.47826424698250891, -4.33985833976516311, 6.38094796518596752],
[-1.48599279676010076, -3.99594701649774953, 5.87529084769434728],
[-1.48941531414188821, -3.8436524235919709, 5.65137020406256863],
[-1.48228792478428839, -4.16080820992080991, 6.11768832114919014],
[-1.49258657516399906, -3.70254034149209099, 5.44389134585008971],
[-1.49553327289983051, -3.57142274125332682, 5.25110749924903253],
[-1.49827844859047166, -3.44927412625144614, 5.07151086375407889],
[-1.50084209549335168, -3.33520465665189247, 4.90379309673348551],
[-1.5032416467473424, -3.22843843596744717, 4.74681338788338714],
[-1.50549237243837286, -3.12829583883298223, 4.59957247553411985],
[-1.5076077049881853, -3.03417902890704028, 4.46119135343988127],
[-1.50959950752068495, -2.9455600146745069, 4.33089370907611126],
[-1.5114782965332878, -2.86197073899794185, 4.20799135218291109],
[-1.51325342770012639, -2.78299480956345135, 4.09187205593626135],
[-1.51493325173750759, -2.70826056176537699, 3.98198935722065528],
[-1.51652524581227666, -2.63743521010830717, 3.87785395736237071],
[-1.51803612485603789, -2.57021989393614847, 3.77902643780299297],
[-1.51947193628102517, -2.50634546190684571, 3.68511106195990923],
[-1.52083814091515213, -2.44556886981331001, 3.59575047889738286],
[-1.52213968243994757, -2.38767009010187881, 3.51062117935291829],
[-1.52338104719290901, -2.33244945024751615, 3.42942958231705086],
[-1.5245663158588143, -2.27972533212532991, 3.35190865239052416],
[-1.52569920830506844, -2.22933217651882165, 3.27781496578782994],
[-1.52678312259916749, -2.18111874657023952, 3.20692615706655504],
[-1.52782116907020771, -2.13494661180116374, 3.13903869016389825],
[-1.52881620013376307, -2.09068882069574435, 3.07396590667921155],
[-1.52977083648234569, -2.04822873503912373, 3.01153631198721161],
[-1.53068749014795524, -2.00745900347272821, 2.95159206604349533],
[-1.53156838486393321, -1.96828065524682172, 2.89398765091759236],
[-1.53241557408812668, -1.93060229806378203, 2.83858869137190428],
[-1.32745348765844562, -10.5515533634389271, 16.0145350241871434],
[-1.35166099288881081, -9.52306608626207485, 14.4535567535042144],
[-1.37156859611230608, -8.6772707100982629, 13.1698576422620413],
[-1.388228538826356, -7.969460130299451, 12.0955838429222311],
[-1.40237562640273339, -7.36841430821775134, 11.1833513936014572],
[-1.41453853690862541, -6.85167103815115563, 10.3990684627423171],
[-1.42510736284869588, -6.40265626787095243, 9.71757991623639761],
[-1.43437622860982006, -6.00887339012222021, 9.11991912920162839],
[-1.4425710923938222, -5.66072200812765747, 8.59151517685181432],
[-1.44986841242063047, -5.35070485536185014, 8.12098914337984112],
[-1.45640799788928721, -5.0728817904008876, 7.69932542704392819],
[-1.46230205398588131, -4.82248549718748087, 7.3192884723442253],
[-1.46764167411843482, -4.59564564622944172, 6.97500411790629915],
[-1.4725015821645091, -4.38918740837497801, 6.6616537923884751],
[-1.47694365139982819, -4.2004819443481356, 6.37524759617999237],
[-1.48101955309625644, -4.02733387245473207, 6.11245350641919938],
[-1.48477277598046942, -3.86789546683987773, 5.87046715209084713],
[-1.48824018426474902, -3.72060046066187322, 5.64691134432713326],
[-1.49145323274157793, -3.58411241976658212, 5.43975772096831367],
[-1.49443892388951505, -3.45728407765484835, 5.24726502753743862],
[-1.49722056871179965, -3.33912500935725065, 5.06793005452447254],
[-1.49981839671090289, -3.2287757151415275, 4.90044830314222146],
[-1.5022500487826842, -3.12548667867538388, 4.74368220102183269],
[-1.50453097843558403, -3.02860132022747441, 4.59663522957105375],
[-1.50667478062952731, -2.93754202512675144, 4.45843071878108521],
[-1.5086934630235378, -2.8517986191298581, 4.32829435580838151],
[-1.51059767106570364, -2.7709188049134208, 4.20553967003794948],
[-1.5123968758363231, -2.69450018108770228, 4.08955592000569013],
[-1.51409953164203581, -2.62218354641916784, 3.97979793093618905],
[-1.51571320889519412, -2.55364725411690285, 3.87577752600646086],
[-1.5172447066853012, -2.48860242895472661, 3.77705626717160925],
[-1.51870014857347702, -2.42678889720504065, 3.68323927785474758],
[-1.52008506445628644, -2.36797170844949179, 3.59396996395317991],
[-1.52140446080649072, -2.31193815122554991, 3.50892548436006813],
[-1.52266288117148241, -2.25849518259884929, 3.42781284971857625],
[-1.52386445847031893, -2.207467206195322, 3.35036555004806136],
[-1.52501296035767653, -2.15869414479853772, 3.2763406294442321],
[-1.52611182870398387, -2.11202976293854894, 3.20551614020189435],
[-1.52716421406319269, -2.06734020244362338, 3.13768892016040235],
[-1.52817300585515659, -2.02450270006452415, 3.07267264638825299],
[-1.52914085887177409, -1.98340446129811698, 3.01029612593794882],
[-1.53007021661865283, -1.94394166865484541, 2.95040179065195129],
[-1.5309633319247562, -1.90601860601074069, 2.89284436815501111],
[-1.53182228518563335, -1.86954688349427678, 2.83748970543244283],
[-1.32451545348815269, -10.1885804114473277, 15.9790902786198927],
[-1.34896310068767478, -9.19747568650687342, 14.4247077016716148],
[-1.36907615603067323, -8.38209891447350941, 13.1459251308659528],
[-1.38591355909381164, -7.69951977274339772, 12.075413510252563],
[-1.40021529147628576, -7.11973877021840096, 11.1661236379591937],
[-1.41251405685573106, -6.62115964151123837, 10.3841853711034613],
[-1.42320310758531021, -6.18783959819700158, 9.70459510317229679],
[-1.43257907375540361, -5.80775300171109521, 9.10849262435023554],
[-1.44086992104320366, -5.47165792801216533, 8.58138351709908065],
[-1.44825373201933338, -5.17233475721190405, 8.11194501089400255],
[-1.45487164264596602, -4.90406171678415514, 7.69120345915648329],
[-1.46083695140638192, -4.66224558959483648, 7.3119551663479001],
[-1.46624166006955803, -4.44315654566532725, 6.96835051578512132],
[-1.4711612530270497, -4.24373438373187462, 6.65559009181948902],
[-1.47565824484143082, -4.06144471057274536, 6.36969912108158987],
[-1.47978485113488145, -3.89417066088126695, 6.10735765314878698],
[-1.48358502556404703, -3.74013031654801686, 5.86577104645280656],
[-1.48709603173194416, -3.59781297999663918, 5.64257002362806226],
[-1.49034966937129143, -3.46592946365416621, 5.43573270883095105],
[-1.49337324037567987, -3.34337292621420845, 5.24352320594595067],
[-1.49619031687346515, -3.22918773425927741, 5.06444276322996689],
[-1.49882135710728481, -3.12254449397350964, 4.89719061471512163],
[-1.50128420317809863, -3.02271987217066851, 4.74063233285433139],
[-1.50359448627170611, -2.92908016806176486, 4.59377406357686802],
[-1.50576595882755115, -2.8410678468444357, 4.45574140646769035],
[-1.50781076856832441, -2.75819043030791056, 4.32576199153290641],
[-1.50973968592583652, -2.68001127679142614, 4.20315101910121935],
[-1.5115622938551192, -2.60614188595710239, 4.08729919114272189],
[-1.51328714709902634, -2.53623544206540696, 3.97766258497257974],
[-1.51492190648974168, -2.46998136927675294, 3.87375411414904081],
[-1.51647345273578726, -2.40710071862823316, 3.77513629371527903],
[-1.51794798325962299, -2.34734224215405618, 3.6814150831116117],
[-1.519351094959976, -2.29047903762810412, 3.59223462401341065],
[-1.5206878552293559, -2.23630566945493481, 3.50727272492849007],
[-1.52196286312617457, -2.1846356886978584, 3.4262369717744745],
[-1.52318030225799261, -2.13529948914671852, 3.34886136547843716],
[-1.52434398665727322, -2.08814244747571687, 3.27490340512441591],
[-1.52545740070954761, -2.04302330452171521, 3.2041415492582237],
[-1.52652373401452657, -1.9998127519840958, 3.13637299936172376],
[-1.52754591191485734, -1.95839219476263748, 3.07141175878606987],
[-1.52852662230792369, -1.91865266398484113, 3.00908692801622601],
[-1.5294683392581172, -1.88049385974358629, 2.949241203364525],
[-1.53037334384639601, -1.8438233058390312, 2.89172955132421405],
[-1.53124374262694096, -1.80855560152788319, 2.83641803506289669],
[-1.321668371452974, -9.82779045439390941, 15.9447427889390827],
[-1.34634751549444576, -8.87365407939521234, 14.3967387736394699],
[-1.36665881953194268, -8.08838893192374364, 13.1227137671383485],
[-1.38366759548346718, -7.4308074572260292, 12.0558445124994709],
[-1.39811876466511831, -6.87210918225227729, 11.1494047249989503],
[-1.41054888119169086, -6.39154961789985165, 10.369738259385791],
[-1.42135422510302023, -5.97380761275182426, 9.69198786827494096],
[-1.43083383078813764, -5.60732176930486492, 9.09739618088944546],
[-1.43921759239807101, -5.28320382019545143, 8.57154274966192986],
[-1.44668515445181955, -4.99450827132175856, 8.10315910916167859],
[-1.45337892976499905, -4.73572909698707623, 7.68331220735316922],
[-1.45941326974596208, -4.50244519389711773, 7.30482918527101255],
[-1.46488105152979653, -4.29106570650142505, 6.96188418934110409],
[-1.46985849280231418, -4.09864387416029974, 6.64969635445662277],
[-1.47440872676395429, -3.92273881432611748, 6.36430555910495865],
[-1.47858449439247952, -3.76131143310367655, 6.10240354922510253],
[-1.48243019822857547, -3.6126450208931673, 5.86120511148356904],
[-1.48598348762010191, -3.47528396298249742, 5.6383486364940163],
[-1.48927649556311748, -3.34798592025817143, 5.43181853614299825],
[-1.49233681331665768, -3.22968414822261751, 5.23988410944393568],
[-1.49518826543879846, -3.11945753221044297, 5.06105092725866701],
[-1.49785153135107318, -3.01650655619313346, 4.8940218437547216],
[-1.5003446477537794, -2.9201338781784969, 4.73766548166545753],
[-1.50268341771383884, -2.82972851387670987, 4.59099057165169544],
[-1.50488174604351532, -2.74475287014226144, 4.45312491517908171],
[-1.50695191601253953, -2.66473204660753948, 4.32329802734085256],
[-1.50890481902729401, -2.58924495572730917, 4.20082672989551398],
[-1.51075014634660376, -2.51791691057767286, 4.0851031256096384],
[-1.51249654995869975, -2.45041340496102178, 3.97558450701434785],
[-1.51415177825564773, -2.38643486790292325, 3.87178484602870787],
[-1.51572279099413754, -2.32571221898571379, 3.77326758287164932],
[-1.51721585714069218, -2.26800308541649898, 3.67963948858090539],
[-1.51863663850207109, -2.21308856867135972, 3.59054541917198167],
[-1.51999026149322858, -2.16077046976978604, 3.50566381388511816],
[-1.52128137896063942, -2.11086889903629427, 3.42470281722957326],
[-1.5225142236322351, -2.06322020959508512, 3.3473969262568537],
[-1.52369265448789903, -2.01767520457301686, 3.2735040819020842],
[-1.52482019712087991, -1.97409757662971086, 3.2028031372561343],
[-1.52590007897937352, -1.9323625454323945, 3.13509164698600484],
[-1.52693526023024972, -1.89235566438791647, 3.07018393136039558],
[-1.52792846086664036, -1.85397177259968915, 3.00790937589018537],
[-1.52888218458190606, -1.81711407183915075, 2.94811093379420797],
[-1.52979873985145254, -1.78169331147357957, 2.89064380361489315],
[-1.5306802585959256, -1.74762706690086023, 2.83537425854087788],
[-1.31891331193991546, -9.46912121392759865, 15.9115054684033392],
[-1.34381531730794457, -8.55155201923237662, 14.3696615180315437],
[-1.36431765567960817, -7.79610088959776171, 13.1002338162705225],
[-1.38149169446193576, -7.16329026758040843, 12.0368859674877928],
[-1.39608706341569322, -6.62549792807447702, 11.1332027684251464],
[-1.40864399506977045, -6.16281747843024608, 10.3557343700051092],
[-1.41956166690667884, -5.76054009759232066, 9.67976469970423814],
[-1.42914141738132638, -5.40756211982123514, 9.08663563348532399],
[-1.4376149908286735, -5.09534427129150114, 8.56199814158089723],
[-1.44516353173664092, -4.81721177245546883, 8.09463621050106497],
[-1.45193068010447646, -4.56787180257588155, 7.675656011114115],
[-1.45803180001655774, -4.34307344685783203, 7.29791448838074874],
[-1.46356061102782298, -4.13936334432111508, 6.95560876251101767],
[-1.46859403689989421, -3.95390702242279213, 6.64397590732115617],
[-1.47319580679384399, -3.7843562009383156, 6.35906997336241897],
[-1.47741916800214623, -3.62874883368948264, 6.09759402232464787],
[-1.48130895584721989, -3.48543283728757602, 5.85677196408501555],
[-1.48490319171431695, -3.3530072070968453, 5.63424961049666262],
[-1.4882343301225105, -3.23027606538219336, 5.42801745986543516],
[-1.49133024158350969, -3.11621244509987116, 5.23634984078389021],
[-1.49421499432813443, -3.00992948497753687, 5.05775650957832479],
[-1.4969094813363939, -2.91065732482076589, 4.8909438261720366],
[-1.4994319272458998, -2.81772442703927517, 4.73478336757849672],
[-1.50179830115538548, -2.73054236573706444, 4.58828636814031654],
[-1.5040226550932716, -2.64859335488123326, 4.45058276239864359],
[-1.50611740331303223, -2.57141995687590841, 4.32090389182183898],
[-1.50809355414279045, -2.49861653940817385, 4.19856814925552069],
[-1.50996090353358192, -2.42982214361725868, 4.08296899489971477],
[-1.51172819749026788, -2.36471449886745777, 3.97356489898978182],
[-1.51340326906952893, -2.30300497467102216, 3.86987085922398588],
[-1.51499315447260785, -2.24443430292238189, 3.77145121258921678],
[-1.51650419186229812, -2.18876893670847217, 3.67791351686191392],
[-1.51794210583076405, -2.13579793785233152, 3.58890332056972072],
[-1.51931207989173345, -2.08533030573375155, 3.50409967445617943],
[-1.5206188189321681, -2.03719267608137367, 3.42321126462959358],
[-1.52186660320923384, -1.99122733130180163, 3.34597306920534843],
[-1.52305933519848646, -1.94729047422637547, 3.27214345758396297],
[-1.52420058037357831, -1.90525072546697305, 3.20150166547218085],
[-1.52529360281526083, -1.86498781130250224, 3.13384559006366858],
[-1.52634139639868716, -1.82639141449415532, 3.06898985899821453],
[-1.52734671218664264, -1.78936016490485317, 3.00676413424190514],
[-1.52831208255648776, -1.753800750474658, 2.94701161820830926],
[-1.52923984250626299, -1.71962713213555718, 2.88958773453492723],
[-1.53013214851748947, -1.6867598487594373, 2.83435896014755473],
[-1.31625137675109638, -9.11250953135374431, 15.8793916092664311],
[-1.34136761307980446, -8.23111944389879824, 14.3434877716829075],
[-1.36205375712847943, -7.50519417193243843, 13.0784957699785629],
[-1.37938692346396774, -6.8969346173198387, 12.0185471757369786],
[-1.39412122403593552, -6.37987678796106117, 11.1175260323543679],
[-1.40680040077256607, -5.93493919075926346, 10.3421810713045677],
[-1.41782640017839068, -5.54801634852413805, 9.66793219252076597],
[-1.42750276564840806, -5.20845603682527258, 9.07621690861589236],
[-1.43606301407702053, -4.90806346583285436, 8.55275503953465943],
[-1.44368972833088627, -4.6404312696349912, 8.08638115690097692],
[-1.45052772613783287, -4.400477371429381, 7.66823927132416294],
[-1.45669334411323104, -4.18411917989970572, 7.29121508941782093],
[-1.46228111131939498, -3.98803939450744105, 6.94952790783611452],
[-1.46736863036493093, -3.80951471344741632, 6.6384321210259607],
[-1.47202020365674069, -3.64628857786532068, 6.35399546624308709],
[-1.47628956570745351, -3.49647528720186296, 6.09293193566102431],
[-1.48022196845635801, -3.35848681918669456, 5.85247425343459149],
[-1.48385579155198677, -3.2309763203148103, 5.63027540262266069],
[-1.4872237992283559, -3.11279399838493109, 5.42433176385225924],
[-1.49035413107028836, -3.00295235364908875, 5.23292252737739361],
[-1.49327109015971571, -2.90059852045412381, 5.05456149582838954],
[-1.4959957753528963, -2.80499207878668022, 4.88795841877456638],
[-1.49854659250425271, -2.71548711393388098, 4.73198773002181916],
[-1.50093967083989899, -2.63151760471930318, 4.58566308526085997],
[-1.50318920439616344, -2.55258544142744714, 4.44811648219116318],
[-1.50530773379868377, -2.47825053735149137, 4.31858102897732099],
[-1.50730638019958674, -2.40812261925377813, 4.19637663836630548],
[-1.50919504058862586, -2.34185437332717816, 4.08089808389050823],
[-1.51098255171961404, -2.27913569253796533, 3.97160497533004664],
[-1.51267682838240503, -2.21968882425679004, 3.86801330301044333],
[-1.51428498058551941, -2.16326425797931288, 3.76968827177506238],
[-1.51581341330890607, -2.10963722470827131, 3.67623820083422848],
[-1.5172679117784853, -2.05860470442307042, 3.58730930900390677],
[-1.51865371465596932, -2.00998285763290063, 3.50258123894918416],
[-1.51997557709605435, -1.9636048125164336, 3.42176320107030385],
[-1.5212378252706058, -1.91931875150964282, 3.34459063920476707],
[-1.52244440367721978, -1.87698625110890105, 3.27082233758051144],
[-1.52359891632225741, -1.83648083663843531, 3.2002379023643841],
[-1.52470466268408855, -1.79768672019521336, 3.13263556241425123],
[-1.52576466921243359, -1.76049769524511568, 3.0678302430104698],
[-1.52678171699710252, -1.72481616564526208, 3.00565187383739207],
[-1.52775836613889382, -1.69055229039959554, 2.94594389864077622],
[-1.52869697727215326, -1.65762322836739528, 2.88856195906244251],
[-1.52959973062012544, -1.62595246955682637, 2.83337272935735873],
[-1.31368369685936415, -8.75789131239576157, 15.8484148557007902],
[-1.33900553458329141, -7.91230543162854083, 14.3182296368508784],
[-1.35986823809643531, -7.21562738581781993, 13.057510327028016],
[-1.3773543689598815, -6.63170622091794826, 12.0008376036226707],
[-1.39222229985238899, -6.13521691460374807, 11.1023829166194741],
[-1.4050191159534966, -5.70789015856108062, 10.3290858446268796],
[-1.41614940609831197, -5.33621515341392882, 9.6564970372355905],
[-1.42591882053829977, -5.00998504513456755, 9.06614601436851508],
[-1.43456257172236046, -4.72134517275713517, 8.54381886070089358],
[-1.4422646196615394, -4.46415239419047882, 8.07839885179469341],
[-1.44917091054716352, -4.23353299710407338, 7.66106644283641014],
[-1.45539871346574401, -4.02557090958609987, 7.28473504984715348],
[-1.4610433340937119, -3.83708350368045492, 6.94364534031405434],
[-1.46618302663605449, -3.66545756659013655, 6.63306840415638188],
[-1.4708826439840359, -3.50852740766834259, 6.34908517426136587],
[-1.47519638871486669, -3.36448299180583632, 6.08842018324863865],
[-1.47916991315129631, -3.23179981024652951, 5.84831465661920102],
[-1.48284194135938718, -3.10918471575355415, 5.62642849523864363],
[-1.48624553540821203, -2.99553363725686195, 5.42076375511289843],
[-1.48940909370586594, -2.88989824151884411, 5.22960431782544699],
[-1.49235714529617969, -2.7914594084676736, 5.05146789109246352],
[-1.49511098716516422, -2.69950594904821273, 4.88506749625840087],
[-1.49768919960285052, -2.61341739528014205, 4.72928032492041073],
[-1.50010806600138547, -2.53264998156078081, 4.5831223704782591],
[-1.50238191714349956, -2.45672514753025073, 4.44572762274224242],
[-1.50452341536579803, -2.38522004875065496, 4.31633089591224373],
[-1.50654379050041465, -2.31775967769294544, 4.19425357059435022],
[-1.50845303687942578, -2.25401028499224543, 4.07889168880327446],
[-1.51026007869797207, -2.19367385732666298, 3.9697059610469041],
[-1.51197290951058938, -2.13648345909104975, 3.86621333654745802],
[-1.51359871046133532, -2.08219928423252476, 3.76797985858231899],
[-1.51514395093629184, -2.03060529506925702, 3.67461458203879054],
[-1.51661447461372911, -1.98150634874155562, 3.58576437339577669],
[-1.5180155733249634, -1.93472573070828657, 3.50110944730095719],
[-1.51935205069400392, -1.89010302957148801, 3.42035952082071182],
[-1.52062827716890792, -1.84749229936409365, 3.34325048788771584],
[-1.52184823777345435, -1.8067604649365212, 3.26954153366397371],
[-1.52301557367826423, -1.76778593373369963, 3.19901262239117568],
[-1.52413361850502227, -1.7304573834554835, 3.13146230352232724],
[-1.52520543012621057, -1.69467270014023352, 3.06670579006146138],
[-1.5262338185991644, -1.66033804533800278, 3.00457327050588585],
[-1.52722137077175502, -1.62736703442891906, 2.94490842191848623],
[-1.52817047201352829, -1.59568001093782086, 2.88756709671632628],
[-1.5290833254564391, -1.56520340401052827, 2.83241615994982165],
[-1.31121142990154915, -8.40520147372787996, 15.8185891735558695],
[-1.33673023603250174, -7.59505815927557748, 14.2938994557566375],
[-1.35776223209953573, -6.92735832703844689, 13.0372883714863583],
[-1.37539513429908977, -6.36757006619948829, 11.9837668645874409],
[-1.39039135915559697, -5.89148880998134228, 11.0877819403848878],
[-1.40330117167799417, -5.48164520186716508, 10.3164562699140046],
[-1.41453167797587964, -5.12511477527492509, 9.64546600706741053],
[-1.42439053805124582, -4.81213019611907367, 9.05642902909609759],
[-1.43311458347641651, -4.53517273252030506, 8.53519508260321302],
[-1.44088909049529557, -4.28836038837885525, 8.07069425092900694],
[-1.44786108466304331, -4.06702551871641305, 7.65414202622429052],
[-1.4541487275441225, -3.86741682857479319, 7.27847847137776149],
[-1.45984806854080484, -3.68648502156768876, 6.93796481059039127],
[-1.46503798617092107, -3.52172592829548314, 6.62788819705209953],
[-1.46978386099315861, -3.37106390136045553, 6.34434226236037802],
[-1.47414034442558295, -3.23276391321399759, 6.08406168466874497],
[-1.4781534728667165, -3.10536443879562229, 5.84429587381336191],
[-1.48186230087805337, -2.98762560660897858, 5.62271139163809774],
[-1.485300176408064, -2.87848871396911621, 5.41731575968987666],
[-1.48849574636473969, -2.77704430291929016, 5.22639737809410487],
[-1.49147375679423355, -2.68250675682416118, 5.04847771634307119],
[-1.49425569499919164, -2.5941939149291513, 4.88227294789666377],
[-1.49686030886136279, -2.51151058533785676, 4.726662921605846],
[-1.49930402991890177, -2.43393511350471003, 4.58066588361586469],
[-1.5016013203847125, -2.36100836538476777, 4.44341774365413222],
[-1.50376495959419465, -2.29232463352773541, 4.31415496029919865],
[-1.50580628186819654, -2.22752408560384119, 4.19220032910642892],
[-1.5077353751406859, -2.1662864585354682, 4.07695111496375162],
[-1.50956124769976463, -2.10832576494380453, 3.96786908962424079],
[-1.5112919688582882, -2.0533858272531087, 3.86447212689035036],
[-1.5129347881890145, -2.00123649231351086, 3.76632707853402771],
[-1.51449623704050618, -1.95167040855735308, 3.67304370890493903],
[-1.51598221533172994, -1.90450027051724846, 3.58426950911346198],
[-1.51739806605708649, -1.85955645350133114, 3.49968524548950599],
[-1.51874863948531802, -1.81668497546090668, 3.41900112381742671],
[-1.52003834867790455, -1.77574573443680417, 3.34195347221006589],
[-1.52127121766626971, -1.73661097907108397, 3.26830186261177991],
[-1.52245092339574972, -1.69916397700418487, 3.19782660472187619],
[-1.52358083235738384, -1.66329785191854107, 3.13032655731069465],
[-1.5246640326762837, -1.62891456482508912, 3.06561721100081819],
[-1.52570336230057202, -1.59592401914285764, 3.0035290040268583],
[-1.52670143383290147, -1.56424327236977589, 2.94390583859991839],
[-1.52766065646187998, -1.53379583982116463, 2.8866037705434322],
[-1.52858325538109141, -1.50451107813099538, 2.83148984904253354],
[-1.27700213098832904, -9.01413268932937939, 17.6714559597061402],
[-1.30883575740194535, -8.05437389170427842, 15.7899288168619307],
[-1.33454289144508631, -7.27932486242774335, 14.2705097823843889],
[-1.35573688944428139, -6.64034394829852204, 13.0178409486435598],
[-1.37351033732399341, -6.10449038810663769, 11.9673446983507219],
[-1.38862948292846911, -5.64866230343784004, 11.0737317240312638],
[-1.40164761025950657, -5.25617853847314986, 10.3043000097984052],
[-1.41297421918694233, -4.91469293630240855, 9.63484594387617932],
[-1.42291888327084659, -4.61487205385139454, 9.04707208890450509],
[-1.43171997730447753, -4.3495290449756272, 8.52688923192096837],
[-1.43956403314314541, -4.11304009469470166, 8.06327235230773631],
[-1.44659910674746328, -3.90094141145383411, 7.6474705587042866],
[-1.45294421221512327, -3.70964479714499484, 7.27244948773633304],
[-1.45869610977703368, -3.53623299339886987, 6.9324900974757],
[-1.46393427493658979, -3.37830986523828392, 6.6228949649775517],
[-1.46872459303892522, -3.23388901219579861, 6.33976991765884623],
[-1.47312214504435524, -3.101309779040748, 6.0798593793277993],
[-1.47717333503964876, -2.97917311268500606, 5.84042062299289633],
[-1.48091753407881432, -2.86629200144286722, 5.61912661116195356],
[-1.48438836395458718, -2.76165277014806954, 5.41399011814444453],
[-1.4876147096748793, -2.66438455464157498, 5.22330388732858708],
[-1.49062152525564962, -2.57373500763565355, 5.04559300455239512],
[-1.4934304804342422, -2.48905080072373641, 4.87957667391882488],
[-1.49606048377571788, -2.40976185306155921, 4.72413729943943128],
[-1.49852810888395349, -2.33536848137759012, 4.57829529370065025],
[-1.50084794402966781, -2.26543085891270612, 4.44118841299345046],
[-1.50303288078279951, -2.19956031334068225, 4.31205469761162874],
[-1.50509435371316869, -2.13741209994769576, 4.19021830427246122],
[-1.50704254057142739, -2.07867936630541683, 4.07507767436124446],
[-1.50888653034862785, -2.02308808538363527, 3.966095600720835],
[-1.51063446507104571, -1.97039278052745104, 3.86279084682568996],
[-1.51229365999756049, -1.92037290158420371, 3.76473104248069879],
[-1.51387070596240747, -1.87282973933493735, 3.67152663482119967],
[-1.51537155688284786, -1.8275837871953946, 3.58282571614763823],
[-1.51680160488130578, -1.78447247633094808, 3.49830958380712875],
[-1.5181657450201187, -1.74334822394654787, 3.41768891402854491],
[-1.51946843128673548, -1.70407674536830189, 3.34070045289910844],
[-1.5207137251787668, -1.66653558923700484, 3.26710414473337796],
[-1.52190533800529271, -1.63061286214852807, 3.19668063183700957],
[-1.52304666783283693, -1.59620611475930518, 3.12922907080938772],
[-1.52414083185036842, -1.56322136600037798, 3.0645652195965738],
[-1.52519069480278979, -1.53157224582706908, 3.00251975692348294],
[-1.52619889403891773, -1.50117924003827974, 2.94293680182504414],
[-1.52716786163513518, -1.47196902326278134, 2.8856726060214104],
[-1.52809984398539567, -1.44387386833265752, 2.8305943960438138],
[-1.27451383183629097, -8.62171121536314544, 17.6370222905580825],
[-1.30655788172086385, -7.70534135373842854, 15.7624482910170887],
[-1.3324446917439301, -6.96505179775236183, 14.2480733514890687],
[-1.35379337447281856, -6.3545403291581275, 12.9991792385604317],
[-1.37170110775044241, -5.84243064412225532, 11.9515809480848745],
[-1.38693776235414457, -5.40670653121107225, 11.0602409692817787],
[-1.40005948288717041, -5.03146376662576866, 10.2926247921627141],
[-1.41147804091394846, -4.70492680304649458, 9.62464374275380763],
[-1.42150482820972912, -4.41819068227348222, 9.03808137395510336],
[-1.43037968736993726, -4.16439655816563992, 8.51890687224753762],
[-1.43829034549756796, -3.93817594600524545, 8.05613818519768721],
[-1.44538584011783389, -3.73526677783260519, 7.64105660421843691],
[-1.45178599794765839, -3.55224233540384438, 7.26665225568482143],
[-1.45758825712690321, -3.38631615292108812, 6.92722499977987827],
[-1.46287266276305816, -3.2351991580336863, 6.61809219067325749],
[-1.46770558203455947, -3.09699342998118032, 6.33537134263541457],
[-1.47214250606392016, -2.97011207363773355, 6.07581622020240708],
[-1.4762301901536552, -2.85321801458269464, 5.83669163417894676],
[-1.48000830776230541, -2.74517669988169555, 5.61567668388842467],
[-1.48351074240884162, -2.6450191531320435, 5.41078918064620229],
[-1.48676660672164096, -2.55191283243266742, 5.22032603330247813],
[-1.48980105357868231, -2.46513843397911403, 5.04281579646597056],
[-1.49263592719925375, -2.38407127260991869, 4.87698058157811598],
[-1.49529028985712831, -2.30816621924254717, 4.72170524414616644],
[-1.49778085107991976, -2.23694542693754261, 4.57601227553962531],
[-1.50012231976652277, -2.1699882612974859, 4.43904120408898706],
[-1.50232769490392948, -2.1069229867555106, 4.31003158812377229],
[-1.50440850702196993, -2.04741986162753342, 4.18830889085110591],
[-1.50637501985706135, -1.99118537107542615, 4.07327268300429601],
[-1.50823639967096113, -1.93795738505207304, 3.96438673768260985],
[-1.51000085811930762, -1.88750107264913436, 3.86117067252790314],
[-1.51167577336826819, -1.83960543849048497, 3.76319286439031808],
[-1.51326779322732641, -1.79408037341384063, 3.67006441604874789],
[-1.51478292333844222, -1.7507541325017173, 3.58143399713938271],
[-1.51622660288808153, -1.7094711699228371, 3.49698341499432486],
[-1.51760376985418888, -1.67009027304527935, 3.41642379768610382],
[-1.51891891743776686, -1.6324829486480672, 3.33949229277789428],
[-1.52017614303786619, -1.59653202236853531, 3.26594920227998209],
[-1.52137919089481732, -1.56213041922235729, 3.19557548801790858],
[-1.52253148933653115, -1.52918009846027148, 3.12817059271981623],
[-1.5236361834083274, -1.4975911204440826, 3.06355053116930787],
[-1.52469616354029025, -1.46728082683793626, 3.00154621316195191],
[-1.52571409080242892, -1.43817311837933848, 2.94200196607599462],
[-1.52669241921227195, -1.41019781694306245, 2.88477422987700383],
[-1.52763341548853537, -1.38329010063922508, 2.82973040152502353],
[-1.27213489944898095, -8.23146111408164138, 17.6041020644336932],
[-1.30437902272597639, -7.35803551281320001, 15.7361623126300483],
[-1.33043684159576836, -6.65218420797884136, 14.226603044791112],
[-1.35193286255940559, -6.06990264822590309, 12.9813145272285784],
[-1.36996858431331447, -5.58135349164683614, 11.9364855355447119],
[-1.38531729610211496, -5.16558991766890241, 11.0473184375600848],
[-1.39853784704409678, -4.80747384921408294, 10.2814383911601173],
[-1.41004415968819008, -4.49579297291886615, 9.61486633526512158],
[-1.42014934946801441, -4.22206563355355691, 9.02946309357703747],
[-1.42909465180157769, -3.97975725818160875, 8.51125359079293986],
[-1.43706892890169424, -3.76375195664729345, 8.04929679819341715],
[-1.4442221511115958, -3.56998733982641658, 7.63490474267422314],
[-1.45067491786679104, -3.39519661628397884, 7.2610909452800545],
[-1.45652531226088566, -3.23672291613530172, 6.92217332746208758],
[-1.46185392155978233, -3.09238329560723191, 6.6134833662872845],
[-1.46672757174205448, -2.96036757599313738, 6.33114974774896044],
[-1.47120214462486798, -2.83916203348658724, 6.07193516707017888],
[-1.47532473016408905, -2.72749109778234899, 5.83311164321179643],
[-1.47913529004584743, -2.62427228879101948, 5.61236414489170343],
[-1.48266795731126644, -2.52858101246882638, 5.40771530166628622],
[-1.48595206164768401, -2.43962278777978003, 5.21746600750175649],
[-1.48901294560986019, -2.35671113693646905, 5.04014813603913137],
[-1.49187261987374375, -2.27924983591821917, 4.87448658090691911],
[-1.4945502933795034, -2.2067185539831593, 4.71936854385943771],
[-1.49706280537361547, -2.13866115053373607, 4.57381850602783135],
[-1.49942497989510026, -2.07467607281083044, 4.43697769207952675],
[-1.50164991847650486, -2.01440842722457925, 4.30808711367802122],
[-1.50374924326860326, -1.9575433936040989, 4.18647348495785643],
[-1.50573330011639195, -1.90380072428509317, 4.07153745807334744],
[-1.50761132907723594, -1.85293012512286004, 3.96274374486502534],
[-1.50939160831245034, -1.80470735776488622, 3.85961278103808603],
[-1.51108157607991811, -1.75893093511948129, 3.76171365897143639],
[-1.51268793461999618, -1.71541930730102643, 3.66865810947787674],
[-1.51421673899426201, -1.674008455169536, 3.58009535526030298],
[-1.51567347335998215, -1.63454982421174511, 3.4957076922346646],
[-1.51706311670563898, -1.59690854390067249, 3.41520668138757433],
[-1.51839019970813593, -1.56096188755042009, 3.33832985496578516],
[-1.51965885407985768, -1.52659793560533652, 3.26483785773777635],
[-1.52087285553800688, -1.4937144116891139, 3.19451195772603658],
[-1.52203566133754231, -1.46221766591379598, 3.12715187187473154],
[-1.52315044315365289, -1.43202178416155412, 3.06257386112732366],
[-1.52422011597233964, -1.40304780549755126, 3.00060905675726319],
[-1.52524736354334922, -1.37522303270301949, 2.94110198584895821],
[-1.52623466086329551, -1.348480423252711, 2.88390926882000231],
[-1.52718429408560086, -1.32275804999540236, 2.82889846601289463],
[-1.26986656208462168, -7.84329347169913405, 17.572712278259246],
[-1.30230041418723919, -7.01238684562359627, 15.7110857660281056],
[-1.32852055598775176, -6.34066628994154335, 14.206111854369027],
[-1.35015653685924009, -5.78638515796817199, 12.9642581753524979],
[-1.36831391167836314, -5.32122076763807961, 11.9220684341605541],
[-1.38376918739381716, -4.9252801585203656, 11.0349729265896759],
[-1.39708376371746046, -4.58418109969791754, 10.270748606704144],
[-1.40867359473545539, -4.28726746223834176, 9.60552067134828391],
[-1.41885342570588402, -4.02647593781399582, 9.02122347019754756],
[-1.42786581028488757, -3.79559266025096731, 8.50393498403847659],
[-1.43590068585176578, -3.589751714629414, 8.04275324634828159],
[-1.44310890689263394, -3.40508843199385502, 7.62901955834813705],
[-1.44961180565754044, -3.23849445944769698, 7.25576972938162612],
[-1.45550807719047182, -3.08744137585857681, 6.91733889210219122],
[-1.46087882339626129, -2.94985147031937389, 6.60907198469173895],
[-1.46579130593313622, -2.82400159858677613, 6.32710834350037299],
[-1.47030177775206972, -2.70845064322749662, 6.06821917923112952],
[-1.47445764680554769, -2.60198408259802516, 5.82968338505890848],
[-1.47829914873782342, -2.50357113899147565, 5.60919152807375809],
[-1.48186065381913168, -2.41233129691486559, 5.40477083427830607],
[-1.48517169814993411, -2.32750788516034479, 5.2147259998473432],
[-1.48825780469723679, -2.24844704306776766, 5.03759206553992644],
[-1.49114114249425, -2.17458083280411474, 4.87209658016350389],
[-1.49384106003630945, -2.10541357454781908, 4.71712898487975618],
[-1.49637452001988769, -2.04051070911802857, 4.57171566019063036],
[-1.49875645607686625, -1.97948965896944618, 4.43499945021487818],
[-1.5010000673590056, -1.92201228137433611, 4.30622275422202438],
[-1.5031170632483386, -1.86777859930266965, 4.1847134808186004],
[-1.50511786777441947, -1.81652156455533387, 4.06987331487278414],
[-1.50701179127186591, -1.76800266015135121, 3.96116786476767002],
[-1.50880717524392738, -1.72200818913709131, 3.85811834756667338],
[-1.5105115151876809, -1.67834612798631189, 3.76029453913123657],
[-1.51213156519565906, -1.63684344686128047, 3.66730877022967849],
[-1.51367342741224631, -1.59734381787221968, 3.57881079194704466],
[-1.51514262884287643, -1.55970564733825534, 3.49448336701261475],
[-1.51654418755417231, -1.52380037983912064, 3.41403847006812455],
[-1.51788266993597665, -1.48951103124558482, 3.33721400095736964],
[-1.51916224040230508, -1.45673091545426558, 3.26377093200586588],
[-1.52038670467094961, -1.42536253562901538, 3.19349082387357974],
[-1.52155954756916634, -1.39531661567478116, 3.12617365559516491],
[-1.52268396615651391, -1.36651125167892284, 3.0616359234044439],
[-1.52376289882792393, -1.33887116633415904, 2.99970897028677053],
[-1.52479905095480328, -1.3123270520512651, 2.94023751423866786],
[-1.52579491753530672, -1.28681499069242733, 2.88307834819422437],
[-1.52675280325302443, -1.26227593969738261, 2.82809918870294474],
[-1.2677100630409246, -7.45711801788779294, 17.5428701370810316],
[-1.3003232998998322, -6.66832461487093475, 15.6872336564838726],
[-1.32669705654630632, -6.03044116611957381, 14.186612843296297],
[-1.34846558481389311, -5.50394116250568644, 12.9480215847946276],
[-1.36673823712450959, -5.06199347083151796, 11.9083396401311443],
[-1.38229454085172887, -4.68574420627704669, 11.0232132452667528],
[-1.39569829440434523, -4.36155717001357868, 10.2605632424569446],
[-1.40736736512875615, -4.07932569602538031, 9.59661369989970581],
[-1.41761803493378191, -3.83140009441562679, 9.0133687221127623],
[-1.42669410148087539, -3.61188380121740593, 8.49695664236394776],
[-1.43478651753614161, -3.41615837508605136, 8.03651257738993507],
[-1.4420469731027532, -3.24055499573652428, 7.6234056274701727],
[-1.4485974933215604, -3.08212232621608573, 7.25069277242334209],
[-1.45453735212338531, -2.93845929721896049, 6.91272549670760394],
[-1.45994813844965932, -2.80759257394165251, 6.60486153019773692],
[-1.46489752642351534, -2.68788536958443203, 6.32325033194737696],
[-1.46944212047039136, -2.57796863240419549, 6.06467120773086954],
[-1.47362962978415313, -2.47668845341874455, 5.82640958666759712],
[-1.47750054960199617, -2.38306540258386468, 5.60616135957854222],
[-1.48108947503880217, -2.29626275199814289, 5.40195812407563025],
[-1.48442613787594291, -2.21556139981484046, 5.2121081930644122],
[-1.48753623214728004, -2.14033990237018701, 5.03514962032586499],
[-1.49044207706849874, -2.07005844037291498, 4.86981248097728248],
[-1.49316315350890472, -2.00424584363611391, 4.71498834715395798],
[-1.49571654128131093, -1.94248901465105295, 4.56970540696660255],
[-1.49811727800328121, -1.88442424906073125, 4.43310804591537],
[-1.50037865546410543, -1.8297300676391528, 4.30443998412110673],
[-1.50251246583629428, -1.77812126134472703, 4.1830302673134705],
[-1.5045292073627512, -1.72934391650890928, 4.06828156358720872],
[-1.50643825709337653, -1.68317123697583937, 3.95966033498566317],
[-1.50824801666940811, -1.63940001811892211, 3.8566885426247639],
[-1.50996603593732526, -1.59784765707653453, 3.75893661327254724],
[-1.51159911822875581, -1.55834960642139797, 3.66601744910633887],
[-1.51315341040218398, -1.52075719638410445, 3.57758130449360578],
[-1.51463448015907387, -1.48493576486183709, 3.49331138683747211],
[-1.516047382684375, -1.4507630456309808, 3.41292006484688271],
[-1.51739671829246925, -1.41812777410511148, 3.33614558858227461],
[-1.51868668246372729, -1.3869284771358239, 3.26274924246202591],
[-1.51992110941828384, -1.35707241912346777, 3.19251286598795758],
[-1.52110351018045686, -1.32847468138031144, 3.12523668794718246],
[-1.52223710592964512, -1.30105735549542323, 3.06073742880298294],
[-1.52332485730492473, -1.27474883456479038, 2.99884663331366985],
[-1.52436949022473023, -1.24948318870879516, 2.93940920143744044],
[-1.52537351869569715, -1.22519961341019878, 2.88228209054749218],
[-1.52633926501255002, -1.20184194095476382, 2.82733316609608165],
[-1.26566665629569663, -7.07284307929948142, 17.5145929937389759],
[-1.29844892954344648, -6.32577683623730369, 15.6646210602613216],
[-1.32496756760636214, -5.72145086011838266, 14.1681191036089409],
[-1.34686119442111951, -5.22252299877577109, 12.9326161627578937],
[-1.36524270700396211, -4.80363174686579253, 11.8953091415808174],
[-1.38089445913901976, -4.44694825824430051, 11.0120481868628595],
[-1.39438249792004298, -4.1395730407001281, 10.2508900823653164],
[-1.40612648675465368, -3.87194249975919158, 9.58815234808789185],
[-1.41644415162652382, -3.6368160649870207, 9.00590504513891155],
[-1.42558046027823959, -3.42861123358057629, 8.49032413368242622],
[-1.4337273212165571, -3.2429546551326891, 8.03057981705226354],
[-1.4410372113635519, -3.07637157482106627, 7.61806750501742957],
[-1.44763280879186507, -2.92606631564313213, 7.24586421847417572],
[-1.4536139331839204, -2.78976411419136294, 6.90833692487913709],
[-1.45906263282406012, -2.66559519458241745, 6.60085546868961792],
[-1.4640469709849937, -2.55200848153347559, 6.31957889769210635],
[-1.46862388380388942, -2.44770647300648259, 6.06129418710289425],
[-1.47284136485866601, -2.35159545649703006, 5.82329295937817015],
[-1.47674015451575324, -2.2627470109515575, 5.60327615080372521],
[-1.48035506025658825, -2.18036791820737852, 5.3992795027187972],
[-1.48371599872328619, -2.10377641610018218, 5.20961475671124052],
[-1.48684882558894405, -2.03238328677625457, 5.03282282330646069],
[-1.48977600200062188, -1.96567666930637608, 4.86763617320342057],
[-1.49251713394963792, -1.90320976812369991, 4.71294839948526612],
[-1.49508941196610046, -1.844590832945189, 4.56778940474058626],
[-1.49750797198571695, -1.78947493507727207, 4.43130503659924457],
[-1.49978619339790953, -1.73755717527806564, 4.30274026825436806],
[-1.50193594667352803, -1.68856704063887775, 4.18142522431890562],
[-1.50396780025020216, -1.64226368992838201, 4.06676350584896706],
[-1.50589119428747109, -1.59843199393642199, 3.95822238498466605],
[-1.50771458732019359, -1.55687919342846071, 3.85532452898995093],
[-1.5094455806172169, -1.51743206517074691, 3.75764098243598177],
[-1.51109102410167107, -1.47993450814081218, 3.6647851898960635],
[-1.51265710694555078, -1.44424547899323952, 3.57640788350694283],
[-1.5141494353646352, -1.41023721921216016, 3.4921926928385334],
[-1.51557309967505738, -1.37779372697732483, 3.4118523607517437],
[-1.51693273230194081, -1.34680943522134955, 3.33512546985076241],
[-1.51823255813306734, -1.31718806413371126, 3.2617736009205851],
[-1.51947643837074797, -1.28884162183233086, 3.1915788582742719],
[-1.52066790884074976, -1.26168953135258732, 3.12434170790206434],
[-1.52181021355873058, -1.23565786571008052, 3.0598790832452254],
[-1.52290633422539501, -1.21067867574403776, 2.99802272072405174],
[-1.52395901621500962, -1.18668939787234362, 2.93861769315230292],
[-1.52497079153420723, -1.16363233088955598, 2.88152111412387368],
[-1.52594399915528056, -1.14145417259646997, 2.82660099056125791],
[-1.26373760188410533, -6.69037553938643992, 17.4878982848962217],
[-1.29667855429063428, -5.98467025057215807, 15.6432630716344434],
[-1.32333331204305882, -5.41363627653978607, 14.1506437117331654],
[-1.34534455028057232, -4.94208202143883746, 12.9180532838165139],
[-1.36382846299116833, -4.54609487663722778, 11.8829868858767522],
[-1.37957003940021927, -4.20885774732270246, 11.0014865006407323],
[-1.39313742701876597, -3.91819901349027955, 10.2417368658176482],
[-1.40495196910250963, -3.66509209331089458, 9.5801434994598651],
[-1.41533274367012551, -3.44270126838679191, 8.99883859319992041],
[-1.42452581488744356, -3.2457550212625299, 8.48404298613344565],
[-1.4327239874596458, -3.07012283027066912, 8.02495995356860981],
[-1.44008047663640282, -2.91252231229834235, 7.6130097107576109],
[-1.44671857341285914, -2.77031216185441087, 7.24128817862500185],
[-1.45273861000531923, -2.64134292728239872, 6.90417692936871408],
[-1.45822306624809439, -2.52384761466016316, 6.59705723720892667],
[-1.46324037114170635, -2.41636024592208409, 6.31609719836855898],
[-1.4678477726646888, -2.31765437789161366, 6.05809102665557919],
[-1.47209353181637237, -2.22669609854577333, 5.8203361909210356],
[-1.47601861952780178, -2.14260767350809944, 5.60053839103089324],
[-1.47965804307357462, -2.06463912987010412, 5.39673728113268858],
[-1.48304189304702372, -1.9921458264810763, 5.20724784088525716],
[-1.48619617724979625, -1.9245705892443401, 5.0306136791081224],
[-1.48914349043212346, -1.86142936304015394, 4.86556952950217969],
[-1.49190355638415562, -1.80229959831642961, 4.71101089448822208],
[-1.49449366988853316, -1.74681078298659731, 4.56596929663996676],
[-1.49692905947092636, -1.69463667109916205, 4.4295919652897533],
[-1.49922318702772661, -1.6454888638109193, 4.3011250579057636],
[-1.50138799678443302, -1.59911147586507685, 4.17989971885843659],
[-1.50343412330745441, -1.55527667928357349, 4.06532043112734076],
[-1.50537106621654382, -1.51378096044121713, 3.95685523270884865],
[-1.50720733765554082, -1.47444196074998568, 3.85402745851593398],
[-1.50895058735134047, -1.43709579747746696, 3.75640873729526747],
[-1.51060770913663456, -1.4015947816731662, 3.66361302654013743],
[-1.51218493206451643, -1.36780546618901, 3.5752915102329399],
[-1.51368789865392039, -1.33560696940051793, 3.4911282172385758],
[-1.51512173233771019, -1.30488953024300391, 3.41083624432962962],
[-1.51649109581264652, -1.27555325816017007, 3.33415448869088893],
[-1.5178002416915175, -1.24750704796563094, 3.26084481148794048],
[-1.51905305661681256, -1.22066763478136719, 3.19068956758129563],
[-1.52025309979976964, -1.19495876840156567, 3.12348944740520329],
[-1.52140363678984492, -1.17031048983834207, 3.05906158593855526],
[-1.52250766914922564, -1.14665849559355904, 2.99723790098205312],
[-1.52356796060034538, -1.12394357749205787, 2.93786362894451836],
[-1.52458706012594547, -1.10211112780188847, 2.88079603128215833],
[-1.52556732242803039, -1.08111070093325012, 2.82590324882830091],
[-1.26192416103064686, -6.30962080515323276, 17.4628034636740956],
[-1.29501342218144333, -5.6449303018165482, 15.6231747470863898],
[-1.32179550688237435, -5.10693718568190125, 14.1341996815490294],
[-1.3439168294310011, -4.66256859190347228, 12.9043442499446126],
[-1.3624966381353294, -4.28934126820381501, 11.8713827452346763],
[-1.3783223695168525, -3.97143733589601533, 10.991536861994561],
[-1.39196412483993015, -3.69740470660606491, 10.23311126151855],
[-1.40384481188925503, -3.45874808726338179, 9.5725939709260075],
[-1.41428476915332335, -3.24903257778466203, 8.99217545792755146],
[-1.4235310837878723, -3.06329473726504098, 8.47811866990118723],
[-1.43177739722945141, -2.89764473248817467, 8.01965792138639166],
[-1.43917761445066339, -2.74899094895070517, 7.60823671459592266],
[-1.44585559929538698, -2.61484523276825254, 7.23696871774978145],
[-1.45191216320344041, -2.49318250247055451, 6.90024922007291686],
[-1.457430189659632, -2.38233781002055212, 6.59347023302747015],
[-1.46247844985890652, -2.28092969244049604, 6.31280835466619727],
[-1.46711448363939412, -2.18780230016400834, 6.05506460133627122],
[-1.47138680235120534, -2.10198114621600762, 5.81754193702724454],
[-1.47533659282261231, -2.02263887725680735, 5.59795053970192935],
[-1.47899904945125438, -1.94906851478072274, 5.39433374237841257],
[-1.4824044257819784, -1.88066233121594251, 5.20500956963000583],
[-1.48557887215051609, -1.81689502349343668, 5.02852416796271839],
[-1.48854510850467459, -1.75731019754024054, 4.86361439966284248],
[-1.49132296903881567, -1.70150942776240721, 4.70917756330713289],
[-1.49392984625748038, -1.64914333677773817, 4.56424670561132828],
[-1.49638105548759381, -1.59990427316283612, 4.42797035601832079],
[-1.49869013598350564, -1.55352026290973133, 4.29959578646487461],
[-1.50086910113046135, -1.50974998338567112, 4.1784551010762252],
[-1.50292864751033672, -1.46837856365952435, 4.06395361295180102],
[-1.50487833051022246, -1.4292140569091496, 3.9555600810338527],
[-1.50672671255812429, -1.3920844626895712, 3.85279846879672494],
[-1.5084814888376652, -1.35683520160001603, 3.75524095501651534],
[-1.51014959437493657, -1.32332696414298545, 3.66250198017220452],
[-1.51173729564013626, -1.29143387064735271, 3.57423315376223183],
[-1.51325026921496164, -1.26104189101246789, 3.49011888071391851],
[-1.51469366960746976, -1.23204748245583673, 3.40987259114983043],
[-1.51607218792184839, -1.20435641096595414, 3.33323347858518559],
[-1.51739010279019881, -1.17788272819330486, 3.25996366832323492],
[-1.51865132473150566, -1.15254788037658895, 3.18984575127755976],
[-1.51985943490642228, -1.12827992984319558, 3.12268062936013813],
[-1.52101771907693917, -1.10501287283367366, 3.0582856274602559],
[-1.522129197449132, -1.08268604002657898, 2.99649283430885571],
[-1.52319665096966861, -1.06124356829853927, 2.93714764049684041],
[-1.52422264455825651, -1.04063393403575732, 2.88010744684602304],
[-1.52520954768452821, -1.02080953978885747, 2.8252405204156319],
[-1.26022759105903748, -5.93048278145429464, 17.4393259292140321],
[-1.29345477328647251, -5.30648112117527226, 15.6043710469560359],
[-1.32035535871161436, -4.80129221349678037, 14.1187999153139678],
[-1.34257919699879058, -4.38393207183278566, 12.8915002487332853],
[-1.3612483527352488, -4.03332845254888639, 11.8605064807765679],
[-1.37715252419575518, -3.7346509130732346, 10.9822078412559794],
[-1.39086362119682816, -3.47715905299207062, 10.2250208402048077],
[-1.40280600153546042, -3.25288348182135767, 9.56551048873092924],
[-1.41330117301867664, -3.05578632004038164, 8.98592164736874821],
[-1.42259717254231277, -2.8812094633768357, 8.47255657824250896],
[-1.43088841885431761, -2.72550175019869956, 8.01467858417805168],
[-1.43832945801288647, -2.58576082339395352, 7.60375292129284475],
[-1.44504468655884977, -2.45965053031293568, 7.23290984070147669],
[-1.45113536174313928, -2.34526927150406372, 6.89655745151663702],
[-1.45668474268846926, -2.2410534502899524, 6.59009780225889141],
[-1.46176191913461251, -2.14570556937190871, 6.30971543993426831],
[-1.46642470268296599, -2.05813993359012759, 6.05221774221346909],
[-1.47072183785365196, -1.97744112652591864, 5.81491281269027915],
[-1.47469471260055474, -1.90283188722652219, 5.59551501837549203],
[-1.47837869567667579, -1.83364799463773753, 5.39207113423159701],
[-1.48180419248791839, -1.76931843879154438, 5.20290203407138474],
[-1.48499748622569827, -1.70934962443245841, 5.02655623934669737],
[-1.48798141355227931, -1.65331268172475432, 4.86177260469685546],
[-1.49077591160028744, -1.60083319366473842, 4.70745011012037118],
[-1.49339846399996423, -1.55158281974022261, 4.56262322929859199],
[-1.49586446703037823, -1.50527241965321079, 4.42644170904272372],
[-1.49818753209942335, -1.46164637277992515, 4.29815386495596741],
[-1.50037973710615069, -1.42047785761529077, 4.17709270004996469],
[-1.50245183648776548, -1.38156490711518676, 4.06266430498559572],
[-1.50441343766255797, -1.34472709511737043, 3.9543381140795173],
[-1.5062731499781532, -1.30980273911143774, 3.85163867969957119],
[-1.50803871103693798, -1.27664652786195809, 3.75413869599407057],
[-1.50971709430837642, -1.24512750046051668, 3.66145305604170668],
[-1.51131460118451022, -1.21512731753528458, 3.57323376812719617],
[-1.51283694004050551, -1.1865387765023574, 3.48916558965224466],
[-1.51428929439131665, -1.15926453159143872, 3.4089622632110741],
[-1.51567638185951892, -1.13321598643702348, 3.33236326011637241],
[-1.5170025053678311, -1.10831233268882645, 3.25913095331308034],
[-1.51827159772668341, -1.08447971266191678, 3.18904815504644867],
[-1.51948726059019168, -1.06165048774860549, 3.12191596553574335],
[-1.52065279859316815, -1.03976259732866194, 3.05755188776959663],
[-1.52177124935071184, -1.01875899538122949, 2.99578817079292836],
[-1.52284540989370121, -0.998587154028656276, 2.9364703498155178],
[-1.52387786002470249, -0.979198624915471494, 2.87945595639205809],
[-1.52487098300483614, -0.96054865071170803, 2.8246133759991161],
[-1.25864914010852136, -5.55286385341994571, 17.4174829535605653],
[-1.29200383468543212, -4.96924551802089187, 15.5868667748588656],
[-1.31901405891540935, -4.49663883721162261, 14.104457152720455],
[-1.34133280168199942, -4.10612082147550783, 12.8795323100281784],
[-1.36008471005932963, -3.77801308349813825, 11.8503677052391598],
[-1.37606156091161003, -3.49846159553799119, 10.9735078713374996],
[-1.38983692872794862, -3.25743030170612924, 10.2174730463515591],
[-1.40183650751192146, -3.04747066850417658, 9.55889966354143361],
[-1.41238288359142805, -2.86293827755019192, 8.98008306391576916],
[-1.42172497049584812, -2.69947779208072403, 8.46736200782653725],
[-1.4300579048844364, -2.55367483015077923, 8.01002671723960979],
[-1.43753682521235082, -2.42281487395340012, 7.59956265463383218],
[-1.44428662047496803, -2.30471269224728914, 7.22911547801566456],
[-1.45040896021122756, -2.19758933365337539, 6.8931052098927541],
[-1.45598745105018668, -2.09998190055315392, 6.58694322806839239],
[-1.46109147750669943, -2.01067634519270522, 6.30682146942111999],
[-1.46577910273199219, -1.928656714121042, 6.04955322662633588],
[-1.47009928712369908, -1.85306632830718354, 5.81245138312353227],
[-1.47409360488458985, -1.78317774784426186, 5.59323420240483493],
[-1.47779758625732582, -1.71836928634572761, 5.3899516615044325],
[-1.48124177732759921, -1.65810646715776011, 5.20092728531817361],
[-1.4844525843802594, -1.60192724933121511, 5.02471180540224438],
[-1.48745295223178498, -1.54943015857477562, 4.86004593072954716],
[-1.49026291341617023, -1.50026467793574181, 4.70583020645844385],
[-1.49290003602799048, -1.45412341171557502, 4.56110043474785432],
[-1.49537979138959432, -1.41073565225433417, 4.42500749590442766],
[-1.49771585780306182, -1.36986206506377606, 4.29680067741671134],
[-1.4999203729849202, -1.33129027188010673, 4.17581381946393471],
[-1.50200414502115387, -1.2948311595009947, 4.06145373696812051],
[-1.50397682958257106, -1.26031577897980807, 3.95319049339999884],
[-1.5058470795326091, -1.22759272787984419, 3.85054918978311189],
[-1.50762267181824083, -1.19652593001447682, 3.75310300047874223],
[-1.50931061556899238, -1.16699274399664388, 3.66046724033613913],
[-1.51091724457267307, -1.13888234515528142, 3.57229428930373194],
[-1.51244829670014291, -1.1120943358089288, 3.48826923332067285],
[-1.51390898237862781, -1.08653754716169426, 3.40810610626384802],
[-1.51530404383569528, -1.06212900268857124, 3.33154463843315263],
[-1.51663780653329661, -1.03879301817349079, 3.25834743367084956],
[-1.51791422396729248, -1.01646041783524033, 3.18829751060987077],
[-1.51913691680974861, -0.995067849438699747, 3.1211961544060558],
[-1.52030920721042628, -0.974557184109247632, 3.05686103415600652],
[-1.52143414894165763, -0.954874988875517361, 2.99512454843953924],
[-1.52251455396271917, -0.935972061862327287, 2.93583236737483322],
[-1.52355301589021574, -0.917803021620612003, 2.87884214448331965],
[-1.52455193078684004, -0.90032594337805516, 2.82402237572916626],
[-1.25719004169050197, -5.17666487754912463, 17.3972916063347398],
[-1.2906618152933258, -4.63314497797604208, 15.5706765152694828],
[-1.31777277876756105, -4.19291338698769511, 14.0911839184122147],
[-1.34017877109853822, -3.82908220313868064, 12.8684512612624928],
[-1.35900679193765295, -3.52335094206058264, 11.8409758445678843],
[-1.37505051572908399, -3.2628317322385052, 10.9654452144158299],
[-1.38888503893487525, -3.03818602267004012, 10.2104751690446687],
[-1.40093727857930239, -2.84248143479858406, 9.55276796480524304],
[-1.41153080900743411, -2.67046369271676776, 8.97466548159456501],
[-1.42091534737937164, -2.51807783079888647, 8.46254013850625775],
[-1.42928668885904742, -2.38214448143174584, 8.00570698938311587],
[-1.43680051554102528, -2.2601356424240846, 7.5956701411466554],
[-1.44358216853034271, -2.15001599568348922, 7.22558947120772466],
[-1.4497336960122631, -2.05012845900724061, 6.88989599973504774],
[-1.45533902386655312, -1.95911022443574501, 6.58400971855012695],
[-1.46046780749002836, -1.87583021145508222, 6.30412938921154442],
[-1.46517834125126534, -1.79934182258927344, 6.04707376805894992],
[-1.46951978402016481, -1.7288468047296055, 5.81016015446628131],
[-1.47353388126615048, -1.66366728530049279, 5.59111041238486983],
[-1.47725631175789673, -1.60322390423337646, 5.38797747815574102],
[-1.48071775098912828, -1.5470185458095369, 5.19908732716705124],
[-1.48394471849260912, -1.49462057977672869, 5.02299273417822079],
[-1.48696025860227721, -1.44565580697443585, 4.8584361227248003],
[-1.48978449164641913, -1.39979750892952159, 4.70431948536671296],
[-1.49243506345824484, -1.35675914859840474, 4.55967985296820988],
[-1.49492751443550365, -1.31628837749038086, 4.42366915435160113],
[-1.49727558446105413, -1.27816208429602196, 4.29553757615198784],
[-1.49949146632284624, -1.24218227979407403, 4.17461973316508406],
[-1.50158601750358245, -1.20817265776150307, 4.06032311054848005],
[-1.50356893810598002, -1.17597570578121347, 3.9521183540718634],
[-1.50544892106693529, -1.14545026602919009, 3.84953107261952754],
[-1.5072337795684021, -1.11646946634696453, 3.75213488511604876],
[-1.50893055558390898, -1.08891895763787261, 3.65954549691870668],
[-1.51054561274077526, -1.06269540594828427, 3.57141563213334035],
[-1.51208471607992845, -1.03770519730975486, 3.48743068095885445],
[-1.51355310082042882, -1.01386332112373223, 3.40730494706225695],
[-1.51495553185751453, -0.991092404018934725, 3.33077840064934838],
[-1.5162963554188984, -0.969321871044044769, 3.25761385947302129],
[-1.51757954405924744, -0.948487215037029707, 3.18759453339230348],
[-1.51880873597402544, -0.928529358237395464, 3.12052187893353228],
[-1.51998726945227491, -0.909394092834694523, 3.05621371913374107],
[-1.52111821315522922, -0.891031589295950166, 2.99450259116960371],
[-1.52220439279935937, -0.873395963081599613, 2.93523429021351845],
[-1.52324841473204242, -0.856444891817212617, 2.87826658285713233],
[-1.52425268681420678, -0.840139276196806839, 2.82346806750426893],
[-1.25585150912462629, -4.8017851819470998, 17.3787686777393127],
[-1.28942990057131635, -4.29809966857105419, 15.5558145697129309],
[-1.31663266441368876, -3.89005105394953477, 14.0789924683327392],
[-1.3391182070314116, -3.55276259008333684, 12.8582676818016175],
[-1.35801565425719395, -3.26929694543398552, 11.8323400986668474],
[-1.3741203990337838, -3.02772291312528585, 10.9580279278880557],
[-1.38800891813433425, -2.81939311496036682, 10.2040343122219443],
[-1.40010923894679351, -2.637886971928741, 9.54712169455748239],
[-1.41074583356460104, -2.47833727518105551, 8.96967452286682132],
[-1.42016914984075115, -2.33698720859870424, 8.45809601265915667],
[-1.4285755820051862, -2.210890781674693, 8.00172394444571466],
[-1.43612130694859341, -2.09770527981255217, 7.59207949347542943],
[-1.44293207742731555, -1.99554436239947885, 7.22233555776119118],
[-1.44911028650570017, -1.90287209339132346, 6.88693323031244464],
[-1.4547401509301281, -1.8184251886626599, 6.58130039435216574],
[-1.45989157296036698, -1.74115508701695143, 6.3016420649344278],
[-1.46462305772957047, -1.67018418863898388, 6.04478200580493752],
[-1.46898394506149943, -1.60477237695794006, 5.80804156429777674],
[-1.47301613660464259, -1.54429111095648031, 5.58914590542326195],
[-1.47675544659262359, -1.48820316323287671, 5.38615067923898749],
[-1.48023266856577829, -1.43604661875784445, 5.19738410865819844],
[-1.48347442537694163, -1.38742212445325475, 5.02140084273274123],
[-1.48650385216529002, -1.34198264431588665, 4.85694487808172859],
[-1.48934114937723905, -1.29942516388618179, 4.70291953544960428],
[-1.49200403379566948, -1.25948392463270786, 4.55836297338222352],
[-1.49450810886796037, -1.22192486888575358, 4.42242808315959302],
[-1.49686717069123842, -1.18654104993817655, 4.29436587689163218],
[-1.49909346233029828, -1.15314881717715245, 4.17351168062961797],
[-1.50119788636783058, -1.12158462774608036, 4.05927359503485263],
[-1.50319018347690059, -1.09170236788825203, 3.95112280070324973],
[-1.50507908318789685, -1.06337109138275299, 3.848585373042964],
[-1.50687243177348584, -1.03647310121985781, 3.75123533941494713],
[-1.50857730120321976, -1.01090231523927088, 3.65868876400059229],
[-1.51020008235828307, -0.986562867872245119, 3.5705986871837796],
[-1.51174656509680072, -0.963367909130314226, 3.48665077881394492],
[-1.51322200728413203, -0.941238569124101088, 3.40655959056068935],
[-1.51463119452261408, -0.920103062093176716, 3.33006531319107468],
[-1.5159784920106758, -0.899895908499487973, 3.2569309611462236],
[-1.51726788971519455, -0.88055725742399682, 3.18693992013838878],
[-1.5185030418418739, -0.862032294495386875, 3.11989380430830421],
[-1.51968730142606834, -0.84427072301448236, 3.05561057829472293],
[-1.52082375073355869, -0.827226307930387428, 2.99392290677882578],
[-1.52191522805175894, -0.810856473962002555, 2.93467669999324787],
[-1.52296435136162334, -0.795121950510084385, 2.87772982857688264],
[-1.52397353930590063, -0.779986457125067312, 2.82295098521034005],
[-1.2546347298986078, -4.42812257611273363, 17.3619306005086749],
[-1.2883092471641453, -3.96402845281275473, 15.5422948920700073],
[-1.31559483178413639, -3.58798590486582647, 14.0678947353280943],
[-1.33815218060813867, -3.27710738108115596, 12.8489918566556973],
[-1.35711232239503965, -3.01580516087786643, 11.8244694016086331],
[-1.37327219120485622, -2.79309598211157217, 10.9512638298624552],
[-1.38720950335524629, -2.60101781879108707, 10.1981573645100543],
[-1.39935328437891693, -2.43365788587637732, 9.54196696087362817],
[-1.41002881402533742, -2.28653321193297998, 8.96511563512055787],
[-1.41948719792964684, -2.15618308546275506, 8.45403451425188557],
[-1.42792536989247032, -2.03989338556067157, 7.99808198255246605],
[-1.43549995265571528, -1.93550555414253789, 7.58879469353386682],
[-1.44233707004516321, -1.84128136601510195, 7.21935735591754479],
[-1.44853942610522268, -1.75580536497602746, 6.88422020184350725],
[-1.45419149993278807, -1.67791326915321726, 6.57881827613823855],
[-1.45936341650389734, -1.60663862367401555, 6.29936227032186835],
[-1.46411387114256075, -1.54117249593953765, 6.04268049449627043],
[-1.46849236699504382, -1.48083263898659867, 5.80609797202662126],
[-1.47254094669668101, -1.42503962583549804, 5.58734286629668198],
[-1.4762955467886214, -1.37329818305918283, 5.38447329274454489],
[-1.47978706740796362, -1.32518244842504229, 5.19581951653318619],
[-1.48304222471879554, -1.2803242227788163, 5.01993789014527447],
[-1.48608423587928007, -1.23840352989940028, 4.85557384014723148],
[-1.48893337371013512, -1.19914097211554527, 4.70163189483636668],
[-1.49160741909310191, -1.16229149539722232, 4.55715123820327594],
[-1.49412203244287101, -1.12763926976820472, 4.42128563688283549],
[-1.49649106065263093, -1.09499345901412015, 4.29328685388452946],
[-1.49872679222200222, -1.06418470453633285, 4.17249086236953826],
[-1.50084017049361629, -1.03506218654732174, 4.05830632308762418],
[-1.50284097280947981, -1.00749115495607588, 3.95020490339028063],
[-1.50473796177692165, -0.981350844637122743, 3.84771310334805161],
[-1.50653901358142894, -0.956532707035827934, 3.75040532216964184],
[-1.50825122730965466, -0.93293890349050812, 3.65789795076903346],
[-1.50988101848240763, -0.910481016170296087, 3.5698443175677026],
[-1.51143419939586332, -0.889078940821096064, 3.48593034713604721],
[-1.51291604839175697, -0.86865993209096104, 3.40587081707330919],
[-1.51433136979643646, -0.849157777455945895, 3.32940611910852047],
[-1.5156845459631425, -0.830512079979804252, 3.25629944692081574],
[-1.5169795826050132, -0.812667633538400103, 3.18633434649847658],
[-1.51822014840685515, -0.795573876894421939, 3.11931257565687936],
[-1.51940960974072059, -0.779184415251651608, 3.05505222813251676],
[-1.52055106117695749, -0.763456599754120946, 2.99338608486928237],
[-1.52164735237323945, -0.74835115690487175, 2.93416016103118471],
[-1.52270111183335222, -0.733831861124507268, 2.87723242215900488],
[-1.52371476795285954, -0.719865244702138529, 2.82247164693630159],
[-1.25354086000071385, -4.0555733705905439, 17.3467933714804623],
[-1.28730097751032257, -3.63084891092655093, 15.53013102355402],
[-1.31466036148068577, -3.28665090370171464, 14.0579022744696083],
[-1.33728172745531926, -3.00206102081806359, 12.8406337299536268],
[-1.35629778662817047, -2.76282882461293999, 11.8173723816398937],
[-1.37250683826548525, -2.55891105439327715, 10.945160464473064],
[-1.38648769821491058, -2.38302573130687767, 10.1928509689080542],
[-1.39867027828258483, -2.22976421175484507, 9.53730965118776908],
[-1.4093805759002751, -2.09502518039230123, 8.96099406704161616],
[-1.41887028156450556, -1.97564216420374184, 8.45036034779873191],
[-1.42733680907089466, -1.86913153568807622, 7.99478534128399954],
[-1.43493717795109221, -1.77351786038899029, 7.58581957557290831],
[-1.44179784238573716, -1.68721024108969231, 7.21665834938818396],
[-1.44802178336316878, -1.60891309262574289, 6.8817600916401096],
[-1.45369371367992151, -1.53756065869991687, 6.57656627198464427],
[-1.45888395675284466, -1.47226821323681567, 6.29729267570838047],
[-1.46365137740227058, -1.41229518872148896, 6.04077169357691446],
[-1.4680456243534159, -1.35701696368803137, 5.80433164922926181],
[-1.47210886593289225, -1.30590302623079157, 5.58570339855988607],
[-1.4758771477371424, -1.2584998934192686, 5.38294727139787099],
[-1.47938146496345535, -1.21441762049238844, 5.19439536765153509],
[-1.48264861699918593, -1.17331904942381593, 5.01860557049004985],
[-1.4857018941629998, -1.13491116915229129, 4.85432459169230057],
[-1.48856163384018858, -1.09893811894242122, 4.70045804511281062],
[-1.49124567410042674, -1.06517548150030472, 4.55604603678084885],
[-1.49376972618849457, -1.03342559673417833, 4.42024312057664481],
[-1.49614768232530571, -1.00351368936453422, 4.29230173496375755],
[-1.49839187155746667, -0.975284650126161723, 4.17155843531235515],
[-1.50051327360542586, -0.948600345382740517, 4.05742238638708574],
[-1.50252169854030337, -0.923337356644879148, 3.9493656936492334],
[-1.50442593849447825, -0.899385071926046664, 3.84691523946549996],
[-1.50623389635592608, -0.8766440666621792, 3.74964575785959342],
[-1.50795269541959609, -0.855024724206987385, 3.65717393399443269],
[-1.50958877320420837, -0.834446055540014719, 3.56915335574160775],
[-1.51114796203942303, -0.814834685413414417, 3.48527017715464948],
[-1.5126355585500153, -0.796123978184218539, 3.40523937941522625],
[-1.51405638378162966, -0.778253281383080697, 3.32880153537035905],
[-1.51541483540627775, -0.761167268925527662, 3.25572000026782504],
[-1.51671493319881545, -0.744815368981561199, 3.18577846459840464],
[-1.51796035877477609, -0.729151264040285407, 3.1187788157359484],
[-1.51915449041719963, -0.714132452760222081, 3.05453926385167662],
[-1.52030043368614343, -0.699719864876467756, 2.99289269476707309],
[-1.5214010483949203, -0.68587752181738193, 2.93368521831889328],
[-1.52245897244644257, -0.672572236823729686, 2.87677488568695283],
[-1.52347664294797314, -0.659773349308177437, 2.8220305531772496],
[-1.2525710182781149, -3.6840324067003376, 17.3333724735250883],
[-1.28640617447530992, -3.29847737044951206, 15.5193360279649575],
[-1.31383029368433291, -2.98597794019166596, 14.0490262086017861],
[-1.3365078428727819, -2.72756702627098369, 12.8332028586049773],
[-1.35557299756153049, -2.51032036585468621, 11.8110573213462988],
[-1.37182524755051549, -2.32512753822121221, 10.9397250673308051],
[-1.38584436881124917, -2.16538182626757658, 10.1881214925889481],
[-1.39806104780918883, -2.02617543160751978, 9.53315540571338005],
[-1.40880190974604935, -1.90378636452168815, 8.9573148450745439],
[-1.41831915701271094, -1.79534070507938104, 8.4470780173984874],
[-1.42681062372090173, -1.69858407585821625, 7.9918380769129076],
[-1.43443367699993241, -1.6117232325841937, 7.58315780930863159],
[-1.44131506052972314, -1.53331389418055641, 7.2142418721195174],
[-1.44755799806482921, -1.46217979602507997, 6.8795559402980011],
[-1.45324740731376756, -1.39735327626323791, 6.57454716481853474],
[-1.4584537857296731, -1.33803099608226894, 6.2954358365670755],
[-1.46323614681451497, -1.28354047966185525, 6.03905795680902813],
[-1.46764426701818107, -1.2333145100993117, 5.80274477001726119],
[-1.47172042496143463, -1.18687131045277017, 5.58422951568028303],
[-1.47550076195103474, -1.14379904027212009, 5.38157448448032483],
[-1.47901635662336051, -1.10374354971892563, 5.19311340142754485],
[-1.48229408142406038, -1.06639861972953987, 5.0174055058274849],
[-1.48535729090375379, -1.03149811868316221, 4.85319864840427773],
[-1.4882263791388084, -0.9988096504287306, 4.6993994052671777],
[-1.49091923441781504, -0.96812937299858115, 4.5550486999583244],
[-1.49345161262550508, -0.939277743789625807, 4.41930178453018918],
[-1.49583744579381905, -0.91209600353281628, 4.2914116966219007],
[-1.49808909858439021, -0.886443253600231018, 4.17071550818908676],
[-1.50021758267307614, -0.862194013030503581, 4.05662283130851886],
[-1.50223273688327197, -0.839236165856576566, 3.94860616035519296],
[-1.50414337928666852, -0.817469227873189319, 3.84619271714335564],
[-1.50595743623225187, -0.796802876313360775, 3.7489575330543623],
[-1.50768205228590091, -0.777155697054588201, 3.65651755464183692],
[-1.50932368429618102, -0.758454112711648643, 3.56852660030830693],
[-1.51088818219741361, -0.740631461861477325, 3.48467102805825091],
[-1.51238085868169359, -0.723627205110858207, 3.40466600004658781],
[-1.51380654948842563, -0.7073862380787036, 3.32825225016030046],
[-1.51516966575331469, -0.691858294864527323, 3.25519327733760067],
[-1.51647423961059014, -0.676997428397584811, 3.18527290061063661],
[-1.5177239640416289, -0.66276155635005618, 3.11829312262701475],
[-1.51892222779947916, -0.649112063162170516, 3.05407225717801234],
[-1.52007214610475616, -0.636013450253036017, 2.99244328344053434],
[-1.52117658769855835, -0.623433027745310731, 2.93325239554168737],
[-1.52223819874688049, -0.611340642067020323, 2.8763577209253306],
[-1.52325942401601133, -0.599708434651972988, 2.82162818503753288],
[-1.25172628087776361, -3.31339309644113911, 17.3216827986172888],
[-1.28562587606116763, -2.96682894475654058, 15.5099224278661367],
[-1.31310562313446111, -2.68589786550230691, 14.0412771746537253],
[-1.33583147707461758, -2.4535680191166449, 12.8267083666042367],
[-1.35493886161885801, -2.25823143603143039, 11.8055321183643098],
[-1.37122828343303249, -2.09170416116903235, 10.9349645314447752],
[-1.38528033967041253, -1.94805047766151773, 10.1839749971082671],
[-1.3975263800087292, -1.82286049566355968, 9.52950959121894137],
[-1.40829356751202384, -1.71278947400111003, 8.9540827501946918],
[-1.41783454341673631, -1.61525454313299055, 8.44419180604524477],
[-1.42634750234676377, -1.52822946680010396, 7.98924404588276627],
[-1.43399010969321217, -1.45010235811673116, 7.58081288326579994],
[-1.44088935763135129, -1.37957491688108091, 7.21211109325053901],
[-1.44714867835896843, -1.31558970759906213, 6.87761063805954098],
[-1.45285316557095046, -1.2572767778979379, 6.57276360001088555],
[-1.45807346622345979, -1.20391387119368254, 6.29379418218475095],
[-1.46286872156672798, -1.15489635913162747, 6.03754152190478521],
[-1.4672888178122474, -1.10971423195884245, 5.80133940151817118],
[-1.47137612837868104, -1.06793428672646207, 5.58292313227556036],
[-1.47516687684790671, -1.02918619314970239, 5.38035670974372238],
[-1.47869221359237879, -0.993151486741149836, 5.19197527235234446],
[-1.48197907387669958, -0.959554796035272939, 5.01633923927328773],
[-1.48505086748739745, -0.928156792179623324, 4.85219745245014167],
[-1.4879280372570638, -0.898748478880105672, 4.69845732570099628],
[-1.49062851466849144, -0.871146534546521023, 4.55416049449042415],
[-1.49316809400544637, -0.845189487172894216, 4.41846281905385307],
[-1.49556074154804008, -0.820734553288558488, 4.29061785913629734],
[-1.49781885259882985, -0.797655010259731956, 4.16996313696893228],
[-1.49995346632810533, -0.775837999824882107, 4.05590865464007777],
[-1.50197444629951549, -0.755182682496963076, 3.94792724572023479],
[-1.50389063290604619, -0.735598679138827038, 3.8455464281646452],
[-1.5057099726866463, -0.717004748898402933, 3.74834149285220652],
[-1.50743962851355628, -0.699327662712474596, 3.65592961451354803],
[-1.50908607387208726, -0.682501239439637364, 3.56796481284832678],
[-1.51065517384939829, -0.666465517874734625, 3.48413362400069593],
[-1.51215225496786254, -0.651166042809127377, 3.40415136824104936],
[-1.5135821656155668, -0.636553247221500085, 3.32775892019684205],
[-1.51494932851856867, -0.622581915829548804, 3.25471990442003012],
[-1.51625778645134335, -0.609210717770629073, 3.1848182523453441],
[-1.51751124218062872, -0.596401798236928404, 3.11785606744972021],
[-1.51871309347396832, -0.584120420566910936, 3.05365175418590651],
[-1.5198664638699908, -0.572334651666523753, 2.99203837343443801],
[-1.5209742297971327, -0.561015084761237404, 2.93286219311291374],
[-1.52203904453673622, -0.550134594410868538, 2.87598140744792952],
[-1.52306335945054649, -0.539668119490351095, 2.82126500244678002],
[-1.20807844068863446, -3.33263018121002608, 19.6000312532414647],
[-1.25100767582920858, -2.94354747253931448, 17.3117385728718389],
[-1.2849610702487082, -2.63581757999735045, 15.5019021423594161],
[-1.31248729423210198, -2.38634053496745846, 14.0346652712773636],
[-1.33525353054661, -2.18000576415773084, 12.8211589004526534],
[-1.35439623664280595, -2.00651294317452766, 11.8008042470451908],
[-1.37071676315359503, -1.85859900088623053, 10.930885373962143],
[-1.38479638979094122, -1.7309954872386768, 10.1804172093224672],
[-1.39706701807475908, -1.619787847043199, 9.52637727542281354],
[-1.40785625897247635, -1.52200676645673094, 8.95130229522365006],
[-1.41741711940084425, -1.43535910825401936, 8.44170575541895118],
[-1.42594809454583915, -1.35804580432937061, 7.98700688671211001],
[-1.43360709856855029, -1.2886355942189216, 7.57878808850003338],
[-1.44052133098067769, -1.22597560083461921, 7.21026900240860424],
[-1.44679439795220621, -1.1691267862240049, 6.87592691147992952],
[-1.45251154010035766, -1.11731656930796741, 6.57121807324248408],
[-1.45774352922344796, -1.06990350768693432, 6.29237000458378226],
[-1.46254961326986743, -1.02635060580341575, 6.03622450038133884],
[-1.4669797701435523, -0.986204887491463733, 5.80011749455917069],
[-1.47107645246849117, -0.949081582237736421, 5.58178605553568641],
[-1.47487595257936932, -0.914651753537593426, 5.37929562549297113],
[-1.47840948080282053, -0.882632525852430128, 5.19098254266960435],
[-1.48170402491171904, -0.852779294913219577, 5.01540822820837207],
[-1.48478304086782775, -0.824879467148793122, 4.85132236616883983],
[-1.48766701226664155, -0.798747389136451935, 4.69763308235875776],
[-1.49037390670723813, -0.774220211276487436, 4.55338261756984064],
[-1.49291955058336279, -0.75115449085955377, 4.41772734936775002],
[-1.49531793881654695, -0.729423384788559392, 4.28992128178771459],
[-1.497581492336167, -0.708914315898777292, 4.16930232037968374],
[-1.49972127330958016, -0.689527022211301244, 4.05528079938021513],
[-1.50174716599536451, -0.671171917763291437, 3.94732984134535725],
[-1.5036680294592859, -0.653768708460559012, 3.84497721663366265],
[-1.50549182713130358, -0.63724521783335597, 3.74779843738242446],
[-1.50722573719977082, -0.621536386474132341, 3.65541087295099087],
[-1.50887624707017176, -0.606583415907606782, 3.56746871480661243],
[-1.51044923450890156, -0.592333033140745813, 3.48365865115901086],
[-1.51195003761163327, -0.578736856502060526, 3.4036961373027439],
[-1.51338351535158044, -0.565750846860403889, 3.32732216809775005],
[-1.51475410015475598, -0.553334831106760183, 3.25430047544663337],
[-1.51606584370093489, -0.541452087036048191, 3.18441508688075858],
[-1.51732245694710621, -0.530068980592875549, 3.11746819211184878],
[-1.51852734520622579, -0.519154647933752433, 3.05327827316038114],
[-1.5196836389796804, -0.508680715976462028, 2.99167846083656741],
[-1.5207942211311376, -0.49862105610856966, 2.93251508623852519],
[-1.52186175089856346, -0.488951566553755435, 2.87564640079427392],
[-1.52288868516539977, -0.479649979579653496, 2.82094144240283695],
[-1.20743694534676327, -2.91450297152666993, 19.5896235443328273],
[-1.25041617783044168, -2.57438624847443887, 17.3035532843889044],
[-1.28441269002976677, -2.30535611030983834, 15.4952864271539816],
[-1.31197619632152152, -2.08723485778423878, 14.0292000083926336],
[-1.33477484957103099, -1.90682121367290125, 12.8165625861871426],
[-1.35394592765223543, -1.75511509140104627, 11.7968807214884581],
[-1.37029145279717324, -1.62576952026783284, 10.927493704085931],
[-1.38439324882673231, -1.51418011590423873, 10.1774534933277749],
[-1.39668365771997038, -1.41692544986083946, 9.5237632022789267],
[-1.40749064828182746, -1.33141007269902589, 8.9489777029270261],
[-1.41706751979372436, -1.25562944791848996, 8.4396236463664227],
[-1.42561300788727618, -1.18801083990606338, 7.98513000251146909],
[-1.43328522583432649, -1.12730298661126804, 7.57708650286573704],
[-1.44021153916347688, -1.07249795469620635, 7.20871839549378635],
[-1.4464956933956985, -1.02277473270402908, 6.87450731053208219],
[-1.45222304686840742, -0.977457820007581968, 6.56991291876524652],
[-1.45746447143531155, -0.935986357802493729, 6.29116544780104636],
[-1.46227930057882416, -0.897890798601032492, 6.03510886773979571],
[-1.46671758572313538, -0.862775050425373835, 5.79908087464483391],
[-1.47082184301217489, -0.830302653313324646, 5.58081997691313259],
[-1.47462841992744398, -0.800185964301713848, 5.37839280291366251],
[-1.47816857489248732, -0.772177613749806757, 5.19013667527548606],
[-1.48146933780980361, -0.74606369529990102, 5.01461383769433411],
[-1.48455420169438179, -0.721658292490324538, 4.85057466595282438],
[-1.48744368285580375, -0.69879904563695483, 4.69692787103124676],
[-1.49015577788144959, -0.677343535400658947, 4.55271619151442852],
[-1.49270633894042803, -0.657166312741068959, 4.41709643063818458],
[-1.49510938394766746, -0.638156444367906861, 4.28932295821571685],
[-1.49737735440777353, -0.620215472239906251, 4.16873399555537016],
[-1.49952133095357087, -0.603255707854884804, 4.05474015065319016],
[-1.50155121446200179, -0.587198798951620859, 3.94681478438196587],
[-1.50347587899464852, -0.571974519182746666, 3.84448587536403252],
[-1.50530330154738445, -0.557519741313846184, 3.74732911840235117],
[-1.50704067261056984, -0.543777562282164451, 3.65496204362511934],
[-1.50869449077120454, -0.530696554542634868, 3.56703898446156664],
[-1.51027064398117061, -0.518230122934715376, 3.48324675486833701],
[-1.5117744796338437, -0.506335950116598377, 3.40330092185513555],
[-1.51321086520678216, -0.494975516656424197, 3.3269425798121075],
[-1.51458424091973831, -0.484113684312245163, 3.25393554955587572],
[-1.51589866560814746, -0.473718333002546066, 3.18406393825282752],
[-1.51715785681050552, -0.463760043566530111, 3.11713000711503385],
[-1.51836522590350853, -0.454211819714724796, 3.05295230251111427],
[-1.51952390898436263, -0.445048843635587388, 2.99136401329348312],
[-1.52063679408889008, -0.436248260599271742, 2.93221152302796195],
[-1.52170654524284132, -0.4277889886224856, 2.87535313066970666],
[-1.52273562376814153, -0.419651549857361805, 2.82065791725555348],
[-1.20693597054516544, -2.49710890457620449, 19.5814956600517363],
[-1.24995270329853758, -2.20579888817389014, 17.2971396147635872],
[-1.28398160868782707, -1.97535632105819547, 15.4900858176313481],
[-1.31157315920488271, -1.78850885346120414, 14.0248902592249394],
[-1.33439622197028629, -1.63395455751463659, 12.8129269885106076],
[-1.35358868280505895, -1.50398742533895491, 11.7937680603663022],
[-1.36995306346325085, -1.39317260691178046, 10.9247951925332121],
[-1.38407159345175246, -1.29756711886101805, 10.1750888237351482],
[-1.39637694372278753, -1.21424082062851091, 9.52167176842857188],
[-1.40719735069096985, -1.14097082488457047, 8.94711288513650516],
[-1.41678633250303276, -1.07604025253394919, 8.4379489802870502],
[-1.42534280493399335, -1.01810200352428093, 7.98361654430290191],
[-1.43302503052922825, -0.96608429024442366, 7.57571097600002208],
[-1.43996049934911441, -0.91912372298879641, 7.20746186110381082],
[-1.44625306149295918, -0.87651700696542989, 6.87335419628724509],
[-1.45198816367902994, -0.837685479045594206, 6.56885029818253408],
[-1.45723675290620291, -0.802148671324515394, 6.29018249763640735],
[-1.46205822691595877, -0.769504329955767896, 6.03419645406981253],
[-1.46650269238122322, -0.739413122209020113, 5.7982312333223982],
[-1.47061271319153097, -0.711586796705579672, 5.58002646416625314],
[-1.47442467828948898, -0.685778920134646142, 5.3776496987225153],
[-1.4779698822667855, -0.661777559223933776, 5.18943902691434733],
[-1.48127538671266623, -0.639399447501870277, 5.01395733416049882],
[-1.48436471251558766, -0.618485296881494784, 4.84995553637905452],
[-1.48725840059837067, -0.598896000240954796, 4.69634280188948683],
[-1.48997446936169231, -0.580509533517542531, 4.55216225866672897],
[-1.49252879037299113, -0.563218411461506085, 4.41657104321064331],
[-1.49493539885409299, -0.546927584946619305, 4.28882381195640594],
[-1.49720675179825968, -0.531552692946127836, 4.16825903385255181],
[-1.49935394374046949, -0.517018601289817026, 4.05428753178098233],
[-1.50138688807057163, -0.503258174772258648, 3.94638285383912768],
[-1.50331447014255692, -0.490211240263877301, 3.84407314240257092],
[-1.50514467716900402, -0.477823707037779044, 3.74693423602125053],
[-1.50688470890602533, -0.466046817187430573, 3.65458379144476764],
[-1.50854107236328217, -0.454836504230135275, 3.56667625400464416],
[-1.51011966316573387, -0.444152842107568779, 3.48289853686018525],
[-1.51162583571169717, -0.433959570060774347, 3.40296629522657179],
[-1.51306446388653582, -0.424223681463514746, 3.3266207021430616],
[-1.51443999378288985, -0.414915066789832865, 3.25362564874360549],
[-1.51575648962875986, -0.406006202579132114, 3.18376530522469414],
[-1.51701767392293974, -0.397471879630464542, 3.11684198943555923],
[-1.51822696261260193, -0.389288964772462054, 3.05267429875738916],
[-1.51938749601307377, -0.381436191467717234, 2.99109546809268245],
[-1.52050216605929767, -0.373893975260483014, 2.93195192266762072],
[-1.5215736403866893, -0.366644250695587814, 2.87510199920456788],
[-1.52260438366374795, -0.359670326849704347, 2.82041481304643105],
[-1.20657641266197602, -2.08030415976202754, 19.575662143360681],
[-1.24961810574623566, -1.83767368492093497, 17.2925093751059968],
[-1.28366863538470599, -1.64572901972398222, 15.4863100756026721],
[-1.31127894894430552, -1.49008971470943807, 14.0217442154158114],
[-1.33411837312044268, -1.3613452786943292, 12.810259072513638],
[-1.35332518961463455, -1.25307887927280981, 11.7914722539571404],
[-1.36970224767426307, -1.16076461667310427, 10.9227950428939113],
[-1.38383204394887604, -1.08111878433505937, 10.1733277605929082],
[-1.3961474666850282, -1.01170106281523764, 9.52010700109138597],
[-1.40697692946244035, -0.950660087474621829, 8.94571142313619205],
[-1.41657409557746994, -0.896565883276879583, 8.43668496163543757],
[-1.42513800044175643, -0.848296428834378835, 7.98246939533150446],
[-1.43282700584911149, -0.804958992050062605, 7.57466411519108895],
[-1.43976868473672481, -0.765834406774853593, 7.20650176775137208],
[-1.44606695685748443, -0.730336846897719494, 6.8724697293075403],
[-1.4518073278356427, -0.697984292228674308, 6.56803218987182547],
[-1.45706079478447492, -0.668376511369025206, 6.28942297198252742],
[-1.46188679832248969, -0.641178420316881437, 6.03348893518185392],
[-1.46633548200473762, -0.616107345380141891, 5.79757012002713523],
[-1.47044944160737212, -0.592923161938456511, 5.57940695384087704],
[-1.47426509377301795, -0.571420578981789995, 5.37706764821864436],
[-1.47781375726547437, -0.551423043755774001, 5.18889084174183335],
[-1.48112251485772894, -0.532777883042348521, 5.01343987942849978],
[-1.48421490607805184, -0.51535239794376797, 4.84946606465085672],
[-1.48711148831330076, -0.499030700776275482, 4.69587889430475247],
[-1.48983029455893545, -0.483711134597057002, 4.55172177655830623],
[-1.4923872093643944, -0.469304153888174314, 4.41615208808772053],
[-1.49479627953645244, -0.455730573029406172, 4.28842469220671241],
[-1.49706997243838846, -0.44292011018887234, 4.16787823687728842],
[-1.49921939191483489, -0.430810170090469791, 4.05392370055132112],
[-1.50125445973660399, -0.419344821155476111, 3.94603476707302958],
[-1.50318406882255595, -0.4084739317452597, 3.84373969772289392],
[-1.50501621323073098, -0.398152437362627176, 3.74661443558232188],
[-1.50675809892660051, -0.388339716218196018, 3.65427672961322481],
[-1.5084162385651434, -0.378999054915865163, 3.5663811067582909],
[-1.50999653291510794, -0.370097189439977492, 3.48261455263018771],
[-1.51150434107137377, -0.361603909347218588, 3.40269278695732691],
[-1.51294454121625188, -0.35349171523744205, 3.32635704038432989],
[-1.51432158338120804, -0.345735521319910533, 3.25337125562035823],
[-1.51563953541149798, -0.338312396297209383, 3.18351964915673991],
[-1.51690212313371897, -0.331201336928759638, 3.11660458049962807],
[-1.51811276556176833, -0.324383069564480431, 3.05244468460176055],
[-1.51927460584174145, -0.317839875699109098, 2.99087323032861674],
[-1.52039053852562223, -0.31155543822246351, 2.93173667367338142],
[-1.52146323367199554, -0.305514705556625632, 2.87489337928784705],
[-1.52249515819623116, -0.299703771298470756, 2.82021248791860968],
[-1.20635910478467956, -1.66394359280645432, 19.5721365103859206],
[-1.24941317154386211, -1.46989784887513175, 17.2896734473937386],
[-1.283474511109296, -1.31638411395825239, 15.4839681404339444],
[-1.31109426400402862, -1.19190387636729889, 14.0197693457703529],
[-1.33394196228424256, -1.08893221410958585, 12.8085651684640016],
[-1.35315607146636663, -1.00233783071582705, 11.789998733794917],
[-1.3695395960662875, -0.928501421061889931, 10.9214979652404782],
[-1.3836751610643101, -0.864796975665193868, 10.1721744262618419],
[-1.39599576003957626, -0.809272904370768931, 9.51907253766117023],
[-1.4068298930212968, -0.760448590822322745, 8.94477654954756929],
[-1.41643129449123917, -0.717180402274045115, 8.43583448174845607],
[-1.42499905876824173, -0.678570980365829302, 7.98169115655306616],
[-1.43269159667234147, -0.643906335583074863, 7.57394827229591705],
[-1.43963652218918803, -0.612611286037952341, 7.20584025202141376],
[-1.44593778964834874, -0.584217288746807561, 6.87185585888432282],
[-1.45168093397179776, -0.558338820758383703, 6.56746037917048842],
[-1.45693697723942495, -0.534655771464009222, 6.28888851184540787],
[-1.46176538146192714, -0.512900133846290296, 6.03298782436698922],
[-1.4662163086193063, -0.492845818023296189, 5.79709893449902136],
[-1.47033237043548093, -0.47430076465660681, 5.57896274427333783],
[-1.47414999742133768, -0.457100774394213205, 5.37664785881193286],
[-1.47770052045406164, -0.441104632971361654, 5.18849324532564093],
[-1.48101103293160996, -0.426190225304060422, 5.01306252513901107],
[-1.48410508373871042, -0.412251412149641727, 4.84910723541016075],
[-1.48700323853258021, -0.399195500277202742, 4.69553707201059733],
[-1.48972353764509058, -0.386941178609114367, 4.55139561338970822],
[-1.49228187215557973, -0.375416823183793658, 4.41584038269937285],
[-1.49469229470153087, -0.364559096268666116, 4.28812636986018525],
[-1.49696727786844952, -0.354311781744011789, 4.16759233276393815],
[-1.49911793019178474, -0.344624811538341347, 4.05364934571974],
[-1.50115417766742665, -0.335453447523447301, 3.94577117649454889],
[-1.50308491702981906, -0.326757590658656338, 3.84348616012244459],
[-1.50491814579140559, -0.318501194876487981, 3.74637030473468968],
[-1.50666107305258534, -0.310651767639708365, 3.65404141686206918],
[-1.50832021431975405, -0.303179942576835004, 3.56615407456002176],
[-1.50990147296094346, -0.296059112345283093, 3.48239530896139193],
[-1.51141021044540858, -0.289265112046968143, 3.40248088045246222],
[-1.51285130712870552, -0.282775945257701999, 3.32615205609371944],
[-1.51422921503531072, -0.276571546125170931, 3.25317281129732283],
[-1.51554800384155453, -0.270633572114563348, 3.18332739199755155],
[-1.51681140105928525, -0.264945222892451537, 3.11641818427246164],
[-1.51802282725611293, -0.25949108158224149, 3.05226384711111587],
[-1.51918542701290371, -0.254256975232285132, 2.99069767116975349],
[-1.52030209620866574, -0.249229851837486166, 2.93156613223843099],
[-1.52137550613118666, -0.244397671667660576, 2.87472761299085722],
[-1.52240812483613741, -0.239749310997816961, 2.8200512706118217],
[-1.20628481277833521, -1.24788084080517447, 19.5709311866278171],
[-1.24933861612278863, -1.102357602461284, 17.2886417319344723],
[-1.28339990504144752, -0.987230696185729006, 15.4830680851795126],
[-1.31101973178267484, -0.893877089851823681, 14.018972359176102],
[-1.33386757931065403, -0.816653619986114854, 12.807850940115566],
[-1.35308188447878264, -0.751712158043768341, 11.7893523453197631],
[-1.36946563440365354, -0.696338458171349317, 10.9209081523206617],
[-1.38360144316685685, -0.648563176484464154, 10.1716324845313473],
[-1.3959222973452543, -0.606922737976197446, 9.5185716072600286],
[-1.40675669237670298, -0.5703067671797446, 8.94431113193565963],
[-1.41635835968400103, -0.53785760494329149, 8.43540010419395436],
[-1.42492639152348843, -0.508902282686960139, 7.98128413347382359],
[-1.43261919731345633, -0.48290534740903035, 7.57356553186495685],
[-1.43956439008321246, -0.459435443643578412, 7.20547920781012063],
[-1.44586592351048449, -0.438141188942571091, 6.87151431324932283],
[-1.45160933207598708, -0.418733461175765487, 6.56713644943992225],
[-1.45686563756538856, -0.400972193825336332, 6.28858057316069097],
[-1.46169430179855753, -0.384656395210146773, 6.03269446487913541],
[-1.466145486637777, -0.36961650923641437, 5.79681891985776421],
[-1.47026180374106286, -0.35570850090509587, 5.57869498919491935],
[-1.47407968359015684, -0.34280922874189701, 5.37639140410208416],
[-1.47763045705908436, -0.330812788893824872, 5.18824723915148667],
[-1.4809412175608927, -0.319627600912422871, 5.01282620764316533],
[-1.48403551400807254, -0.309174065418348465, 4.84887992597776485],
[-1.48693391209422376, -0.299382666864608904, 4.69531815865994862],
[-1.48965445219326131, -0.290192425751933991, 4.55118454387616467],
[-1.49221302543011958, -0.281549627439645178, 4.41563665701176777],
[-1.49462368448985994, -0.27340677155302312, 4.28792953385657949],
[-1.49689890200642672, -0.265721698581022514, 4.16740197274570257],
[-1.49904978656377375, -0.258456859752374812, 4.05346508378307835],
[-1.50108626420576341, -0.251578703498172629, 3.94559266652967366],
[-1.50301723171390722, -0.245057157344658799, 3.84331308435539842],
[-1.5048506866465361, -0.238865188356593039, 3.74620237072590045],
[-1.50659383814855508, -0.232978428579673463, 3.65387835489121215],
[-1.50825320176940991, -0.227374854538102023, 3.56599563533923325],
[-1.50983468091863049, -0.222034511899798614, 3.48224126162850789],
[-1.51134363710544006, -0.216939278053412454, 3.4023310108043523],
[-1.51278495072382357, -0.212072656643526702, 3.3260061650259769],
[-1.51416307383514592, -0.207419599155499484, 3.2530307134220835],
[-1.51548207615115782, -0.202966349484530073, 3.18318891441571283],
[-1.5167456852177259, -0.198700308107125689, 3.11628316548008755],
[-1.51795732163507302, -0.194609913030137582, 3.05213213602265743],
[-1.51912013001539248, -0.190684535148375606, 2.99056912624369042],
[-1.52023700626773839, -0.186914386016525186, 2.93144062069250344],
[-1.52131062170874776, -0.183290436350451585, 2.87460501009624858],
[-1.52234344442183445, -0.179804343829350538, 2.81993145905698084],
[-1.2063542318777789, -0.83196843577777857, 19.5720574516722401],
[-1.24939508067232485, -0.73493828274842643, 17.2894231016567304],
[-1.28344541137999091, -0.65817713404102085, 15.4836170783150155],
[-1.31105590558181251, -0.595934502537187494, 14.0193591721873361],
[-1.33389574174449765, -0.544447241528366543, 12.8081213569548193],
[-1.35310311475015066, -0.501149301759315158, 11.7895373238878864],
[-1.36948082095623769, -0.46423078676482632, 10.9210292586425393],
[-1.38361132374858298, -0.432378538671432222, 10.1717051222454415],
[-1.39592748990334425, -0.404616663740099802, 9.51860701448584834],
[-1.40675771884694645, -0.380204788877553157, 8.94431765834443837],
[-1.41635566438725324, -0.358571054275253986, 8.43538405182534845],
[-1.4249203554910308, -0.339266751307388259, 7.98125032450630734],
[-1.43261014953285848, -0.321934865063563724, 7.57351770062033669],
[-1.43955261640191501, -0.306287790723811737, 7.20542027677767027],
[-1.44585167374486656, -0.29209124722118518, 6.87144659087838061],
[-1.45159282573458626, -0.279152466487420425, 6.56706177411656533],
[-1.45684706849295176, -0.2673113887143031, 6.28850041950397998],
[-1.46167384197288275, -0.256434007363663619, 6.03261002323018669],
[-1.46612328929605784, -0.246407275511942525, 5.79673115641844738],
[-1.47023800597152898, -0.23713516225347242, 5.57860469201293885],
[-1.47405440849412539, -0.228535567208391549, 5.37629921857741166],
[-1.47760381556545006, -0.220537882920216421, 5.18815369569809093],
[-1.48091330995752024, -0.213081051792448933, 5.01273174341736105],
[-1.48400643124116383, -0.206112004339050525, 4.84878490207616419],
[-1.48690373687638488, -0.199584394211595267, 4.69522287382777925],
[-1.48962325995284695, -0.193457566227642458, 4.55108924550528826],
[-1.49218088512834024, -0.187695708821174867, 4.41554155001813875],
[-1.49459065932697022, -0.182267153576459756, 4.28783478788621597],
[-1.49686505003459325, -0.177143792903502184, 4.16730772805488847],
[-1.49901516122089173, -0.172300593243780664, 4.05337145605994031],
[-1.50105091478222019, -0.167715186009186612, 3.94549975086611449],
[-1.50298120376199629, -0.163367522148043093, 3.84322095853271328],
[-1.5048140223408335, -0.159239579083686078, 3.74611109794371533],
[-1.50655657660409004, -0.155315110990112415, 3.6537879860423943],
[-1.50821537932341543, -0.151579435107880939, 3.56590621091289917],
[-1.50979633138073122, -0.14801924817458148, 3.48215281330702942],
[-1.51130479198045831, -0.144622468132101278, 3.40224356280768214],
[-1.51274563941065332, -0.141378097141182574, 3.32591973524631968],
[-1.51412332380479109, -0.138276102630574549, 3.25294531438426615],
[-1.51544191310639809, -0.135307313670214119, 3.18310455409178727],
[-1.51670513323670675, -0.132463330413686187, 3.11619984798196725],
[-1.51791640330068911, -0.129736444726554601, 3.05204986219219743],
[-1.51907886653210022, -0.127119570421096229, 2.99048789415628891],
[-1.52019541756727827, -0.124606181767809687, 2.93136042608772263],
[-1.52126872654601408, -0.122190259160322418, 2.87452584674658507],
[-1.52230126046186731, -0.119866240981249414, 2.81985331908389192],
[-1.20656798385249209, -0.416057925466376544, 19.5755253931930149],
[-1.2495831293762798, -0.367524449816552912, 17.2920253638688166],
[-1.28361154667862531, -0.329131165815032101, 15.4856213515972616],
[-1.31120326205210835, -0.298000741375479161, 14.0209348817162951],
[-1.33402689238508509, -0.272250386201037808, 12.8093806707588289],
[-1.35322017602735301, -0.250596328893954279, 11.7905572744605482],
[-1.36958554427405366, -0.23213314309729724, 10.9218643827278346],
[-1.38370516929966247, -0.216203932703072255, 10.1723950336795497],
[-1.39601168472679116, -0.202320534018375547, 9.51918112556456997],
[-1.40683330211714908, -0.190112608394209348, 8.94479822494755084],
[-1.41642352276499373, -0.179294116805695403, 8.43578819570796945],
[-1.42498125084617167, -0.169640625100734715, 7.98159141098956759],
[-1.43266474082776707, -0.160973566385142375, 7.57380629842094777],
[-1.43960147709381703, -0.15314909330785284, 7.20566484013447717],
[-1.44589530573135039, -0.14605003088195459, 6.87165395299596238],
[-1.45163167061460174, -0.139579968328327086, 6.56723750984778931],
[-1.45688151672805666, -0.133658854745693279, 6.28864911578486829],
[-1.46170424039373503, -0.128219670209976738, 6.03273548337940646],
[-1.46614994729522174, -0.123205877923968213, 5.79683655632270423],
[-1.47026120064576782, -0.118569451664981285, 5.57869270083736968],
[-1.47407438894068221, -0.11426933247602189, 5.37637209299682883],
[-1.47762080649246363, -0.110270209439736103, 5.18821335413958362],
[-1.48092751473478224, -0.106541547822646188, 5.01277982505550401],
[-1.48401803449145109, -0.103056807950795207, 4.84882281408491433],
[-1.48691290668731146, -0.099792812529349767, 4.69525182950590647],
[-1.48963014977375163, -0.096729230504789071, 4.55111029525032595],
[-1.49218563540411808, -0.0938481531700703225, 4.41555560665180824],
[-1.49459339891126697, -0.091133743836052869, 4.28784264748628718],
[-1.49686589741702392, -0.0885719465927233135, 4.16731008718763185],
[-1.49901422559675646, -0.0861502428503883033, 4.05336892611076927],
[-1.50104829698823705, -0.0838574467591193945, 3.94549287001656701],
[-1.50297699709771604, -0.0816835324509628397, 3.84321020181780115],
[-1.50480831329191345, -0.0796194874757626125, 3.74609688573463062],
[-1.50654944548121228, -0.0776571879114027058, 3.65377069123149667],
[-1.50820690082833009, -0.07578929149891106, 3.56588616502376476],
[-1.50978657510912795, -0.0740091458386827467, 3.48213031171002285],
[-1.51129382286992953, -0.0723107092282262998, 3.40221886918804683],
[-1.51273351814077284, -0.070688482156110885, 3.32589308544501705],
[-1.51411010715533001, -0.0691374478153118782, 3.25291691971042374],
[-1.51542765427875437, -0.0676530202803015263, 3.183074604188187],
[-1.51668988214301748, -0.0662309992202191056, 3.11616851331130906],
[-1.51790020682447002, -0.0648675302061739317, 3.05201729620048345],
[-1.5190617687636514, -0.0635590698227469081, 2.99045423516024211],
[-1.52017746001676168, -0.0623023549187197295, 2.9313257989257635],
[-1.52124994833664262, -0.0610943754352065369, 2.87449036422646698],
[-1.52228169850544326, -0.0599323503348599754, 2.81981708325516189],
[-1.20692661478575758, 0, 19.5813438709206515],
[-1.24990324722871771, 0, 17.296455230032695],
[-1.28389874772712798, 0, 15.4890861745029174],
[-1.31146219915246642, 0, 14.0237037432098983],
[-1.33426139732734894, 0, 12.8116323967860737],
[-1.35343340782868604, 0, 11.79241515524809],
[-1.36978012138920557, 0, 10.9234160527732413],
[-1.38388327758577723, 0, 10.1737044078766914],
[-1.39617516288904131, 0, 9.52029585709121662],
[-1.40698370865536981, 0, 8.94575452597772447],
[-1.41656218839297354, 0, 8.43661404606194942],
[-1.42510931969452947, 0, 7.9823087490032858],
[-1.4327832030263874, 0, 7.57443255083050904],
[-1.43971119471929376, 0, 7.2062140118662521],
[-1.44599703362420517, 0, 6.87213741737564199],
[-1.45172607320542579, 0, 6.56766459079954501],
[-1.45696918173726231, 0, 6.28902752300350443],
[-1.46178569006459225, 0, 6.03307164188939193],
[-1.46622564766661645, 0, 5.79713585905162176],
[-1.47033156925594688, 0, 5.57895970431389632],
[-1.47413980126660937, 0, 5.37661067051115982],
[-1.47768160136334625, 0, 5.18842681672727224],
[-1.48098399890817767, 0, 5.0129710178870317],
[-1.48407048654141493, 0, 4.84899419387316399],
[-1.48696158032429349, 0, 4.69540552713141057],
[-1.4896752766923651, 0, 4.55124816677764876],
[-1.49222742773665651, 0, 4.4156792751578422],
[-1.49463205135041344, 0, 4.2879535375632285],
[-1.49690158905955384, 0, 4.16740945356520776],
[-1.49904712155090936, 0, 4.0534578775270278],
[-1.50107854978010247, 0, 3.94557238922647091],
[-1.50300474790587524, 0, 3.84328116244409967],
[-1.50483369303485737, 0, 3.74616006652328215],
[-1.5065725757780446, 0, 3.65382678816273199],
[-1.50822789484998787, 0, 3.56593580164281088],
[-1.509805538334692, 0, 3.48217404797290442],
[-1.51131085376049112, 0, 3.40225720906368956],
[-1.51274870874131984, 0, 3.32592648347098585],
[-1.51412354363363066, 0, 3.25294578666494649],
[-1.51543941740909949, 0, 3.18309931201330976],
[-1.51670004774130263, 0, 3.11618939939861495],
[-1.51790884614041288, 0, 3.0520346671327534],
[-1.5190689488351492, 0, 2.99046836998718657],
[-1.52018324399059557, 0, 2.93133695203924249],
[-1.5212543957593716, 0, 2.87449876789070036],
[-1.52228486558768372, 0, 2.8198229498386036],
[-1.20743059350146775, 0.416355377069471622, 19.5895204911186376],
[-1.25035583846072496, 0.367751717194330863, 17.3027182939932693],
[-1.28430737000810002, 0.329309582053358973, 15.49401583561054],
[-1.31183303465082934, 0.298143871511552139, 14.027669154618529],
[-1.33459954451265195, 0.272367253982173874, 12.8148792998612802],
[-1.35374307404638183, 0.250693161860441105, 11.7951132655337538],
[-1.37006479646957002, 0.232214372282978004, 10.9256862159141139],
[-1.38414587635137809, 0.216272793179902834, 10.1756349191144277],
[-1.39641813827459282, 0.202379440329651134, 9.52195266751008518],
[-1.40720914050804935, 0.190163397365952641, 8.94718784606807027],
[-1.4167718530965967, 0.179338209252733749, 8.43786274534112302],
[-1.42530474494998538, 0.169679136303569705, 7.98340336308295306],
[-1.43296571120366001, 0.161007383281310118, 7.57539738338564028],
[-1.4398819374019336, 0.15317892951084397, 7.20706863348520876],
[-1.44615701933713425, 0.146076466599710547, 6.87289775351638088],
[-1.45187618983563338, 0.139603479791887836, 6.56834372420832224],
[-1.45711021479462222, 0.133679836219690124, 6.289636294136419],
[-1.46191833765896906, 0.128238450671839394, 6.0336191041100431],
[-1.4663505328742561, 0.123222733854979477, 5.79762962787678493],
[-1.47044925039558105, 0.118584616967385564, 5.5794062283154906],
[-1.47425078049049718, 0.114283006239573437, 5.37701544357192951],
[-1.4777863318809763, 0.110282562080669888, 5.18879454589551781],
[-1.4810828910932865, 0.106552725977934276, 5.01330575726180783],
[-1.48416391312167373, 0.103066938411227341, 4.84929945224824621],
[-1.48704988081297906, 0.099802005423689874, 4.6956843551846088],
[-1.48975876119019857, 0.0967375818954674394, 4.55150322818174313],
[-1.49230638020768458, 0.0938557471828732476, 4.41591290495418676],
[-1.49470673245636809, 0.0911406544180712319, 4.28816779037025064],
[-1.49697223862187756, 0.0885782389717087448, 4.16760614361889559],
[-1.49911396069742442, 0.0861559747527255448, 4.05363861211573617],
[-1.50114178282330046, 0.0838626694314693055, 3.94573859675063021],
[-1.5030645639920035, 0.0816882915213045513, 3.84343411607737817],
[-1.50489026759644684, 0.0796238236823516715, 3.74630090425464646],
[-1.50662607181718244, 0.0776611377225351757, 3.65395652984527963],
[-1.50827846407545429, 0.0757928876419826369, 3.5660553635552823],
[-1.50985332217242463, 0.0740124177535502331, 3.48228425530453833],
[-1.51135598425381734, 0.0723136834571204562, 3.40235880665751722],
[-1.51279130935511441, 0.0706911826801455284, 3.32602014510084754],
[-1.51416372997443305, 0.0691398963458645238, 3.25303212307292577],
[-1.51547729787136309, 0.0676552365121144139, 3.18317887789498322],
[-1.51673572408884394, 0.0662330010519145834, 3.11626269949258106],
[-1.51794241403052732, 0.0648693339329285135, 3.05210216154428648],
[-1.51910049829212901, 0.0635606903051099958, 2.99053047885542522],
[-1.52021285983433918, 0.0623038057309155333, 2.931394059639576],
[-1.52128215799413824, 0.061095668995748538, 2.87455122624996839],
[-1.52231084975530973, 0.0599334980218625968, 2.81987108192863545],
[-1.20808031066302268, 0.833158834940015636, 19.6000615919638683],
[-1.25094122560153886, 0.735847779765611221, 17.3108190189860025],
[-1.28483768675147547, 0.658891121411013025, 15.5004136311940801],
[-1.3123160051878322, 0.596507275085378263, 14.0328336463835246],
[-1.33504154280948617, 0.544914915432443414, 12.8191233855482309],
[-1.35414936204812353, 0.501536800758564327, 11.7986532378452242],
[-1.37043973994270707, 0.464555844048375299, 10.9286762312380272],
[-1.38449312246630107, 0.432654100770719141, 10.1781877206311666],
[-1.39674075674810938, 0.404852393260321575, 9.52415255144906503],
[-1.40750973449097461, 0.380408036348912071, 8.94909905510815662],
[-1.41705264616293869, 0.358747505357705998, 8.43953506354003125],
[-1.42556764956689852, 0.339420868944499576, 7.98487594191935202],
[-1.43321238293276876, 0.322070198411858133, 7.57670141763896332],
[-1.44011381809852046, 0.306407195340110738, 7.20822927037610484],
[-1.44637537183090648, 0.292197044814324569, 6.87393547925698467],
[-1.45208212597748609, 0.279246562687978095, 6.56927538723468363],
[-1.4573047183023482, 0.267395361156394173, 6.29047587120417262],
[-1.46210228285653554, 0.2565091724309711, 6.03437828143859623],
[-1.46652470016579506, 0.246474739523663056, 5.79831824729417278],
[-1.47061433912473394, 0.237195861149150672, 5.58003263353376955],
[-1.47440741969169276, 0.22859029762661906, 5.37758675166621369],
[-1.47793508931996831, 0.220587326764174391, 5.18931686212720233],
[-1.48122428091057157, 0.213125795814470692, 5.01378434653542371],
[-1.48429840232798305, 0.206152555878886534, 4.84973887705080564],
[-1.48717789483622664, 0.199621193937748548, 4.69608858738553447],
[-1.48988068863424128, 0.193490998523927427, 4.55187574027539288],
[-1.49242257695286096, 0.187726110308535948, 4.41625674500830812],
[-1.49481752520743072, 0.182294820147247627, 4.28848564396400089],
[-1.49707792799000217, 0.17716898556094704, 4.16790038532127838],
[-1.49921482388737815, 0.172323542975560712, 4.05391134850006551],
[-1.50123807598480852, 0.167736097875397588, 3.94599170251872788],
[-1.50315652428416913, 0.163386578726540138, 3.8436692645418562],
[-1.50497811500614742, 0.15925694338689389, 3.74651959317667904],
[-1.50671001076561373, 0.155330928944908658, 3.65416010342897613],
[-1.50835868484174407, 0.151593837672537091, 3.5662450312464351],
[-1.50993000215863193, 0.14803235315280705, 3.4824611079197858],
[-1.51142928911202912, 0.144634381733208528, 3.40252383027373062],
[-1.51286139399422681, 0.14138891532656328, 3.32617423305740045],
[-1.51423073946167741, 0.138285912279605266, 3.25317608637771372],
[-1.51554136824149621, 0.135316193592990741, 3.18331345427510692],
[-1.51679698307203426, 0.132471352233365453, 3.11638856128992225],
[-1.5180009817086304, 0.129743673650310282, 3.05221992262354958],
[-1.51915648769151912, 0.127126065915608294, 2.9906407006646849],
[-1.52026637746259508, 0.124611998152671724, 2.93149725654160198],
[-1.52133330432673386, 0.122195446130661356, 2.87464787022380852],
[-1.5223597196778853, 0.119870844069124849, 2.81996160672616192],
[-1.20887607855908685, 1.25056146057836592, 19.6129722400707038],
[-1.25165964918824102, 1.10440557281315388, 17.320760733619629],
[-1.28548988860055591, 0.988838375846581741, 15.5082818611938862],
[-1.31291126591656115, 0.895166772215837181, 14.0391988775850471],
[-1.33558752163680561, 0.817706645900085238, 12.824365896533001],
[-1.35465238228985485, 0.752584656827697374, 11.8030360345810497],
[-1.37090504810182989, 0.697070363441608443, 10.9323868666425579],
[-1.38492510152734138, 0.649183641340941309, 10.181363441697096],
[-1.39714309575322093, 0.607453519892704685, 9.52689603698391707],
[-1.40788556178648849, 0.570764416940468422, 8.95148860568301252],
[-1.41740463393726213, 0.538254924279973013, 8.44163139579091037],
[-1.42589809613662744, 0.509249320048795528, 7.98672683609860901],
[-1.43352327788224021, 0.48321009366817097, 7.57834496902914534],
[-1.44040689419714818, 0.459704327935260004, 7.20969620978466086],
[-1.44665214671282549, 0.438379438397826005, 6.87525085887256981],
[-1.45234393584812138, 0.418945366110035022, 6.57045982515904914],
[-1.45755274539396051, 0.401161306071732282, 6.29154648355833412],
[-1.46233757794847574, 0.384825678407493654, 6.03534938969085921],
[-1.46674820118033011, 0.369768454079074826, 5.79920192147348956],
[-1.47082688658044747, 0.355845214495269568, 5.58083911400081067],
[-1.47460976962344081, 0.342932504563590979, 5.37832477990565128],
[-1.47812792414278538, 0.33092416212151915, 5.18999394260582481],
[-1.48140821860453387, 0.319728392504575631, 5.01440695578009432],
[-1.48447400424355713, 0.309265417550741051, 4.85031263192078921],
[-1.48734567235963455, 0.299465571616033821, 4.69661838151146327],
[-1.49004110890576591, 0.29026774848813619, 4.55236585545560235],
[-1.4925760677939619, 0.281618125998860713, 4.41671094274879827],
[-1.49496447938397869, 0.273469112082435128, 4.28890724115952438],
[-1.49721870691544967, 0.265778468683215974, 4.16829231718176985],
[-1.4993497608516182, 0.258508579457175591, 4.05427622115337005],
[-1.50136747897942469, 0.251625834465825371, 3.94633183720569436],
[-1.50328067848289848, 0.245100110622211736, 3.84398673492502008],
[-1.5050972849497013, 0.238904330944397031, 3.74681625697795972],
[-1.50682444229199541, 0.2330140890142261, 3.65443762937311245],
[-1.50846860679668682, 0.227407327657792008, 3.56650492209970427],
[-1.51003562791550383, 0.222064062928750555, 3.48270472026590427],
[-1.51153081792599164, 0.216966146113778707, 3.40275239155109599],
[-1.51295901221187346, 0.212097057786711257, 3.32638885628825465],
[-1.51432462160416814, 0.207441728986872381, 3.25337778294411484],
[-1.51563167797676135, 0.202986385443316264, 3.18350314503600984],
[-1.51688387408931358, 0.198718411452748517, 3.11656708628393897],
[-1.5180845985069249, 0.194626230577810894, 3.05238804956200083],
[-1.51923696629186922, 0.19069920078977437, 2.99079913238629436],
[-1.52034384605282025, 0.186927522055674589, 2.93164663757316335],
[-1.52140788384621795, 0.183302154680267254, 2.87478879256885778],
[-1.52243152434889129, 0.179814746970341527, 2.82009461498485603],
[-1.20981813158097906, 1.66871466424962622, 19.6282562382362293],
[-1.25251126812880664, 1.47354266838683112, 17.3325456369001003],
[-1.28626408389436331, 1.31924521425062924, 15.5176218326230266],
[-1.3136188907232822, 1.19419899156662024, 14.0467656383023698],
[-1.33623753113498611, 1.09080614786529484, 12.8306073142655297],
[-1.35525216844492702, 1.00389049514439055, 11.8082619491358933],
[-1.37146074319891431, 0.929803893694179262, 10.9368182995777818],
[-1.38544182791978532, 0.865901142449865824, 10.1851621880665473],
[-1.39762516434606665, 0.810217486577429979, 9.53018318586702051],
[-1.4083366279520757, 0.761263042136257284, 8.95435653312772395],
[-1.41782781980918982, 0.717887503700855634, 8.44415176228131337],
[-1.42629608685351328, 0.679188612787387291, 7.98895605791164165],
[-1.43389839776367856, 0.644448718096035322, 7.58032804660461501],
[-1.44076116744845728, 0.61308985848870523, 7.21146946047339465],
[-1.44698734615310043, 0.584641351981050716, 6.87684390267710821],
[-1.45266162231239093, 0.558716008581688683, 6.5718970509421144],
[-1.45785429982491999, 0.534992403605475286, 6.29284814740940313],
[-1.4626242277170598, 0.513201483409494674, 6.03653244860418159],
[-1.46702104181788684, 0.493116316577440927, 5.80028067374214906],
[-1.47108689983715757, 0.47454416123779275, 5.58182569655953831],
[-1.47485783856512054, 0.457320260020192437, 5.37922955848751272],
[-1.47836484584450711, 0.441302939058061727, 5.19082582067045095],
[-1.48163471488177434, 0.426369702124251571, 5.01517362123650923],
[-1.48469073077107683, 0.412414091880854594, 4.85102075574855185],
[-1.48755322645821142, 0.39934314804247284, 4.69727377884958752],
[-1.49024003622212753, 0.387075334083669453, 4.55297361715916171],
[-1.49276686805638303, 0.375538834731165561, 4.41727554352533502],
[-1.49514761138200125, 0.364670149117561349, 4.28943262899531508],
[-1.49739459282540088, 0.354412921378793155, 4.16878198771805408],
[-1.49951879000788724, 0.344716963220204009, 4.05473327987764964],
[-1.50153001117422469, 0.335537432664631263, 3.9467590517177249],
[-1.50343704686330804, 0.326834140622458302, 3.84438657907166537],
[-1.50524779856927005, 0.318570962660162993, 3.74719094829016708],
[-1.50696938836508121, 0.310715337807233261, 3.65478916095758111],
[-1.5086082526959752, 0.303237839737884485, 3.56683508991686615],
[-1.51017022294694248, 0.296111808420969125, 3.48301514655164901],
[-1.5116605949101658, 0.289313032518692059, 3.40304454500111486],
[-1.51308418889654095, 0.282819474560101147, 3.32666406951318949],
[-1.51444540192909227, 0.276611032315815963, 3.25363726761478489],
[-1.51574825320892903, 0.270669330930165875, 3.18374800506607691],
[-1.51699642384416178, 0.264977541282823026, 3.11679832933920586],
[-1.51819329166895645, 0.259520220798112156, 3.0526065971377947],
[-1.51934196184633619, 0.25428317353076757, 2.9910058286556529],
[-1.52044529383858973, 0.249253326858785207, 2.93184225717646063],
[-1.52150592523852102, 0.244418622528276508, 2.87497404748885188],
[-1.52252629287972563, 0.239767920138539509, 2.82027016062957081],
[-1.21090662738643973, 2.08777004721799964, 19.6459161443213794],
[-1.25349616071380909, 1.84337670693207234, 17.3461748122307995],
[-1.28716029956222022, 1.65020551225925671, 15.5284338703596045],
[-1.31443887302566531, 1.49368053752916508, 14.0555338581494436],
[-1.33699154288145006, 1.36427708457290819, 12.8378473658310668],
[-1.355948678047175, 1.25550803522886589, 11.8143306115036282],
[-1.37210677402309678, 1.1628023508670311, 10.9419701216587626],
[-1.38604324533753953, 1.08284628541995276, 10.1895835458017547],
[-1.39818690366266996, 1.01317891569758678, 9.53401359671429205],
[-1.40886287333970861, 0.951934373878181361, 8.95770245819368682],
[-1.41832214458823747, 0.897672243410276738, 8.44709581049070479],
[-1.42676156385033326, 0.849262835625198442, 7.99156328323311715],
[-1.43433768663050576, 0.805807689118261727, 7.58265035460284231],
[-1.44117658423069428, 0.76658328948441179, 7.21354875404831564],
[-1.44738091911886335, 0.731000464201446154, 6.87871436813560777],
[-1.45303513708840493, 0.698574585138656112, 6.57358684615475397],
[-1.4582093361518671, 0.668903365207278555, 6.29438066660049067],
[-1.46296218959064683, 0.641650083153792461, 6.03792728247718635],
[-1.46734318237212746, 0.61653074889585191, 5.80155434710996598],
[-1.47139434201870678, 0.593304170168833411, 5.58299224128872318],
[-1.47515159241516702, 0.571764183106654, 5.38030096303361383],
[-1.47864582302813674, 0.551733516055274986, 5.19181238608013729],
[-1.48190374096996447, 0.533058899629483607, 5.0160842455134409],
[-1.48494855567657535, 0.515607137387699699, 4.85186316281825469],
[-1.48780053334617213, 0.499261923941668573, 4.69805470429110095],
[-1.49047744915365477, 0.48392124972521261, 4.553698959914251],
[-1.49299495857394304, 0.469495269991805964, 4.41795049062289458],
[-1.49536690420683982, 0.455904543965499898, 4.29006175871535422],
[-1.49760557080609202, 0.443078571244405972, 4.16936935540986031],
[-1.49972189843462012, 0.430954568515695413, 4.05528248973269356],
[-1.50172566155351839, 0.419476441774725928, 3.94727331710017015],
[-1.50362562023184365, 0.408593918541261814, 3.84486877347327383],
[-1.50542964841250781, 0.398261811749340611, 3.74764364856129539],
[-1.50714484319571973, 0.388439392576216458, 3.65521468414219708],
[-1.50877761833858659, 0.379089853853916192, 3.56723552476535133],
[-1.51033378456794765, 0.370179849158810748, 3.4833923805844087],
[-1.51181861882638047, 0.361679095413009632, 3.4034002878364209],
[-1.51323692419059674, 0.353560029016494581, 3.32699987304521416],
[-1.51459308189584907, 0.345797507282157357, 3.25395454352510027],
[-1.51589109665368604, 0.338368548360197752, 3.18404804006946085],
[-1.51713463625040745, 0.331252103984805091, 3.1170822984970159],
[-1.51832706625113145, 0.324428860310070843, 3.05287557551776656],
[-1.5194714805008116, 0.317881062866278596, 2.9912608015716815],
[-1.52057072800449355, 0.311592362296002678, 2.93208412920538564],
[-1.52162743667844302, 0.305547678047880189, 2.87520365043055204],
[-1.52264403438904972, 0.299733077635639722, 2.82048826055136992],
[-1.21214164873453023, 2.50787927324385596, 19.6659533010205685],
[-1.25461432626255687, 2.2140252816398065, 17.3616482501921467],
[-1.28817848261752155, 1.98181305018080267, 15.540717335167793],
[-1.31537112713663107, 1.79368790064086103, 14.0655026208587479],
[-1.33784945114025589, 1.63818300139623196, 12.8460850359487839],
[-1.35674179363825509, 1.5074908818202839, 11.8212409982740567],
[-1.37284301695555744, 1.39611154266666837, 10.9478413470777927],
[-1.38672922775398444, 1.30005865101936058, 10.1946265884101503],
[-1.39882818781301399, 1.21637233722870786, 9.53838641110178287],
[-1.40946417391963164, 1.14280878966456623, 8.96152559228630707],
[-1.41888748726307679, 1.0776360662757547, 8.45046281971237612],
[-1.42729440989820833, 1.01949600707014865, 7.9945478554417484],
[-1.43484103152321674, 0.967308560577449628, 7.58531129586150055],
[-1.44165303614448548, 0.9202040656241397, 7.2159335479359612],
[-1.44783276192240695, 0.877474401165095119, 6.88086176246962022],
[-1.45346438125262201, 0.838537143030358978, 6.57552876326306457],
[-1.45861776019783163, 0.802908858824494476, 6.29614363461540982],
[-1.46335137407188376, 0.770184933722044107, 6.03953352193702919],
[-1.4677145379241443, 0.740024136768476115, 5.80302260582613272],
[-1.47174913266028584, 0.712136677093686887, 5.58433844287632652],
[-1.47549095502332905, 0.68627486280154848, 5.38153871580214194],
[-1.47897078370920654, 0.662225724048898456, 5.19295338608344537],
[-1.48221522889673341, 0.639805134775568329, 5.01713859853174782],
[-1.48524741484431, 0.618853089518462407, 4.85283964364060871],
[-1.48808753260945137, 0.599229878903134838, 4.69896096706541577],
[-1.49075329083515906, 0.580812970455256705, 4.55454170998663788],
[-1.49326028588092941, 0.563494447502237539, 4.41873562583004631],
[-1.49562230764692283, 0.547178893041557091, 4.29079448626754267],
[-1.49785159375944832, 0.531781630920514337, 4.17005428913503273],
[-1.49995904201159624, 0.517227255866067659, 4.05592373141641449],
[-1.5019543888444804, 0.503448398495356608, 3.94787452486775425],
[-1.50384636003788841, 0.490384682621050616, 3.84543321955340511],
[-1.5056427985310501, 0.477981840803507974, 3.74817426830084166],
[-1.50735077332303002, 0.4661909608215557, 3.6557141177756991],
[-1.50897667264139201, 0.454967840997404616, 3.56770615315464745],
[-1.51052628396834132, 0.444272436461276843, 3.48383635591717944],
[-1.51200486303738257, 0.434068381733219877, 3.4038195600913328],
[-1.51341719353424153, 0.424322577626422892, 3.32739621288719922],
[-1.51476763893102961, 0.415004832583843797, 3.25432956217830771],
[-1.51606018763713601, 0.406087550259947139, 3.1844032066217518],
[-1.51729849245085924, 0.397545456537343023, 3.11741895501366484],
[-1.51848590513394033, 0.38935536029075396, 3.0531949502799951],
[-1.51962550679811392, 0.381495943129233628, 2.9915640207050731],
[-1.52072013468381284, 0.373947574102576952, 2.9323722269210406],
[-1.52177240582116302, 0.366692145981003192, 2.87547757806769999],
[-1.52278473798851954, 0.359712930233508588, 2.8207488945810959],
[-1.21352320596701979, 2.92919394543763412, 19.6883678761200933],
[-1.25586568738059823, 2.58560582696005525, 17.3789648797815133],
[-1.28931850222926703, 2.3141614142576592, 15.5544706486889783],
[-1.31641549017401527, 2.09429737073138789, 14.0766701847016851],
[-1.33881107462737403, 1.912587249467677, 12.8553185839220276],
[-1.35763132440244494, 1.75989245755872514, 11.8289914468768576],
[-1.3736692774855781, 1.62977710888119454, 10.9544304246943138],
[-1.38749958082941682, 1.51757766653217496, 10.2002898871912624],
[-1.39954882518891899, 1.41983214149600467, 9.54330032248385862],
[-1.41014034249671205, 1.33391654019959249, 8.96582474519868988],
[-1.41952366613359127, 1.25780578011837196, 8.4542517077956294],
[-1.42789444946097333, 1.18991204121747751, 7.99790879132604537],
[-1.43540826345192274, 1.12897279147904039, 7.58830997701269183],
[-1.44219036092903985, 1.07397154537268924, 7.21862302996928928],
[-1.44834271907583023, 1.02408071045765769, 6.88328534671897074],
[-1.45394920603749345, 0.978619657909851393, 6.57772212923692923],
[-1.45907942979687522, 0.937023487025516189, 6.29813643779293386],
[-1.46379164543312124, 0.898819431406302605, 6.04135060680950442],
[-1.46813497899208034, 0.863608811171812096, 5.80468493794767948],
[-1.47215114831528426, 0.831053067597337991, 5.58586383292210709],
[-1.47587580875766866, 0.800862841961525684, 5.38294238775568346],
[-1.47933961584557871, 0.772789351561123161, 5.19424842727869152],
[-1.48256907198465981, 0.746617518265656033, 5.01833631919987333],
[-1.48558720673920397, 0.722160447720446408, 4.85394986646385718],
[-1.488414127637669, 0.699254959292864742, 4.69999226210418364],
[-1.49106746936944212, 0.677757940622473742, 4.55550158661248439],
[-1.49356276258873399, 0.657543354598813745, 4.41963069055345503],
[-1.49591373862526411, 0.638499766486393283, 4.29163057331211473],
[-1.49813258273095751, 0.620528288705130437, 4.17083656908234079],
[-1.50023014572561642, 0.603540863222949198, 4.05665680209139445],
[-1.50221612180196318, 0.58745881858177329, 3.94856248775320529],
[-1.5040991986388943, 0.572211651656101106, 3.84607974434565048],
[-1.50588718472711669, 0.557735994343376618, 3.7487826476936954],
[-1.50758711784351052, 0.543974733242503894, 3.65628731415140074],
[-1.50920535785183718, 0.530876256530797064, 3.56824683853914282],
[-1.51074766640998126, 0.518393807101464099, 3.48434694630341202],
[-1.51221927568940639, 0.506484924872049991, 3.40430224503284951],
[-1.51362494783426826, 0.495110964244854179, 3.32785298110291228],
[-1.51496902658413091, 0.484236675163877528, 3.25476222378006197],
[-1.51625548223924733, 0.473829838199764763, 3.18481341247127592],
[-1.51748795094904754, 0.463860945704949068, 3.11780821363112137],
[-1.51866976914268759, 0.454302922393111708, 3.05356464265655791],
[-1.51980400378831448, 0.44513087977063609, 2.99191541331548905],
[-1.52089347905898298, 0.436321899730036045, 2.93270648318545657],
[-1.52194079989336517, 0.427854843343516367, 2.8757957684732065],
[-1.52294837286508211, 0.419710181498251012, 2.8210520056418158],
[-1.25725009279698985, 2.95823551246350558, 17.3981226076759903],
[-1.29058015234189893, 2.64734390223979243, 15.5696913250477795],
[-1.31757172448871951, 2.39558495361585422, 14.0890340084532415],
[-1.33987615876637456, 2.18755291227163218, 12.8655455652975359],
[-1.35861700826552423, 2.01276593817114735, 11.8375796738690582],
[-1.37458529216604775, 1.86384446395396308, 10.9617352536292447],
[-1.38835404373506988, 1.73544255466883746, 10.2065715246460993],
[-1.40034856016715259, 1.62359253352713351, 9.54875358780645378],
[-1.41089113030220203, 1.52528770843481287, 8.97059833523224448],
[-1.42023044029993795, 1.4382080408100637, 8.4584610400141873],
[-1.42856145008927404, 1.36053471437073714, 8.00164478889289654],
[-1.43603915870266241, 1.29082171568778659, 7.59164521538879455],
[-1.44278834368703524, 1.22790497335066817, 7.22161612451861679],
[-1.44891058444048348, 1.17083683591150178, 6.88598414120451974],
[-1.45448941391075226, 1.11883801070057864, 6.58016605043277814],
[-1.45959415580817153, 1.07126176573076815, 6.3003582597040797],
[-1.46428282266969934, 1.02756689310154337, 6.04337779005345155],
[-1.4686043324277267, 0.987297030203513781, 5.80654065888441551],
[-1.47260022339900831, 0.950064660257424864, 5.58756778313897939],
[-1.47630599529884576, 0.915538601735718438, 5.38451140145819362],
[-1.47975216808545351, 0.88343413020027084, 5.19569697824034282],
[-1.48296512555596594, 0.853505108233649445, 5.01967691779915004],
[-1.48596779307092208, 0.825537662817178886, 4.85519337944353335],
[-1.48878018625007602, 0.799345066442993968, 4.70114817201785762],
[-1.4914198584173215, 0.774763562814193008, 4.55657820380097256],
[-1.49390226794228442, 0.751648939845174668, 4.42063532746443322],
[-1.49624108172421577, 0.729873698402056403, 4.29256968872709344],
[-1.49844842740461859, 0.709324699363133138, 4.17171588812942673],
[-1.50053510413729918, 0.689901197304505387, 4.05748141664712225],
[-1.50251075964877634, 0.671513188669844108, 3.9493369408645207],
[-1.50438403971565093, 0.654080017267674285, 3.84680810155550956],
[-1.50616271494515774, 0.637529191511177817, 3.74946855757511388],
[-1.50785378878042975, 0.621795376816672074, 3.65693405990305287],
[-1.50946358989620277, 0.606819533626614116, 3.56885738214152459],
[-1.51099785155500577, 0.592548177080394423, 3.48492396645406943],
[-1.5124617800215101, 0.578932738764214472, 3.40484816985703631],
[-1.51386011375555141, 0.565929014488056659, 3.32837001645788266],
[-1.5151971748020745, 0.553496684859205335, 3.2552523778282012],
[-1.51647691355225644, 0.54159889769723446, 3.18527851708186027],
[-1.51770294785243309, 0.530201903179889245, 3.11824994307672387],
[-1.51887859727621577, 0.519274734111526803, 3.05398452999341696],
[-1.52000691324376325, 0.508788924935150821, 2.99231486477485564],
[-1.52109070556356252, 0.498718264119200727, 2.9330867908510494],
[-1.52213256588288148, 0.489038575384058427, 2.87615812147749317],
[-1.52313488845890177, 0.479727523924063648, 2.82139750007839929],
[-1.25876732074232711, 3.33203114314145443, 17.419118364978381],
[-1.29196315480691193, 2.98145343416979669, 15.5863760086321044],
[-1.3188395205773078, 2.69762629208994786, 14.1025907825368932],
[-1.34104437840282387, 2.46314273584192156, 12.8767628579291564],
[-1.35969851442829293, 2.26616419071382191, 11.8470027970094787],
[-1.37559073098113949, 2.09835874217461926, 10.9697532021462045],
[-1.38929229136832366, 1.95369228473670553, 10.2134691107624427],
[-1.40122707518421552, 1.82768748937071579, 9.55474404165468627],
[-1.41171622893896687, 1.71695217033117586, 8.9758444015646468],
[-1.42100751148806648, 1.61886931688513891, 8.46308903993842065],
[-1.42929512413541704, 1.53138763300223246, 8.00575423697278232],
[-1.43673344044966766, 1.45287651281427066, 7.59531554754571481],
[-1.44344671840178163, 1.38202345378893976, 7.22491150008551219],
[-1.44953610265570076, 1.31776009332336419, 6.88895693231825401],
[-1.45508475992194519, 1.25920796531706802, 6.58285941868533797],
[-1.46016170338594553, 1.20563810371316604, 6.30280808663382874],
[-1.46482468069850258, 1.15644053739355468, 6.04561414270741704],
[-1.46912238254839833, 1.11110096159122573, 5.80858891587412973],
[-1.47309615125822901, 1.06918269042935976, 5.58944950941126439],
[-1.4767813166513033, 1.03031254650090931, 5.38624503476308636],
[-1.4802082507239096, 0.994169720635461607, 5.19729837287760699],
[-1.48340320783776791, 0.960476897161144483, 5.02115977904798338],
[-1.48638899965096871, 0.928993124781855473, 4.85656961344292171],
[-1.48918554150740179, 0.899508045205813289, 4.7024281696592789],
[-1.49181029796698317, 0.87183718712356173, 4.55777107268484194],
[-1.49427864854925208, 0.845818102952406781, 4.42174908265674915],
[-1.49660418987688937, 0.821307177371463526, 4.29361141059192963],
[-1.49879898675869483, 0.798176975196938088, 4.17269185366843764],
[-1.50087378200325339, 0.776314025174096645, 4.05839720938236059],
[-1.50283817266607667, 0.755616958323725663, 3.95019754323680994],
[-1.50470075883264687, 0.735994936385533816, 3.84761797299326247],
[-1.50646926980377982, 0.717366318954180793, 3.75023170075491175],
[-1.50815067158881044, 0.69965752805666459, 3.65765407722956359],
[-1.50975125885911532, 0.682802076870956509, 3.56953752408650082],
[-1.51127673392122097, 0.666739735553479873, 3.48556717308791431],
[-1.51273227479809003, 0.651415812114010007, 3.40545710666268597],
[-1.51412259413174688, 0.636780530242323506, 3.32894710532236848],
[-1.51545199031928113, 0.622788489172307336, 3.25579982395078416],
[-1.51672439205116238, 0.609398193234842123, 3.18579833241103572],
[-1.5179433972242331, 0.59657164083048464, 3.11874396678603372],
[-1.51911230704103462, 0.584273964246551758, 3.05445444642225139],
[-1.52023415597635259, 0.572473113129170375, 2.99276221919194052],
[-1.52131173818352328, 0.561139575559496229, 2.93351300334158882],
[-1.52234763082466307, 0.550246131623372214, 2.87656449920885127],
[-1.52334421473470383, 0.539767635142217928, 2.82178524816015042],
[-1.26041708282185505, 3.70710906712310262, 17.4419481608141993],
[-1.29346716298340403, 3.31658246918821531, 15.6045205175305544],
[-1.32021850043901279, 3.00049659190684714, 14.1173364649217152],
[-1.34231534094011673, 2.73941906314309547, 12.8889666920882622],
[-1.36087544629997148, 2.52013971537031756, 11.8572573608173464],
[-1.37668520009376283, 2.33336474592163157, 10.9784811295612776],
[-1.39031393692891148, 2.17236552645142389, 10.2209798019539484],
[-1.40218399315462539, 2.03215071471684805, 9.56126911274277091],
[-1.4126152726539174, 1.90893955764042866, 8.98156061869821798],
[-1.42185452618729702, 1.79981585593328752, 8.46813360216611777],
[-1.43009513076599926, 1.70249420329285606, 8.01023522649288822],
[-1.43749078065218128, 1.61515818050806881, 7.59931923929046338],
[-1.44416516972643816, 1.53634592524089153, 7.22850757825839452],
[-1.45021897082805196, 1.46486764730106267, 6.89220228055149953],
[-1.45573495329855729, 1.39974514740245892, 6.58580091852856953],
[-1.46078179348880433, 1.34016678301725167, 6.30548471409616962],
[-1.46541695178624343, 1.28545346647916081, 6.04805855978445095],
[-1.46968887248948499, 1.23503266595755035, 5.81082869333027485],
[-1.47363868545282717, 1.18841829472002192, 5.59150807665770344],
[-1.4773015363585198, 1.14519498942520936, 5.3881424252456096],
[-1.4807076368558294, 1.10500569914614144, 5.19905181448259501],
[-1.48388310105650967, 1.06754179932122995, 5.02278416580638609],
[-1.48685061743212765, 1.03253515099453308, 4.85807788542927721],
[-1.48962999269955088, 0.999751672952718828, 4.70383162124254195],
[-1.49223859527291114, 0.968986100826565777, 4.55907960438899185],
[-1.49469171927305489, 0.940057685077392957, 4.4229714082891336],
[-1.4970028852168018, 0.912806637327318038, 4.29475522862503123],
[-1.49918408987443685, 0.887091177440495149, 4.17376398985752939],
[-1.50124601504603605, 0.862785066118411548, 4.05940373608712601],
[-1.50319820292671658, 0.839775532361294097, 3.95114387975988945],
[-1.50504920413671761, 0.817961523987346451, 3.84850897036046513],
[-1.50680670326157595, 0.797252223947923611, 3.75107171367498093],
[-1.50847762579010536, 0.777565786489745059, 3.65844702543425049],
[-1.51006822958871267, 0.758828256074729923, 3.5702869448316048],
[-1.51158418345922452, 0.740972638950600282, 3.48627626626257436],
[-1.51303063485932232, 0.723938102803503458, 3.40612877369048395],
[-1.51441226849048016, 0.707669284341346017, 3.32958398282603296],
[-1.5157333571588012, 0.692115688200365731, 3.25640431298272093],
[-1.51699780607194001, 0.677231163424973248, 3.18637262391449916],
[-1.5182091915398741, 0.66297344608728126, 3.11929006384065843],
[-1.51937079488705873, 0.649303758498743, 3.05497418373658558],
[-1.52048563225345035, 0.636186457009811801, 2.99325728023116433],
[-1.52155648085424877, 0.623588721661577305, 2.93398493541772165],
[-1.52258590217983336, 0.611480281999933095, 2.87701472680968529],
[-1.52357626254379319, 0.599833174229839949, 2.82221508475139693],
[-1.26219902833263098, 4.08358509166439365, 17.4666071420736131],
[-1.29509176575962193, 3.65282292906560002, 15.6241198920487729],
[-1.32170822133227239, 3.30427055333068109, 14.1332663212916856],
[-1.3436885898547517, 3.01644377314332024, 12.9021526842175636],
[-1.36214734479229249, 2.7747445912435591, 11.868339365273588],
[-1.37786824493612614, 2.56890689733854005, 10.9879154108889381],
[-1.39141853482192523, 2.39150060672518405, 10.2291003224018109],
[-1.40321888020073682, 2.23701560611711647, 9.56832584252821228],
[-1.41358784090774248, 2.1012792229709687, 8.98774431279855257],
[-1.42277107807182879, 1.98107365301140725, 8.47359230674424602],
[-1.43096107824609753, 1.87387760246512758, 8.0150855632712954],
[-1.43831080221115037, 1.77768750835085987, 7.60365429708254137],
[-1.4449433350223051, 1.69089113672822933, 7.23240254391483539],
[-1.45095884045936718, 1.6121764893992967, 6.89571852965790111],
[-1.45643965927133356, 1.54046502422929499, 6.58898903545348436],
[-1.46145410460918557, 1.47486194043128815, 6.3083867542992822],
[-1.46605932718935161, 1.41461864904235668, 6.05070976704026187],
[-1.47030350576056201, 1.35910408095514135, 5.81325881899449115],
[-1.47422754123345912, 1.30778249625548804, 5.59374240443824711],
[-1.47786638090706779, 1.26019613875796477, 5.39020257532383962],
[-1.48125006371088608, 1.2159515448372944, 5.20095638041770059],
[-1.48440455270801031, 1.17470863883367693, 5.02454922337495535],
[-1.48735240371741284, 1.13617197506191236, 4.85971740242390737],
[-1.49011330649697915, 1.10008364909172962, 4.70535778997871645],
[-1.49270452595298586, 1.06621751853784685, 4.56050311338233705],
[-1.49514126427874383, 1.03437445956391061, 4.42430166568018191],
[-1.4974369600753854, 1.00437844883105099, 4.29600054704554069],
[-1.49960353688781467, 0.976073308033488907, 4.17493174027051328],
[-1.50165161086264831, 0.949319983878685658, 4.06050047649928736],
[-1.50359066516273288, 0.923994263507824654, 3.95217546345846849],
[-1.50542919718575008, 0.899984846143654815, 3.84948063736899648],
[-1.50717484340897934, 0.877191707804167775, 3.75198816838055382],
[-1.50883448572914602, 0.855524708403124001, 3.6593125027606348],
[-1.51041434242033068, 0.834902400332846129, 3.57110526687821883],
[-1.51192004624453213, 0.815251005327933931, 3.48705089097084464],
[-1.51335671178332865, 0.796503532517541357, 3.40686283681366575],
[-1.51472899368695524, 0.778599015446565845, 3.33028033425099279],
[-1.51604113723870504, 0.761481849754600648, 3.257065548268542],
[-1.51729702239205788, 0.745101216353242779, 3.18700111176546086],
[-1.51850020224281224, 0.729410577496547363, 3.11988797011023244],
[-1.51965393673978766, 0.71436723521955825, 3.05554349246183765],
[-1.52076122230768851, 0.699931943321530303, 2.99379981211618151],
[-1.52182481794898616, 0.686068565468805125, 2.93450236411884413],
[-1.52284726830364336, 0.67274377314618794, 2.87750859332074027],
[-1.52383092407237219, 0.659926778141578518, 2.82268681014193357],
[-1.26411274896949033, 4.46157440812761319, 17.4930896585336804],
[-1.2968364919434312, 3.99026612905671163, 15.6451684476765216],
[-1.32330817988196947, 3.60902230876900765, 14.150374968965151],
[-1.34516360854534311, 3.29427822500900369, 12.9163158738894683],
[-1.36351369193148786, 3.03003042651441801, 11.8802442972919469],
[-1.37913935360349238, 2.80502919376981508, 10.9980519639058141],
[-1.39260558385012856, 2.61113546971899169, 10.2378269875232117],
[-1.40433124865895897, 2.44231521505905924, 9.57591090571072634],
[-1.41463346120895483, 2.29400020736587296, 8.99439247971369227],
[-1.42375671067535947, 2.16266842127902725, 8.47946243509818665],
[-1.4318925264659621, 2.04556075209423138, 8.02030278216946435],
[-1.43919308135859247, 1.94048505351720357, 7.60831848066536853],
[-1.44578080662039099, 1.84567762547283953, 7.23659435654142413],
[-1.45175531958935777, 1.75970341768407024, 6.89950381683629121],
[-1.45719850110594784, 1.68138288589147855, 6.59242206509950446],
[-1.46217827470147732, 1.60973755013006681, 6.31151264446830318],
[-1.46675145898523462, 1.54394890419498387, 6.05356632853116583],
[-1.47096594798596225, 1.48332700637239889, 5.81587797081844649],
[-1.47486239719709777, 1.42728619083590136, 5.59615127323576278],
[-1.47847554130238401, 1.37532608493245023, 5.39242435800598141],
[-1.48183523415435081, 1.32701662760091144, 5.20301102738523991],
[-1.48496727698773312, 1.28198613840667619, 5.0264539843361753],
[-1.48789408352391672, 1.23991173626993079, 4.86148726595835345],
[-1.49063521825195799, 1.20051158516936241, 4.70700584018487422],
[-1.49320783523067568, 1.16353857290702023, 4.56204082127294086],
[-1.49562703821939325, 1.12877512318444806, 4.42573912881902309],
[-1.49790617811560911, 1.09602891081629927, 4.2973466878255735],
[-1.5000571000732581, 1.06512930182716548, 4.17619447091401064],
[-1.5020903499609719, 1.03592437928342895, 4.06168683710711154],
[-1.50401534775700285, 1.00827844542368927, 3.95329173809871426],
[-1.50584053389776051, 0.982069913411582895, 3.85053245216791407],
[-1.50757349337699287, 0.957189519604439987, 3.75298057478240832],
[-1.50922106144447676, 0.933538800893490839, 3.66025004850322855],
[-1.51078941401045008, 0.91102879236811074, 3.57199205674330056],
[-1.51228414527685295, 0.889578908986384032, 3.48789063898411378],
[-1.51371033465243898, 0.869115981618625466, 3.40765891126302689],
[-1.5150726046387315, 0.849573423161905761, 3.33103579664730454],
[-1.51637517107695419, 0.83089050469970116, 3.25778318717674464],
[-1.51762188690675637, 0.813011725128619633, 3.18768347227512905],
[-1.51881628039354988, 0.79588626046823574, 3.1205373795858744],
[-1.51996158862341479, 0.77946748134534094, 3.05616208310819015],
[-1.52106078693516178, 0.763712529005102225, 2.99438954080750452],
[-1.52211661485322036, 0.748581941731091938, 2.93506502987065598],
[-1.52313159899721318, 0.734039324817934213, 2.8780458527236501],
[-1.52410807337157417, 0.720051058285783196, 2.82320019102884157],
[-1.26615778386160738, 4.84119152652967433, 17.5213893325554793],
[-1.29870081496685397, 4.32900271655617974, 15.6676598318437108],
[-1.32501781648578132, 3.91482536688980787, 14.1686564240127275],
[-1.34673982446698948, 3.57298320776956357, 12.9314507635044613],
[-1.3649739147428388, 3.28604831326979685, 11.8929671645649186],
[-1.38049796050845286, 3.04177516722201435, 11.0088862782919836],
[-1.39387453065569056, 2.83130764039437155, 10.2471557292734765],
[-1.40552056032495365, 2.64808221510498454, 9.58402063236073332],
[-1.41575161217716139, 2.48713121058149955, 9.00150180445073644],
[-1.42481092028566225, 2.34462556502703912, 8.48574098727093862],
[-1.43288898967910061, 2.217566293550989, 8.02588416242877045],
[-1.44013715025019495, 2.10357111834298172, 7.61330931677209843],
[-1.4466771342786946, 2.00072369634287517, 7.24108076253325272],
[-1.45260797512676443, 1.90746501784322575, 6.90355608380952201],
[-1.45801106231645483, 1.82251382789556837, 6.59609812326819167],
[-1.46295390328542463, 1.74480740758812103, 6.31486065592469981],
[-1.46749296207281166, 1.67345688657425895, 6.05662665487068264],
[-1.47167582880889158, 1.60771309029542753, 5.8186846844922977],
[-1.47554289710072162, 1.5469401340572082, 5.59873333133781959],
[-1.47912867479751009, 1.49059478855563055, 5.39480652319557041],
[-1.48246281833594185, 1.43821019689307783, 5.20521459721687041],
[-1.4855709563644699, 1.38938290883007975, 5.0284973738811729],
[-1.48847535108562323, 1.34376246973007629, 4.86338647698462267],
[-1.49119543343453098, 1.30104299561402037, 4.70877484181843542],
[-1.4937482393078636, 1.26095630590923546, 4.56369186100227164],
[-1.49614876754921222, 1.22326628793331782, 4.42728298825096989],
[-1.49841027558863771, 1.18776424284465154, 4.29879289429545075],
[-1.50054452504680302, 1.15426501926677139, 4.17755147357704626],
[-1.50256198691220244, 1.12260378332520894, 4.0629621542654677],
[-1.50447201384756868, 1.09263330614627896, 3.9544920810909554],
[-1.50628298560960849, 1.06422167461548423, 3.85166383005065605],
[-1.50800243235251252, 1.03725035029537893, 3.75404838318442913],
[-1.50963713964244728, 1.01161251625524828, 3.66125914536995634],
[-1.51119323827149499, 0.987211663192433697, 3.57294682716953904],
[-1.51267628137751986, 0.963960375387635238, 3.48879505092217235],
[-1.51409131091499494, 0.941779284301192954, 3.40851656356701005],
[-1.51544291515366081, 0.920596163411102397, 3.33184996065325967],
[-1.51673527858691526, 0.900345142540178056, 3.25855684280887514],
[-1.5179722253936867, 0.88096602366597887, 3.18841933949879275],
[-1.51915725740478602, 0.862403683242891517, 3.12123794589061898],
[-1.52029358736771325, 0.844607548537618436, 3.05682962759191934],
[-1.52138416817539279, 0.827531137501259506, 2.99502615534109751],
[-1.52243171861888982, 0.811131653362523131, 2.93567263774667042],
[-1.52343874613698982, 0.795369626497223536, 2.87862622513033628],
[-1.52440756696336743, 0.780208597264715698, 2.82375496163883621],
[-1.26833362487980628, 5.22255022009331871, 17.5514991325279048],
[-1.30068415784809166, 4.66912261791622729, 15.691587083782748],
[-1.32683651996643381, 4.22175256352956296, 14.1881041510047083],
[-1.34841661350097275, 3.85261889571706551, 12.9475513602491361],
[-1.36652738936077256, 3.54284878723163299, 11.906502531374878],
[-1.38194345025172516, 3.27918784805494123, 11.0204134464989263],
[-1.3952247733698786, 3.05205419174660975, 10.257082122976998],
[-1.40678622989880298, 2.85434887225844047, 9.59265103141140152],
[-1.41694172679877428, 2.68070056421389769, 9.00906868187598953],
[-1.42593315902464846, 2.52697015523355439, 8.49242470026705121],
[-1.43394993941915616, 2.38991656569859368, 8.0318267440084874],
[-1.44114249973050468, 2.26696572991315337, 7.61862411374384685],
[-1.44763182780610022, 2.15604740311546905, 7.24585930832734171],
[-1.45351633534150948, 2.05547764593748816, 6.90787308866848715],
[-1.45887688903445034, 1.96387273523868355, 6.60001515664143135],
[-1.46378055370034921, 1.88008511484448526, 6.31842890381664546],
[-1.46828341631900772, 1.80315507267246589, 6.05988901208853559],
[-1.47243274393700041, 1.73227381639647127, 5.82167736153242643],
[-1.47626865181213862, 1.66675492946531811, 5.6014871022388002],
[-1.47982540675486729, 1.60601206934636775, 5.39734770448190115],
[-1.48313245546781936, 1.54954137138428871, 5.20756582311648408],
[-1.48621524327936028, 1.49690743927417569, 5.03067821556071237],
[-1.48909587147801026, 1.44773209727028762, 4.86541394118335901],
[-1.49179362918678393, 1.40168528916879054, 4.71066377538511283],
[-1.49432542685269865, 1.35847766077518095, 4.56545528139087597],
[-1.49670615194857604, 1.31785447341384065, 4.42893235529437135],
[-1.49894896269932998, 1.27959057791406239, 4.30033833506118768],
[-1.50106553207521176, 1.24348623958893323, 4.17900196947566371],
[-1.50306625160651586, 1.20936365071788643, 4.06432569759118234],
[-1.50496040253249852, 1.17706400198072525, 3.95577580665665129],
[-1.50675630023357887, 1.14644501104728835, 3.85287412641249372],
[-1.50846141668902378, 1.11737882717705461, 3.75519098704860133],
[-1.51008248476426465, 1.08975024673709853, 3.66233922207003326],
[-1.51162558739734143, 1.06345518711370768, 3.57396903954999523],
[-1.51309623417539152, 1.03839937639487645, 3.48976361852706685],
[-1.51449942733349419, 1.01449722405114451, 3.40943531368616748],
[-1.51583971884192925, 0.991670844102196636, 3.33272237250059655],
[-1.51712125995501856, 0.969849207277180936, 3.25938608588509693],
[-1.51834784435768766, 0.948967402723554843, 3.18920830701023217],
[-1.51952294585474057, 0.92896599309896799, 3.12198928395046016],
[-1.52064975139133729, 0.909790449550372804, 3.05754576081035978],
[-1.52173119006587765, 0.891390655268714971, 2.99570930931378809],
[-1.52276995869143628, 0.873720468101643721, 2.93632485887016648],
[-1.52376854437548692, 0.856737334187020805, 2.87924939810709501],
[-1.52472924451600522, 0.840401945796223404, 2.82435082497945089],
[-1.27063972215429954, 5.60576348009249692, 17.583411449223469],
[-1.30278589835398351, 5.01071499366916662, 15.7169426968089549],
[-1.3287636324160097, 4.5298760196000325, 14.2087111148121021],
[-1.35019330450912678, 4.13324480972181618, 12.9646112198274324],
[-1.36817344531688012, 3.80048179254688989, 11.9208445559554104],
[-1.38347516166454843, 3.51730973304546168, 11.0326281959859323],
[-1.3966556654284934, 3.27341171584803137, 10.2676014153766584],
[-1.40812762859039697, 3.06114701867477557, 9.60179781524321285],
[-1.4182031958377419, 2.87473620777920624, 9.01708923840077858],
[-1.42712283807844886, 2.70972690774388969, 8.49951006729000014],
[-1.43507480756219952, 2.56263358493249882, 8.03812734473827106],
[-1.44220858223905157, 2.43068862175121048, 7.6242599768929642],
[-1.4486443598230383, 2.31166653163250801, 7.25092735422063317],
[-1.45447989248965737, 2.20375741286311744, 6.91245241834731061],
[-1.45979549250720275, 2.10547426803923488, 6.60417095408306576],
[-1.46465775548461941, 2.01558406718066863, 6.32221535739003215],
[-1.46912236882711333, 1.93305574845672767, 6.06335153099260271],
[-1.47323625730645946, 1.85702049240310019, 5.82485427783772369],
[-1.47703924137611975, 1.7867410177936931, 5.60441099247955066],
[-1.480565332620283, 1.72158759607009637, 5.40004642633986887],
[-1.48384375571187554, 1.66101912952821884, 5.21006333628684626],
[-1.48689976195147544, 1.60456808843684384, 5.03299523739690002],
[-1.4897552823464395, 1.55182841911087421, 4.86756847461110898],
[-1.49242945597841192, 1.50244576105209249, 4.71267153716673004],
[-1.49493906058619253, 1.45610947459694073, 4.5673300519857376],
[-1.49729886584539007, 1.41254609985414126, 4.43068626654248998],
[-1.49952192506578319, 1.37151395585285063, 4.3019821081917744],
[-1.50161981747684581, 1.33279865456524771, 4.18054511315299315],
[-1.50360285059825571, 1.29620935396401338, 4.06577667360045591],
[-1.50548023016222188, 1.26157561186778366, 3.95714216922528239],
[-1.50726020349942935, 1.228744731113665, 3.85416263992653008],
[-1.5089501811008057, 1.19757950881016306, 3.75640772596787853],
[-1.51055684013463298, 1.16795631969172664, 3.66348965609971611],
[-1.51208621296851131, 1.13976347711194337, 3.57505810654112821],
[-1.51354376317062167, 1.1128998258607512, 3.49079578711655625],
[-1.51493445100884472, 1.08727352943218492, 3.41041463731895389],
[-1.5162627901026311, 1.06280102109997499, 3.33365253618358848],
[-1.51753289659125734, 1.03940609355565572, 3.26027044678624023],
[-1.51874853194682946, 1.01701910621439451, 3.19004992982581781],
[-1.51991314037027814, 0.995576292818959319, 3.12279097180880294],
[-1.52102988155343044, 0.97501915484194257, 3.05831008235422619],
[-1.52210165946349618, 0.955293928533574865, 2.99643862250031345],
[-1.52313114770222602, 0.936351115390712585, 2.93702133194220183],
[-1.52412081190601945, 0.918145067413264737, 2.87991502811960709],
[-1.52507292958212726, 0.900633619831964882, 2.8249874542062634],
[-1.27307548974330986, 5.99094348114498843, 17.6171181742419805],
[-1.30500537430637342, 5.35386820228255811, 15.743718682337148],
[-1.33079845417897946, 4.83926710610538002, 14.2304698338911315],
[-1.35206918402256471, 4.41491978456347756, 12.9826234914819754],
[-1.36991136995843488, 4.05899665172869728, 11.9359870289896985],
[-1.38509239197803602, 3.75618275790653877, 11.0455249224689123],
[-1.3981665195109334, 3.49541629877733406, 10.2787085535920983],
[-1.40954408784528007, 3.26850802978615684, 9.61145642508991571],
[-1.41953537136374508, 3.06926566781350285, 9.02555935441408153],
[-1.42837933104212089, 2.89292016413594055, 8.5069933576622514],
[-1.43626298950082765, 2.73573902762062326, 8.04478257809689623],
[-1.44333481482568105, 2.59475921766414608, 7.63021382444362928],
[-1.44971416862878688, 2.46759858490006234, 7.25628208872174607],
[-1.45549810554249781, 2.35232017057373399, 6.91729150159338602],
[-1.4607663516975975, 2.24733284876553485, 6.60856315840115016],
[-1.46558500685465365, 2.1513174412545375, 6.32621785068912423],
[-1.4700093363024187, 2.06317099831918416, 6.06701221693235127],
[-1.47408590334121992, 1.98196423978651426, 5.82821359262221783],
[-1.47785421717364751, 1.90690866732083575, 5.60750329984033247],
[-1.4813480199881548, 1.83733087750468838, 5.40290111166222431],
[-1.48459630215625338, 1.77265230108209382, 5.21270567286953135],
[-1.48762411027065045, 1.71237307657053295, 5.03544707829022364],
[-1.49045319572004775, 1.65605910635560827, 4.86984880962696032],
[-1.49310253934589832, 1.60333158587479052, 4.7147969447130551],
[-1.49558877895079245, 1.55385847163718704, 4.56931506815810273],
[-1.49792656001669489, 1.50734748177780631, 4.43254368860286174],
[-1.50012882525750335, 1.46354031732439349, 4.30372324563204423],
[-1.50220705509963914, 1.42220786281622646, 4.18217999659396611],
[-1.50417146852667472, 1.38314617795556316, 4.06731422955057731],
[-1.5060311917058371, 1.34617313225102753, 3.95859036702567835],
[-1.50779440026897027, 1.31112556545127856, 3.85552861590516605],
[-1.50946843992823143, 1.27785688036252387, 3.75769788881604683],
[-1.5110599291800515, 1.24623499313818686, 3.66470977669698073],
[-1.512574847125711, 1.21614058060358654, 3.57621339483742195],
[-1.51401860886546791, 1.18746557558075905, 3.49189095819216977],
[-1.51539613047037047, 1.1601118702165516, 3.41145396835554715],
[-1.5167118851747432, 1.13399019452317229, 3.33463991576970376],
[-1.51796995214293484, 1.10901914311812599, 3.261209417731739],
[-1.51917405893049096, 1.08512432780749357, 3.19094372645891067],
[-1.52032761857082033, 1.06223763742939425, 3.12364255256581247],
[-1.52143376206479064, 1.04029658944601056, 3.05912215833967505],
[-1.52249536692434928, 1.01924376028408314, 2.9972136825853819],
[-1.52351508231842558, 0.999026283487492006, 2.93776166488040635],
[-1.52449535128367786, 0.97959540644734322, 2.88062274208421831],
[-1.5254384303921622, 0.960906097884826726, 2.82566449409256837],
[-1.27564031139474521, 6.37820155697372471, 17.6526107797419876],
[-1.30734188897751502, 5.69866977246609174, 15.7719066349723303],
[-1.33294024892197616, 5.14999641628945337, 14.2533724344952244],
[-1.35404350101569371, 4.69770194229934468, 13.0015809638343658],
[-1.37174041295106819, 4.3184420407718811, 11.9519234128421772],
[-1.3867944010757367, 3.99584827428602107, 11.0590977238327817],
[-1.39975661156155629, 3.71810349946038432, 10.2903982146830053],
[-1.41103490315193403, 3.47646280486708337, 9.62162205699978124],
[-1.42093757036091217, 3.26431604001831221, 9.03447468722715286],
[-1.42970197734395743, 3.07657387529712345, 8.5148706372193903],
[-1.43751384739696997, 2.90925421497005798, 8.05178887143183708],
[-1.44452058224359914, 2.75919661776867242, 7.63648240388329658],
[-1.45084066114553356, 2.62386077015681574, 7.26192054328695225],
[-1.45657040299149232, 2.50118150008640105, 6.92238762229795057],
[-1.46178891595889171, 2.38946265108664946, 6.61318927844863946],
[-1.46656177725777814, 2.28729818471396573, 6.33043409357600595],
[-1.4709438074907355, 2.19351269538092097, 6.07086895986307873],
[-1.47498118928412514, 2.10711598469160721, 5.83175335763177216],
[-1.47871310415206647, 2.02726796536976872, 5.61076222180280215],
[-1.48217301073646701, 1.95325125445890979, 5.40591008954657148],
[-1.48538965286070335, 1.88444955959939975, 5.21549128112657367],
[-1.48838786175827664, 1.82033047840940299, 5.03803229465661317],
[-1.49118919989254306, 1.76043169431758528, 4.87225360103778726],
[-1.49381248169772118, 1.70434981133297025, 4.71703874254213318],
[-1.49627419784391802, 1.65173125736016924, 4.57140915639976253],
[-1.49858886325432339, 1.60226482234738965, 4.43450352302615869],
[-1.50076930439658351, 1.55567549845987285, 4.30556071779629601],
[-1.50282689786109525, 1.51171936471234458, 4.18390565351269483],
[-1.50477176959780512, 1.47017931512429234, 4.06893745744693813],
[-1.5066129621776978, 1.43086147245926587, 3.96011954583579184],
[-1.50835857590996403, 1.3935921625255101, 3.85697124981325024],
[-1.51001588846147183, 1.35821534940978927, 3.75906071704297595],
[-1.51159145670470041, 1.32459045175154144, 3.66599886793588459],
[-1.51309120380002837, 1.29259047560806439, 3.57743422807996625],
[-1.51452049395088428, 1.26210041162573705, 3.49304849217593638],
[-1.5158841968207315, 1.23301585387332224, 3.412552701455283],
[-1.51718674324221481, 1.20524180537932946, 3.3356839378292622],
[-1.51843217356159199, 1.17869164157749129, 3.26220245507182183],
[-1.51962417972978026, 1.15328620783063673, 3.19188918108420383],
[-1.5207661420638412, 1.1289530312264322, 3.12454353642374327],
[-1.52186116144989003, 1.10562563011316795, 3.05998152334262086],
[-1.52291208763363217, 1.08324290752183039, 2.99803404699424192],
[-1.52392154414198622, 1.06174861682023614, 2.93854543655247769],
[-1.52489195029477242, 1.04109088975948305, 2.88137213901080536],
[-1.52582554069532406, 1.02122181857561056, 2.82638156258720485],
[-1.27833354634586249, 6.76764818653691869, 17.6898803986978876],
[-1.30979471652204116, 6.04520638394788179, 15.8014977980415452],
[-1.3351882487406086, 5.46213374484794389, 14.2774107052830992],
[-1.35611547171843894, 4.98164867161875513, 13.0214761110923565],
[-1.37365979082111167, 4.57886596940370527, 11.9686468811357951],
[-1.38858041578731251, 4.23634703121552914, 11.073340434371703],
[-1.40142518485352263, 3.94150833240053267, 10.3026648355247268],
[-1.41259933789274661, 3.68504175102455678, 9.63228968809474217],
[-1.42240907838132058, 3.45991397444104987, 9.04383069430285502],
[-1.43109008571549801, 3.26071158770619807, 8.523137788976479],
[-1.43882671348119895, 3.08320010031685454, 8.05914248443933268],
[-1.4457652400896297, 2.92401958669812689, 7.64306230856371549],
[-1.45202321591018624, 2.7804699879131225, 7.26783960729513367],
[-1.4576961857013353, 2.65035670127515521, 6.9277379330553357],
[-1.46286260775795629, 2.53187759035030879, 6.61804670144344609],
[-1.46758750997381071, 2.42353900729620175, 6.33486168296034879],
[-1.4719252456657983, 2.32409249315652344, 6.0749195446119133],
[-1.4759215975769997, 2.23248645011647007, 5.83547152655444012],
[-1.47961540310416884, 2.14782881095766465, 5.6141858641976734],
[-1.48303982321077998, 2.06935789285225091, 5.40907160326102332],
[-1.48622334295083092, 1.99641941590410088, 5.21841852879377388],
[-1.48919056757675494, 1.92844821700586966, 5.04074936722923095],
[-1.49196286135157141, 1.86495357668946404, 4.87478143240218209],
[-1.49455886416781758, 1.80550735268595419, 4.7193956079930075],
[-1.49699491239958871, 1.7497343131943246, 4.5736110797662759],
[-1.49928538407837442, 1.69730420839061269, 4.4365646113765731],
[-1.50144298380650509, 1.64792522612909065, 4.3074934382985397],
[-1.50347897933555918, 1.60133855787219326, 4.18572106377148323],
[-1.50540339911323628, 1.55731386115162374, 4.07064539817688331],
[-1.5072251981104936, 1.51564545061390432, 3.96172880285467777],
[-1.50895239771612255, 1.476149084722294, 3.85848969089910643],
[-1.51059220430990826, 1.43865924219991248, 3.76049540808366034],
[-1.51215111021157833, 1.40302680328909357, 3.66735617193065844],
[-1.51363497998798135, 1.3691170673258124, 3.57871988987108169],
[-1.51504912453752816, 1.33680805106252487, 3.4942677112495435],
[-1.51639836492479319, 1.30598902242326664, 3.41371019472303905],
[-1.51768708758254856, 1.27655923254606884, 3.33678399396069691],
[-1.51891929221298727, 1.24842681551752377, 3.26324898167219413],
[-1.52009863349058971, 1.22150783048350964, 3.19288574579161777],
[-1.52122845748241975, 1.19572542509535173, 3.12549340281868337],
[-1.52231183355045374, 1.17100910273111825, 3.06088768241661757],
[-1.52335158237663393, 1.14729407877738132, 2.99889924480421044],
[-1.52435030064935106, 1.12452071359378336, 2.93937219858819487],
[-1.52531038286650222, 1.10263401171072473, 2.88216279172164436],
[-1.52623404064034252, 1.08158317840654217, 2.82713825244598915],
[-1.28115453510933186, 7.15939299031685472, 17.7289179049688386],
[-1.31236310739630269, 6.3935638565460895, 15.8324831289733421],
[-1.33754165925653457, 5.77574807406230839, 14.3025761518227164],
[-1.3582842844223797, 5.26681661306637139, 13.0423011391985657],
[-1.37566869149558935, 4.84031576637337047, 11.9861503583087927],
[-1.39044963418350553, 4.47771916092993294, 11.0882466590396493],
[-1.40317145405736965, 4.16566525423281586, 10.3155026427186307],
[-1.414236627202893, 3.89427477055869131, 9.64345410288349569],
[-1.42394915320928095, 3.65608566364545151, 9.05362265655360332],
[-1.43254293767497831, 3.4453564323828596, 8.53179053387439623],
[-1.44020089336780877, 3.257597258808139, 8.06683952773278534],
[-1.44706811796233437, 3.08924654396453402, 7.64994999439638512],
[-1.45326118608597765, 2.93744282293974246, 7.27403604312183472],
[-1.45887482978456684, 2.79986078443502739, 6.93333946882463348],
[-1.46398682542163838, 2.67459131567414721, 6.6231327053930853],
[-1.46866162474110817, 2.56005237340193181, 6.33949811413478415],
[-1.47295309114234785, 2.45492181857057989, 6.07916166124977764],
[-1.47690658826755339, 2.35808614933474914, 5.83936596453683876],
[-1.48056059297513243, 2.26860090859092889, 5.61777224995806357],
[-1.48394795443690386, 2.18565977785280463, 5.41238381831444393],
[-1.48709688674179241, 2.10857021254433263, 5.22148571053741328],
[-1.49003175856912073, 2.03673405847577671, 5.04359670796238291],
[-1.49277372673876396, 1.96963200055809118, 4.87743082243464166],
[-1.49534124850031569, 1.90681098802053706, 4.72186615717717117],
[-1.49775049880169853, 1.84787399202806979, 4.57591954341687845],
[-1.50001571248266719, 1.79247160611136347, 4.43872574039682366],
[-1.5021494666931936, 1.7402951138518703, 4.30952026877528915],
[-1.50416291537405322, 1.69107073326077018, 4.18762515789048617],
[-1.50606598503162248, 1.64455481124142722, 4.07243704573205978],
[-1.50786753906120663, 1.60052979006496798, 3.96341719066088105],
[-1.50957551636018739, 1.55880080493714979, 3.86008304591015206],
[-1.51119704880470773, 1.51919280038568494, 3.76200111884981458],
[-1.51273856125682737, 1.48154807545771772, 3.66878089212029579],
[-1.5142058570597654, 1.44572418513243961, 3.58006962686743568],
[-1.51560419141991143, 1.41159213906756453, 3.4955479022699425],
[-1.51693833463165384, 1.37903484966513989, 3.41492577246025375],
[-1.51821262674841839, 1.34794579010373616, 3.33793944338846149],
[-1.5194310250200862, 1.3182278299261021, 3.264348389369637],
[-1.52059714518947486, 1.28979222136607263, 3.1939328429091427],
[-1.52171429755562992, 1.26255771412912554, 3.12649160261975512],
[-1.52278551856170274, 1.23644978002873307, 3.06184011317641458],
[-1.52381359854216925, 1.21139993189544826, 2.9998087787200447],
[-1.52480110616344655, 1.18734512365186418, 2.94024147725369467],
[-1.52575041000859302, 1.1642272204884847, 2.88299424863069476],
[-1.52666369768809207, 1.14199252976668331, 2.82793413292223361],
[-1.28410260519826802, 7.55354473646040159, 17.7697139925230942],
[-1.31504629371967252, 6.74382714728037147, 15.8648533639770726],
[-1.33999966466157816, 6.09090756664353705, 14.3288600505289203],
[-1.36054910423965203, 5.55326164995776406, 13.0640480315256386],
[-1.37776627880083291, 5.10283806963271491, 12.0044265588109607],
[-1.39240122983536563, 4.72000416893344266, 11.1038098074159244],
[-1.40499460927911657, 4.39060815399723925, 10.3289056822785064],
[-1.41594598180364417, 4.10419125160476561, 9.65510991940021235],
[-1.42555702850459243, 3.85285683379619615, 9.06384570150555113],
[-1.4340597909938706, 3.63053111644017878, 8.5408244514255216],
[-1.44163566935682841, 3.43246587942102011, 8.07487598133794826],
[-1.44842852261057309, 3.25489555642825446, 7.65714179649746729],
[-1.45455390246766969, 3.09479553716525491, 7.28050650118126175],
[-1.46010568947268071, 2.94970846358117367, 6.93918916057471069],
[-1.46516094588218881, 2.81761720361959389, 6.62844447151509453],
[-1.46978352038416338, 2.69685049611773087, 6.34434079211696211],
[-1.47402676379299979, 2.58601186630350854, 6.08359291547900316],
[-1.47793560142204439, 2.48392538054125112, 5.84343445772329328],
[-1.48154813317701906, 2.38959376318874028, 5.62151932790151232],
[-1.48489688234278061, 2.30216570905857498, 5.41584483056029686],
[-1.48800977987275385, 2.22091011921306558, 5.22469105544873624],
[-1.49091094731085394, 2.1451956076415164, 5.04657266697666707],
[-1.49362132482325793, 2.07447406225452458, 4.88020023145376936],
[-1.49615917894888506, 2.00826735429380587, 4.72444895097617845],
[-1.49854051611302275, 1.94615651443249704, 4.57833320020244905],
[-1.50077942169692835, 1.88777285748041312, 4.44098564722267142],
[-1.50288833984360992, 1.83279065834586596, 4.31164002375864897],
[-1.50487830574241932, 1.78092107188452009, 4.18961682160833337],
[-1.50675913954930207, 1.73190705695322089, 4.07431135148245183],
[-1.508539609136893, 1.68551911635407037, 3.96518372122295082],
[-1.5102275673673049, 1.64155170366011416, 3.86175038286041827],
[-1.51183006842221457, 1.59982017822456557, 3.76357696927329055],
[-1.51335346682539873, 1.56015821322206061, 3.6702721966048979],
[-1.51480350208926695, 1.52241557998921295, 3.58148265192462345],
[-1.51618537136276288, 1.48645624643408114, 3.49688831973617598],
[-1.5175037920191321, 1.45215673877428908, 3.41619872796651514],
[-1.51876305577213189, 1.41940472502068404, 3.33914961561115931],
[-1.51996707562907285, 1.38809778596262356, 3.26550004147707185],
[-1.52111942676279988, 1.35814234532392852, 3.19502986737454142],
[-1.52222338220228637, 1.32945273554784826, 3.12753756037631314],
[-1.52328194409211526, 1.30195037956591042, 3.0628382679288042],
[-1.52429787114973991, 1.27556307209183251, 3.00076212709603585],
[-1.52527370284938057, 1.25022434659785264, 2.94115277537144815],
[-1.52621178077892994, 1.22587291628829731, 2.8838660355682193],
[-1.52711426754807089, 1.20245217917170932, 2.82876875150144658],
[-1.28717707674740889, 7.95021135638105481, 17.8122592532251716],
[-1.31784349453674232, 7.0960803551978433, 15.8985990815265961],
[-1.34256143266967132, 6.40767956501434099, 14.3562535016154627],
[-1.36290907777730874, 5.84103890475989473, 13.0867085937596688],
[-1.37995169688416675, 5.36647882121620512, 12.02346802562964],
[-1.39443435600386367, 4.96324092814934481, 11.1200231271155552],
[-1.40689382003566377, 4.61637034699202253, 10.3428678488559349],
[-1.41772659177941174, 4.31482006193733891, 9.66725161495960883],
[-1.42723191739545929, 4.05025273855468271, 9.07449482614275205],
[-1.43563988311859747, 3.81625791715070228, 8.55023500009240678],
[-1.44313030369623108, 3.60782575924057713, 8.08324771296519629],
[-1.44984574104689523, 3.42098433280728065, 7.66463394564678868],
[-1.45590067645588839, 3.25254406442272925, 7.28724753481378151],
[-1.46138809996029528, 3.09991415143092963, 6.94528384880120164],
[-1.4663843273994861, 2.96096835340280862, 6.63397909655248252],
[-1.47095257742126106, 2.83394533264646675, 6.34938704290553613],
[-1.47514566554824444, 2.7173735944309767, 6.08821083895130677],
[-1.47900805952395187, 2.61001422268932703, 5.84767472273965794],
[-1.4825774658919193, 2.51081667610728321, 5.6254249814689361],
[-1.48588606797149803, 2.41888429669778748, 5.41945267426813881],
[-1.48896150143468353, 2.33344712911405638, 5.22803273451506456],
[-1.49182763015700393, 2.25384030455374695, 5.04967553948827508],
[-1.49450516847250858, 2.17948670402240818, 4.88308806782163352],
[-1.49701218417512272, 2.10988294413943489, 4.72714250103620959],
[-1.49936450810482058, 2.04458796559748279, 4.58085065625531218],
[-1.5015760699510794, 1.98321367729387865, 4.44334302460366537],
[-1.50365917532797666, 1.92541723670045783, 4.31385147555983473],
[-1.5056247357643644, 1.87089464207406264, 4.19169490045641169],
[-1.50748246068693703, 1.81937538358768247, 4.07626722846668876],
[-1.50924101852753489, 1.77061795469710792, 3.96702736992852056],
[-1.5109081725965916, 1.72440606655045814, 3.86349073481900263],
[-1.51249089621648714, 1.68054544024054109, 3.76522204587225939],
[-1.51399547071668095, 1.63886107654898461, 3.67182922150617763],
[-1.51542756919480071, 1.59919492226587012, 3.58295814726710438],
[-1.51679232839882761, 1.56140386746938153, 3.49828818878258962],
[-1.51809441065043571, 1.52535802027077261, 3.41752832636856407],
[-1.51933805738284544, 1.49093921518877393, 3.34041381307770502],
[-1.52052713558869823, 1.45803971905765617, 3.26670327531727223],
[-1.52166517824978897, 1.42656110460917729, 3.19617618913627588],
[-1.52275541963909777, 1.39641326691794987, 3.12863067659474003],
[-1.52380082623809554, 1.36751356200854746, 3.0638815758334359],
[-1.52480412389193476, 1.33978605028161635, 3.00175874598809722],
[-1.52576782172590542, 1.31316083017393503, 2.94210557427064989],
[-1.52669423326505171, 1.28757344974160981, 2.88477765763536897],
[-1.52758549513122666, 1.26296438573841585, 2.82964163566630766],
[-1.29037726799257535, 8.34949996936372152, 17.8565442526619584],
[-1.32075392094395871, 7.45040673353002258, 15.9337107642085254],
[-1.34522611934168435, 6.72613059670842084, 14.384747480687782],
[-1.3653633376937524, 6.13020274066582704, 13.1102744976512344],
[-1.38222407452680573, 5.63128326659068978, 12.0432671678678176],
[-1.39654814972951224, 5.2074676769575019, 11.1368797364022942],
[-1.40886823913849968, 4.84298457203859289, 10.3573829142916267],
[-1.41957763027080452, 4.5261895457909711, 9.67987355133932681],
[-1.4289730159946572, 4.24829815565979274, 9.08556491926332832],
[-1.43728243452211024, 4.00255867841600299, 8.56001753724877013],
[-1.44468404178017806, 3.78369629990046574, 8.09195049592349669],
[-1.45131904360265884, 3.58753022014140388, 7.67242258443877567],
[-1.45730080297839804, 3.41070400697071863, 7.29425561490783192],
[-1.46272138020112497, 3.25049195600250007, 6.95162029681443716],
[-1.46765631223943482, 3.10465758358341981, 6.63973360489090503],
[-1.47216816063253986, 2.97134858109319966, 6.35463412456523002],
[-1.4763091828606647, 2.84901772131005426, 6.09301289943809365],
[-1.48012336984038995, 2.73636253247803118, 5.85208441604960772],
[-1.48364801834613314, 2.63227874222701042, 5.62948703735367495],
[-1.48691495766837578, 2.53582395881428413, 5.42320533010054984],
[-1.48995151607509402, 2.44618905624269178, 5.23150886800993842],
[-1.492781289269004, 2.36267542186461066, 5.0529035726695426],
[-1.49542475660517238, 2.2846767114801243, 4.88609269432453885],
[-1.49789977913131955, 2.21166410341537079, 4.72994527571332668],
[-1.50022200507303594, 2.14317429296147965, 4.58347047653807405],
[-1.50240520222808205, 2.07879965088162288, 4.44579652609001652],
[-1.5044615321923076, 2.01818010416041238, 4.31615335912488174],
[-1.50640177795656149, 1.9609963973399025, 4.19385820431101841],
[-1.50823553386987985, 1.90696446811134224, 4.07830355566539371],
[-1.50997136503414242, 1.85583072797492377, 3.96894707960091608],
[-1.51161694171927286, 1.80736808249043501, 3.86530310368977092],
[-1.51317915324971497, 1.76137255933829251, 3.76693540531212134],
[-1.51466420492930354, 1.71766043857962258, 3.6734510743259654],
[-1.51607770088024618, 1.67606579996811123, 3.58449526765907445],
[-1.51742471512805466, 1.63643841827535286, 3.4997467081752438],
[-1.51870985283322568, 1.59864195035076362, 3.41891380745470164],
[-1.5199373032270338, 1.56255236780349271, 3.34173131387065192],
[-1.52111088553384133, 1.52805659733993182, 3.26795740476562679],
[-1.52223408894057299, 1.49505133735234863, 3.19737115556490892],
[-1.5233101074945592, 1.46344202466726214, 3.1297703300270312],
[-1.52434187066498894, 1.43314192968503229, 3.0649694450763989],
[-1.52533207018386263, 1.40407136167552204, 3.00279807121969622],
[-1.52628318368440064, 1.37615696889577066, 2.94309933575209159],
[-1.5271974955739156, 1.34933112058739546, 2.88572860107440698],
[-1.5280771155114985, 1.323531359891849, 2.83055229467779546],
[-1.29370250057574321, 8.75151691565943857, 17.9025596035555026],
[-1.32377678104906815, 7.80688870875091556, 15.9701788585535027],
[-1.34799287375276866, 7.04632638552583579, 14.4143328886517637],
[-1.36791100710807445, 6.4208067680583083, 13.1347373233540612],
[-1.38458252932006798, 5.89729595821510522, 12.0638162971313356],
[-1.39874173579528982, 5.45272202089689273, 11.154372655791251],
[-1.41091700646011242, 5.07048299196602859, 10.3724445553044191],
[-1.42149825705926491, 4.7383275235308826, 9.69296999922295832],
[-1.43077950681537702, 4.44701738604779351, 9.09705078319776916],
[-1.43898665196281317, 4.18945480951198679, 8.57016733858865187],
[-1.44629611526163449, 3.9600965060735227, 8.10098002655474936],
[-1.45284768690400323, 3.75455020211146939, 7.68050378301498426],
[-1.45875356333833062, 3.56929063370017063, 7.30152714415621862],
[-1.46410483563650096, 3.40145567875146737, 6.958195203706806],
[-1.468976229290194, 3.24869743016100632, 6.64570496039457925],
[-1.47342962157062418, 3.10907167854351929, 6.3600792380640252],
[-1.47751668911629386, 2.98095472365568082, 6.09799651078259863],
[-1.4812809267398539, 2.86297994243837328, 5.85666114311849739],
[-1.48475920503959791, 2.75398884805731869, 5.63370327396073201],
[-1.48798298522687467, 2.65299291939675408, 5.4271007329398806],
[-1.49097927606495761, 2.55914353354433022, 5.23511753275046665],
[-1.49377139460675279, 2.47170806301836787, 5.05625497239192168],
[-1.49637957611223449, 2.39005071184592977, 4.88921243445004272],
[-1.4988214669142188, 2.31361702946490189, 4.73285570592711391],
[-1.50111252562799469, 2.24192130450934268, 4.5861911903115038],
[-1.50326635199272896, 2.17453623244231231, 4.44834477114829507],
[-1.50529495812916991, 2.11108439249822588, 4.31854437682789172],
[-1.50720899364415239, 2.05123117478198269, 4.19610551189096892],
[-1.50901793349084135, 1.99467887760283658, 4.08041918222667199],
[-1.5107302355810095, 1.94116175521582246, 3.97094176447410607],
[-1.5123534736828006, 1.89044184210350075, 3.86718646395520471],
[-1.51389445001023071, 1.84230541535636538, 3.7687160779355211],
[-1.51535929103496181, 1.79655998421670748, 3.6751368372780906],
[-1.51675352936657415, 1.75303171735835228, 3.58609314355262931],
[-1.51808217400869117, 1.71156323540195565, 3.50126305328965248],
[-1.51934977087178891, 1.67201170957182499, 3.42035438849366757],
[-1.52056045508343596, 1.63424721808032869, 3.34310137437736765],
[-1.52171799636453642, 1.59815132038284635, 3.26926172278317084],
[-1.52282583852070674, 1.56361581633822566, 3.19861409385710926],
[-1.52388713392036856, 1.53054166288945304, 3.13095587995429447],
[-1.52490477368661881, 1.49883802541847144, 3.06610126504082992],
[-1.5258814142119923, 1.46842144463915569, 3.00387952044662088],
[-1.52681950050814641, 1.4392151029380067, 2.94413350405361784],
[-1.52772128682266151, 1.41114817658318148, 2.88671833514081255],
[-1.52858885488900942, 1.38415526230107178, 2.83150022135936608],
[-1.32691128473661424, 8.16560790607147169, 16.0079938325276139],
[-1.35086084247634863, 7.36833186805281048, 14.4450005996618636],
[-1.37055120383748363, 6.71290385553053248, 13.1600886001129815],
[-1.38702617168100883, 6.16456076302670564, 12.085107662516938],
[-1.40101423053991359, 5.69904093778947907, 11.1724948384581229],
[-1.41303925256109664, 5.29889719710411278, 10.3880463801561866],
[-1.42348762202199564, 4.95126129398954973, 9.70653516175867992],
[-1.43265056206648733, 4.64643425535077004, 9.10894715476057115],
[-1.4407517316312024, 4.37696728596820961, 8.580679616866842],
[-1.44796574506049947, 4.1370449858871412, 8.11033194108291688],
[-1.45443091675065106, 3.92206089910287981, 7.68887355428293695],
[-1.46025822797189031, 3.72831887992823008, 7.30905847085930116],
[-1.46553776083958676, 3.55281881415657397, 6.96500521691944918],
[-1.47034339659985491, 3.39310014599966525, 6.65189007788684261],
[-1.47473630099819197, 3.24712580036299148, 6.36571953779494848],
[-1.47876754697777724, 3.11319483574268929, 6.10315904257056285],
[-1.48248011394645474, 2.98987586006007655, 5.86140246732610848],
[-1.48591042991611988, 2.87595567080539416, 5.63807142964140695],
[-1.48908957397947384, 2.77039920740367274, 5.43113677951428286],
[-1.49204422331442665, 2.67231801190643603, 5.23885676917490883],
[-1.49479740587307774, 2.5809451612197023, 5.05972790980779052],
[-1.49736910373385901, 2.49561517288976464, 4.89244557851930928],
[-1.49977674057762522, 2.41574777005791974, 4.73587219088438083],
[-1.50203557844593782, 2.34083466770795523, 4.58901129648580319],
[-1.50415904288501645, 2.27042874397738315, 4.45098635017232791],
[-1.50615899111562368, 2.20413510894969322, 4.32102320317012722],
[-1.50804593454570757, 2.14160369402940765, 4.19843557517015054],
[-1.50982922444443068, 2.0825230681992144, 4.08261293161554306],
[-1.5115172077019976, 2.02661525055016467, 3.9730103140993851],
[-1.51311735815096271, 1.97363133671864732, 3.8691397663588476],
[-1.51463638780845256, 1.92334779404247946, 3.77056307123744361],
[-1.51608034153210869, 1.87556330911188729, 3.67688557057142873],
[-1.51745467790498467, 1.83009609395576045, 3.58775088419243859],
[-1.51876433863141758, 1.78678157486049138, 3.50283637904942147],
[-1.52001380830383948, 1.74547040188000713, 3.42184926701893044],
[-1.52120716606390771, 1.70602672829597157, 3.3445232319302276],
[-1.52234813041233918, 1.66832671826009782, 3.27061550392240008],
[-1.52344009820440318, 1.63225724807614658, 3.19990431341594483],
[-1.52448617869268088, 1.59771477242901061, 3.13218666844937266],
[-1.52548922333581549, 1.56460433162647772, 3.06727640845940686],
[-1.52645185197504363, 1.53283867980757504, 3.00500249520609986],
[-1.52737647588464975, 1.50233751726358999, 2.94520750780216245],
[-1.52826531812335609, 1.47302681264901847, 2.88774631396401293],
[-1.52912043154834354, 1.44483820303780508, 2.83248489387203017],
[-1.3301566482175935, 8.52664518088201007, 16.0471462304199441],
[-1.35382917386352064, 7.69221121513363926, 14.4767415068815097],
[-1.37328304444219973, 7.0065461451132629, 13.1863198451031618],
[-1.38955410868759666, 6.43312087355368689, 12.1071334840280418],
[-1.40336474550224177, 5.94646078602644756, 11.1912391993017764],
[-1.41523410215939793, 5.52825821156014818, 10.4041819541562006],
[-1.4255448684394223, 5.16501763927326785, 9.720563197112293],
[-1.43458534680988725, 4.84657211760097084, 9.12124872532502806],
[-1.44257686216766112, 4.56511665242930675, 8.59154953987195569],
[-1.44969214425121651, 4.31455995312862051, 8.12000183178806445],
[-1.4560679708818427, 4.09007856989281571, 7.69752786853828042],
[-1.46181405910045026, 3.88780334867140942, 7.31684590219959308],
[-1.46701944206072321, 3.70459455065839149, 6.97204694433909378],
[-1.47175712382151369, 3.53787770149402325, 6.65828583421175146],
[-1.47608753123864878, 3.38552186063910243, 6.37155214172279116],
[-1.48006111064624779, 3.24574804966282393, 6.10849782946543574],
[-1.48372030671793809, 3.11705946789482757, 5.86630591857806571],
[-1.48710108846216382, 2.99818767835113675, 5.6425892106568396],
[-1.4902341388214686, 2.88805065663075267, 5.43531133577907699],
[-1.4931457913256041, 2.78571975993582877, 5.24272458819922971],
[-1.49585877439917381, 2.69039347913520466, 5.06332052773245511],
[-1.49839280788088236, 2.6013764025709758, 4.89579038963857727],
[-1.50076508489311311, 2.51806222297502158, 4.73899310363899051],
[-1.50299066397184844, 2.43991990904520772, 4.5919292688230815],
[-1.50508279036780945, 2.36648237479215284, 4.45371982935883182],
[-1.50705316100834508, 2.29733713568345266, 4.32358848935625861],
[-1.50891214431825604, 2.23211855668381087, 4.20084712367893243],
[-1.51066896362418057, 2.17050138451750074, 4.08488360566193709],
[-1.5123318509917576, 2.11219532261418586, 3.9751515971598983],
[-1.51390817691123281, 2.05694045775982692, 3.87116194150399462],
[-1.5154045601422117, 2.00450338643149673, 3.77247537326407745],
[-1.5168269611709353, 1.95467391903471022, 3.67869631562332522],
[-1.51818076206322883, 1.9072622638985286, 3.58946758065703087],
[-1.51947083496862301, 1.86209661148115546, 3.50446582280753471],
[-1.52070160111411035, 1.81902105396424663, 3.42339762356071287],
[-1.52187708179276315, 1.77789378714107826, 3.3459961073995097],
[-1.52300094258673946, 1.73858555089810407, 3.27201800679023203],
[-1.52407653184917846, 1.70097827215310082, 3.2012411081921357],
[-1.52510691429624656, 1.66496388023607667, 3.1334620226042964],
[-1.52609490041908336, 1.63044326967850739, 3.06849423353495165],
[-1.52704307231026903, 1.59732538944588787, 3.00616638293716099],
[-1.52795380640453793, 1.56552644098825589, 2.94632076193989745],
[-1.52882929355550323, 1.53496917023644897, 2.88881197838499748],
[-1.52967155680547107, 1.5055822409502666, 2.83350577746840226],
[-1.33351209834603002, 8.890080655640201, 16.0876267249181346],
[-1.35689702210094665, 8.01802785786922989, 14.5095465658748957],
[-1.37610564806138402, 7.30178507134611898, 13.2134226002628807],
[-1.39216544771742901, 6.70301882234317681, 12.1298859842787099],
[-1.40579239088137053, 6.1950173157484123, 11.2105986425370308],
[-1.41750067742669383, 5.7585965020459442, 10.4208448238946811],
[-1.42766913614052338, 5.37962283183385637, 9.7350482399147289],
[-1.43658302196584353, 5.04745386096107218, 9.1339501599314783],
[-1.44446122753753237, 4.75392302733871386, 8.60277224754948122],
[-1.45147452081644457, 4.49265923109851872, 8.12998526243020336],
[-1.45775808161653453, 4.25861911483482025, 7.70646266742224206],
[-1.46342031326447164, 4.04775831328470836, 7.32488571692482715],
[-1.46854915966163868, 3.85679577284874764, 6.97931696586667627],
[-1.47321671455377046, 3.68304178638442625, 6.6648890788225863],
[-1.47748263842850602, 3.52427051368267508, 6.37757414110653276],
[-1.48139672803052558, 3.37862411656084793, 6.11401018016107312],
[-1.48500087393639801, 3.24453972456691986, 5.87136900157206121],
[-1.4883305697233884, 3.12069313006516946, 5.64725429882947072],
[-1.49141608815728932, 3.00595490636352913, 5.43962224401554018],
[-1.494283407072313, 2.89935586446866678, 5.24671897781733687],
[-1.49695494496117032, 2.80005960927988573, 5.06703094679302524],
[-1.49945015039133667, 2.70734054931769075, 4.89924510943836022],
[-1.50178597804941405, 2.62056613619360856, 4.74221679645804883],
[-1.50397727606447273, 2.53918241413482404, 4.59494356096321077],
[-1.50603710332006191, 2.46270218152966081, 4.45654375542194359],
[-1.50797699108733152, 2.39069522977259874, 4.32623886772310673],
[-1.50980716005370996, 2.32278024623647639, 4.2033388686702402],
[-1.51153670137381746, 2.25861805952409478, 4.08722998848494878],
[-1.5131737285136988, 2.19790597437743962, 3.97736446517148234],
[-1.51472550524133198, 2.14037299653666402, 3.87325190334807923],
[-1.51619855402361781, 2.08577578860391855, 3.77445195591593796],
[-1.51759874824221819, 2.03389522960297287, 3.68056809818537989],
[-1.51893139097788099, 1.98453347564949278, 3.59124230881956308],
[-1.5202012825915352, 1.93751143859705466, 3.50615050715351595],
[-1.52141277891779381, 1.89266661492165711, 3.42499862431015245],
[-1.52256984155774933, 1.84985120936922809, 3.34751920772393019],
[-1.52367608149482225, 1.80893050771074781, 3.27346847645348804],
[-1.52473479704545611, 1.76978146085633314, 3.20262375897271045],
[-1.52574900698494309, 1.73229144897853793, 3.13478125670923902],
[-1.52672147954915216, 1.69635719949905761, 3.06975408601656419],
[-1.5276547578990185, 1.66188383704495712, 3.00737055896020111],
[-1.52855118254110556, 1.62878404697003032, 2.94747266961307419],
[-1.52941291112038624, 1.59697733691285348, 2.88991475775960627],
[-1.53024193593780522, 1.56638938324342281, 2.83456232621550175],
[-1.33697687668974696, 9.25599376169824772, 16.1294261662186145],
[-1.36006355103466148, 8.34584451771269542, 14.5434068354956416],
[-1.37901814002770151, 7.59867138382611174, 13.2413884670006876],
[-1.39485929987784685, 6.97429649938923468, 12.1533574183801303],
[-1.40829627880040098, 6.44474568264590264, 11.2305660877218401],
[-1.41983810110020858, 5.9899419890165051, 10.4380285401195021],
[-1.42985956447462392, 5.59510264359635379, 9.74998442152623923],
[-1.43864274715511131, 5.24910191529567705, 9.14704611535783663],
[-1.44640400975271288, 4.94340610928142343, 8.61434286821077677],
[-1.45331208025618408, 4.67136025796630605, 8.14027778286350845],
[-1.45950047835744257, 4.42769808041021928, 7.71567387715929076],
[-1.46507624372908229, 4.20819772134949144, 7.33317417738865185],
[-1.47012619042863912, 4.00943506480537959, 6.98681184441085623],
[-1.47472146856706732, 3.82860381262603999, 6.67169664385389627],
[-1.47892094466184787, 3.66338215650182475, 6.38378260975595779],
[-1.48277374281445318, 3.51183254877107309, 6.11969338591403655],
[-1.48632118010271008, 3.37232536661959426, 5.87658920368340354],
[-1.48959825823011505, 3.24348007840428298, 5.65206435884894542],
[-1.49263482576064543, 3.12411940275483957, 5.44406732961537809],
[-1.49545649279826431, 3.01323323175769708, 5.25083790941480189],
[-1.49808535751949146, 2.90994997503786124, 5.07085727131597697],
[-1.50054058821386249, 2.81351360290099217, 4.9028079635737658],
[-1.50283889328260223, 2.72326510863290316, 4.74554160596955921],
[-1.50499490357574794, 2.6386274283470903, 4.59805261124928144],
[-1.50702148556780791, 2.55909308870005114, 4.45945666012360764],
[-1.50892999954068796, 2.4842140236340593, 4.32897295599935195],
[-1.51073051371943579, 2.41359312842750118, 4.20590950713014511],
[-1.51243198288556524, 2.34687721482242884, 4.08965085027389907],
[-1.51404239815786013, 2.2837511033666047, 3.9796477560518051],
[-1.51556891322828302, 2.22393264441106764, 3.87540855257558237],
[-1.51701795126090078, 2.16716850180128651, 3.77649177813890846],
[-1.51839529582337551, 2.11323056635212048, 3.6824999313654545],
[-1.51970616856614971, 2.06191289202442452, 3.59307413221293226],
[-1.52095529584904043, 2.01302906803549453, 3.50788954263222319],
[-1.52214696610756506, 1.9664099562155144, 3.42665142370147979],
[-1.52328507942647517, 1.92190173572499212, 3.34909172836521796],
[-1.52437319052831111, 1.87936420750065758, 3.27496614677429454],
[-1.5254145461751929, 1.83866931905045572, 3.20405153560459022],
[-1.52641211781310315, 1.79969987689754496, 3.1361436743714628],
[-1.52736863014992563, 1.76234841940376041, 3.07105530122025616],
[-1.52828658624620473, 1.72651622714006381, 3.00861438840518547],
[-1.52916828960507956, 1.69211245161217816, 2.94866262401307289],
[-1.53001586367207931, 1.65905334615044753, 2.89105407171772466],
[-1.53083126909218281, 1.62726158525547016, 2.83565398467666174],
[-1.3405502433467531, 9.62446328556643316, 16.1725356280678803],
[-1.36332793775107808, 8.67572324023413444, 14.5783135161791417],
[-1.3820196552517483, 7.89725517286713341, 13.2702091386928078],
[-1.39763478321871726, 7.24699517224520129, 12.1775401019334542],
[-1.41087552636532854, 6.69568046410664408, 11.2511344941506284],
[-1.42224549940143197, 6.22232405988126569, 10.4557266791933401],
[-1.43211529510127344, 5.81148235693270454, 9.76536588906013314],
[-1.44076368336955474, 5.45153826139831565, 9.16053125709966842],
[-1.44840439143143596, 5.13358518482027915, 8.62625653377836166],
[-1.4552040280442633, 4.85068009348087692, 8.1508749427955447],
[-1.46129438995089944, 4.59733066501406551, 7.72515742103256287],
[-1.46678110275344542, 4.3691351996911143, 7.34170754090953182],
[-1.47174980975698433, 4.16252471446419836, 6.99452813626930503],
[-1.47627068390810412, 3.97457491821412656, 6.67870535364195206],
[-1.4804017700194616, 3.80286693216008542, 6.39017461279042909],
[-1.4841914964149121, 3.64538262277346892, 6.12554472862470334],
[-1.48768058722737484, 3.50042491112323484, 5.88196400244100648],
[-1.49090353582456858, 3.36655637121676854, 5.65701704520532012],
[-1.49388975254147671, 3.24255140086522076, 5.44864440752530843],
[-1.49666446772680573, 3.12735858927989252, 5.25507934377210528],
[-1.49924944887418921, 3.02007083226455419, 5.07479759493025906],
[-1.5016635750113303, 2.9199013958553639, 4.90647716696410274],
[-1.50392330043043421, 2.82616459141289722, 4.74896585807060045],
[-1.50604303185813726, 2.7382600579238856, 4.60125484733281453],
[-1.50803543734664514, 2.65565988966704802, 4.46245706460123603],
[-1.50991170088423621, 2.5778980258999149, 4.33178936137824966],
[-1.51168173353686308, 2.50456145201373825, 4.20855772561594144],
[-1.51335434953948034, 2.43528286132789962, 4.09214495090991637],
[-1.5149374139425873, 2.36973450225656102, 3.98200029754182827],
[-1.51643796703404443, 2.3076229933126764, 3.87763077983433657],
[-1.51786232968939805, 2.24868493287318216, 3.7785937889886867],
[-1.51921619297622001, 2.19268316512031802, 3.68449081853253402],
[-1.52050469469155236, 2.13940359052077733, 3.59496210478580602],
[-1.52173248500259151, 2.08865243039571391, 3.50968203036136916],
[-1.52290378295886009, 2.04025387190660679, 3.42835516690020903],
[-1.5240224253232284, 1.99404803313319623, 3.35071285567560295],
[-1.52509190891285917, 1.94988919861004839, 3.27651024266438506],
[-1.52611542743464379, 1.90764428429330479, 3.2055236991428564],
[-1.5270959036326055, 1.86719149789139527, 3.13754857056393366],
[-1.52803601742882833, 1.82841916615415334, 3.07239720598403254],
[-1.52893823062871248, 1.79122470533907729, 3.00989722807869953],
[-1.52980480867000868, 1.75551371486722285, 2.94989001016081565],
[-1.530637839820437, 1.72119917730812233, 2.89222933186954112],
[-1.53143925216621679, 1.68820075041945183, 2.83678018954411471],
[-1.34423148050254282, 9.99556741912147295, 16.2169464506781118],
[-1.36668937591015194, 9.00772543213509458, 14.6142579855846932],
[-1.38510934137117347, 8.19758589791102743, 13.2998764309211666],
[-1.40049102572252804, 7.52115550850987358, 12.2024264370823978],
[-1.41352925851353195, 6.94785567743939581, 11.2722968835697763],
[-1.42472200475619437, 6.45577158405150886, 10.4739328630904609],
[-1.43443547459250498, 6.02878677727284717, 9.78118682312715215],
[-1.44294499546560595, 5.65478444168953764, 9.17440027522388846],
[-1.4504615581906628, 5.32447913766192737, 8.63850839403426285],
[-1.45714957192648797, 5.03063542688906651, 8.16177230465967263],
[-1.4631390468969474, 4.76753172584398666, 7.73490923106757045],
[-1.46853414371825974, 4.53058406040739747, 7.35048207041958701],
[-1.47341929370082525, 4.31607671892161004, 7.00246240087109406],
[-1.47786365887681725, 4.12096597186804825, 6.6859120336686777],
[-1.48192443447700173, 3.94273473392963814, 6.39674721487549824],
[-1.48564932982501108, 3.77928338288818644, 6.13156148844445426],
[-1.48907845661224725, 3.62884665897102288, 5.88749087257195125],
[-1.49224578338441005, 3.48992965468934679, 5.66211000872874948],
[-1.49518026821429029, 3.36125796730344373, 5.45335128833196592],
[-1.4979067496768248, 3.24173848810656162, 5.2594412367384038],
[-1.50044665423101864, 3.13042827141723334, 5.07885000586830415],
[-1.50281856267940683, 3.02650960539602742, 4.91025092875458924],
[-1.50503866740563086, 2.92926988958142953, 4.75248787257952543],
[-1.50712114419475141, 2.83808527153557089, 4.60454869054305505],
[-1.50907845669057772, 2.75240724805199788, 4.46554348347746455],
[-1.51092160731089797, 2.67175162268390487, 4.33468668438888738],
[-1.51266034529312621, 2.59568934991128186, 4.2112822039077864],
[-1.51430334017888679, 2.52383890029814495, 4.09471104341474845],
[-1.51585832725529523, 2.4558598597990815, 3.98442091046713065],
[-1.51733223010290708, 2.39144753657523434, 3.87991746882292299],
[-1.51873126434694505, 2.33032839502970379, 3.78075693055681272],
[-1.52006102589159875, 2.27225617272455471, 3.68653975609276863],
[-1.52132656627900187, 2.21700856392417345, 3.59690527353904654],
[-1.5225324573127772, 2.16438437559169294, 3.51152706453755714],
[-1.52368284668904019, 2.11420107913790245, 3.43010899218752785],
[-1.52478150606156415, 2.06629269513015723, 3.35238176916806552],
[-1.52583187271507215, 2.02050795930306393, 3.27809998224859145],
[-1.52683708581676458, 1.97670872717349022, 3.2070395039142312],
[-1.52780001805150634, 1.93476858181195133, 3.13899523359490695],
[-1.52872330331240369, 1.89457161521622686, 3.07377912054908542],
[-1.52960936100878087, 1.85601135854621946, 3.01121842826205599],
[-1.53046041746406836, 1.81898984042860579, 2.95115420662641004],
[-1.53127852480210547, 1.78341675579361736, 2.89343994345136801],
[-1.5320655776591614, 1.74920873039825553, 2.83794037121510057],
[-1.34801989572704151, 10.3693838132849319, 16.2626502805018696],
[-1.37014707882903863, 9.34191190110708014, 14.6512318315696053],
[-1.38828636166236885, 8.49971241834103175, 13.3303823094315206],
[-1.40342716806939216, 7.79681760038551275, 12.2280089366046116],
[-1.41625661064923736, 7.20130479991137573, 11.2940463611943418],
[-1.42726675831322658, 6.69031292959324908, 10.4926407779120812],
[-1.43681925684452416, 6.24704024715010497, 9.79744145428041513],
[-1.44518585447732573, 5.85886157220537385, 9.18864789907542878],
[-1.45257470086767171, 5.5161064589911577, 8.6510936298511325],
[-1.45914792405702398, 5.21124258591794209, 8.1729654555813056],
[-1.46503368340600071, 4.93831578676180083, 7.74492525890475658],
[-1.47033462310774365, 4.69255730779067104, 7.35949404438503496],
[-1.47513392088501227, 4.47010279056064341, 7.01061120986260899],
[-1.47949969387207658, 4.26778757847714463, 6.69331351891165394],
[-1.48348825968834541, 4.08299521015140954, 6.40349748792079509],
[-1.48714658533856658, 3.91354364562780699, 6.13774095089294391],
[-1.49051415051927205, 3.75759869878808006, 5.89316729259930661],
[-1.49362438243863216, 3.61360737686765843, 5.6673409027207784],
[-1.49650577286390463, 3.48024598340442903, 5.45818578397261422],
[-1.49918275658080513, 3.35637930577792165, 5.26392154456170758],
[-1.50167640867413144, 3.24102822015999603, 5.08301259195092747],
[-1.50400500277605076, 3.13334375578343849, 4.91412745698702658],
[-1.50618446158417463, 3.03258616426343908, 4.75610596761982674],
[-1.5082287231482423, 2.93810790223683505, 4.60793256000810292],
[-1.51015004074328019, 2.84933969951562238, 4.46871442874033509],
[-1.51195922996591858, 2.76577907920594868, 4.3376635225546627],
[-1.5136658735817512, 2.68698084067766496, 4.21408161846280471],
[-1.51527849231800027, 2.61254912468620715, 4.09734787721620197],
[-1.51680468802829216, 2.54213076205858979, 3.98690841182855493],
[-1.51825126430683066, 2.47540967006548485, 3.88226749921936864],
[-1.51962432858992225, 2.412102108872892, 3.78298014074898514],
[-1.52092937897714831, 2.35195264790280678, 3.68864573612756841],
[-1.52217137837538674, 2.29473072116892451, 3.59890268103326427],
[-1.52335481807375328, 2.24022767363787212, 3.5134237348220636],
[-1.52448377246649436, 2.18825421885142735, 3.43191203323198835],
[-1.52556194632912634, 2.13863824251746681, 3.35409764368156083],
[-1.52659271580386857, 2.09122289836146402, 3.27973457893022902],
[-1.52757916404977423, 2.04586495185237593, 3.20859819948847669],
[-1.52852411235061814, 2.00243333495714149, 3.14048294699111752],
[-1.52943014734166649, 1.96080788120726446, 3.07520036036005973],
[-1.5302996449088031, 1.92087821536669812, 3.01257733443343856],
[-1.53113479122494267, 1.88254277609624077, 2.95245458717760467],
[-1.53193760131607082, 1.84570795339285643, 2.89468530690446357],
[-1.53270993548877188, 1.81028732538043946, 2.83913395530498924],
[-1.35191482501309013, 10.7459896347194341, 16.3096391068886888],
[-1.37370028231739427, 9.67834289814527793, 14.689226882507592],
[-1.39154989771543169, 8.8036830263629362, 13.3617189158185852],
[-1.4064423661772778, 8.07402099101770609, 12.2542802460446154],
[-1.41905673106583996, 7.45606079034593794, 11.3163761350250436],
[-1.42987891226078956, 6.92597598126320069, 10.5118441909172127],
[-1.43926580529804338, 6.46626666148396279, 9.81412407815549948],
[-1.4474854397481498, 6.06379035570170988, 9.2032689108311434],
[-1.45474301756970514, 5.70848525881783075, 8.66400746539932065],
[-1.46119830297155495, 5.39251754668073779, 8.18445001842995978],
[-1.46697753930056418, 5.10969704700196559, 7.75520148585298319],
[-1.47218180234433516, 4.85506764602919105, 7.36873976598946534],
[-1.47689297427682842, 4.62461436389713931, 7.01897115552775563],
[-1.48117809310411341, 4.41505008521418407, 6.70090666159120474],
[-1.48509257064203748, 4.2236577697158868, 6.41042251823007891],
[-1.48868260815360132, 4.04817200462821436, 6.14408041347604783],
[-1.49198703372385877, 3.88668891138148043, 5.89899075098382752],
[-1.4950387166733996, 3.73759679168349956, 5.67270738866802127],
[-1.49786566840610824, 3.59952214888289568, 5.46314571306258934],
[-1.50049190790257869, 3.47128724962536905, 5.26851822886689014],
[-1.50293814854290031, 3.35187644639064075, 5.08728344524773046],
[-1.50522234785931697, 3.24040922108602958, 4.91810496297089284],
[-1.50736015110585053, 3.13611843518666955, 4.75981846372686412],
[-1.50936525182574455, 3.03833264977909634, 4.61140487651956388],
[-1.51124968698897089, 2.94646165387786807, 4.47196841338560347],
[-1.51302408014513445, 2.85998454173775407, 4.34071847383101073],
[-1.51469784296963916, 2.77843983029933828, 4.21695464566399547],
[-1.51627934327884595, 2.70141722078415114, 4.10005420122239705],
[-1.51777604584687742, 2.62855069392475915, 3.98946161771483698],
[-1.51919463102584151, 2.55951269357614564, 3.88467974944379524],
[-1.52054109514689051, 2.49400920368008494, 3.78526235590799986],
[-1.52182083588531136, 2.4317755624971471, 3.69080774888679963],
[-1.52303872515282568, 2.37257288842902536, 3.60095336776082675],
[-1.52419917159258622, 2.31618501565540047, 3.51537112859956746],
[-1.5253061743670342, 2.26241585671665302, 3.43376342124253364],
[-1.52636336962174379, 2.21108712421841425, 3.35585965143472231],
[-1.52737407076297504, 2.16203635587453125, 3.28141324335150575],
[-1.52834130348895436, 2.11511519679274906, 3.21019903255157546],
[-1.52926783635520391, 2.07018790074285253, 3.14201099128874839],
[-1.53015620752432158, 2.02713001851512731, 3.07666023777860387],
[-1.53100874824461908, 1.98582724667712074, 3.01397328890833993],
[-1.53182760351487035, 1.94617441430167948, 2.95379052235142003],
[-1.53261475032106098, 1.9080745887531283, 2.89596481938176442],
[-1.53337201377142529, 1.87143828452418037, 2.84036036409234471],
]
)
class TestConeFitting(unittest.TestCase):
def test_rotation_matrix(self):
v = [3, 5, 0]
axis = [4, 4, 1]
theta = 1.2
res = np.dot(rotation_matrix(axis, theta), v)
self.assertAlmostEqual(res[0], 2.74911638)
self.assertAlmostEqual(res[1], 4.77180932)
self.assertAlmostEqual(res[2], 1.91629719)
def test_point_cone_distance(self):
c = CircularCone()
c.apex = np.array([0, 0, 0])
c.axis = np.array([1, 0, 0])
c.theta = math.radians(45.0)
point = np.array([0, 0, 0])
d = c.distance(point)
self.assertAlmostEqual(d, 0)
point = np.array([0, 1, 0])
d = c.distance(point)
self.assertAlmostEqual(d, math.sqrt(2.0) / 2.0)
c.axis = np.array([0, 0, 1])
d = c.distance(point)
self.assertAlmostEqual(d, math.sqrt(2.0) / 2.0)
c.axis = np.array([0, 1, 0])
d = c.distance(point)
self.assertAlmostEqual(d, math.sqrt(2.0) / 2.0)
def test_build_matrix(self):
a = [1, 0, 0]
b = [0, 1, 0]
c = [0, 0, 1]
m = build_matrix(a, b, c)
n = np.eye(3)
for i, j in zip(m.flatten(), n.flatten()):
self.assertAlmostEqual(i, j)
def test_from_coeffs(self):
k = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, np.pi / 3.0]
c = CircularCone()
c.from_coeffs(k)
        vec = np.asarray(k[3:6])
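        # Assumed completion of the truncated test: from_coeffs is expected to set
        # apex = k[0:3], axis = normalized k[3:6] and theta = k[6]; adjust these
        # assertions if the real CircularCone.from_coeffs contract differs.
        vec = vec / np.linalg.norm(vec)
        for i, j in zip(c.apex, k[0:3]):
            self.assertAlmostEqual(i, j)
        for i, j in zip(c.axis, vec):
            self.assertAlmostEqual(i, j)
        self.assertAlmostEqual(c.theta, k[6])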
import os
import numpy as np
from PIL import Image
from torch.utils import data
from mypath import Path
from torchvision import transforms
from dataloaders import custom_transforms as tr
from dataloaders.mapping import KITTI2CS
class Merge3(data.Dataset):
"""return dict with img, event, label of Cityscapes"""
NUM_CLASSES = 19
def __init__(self, args, root=Path.db_root_dir('merge3'), split="train"):
self.root = root
self.split = split
self.args = args
self.images = {}
self.event = {}
self.labels = {}
self.images_base = os.path.join(self.root[0], 'leftImg8bit', self.split)
self.images[split] = self.recursive_glob(rootdir=self.images_base, suffix='.png')
self.images[split].sort()
self.event_base = os.path.join(self.root[0], 'event', self.split)
self.event[split] = self.recursive_glob(rootdir=self.event_base, suffix='.npz')
self.event[split].sort()
self.annotations_base = os.path.join(self.root[0], 'gtFine', self.split)
self.labels[split] = self.recursive_glob(rootdir=self.annotations_base, suffix='labelTrainIds.png')
self.labels[split].sort()
# --- load KITTI-360 dataset
with open('dataloaders/kitti_txt/colors_{}.txt'.format(split), 'r') as colors_f, \
open('dataloaders/kitti_txt/events_{}.txt'.format(split), 'r') as events_f, \
open('dataloaders/kitti_txt/labels_{}.txt'.format(split), 'r') as labels_f:
self.images[split] += [self.root[1] + i for i in colors_f.read().splitlines()]
self.event[split] += [self.root[1] + i for i in events_f.read().splitlines()]
self.labels[split] += [self.root[1] + i for i in labels_f.read().splitlines()]
# --- load BDD3K dataset
with open('dataloaders/bdd_txt/images_{}.txt'.format(split), 'r') as colors_f, \
open('dataloaders/bdd_txt/events_{}.txt'.format(split), 'r') as events_f, \
open('dataloaders/bdd_txt/labels_{}.txt'.format(split), 'r') as labels_f:
self.images[split] += [self.root[2] + i for i in colors_f.read().splitlines()]
self.event[split] += [self.root[2] + i for i in events_f.read().splitlines()]
self.labels[split] += [self.root[2] + i for i in labels_f.read().splitlines()]
if not self.images[split]:
raise Exception("No RGB images for split=[%s] found in %s" % (split, self.images_base))
else:
print("Found %d %s RGB images" % (len(self.images[split]), split))
print("Found %d %s RGB events" % (len(self.event[split]), split))
print("Found %d %s labels" % (len(self.labels[split]), split))
self.ignore_index = 255
def __len__(self):
return len(self.labels[self.split])
def __getitem__(self, index):
sample = dict()
lbl_path = self.labels[self.split][index].rstrip()
if 'KITTI-360_mini' in lbl_path:
sample['label'] = self.relabel(lbl_path)
else:
sample['label'] = Image.open(lbl_path)
img_path = self.images[self.split][index].rstrip()
sample['image'] = Image.open(img_path).convert('RGB')
if self.args.event_dim:
event_path = self.event[self.split][index].rstrip()
sample['event'] = self.get_event(event_path)
# data augment
if self.split == 'train':
return self.transform_tr(sample)
elif self.split == 'val':
return self.transform_val(sample), lbl_path
elif self.split == 'test':
raise NotImplementedError
def relabel(self, label_path):
"""from apollo to the 18 class (Cityscapes without 'train', cls=16)"""
_temp = np.array(Image.open(label_path))
for k, v in KITTI2CS.items():
_temp[_temp == k] = v
return Image.fromarray(_temp.astype(np.uint8))
def get_event(self, event_path):
event_volume = np.load(event_path)['data']
neg_volume = event_volume[:9, ...]
pos_volume = event_volume[9:, ...]
if self.args.event_dim == 18:
event_volume = np.concatenate((neg_volume, pos_volume), axis=0)
elif self.args.event_dim == 2:
neg_img = np.sum(neg_volume, axis=0, keepdims=True)
pos_img = np.sum(pos_volume, axis=0, keepdims=True)
event_volume = np.concatenate((neg_img, pos_img), axis=0)
elif self.args.event_dim == 1:
neg_img = np.sum(neg_volume, axis=0, keepdims=True)
pos_img = np.sum(pos_volume, axis=0, keepdims=True)
event_volume = neg_img + pos_img
return event_volume
def recursive_glob(self, rootdir='.', suffix=None):
if isinstance(suffix, str):
return [os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir)
for filename in filenames if filename.endswith(suffix)]
elif isinstance(suffix, list):
return [os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir)
for x in suffix for filename in filenames if filename.startswith(x)]
def transform_tr(self, sample):
composed_transforms = transforms.Compose([
tr.FixedResize(size=(1024, 2048)),
tr.ColorJitter(),
tr.RandomGaussianBlur(),
tr.RandomMotionBlur(),
tr.RandomHorizontalFlip(),
tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])
return composed_transforms(sample)
def transform_val(self, sample):
composed_transforms = transforms.Compose([
tr.FixedResize(size=self.args.crop_size),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])
return composed_transforms(sample)
if __name__ == '__main__':
from dataloaders.utils import decode_segmap
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.base_size = 513
args.crop_size = 513
args.event_dim = 2
cityscapes_train = Merge3(args, split='train')
dataloader = DataLoader(cityscapes_train, batch_size=2, shuffle=True, num_workers=0)
for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
img = sample['image'].numpy()
gt = sample['label'].numpy()
event = sample['event'].numpy()
tmp = np.array(gt[jj]).astype(np.uint8)
segmap = decode_segmap(tmp, dataset='cityscapes')
            img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
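            # Hedged completion of the demo loop: undo the Normalize transform
            # (mean/std assumed to match transform_tr above) and display the pair.
            img_tmp *= (0.229, 0.224, 0.225)
            img_tmp += (0.485, 0.456, 0.406)
            img_tmp *= 255.0
            img_tmp = img_tmp.astype(np.uint8)
            plt.figure()
            plt.subplot(211)
            plt.imshow(img_tmp)
            plt.subplot(212)
            plt.imshow(segmap)
        if ii == 1:
            break
    plt.show(block=True)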
# -*-coding:utf-8-*-
from __future__ import print_function
import numpy as np
import pickle
import os
from .rbm import RBM
INT = 0.1
EXT = 3.0
MAX = 20
RATIO = 10
SIG = 0.1
RHO = SIG / 2.0
# Restricted Boltzmann Machine Network
class RbmNet:
def __init__(self, num_layers=3, dim=None, output_dim=10, learning_rate=0.1, path=None, mode=0):
"""
        Initialization.
        :param num_layers: number of stacked RBMs; the hidden units of one RBM
                           serve as the visible units of the next.
        :param dim: number of visible and hidden units of each RBM, as a list:
                    element i is the number of visible units of the i-th RBM and
                    element i+1 is its number of hidden units.
        :param output_dim: under mode 0, the output dimension of the RBM net
                           (after num_layers RBMs and the transfer matrix);
                           under mode 1, the number of visible units appended to
                           the top RBM to encode the label.
        :param learning_rate: learning rate of each RBM.
        :param path: directory where the parameters of the RBM net are stored.
        :param mode: 1 if the label is used as (extra) visible units of the top
                     RBM, 0 otherwise.
        RbmNet attributes
        w_class: under mode 0, the weight matrix between the hidden units of the
                 top RBM and the label (softmax) output, of shape
                 (1 + number of hidden units of the top RBM) x label dimension.
"""
self.num_layers = num_layers
if dim is None:
self.dim = [788, 500, 500, 2000]
elif isinstance(dim, list):
self.dim = dim
else:
self.dim = [dim, 500, 500, 2000]
self.output_dim = output_dim
self.learning_rate = learning_rate
self.path = path
self.rbms = []
self.mode = mode
for i in range(0, num_layers):
num_visible = self.dim[i]
num_hidden = self.dim[i + 1]
if i == num_layers - 1 and mode == 1:
num_visible += self.output_dim
path = os.path.join(self.path, 'rbm' + ('-%d' % i) + ('-%dh' % num_hidden) + ('-%dv' % num_visible))
if not os.path.exists(path):
os.mkdir(path)
r = RBM(num_visible=num_visible, num_hidden=num_hidden, learning_rate=0.1, path=path)
self.rbms.append(r)
datafile = os.path.join(self.path, 'w_class')
if os.path.isfile(datafile):
with open(datafile, 'rb') as fp:
self.w_class = pickle.load(fp)
print("Load w_class Successfully!")
else:
            # Initialize w_class with a Gaussian distribution (mean 0, standard deviation 0.1).
self.w_class = 0.1 * np.random.randn(self.dim[num_layers] + 1, self.output_dim)
with open(datafile, 'wb') as fp:
pickle.dump(self.w_class, fp)
print("Create W_class Successfully!")
print("Create RBM_net Successfully")
def train_rbms(self, batch_data, batch_label=None, max_epochs_rbm=50, max_epochs_joint=200, test_set=None,
test_label_set=None, test_name_set=None):
"""
Train Function
Under mode 0, also Prediction Function
        :param batch_data: training data, a list of np.array; each np.array is a
                           matrix whose rows are training examples (states of the
                           visible units), i.e. one batch of the training set.
        :param batch_label: training labels, a list of lists; each inner list is
                            the label of the training example at the same position
                            in batch_data.
        :param max_epochs_rbm: maximum number of epochs for training the RBMs.
        :param max_epochs_joint: maximum number of epochs for training w_class
                                 (the RBM weights are updated as well); used under
                                 mode 0.
        :param test_set: collection of test sets, a list of lists of np.array;
                         each inner list is one test set and each np.array is a
                         batch whose rows are examples (states of the visible
                         units). Used under mode 0.
        :param test_label_set: collection of test label sets, a list of lists of
                               lists; each innermost list is the label of the
                               corresponding row of the np.array in test_set.
                               Used under mode 0.
        :param test_name_set: names of the test sets, a list of strings in the
                              same order as the test sets in test_set.
                              Used under mode 0.
"""
train_data = batch_data.copy()
for i in range(0, self.num_layers):
            # In mode 1, the visible units of the top RBM consist of the hidden
            # units of the previous RBM concatenated with the label.
if i == self.num_layers - 1 and self.mode == 1:
train_data = list(map(lambda y: np.array(list(map(lambda x: x[0].tolist() + x[1], zip(y[0], y[1])))),
zip(train_data, batch_label)))
self.rbms[i].train(train_data, max_epochs=max_epochs_rbm)
train_data = self.rbms[i].run_visible_for_hidden(train_data)
print("Train RbmNet Successfully (Initial for Mode 0)")
if self.mode == 0:
for epoch in range(0, max_epochs_joint):
num_batches = len(batch_data)
counter = 0
err_cr = 0
for batch in range(0, num_batches):
data = batch_data[batch]
label = np.array(batch_label[batch])
hidden_probs = np.insert(data, 0, 1, axis=1)
for i in range(0, self.num_layers):
hidden_activations = np.dot(hidden_probs, self.rbms[i].weights)
hidden_probs = self._logistic(hidden_activations)
hidden_probs[:, 0] = 1
label_out = np.exp(np.dot(hidden_probs, self.w_class))
# label_out = np.array(list(map(lambda x: list(map(lambda y: y/np.sum(x), x)), label_out)))
label_out = np.divide(label_out, np.array([np.sum(label_out, axis=1).tolist()]).T)
counter += np.count_nonzero(np.argmax(label_out, axis=1) - np.argmax(label, axis=1))
# err_cr -= np.sum(np.array(list(map(lambda x: list(map(lambda y: y[0]*y[1], zip(x[0], x[1]))),
# zip(label, np.log(label_out))))))
err_cr -= np.sum(np.multiply(label, np.log(label_out)))
if self.path:
datafile = os.path.join(self.path, 'train_epoch.txt')
with open(datafile, 'at') as fp:
fp.write('epoch: %s, wrong num: %s, error: %s\n' % (epoch, counter, err_cr / num_batches))
print('epoch: %s \n train: wrong: %s, error: %s' % (epoch, counter, err_cr / num_batches))
if test_set is not None:
len_test_set = len(test_set)
test_result = [0] * len_test_set
test_result_err = [0] * len_test_set
for i in range(0, len_test_set):
tmp_result = self.predict(batch_test=test_set[i], batch_test_label=test_label_set[i],
test_name=test_name_set[i])
if epoch == 0 or tmp_result[1] < test_result[i] or (
tmp_result[1] == test_result[i] and tmp_result[2] < test_result_err[i]):
test_result[i] = tmp_result[1]
test_result_err[i] = tmp_result[2]
datafile = os.path.join(self.path, os.path.join(test_name_set[i], 'w_class'))
with open(datafile, 'wb') as fp:
pickle.dump(self.w_class, fp)
for j in range(0, self.num_layers):
datafile = os.path.join(self.path,
os.path.join(test_name_set[i], 'weights-' + ('%d' % j)))
with open(datafile, 'wb') as fp:
pickle.dump(self.rbms[j].weights, fp)
ans = tmp_result[0]
for j in range(0, ans.__len__()):
ans[j] = str(ans[j])
str_convert = ''.join(ans)
datafile = os.path.join(self.path, os.path.join(test_name_set[i], 'best_result.txt'))
with open(datafile, 'wt') as fp:
fp.write(
'epoch: %d, wrong number: %d,error: %d\n' % (epoch, tmp_result[1], tmp_result[2]))
fp.write('%s\n' % str_convert)
print("Save Successfully!")
# combine 10 batches into 1 batch for training
tt = 0
for batch in range(0, int(num_batches / 10)):
tt += 1
data = []
label = []
for kk in range(0, 10):
data += batch_data[(tt - 1) * 10 + kk].tolist()
label += batch_label[(tt - 1) * 10 + kk]
data = np.array(data)
# max_iter is the time of linear searches we perform conjugate gradient with
max_iter = 3
# first update top-level weights (w_class) holding other weights fixed.
if epoch < 6:
hidden_probs = np.insert(data, 0, 1, axis=1)
for i in range(0, self.num_layers):
hidden_activations = np.dot(hidden_probs, self.rbms[i].weights)
hidden_probs = self._logistic(hidden_activations)
hidden_probs[:, 0] = 1
vv = [self.w_class.copy()]
tmp = self._minimize(func=0, x=vv, parameters=[hidden_probs, label], length=max_iter)
self.w_class = tmp[0]
datafile = os.path.join(self.path, 'w_class')
if os.path.isfile(datafile):
with open(datafile, 'wb') as fp:
pickle.dump(self.w_class, fp)
else:
# the update all weights (w_class and weights of each RBMs)
vv = [0] * (self.num_layers + 1)
vv[0] = self.w_class.copy()
for i in range(0, self.num_layers):
vv[i + 1] = self.rbms[i].weights
tmp = self._minimize(func=1, x=vv, parameters=[data, label], length=max_iter)
self.w_class = tmp[0]
for i in range(0, self.num_layers):
self.rbms[i].weights = tmp[i + 1]
datafile = os.path.join(self.path, 'w_class')
if os.path.isfile(datafile):
with open(datafile, 'wb') as fp:
pickle.dump(self.w_class, fp)
for i in range(0, self.num_layers):
datafile = os.path.join(self.rbms[i].path, 'weights')
if os.path.isfile(datafile):
with open(datafile, 'wb') as fp:
pickle.dump(self.rbms[i].weights, fp)
def predict(self, batch_test, batch_test_label, test_name):
"""
        Prediction function (used directly under mode 1; the mode 0 branch is
        also called from train_rbms for test evaluation).
        :param batch_test: visible-unit data, a list of np.array; each np.array is
                           a matrix whose rows are examples (states of the visible
                           units), i.e. one batch of the test set.
        :param batch_test_label: labels, a list of lists; each inner list is the
                                 label of the example at the same position in
                                 batch_test.
        :param test_name: name of the test set, a string.
        :return: a list [ans, counter, err]: ans is a list of predicted classes
                 corresponding to batch_test, counter is the number of wrong
                 predictions (int), and err is the mean cross-entropy error
                 (0 under mode 1).
"""
if self.mode == 1:
test_data = batch_test.copy()
for i in range(0, self.num_layers-1):
test_data = self.rbms[i].run_visible_for_hidden(test_data)
test_data = list(
map(lambda y: np.array(list(map(lambda x: x + [0] * self.output_dim, y))), test_data))
ans = self.rbms[-1].predict(test_data, soft_max=self.output_dim)
test_num_batches = len(batch_test)
counter = 0
err = 0
for batch in range(0, test_num_batches):
counter += np.count_nonzero(np.array(ans[batch]) - np.argmax(np.array(batch_test_label[batch]), axis=1))
if self.path:
datafile = os.path.join(self.path, test_name)
if not os.path.exists(datafile):
os.mkdir(datafile)
datafile = os.path.join(datafile, 'test_result.txt')
for i in range(0, ans.__len__()):
ans[i] = str(ans[i])
str_convert = ''.join(ans)
with open(datafile, 'at') as fp:
fp.write('%s\n' % str_convert)
print(' %s, wrong: %s' % (test_name, counter))
print(ans)
else:
test_num_batches = len(batch_test)
counter = 0
err_cr = 0
ans = []
for batch in range(0, test_num_batches):
data = batch_test[batch]
label = np.array(batch_test_label[batch])
hidden_probs = np.insert(data, 0, 1, axis=1)
for i in range(0, self.num_layers):
hidden_activations = np.dot(hidden_probs, self.rbms[i].weights)
hidden_probs = self._logistic(hidden_activations)
hidden_probs[:, 0] = 1
label_out = np.exp(np.dot(hidden_probs, self.w_class))
label_out = np.divide(label_out, np.array([np.sum(label_out, axis=1).tolist()]).T)
predicted_ans = np.argmax(label_out, axis=1)
counter += np.count_nonzero(predicted_ans - np.argmax(label, axis=1))
err_cr -= np.sum(np.multiply(label, np.log(label_out)))
ans.append(predicted_ans.tolist())
err = err_cr / test_num_batches
if self.path:
datafile = os.path.join(self.path, test_name)
if not os.path.exists(datafile):
os.mkdir(datafile)
datafile = os.path.join(datafile, 'test_result.txt')
with open(datafile, 'at') as fp:
fp.write('%s,%s\n' % (counter, err))
print(' %s, wrong: %s, error: %s' % (test_name, counter, err))
print(ans)
return [ans, counter, err]
@staticmethod
def _logistic(x):
# return 1.0 / (1 + np.exp(-x))
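        # 0.5 * (1 + tanh(0.5 * x)) is algebraically the same sigmoid, but avoids
        # overflow in exp(-x) for large negative x.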
return .5 * (1 + np.tanh(.5 * x))
@staticmethod
def _classify_init(w_class, hidden_probs, label):
"""
the loss function of the RBM net with each RBM weights hold
:param w_class: w_class
:param hidden_probs: the output (hidden units) of the top RBM,
suppose the input (visible units) of RBM net is data
:param label: the label of data
:return: a list, the first elements is value of the loss function with each RBM weights hold
the second elements is a list, consisting the partial derivative of the function
"""
label_out = np.exp(np.dot(hidden_probs, w_class))
# label_out = np.array(list(map(lambda x: list(map(lambda y: y/np.sum(x), x)), label_out)))
label_out = np.divide(label_out, np.array([np.sum(label_out, axis=1).tolist()]).T)
# f = - np.sum(np.array(list(map(lambda x: list(map(lambda y: y[0]*y[1], zip(x[0], x[1]))),
# zip(label, np.log(label_out))))))
f = - np.sum(np.multiply(label, np.log(label_out)))
df = np.dot(hidden_probs.T, label_out - label)
return [f, [df]]
def _classify(self, w_class, weights, data, label):
"""
the loss function of the RBM net
:param w_class: w_class
:param weights: a list, consisting of weights of each RBM
:param data: the input (visible units) of the first RBM
:param label: the label of the data
:return: a list, the first elements is value of the loss function
the second elements is a list, consisting the partial derivative of the function
corresponding to w_class and weights[i]
"""
# hidden_probs is a list, the i-th elements is the input of the i-th RBM or the output of the i-1th RBM
hidden_probs = [np.insert(data, 0, 1, axis=1)] * (self.num_layers + 1) # 0 data 1
for i in range(0, self.num_layers):
hidden_activations = np.dot(hidden_probs[i], weights[i])
hidden_probs[i + 1] = self._logistic(hidden_activations)
hidden_probs[i + 1][:, 0] = 1
label_out = np.exp(np.dot(hidden_probs[self.num_layers], w_class))
# label_out = np.array(list(map(lambda x: list(map(lambda y: y/np.sum(x), x)), label_out)))
# f = - np.sum(np.array(list(map(lambda x: list(map(lambda y: y[0]*y[1], zip(x[0], x[1]))),
# zip(label, np.log(label_out))))))
label_out = np.divide(label_out, np.array([np.sum(label_out, axis=1).tolist()]).T)
f = - np.sum(np.multiply(label, np.log(label_out)))
io = label_out - label
dw_class = np.dot(hidden_probs[self.num_layers].T, io)
tmp1 = np.dot(io, w_class.T)
# tmp2 = np.array(list(map(lambda x: list(map(lambda y: 1-y, x)), hidden_probs[self.num_layers])))
tmp2 = np.subtract(1, hidden_probs[self.num_layers])
# Ix = np.array(list(map(lambda x: list(map(lambda y: y[0]*y[1]*y[2], zip(x[0], x[1], x[2]))),
# zip(tmp1, hidden_probs[self.num_layers], tmp2))))
        ix = np.multiply(np.multiply(tmp1, hidden_probs[self.num_layers]), tmp2)
import numpy as np
import gmpy2
from gmpy2 import mpfr, mpc
import flamp
def to_fp(A):
return np.array(A, float)
def to_cpx(A):
return np.array(A, complex)
### linalg
def test_qr_real():
n = 5
A = np.random.rand(n, n)
AA = mpfr(1) * A
Q, R = flamp.qr(AA)
assert Q.shape == (n, n) and R.shape == (n, n)
assert np.allclose(to_fp(Q.T @ Q), np.eye(n))
assert np.allclose(to_fp(Q @ R), A)
assert np.all(np.tril(R, -1) == 0)
## special case: size 0 matrix
AA = flamp.zeros((4, 0))
Q, R = flamp.qr(AA)
assert np.allclose(to_fp(Q), np.eye(4))
assert R.shape == (4, 0)
def test_qr_complex():
n = 5
A = np.random.rand(n, n) + 1j * np.random.rand(n, n)
AA = mpfr(1) * A
Q, R = flamp.qr(AA)
assert Q.shape == (n, n) and R.shape == (n, n)
assert np.allclose(to_cpx(Q.T.conj() @ Q), np.eye(n))
assert np.allclose(to_cpx(Q @ R), A)
assert np.all(np.tril(R, -1) == 0)
def test_inverse_real():
n = 5
A = np.random.rand(n, n)
AA = mpfr(1) * A
Ainv = flamp.inverse(AA)
    assert Ainv.shape == (n, n)
assert np.allclose(to_fp(Ainv @ A), np.eye(n))
def test_inverse_complex():
n = 5
A = np.random.rand(n, n) + 1j * np.random.rand(n, n)
AA = mpfr(1) * A
Ainv = flamp.inverse(AA)
    assert Ainv.shape == (n, n)
assert np.allclose(to_cpx(Ainv @ A), np.eye(n))
def test_lu_solve_real():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n)
AA = mpfr(1) * A
x = flamp.lu_solve(AA, b)
assert x.shape == (n,)
assert np.allclose(to_fp(A @ x), b)
def test_lu_solve_real_block():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n, 3)
AA = mpfr(1) * A
x = flamp.lu_solve(AA, b)
assert x.shape == (n, 3)
assert np.allclose(to_fp(A @ x), b)
def test_lu_solve_complex():
n = 5
A, b = np.random.rand(n, n) + 1j * np.random.rand(n, n), np.random.rand(n)
AA = mpfr(1) * A
x = flamp.lu_solve(AA, b)
assert x.shape == (n,)
assert np.allclose(to_cpx(A @ x), b)
def test_lu():
n = 5
A = np.random.rand(n, n) + 1j * np.random.rand(n, n)
AA = mpfr(1) * A
P, L, U = flamp.lu(AA)
assert np.allclose(to_cpx(P @ AA), to_cpx(L @ U))
def test_cholesky_solve_real():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n)
A = A.T @ A
AA = mpfr(1) * A
x = flamp.cholesky_solve(AA, b)
assert x.shape == (n,)
assert np.allclose(to_fp(A @ x), b)
def test_cholesky_solve_real_block():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n, 3)
A = A.T @ A
AA = mpfr(1) * A
x = flamp.cholesky_solve(AA, b)
assert x.shape == (n, 3)
assert np.allclose(to_fp(A @ x), b)
def test_qr_solve_real():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n)
AA = mpfr(1) * A
x = flamp.qr_solve(AA, b)
assert x.shape == (n,)
assert np.allclose(to_fp(A @ x), b)
def test_qr_solve_real_block():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n, 3)
AA = mpfr(1) * A
x = flamp.qr_solve(AA, b)
assert x.shape == (n, 3)
assert np.allclose(to_fp(A @ x), b)
def test_solve_real_overdet():
n = 5
A, b = np.random.rand(n + 2, n), np.random.rand(n + 2, 3)
AA = mpfr(1) * A
x = flamp.qr_solve(AA, b)
x2 = flamp.lu_solve(AA, b)
assert x.shape == (n, 3)
assert x2.shape == (n, 3)
assert np.allclose(to_fp(x), to_fp(x2))
def test_det():
n = 5
E = np.random.rand(n) # random eigenvalues
U = mpfr(1) * np.random.rand(n, n)
Uinv = flamp.inverse(U)
A = U @ np.diag(E) @ Uinv
det = flamp.det(A)
assert np.allclose(to_fp(det), np.prod(E))
### eigen
def test_eig_real():
A = mpfr(1) * np.arange(9).reshape((3, 3))
E, UL, UR = flamp.eig(A, left=True, right=True)
assert np.allclose(to_cpx(A @ UR), to_cpx(E[None, :] * UR))
assert np.allclose(to_cpx(UL @ A), to_cpx(E[:, None] * UL))
# compute only eigenvalues
E2 = flamp.eig(A, left=False, right=False)
assert np.all(E == E2)
def test_eig_complex():
A = mpfr(1) * (np.random.rand(5, 5) + 1j * np.random.rand(5, 5))
E, UL, UR = flamp.eig(A, left=True, right=True)
assert np.allclose(to_cpx(A @ UR), to_cpx(E[None, :] * UR))
assert np.allclose(to_cpx(UL @ A), to_cpx(E[:, None] * UL))
# compute only eigenvalues
E2 = flamp.eig(A, left=False, right=False)
assert np.all(E == E2)
def test_hessenberg_real():
n = 5
A = np.random.rand(n, n)
AA = mpfr(1) * A
Q, H = flamp.hessenberg(AA)
assert Q.shape == (n, n) and H.shape == (n, n)
assert np.allclose(to_fp(Q.T @ Q), np.eye(n))
assert np.allclose(to_fp(Q @ H @ Q.T), A)
assert np.all(np.tril(H, -2) == 0)
def test_hessenberg_complex():
n = 5
A = np.random.rand(n, n) + 1j * np.random.rand(n, n)
AA = mpfr(1) * A
Q, H = flamp.hessenberg(AA)
assert Q.shape == (n, n) and H.shape == (n, n)
assert np.allclose(to_cpx(Q.T.conj() @ Q), np.eye(n))
assert np.allclose(to_cpx(Q @ H @ Q.T.conj()), A)
assert np.all(np.tril(H, -2) == 0)
def test_schur():
n = 5
A = np.random.rand(n, n) + 1j * np.random.rand(n, n)
AA = mpfr(1) * A
Q, R = flamp.schur(AA)
assert Q.shape == (n, n) and R.shape == (n, n)
assert np.allclose(to_cpx(Q.T.conj() @ Q), np.eye(n))
assert np.allclose(to_cpx(Q @ R @ Q.T.conj()), A)
assert np.all(np.tril(R, -1) == 0)
### eigen_symmetric
def test_eigh_real():
n = 5
A = np.random.rand(n, n)
A = A + A.T
AA = mpfr(1) * A
E, Q = flamp.eigh(AA)
assert np.allclose(to_fp(Q.T @ Q), np.eye(n))
assert E.shape == (n,)
assert np.allclose(to_fp(Q @ np.diag(E) @ Q.T), A)
# compute only eigenvalues
E2 = flamp.eigh(AA, eigvals_only=True)
assert np.all(E == E2)
def test_eigh_complex():
n = 5
A = np.random.rand(n, n) + 1j * np.random.rand(n, n)
A = A + A.T.conj()
AA = mpfr(1) * A
E, Q = flamp.eigh(AA)
    assert np.allclose(to_cpx(Q.T.conj() @ Q), np.eye(n))
import qinfer
import random
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import qmla.model_building_utilities as model_building_utilities
import qmla.logging
__all__ = ["gaussian_prior", "prelearned_true_parameters_prior"]
def log_print(to_print_list, log_file, log_identifier="Distributions"):
r"""Writng to unique QMLA instance log."""
qmla.logging.print_to_log(
to_print_list=to_print_list, log_file=log_file, log_identifier=log_identifier
)
def gaussian_prior(
model_name,
param_minimum=0,
param_maximum=1,
default_sigma=None,
random_mean=False, # if set to true, chooses a random mean between given uniform min/max
prior_specific_terms={},
log_file="qmd.log",
log_identifier=None,
**kwargs
):
"""
    Generates a QInfer Gaussian distribution.
    Given a model_name, determines the number of terms in the model, N.
Generates a multivariate distribution with N dimensions.
This is then used as the initial prior, which QHL uses to learn the
model parameters.
    By default, each parameter's mean is the average of param_minimum and param_maximum,
    with sigma = (param_maximum - param_minimum)/4. This can be changed by specifying
    prior_specific_terms: individual parameters' means/sigmas can be given.
:param str model_name: Unique string representing a model.
:param float param_minimum: Lower bound for distribution.
:param float param_maximum: Upper bound for distribution.
:param float default_sigma: Width of distribution desired. If None,
defaults to 0.25 * (param_max - param_min).
:param dict prior_specific_terms: Individual parameter mean and sigma
to enforce in the distribution.
:param str log_file: Path of the log file for logging errors.
    :param str log_identifier: Unique identifying string for logging.
:return QInfer.Distribution dist: distribution to be used as prior for parameter learning
of the named model.
"""
log_print(
[
"Getting prior for model:",
model_name,
"Specific terms:",
prior_specific_terms,
],
log_file,
log_identifier,
)
individual_terms = model_building_utilities.get_constituent_names_from_name(
model_name
)
num_terms = len(individual_terms)
available_specific_terms = list(prior_specific_terms.keys())
means = []
sigmas = []
default_mean = np.mean([param_minimum, param_maximum])
# TODO reconsider how default sigma is generated
# default_sigma = default_mean/2 # TODO is this safe?
if default_sigma is None:
default_sigma = (param_maximum - param_minimum) / 4
for term in individual_terms:
if term in available_specific_terms:
means.append(prior_specific_terms[term][0])
sigmas.append(prior_specific_terms[term][1])
else:
if random_mean:
rand_mean = random.uniform(param_minimum, param_maximum)
means.append(rand_mean)
else:
means.append(default_mean)
sigmas.append(default_sigma)
    means = np.array(means)
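    sigmas = np.array(sigmas)
    # Hedged completion (assumed): wrap the per-term means/sigmas in a QInfer
    # multivariate normal with diagonal covariance and return it as the prior.
    cov_matrix = np.diag(sigmas ** 2)
    return qinfer.MultivariateNormalDistribution(means, cov_matrix)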
import numpy as np
def nichols_grid(gmin,pmin,pmax,cm=None,cp=None):
# Round Gmin from below to nearest multiple of -20dB,
# and Pmin,Pmax to nearest multiple of 360
gmin = min(-20,20*np.floor(gmin/20))
pmax = 360*np.ceil(pmax/360);
pmin = min(pmax-360,360*np.floor(pmin/360));
if cp is None:
p1 = np.array([1,5,10,20,30,50,90,120,150,180])
else:
p1 = cp
g1_part1 = np.array([6,3,2,1,.75,.5,.4,.3,.25,.2,.15,.1,.05,0,-.05,-.1,-.15,-.2,-.25,-.3,-.4,-.5,-.75,-1,-2,-3,-4,-5,-6,-9,-12,-16])
g1_part2 =np.arange(-20,max(-40,gmin)-1,-10)
if gmin >-40:
g1 = np.hstack([g1_part1,g1_part2])
else:
g1 = np.hstack([g1_part1,g1_part2,gmin])
# Compute gains GH and phases PH in H plane
[p,g] = np.meshgrid((np.pi/180)*p1,10**(g1/20))
z = g* np.exp(1j*p)
H = z/(1-z)
gH = 20*np.log10(np.abs(H))
pH = np.remainder((180/np.pi)*np.angle(H)+360,360)
# Add phase lines for angle between 180 and 360 (using symmetry)
p_name = ["%.2f deg" % p1_temp for p1_temp in np.hstack([-360+p1,-p1])]
gH = np.hstack([gH,gH])
pH = np.hstack([pH,360-pH])
phase_lines = []
for indice in range(gH.shape[1]):
phase_lines.append({"y": gH[:,indice],"x": pH[:,indice]-360,"name":p_name[indice]})
# (2) Generate isogain lines for following gain values:
if cm is None:
g2_part1 = np.array([6,3,1,.5,.25,0,-1,-3,-6,-12,-20])
        g2_part2 = np.arange(-40, gmin - 1, -20)
g2 = np.hstack([g2_part1,g2_part2])
else:
g2 = cm
#% Phase points
p2 = np.array([1,2,3,4,5,7.5,10,15,20,25,30,45,60,75,90,105,120,135,150,175,180]);
p2 = np.hstack([p2,np.flip(360-p2[:-1])])
[g,p] = np.meshgrid(10**(g2/20),(np.pi/180)*p2) # mesh in H/(1+H) plane
    z = g* np.exp(1j*p)
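    # Hedged completion mirroring the iso-phase branch above: map the iso-gain
    # grid into the H plane and collect one curve per gain value (return signature assumed).
    H = z/(1-z)
    gH = 20*np.log10(np.abs(H))
    pH = np.remainder((180/np.pi)*np.angle(H)+360, 360)
    g_name = ["%.2f dB" % g2_temp for g2_temp in g2]
    gain_lines = []
    for indice in range(gH.shape[1]):
        gain_lines.append({"y": gH[:, indice], "x": pH[:, indice]-360, "name": g_name[indice]})
    return phase_lines, gain_lines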
import collections
import numpy as np
import uncertainties
import pint
from uncertainties import ufloat
from uncertainties import ufloat_fromstr
from pint import UnitRegistry
import string
#import latex
from uncertainties.unumpy import (nominal_values as noms, std_devs as stds)
import uncertainties.unumpy as unp
import scipy.constants as const
u = UnitRegistry()
Q_ = u.Quantity
class Latexdocument(object):
def __init__(self, filename):
self.name = filename
def tabular(self, spalten, header, places, caption, label):
with open(self.name, 'w') as f:
f.write('\\begin{table} \n\\centering \n\\caption{' + caption + '} \n\\label{tab: ' + label + '} \n\\begin{tabular}{')
f.write(len(spalten) * 'S ')
f.write('} \n\\toprule \n')
f.write(header + ' \\\ \n')
f.write('\\midrule \n ')
for i in range(0, len(spalten[0])):
for j in range(0, len(spalten)):
if j == len(spalten) - 1:
f.write(('{:.' + str(places[j]) + 'f}' + '\\\ \n').format(spalten[j][i]))
else:
f.write(('{:.' + str(places[j]) + 'f} ' + ' & ').format(spalten[j][i]))
f.write('\\bottomrule \n\\end{tabular} \n\\end{table}')
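# Hedged usage example (file name and column layout are illustrative only):
#   doc = Latexdocument('build/messwerte.tex')
#   doc.tabular([winkel.magnitude, I_pol.magnitude],
#               header='{$\\varphi/\\si{\\degree}$} & {$I/\\si{\\milli\\ampere}$}',
#               places=[0, 3], caption='Polarisationsmessung.', label='pol')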
I_pol = Q_(np.array([0.116, 0.067, 0.034, 0.011, 0.001, 0.004, 0.020, 0.049, 0.086,
0.137, 0.187, 0.238, 0.281, 0.307, 0.308, 0.288, 0.251, 0.198, 0.137, 0.083, 0.040,
0.013, 0.001, 0.005, 0.024, 0.053, 0.094, 0.146, 0.208, 0.264, 0.295, 0.296, 0.279,
0.252, 0.214, 0.167, 0.111]), 'mA') #in mA
winkel = Q_(np.linspace(0, 360, 37), 'deg')  # in degrees (unit assumed)
#!/usr/bin/env python
import pytest
import os
import shutil
import numpy as np
import cv2
import plantcv as pcv
import plantcv.learn
# Import matplotlib and use a null Template to block plotting to screen
# This will let us test debug = "plot"
import matplotlib
matplotlib.use('Template')
TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
TEST_TMPDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ".cache")
TEST_COLOR_DIM = (2056, 2454, 3)
TEST_GRAY_DIM = (2056, 2454)
TEST_BINARY_DIM = TEST_GRAY_DIM
TEST_INPUT_COLOR = "input_color_img.jpg"
TEST_INPUT_GRAY = "input_gray_img.jpg"
TEST_INPUT_BINARY = "input_binary_img.png"
TEST_INPUT_ROI = "input_roi.npz"
TEST_INPUT_CONTOURS = "input_contours.npz"
TEST_VIS = "VIS_SV_0_z300_h1_g0_e85_v500_93054.png"
TEST_NIR = "NIR_SV_0_z300_h1_g0_e15000_v500_93059.png"
TEST_VIS_TV = "VIS_TV_0_z300_h1_g0_e85_v500_93054.png"
TEST_NIR_TV = "NIR_TV_0_z300_h1_g0_e15000_v500_93059.png"
TEST_INPUT_MASK = "input_mask.png"
TEST_INPUT_NIR_MASK = "input_nir.png"
TEST_INPUT_FDARK = "FLUO_TV_dark.png"
TEST_INPUT_FMIN = "FLUO_TV_min.png"
TEST_INPUT_FMAX = "FLUO_TV_max.png"
TEST_INPUT_FMASK = "FLUO_TV_MASK.png"
TEST_INTPUT_GREENMAG = "input_green-magenta.jpg"
TEST_INTPUT_MULTI = "multi_ori_image.jpg"
TEST_INPUT_MULTI_CONTOUR = "roi_objects.npz"
TEST_INPUT_ClUSTER_CONTOUR = "clusters_i.npz"
TEST_INPUT_GENOTXT = "cluster_names.txt"
TEST_INPUT_CROPPED = 'cropped_img.jpg'
TEST_INPUT_CROPPED_MASK = 'cropped-mask.png'
TEST_INPUT_MARKER = 'seed-image.jpg'
TEST_FOREGROUND = "TEST_FOREGROUND.jpg"
TEST_BACKGROUND = "TEST_BACKGROUND.jpg"
TEST_PDFS = "naive_bayes_pdfs.txt"
TEST_VIS_SMALL = "setaria_small_vis.png"
TEST_MASK_SMALL = "setaria_small_mask.png"
TEST_VIS_COMP_CONTOUR = "setaria_composed_contours.npz"
TEST_ACUTE_RESULT = np.asarray([[[119, 285]], [[151, 280]], [[168, 267]], [[168, 262]], [[171, 261]], [[224, 269]],
[[246, 271]], [[260, 277]], [[141, 248]], [[183, 194]], [[188, 237]], [[173, 240]],
[[186, 260]], [[147, 244]], [[163, 246]], [[173, 268]], [[170, 272]], [[151, 320]],
[[195, 289]], [[228, 272]], [[210, 272]], [[209, 247]], [[210, 232]]])
TEST_VIS_SMALL_PLANT = "setaria_small_plant_vis.png"
TEST_MASK_SMALL_PLANT = "setaria_small_plant_mask.png"
TEST_VIS_COMP_CONTOUR_SMALL_PLANT = "setaria_small_plant_composed_contours.npz"
TEST_SAMPLED_RGB_POINTS = "sampled_rgb_points.txt"
# ##########################
# Tests setup function
# ##########################
def setup_function():
if not os.path.exists(TEST_TMPDIR):
os.mkdir(TEST_TMPDIR)
# ##########################
# Tests for the main package
# ##########################
def test_plantcv_acute():
# Read in test data
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR))
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
_ = pcv.acute(obj=obj_contour, win=5, thresh=15, mask=mask, device=0, debug="print")
# Test with debug = None
device, homology_pts = pcv.acute(obj=obj_contour, win=5, thresh=15, mask=mask, device=0, debug=None)
assert all([i == j] for i, j in zip(np.shape(homology_pts), (29, 1, 2)))
def test_plantcv_acute_vertex():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_acute_vertex")
os.mkdir(cache_dir)
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR))
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
_ = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img, device=0, debug="print")
os.rename("1_acute_vertices.png", os.path.join(cache_dir, "1_acute_vertices.png"))
# Test with debug = "plot"
_ = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img, device=0, debug="plot")
# Test with debug = None
device, acute = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img, device=0, debug=None)
    assert all([i == j] for i, j in zip(np.shape(acute), np.shape(TEST_ACUTE_RESULT)))
from __future__ import division, print_function
import numpy as np
from numpy import dot, newaxis
from numpy.linalg import norm, solve
from scipy.linalg import solve_triangular
import scipy.optimize
import os
import sys
import lib
from training import print_dict, training_data
def col_square_norm(A):
return np.einsum('ij, ij->j', A, A)
def row_square_norm(A):
return np.einsum('ij, ij->i', A, A)
# Optimize B in-place, using Lagrange dual method of:
# Lee et al., Efficient Sparse Coding Algorithms.
# with c=1.
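# The Lagrange dual maximized below is
#   D(Lam) = tr(X^T X) - tr(Lam) - tr(X S^T (S S^T + Lam)^{-1} (X S^T)^T),
# and the dictionary is recovered from the optimal Lam as
#   B^T = (S S^T + Lam)^{-1} (X S^T)^T.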
@lib.timeit
def optimize_dictionary(X_T, S_T, B_T, Lam_0=None):
SST = dot(S_T.T, S_T)
XST = dot(X_T.T, S_T)
XST_T = XST.T.copy()
XTX = dot(X_T, X_T.T)
XSTTXST = dot(XST_T, XST)
def B(Lam_vec):
Lam = np.diag(Lam_vec)
return solve(SST + Lam, XST_T)
def D(Lam_vec):
Lam = np.diag(Lam_vec)
return np.trace(XTX) - np.trace(Lam) \
- np.trace(XST.dot(solve(SST + Lam, XST_T)))
def grad(Lam_vec):
Lam = np.diag(Lam_vec)
return row_square_norm(solve(SST + Lam, XST_T)) - 1
def hessian(Lam, inv_SST_Lam):
return -2 * inv_SST_Lam \
* (inv_SST_Lam.dot(XSTTXST).dot(inv_SST_Lam))
# last_B_T = None
Lam_vec = np.ones(S_T.shape[1]) if Lam_0 is None else Lam_0.copy()
print('current D:', D(Lam_vec))
Lam_vec, _, _ = scipy.optimize.fmin_l_bfgs_b(
func=lambda x: -D(x),
bounds=[(0, np.inf) for l in Lam_vec],
fprime=lambda x: -grad(x),
x0=Lam_vec
)
print('final D:', D(Lam_vec))
B_T[...] = B(Lam_vec)
print(B_T)
return Lam_vec
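# Hedged usage sketch of one alternating-minimization round (variable names are
# hypothetical; shapes follow the *_T convention used above):
#   data_T:  (n_samples, signal_dim) training signals
#   codes_T: (n_samples, dict_size) sparse codes
#   dict_T:  (dict_size, signal_dim) dictionary, updated in place
#
#   lam = None
#   for _ in range(n_outer_iters):
#       feature_sign_search_vec(data_T, codes_T, dict_T, gamma)        # coding step
#       lam = optimize_dictionary(data_T, codes_T, dict_T, Lam_0=lam)  # dictionary step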
def solve_cholesky(L, b):
# solve L L* x = b
y = solve_triangular(L, b, lower=True)
return solve_triangular(L.T, y)
@lib.timeit
# @profile
def feature_sign_search_vec(Y_T, X_T, A_T, gamma):
Y = Y_T.T.copy()
A = A_T.T.copy()
X = X_T.T.copy()
ATA = dot(A_T, A)
X_T[abs(X_T) < 1e-7] = 0
active_set = X != 0
    theta = np.sign(X)
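    # theta holds the sign pattern of the current coefficients and active_set their
    # support; feature-sign search alternates an analytic solve restricted to the
    # active set with sign-consistency checks on theta.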
import math
import os
import time
import numpy as np
from paddle import fluid
from paddle.fluid import layers
from pytracking.features import augmentation
from pytracking.libs import dcf, operation, fourier
from pytracking.libs.optimization import ConjugateGradient, GaussNewtonCG, GradientDescentL2
from pytracking.libs.paddle_utils import mod, n2p, \
leaky_relu, dropout2d
from pytracking.libs.tensorlist import TensorList
from pytracking.tracker.atom.optim import FactorizedConvProblem, ConvProblem
from pytracking.tracker.base.basetracker import BaseTracker
class ATOM(BaseTracker):
def initialize_features(self):
if not getattr(self, 'features_initialized', False):
self.params.features.initialize()
self.features_initialized = True
def initialize(self, image, state, *args, **kwargs):
# Initialize some stuff
self.frame_num = 1
        # TODO: for now, we don't support explicitly setting up device
# if not hasattr(self.params, 'device'):
# self.params.device = 'cuda' if self.params.use_gpu else 'cpu'
# Initialize features
self.initialize_features()
# Check if image is color
self.params.features.set_is_color(image.shape[2] == 3)
# Get feature specific params
self.fparams = self.params.features.get_fparams('feature_params')
self.time = 0
tic = time.time()
# Get position and size
self.pos = np.array(
[state[1] + (state[3] - 1) / 2, state[0] + (state[2] - 1) / 2],
'float32')
self.target_sz = np.array([state[3], state[2]], 'float32')
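        # state is an [x, y, w, h] box; pos and target_sz are kept internally in
        # [row, col] (i.e. [y, x]) order.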
# Set search area
self.target_scale = 1.0
search_area = np.prod(self.target_sz * self.params.search_area_scale)
if search_area > self.params.max_image_sample_size:
self.target_scale = math.sqrt(search_area /
self.params.max_image_sample_size)
elif search_area < self.params.min_image_sample_size:
self.target_scale = math.sqrt(search_area /
self.params.min_image_sample_size)
# Check if IoUNet is used
self.use_iou_net = getattr(self.params, 'use_iou_net', True)
# Target size in base scale
self.base_target_sz = self.target_sz / self.target_scale
# Use odd square search area and set sizes
feat_max_stride = max(self.params.features.stride())
if getattr(self.params, 'search_area_shape', 'square') == 'square':
self.img_sample_sz = np.ones((2, ), 'float32') * np.round(
np.sqrt(
np.prod(self.base_target_sz *
self.params.search_area_scale)))
elif self.params.search_area_shape == 'initrect':
self.img_sample_sz = np.round(self.base_target_sz *
self.params.search_area_scale)
else:
raise ValueError('Unknown search area shape')
if self.params.feature_size_odd:
self.img_sample_sz += feat_max_stride - mod(self.img_sample_sz,
(2 * feat_max_stride))
else:
self.img_sample_sz += feat_max_stride - mod(
(self.img_sample_sz + feat_max_stride), (2 * feat_max_stride))
# Set sizes
self.img_support_sz = self.img_sample_sz
self.feature_sz = self.params.features.size(self.img_sample_sz)
self.output_sz = self.params.score_upsample_factor * self.img_support_sz # Interpolated size of the output
self.kernel_size = self.fparams.attribute('kernel_size')
self.iou_img_sample_sz = self.img_sample_sz
# Optimization options
self.params.precond_learning_rate = self.fparams.attribute(
'learning_rate')
if self.params.CG_forgetting_rate is None or max(
self.params.precond_learning_rate) >= 1:
self.params.direction_forget_factor = 0
else:
self.params.direction_forget_factor = (
1 - max(self.params.precond_learning_rate)
)**self.params.CG_forgetting_rate
self.output_window = None
if getattr(self.params, 'window_output', False):
if getattr(self.params, 'use_clipped_window', False):
self.output_window = dcf.hann2d_clipped(
self.output_sz.astype('long'),
self.output_sz.astype('long') *
self.params.effective_search_area /
self.params.search_area_scale,
centered=False)
else:
self.output_window = dcf.hann2d(
self.output_sz.astype('long'), centered=False)
# Initialize some learning things
self.init_learning()
# Convert image
im = image.astype('float32')
self.im = im # For debugging only
# Setup scale bounds
self.image_sz = np.array([im.shape[0], im.shape[1]], 'float32')
self.min_scale_factor = np.max(10 / self.base_target_sz)
self.max_scale_factor = np.min(self.image_sz / self.base_target_sz)
# Extract and transform sample
x = self.generate_init_samples(im)
# Initialize iounet
if self.use_iou_net:
self.init_iou_net()
# Initialize projection matrix
self.init_projection_matrix(x)
# Transform to get the training sample
train_x = self.preprocess_sample(x)
# Generate label function
init_y = self.init_label_function(train_x)
# Init memory
self.init_memory(train_x)
# Init optimizer and do initial optimization
self.init_optimization(train_x, init_y)
self.pos_iounet = self.pos.copy()
self.time += time.time() - tic
def track(self, image):
self.frame_num += 1
# Convert image
# im = numpy_to_paddle(image)
im = image.astype('float32')
self.im = im # For debugging only
# ------- LOCALIZATION ------- #
# Get sample
sample_pos = self.pos.round()
sample_scales = self.target_scale * self.params.scale_factors
test_x = self.extract_processed_sample(im, self.pos, sample_scales,
self.img_sample_sz)
# Compute scores
scores_raw = self.apply_filter(test_x)
translation_vec, scale_ind, s, flag = self.localize_target(scores_raw)
# Update position and scale
if flag != 'not_found':
if self.use_iou_net:
update_scale_flag = getattr(self.params,
'update_scale_when_uncertain',
True) or flag != 'uncertain'
if getattr(self.params, 'use_classifier', True):
self.update_state(sample_pos + translation_vec)
self.refine_target_box(sample_pos, sample_scales[scale_ind],
scale_ind, update_scale_flag)
elif getattr(self.params, 'use_classifier', True):
self.update_state(sample_pos + translation_vec,
sample_scales[scale_ind])
# ------- UPDATE ------- #
# Check flags and set learning rate if hard negative
update_flag = flag not in ['not_found', 'uncertain']
hard_negative = (flag == 'hard_negative')
learning_rate = self.params.hard_negative_learning_rate if hard_negative else None
if update_flag:
# Get train sample
train_x = TensorList([x[scale_ind:scale_ind + 1] for x in test_x])
# Create label for sample
train_y = self.get_label_function(sample_pos,
sample_scales[scale_ind])
# Update memory
self.update_memory(train_x, train_y, learning_rate)
# Train filter
if hard_negative:
self.filter_optimizer.run(self.params.hard_negative_CG_iter)
elif (self.frame_num - 1) % self.params.train_skipping == 0:
self.filter_optimizer.run(self.params.CG_iter)
self.filter = self.filter_optimizer.x
# Set the pos of the tracker to iounet pos
if self.use_iou_net and flag != 'not_found':
self.pos = self.pos_iounet.copy()
# Return new state
yx = self.pos - (self.target_sz - 1) / 2
new_state = np.array(
[yx[1], yx[0], self.target_sz[1], self.target_sz[0]], 'float32')
return new_state.tolist()
def update_memory(self,
sample_x: TensorList,
sample_y: TensorList,
learning_rate=None):
replace_ind = self.update_sample_weights(
self.sample_weights, self.previous_replace_ind,
self.num_stored_samples, self.num_init_samples, self.fparams,
learning_rate)
self.previous_replace_ind = replace_ind
for train_samp, x, ind in zip(self.training_samples, sample_x,
replace_ind):
train_samp[ind] = x[0]
for y_memory, y, ind in zip(self.y, sample_y, replace_ind):
y_memory[ind] = y[0]
if self.hinge_mask is not None:
for m, y, ind in zip(self.hinge_mask, sample_y, replace_ind):
m[ind] = layers.cast(y >= self.params.hinge_threshold,
'float32')[0]
self.num_stored_samples += 1
def update_sample_weights(self,
sample_weights,
previous_replace_ind,
num_stored_samples,
num_init_samples,
fparams,
learning_rate=None):
# Update weights and get index to replace in memory
replace_ind = []
for sw, prev_ind, num_samp, num_init, fpar in zip(
sample_weights, previous_replace_ind, num_stored_samples,
num_init_samples, fparams):
lr = learning_rate
if lr is None:
lr = fpar.learning_rate
init_samp_weight = getattr(fpar, 'init_samples_minimum_weight',
None)
if init_samp_weight == 0:
init_samp_weight = None
s_ind = 0 if init_samp_weight is None else num_init
if num_samp == 0 or lr == 1:
sw[:] = 0
sw[0] = 1
r_ind = 0
else:
# Get index to replace
r_ind = np.argmin(sw[s_ind:], 0)
r_ind = int(r_ind + s_ind)
# Update weights
if prev_ind is None:
sw /= 1 - lr
sw[r_ind] = lr
else:
sw[r_ind] = sw[prev_ind] / (1 - lr)
sw /= sw.sum()
if init_samp_weight is not None and sw[:num_init].sum(
) < init_samp_weight:
sw /= init_samp_weight + sw[num_init:].sum()
sw[:num_init] = init_samp_weight / num_init
replace_ind.append(r_ind)
return replace_ind
def localize_target(self, scores_raw):
# Weighted sum (if multiple features) with interpolation in fourier domain
weight = self.fparams.attribute('translation_weight', 1.0)
scores_raw = weight * scores_raw
sf_weighted = fourier.cfft2(scores_raw) / (scores_raw.size(2) *
scores_raw.size(3))
for i, (sz, ksz) in enumerate(zip(self.feature_sz, self.kernel_size)):
sf_weighted[i] = fourier.shift_fs(sf_weighted[i], math.pi * (
1 - np.array([ksz[0] % 2, ksz[1] % 2]) / sz))
scores_fs = fourier.sum_fs(sf_weighted)
scores = fourier.sample_fs(scores_fs, self.output_sz)
if self.output_window is not None and not getattr(
self.params, 'perform_hn_without_windowing', False):
scores *= self.output_window
if getattr(self.params, 'advanced_localization', False):
return self.localize_advanced(scores)
# Get maximum
max_score, max_disp = dcf.max2d(scores)
scale_ind = np.argmax(max_score, axis=0)[0]
max_disp = max_disp.astype('float32')
# Convert to displacements in the base scale
output_sz = self.output_sz.copy()
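        # Wrap displacements into [-output_sz/2, output_sz/2) so they are measured
        # relative to the center of the score map.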
disp = mod((max_disp + output_sz / 2), output_sz) - output_sz / 2
# Compute translation vector and scale change factor
translation_vec = np.reshape(
disp[scale_ind].astype('float32'), [-1]) * (
self.img_support_sz / self.output_sz) * self.target_scale
translation_vec *= self.params.scale_factors[scale_ind]
# Shift the score output for visualization purposes
if self.params.debug >= 2:
sz = scores.shape[-2:]
scores = np.concatenate(
[scores[..., sz[0] // 2:, :], scores[..., :sz[0] // 2, :]], -2)
scores = np.concatenate(
[scores[..., sz[1] // 2:], scores[..., :sz[1] // 2]], -1)
return translation_vec, scale_ind, scores, None
def update_state(self, new_pos, new_scale=None):
# Update scale
if new_scale is not None:
self.target_scale = np.clip(new_scale, self.min_scale_factor,
self.max_scale_factor)
self.target_sz = self.base_target_sz * self.target_scale
# Update pos
inside_ratio = 0.2
inside_offset = (inside_ratio - 0.5) * self.target_sz
self.pos = np.maximum(
np.minimum(new_pos,
self.image_sz.astype('float32') - inside_offset),
inside_offset)
def get_label_function(self, sample_pos, sample_scale):
# Generate label function
train_y = TensorList()
target_center_norm = (self.pos - sample_pos) / (self.img_support_sz *
sample_scale)
for sig, sz, ksz in zip(self.sigma, self.feature_sz, self.kernel_size):
center = sz * target_center_norm + 0.5 * np.array(
[(ksz[0] + 1) % 2, (ksz[1] + 1) % 2], 'float32')
train_y.append(dcf.label_function_spatial(sz, sig, center))
return train_y
def extract_sample(self,
im: np.ndarray,
pos: np.ndarray,
scales,
sz: np.ndarray,
debug_save_name):
return self.params.features.extract(im, pos, scales, sz,
debug_save_name)
def extract_processed_sample(self,
im: np.ndarray,
pos: np.ndarray,
scales,
sz: np.ndarray,
debug_save_name=None) -> (TensorList,
TensorList):
x = self.extract_sample(im, pos, scales, sz, debug_save_name)
return self.preprocess_sample(self.project_sample(x))
def apply_filter(self, sample_x: TensorList):
with fluid.dygraph.guard():
sample_x = sample_x.apply(n2p)
filter = self.filter.apply(n2p)
return operation.conv2d(sample_x, filter, mode='same').numpy()
def init_projection_matrix(self, x):
# Set if using projection matrix
self.params.use_projection_matrix = getattr(
self.params, 'use_projection_matrix', True)
if self.params.use_projection_matrix:
self.compressed_dim = self.fparams.attribute('compressed_dim', None)
proj_init_method = getattr(self.params, 'proj_init_method', 'pca')
if proj_init_method == 'pca':
raise NotImplementedError
elif proj_init_method == 'randn':
with fluid.dygraph.guard():
self.projection_matrix = TensorList([
None if cdim is None else layers.gaussian_random(
(cdim, ex.shape[1], 1, 1), 0.0,
1 / math.sqrt(ex.shape[1])).numpy()
for ex, cdim in zip(x, self.compressed_dim)
])
elif proj_init_method == 'np_randn':
rng = np.random.RandomState(0)
self.projection_matrix = TensorList([
None if cdim is None else rng.normal(
size=(cdim, ex.shape[1], 1, 1),
loc=0.0,
scale=1 / math.sqrt(ex.shape[1])).astype('float32')
for ex, cdim in zip(x, self.compressed_dim)
])
elif proj_init_method == 'ones':
self.projection_matrix = TensorList([
None if cdim is None else
np.ones((cdim, ex.shape[1], 1, 1),
'float32') / math.sqrt(ex.shape[1])
for ex, cdim in zip(x, self.compressed_dim)
])
else:
self.compressed_dim = x.size(1)
self.projection_matrix = TensorList([None] * len(x))
def preprocess_sample(self, x: TensorList) -> (TensorList, TensorList):
if getattr(self.params, '_feature_window', False):
x = x * self.feature_window
return x
def init_label_function(self, train_x):
# Allocate label function
self.y = TensorList([
np.zeros(
[self.params.sample_memory_size, 1, x.shape[2], x.shape[3]],
'float32') for x in train_x
])
# Output sigma factor
output_sigma_factor = self.fparams.attribute('output_sigma_factor')
self.sigma = output_sigma_factor * np.ones((2, ), 'float32') * (
self.feature_sz / self.img_support_sz *
self.base_target_sz).apply(np.prod).apply(np.sqrt)
# Center pos in normalized coords
target_center_norm = (self.pos - np.round(self.pos)) / (
self.target_scale * self.img_support_sz)
# Generate label functions
for y, sig, sz, ksz, x in zip(self.y, self.sigma, self.feature_sz,
self.kernel_size, train_x):
center_pos = sz * target_center_norm + 0.5 * np.array(
[(ksz[0] + 1) % 2, (ksz[1] + 1) % 2], 'float32')
for i, T in enumerate(self.transforms[:x.shape[0]]):
sample_center = center_pos + np.array(
T.shift, 'float32') / self.img_support_sz * sz
y[i] = dcf.label_function_spatial(sz, sig, sample_center)
# Return only the ones to use for initial training
return TensorList([y[:x.shape[0]] for y, x in zip(self.y, train_x)])
def init_memory(self, train_x):
# Initialize first-frame training samples
self.num_init_samples = train_x.size(0)
self.init_sample_weights = TensorList(
[np.ones(x.shape[0], 'float32') / x.shape[0] for x in train_x])
self.init_training_samples = train_x
# Sample counters and weights
self.num_stored_samples = self.num_init_samples.copy()
self.previous_replace_ind = [None] * len(self.num_stored_samples)
self.sample_weights = TensorList([
np.zeros(self.params.sample_memory_size, 'float32') for x in train_x
])
for sw, init_sw, num in zip(self.sample_weights,
self.init_sample_weights,
self.num_init_samples):
sw[:num] = init_sw
# Initialize memory
self.training_samples = TensorList(
[[np.zeros([cdim, x.shape[2], x.shape[3]], 'float32')] *
self.params.sample_memory_size
for x, cdim in zip(train_x, self.compressed_dim)])
def init_learning(self):
# Get window function
self.feature_window = TensorList(
[dcf.hann2d(sz) for sz in self.feature_sz])
# Filter regularization
self.filter_reg = self.fparams.attribute('filter_reg')
# Activation function after the projection matrix (phi_1 in the paper)
projection_activation = getattr(self.params, 'projection_activation',
'none')
if isinstance(projection_activation, tuple):
projection_activation, act_param = projection_activation
if projection_activation == 'none':
self.projection_activation = lambda x: x
elif projection_activation == 'relu':
self.projection_activation = layers.relu
elif projection_activation == 'elu':
self.projection_activation = layers.elu
elif projection_activation == 'mlu':
self.projection_activation = lambda x: layers.elu(leaky_relu(x, 1 / act_param), act_param)
else:
raise ValueError('Unknown activation')
# Activation function after the output scores (phi_2 in the paper)
response_activation = getattr(self.params, 'response_activation',
'none')
if isinstance(response_activation, tuple):
response_activation, act_param = response_activation
if response_activation == 'none':
self.response_activation = lambda x: x
elif response_activation == 'relu':
self.response_activation = layers.relu
elif response_activation == 'elu':
self.response_activation = layers.elu
elif response_activation == 'mlu':
self.response_activation = lambda x: layers.elu(leaky_relu(x, 1 / act_param), act_param)
else:
raise ValueError('Unknown activation')
def generate_init_samples(self, im: np.ndarray) -> TensorList:
"""Generate augmented initial samples."""
# Compute augmentation size
aug_expansion_factor = getattr(self.params,
'augmentation_expansion_factor', None)
aug_expansion_sz = self.img_sample_sz.copy()
aug_output_sz = None
if aug_expansion_factor is not None and aug_expansion_factor != 1:
aug_expansion_sz = (self.img_sample_sz *
aug_expansion_factor).astype('long')
aug_expansion_sz += (
aug_expansion_sz - self.img_sample_sz.astype('long')) % 2
aug_expansion_sz = aug_expansion_sz.astype('float32')
aug_output_sz = self.img_sample_sz.astype('long').tolist()
# Random shift operator
get_rand_shift = lambda: None
random_shift_factor = getattr(self.params, 'random_shift_factor', 0)
if random_shift_factor > 0:
get_rand_shift = lambda: ((np.random.uniform(size=[2]) - 0.5) * self.img_sample_sz * random_shift_factor).astype('long').tolist()
        # Create transformations
self.transforms = [augmentation.Identity(aug_output_sz)]
if 'shift' in self.params.augmentation:
self.transforms.extend([
augmentation.Translation(shift, aug_output_sz)
for shift in self.params.augmentation['shift']
])
if 'relativeshift' in self.params.augmentation:
get_absolute = lambda shift: (np.array(shift, 'float32') * self.img_sample_sz / 2).astype('long').tolist()
self.transforms.extend([
augmentation.Translation(get_absolute(shift), aug_output_sz)
for shift in self.params.augmentation['relativeshift']
])
if 'fliplr' in self.params.augmentation and self.params.augmentation[
'fliplr']:
self.transforms.append(
augmentation.FlipHorizontal(aug_output_sz, get_rand_shift()))
if 'blur' in self.params.augmentation:
self.transforms.extend([
augmentation.Blur(sigma, aug_output_sz, get_rand_shift())
for sigma in self.params.augmentation['blur']
])
if 'scale' in self.params.augmentation:
self.transforms.extend([
augmentation.Scale(scale_factor, aug_output_sz,
get_rand_shift())
for scale_factor in self.params.augmentation['scale']
])
if 'rotate' in self.params.augmentation:
self.transforms.extend([
augmentation.Rotate(angle, aug_output_sz, get_rand_shift())
for angle in self.params.augmentation['rotate']
])
# Generate initial samples
init_samples = self.params.features.extract_transformed(
im, self.pos, self.target_scale, aug_expansion_sz, self.transforms)
        # Remove augmented samples for features that do not use augmentation
for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):
if not use_aug:
init_samples[i] = init_samples[i][0:1]
# Add dropout samples
if 'dropout' in self.params.augmentation:
num, prob = self.params.augmentation['dropout']
self.transforms.extend(self.transforms[:1] * num)
with fluid.dygraph.guard():
for i, use_aug in enumerate(
self.fparams.attribute('use_augmentation')):
if use_aug:
init_samples[i] = np.concatenate([
init_samples[i], dropout2d(
layers.expand(
n2p(init_samples[i][0:1]), (num, 1, 1, 1)),
prob,
is_train=True).numpy()
])
return init_samples
def init_optimization(self, train_x, init_y):
# Initialize filter
filter_init_method = getattr(self.params, 'filter_init_method', 'zeros')
self.filter = TensorList([
np.zeros([1, cdim, sz[0], sz[1]], 'float32')
for x, cdim, sz in zip(train_x, self.compressed_dim,
self.kernel_size)
])
if filter_init_method == 'zeros':
pass
elif filter_init_method == 'ones':
for idx, f in enumerate(self.filter):
self.filter[idx] = np.ones(f.shape,
'float32') / np.prod(f.shape)
elif filter_init_method == 'np_randn':
rng = np.random.RandomState(0)
for idx, f in enumerate(self.filter):
self.filter[idx] = rng.normal(
size=f.shape, loc=0,
scale=1 / np.prod(f.shape)).astype('float32')
elif filter_init_method == 'randn':
for idx, f in enumerate(self.filter):
with fluid.dygraph.guard():
self.filter[idx] = layers.gaussian_random(
f.shape, std=1 / np.prod(f.shape)).numpy()
else:
raise ValueError('Unknown "filter_init_method"')
# Get parameters
self.params.update_projection_matrix = getattr(
self.params, 'update_projection_matrix',
True) and self.params.use_projection_matrix
optimizer = getattr(self.params, 'optimizer', 'GaussNewtonCG')
# Setup factorized joint optimization
if self.params.update_projection_matrix:
self.joint_problem = FactorizedConvProblem(
self.init_training_samples, init_y, self.filter_reg,
self.fparams.attribute('projection_reg'), self.params,
self.init_sample_weights, self.projection_activation,
self.response_activation)
# Variable containing both filter and projection matrix
joint_var = self.filter.concat(self.projection_matrix)
# Initialize optimizer
analyze_convergence = getattr(self.params, 'analyze_convergence',
False)
if optimizer == 'GaussNewtonCG':
self.joint_optimizer = GaussNewtonCG(
self.joint_problem,
joint_var,
plotting=(self.params.debug >= 3),
analyze=True,
fig_num=(12, 13, 14))
elif optimizer == 'GradientDescentL2':
self.joint_optimizer = GradientDescentL2(
self.joint_problem,
joint_var,
self.params.optimizer_step_length,
self.params.optimizer_momentum,
plotting=(self.params.debug >= 3),
debug=analyze_convergence,
fig_num=(12, 13))
# Do joint optimization
if isinstance(self.params.init_CG_iter, (list, tuple)):
self.joint_optimizer.run(self.params.init_CG_iter)
else:
self.joint_optimizer.run(self.params.init_CG_iter //
self.params.init_GN_iter,
self.params.init_GN_iter)
# Get back filter and optimizer
len_x = len(self.joint_optimizer.x)
self.filter = self.joint_optimizer.x[:len_x // 2] # w2 in paper
self.projection_matrix = self.joint_optimizer.x[len_x //
2:] # w1 in paper
if analyze_convergence:
opt_name = 'CG' if getattr(self.params, 'CG_optimizer',
True) else 'GD'
for val_name, values in zip(['loss', 'gradient'], [
self.joint_optimizer.losses,
self.joint_optimizer.gradient_mags
]):
val_str = ' '.join(
['{:.8e}'.format(v.item()) for v in values])
file_name = '{}_{}.txt'.format(opt_name, val_name)
with open(file_name, 'a') as f:
f.write(val_str + '\n')
raise RuntimeError('Exiting')
# Re-project samples with the new projection matrix
compressed_samples = self.project_sample(self.init_training_samples,
self.projection_matrix)
for train_samp, init_samp in zip(self.training_samples,
compressed_samples):
for idx in range(init_samp.shape[0]):
train_samp[idx] = init_samp[idx]
self.hinge_mask = None
# Initialize optimizer
self.conv_problem = ConvProblem(self.training_samples, self.y,
self.filter_reg, self.sample_weights,
self.response_activation)
if optimizer == 'GaussNewtonCG':
self.filter_optimizer = ConjugateGradient(
self.conv_problem,
self.filter,
fletcher_reeves=self.params.fletcher_reeves,
direction_forget_factor=self.params.direction_forget_factor,
debug=(self.params.debug >= 3),
fig_num=(12, 13))
elif optimizer == 'GradientDescentL2':
self.filter_optimizer = GradientDescentL2(
self.conv_problem,
self.filter,
self.params.optimizer_step_length,
self.params.optimizer_momentum,
debug=(self.params.debug >= 3),
fig_num=12)
# Transfer losses from previous optimization
if self.params.update_projection_matrix:
self.filter_optimizer.residuals = self.joint_optimizer.residuals
self.filter_optimizer.losses = self.joint_optimizer.losses
if not self.params.update_projection_matrix:
self.filter_optimizer.run(self.params.init_CG_iter)
# Post optimization
self.filter_optimizer.run(self.params.post_init_CG_iter)
self.filter = self.filter_optimizer.x
# Free memory
del self.init_training_samples
if self.params.use_projection_matrix:
del self.joint_problem, self.joint_optimizer
def project_sample(self, x: TensorList, proj_matrix=None):
# Apply projection matrix
if proj_matrix is None:
proj_matrix = self.projection_matrix
with fluid.dygraph.guard():
return operation.conv2d(x.apply(n2p), proj_matrix.apply(n2p)).apply(
self.projection_activation).numpy()
def get_iounet_box(self, pos, sz, sample_pos, sample_scale):
"""All inputs in original image coordinates"""
box_center = (pos - sample_pos) / sample_scale + (self.iou_img_sample_sz
- 1) / 2
box_sz = sz / sample_scale
target_ul = box_center - (box_sz - 1) / 2
return np.concatenate([np.flip(target_ul, 0), np.flip(box_sz, 0)])
def get_iou_features(self):
return self.params.features.get_unique_attribute('iounet_features')
def get_iou_backbone_features(self):
return self.params.features.get_unique_attribute(
'iounet_backbone_features')
def init_iou_net(self):
# Setup IoU net
self.iou_predictor = self.params.features.get_unique_attribute(
'iou_predictor')
# Get target boxes for the different augmentations
self.iou_target_box = self.get_iounet_box(self.pos, self.target_sz,
self.pos.round(),
self.target_scale)
target_boxes = TensorList()
if self.params.iounet_augmentation:
for T in self.transforms:
if not isinstance(
T, (augmentation.Identity, augmentation.Translation,
augmentation.FlipHorizontal,
augmentation.FlipVertical, augmentation.Blur)):
break
target_boxes.append(self.iou_target_box + np.array(
[T.shift[1], T.shift[0], 0, 0]))
else:
target_boxes.append(self.iou_target_box.copy())
target_boxes = np.concatenate(target_boxes.view(1, 4), 0)
# Get iou features
iou_backbone_features = self.get_iou_backbone_features()
# Remove other augmentations such as rotation
iou_backbone_features = TensorList(
[x[:target_boxes.shape[0], ...] for x in iou_backbone_features])
# Extract target feat
with fluid.dygraph.guard():
iou_backbone_features = iou_backbone_features.apply(n2p)
target_boxes = n2p(target_boxes)
target_feat = self.iou_predictor.get_filter(iou_backbone_features,
target_boxes)
self.target_feat = TensorList(
[layers.reduce_mean(x, 0).numpy() for x in target_feat])
if getattr(self.params, 'iounet_not_use_reference', False):
self.target_feat = TensorList([
np.full_like(tf, np.linalg.norm(tf) / tf.size)  # tf is a numpy array here, so use numpy's norm and size
for tf in self.target_feat
])
def optimize_boxes(self, iou_features, init_boxes):
with fluid.dygraph.guard():
# Optimize iounet boxes
init_boxes = np.reshape(init_boxes, (1, -1, 4))
step_length = self.params.box_refinement_step_length
target_feat = self.target_feat.apply(n2p)
iou_features = iou_features.apply(n2p)
output_boxes = n2p(init_boxes)
for f in iou_features:
f.stop_gradient = False
for i_ in range(self.params.box_refinement_iter):
# forward pass
bb_init = output_boxes
bb_init.stop_gradient = False
outputs = self.iou_predictor.predict_iou(target_feat,
iou_features, bb_init)
if isinstance(outputs, (list, tuple)):
outputs = outputs[0]
outputs.backward()
# Update proposal
bb_init_np = bb_init.numpy()
bb_init_gd = bb_init.gradient()
output_boxes = bb_init_np + step_length * bb_init_gd * np.tile(
bb_init_np[:, :, 2:], (1, 1, 2))
output_boxes = n2p(output_boxes)
step_length *= self.params.box_refinement_step_decay
return layers.reshape(output_boxes, (
-1, 4)).numpy(), layers.reshape(outputs, (-1, )).numpy()
def refine_target_box(self,
sample_pos,
sample_scale,
scale_ind,
update_scale=True):
# Initial box for refinement
init_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos,
sample_scale)
# Extract features from the relevant scale
iou_features = self.get_iou_features()
iou_features = TensorList(
[x[scale_ind:scale_ind + 1, ...] for x in iou_features])
init_boxes = np.reshape(init_box, (1, 4)).copy()
rand_fn = lambda a, b: np.random.rand(a, b).astype('float32')
if self.params.num_init_random_boxes > 0:
# Get random initial boxes
square_box_sz = np.sqrt(init_box[2:].prod())
rand_factor = square_box_sz * np.concatenate([
self.params.box_jitter_pos * np.ones(2),
self.params.box_jitter_sz * np.ones(2)
])
minimal_edge_size = init_box[2:].min() / 3
rand_bb = (rand_fn(self.params.num_init_random_boxes, 4) - 0.5
) * rand_factor
new_sz = np.clip(init_box[2:] + rand_bb[:, 2:], minimal_edge_size,
1e10)
new_center = (init_box[:2] + init_box[2:] / 2) + rand_bb[:, :2]
init_boxes = np.concatenate([new_center - new_sz / 2, new_sz], 1)
init_boxes = np.concatenate(
[np.reshape(init_box, (1, 4)), init_boxes])
# Refine boxes by maximizing iou
output_boxes, output_iou = self.optimize_boxes(iou_features, init_boxes)
# Remove weird boxes with extreme aspect ratios
output_boxes[:, 2:] = np.clip(output_boxes[:, 2:], 1, 1e10)
aspect_ratio = output_boxes[:, 2] / output_boxes[:, 3]
keep_ind = (aspect_ratio < self.params.maximal_aspect_ratio) * \
(aspect_ratio > 1 / self.params.maximal_aspect_ratio)
output_boxes = output_boxes[keep_ind, :]
output_iou = output_iou[keep_ind]
# If no box found
if output_boxes.shape[0] == 0:
return
# Take average of top k boxes
k = getattr(self.params, 'iounet_k', 5)
topk = min(k, output_boxes.shape[0])
inds = np.argsort(-output_iou)[:topk]
predicted_box = np.mean(output_boxes[inds, :], axis=0)
predicted_iou = np.mean(np.reshape(output_iou, (-1, 1))[inds, :])
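# NOTE (editorial sketch, not part of the original file): the method is cut off above.
# A typical continuation inverts get_iounet_box() to map the averaged box back to image
# coordinates and updates the tracker state; `self.base_target_sz` is an assumed attribute.
new_pos = np.flip(predicted_box[:2] + (predicted_box[2:] - 1) / 2, 0)
new_pos = (new_pos - (self.iou_img_sample_sz - 1) / 2) * sample_scale + sample_pos
new_target_sz = np.flip(predicted_box[2:], 0) * sample_scale
new_scale = np.sqrt(np.prod(new_target_sz) / np.prod(self.base_target_sz))
self.pos = new_pos.copy()
self.target_sz = new_target_sz
if update_scale: self.target_scale = new_scale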
"""
CS6476: Problem Set 4 Tests
"""
import numpy as np
import cv2
import unittest
import ps4
INPUT_DIR = "input_images/test_images/"
class Part1(unittest.TestCase):
@classmethod
def setUpClass(self):
self.input_imgs_1 = ['test_lk1.png', 'test_lk3.png', 'test_lk5.png']
self.input_imgs_2 = ['test_lk2.png', 'test_lk4.png', 'test_lk6.png']
self.delta_c = [0, 0, -1]
self.delta_r = [0, -1, -1]
self.r_val = [14, 12, 14]
self.c_val = [15, 16, 15]
self.cb = [(28, 30), (24, 32), (28, 30)]
self.k_size = 15
self.k_type = 'uniform'
def test_optic_flow_LK(self):
for i in range(3):
f1 = self.input_imgs_1[i]
f2 = self.input_imgs_2[i]
img1 = cv2.imread(INPUT_DIR + f1, 0) / 255.
img2 = cv2.imread(INPUT_DIR + f2, 0) / 255.
u, v = ps4.optic_flow_lk(img1.copy(), img2.copy(),
self.k_size, self.k_type, 1.)
r = self.r_val[i]
c = self.c_val[i]
d_c = self.delta_c[i]
d_r = self.delta_r[i]
center_box = self.cb[i]
u_mean = np.mean(u[r:r + center_box[0],
c:c + center_box[1]])
check_u = abs(u_mean - d_c) <= 0.5
error_msg = "Average of U values in the area where there is " \
"movement is greater than the allowed amount."
self.assertTrue(check_u, error_msg)
v_mean = np.mean(v[r:r + center_box[0],
c:c + center_box[1]])
check_v = abs(v_mean - d_r) <= 0.5
error_msg = "Average of V values in the area where there is " \
"movement is greater than the allowed amount."
self.assertTrue(check_v, error_msg)
class Part2(unittest.TestCase):
def test_reduce(self):
input_imgs = ['test_reduce1_img.npy', 'test_reduce2_img.npy',
'test_reduce3_img.npy']
ref_imgs = ['test_reduce1_ref.npy', 'test_reduce2_ref.npy',
'test_reduce3_ref.npy']
for i in range(3):
f1 = input_imgs[i]
f2 = ref_imgs[i]
test_array = np.load(INPUT_DIR + f1)
reduced = ps4.reduce_image(test_array.copy())
ref_reduced = np.load(INPUT_DIR + f2)
correct = np.allclose(reduced, ref_reduced, atol=0.05)
self.assertTrue(correct, "Output does not match the reference "
"solution.")
def test_expand(self):
input_imgs = ['test_expand1_img.npy', 'test_expand2_img.npy',
'test_expand3_img.npy']
ref_imgs = ['test_expand1_ref.npy', 'test_expand2_ref.npy',
'test_expand3_ref.npy']
for i in range(3):
f1 = input_imgs[i]
f2 = ref_imgs[i]
test_array = np.load(INPUT_DIR + f1)
expanded = ps4.expand_image(test_array.copy())
ref_expanded = np.load(INPUT_DIR + f2)
correct = np.allclose(expanded, ref_expanded, atol=0.05)
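# NOTE (editorial sketch): the test is cut off here; mirroring test_reduce above,
# it would end by asserting on `correct`.
self.assertTrue(correct, "Output does not match the reference "
"solution.")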
import glob
import numpy as np
import pandas as pd
from shapely.geometry import LineString,MultiLineString,Point,MultiPoint
from shapely.ops import linemerge
import pyproj
from sklearn.ensemble import RandomForestClassifier,ExtraTreesClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.svm import SVC
import xgboost
from tqdm import tqdm
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix,accuracy_score
import pickle
import os
import argparse
np.random.seed(10)
from param import *
# get an ensemble of 5 classifiers from scikit-learn, i.e. random_forest, extra_tree, svc, KNeighbours
# and an xgboost classifier
# the parameters are tuned for this dataset; class_weight is set to 'balanced' because the start and end
# goals have different distributions
def get_ensemble_of_classifiers(vote=True):
clfs={}
clf1=ExtraTreesClassifier(100,class_weight='balanced',n_jobs=-1)
clfs['extra_tree']=clf1
clf2=RandomForestClassifier(50,class_weight='balanced',n_jobs=-1)
clfs['random_forest']=clf2
clf3=KNeighborsClassifier(20,weights='distance',n_jobs=-1)
clfs['knn']=clf3
clf4=xgboost.XGBClassifier(n_estimators=100,subsample=.7)
clfs['xgb']=clf4
if vote:
clf5=SVC(0.1)
cvote=VotingClassifier(estimators=[('et', clf1), ('rf', clf2), ('kn', clf3),('xgb',clf4),('svc',clf5)], voting='hard')
return {'cvote':cvote}
else:
clf5=SVC(0.1,class_weight='balanced',probability=True)
clfs['svc']=clf5
return clfs
# get the closest and farthest distance for a track to all the goals
def closest_farthest(track):
closest_to_track=[]
farthest_to_track=[]
for i in range(0,goal.shape[0]):
point2=Point(goal[['lon','lat']].values[i])
cd=[]
for item in track:
point1=Point(item)
_,_,distance = geod.inv(point1.x, point1.y, point2.x, point2.y)
cd.append(distance)
closest_to_track.append(np.min(cd))
farthest_to_track.append(np.max(cd))
return closest_to_track,farthest_to_track
# get distance to a goal given a point on the track
def goal_dist(point1):
d={}
for i in range(0,goal.shape[0]):
point2=Point(goal[['lon','lat']].values[i])
angle1,angle2,distance = geod.inv(point1.x, point1.y, point2.x, point2.y)
d[i]=distance
return d.values()
# gets distance features for training and testing
# the feature vector includes the closest and farthest distances to each goal
# and the distance to each goal from the start and end points of the track
def get_distances(df,goal,trim=None):
start,end=Point(df[['lon','lat']].values[0]),Point(df[['lon','lat']].values[-1])
duration=df.elapsedTime_sec.values[-1]
_,_,total_distance_covered = geod.inv(start.x, start.y, end.x, end.y)
distance_to_goal_from_start=goal_dist(start)
distance_to_goal_from_end=goal_dist(end)
closest,farthest=closest_farthest(df[['lon','lat']].values)
return duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,closest,farthest
# similar to the get_distances function above but additionally trims the start and end points randomly
def get_distances_multi(df,goal):
# how much to trim from start
trim_start=np.random.randint(TRIM_START,TRIM_END)
idx_s=np.where(df.elapsedTime_sec>trim_start)[0][0]
start=Point(df[['lon','lat']].values[idx_s])
# how much to trim from end
trim_end=np.random.randint(TRIM_START,TRIM_END)
idx_e=np.where(df.elapsedTime_sec>df.elapsedTime_sec.values[-1]-trim_end)[0][0]
end=Point(df[['lon','lat']].values[idx_e])
_,_,total_distance_covered = geod.inv(start.x, start.y, end.x, end.y)
distance_to_goal_from_start=goal_dist(start)
distance_to_goal_from_end=goal_dist(end)
duration=df.elapsedTime_sec.values[idx_e]
closest,farthest=closest_farthest(df[['lon','lat']].values[idx_s:idx_e])
return duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,closest,farthest
# get the train feature vectors. The feature vectors are aggressively augmented,
# i.e. for each track 20 variants with random trims from the start and end are created
# other features such as age, gender, duration, velocity and total distance covered are also included
def get_train_feat(datafiles):
print ('Multi trim featurees 20 samp in each')
xfeat={}
for f in tqdm(datafiles):
for i in range(0,20):
df = pd.read_csv(f)
if i==0:
duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,cd,fd=get_distances(df,goal,trim=None)
else:
duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,cd,fd=get_distances_multi(df,goal)
feat=[duration,total_distance_covered]
feat.extend(distance_to_goal_from_start)
feat.extend(distance_to_goal_from_end)
feat.extend(cd)
feat.extend(fd)
if df.tripID.values[0] not in xfeat.keys():
xfeat[df.tripID.values[0]]=[feat]
else:
xfeat[df.tripID.values[0]].append(feat)
train_info['gender']=pd.factorize(train_info['gender'])[0]
train_info['age']=train_info['age'].fillna(train_info['age'].mean())
features=[]
labels_start=[]
labels_end=[]
for i,k in enumerate(train_info.tripID.values):
for item in xfeat[k]:
feat=train_info.loc[k][['age','gender']].values.tolist()
duration=item[0]
velocity=item[1]/duration
feat.extend([duration,velocity])
feat.extend(item)
features.append(feat)
labels_start.append(train_info.iloc[i]['startLocID'])
labels_end.append(train_info.iloc[i]['destLocID'])
features=np.asarray(features).astype('float32')
labels_start=np.asarray(labels_start).astype('int')
labels_end=np.asarray(labels_end).astype('int')
if SHUFFLE:
idx = np.arange(len(features))  # use an index array (a range object cannot be shuffled or used for fancy indexing)
np.random.shuffle(idx)
features,labels_start,labels_end=features[idx],labels_start[idx],labels_end[idx]
return features,labels_start,labels_end
# get the test features... no augmentation since in the competition the features are already trimmed
def get_test_feat(datafiles):
xfeat={}
for f in tqdm(datafiles):
df = pd.read_csv(f)
duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,cd,fd=get_distances(df,goal,trim=None)
feat=[duration,total_distance_covered]
feat.extend(distance_to_goal_from_start)
feat.extend(distance_to_goal_from_end)
feat.extend(cd)
feat.extend(fd)
xfeat[df.tripID.values[0]]=feat
test_info['gender']=pd.factorize(test_info['gender'])[0]
test_info['age']=test_info['age'].fillna(test_info['age'].mean())
features_test=[]
for k in test_info.tripID.values:
feat=test_info.loc[k][['age','gender']].values.tolist()
duration=xfeat[k][0]
velocity=xfeat[k][1]/duration
feat.extend([duration,velocity])
feat.extend(xfeat[k])
features_test.append(feat)
features_test=np.asarray(features_test).astype('float32')
return features_test
# train the ensemble of classifiers
def train_ens(features,slabels):
sc=StandardScaler()
sc.fit(features)
clfs=get_ensemble_of_classifiers(vote=False)
ft=sc.transform(features)
for k in clfs:
clfs[k].fit(ft,slabels)
print ('train full data...done..with ',k)
return sc,clfs
# predict from the ensemble and create submission
def submit_ens(clfs,features_test,ks,subname):
y_pred=[]
for key in clfs.keys():
y_pred_i = clfs[key].predict_proba(features_test)
y_pred.append(y_pred_i)
y_pred = np.asarray(y_pred)
y=np.mean(y_pred,axis=0)
y_pred = np.argmax(y,axis=-1)
preds = [list(ks[item]) for item in y_pred]
np.savetxt(subname,preds, fmt='%d',delimiter=',')
print ('done...')
# do cross-validation on the ensemble so we know what kind of score we will get
# note: there is no weighting of the tracks as in the competition metric; simply get the accuracy score and confusion matrix
def cross_val_ens(features,slabels,dirname):
result={}
clfs = get_ensemble_of_classifiers(vote=False)
sc=StandardScaler()
ft=sc.fit_transform(features)
y_pred=[]
for key in clfs.keys():
y_pred_i = cross_val_predict(clfs[key], ft,slabels, cv=5,method='predict_proba')
y_pred.append(y_pred_i)
print ('cross val ...done...for ', key)
y_pred = np.argmax(np.mean(y_pred, axis=0), axis=-1)
from gensim.models.keyedvectors import KeyedVectors
import json
from tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard, ModelCheckpoint, EarlyStopping
from tensorflow.keras.layers import *
from tensorflow.keras.metrics import *
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import *
from tensorflow.keras import backend as K
import os
from random import shuffle
import re
import time
from tqdm import tqdm
import traceback
import numpy as np
import pandas as pd
from argparse import ArgumentParser
import random
class RewardLearning():
def __init__(self, fold, seed, action_space, metric):
self.reward_report_template = 'reward_report_{}_{}_.*.csv'
word_embed_file_path='./damd_multiwoz/data/embeddings/glove.6B.100d.w2v.txt'
self.train_val_fraction=0.8
self.EMBED_DIM=100
self.HIDDEN_DIM=100
self.MAX_POP=10
self.MAX_TIME_STEP=30
self.MAX_GOAL_LEN=50
self.MAX_STATE_LEN=50
self.MAX_ACT_LEN=50
self.reduce_lr_patience = 10
self.es_patience = 25
self.train_reward_split=[0.8,0.9][1]
self.batch_size = 50
self.num_epoch = 100
self.fold = fold
self.metric = metric
self.TRAIN_ON=action_space
self.root_path = './damd_multiwoz'
self.dataset=json.loads(open(os.path.join(self.root_path,'data/multi-woz-processed/data_for_damd_reward_{}.json'.format(self.fold)),'r').read())
self.glove_kv = KeyedVectors.load_word2vec_format(word_embed_file_path, binary=False, unicode_errors='ignore')
self.reward_folder_path= os.path.join(self.root_path,'data/multi-woz-oppe/reward')
self.data_for_damd = json.loads(open(os.path.join(self.root_path,'data/multi-woz-processed/data_for_damd.json'), 'r').read())
self.processed_reward_rollouts = None
self.embed_cache = {}
def metric_score(self, success, match, bleu):
return success + match + 2*bleu/100
def load_reward_rollouts(self):
reward_record_file_prefix = self.reward_report_template.format(self.fold, self.metric)
print('reward_record_file_prefix:',reward_record_file_prefix)
rollouts_processed = {}
for file in os.listdir(self.reward_folder_path):
if re.search(reward_record_file_prefix,file):
print('file:',file)
reward_record_path = os.path.join(self.reward_folder_path,file)
df = pd.read_csv(reward_record_path)
for _,row in df.iterrows():
dial_id = row['dial_id']
rollout = json.loads(row['rollout'])
turn_nums = [int(z) for z in rollout.keys()]
turn_nums = sorted(turn_nums)
if dial_id not in rollouts_processed:
rollouts_processed[dial_id]={}
rollouts_processed[dial_id]['gen']=[]
dia_rollout={}
rollouts_processed[dial_id]['gen'].append(dia_rollout)
dia_rollout['score'] = self.metric_score(row['success'],row['match'],row['bleu'])
dia_rollout['rollout']=[]
for turn_num in turn_nums:
true_act_prob = [1.]
if 'aspn_prob' in rollout[str(turn_num)]:
true_act_prob = np.exp(rollout[str(turn_num)]['aspn_prob']).tolist()
dia_rollout['rollout'].append({
'tn':turn_num,
'act':rollout[str(turn_num)]['aspn_gen'],
'true_act':rollout[str(turn_num)]['aspn'],
'resp':rollout[str(turn_num)]['resp_gen'],
'true_act_prob':true_act_prob
})
if 'gt' not in rollouts_processed[dial_id]:
rollouts_processed[dial_id]['gt']={}
rollouts_processed[dial_id]['gt']['score']=4
rollouts_processed[dial_id]['gt']['rollout']=[]
for turn_num in turn_nums:
rollouts_processed[dial_id]['gt']['rollout'].append({
'tn':turn_num,
'act':rollout[str(turn_num)]['aspn'],
'resp':rollout[str(turn_num)]['resp'],
'true_act':rollout[str(turn_num)]['aspn'],
'true_act_prob':[1]
})
self.processed_reward_rollouts = rollouts_processed
self.dial_ids = list(self.processed_reward_rollouts.keys())
self.load_gt_dia_logs(self.dial_ids)
return rollouts_processed
def load_gt_dia_logs(self, dial_ids):
gt_dia_logs={}
for dial_id in dial_ids:
goal = self.goal_as_st(self.dataset[dial_id]['goal'])
gt_dia_log={
'goal':goal
}
gt_dia_logs[dial_id]=gt_dia_log
for turn in self.dataset[dial_id]['log']:
gt_dia_log[turn['turn_num']]={}
gt_dia_log[turn['turn_num']]['state']='begin '+turn['cons_delex']+' end'
self.gt_dia_logs = gt_dia_logs
def pad_sentence(self, token_embeds,max_seq_len):
token_embeds = token_embeds.copy()
token_embeds = token_embeds[:max_seq_len].tolist()
for i in range(max_seq_len-len(token_embeds)):
token_embeds.append(np.zeros(self.EMBED_DIM))
token_embeds = np.array(token_embeds)
return token_embeds
def pad_time_step(self, sentence_embeds,max_seq_len):
sentence_embeds = sentence_embeds[:self.MAX_TIME_STEP]
time_padded_sentences = np.array(sentence_embeds)
if self.MAX_TIME_STEP>len(sentence_embeds):
pad = np.zeros((self.MAX_TIME_STEP-len(sentence_embeds),max_seq_len,self.EMBED_DIM))
time_padded_sentences = np.concatenate([sentence_embeds,pad])
return time_padded_sentences
def get_embedding(self, token):
token = token.lower()
token = token.replace('reqt','request')\
.replace('arriveby','arrive_by')\
.replace('towninfo','town_info')\
.replace('pricerange','price_range')\
.replace('leaveat','leave_at')\
.replace('mutliple','multiple')\
.replace('dontcare','dont_care')\
.replace('-','')\
.replace('addres','address')\
.replace('addressss','address')\
.replace('addresss','address')
token = token.strip()
if token in self.embed_cache:
return self.embed_cache[token]
if token in self.glove_kv:
embedding = self.glove_kv[token]
else:
if '_' in token:
embeds = []
for sub_token in token.split('_'):
embeds.append(self.get_embedding(sub_token))
embedding = np.mean(embeds,axis=0)
else:
#print('token not in embed:',token)
embedding = self.glove_kv['unk']
self.embed_cache[token]=embedding
return embedding
def tokens_to_embeddings(self, tokens):
embeddings = []
for token in tokens:
embeddings.append(self.get_embedding(token))
return np.array(embeddings)
def tokenize(self, sentence):
sentence=sentence.lower()
sentence = sentence.replace('[',' ').replace(']',' ').replace(':','').replace(' ',' ')
return sentence.split()
def goal_as_st(self, goal):
return str(goal).replace("'",' ')\
.replace(',',' , ').replace('{',' ')\
.replace('}',' ').replace(' ',' ')
def sample_roll_out(self, dial_id):
start = time.time()
gen_rollouts_info = self.processed_reward_rollouts[dial_id]['gen']
gt_rollout_info = self.processed_reward_rollouts[dial_id]['gt']
rollout_infos = np.random.choice(gen_rollouts_info+[gt_rollout_info], size=2, replace=False)
#print(rollout_infos)
dia_log= self.gt_dia_logs[dial_id]
goal = dia_log['goal']
goal = self.tokenize(goal)
goal = self.tokens_to_embeddings(goal)
goal = self.pad_sentence(goal, self.MAX_GOAL_LEN)
rollout_pairs = []
for rollout_info in rollout_infos:
acts = []
states = []
for turn in rollout_info['rollout']:
tn = turn['tn']
act = turn[self.TRAIN_ON]#turn['act']
if tn not in self.gt_dia_logs[dial_id]:
break
state = self.gt_dia_logs[dial_id][tn]['state']
# if random.uniform(0,1)>0.95:
# print('act:',act)
# print('state:',state)
act = self.tokenize(act)
state = self.tokenize(state)
act = self.tokens_to_embeddings(act)
state = self.tokens_to_embeddings(state)
act = self.pad_sentence(act,self.MAX_ACT_LEN)
state = self.pad_sentence(state,self.MAX_STATE_LEN)
acts.append(act)
states.append(state)
acts=self.pad_time_step(acts,self.MAX_ACT_LEN)
states=self.pad_time_step(states,self.MAX_STATE_LEN)
score=rollout_info['score']
rollout_pairs.append([goal,states,acts,score])
prob = rollout_pairs[0][-1]/(rollout_pairs[0][-1]+rollout_pairs[1][-1]+1e-20)
rollout_pairs[0][-1]=prob
rollout_pairs[1][-1]=1-prob
return rollout_pairs
def get_data_gen(self, sample_roll_out):
def data_gen(dial_ids,batch_size):
try:
s1s = []
a1s = []
g1s = []
s2s = []
a2s = []
g2s = []
probs = []
while True:
shuffle(dial_ids)
for dial_id in dial_ids:
rollout_pair = sample_roll_out(dial_id)
g1,s1,a1,p1=rollout_pair[0]
g2,s2,a2,p2=rollout_pair[1]
s1s.append(s1)
a1s.append(a1)
g1s.append(g1)
s2s.append(s2)
a2s.append(a2)
g2s.append(g2)
probs.append([p1,p2])
if len(s1s)>=batch_size:
s1s = np.array(s1s)
a1s = np.array(a1s)
g1s = np.array(g1s)
s2s = np.array(s2s)
a2s = np.array(a2s)
g2s = np.array(g2s)
#print('as:',np.sum(a1s-a2s))
probs = np.array(probs)
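# NOTE (editorial sketch, not in the original file): the generator is cut off here.
# A plausible continuation yields the paired rollouts with their preference targets
# and then clears the buffers; the exact output structure is an assumption.
yield [g1s, s1s, a1s, g2s, s2s, a2s], probs
s1s, a1s, g1s, s2s, a2s, g2s, probs = [], [], [], [], [], [], []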
'''
Main Author: <NAME>
Corresponding Email: <EMAIL>
'''
import numpy as np
from .base import ClassificationDecider
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import Ridge
from sklearn.utils.validation import (
check_X_y,
check_array,
NotFittedError,
)
from sklearn.utils.multiclass import type_of_target
class SimpleArgmaxAverage(ClassificationDecider):
"""
Decider that averages the posterior probabilities produced by all voters and predicts the argmax class.
"""
def __init__(self, classes=[]):
self.classes = classes
self._is_fitted = False
def fit(
self,
X,
y,
transformer_id_to_transformers,
transformer_id_to_voters,
classes=None,
):
if not isinstance(self.classes, (list, np.ndarray)):
if len(y) == 0:
raise ValueError("Classification Decider classes undefined with no class labels fed to fit")
else:
self.classes = np.unique(y)
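# NOTE (editorial sketch, not in the original file): the method is cut off here.
# A minimal continuation stores the voter mappings and marks the decider as fitted;
# the exact bookkeeping is an assumption.
self.transformer_id_to_transformers = transformer_id_to_transformers
self.transformer_id_to_voters = transformer_id_to_voters
self._is_fitted = True
return self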
from __future__ import print_function
import sys
import numpy as np
import numba.unittest_support as unittest
from numba.compiler import compile_isolated
from numba.numpy_support import from_dtype
from numba import types, njit, typeof
from .support import TestCase, CompilationCache, MemoryLeakMixin
def array_dtype(a):
return a.dtype
def use_dtype(a, b):
return a.view(b.dtype)
def array_itemsize(a):
return a.itemsize
def array_shape(a, i):
return a.shape[i]
def array_strides(a, i):
return a.strides[i]
def array_ndim(a):
return a.ndim
def array_size(a):
return a.size
def array_flags_contiguous(a):
return a.flags.contiguous
def array_flags_c_contiguous(a):
return a.flags.c_contiguous
def array_flags_f_contiguous(a):
return a.flags.f_contiguous
def nested_array_itemsize(a):
return a.f.itemsize
def nested_array_shape(a):
return a.f.shape
def nested_array_strides(a):
return a.f.strides
def nested_array_ndim(a):
return a.f.ndim
def nested_array_size(a):
return a.f.size
def size_after_slicing_usecase(buf, i):
sliced = buf[i]
# Make sure size attribute is not lost
return sliced.size
def array_ctypes_data(arr):
return arr.ctypes.data
class TestArrayAttr(MemoryLeakMixin, TestCase):
def setUp(self):
super(TestArrayAttr, self).setUp()
self.ccache = CompilationCache()
self.a = np.arange(10, dtype=np.int32)
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from pandas.api import types as ptypes
import cudf
from cudf.api import types as types
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
from operator import ge
import numpy as np
from numpy.core.numeric import zeros_like
import pandas as pd
from matplotlib import pyplot as plt
import sys
data = pd.read_csv("geom.csv", header=None)
N = 100
theta = np.radians(12)
print(np.degrees(theta))
c, s = np.cos(theta), np.sin(theta)
R = np.array(((c, -s), (s, c)))
x_range = data.iloc[:, 0].to_numpy()
num = x_range.size
y_top = data.iloc[:, 1].to_numpy()
top = np.vstack((x_range, y_top))
x_range_flip = np.flip(x_range, axis=0)
y_bot = data.iloc[:, 3].to_numpy()
bot = np.vstack((x_range_flip, y_bot))
# combine bot and top then rotate
all = np.hstack((top, bot))
mean = np.mean(all, axis=1)
mean = np.expand_dims(mean, axis=1)
all = np.matmul(R, all - mean) + mean
# get the value back
y_top = all[1, :num]
y_top = np.flip(y_top, axis=0)
x_range = all[0, num:]
y_bot = all[1, num:]
geo = np.zeros((N + 1, N + 1))
dx = 100 / N
dy = 100 / N
def checkPoint(x, y):
result = 0
left = np.where(x_range >= x)[0]
if(left.size == 0):
return 0
left = left[0] - 1
interval = x_range[left+1] - x_range[left]
interp1 = y_top[left] * (x_range[left+1] - x) / interval \
+ y_top[left+1] * (x - x_range[left]) / interval
interp2 = y_bot[left] * (x_range[left+1] - x) / interval \
+ y_bot[left+1] * (x - x_range[left]) / interval
if interp1 >= y and interp2 <= y:
result = 9
return result
for i in range(N):
for j in range(N):
x = i * dx
y = j * dy - 50
geo[i, j] = checkPoint(x, y)
# check invalid and modifiy the cell number
def check_validity(geo):
offset = [1, 0, -1, 0, 1]
valid = 1
for i in range(N):
for j in range(N):
if(geo[i][j] == 9):
zeros = []
for k in range(4):
x = i + offset[k]
y = j + offset[k+1]
if(x < N+1 and x > 0 and y < N+1 and y > 0):
if(geo[x][y] == 0):
zeros.append(k)
if(len(zeros)>2):
print("invalid", i, j)
geo[i][j] = 5
valid = 0
for k in range(len(zeros)):
x = i + offset[zeros[k]]
y = j + offset[zeros[k]+1]  # use the paired offset for the column index
count = 0
for kk in range(4):
xx = x + offset[kk]   # iterate with the inner loop variable kk, not k
yy = y + offset[kk+1]
if(xx < N+1 and xx > 0 and yy < N+1 and yy > 0):
if(geo[xx][yy] == 0):
count = count + 1
if(count <= 2):
geo[x][y] = 9
if(valid):
print("valid")
return True
return False
geo = np.swapaxes(geo, 0, 1)
max_iter = 5
iter = 0
while(iter < max_iter and not check_validity(geo)):
iter = iter + 1
print(iter)
geo_padded = np.zeros((N+1, 2*N + N+1))
"""
.. module:: dst_povm_sampling.py
:synopsis: Sample projective measurements in the way that DST does
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from __future__ import division, absolute_import, print_function, unicode_literals
import numpy as np
from itertools import product
def reseed_choice(a, size=None, replace=True, p=None):
"""Wrapper for the numpy choice function that reseeds before sampling to
ensure that it doesn't make identical choices across different parallel
runs.
"""
np.random.seed()
return np.random.choice(a=a, size=size, replace=replace, p=p)
def x_state(anc_outcome, sys_outcome, phi):
r"""Return the state corresponding to the projective measurement implied by
a particular outcome (:math:`\pm1`) of the x-measurement on the ancilla and
a particular outcome (:math:`\widetilde{\pm}1`) of the x-measurement on the
system:
.. math::
\begin{align}
\vert\psi\rangle&=\cos\frac{\theta}{2}\vert0\rangle+
\sin\frac{\theta}{2}\vert1\rangle \\
\theta&=\begin{cases}\operatorname{arctan2}\left(\pm2\cos\varphi,
\,-\sin^2\varphi\right) & \widetilde{+} \\
0 & \widetilde{-}\end{cases}
\end{align}
:param anc_outcome: :math:`\pm1`, indicates eigenvalue observed on ancilla
x-measurement
:param sys_outcome: :math:`\widetilde{\pm}1`, indicates eigenvalue observed
on system x-measurement
:param phi: The strength of the interaction
:returns: The state represented in the standard computational (z)
basis
"""
theta = np.where(anc_outcome > 0, np.arctan2(2*sys_outcome*np.cos(phi),
-np.sin(phi)**2), 0)
return np.array([np.cos(theta/2), np.sin(theta/2)])
def y_state(anc_outcome, sys_outcome, phi):
r"""Return the state corresponding to the projective measurement implied by
a particular outcome (:math:`\pm1`) of the y-measurement on the ancilla and
a particular outcome on the system (:math:`\widetilde{\pm}1`):
.. math::
\begin{align}
\vert\psi\rangle&=\cos\frac{\theta}{2}\vert0\rangle+
\sin\frac{\theta}{2}\vert1\rangle \\
\theta&=\operatorname{arccos}\left(\widetilde{\pm}
\frac{2\left\{\begin{array}{l r}\sin(\varphi+\pi/4) & + \\
\cos(\varphi+\pi/4) & -\end{array}\right\}^2-1}{2\left\{\begin{array}
{l r}\sin(\varphi+\pi/4) & + \\ \cos(\varphi+\pi/4) & -\end{array}
\right\}^2+1}\right)
\end{align}
:param anc_outcome: :math:`\pm1`, indicates eigenvalue observed on ancilla
y-measurement
:param sys_outcome: :math:`\widetilde{\pm}1`, indicates eigenvalue observed
on system x-measurement
:param phi: The strength of the interaction
:returns: The state represented in the standard computational (z)
basis
"""
sc = np.where(anc_outcome > 0, np.sin(phi + np.pi/4), np.cos(phi + np.pi/4))
theta = np.arccos(sys_outcome*(2*sc**2 - 1)/(2*sc**2 + 1))
return np.array([np.cos(theta/2), np.sin(theta/2)])
def z_state(anc_outcome, phi):
r"""Return the state corresponding to the projective measurement implied by
a particular outcome (:math:`\pm1`) of the z-measurement on the ancilla:
.. math::
\vert\psi\rangle=\frac{\vert0\rangle+e^{\mp i\varphi}\vert1\rangle}
{\sqrt{2}}
:param anc_outcome: :math:`\pm1`, indicates eigenvalue observed on ancilla
z-measurement
:param phi: The strength of the interaction
:returns: The state represented in the standard computational (z)
basis
"""
# (reconstructed from the docstring formula above: (|0> + e^{-i*anc_outcome*phi}|1>)/sqrt(2))
return np.array([(1. + 0.j)*np.abs(anc_outcome),
np.exp(-1.j*anc_outcome*phi)])/np.sqrt(2)
import numpy as np
import matplotlib.pyplot as plt
from numpy import atleast_2d as twod
################################################################################
## PLOTTING FUNCTIONS #########################################################
################################################################################
def plotClassify2D(learner, X, Y, pre=lambda x: x, axis=None, nGrid=128, **kwargs):
"""
Plot data and classifier outputs on two-dimensional data.
This function plots data (X,Y) and learner.predict(X, Y)
together. The learner is evaluated on a dense grid
covering data X, to show its decision boundary.
Parameters
----------
learner : learner object
A trained learner object that inherits from one of
the 'Classify' or 'Regressor' base classes.
X : numpy array
N x M array of data; N = number of data, M = dimension
(number of features) of data.
Y : numpy array
1 x N array containing labels corresponding to data points
in X.
pre : function object (optional)
Function that is applied to X before prediction.
axis : a matplotlib axis / plottable object (optional)
nGrid : density of 2D grid points (default 128)
"""
if twod(X).shape[1] != 2:
raise ValueError('plotClassify2D: function can only be called using two-dimensional data (features)')
# TODO: Clean up code
if axis == None: axis = plt
axis.plot( X[:,0],X[:,1], 'k.', visible=False )
# TODO: can probably replace with final dot plot and use transparency for image (?)
ax = axis.axis()
xticks = np.linspace(ax[0],ax[1],nGrid)
yticks = np.linspace(ax[2],ax[3],nGrid)
grid = np.meshgrid( xticks, yticks )
XGrid = np.column_stack( (grid[0].flatten(), grid[1].flatten()) )
if learner is not None:
YGrid = learner.predict( pre(XGrid) )
#axis.contourf( xticks,yticks,YGrid.reshape( (len(xticks),len(yticks)) ), nClasses )
axis.imshow( YGrid.reshape( (len(xticks),len(yticks)) ), extent=ax, interpolation='nearest',origin='lower',alpha=0.5, aspect='auto' )
cmap = plt.cm.get_cmap()
# TODO: if Soft: predictSoft; get colors for each class from cmap; blend pred with colors & show
#
try: classes = np.array(learner.classes);
except Exception: classes = np.unique(Y)
cvals = (classes - min(classes))/(max(classes)-min(classes)+1e-100)
for i,c in enumerate(classes):
axis.plot( X[Y==c,0],X[Y==c,1], 'ko', color=cmap(cvals[i]), **kwargs )
axis.axis(ax);
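# Example usage (editorial sketch, not part of the original module): train any two-feature
# classifier that follows the 'Classify' interface and hand it to plotClassify2D; the
# knnClassify learner named below is an assumption.
#
#   learner = knnClassify(Xtr[:, :2], Ytr, K=5)
#   plotClassify2D(learner, Xtr[:, :2], Ytr, nGrid=256)
#   plt.show()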
def histy(X,Y,axis=None,**kwargs):
"""
Plot a histogram (using matplotlib.hist) with multiple classes of data
Any additional arguments are passed directly into hist()
Each class of data are plotted as a different color
To specify specific histogram colors, use e.g. facecolor={0:'blue',1:'green',...}
so that facecolor[c] is the color for class c
Related but slightly different appearance to e.g.
matplotlib.hist( [X[Y==c] for c in np.unique(Y)] , histtype='barstacked' )
"""
if axis == None: axis = plt
yvals = np.unique(Y)
nil, bin_edges = np.histogram(X, **kwargs)
C,H = len(yvals),len(nil)
hist = np.zeros( shape=(C,H) )
cmap = plt.cm.get_cmap()
cvals = (yvals - min(yvals))/(max(yvals)-min(yvals)+1e-100)
widthFrac = .25+.75/(1.2+2*np.log10(len(yvals)))
for i,c in enumerate(yvals):
histc,nil = np.histogram(X[Y==c],bins=bin_edges)
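# NOTE (editorial sketch, not in the original file): the function is cut off here. A
# plausible completion stores the per-class counts and draws one bar per class and bin,
# coloured by class, roughly matching the docstring's description.
hist[i,:] = histc
bin_w = bin_edges[1] - bin_edges[0]
for i in range(C):
    axis.bar(bin_edges[:-1] + i*bin_w*widthFrac/C, hist[i,:], width=bin_w*widthFrac/C, color=cmap(cvals[i]))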
from abc import ABC
from abc import abstractmethod
from copy import deepcopy
import itertools
import numpy as np
from autode.bond_lengths import get_avg_bond_length
from autode.calculation import Calculation
from autode.exceptions import AtomsNotFound, NoClosestSpecies, FitFailed
from autode.log import logger
from autode.ts_guess import get_ts_guess
from autode.config import Config
from autode.mol_graphs import is_isomorphic, make_graph
from numpy.polynomial import polynomial
from autode.utils import work_in, NoDaemonPool
from autode.units import KcalMol
from autode.calculation import Calculation
from autode.methods import high_level_method_names
import networkx as nx
from scipy.optimize import minimize, Bounds
def get_closest_species(point, pes):
"""
Given a point on an n-dimensional potential energy surface defined by
indices where the length is the dimension of the surface
Arguments:
pes (autode.pes.PES): Potential energy surface
point (tuple): Index of the current point
Returns:
(autode.complex.ReactantComplex):
"""
if all(index == 0 for index in point):
logger.info('PES is at the first point')
return deepcopy(pes.species[point])
# The indices of the nearest and second nearest points to e.g. n,m in a 2
# dimensional PES
neareast_neighbours = [-1, 0, 1]
next_nearest_neighbours = [-2, -1, 0, 1, 2]
# First attempt to find a species that has been calculated in the nearest
# neighbours
for index_array in [neareast_neighbours, next_nearest_neighbours]:
# Each index array has elements from the most negative to most
# positive. e.g. (-1, -1), (-1, 0) ... (1, 1)
for d_indexes in itertools.product(index_array, repeat=len(point)):
# For e.g. a 2D PES the new index is (n+i, m+j) where
# i, j = d_indexes
new_point = tuple(np.array(point) + np.array(d_indexes))
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import time
from profit.sur.backend.gp_functions import predict_f
from profit.sur.backend.python_kernels import RBF
# Fixing the dimensions of the figure
plt.rcParams['figure.figsize'] = [12, 7]
# Definition of the dimension
d = 1
# Number of samples (training set)
n = 50
# Training input (x)
# should be defined as a matrix with 1 columns
x = np.linspace(start=0, stop=1, num=n).reshape((n, 1))
# Definition of the function
def f(x):
return np.sin(2 * np.pi * x) + np.sin(4 * np.pi * x)
# Plot of the function f
plt.plot(x, f(x))
plt.title('Plot of f(x) = $\sin(2 \pi x) + \sin(4 \pi x)$')
plt.xlabel('x')
plt.ylabel('f(x)')
# Definition of the training data (f_x = observations f(x))
f_x = f(x)
# the noise sigma_n
sigma_n = 1e-2
# Definition of the errors -> we assume that the noise on observations
# follows an independent , identically distributed Gaussian distribution with zero mean and variance sigma_n ^2
epsilon = sigma_n * np.random.randn(n).reshape((-1, 1))
# Observed target variable (f(x) + epsilon)
y = f_x + epsilon
# Plot of the error distribution
#sns.distplot(epsilon)
plt.title('Distribution of $\epsilon$ and the interpolated density')
plt.xlabel('$\epsilon$')
plt.ylabel('Number of values')
# define number of test data points
n_star = 30
# define input test data x_star
x_star = np.linspace(0, 1, n_star).reshape((n_star, 1))
# set the hyper - parameters l and sigma_f
l = 0.2
sigma_f = 1
# Definition of the squared exponential kernel
t = time.time()
kernel = RBF(x, y, l)
elapsed = time.time() - t
print(elapsed)
# Definition of the covariance matrices
t = time.time()
a = np.array([l, sigma_f, sigma_n])
K = RBF(x, x, *a)
K_star2 = RBF(x_star, x_star, *a)
K_star = RBF(x_star, x, *a)
elapsed = time.time() - t
print(elapsed)
# Plot of K
plt.imshow(K)
plt.colorbar()
# Plot K_star2
plt.imshow(K_star2)
plt.colorbar()
# Plot Kstar
plt.imshow(K_star)
plt.colorbar()
# Compute C
C = np.block([[K + (sigma_n / sigma_f) ** 2 * np.eye(n), np.transpose(K_star)], [K_star, K_star2]])
# Plot C
plt.imshow(C)
plt.colorbar()
# Plot function f(x)
plt.figure()
plt.plot(x, f_x, 'r')
# plot of n_prior samples from prior distribution (100 samples). The mean is zero and the covariance is given by K_star2
n_prior = 100
for i in range(0, n_prior):
f_star = np.random.multivariate_normal(np.zeros(n_star), K_star2)
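# (editorial sketch) plot each prior sample over the test inputs; the styling is an assumption
plt.plot(x_star, f_star, alpha=0.4)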
from pathlib import Path
from collections import OrderedDict
from collections import defaultdict
import random
import multiprocessing
import threading
import pickle
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, IterableDataset, DataLoader, WeightedRandomSampler
from Bio import SeqIO
from bioservices import UniProt
import pandas as pd
import re
IUPAC_IDX_AMINO_PAIRS_decoding = list(enumerate([
"A",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"K",
"L",
"M",
"N",
"P",
"Q",
"R",
"S",
"T",
"V",
"W",
"X",
"Y",
"Z",
"-",
'B'
]))
IUPAC_IDX_AMINO_PAIRS = list(enumerate([
"A",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"K",
"L",
"M",
"N",
"P",
"Q",
"R",
"S",
"T",
"V",
"W",
"X",
"Y",
"Z",
"<mask>",
'B'
]))
IUPAC_AMINO_IDX_PAIRS = [(a, i) for (i, a) in IUPAC_IDX_AMINO_PAIRS]
alphabet_size = len(IUPAC_AMINO_IDX_PAIRS)
IUPAC_SEQ2IDX = OrderedDict(IUPAC_AMINO_IDX_PAIRS)
IUPAC_IDX2SEQ = OrderedDict(IUPAC_IDX_AMINO_PAIRS)
# Add gap tokens as the same as mask
IUPAC_SEQ2IDX["-"] = IUPAC_SEQ2IDX["<mask>"]
IUPAC_SEQ2IDX["."] = IUPAC_SEQ2IDX["<mask>"]
IUPAC_IDX2SEQ_decoding = OrderedDict(IUPAC_IDX_AMINO_PAIRS_decoding)
def seq2idx(seq, device = None):
return torch.tensor([IUPAC_SEQ2IDX[s.upper() if len(s) < 2 else s] for s in seq], device = device)
# return torch.tensor([IUPAC_SEQ2IDX[s] for s in seq if len(s) > 1 or (s == s.upper() and s != ".")], device = device)
def seq2idx_removegaps(seq, device=None):
seq = np.array([IUPAC_SEQ2IDX[aa] for aa in np.array(seq)])
keep_cols = []
for i, aa in enumerate(seq):
if IUPAC_IDX2SEQ[aa] != '<mask>':
keep_cols.append(i)
seq = seq[keep_cols]
return torch.tensor(seq, device=device)
def idx2seq(idxs):
return "".join([IUPAC_IDX2SEQ[i] for i in idxs])
def idx2seq_decoding(idxs):
return "".join([IUPAC_IDX2SEQ_decoding[i] for i in idxs])
def save_weights_file(datafile, save_file):
seqs = list(SeqIO.parse(datafile, "fasta"))
dataset = ProteinDataset(seqs)
dataset.write_to_file(save_file)
class LipaseDataset(Dataset):
def __init__(self, seqs, backbone=0, gappy_colx_threshold=1, device = None, SSVAE=False, SSCVAE=False, CVAE=False,
ssl_deg=False, ssl_iniact=False, ssl_pnp=False, ssl_glad=False, tom_odor=False, tom_perf=False,
ogt=False, topt=False, add_ssl_seqs=False, add_tom_seqs=False, only_tom_seqs=False,
tom_val_index=None, over_sample=0):
super().__init__()
self.device = device
self.seqs = seqs if isinstance(seqs, list) else list(SeqIO.parse(seqs, "fasta"))
if len(self.seqs) == 0:
self.encoded_seqs = torch.Tensor()
self.weights = torch.Tensor()
self.neff = 0
return
prepro_seqs, kept_seqs = prepro_alignment(self.seqs, backbone, gappy_colx_threshold)
self.encoded_seqs = torch.stack([torch.tensor(seq, device=device) for seq in prepro_seqs])
self.encoded_seqs = postprocess_aln_coverage(self.encoded_seqs, threshold=0.5)
discretize = True if SSCVAE or CVAE else False
if ssl_deg or ssl_iniact or ssl_pnp or ssl_glad or add_ssl_seqs:
ssl_seqs_and_labels = prep_ssl_data(ssl_deg=ssl_deg, ssl_iniact=ssl_iniact, ssl_pnp=ssl_pnp, ssl_glad=ssl_glad, discretize=discretize, ast_threshold=30, quantiles=[0, 0.25 ,0.75, 1])
for col in ssl_seqs_and_labels.columns:
if 'UNKNOWN' in col and SSCVAE:
ssl_seqs_and_labels = ssl_seqs_and_labels.drop(col, axis=1)
if ssl_deg or ssl_iniact or ssl_pnp or ssl_glad:
ssl_labels, ssl_filler_labels, _ = prep_assay_labels(ssl_seqs_and_labels, device, self.encoded_seqs.size(0), cvae=CVAE)
ssl_labels = torch.cat((ssl_filler_labels, ssl_labels))
else:
ssl_labels = None
ssl_seqs = torch.tensor(ssl_seqs_and_labels['Sequence'], device=device)
self.encoded_seqs = torch.cat((self.encoded_seqs, ssl_seqs))
else:
ssl_labels = None
if tom_odor or tom_perf or add_tom_seqs:
tom_seqs_and_labels = prep_tom_labels(val_index=tom_val_index, tom_odor=tom_odor, tom_perf=tom_perf, discretize=discretize, quantiles=[0, 0.25 ,0.75, 1])
for col in tom_seqs_and_labels.columns:
if 'UNKNOWN' in col and SSCVAE:
tom_seqs_and_labels = tom_seqs_and_labels.drop(col, axis=1)
if tom_odor or tom_perf:
tom_labels, tom_filler_labels, _ = prep_assay_labels(tom_seqs_and_labels, device, self.encoded_seqs.size(0), cvae=CVAE)
tom_labels = torch.cat((tom_filler_labels, tom_labels))
else:
tom_labels = None
tom_seqs = torch.tensor(tom_seqs_and_labels['Sequence'], device=device)
if not only_tom_seqs:
self.encoded_seqs = torch.cat((self.encoded_seqs, tom_seqs))
else:
self.encoded_seqs = tom_seqs
else:
tom_labels = None
if ssl_labels != None and tom_labels == None:
self.labels = ssl_labels
if ssl_labels == None and tom_labels != None:
self.labels = tom_labels
if ssl_labels != None and tom_labels != None:
extra_filler_labels = fill_missing(tom_seqs.size(0), ssl_seqs_and_labels, discretize, device)
ssl_labels = torch.cat((ssl_labels, extra_filler_labels))
self.labels = torch.cat((ssl_labels, tom_labels), dim=1)
if ssl_labels == None and tom_labels == None:
self.labels = None
if only_tom_seqs and tom_odor or tom_perf:
self.labels = tom_labels
if ogt or topt:
if self.labels != None:
ogt_topt_labels = match_ogt_n_topt(kept_seqs, ogt, topt, device)
for col in ogt_topt_labels:
if 'UNKNOWN' in col:
if SSCVAE:
ogt_topt_labels = ogt_topt_labels.drop(col, axis=1)
num_extra_seqs = self.labels.size(0)-len(ogt_topt_labels)
filler_labels = fill_missing(num_extra_seqs, ogt_topt_labels, discretize, device)
ogt_topt_labels = torch.cat((torch.tensor(ogt_topt_labels.values, device=device).float(), filler_labels))
self.labels = torch.cat((self.labels, ogt_topt_labels), dim=1)
if self.labels == None:
ogt_topt_labels = match_ogt_n_topt(kept_seqs, ogt, topt, device)
for col in ogt_topt_labels:
if 'UNKNOWN' in col:
if SSCVAE:
ogt_topt_labels = ogt_topt_labels.drop(col, axis=1)
self.labels = torch.tensor(ogt_topt_labels.values, device=device).float()
if not SSVAE and not CVAE and not SSCVAE:
self.labels = None
# Calculate weights
weights = []
flat_one_hot = F.one_hot(self.encoded_seqs, num_classes=max(IUPAC_SEQ2IDX.values())+1).float().flatten(1)
weight_batch_size = 1000
for i in range(self.encoded_seqs.size(0) // weight_batch_size + 1):
x = flat_one_hot[i * weight_batch_size : (i + 1) * weight_batch_size]
similarities = torch.mm(x, flat_one_hot.T)
lengths = (self.encoded_seqs[i * weight_batch_size : (i + 1) * weight_batch_size] != IUPAC_SEQ2IDX["<mask>"]).sum(1).unsqueeze(-1)
w = 1.0 / (similarities / lengths).gt(0.8).sum(1).float()
weights.append(w)
self.weights = torch.cat(weights)
# TODO fix oversampling to be indifferent of label index
self.neff = self.weights.sum()
print(self.neff)
if over_sample>0 and tom_odor:
self.weights[-(self.labels.size(0)-tom_filler_labels.size(0)):] = self.weights[-(self.labels.size(0)-tom_filler_labels.size(0)):] + over_sample
if over_sample>0 and not tom_odor:
print('######################################### USED OVER_SAMPLE FOR LABEL THAT ISNT TOM #######################################')
def write_to_file(self, filepath):
for s, w in zip(self.seqs, self.weights):
s.id = s.id + ':' + str(float(w))
SeqIO.write(self.seqs, filepath, 'fasta')
def __len__(self):
return len(self.encoded_seqs)
def __getitem__(self, i):
if type(self.labels) == type(None):
labels = self.labels
else:
labels = self.labels[i]
return self.encoded_seqs[i], self.weights[i], self.neff, labels
class BLATDataset(Dataset):
def __init__(self, seqs, device = None, SSVAE=False, SSCVAE=False, CVAE=False,
assay=False, add_assay_seqs=False, val_index=None):
super().__init__()
self.device = device
self.seqs = seqs if isinstance(seqs, list) else list(SeqIO.parse(seqs, "fasta"))
if len(self.seqs) == 0:
self.encoded_seqs = torch.Tensor()
self.weights = torch.Tensor()
self.neff = 0
return
self.encoded_seqs = torch.stack([seq2idx(seq, device) for seq in self.seqs])
num_sequences = self.encoded_seqs.size(0)
if SSCVAE or CVAE or SSVAE:
discretize = True if SSCVAE or CVAE else False
if assay or add_assay_seqs:
assay_seqs_and_labels = prep_any_labels('data_handler/files/assay_data/Blat_assay_data.pkl',
['assay'],
val_index=val_index,
discretize=discretize)
for col in assay_seqs_and_labels.columns:
if 'UNKNOWN' in col and SSCVAE:
assay_seqs_and_labels = assay_seqs_and_labels.drop(col, axis=1)
if assay:
assay_labels, assay_filler_labels, _ = prep_assay_labels(assay_seqs_and_labels, device, self.encoded_seqs.size(0), cvae=CVAE)
assay_labels = torch.cat((assay_filler_labels, assay_labels))
else:
assay_labels = None
assay_seqs = torch.tensor(assay_seqs_and_labels['Sequence'], device=device)
self.encoded_seqs = torch.cat((self.encoded_seqs, assay_seqs))
else:
assay_labels = None
if assay_labels != None:
self.labels = assay_labels
if assay_labels == None:
self.labels = None
if not SSVAE and not CVAE and not SSCVAE:
self.labels = None
# Calculate weights
weights = []
flat_one_hot = F.one_hot(self.encoded_seqs, num_classes=max(IUPAC_SEQ2IDX.values())+1).float().flatten(1)
weight_batch_size = 1000
for i in range(self.encoded_seqs.size(0) // weight_batch_size + 1):
x = flat_one_hot[i * weight_batch_size : (i + 1) * weight_batch_size]
similarities = torch.mm(x, flat_one_hot.T)
lengths = (self.encoded_seqs[i * weight_batch_size : (i + 1) * weight_batch_size] != IUPAC_SEQ2IDX["<mask>"]).sum(1).unsqueeze(-1)
w = 1.0 / (similarities / lengths).gt(0.8).sum(1).float()
weights.append(w)
self.weights = torch.cat(weights)
self.neff = self.weights.sum()
def write_to_file(self, filepath):
for s, w in zip(self.seqs, self.weights):
s.id = s.id + ':' + str(float(w))
SeqIO.write(self.seqs, filepath, 'fasta')
def __len__(self):
return len(self.encoded_seqs)
def __getitem__(self, i):
if self.labels == None:
labels = self.labels
else:
labels = self.labels[i]
return self.encoded_seqs[i], self.weights[i], self.neff, labels
class PDEDataset(Dataset):
def __init__(self, seqs, backbone=0, gappy_colx_threshold=1, device = None, SSVAE=False, SSCVAE=False, CVAE=False,
logHIF=False, ogt=False, topt=False, add_logHIF_seqs=False, val_index=None):
super().__init__()
self.device = device
self.seqs = seqs if isinstance(seqs, list) else list(SeqIO.parse(seqs, "fasta"))
if len(self.seqs) == 0:
self.encoded_seqs = torch.Tensor()
self.weights = torch.Tensor()
self.neff = 0
return
prepro_seqs, kept_seqs = prepro_alignment(self.seqs, backbone, gappy_colx_threshold)  # keep kept_seqs, it is needed below for match_ogt_n_topt
self.encoded_seqs = torch.stack([torch.tensor(seq, device=device) for seq in prepro_seqs])
self.encoded_seqs = torch.stack([seq[25:] for seq in self.encoded_seqs])
self.encoded_seqs = postprocess_aln_coverage(self.encoded_seqs, threshold=0.5)
num_sequences = self.encoded_seqs.size(0)
if SSCVAE or CVAE or SSVAE:
discretize = True if SSCVAE or CVAE else False
if logHIF or add_logHIF_seqs:  # this class has no add_assay_seqs argument
assay_seqs_and_labels = prep_any_labels('data_handler/files/assay_data/PDE_logHIF_data.pkl',
['logHIF'],
val_index=val_index,
discretize=discretize)
for col in assay_seqs_and_labels.columns:
if 'UNKNOWN' in col and SSCVAE:
assay_seqs_and_labels = assay_seqs_and_labels.drop(col, axis=1)
if logHIF:  # this class has no assay argument
assay_labels, assay_filler_labels, _ = prep_assay_labels(assay_seqs_and_labels, device, self.encoded_seqs.size(0), cvae=CVAE)
assay_labels = torch.cat((assay_filler_labels, assay_labels))
else:
assay_labels = None
assay_seqs = torch.tensor(assay_seqs_and_labels['Sequence'], device=device)
self.encoded_seqs = torch.cat((self.encoded_seqs, assay_seqs))
else:
assay_labels = None
if assay_labels != None:
self.labels = assay_labels
if assay_labels == None:
self.labels = None
if ogt or topt:
if self.labels != None:
ogt_topt_labels = match_ogt_n_topt(kept_seqs, ogt, topt, device)
for col in ogt_topt_labels:
if 'UNKNOWN' in col:
if SSCVAE:
ogt_topt_labels = ogt_topt_labels.drop(col, axis=1)
num_extra_seqs = self.labels.size(0)-len(ogt_topt_labels)
filler_labels = fill_missing(num_extra_seqs, ogt_topt_labels, discretize, device)
ogt_topt_labels = torch.cat((torch.tensor(ogt_topt_labels.values, device=device).float(), filler_labels))
self.labels = torch.cat((self.labels, ogt_topt_labels), dim=1)
if self.labels == None:
ogt_topt_labels = match_ogt_n_topt(kept_seqs, ogt, topt, device)
for col in ogt_topt_labels:
if 'UNKNOWN' in col:
if SSCVAE:
ogt_topt_labels = ogt_topt_labels.drop(col, axis=1)
self.labels = torch.tensor(ogt_topt_labels.values, device=device).float()
if not SSVAE and not CVAE and not SSCVAE:
self.labels = None
# Calculate weights
weights = []
flat_one_hot = F.one_hot(self.encoded_seqs, num_classes=max(IUPAC_SEQ2IDX.values())+1).float().flatten(1)
weight_batch_size = 1000
for i in range(self.encoded_seqs.size(0) // weight_batch_size + 1):
x = flat_one_hot[i * weight_batch_size : (i + 1) * weight_batch_size]
similarities = torch.mm(x, flat_one_hot.T)
lengths = (self.encoded_seqs[i * weight_batch_size : (i + 1) * weight_batch_size] != IUPAC_SEQ2IDX["<mask>"]).sum(1).unsqueeze(-1)
w = 1.0 / (similarities / lengths).gt(0.8).sum(1).float()
weights.append(w)
self.weights = torch.cat(weights)
self.neff = self.weights.sum()
def write_to_file(self, filepath):
for s, w in zip(self.seqs, self.weights):
s.id = s.id + ':' + str(float(w))
SeqIO.write(self.seqs, filepath, 'fasta')
def __len__(self):
return len(self.encoded_seqs)
def __getitem__(self, i):
if self.labels == None:
labels = self.labels
else:
labels = self.labels[i]
return self.encoded_seqs[i], self.weights[i], self.neff, labels
def get_datasets_from_Lipase(file=None, backbone_idx=0, train_ratio=1, gappy_colx_threshold=1, device = None,
SSVAE=False, SSCVAE=False, CVAE=False, ssl_deg=False, ssl_iniact=False,
ssl_pnp=False, ssl_glad=False, tom_odor=False, tom_perf=False,
ogt=False, topt=False, add_ssl_seqs=False, add_tom_seqs=False, only_tom_seqs=False,
tom_val_index=None, over_sample=False):
seqs = list(SeqIO.parse(file, "fasta"))
backbone = seqs[backbone_idx]
data_len = len(seqs)
seq_len = len(seqs[0])
# Split into train/validation
train_length = int(train_ratio * data_len)
val_length = data_len - train_length
indices = list(range(data_len))
random.shuffle(indices)
train_indices = indices[:train_length]
val_indices = indices[train_length:]
train_seqs = [seqs[i] for i in train_indices]
val_seqs = [seqs[i] for i in val_indices]
all_data = LipaseDataset(seqs, backbone=backbone, gappy_colx_threshold=gappy_colx_threshold, device = device, SSVAE=SSVAE, SSCVAE=SSCVAE, CVAE=CVAE,
ssl_deg=ssl_deg, ssl_iniact=ssl_iniact, ssl_pnp=ssl_pnp, ssl_glad=ssl_glad, tom_odor=tom_odor, tom_perf=tom_perf,
ogt=ogt, topt=topt, add_ssl_seqs=add_ssl_seqs, add_tom_seqs=add_tom_seqs, only_tom_seqs=only_tom_seqs,
tom_val_index=tom_val_index, over_sample=over_sample)
train_data = LipaseDataset(train_seqs, backbone=backbone, gappy_colx_threshold=gappy_colx_threshold, device = device, SSVAE=SSVAE, SSCVAE=SSCVAE, CVAE=CVAE,
ssl_deg=ssl_deg, ssl_iniact=ssl_iniact, ssl_pnp=ssl_pnp, ssl_glad=ssl_glad, tom_odor=tom_odor, tom_perf=tom_perf,
ogt=ogt, topt=topt, add_ssl_seqs=add_ssl_seqs, add_tom_seqs=add_tom_seqs, only_tom_seqs=only_tom_seqs,
tom_val_index=tom_val_index, over_sample=over_sample)
val_data = LipaseDataset(val_seqs, backbone=backbone, gappy_colx_threshold=gappy_colx_threshold, device = device, SSVAE=SSVAE, SSCVAE=SSCVAE, CVAE=CVAE,
ssl_deg=ssl_deg, ssl_iniact=ssl_iniact, ssl_pnp=ssl_pnp, ssl_glad=ssl_glad, tom_odor=tom_odor, tom_perf=tom_perf,
ogt=ogt, topt=topt, add_ssl_seqs=add_ssl_seqs, add_tom_seqs=add_tom_seqs, only_tom_seqs=only_tom_seqs,
tom_val_index=tom_val_index, over_sample=over_sample)
return all_data, train_data, val_data
def get_datasets_from_BLAT(file=None, train_ratio=1, device = None, SSVAE=False, SSCVAE=False, CVAE=False,
assay=False, add_assay_seqs=False, val_index=None):
seqs = list(SeqIO.parse(file, "fasta"))
data_len = len(seqs)
seq_len = len(seqs[0])
# Split into train/validation
train_length = int(train_ratio * data_len)
val_length = data_len - train_length
indices = list(range(data_len))
random.shuffle(indices)
train_indices = indices[:train_length]
val_indices = indices[train_length:]
train_seqs = [seqs[i] for i in train_indices]
val_seqs = [seqs[i] for i in val_indices]
all_data = BLATDataset(seqs, device = device, SSVAE=SSVAE, SSCVAE=SSCVAE, CVAE=CVAE,
assay=assay, add_assay_seqs=add_assay_seqs, val_index=val_index)
train_data = BLATDataset(train_seqs, device = device, SSVAE=SSVAE, SSCVAE=SSCVAE, CVAE=CVAE,
assay=assay, add_assay_seqs=add_assay_seqs, val_index=val_index)
val_data = BLATDataset(val_seqs, device = device, SSVAE=SSVAE, SSCVAE=SSCVAE, CVAE=CVAE,
assay=assay, add_assay_seqs=add_assay_seqs, val_index=val_index)
return all_data, train_data, val_data
def get_datasets_from_PDE(file=None, train_ratio=1, backbone_idx=25295, gappy_colx_threshold=1, device='cuda', SSVAE=False, SSCVAE=False, CVAE=False,
logHIF=False, ogt=False, topt=False, add_logHIF_seqs=False, val_index=None):
seqs = list(SeqIO.parse(file, "fasta"))
backbone = seqs[backbone_idx]
data_len = len(seqs)
seq_len = len(seqs[0])
# Split into train/validation
train_length = int(train_ratio * data_len)
val_length = data_len - train_length
indices = list(range(data_len))
random.shuffle(indices)
train_indices = indices[:train_length]
val_indices = indices[train_length:]
train_seqs = [seqs[i] for i in train_indices]
val_seqs = [seqs[i] for i in val_indices]
all_data = PDEDataset(seqs, backbone=backbone, gappy_colx_threshold=gappy_colx_threshold, device = device, SSVAE=SSVAE, SSCVAE=SSCVAE, CVAE=CVAE,
logHIF=logHIF, ogt=ogt, topt=topt, add_logHIF_seqs=add_logHIF_seqs, val_index=val_index)
train_data = PDEDataset(train_seqs, backbone=backbone, gappy_colx_threshold=gappy_colx_threshold, device = device, SSVAE=SSVAE, SSCVAE=SSCVAE, CVAE=CVAE,
logHIF=logHIF, ogt=ogt, topt=topt, add_logHIF_seqs=add_logHIF_seqs, val_index=val_index)
val_data = PDEDataset(val_seqs, backbone=backbone, gappy_colx_threshold=gappy_colx_threshold, device = device, SSVAE=SSVAE, SSCVAE=SSCVAE, CVAE=CVAE,
logHIF=logHIF, ogt=ogt, topt=topt, add_logHIF_seqs=add_logHIF_seqs, val_index=val_index)
return all_data, train_data, val_data
def seqs_collate(tensors):
encoded_seq, weights, neffs, labels = zip(*tensors)
    labels = labels if labels[0] is None else torch.stack(labels)
return torch.stack(encoded_seq), neffs[0], labels, torch.stack(weights)
def get_protein_dataloader(dataset, batch_size = 128, shuffle = False, get_seqs = False, random_weighted_sampling = False):
#sampler = WeightedRandomSampler(weights = dataset.weights, num_samples = len(dataset.weights), replacement = True) if random_weighted_sampling else None
#return DataLoader(dataset, batch_size = batch_size, shuffle = shuffle if not random_weighted_sampling else not random_weighted_sampling, collate_fn = seqs_collate, sampler = sampler)
return DataLoader(dataset, batch_size = batch_size, shuffle = shuffle, collate_fn = seqs_collate)
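# Minimal sketch of the per-item format the collate function above expects:
# (encoded_seq, weight, neff, labels), where labels may be None in the plain
# unsupervised case.  The toy tensors are illustrative only.
def _example_collate():
    import torch
    items = [
        (torch.tensor([4, 5, 6]), torch.tensor(0.5), torch.tensor(2.0), None),
        (torch.tensor([4, 6, 6]), torch.tensor(1.0), torch.tensor(2.0), None),
    ]
    batch_seqs, neff, labels, batch_weights = seqs_collate(items)
    # batch_seqs: (2, 3); labels stays a tuple of None; batch_weights: (2,)
    return batch_seqs, neff, labels, batch_weights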
# keep only alignment entries whose amino-acid coverage exceeds a threshold, so overly gappy alignments are excluded
def postprocess_aln_coverage(alignment, threshold=0.5):
keep_colx = []
for i, x in enumerate(alignment):
coverage = len(x[x!=IUPAC_SEQ2IDX['<mask>']])/alignment.size(1)
if coverage > threshold:
keep_colx.append(i)
seqs = alignment[keep_colx]
return seqs
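# Quick illustration of the coverage filter above: rows whose fraction of
# non-mask positions is at or below the threshold are dropped.  The toy
# alignment is illustrative only; IUPAC_SEQ2IDX['<mask>'] is the gap/mask
# index used throughout this module.
def _example_coverage_filter():
    import torch
    mask_idx = IUPAC_SEQ2IDX['<mask>']
    toy = torch.tensor([[4, 5, 6, 7],                        # full coverage
                        [4, mask_idx, mask_idx, mask_idx]])  # 25 % coverage
    return postprocess_aln_coverage(toy, threshold=0.5)  # keeps only the first row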
def prepro_alignment(alignment, backbone, threshold):
seqs_in_int = []
seqs_aa = []
is_aa = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
gap = ['-','.']
for i, record in enumerate(alignment):
skip = False
for aa in record.seq.upper():
try:
assert aa in is_aa or aa in gap, f"{aa}"
except AssertionError:
skip = True
break
if not skip:
seqs_in_int.append([IUPAC_SEQ2IDX[aa] for aa in str(record.seq).upper()])
seqs_aa.append(str(record.seq).upper())
seqs_in_int = np.array(seqs_in_int)
keep_cols = []
for i, aa in enumerate([IUPAC_SEQ2IDX[aa] for aa in | np.array(backbone) | numpy.array |
import tensorflow as tf
import numpy as np
import cv2
from PIL import Image
import os
import traceback
import glob
from scipy.io import loadmat, savemat
import imageio as iio
import matplotlib.pyplot as plt
from preprocess_img import Preprocess
from load_data import *
from reconstruct_mesh import Reconstruction
from rendering import Renderer
def load_graph(graph_filename):
with tf.gfile.GFile(graph_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
return graph_def
import dlib
det = dlib.get_frontal_face_detector()
pred = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
renderer = Renderer()
def get_landmarks(image):
detection = det(image, 1)[0]
face_shape = pred(image, detection)
l_eye_x = np.mean([face_shape.part(i).x for i in range(42, 48)])
l_eye_y = np.mean([face_shape.part(i).y for i in range(42, 48)])
r_eye_x = np.mean([face_shape.part(i).x for i in range(36, 42)])
r_eye_y = np.mean([face_shape.part(i).y for i in range(36, 42)])
l_eye = (l_eye_x, l_eye_y)
r_eye = (r_eye_x, r_eye_y)
eyes = np.vstack((l_eye, r_eye))
nose = face_shape.part(30)
l_mouth = face_shape.part(48)
r_mouth = face_shape.part(54)
pp = [(p.x, p.y) for p in [nose, r_mouth, l_mouth]]
return np.vstack((eyes, pp))
def demo():
# input and output folder
in_dir = 'input_vids'
out_dir = 'output2'
# img_list = glob.glob(image_path + '/' + '*.jpg')
vid_list = [os.path.join(in_dir, f) for f in os.listdir(in_dir) if not f.startswith('.')]
# read BFM face model
# transfer original BFM model to our model
if not os.path.isfile('./BFM/BFM_model_front.mat'):
transferBFM09()
# read face model
facemodel = BFM()
# read standard landmarks for preprocessing images
lm3D = load_lm3d()
n = 0
# build reconstruction model
with tf.Graph().as_default() as graph, tf.device('/cpu:0'):
images = tf.placeholder(name='input_imgs', shape=[None, 224, 224, 3], dtype=tf.float32)
graph_def = load_graph('network/FaceReconModel.pb')
tf.import_graph_def(graph_def, name='resnet', input_map={'input_imgs:0': images})
# output coefficients of R-Net (dim = 257)
coeff = graph.get_tensor_by_name('resnet/coeff:0')
with tf.Session() as sess:
print('reconstructing...')
for file in vid_list:
print(file)
with iio.get_reader(file) as reader:
fps = reader.get_meta_data()['fps']
name, ext = os.path.splitext(file)
file_name = os.path.basename(name)
l_writer = iio.get_writer(os.path.join(out_dir, file_name + ext), fps=fps)
# r_writer = iio.get_writer(os.path.join(out_dir, file_name + '_render' + ext), fps=fps)
for i, im in enumerate(reader):
print(i)
try:
# load images and corresponding 5 facial landmarks
# img,lm = load_img(file,file.replace('png','txt'))
img = Image.fromarray(im)
np_img = np.array(img)
lm = get_landmarks(np_img)
h, w = np_img.shape[:2]
# preprocess input image
input_img, lm_new, transform_params = Preprocess(img, lm, lm3D)
s = transform_params[2]
out_sh = int(np.round(224 / s))
out_sh = min(out_sh, min(w, h))
coef = sess.run(coeff, feed_dict={images: input_img})
# reconstruct 3D face with output coefficients and face model
face_shape, face_texture, face_color, tri, face_projection, z_buffer, landmarks_2d, translation, rotation, projection = Reconstruction(
coef, facemodel)
# reshape outputs
input_img = np.squeeze(input_img)
shape = np.squeeze(face_shape, (0))
color = np.squeeze(face_color, (0))
landmarks_2d = np.squeeze(landmarks_2d, (0))
cx, cy = transform_params[3][0], transform_params[4][0]
tx, ty = -(w / 2 - cx), -(cy - h / 2)
land_im = np_img.copy()
for x, y in landmarks_2d:
x = int(np.round((x + (w * s - 224) // 2) / s + tx))
y = int( | np.round((y + (h * s - 224) // 2) / s + ty) | numpy.round |
#==============================================================================
# WELCOME
#==============================================================================
# Welcome to RainyDay, a framework for coupling remote sensing precipitation
# fields with Stochastic Storm Transposition for assessment of rainfall-driven hazards.
# Copyright (C) 2017 <NAME> (<EMAIL>)
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.#
#==============================================================================
# THIS DOCUMENT CONTAINS VARIOUS FUNCTIONS NEEDED TO RUN RainyDay
#==============================================================================
import os
import sys
import numpy as np
import scipy as sp
import glob
import math
from datetime import datetime, date, time, timedelta
import time
from copy import deepcopy
from mpl_toolkits.basemap import Basemap, addcyclic
from matplotlib.patches import Polygon
from scipy import stats
from netCDF4 import Dataset, num2date, date2num
#import gdal
import rasterio
import pandas as pd
from numba import prange,jit
import shapely
import geopandas as gp
from scipy.stats import norm
from scipy.stats import lognorm
# plotting stuff, really only needed for diagnostic plots
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.colors import LogNorm
import subprocess
try:
os.environ.pop('PYTHONIOENCODING')
except KeyError:
pass
import warnings
warnings.filterwarnings("ignore")
from numba.types import int32,int64,float32,uint32
import linecache
GEOG="+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
# =============================================================================
# Smoother that is compatible with nan values. Adapted from https://stackoverflow.com/questions/18697532/gaussian-filtering-a-image-with-nan-in-python
# =============================================================================
def mysmoother(inarray,sigma=[3,3]):
if len(sigma)!=len(inarray.shape):
sys.exit("there seems to be a mismatch between the sigma dimension and the dimension of the array you are trying to smooth")
V=inarray.copy()
V[np.isnan(inarray)]=0.
VV=sp.ndimage.gaussian_filter(V,sigma=sigma)
W=0.*inarray.copy()+1.
W[np.isnan(inarray)]=0.
WW=sp.ndimage.gaussian_filter(W,sigma=sigma)
outarray=VV/WW
outarray[np.isnan(inarray)]=np.nan
return outarray
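# Small usage sketch: smooth a noisy field containing NaNs and check that the
# NaN locations are preserved while the rest is filtered.  Purely illustrative.
def _example_mysmoother():
    field = np.random.rand(20, 20).astype('float32')
    field[5:8, 5:8] = np.nan
    smoothed = mysmoother(field, sigma=[2, 2])
    assert np.all(np.isnan(smoothed[5:8, 5:8]))
    return smoothed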
def my_kde_bandwidth(obj, fac=1): # the choice of fac is completely subjective :(
#We use Scott's Rule, multiplied by a constant factor
return np.power(obj.n, -1./(obj.d+4)) * fac
def convert_3D_2D(geometry):
'''
Takes a GeoSeries of 3D Multi/Polygons (has_z) and returns a list of 2D Multi/Polygons
'''
new_geo = []
for p in geometry:
if p.has_z:
if p.geom_type == 'Polygon':
lines = [xy[:2] for xy in list(p.exterior.coords)]
new_p = shapely.geometry.Polygon(lines)
new_geo.append(new_p)
elif p.geom_type == 'MultiPolygon':
new_multi_p = []
for ap in p:
lines = [xy[:2] for xy in list(ap.exterior.coords)]
new_p = shapely.geometry.Polygon(lines)
new_multi_p.append(new_p)
new_geo.append(shapely.geometry.MultiPolygon(new_multi_p))
return new_geo
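# Usage sketch for the helper above: flatten a 3D polygon as it might come out
# of a GeoDataFrame geometry column.  The toy triangle is illustrative only.
def _example_convert_3D_2D():
    poly3d = shapely.geometry.Polygon([(0., 0., 5.), (1., 0., 5.), (0., 1., 5.)])
    flat = convert_3D_2D(gp.GeoSeries([poly3d]))
    return flat[0].has_z   # False after conversion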
#==============================================================================
# LOOP TO DO SPATIAL SEARCHING FOR MAXIMUM RAINFALL LOCATION AT EACH TIME STEP
# THIS IS THE CORE OF THE STORM CATALOG CREATION TECHNIQUE
#==============================================================================
#def catalogweave(temparray,trimmask,xlen,ylen,maskheight,maskwidth,rainsum):
# rainsum[:]=0.
# code= """
# #include <stdio.h>
# int i,j,x,y;
# for (x=0;x<xlen;x++) {
# for (y=0;y<ylen;y++) {
# for (j=0;j<maskheight;j++) {
# for (i=0;i<maskwidth;i++) {
# rainsum(y,x)=rainsum(y,x)+temparray(y+j,x+i)*trimmask(j,i);
# }
# }
# }
# }
# """
# vars=['temparray','trimmask','xlen','ylen','maskheight','maskwidth','rainsum']
# sp.weave.inline(code,vars,type_converters=converters.blitz,compiler='gcc')
# rmax=np.nanmax(rainsum)
# wheremax=np.where(rainsum==rmax)
# return rmax, wheremax[0][0], wheremax[1][0]
#
def catalogAlt(temparray,trimmask,xlen,ylen,maskheight,maskwidth,rainsum,domainmask):
rainsum[:]=0.
for i in range(0,(ylen)*(xlen)):
y=i//xlen
x=i-y*xlen
#print x,
rainsum[y,x]=np.nansum(np.multiply(temparray[(y):(y+maskheight),(x):(x+maskwidth)],trimmask))
#wheremax=np.argmax(rainsum)
rmax=np.nanmax(rainsum)
wheremax=np.where(rainsum==rmax)
return rmax, wheremax[0][0], wheremax[1][0]
def catalogAlt_irregular(temparray,trimmask,xlen,ylen,maskheight,maskwidth,rainsum,domainmask):
rainsum[:]=0.
for i in range(0,(ylen)*(xlen)):
y=i//xlen
x=i-y*xlen
#print x,y
        if np.any(np.equal(domainmask[y+maskheight//2,x:x+maskwidth],1.)) and np.any(np.equal(domainmask[y:y+maskheight,x+maskwidth//2],1.)):  # integer division so the indices stay ints under Python 3
rainsum[y,x]=np.nansum(np.multiply(temparray[(y):(y+maskheight),(x):(x+maskwidth)],trimmask))
else:
rainsum[y,x]=0.
#wheremax=np.argmax(rainsum)
rmax=np.nanmax(rainsum)
wheremax=np.where(rainsum==rmax)
return rmax, wheremax[0][0], wheremax[1][0]
@jit(nopython=True,fastmath=True)
def catalogNumba_irregular(temparray,trimmask,xlen,ylen,maskheight,maskwidth,rainsum,domainmask):
rainsum[:]=0.
halfheight=int32(np.ceil(maskheight/2))
halfwidth=int32(np.ceil(maskwidth/2))
for i in range(0,ylen*xlen):
y=i//xlen
x=i-y*xlen
#print x,y
if np.any(np.equal(domainmask[y+halfheight,x:x+maskwidth],1.)) and np.any(np.equal(domainmask[y:y+maskheight,x+halfwidth],1.)):
rainsum[y,x]=np.nansum(np.multiply(temparray[y:(y+maskheight),x:(x+maskwidth)],trimmask))
else:
rainsum[y,x]=0.
#wheremax=np.argmax(rainsum)
rmax=np.nanmax(rainsum)
wheremax=np.where(np.equal(rainsum,rmax))
return rmax, wheremax[0][0], wheremax[1][0]
@jit(nopython=True)
def catalogNumba(temparray,trimmask,xlen,ylen,maskheight,maskwidth,rainsum):
rainsum[:]=0.
for i in range(0,(ylen)*(xlen)):
y=i//xlen
x=i-y*xlen
#print x,y
rainsum[y,x]=np.nansum(np.multiply(temparray[(y):(y+maskheight),(x):(x+maskwidth)],trimmask))
#wheremax=np.argmax(rainsum)
rmax=np.nanmax(rainsum)
wheremax=np.where(np.equal(rainsum,rmax))
return rmax, wheremax[0][0], wheremax[1][0]
@jit(nopython=True)
def DistributionBuilder(intenserain,tempmax,xlen,ylen,checksep):
for y in np.arange(0,ylen):
for x in np.arange(0,xlen):
if np.any(checksep[:,y,x]):
#fixind=np.where(checksep[:,y,x]==True)
for i in np.arange(0,checksep.shape[0]):
if checksep[i,y,x]==True:
fixind=i
break
if tempmax[y,x]>intenserain[fixind,y,x]:
intenserain[fixind,y,x]=tempmax[y,x]
checksep[:,y,x]=False
checksep[fixind,y,x]=True
else:
checksep[fixind,y,x]=False
elif tempmax[y,x]>np.min(intenserain[:,y,x]):
fixind=np.argmin(intenserain[:,y,x])
intenserain[fixind,y,x]=tempmax[y,x]
checksep[fixind,y,x]=True
return intenserain,checksep
# slightly faster numpy-based version of above
def DistributionBuilderFast(intenserain,tempmax,xlen,ylen,checksep):
minrain=np.min(intenserain,axis=0)
if np.any(checksep):
flatsep=np.any(checksep,axis=0)
minsep=np.argmax(checksep[:,flatsep],axis=0)
islarger=np.greater(tempmax[flatsep],intenserain[minsep,flatsep])
if np.any(islarger):
intenserain[minsep,flatsep][islarger]=tempmax[flatsep][islarger]
checksep[:]=False
checksep[minsep,flatsep]=True
else:
checksep[minsep,flatsep]=False
elif np.any(np.greater(tempmax,minrain)):
#else:
fixind=np.greater(tempmax,minrain)
minrainind=np.argmin(intenserain,axis=0)
intenserain[minrainind[fixind],fixind]=tempmax[fixind]
checksep[minrainind[fixind],fixind]=True
return intenserain,checksep
#def SSTalt(passrain,sstx,ssty,trimmask,maskheight,maskwidth,intense_data=False):
# rainsum=np.zeros((len(sstx)),dtype='float32')
# nreals=len(rainsum)
#
# for i in range(0,nreals):
# rainsum[i]=np.nansum(np.multiply(passrain[(ssty[i]) : (ssty[i]+maskheight) , (sstx[i]) : (sstx[i]+maskwidth)],trimmask))
# return rainsum
@jit(fastmath=True)
def SSTalt(passrain,sstx,ssty,trimmask,maskheight,maskwidth,intensemean=None,intensestd=None,intensecorr=None,homemean=None,homestd=None,durcheck=False):
maxmultiplier=1.5
rainsum=np.zeros((len(sstx)),dtype='float32')
whichstep=np.zeros((len(sstx)),dtype='int32')
nreals=len(rainsum)
nsteps=passrain.shape[0]
multiout=np.empty_like(rainsum)
if (intensemean is not None) and (homemean is not None):
domean=True
else:
domean=False
if (intensestd is not None) and (intensecorr is not None) and (homestd is not None):
#rquant=np.random.random_integers(5,high=95,size=nreals)/100.
rquant=np.random.random_sample(size=nreals)
doall=True
else:
doall=False
rquant=np.nan
if durcheck==False:
exprain=np.expand_dims(passrain,0)
else:
exprain=passrain
for k in range(0,nreals):
y=int(ssty[k])
x=int(sstx[k])
if np.all(np.less(exprain[:,y:y+maskheight,x:x+maskwidth],0.5)):
rainsum[k]=0.
multiout[k]=-999.
else:
if domean:
#sys.exit('need to fix short duration part')
muR=homemean-intensemean[y,x]
if doall:
stdR=np.sqrt(np.power(homestd,2)+np.power(intensestd[y,x],2)-2.*intensecorr[y,x]*homestd*intensestd[y,x])
# multiplier=sp.stats.lognorm.ppf(rquant[k],stdR,loc=0,scale=np.exp(muR))
#multiplier=10.
#while multiplier>maxmultiplier: # who knows what the right number is to use here...
inverrf=sp.special.erfinv(2.*rquant-1.)
multiplier=np.exp(muR+np.sqrt(2.*np.power(stdR,2))*inverrf[k])
#multiplier=np.random.lognormal(muR,stdR)
if multiplier>maxmultiplier:
multiplier=1.
else:
multiplier=np.exp(muR)
if multiplier>maxmultiplier:
multiplier=1.
else:
multiplier=1.
# print("still going!")
if multiplier>maxmultiplier:
sys.exit("Something seems to be going horribly wrong in the multiplier scheme!")
else:
multiout[k]=multiplier
if durcheck==True:
storesum=0.
storestep=0
for kk in range(0,nsteps):
#tempsum=numba_multimask_calc(passrain[kk,:],rsum,train,trimmask,ssty[k],maskheight,sstx[k],maskwidth)*multiplier
tempsum=numba_multimask_calc(passrain[kk,:],trimmask,y,x,maskheight,maskwidth)*multiplier
if tempsum>storesum:
storesum=tempsum
storestep=kk
rainsum[k]=storesum
whichstep[k]=storestep
else:
rainsum[k]=numba_multimask_calc(passrain,trimmask,y,x,maskheight,maskwidth)*multiplier
if domean:
return rainsum,multiout,whichstep
else:
return rainsum,whichstep
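# The stochastic rescaling above draws a lognormal multiplier whose log-space
# mean is muR = homemean - intensemean and whose log-space standard deviation
# stdR accounts for the correlation between the home and storm-location
# intensity fields:
#   multiplier = exp(muR + sqrt(2*stdR**2) * erfinv(2u - 1)),  u ~ Uniform(0,1)
# which is the inverse-CDF form of lognormal(muR, stdR).  The sketch below
# checks that equivalence numerically with illustrative parameter values.
def _example_rescaling_multiplier(homemean=1.2, homestd=0.4, intensemean=0.9,
                                  intensestd=0.3, intensecorr=0.5):
    muR = homemean - intensemean
    stdR = np.sqrt(homestd**2 + intensestd**2 - 2.*intensecorr*homestd*intensestd)
    u = np.random.random_sample(100000)
    mult_erf = np.exp(muR + np.sqrt(2.*stdR**2) * sp.special.erfinv(2.*u - 1.))
    mult_ln = np.random.lognormal(muR, stdR, size=u.size)
    return mult_erf.mean(), mult_ln.mean()   # should agree to sampling error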
#@jit(nopython=True,fastmath=True,parallel=True)
@jit(nopython=True,fastmath=True)
def numba_multimask_calc(passrain,trimmask,ssty,sstx,maskheight,maskwidth):
train=np.multiply(passrain[ssty : ssty+maskheight , sstx : sstx+maskwidth],trimmask)
rainsum=np.sum(train)
return rainsum
@jit(fastmath=True)
def SSTalt_singlecell(passrain,sstx,ssty,trimmask,maskheight,maskwidth,intensemean=None,intensestd=None,intensecorr=None,homemean=None,homestd=None,durcheck=False):
rainsum=np.zeros((len(sstx)),dtype='float32')
whichstep=np.zeros((len(sstx)),dtype='int32')
nreals=len(rainsum)
nsteps=passrain.shape[0]
multiout=np.empty_like(rainsum)
# do we do deterministic or dimensionless rescaling?
if (intensemean is not None) and (homemean is not None):
domean=True
else:
domean=False
# do we do stochastic rescaling?
if (intensestd is not None) and (intensecorr is not None) and (homestd is not None):
rquant=np.random.random_sample(size=nreals)
inverrf=sp.special.erfinv(2.*rquant-1.)
doall=True
else:
doall=False
#rquant=np.nan
if durcheck==False:
passrain=np.expand_dims(passrain,0)
# deterministic or dimensionless:
if domean and doall==False:
rain,multi,step=killerloop_singlecell(passrain,rainsum,whichstep,nreals,ssty,sstx,nsteps,durcheck=durcheck,intensemean=intensemean,homemean=homemean,multiout=multiout)
return rain,multi,step
# stochastic:
elif doall:
rain,multi,step=killerloop_singlecell(passrain,rainsum,whichstep,nreals,ssty,sstx,nsteps,durcheck=durcheck,intensemean=intensemean,intensestd=intensestd,intensecorr=intensecorr,homemean=homemean,homestd=homestd,multiout=multiout,inverrf=inverrf)
return rain,multi,step
# no rescaling:
else:
rain,_,step=killerloop_singlecell(passrain,rainsum,whichstep,nreals,ssty,sstx,nsteps,durcheck=durcheck,multiout=multiout)
return rain,step
#@jit(nopython=True,fastmath=True,parallel=True)
@jit(nopython=True,fastmath=True)
def killerloop_singlecell(passrain,rainsum,whichstep,nreals,ssty,sstx,nsteps,durcheck=False,intensemean=None,homemean=None,homestd=None,multiout=None,rquant=None,intensestd=None,intensecorr=None,inverrf=None):
maxmultiplier=1.5 # who knows what the right number is to use here...
for k in prange(nreals):
y=int(ssty[k])
x=int(sstx[k])
# deterministic or dimensionless:
if (intensemean is not None) and (homemean is not None) and (homestd is None):
if np.less(homemean,0.001) or np.less(intensemean[y,x],0.001):
multiplier=1. # or maybe this should be zero
else:
multiplier=np.exp(homemean-intensemean[y,x])
if multiplier>maxmultiplier:
multiplier=1. # or maybe this should be zero
# stochastic:
elif (intensemean is not None) and (homemean is not None) and (homestd is not None):
if np.less(homemean,0.001) or np.less(intensemean[y,x],0.001):
multiplier=1. # or maybe this should be zero
else:
muR=homemean-intensemean[y,x]
stdR=np.sqrt(np.power(homestd,2)+np.power(intensestd[y,x],2)-2*intensecorr[y,x]*homestd*intensestd[y,x])
multiplier=np.exp(muR+np.sqrt(2.*np.power(stdR,2))*inverrf[k])
if multiplier>maxmultiplier:
multiplier=1. # or maybe this should be zero
# no rescaling:
else:
multiplier=1.
if durcheck==False:
rainsum[k]=np.nansum(passrain[:,y, x])
else:
storesum=0.
storestep=0
for kk in range(nsteps):
tempsum=passrain[kk,y,x]
if tempsum>storesum:
storesum=tempsum
storestep=kk
rainsum[k]=storesum*multiplier
multiout[k]=multiplier
whichstep[k]=storestep
return rainsum,multiout,whichstep
#@jit(nopython=True,fastmath=True,parallel=True)
#def killerloop(passrain,rainsum,nreals,ssty,sstx,maskheight,maskwidth,trimmask,nsteps,durcheck):
# for k in prange(nreals):
# spanx=int64(sstx[k]+maskwidth)
# spany=int64(ssty[k]+maskheight)
# if np.all(np.less(passrain[:,ssty[k]:spany,sstx[k]:spanx],0.5)):
# rainsum[k]=0.
# else:
# if durcheck==False:
# rainsum[k]=np.nansum(np.multiply(passrain[ssty[k] : spany , sstx[k] : spanx],trimmask))
# else:
# storesum=float32(0.)
# for kk in range(nsteps):
# tempsum=np.nansum(np.multiply(passrain[kk,ssty[k]:spany,sstx[k]:spanx],trimmask))
# if tempsum>storesum:
# storesum=tempsum
# rainsum[k]=storesum
# return rainsum
#whichstep[k]=storestep
#return rainsum,whichstep
# this function below never worked for some unknown Numba problem-error messages indicated that it wasn't my fault!!! Some problem in tempsum
#@jit(nopython=True,fastmath=True,parallel=True)
#def killerloop(passrain,rainsum,nreals,ssty,sstx,maskheight,maskwidth,masktile,nsteps,durcheck):
# for k in prange(nreals):
# spanx=sstx[k]+maskwidth
# spany=ssty[k]+maskheight
# if np.all(np.less(passrain[:,ssty[k]:spany,sstx[k]:spanx],0.5)):
# rainsum[k]=0.
# else:
# if durcheck==False:
# #tempstep=np.multiply(passrain[:,ssty[k] : spany , sstx[k] : spanx],trimmask)
# #xnum=int64(sstx[k])
# #ynum=int64(ssty[k])
# #rainsum[k]=np.nansum(passrain[:,ssty[k], sstx[k]])
# rainsum[k]=np.nansum(np.multiply(passrain[:,ssty[k] : spany , sstx[k] : spanx],masktile))
# else:
# storesum=float32(0.)
# for kk in range(nsteps):
# #tempsum=0.
# #tempsum=np.multiply(passrain[kk,ssty[k]:spany,sstx[k]:spanx],masktile[0,:,:])
# tempsum=np.nansum(np.multiply(passrain[kk,ssty[k]:spany,sstx[k]:spanx],masktile[0,:,:]))
# return rainsum
#==============================================================================
# THIS VARIANT IS SIMPLER AND UNLIKE SSTWRITE, IT ACTUALLY WORKS RELIABLY!
#==============================================================================
#def SSTwriteAlt(catrain,rlzx,rlzy,rlzstm,trimmask,xmin,xmax,ymin,ymax,maskheight,maskwidth):
# nyrs=np.int(rlzx.shape[0])
# raindur=np.int(catrain.shape[1])
# outrain=np.zeros((nyrs,raindur,maskheight,maskwidth),dtype='float32')
# unqstm,unqind,unqcnts=np.unique(rlzstm,return_inverse=True,return_counts=True)
# #ctr=0
# for i in range(0,len(unqstm)):
# unqwhere=np.where(unqstm[i]==rlzstm)[0]
# for j in unqwhere:
# #ctr=ctr+1
# #print ctr
# outrain[j,:]=np.multiply(catrain[unqstm[i],:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)],trimmask)
# return outrain
#==============================================================================
# THIS VARIANT IS SAME AS ABOVE, BUT HAS A MORE INTERESTING RAINFALL PREPENDING PROCEDURE
#==============================================================================
#def SSTwriteAltPreCat(catrain,rlzx,rlzy,rlzstm,trimmask,xmin,xmax,ymin,ymax,maskheight,maskwidth,precat,ptime):
# catyears=ptime.astype('datetime64[Y]').astype(int)+1970
# ptime=ptime.astype('datetime64[M]').astype(int)-(catyears-1970)*12+1
# nyrs=np.int(rlzx.shape[0])
# raindur=np.int(catrain.shape[1]+precat.shape[1])
# outrain=np.zeros((nyrs,raindur,maskheight,maskwidth),dtype='float32')
# unqstm,unqind,unqcnts=np.unique(rlzstm,return_inverse=True,return_counts=True)
#
# for i in range(0,len(unqstm)):
# unqwhere=np.where(unqstm[i]==rlzstm)[0]
# unqmonth=ptime[unqstm[i]]
# pretimeind=np.where(np.logical_and(ptime>unqmonth-2,ptime<unqmonth+2))[0]
# for j in unqwhere:
# temprain=np.concatenate((np.squeeze(precat[np.random.choice(pretimeind, 1),:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)],axis=0),catrain[unqstm[i],:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)]),axis=0)
# outrain[j,:]=np.multiply(temprain,trimmask)
# return outrain
#
#==============================================================================
# SAME AS ABOVE, BUT HANDLES STORM ROTATION
#==============================================================================
#def SSTwriteAltPreCatRotation(catrain,rlzx,rlzy,rlzstm,trimmask,xmin,xmax,ymin,ymax,maskheight,maskwidth,precat,ptime,delarray,rlzanglebin,rainprop):
##def SSTwriteAltPreCatRotation(catrain,rlzx,rlzy,rlzstm,trimmask,xmin,xmax,ymin,ymax,maskheight,maskwidth,precat,ptime,delarray,rlzanglebin):
# catyears=ptime.astype('datetime64[Y]').astype(int)+1970
# ptime=ptime.astype('datetime64[M]').astype(int)-(catyears-1970)*12+1
# nyrs=np.int(rlzx.shape[0])
# raindur=np.int(catrain.shape[1]+precat.shape[1])
# outrain=np.zeros((nyrs,raindur,maskheight,maskwidth),dtype='float32')
# unqstm,unqind,unqcnts=np.unique(rlzstm,return_inverse=True,return_counts=True) # unqstm is the storm number
#
# for i in range(0,len(unqstm)):
# unqwhere=np.where(unqstm[i]==rlzstm)[0]
# unqmonth=ptime[unqstm[i]]
# pretimeind=np.where(np.logical_and(ptime>unqmonth-2,ptime<unqmonth+2))[0]
# for j in unqwhere:
# inrain=catrain[unqstm[i],:].copy()
#
# xctr=rlzx[j]+maskwidth/2.
# yctr=rlzy[j]+maskheight/2.
# xlinsp=np.linspace(-xctr,rainprop.subdimensions[1]-xctr,rainprop.subdimensions[1])
# ylinsp=np.linspace(-yctr,rainprop.subdimensions[0]-yctr,rainprop.subdimensions[0])
#
# ingridx,ingridy=np.meshgrid(xlinsp,ylinsp)
# ingridx=ingridx.flatten()
# ingridy=ingridy.flatten()
# outgrid=np.column_stack((ingridx,ingridy))
#
# for k in range(0,inrain.shape[0]):
# interp=sp.interpolate.LinearNDInterpolator(delarray[unqstm[i]][rlzanglebin[j]-1],inrain[k,:].flatten(),fill_value=0.)
# inrain[k,:]=np.reshape(interp(outgrid),rainprop.subdimensions)
# #inrain[k,:]=temprain
#
# temprain=np.concatenate((np.squeeze(precat[np.random.choice(pretimeind, 1),:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)],axis=0),inrain[:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)]),axis=0)
#
# outrain[j,:]=np.multiply(temprain,trimmask)
# return outrain
@jit(fastmath=True)
def SSTspin_write_v2(catrain,rlzx,rlzy,rlzstm,trimmask,maskheight,maskwidth,precat,ptime,rainprop,rlzanglebin=None,delarray=None,spin=False,flexspin=True,samptype='uniform',cumkernel=None,rotation=False,domaintype='rectangular'):
catyears=ptime.astype('datetime64[Y]').astype(int)+1970
ptime=ptime.astype('datetime64[M]').astype(int)-(catyears-1970)*12+1
nyrs=np.int(rlzx.shape[0])
raindur=np.int(catrain.shape[1]+precat.shape[1])
outrain=np.zeros((nyrs,raindur,maskheight,maskwidth),dtype='float32')
unqstm,unqind,unqcnts=np.unique(rlzstm,return_inverse=True,return_counts=True) # unqstm is the storm number
for i in range(0,len(unqstm)):
unqwhere=np.where(unqstm[i]==rlzstm)[0]
unqmonth=ptime[unqstm[i]]
pretimeind=np.where(np.logical_and(ptime>unqmonth-1,ptime<unqmonth+1))[0]
# flexspin allows you to use spinup rainfall from anywhere in transposition domain, rather than just storm locations, but it doesn't seem to be very useful based on initial testing
if spin==True and flexspin==True:
if samptype=='kernel' or domaintype=='irregular':
rndloc=np.random.random_sample(len(unqwhere))
                # numbakernel needs preallocated index arrays and the kernel width
                shiftprex=np.empty(len(unqwhere),dtype='int32')
                shiftprey=np.empty(len(unqwhere),dtype='int32')
                shiftprex,shiftprey=numbakernel(rndloc,cumkernel,shiftprex,shiftprey,cumkernel.shape[1])
else:
shiftprex=np.random.random_integers(0,np.int(rainprop.subdimensions[1])-maskwidth-1,len(unqwhere))
shiftprey=np.random.random_integers(0,np.int(rainprop.subdimensions[0])-maskheight-1,len(unqwhere))
ctr=0
for j in unqwhere:
inrain=catrain[unqstm[i],:].copy()
# this doesn't rotate the prepended rainfall
if rotation==True:
xctr=rlzx[j]+maskwidth/2.
yctr=rlzy[j]+maskheight/2.
xlinsp=np.linspace(-xctr,rainprop.subdimensions[1]-xctr,rainprop.subdimensions[1])
ylinsp=np.linspace(-yctr,rainprop.subdimensions[0]-yctr,rainprop.subdimensions[0])
ingridx,ingridy=np.meshgrid(xlinsp,ylinsp)
ingridx=ingridx.flatten()
ingridy=ingridy.flatten()
outgrid=np.column_stack((ingridx,ingridy))
for k in range(0,inrain.shape[0]):
interp=sp.interpolate.LinearNDInterpolator(delarray[unqstm[i]][rlzanglebin[j]-1],inrain[k,:].flatten(),fill_value=0.)
inrain[k,:]=np.reshape(interp(outgrid),rainprop.subdimensions)
if spin==True and flexspin==True:
temprain=np.concatenate((np.squeeze(precat[np.random.choice(pretimeind, 1),:,(shiftprey[ctr]) : (shiftprey[ctr]+maskheight) , (shiftprex[ctr]) : (shiftprex[ctr]+maskwidth)],axis=0),inrain[:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)]),axis=0)
elif spin==True and flexspin==False:
temprain=np.concatenate((np.squeeze(precat[np.random.choice(pretimeind, 1),:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)],axis=0),inrain[:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)]),axis=0)
elif spin==False:
temprain=inrain[:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)]
else:
sys.exit("what else is there?")
ctr=ctr+1
outrain[j,:]=np.multiply(temprain,trimmask)
return outrain
##==============================================================================
## SAME AS ABOVE, BUT A BIT MORE DYNAMIC IN TERMS OF SPINUP
##==============================================================================
#def SSTspin_write_v2(catrain,rlzx,rlzy,rlzstm,trimmask,xmin,xmax,ymin,ymax,maskheight,maskwidth,precat,ptime,rainprop,rlzanglebin=None,delarray=None,spin=False,flexspin=True,samptype='uniform',cumkernel=None,rotation=False,domaintype='rectangular',intense_data=False):
# catyears=ptime.astype('datetime64[Y]').astype(int)+1970
# ptime=ptime.astype('datetime64[M]').astype(int)-(catyears-1970)*12+1
# nyrs=np.int(rlzx.shape[0])
# raindur=np.int(catrain.shape[1]+precat.shape[1])
# outrain=np.zeros((nyrs,raindur,maskheight,maskwidth),dtype='float32')
# unqstm,unqind,unqcnts=np.unique(rlzstm,return_inverse=True,return_counts=True) # unqstm is the storm number
#
# if intense_data!=False:
# sys.exit("Scenario writing for intensity-based resampling not tested!")
# intquant=intense_data[0]
# fullmu=intense_data[1]
# fullstd=intense_data[2]
# muorig=intense_data[3]
# stdorig=intense_data[4]
#
# for i in range(0,len(unqstm)):
# unqwhere=np.where(unqstm[i]==rlzstm)[0]
# unqmonth=ptime[unqstm[i]]
# pretimeind=np.where(np.logical_and(ptime>unqmonth-1,ptime<unqmonth+1))[0]
#
# if transpotype=='intensity':
# origmu=np.multiply(murain[caty[i]:caty[i]+maskheight,catx[i]:catx[i]+maskwidth],trimmask)
# origstd=np.multiply(stdrain[caty[i]:caty[i]+maskheight,catx[i]:catx[i]+maskwidth],trimmask)
# #intense_dat=[intquant[],murain,stdrain,origmu,origstd]
#
# # flexspin allows you to use spinup rainfall from anywhere in transposition domain, rather than just storm locations, but it doesn't seem to be very useful based on initial testing
# if spin==True and flexspin==True:
# if samptype=='kernel' or domaintype=='irregular':
# rndloc=np.random.random_sample(len(unqwhere))
# shiftprex,shiftprey=numbakernel(rndloc,cumkernel)
# else:
# shiftprex=np.random.random_integers(0,np.int(rainprop.subdimensions[1])-maskwidth-1,len(unqwhere))
# shiftprey=np.random.random_integers(0,np.int(rainprop.subdimensions[0])-maskheight-1,len(unqwhere))
#
# ctr=0
# for j in unqwhere:
# inrain=catrain[unqstm[i],:].copy()
#
# if intense_data!=False:
# transmu=np.multiply(fullmu[(rlzy[i]) : (rlzy[i]+maskheight) , (rlzx[i]) : (rlzx[i]+maskwidth)],trimmask)
# transtd=np.multiply(fullstd[(rlzy[i]) : (rlzy[i]+maskheight) , (rlzx[i]) : (rlzx[i]+maskwidth)],trimmask)
# mu_multi=transmu/muorig
# std_multi=np.abs(transtd-stdorig)/stdorig
# multipliermask=norm.ppf(intquant[i],loc=mu_multi,scale=std_multi)
# multipliermask[multipliermask<0.]=0.
# multipliermask[np.isnan(multipliermask)]=0.
#
# # this doesn't rotate the prepended rainfall
# if rotation==True:
# xctr=rlzx[j]+maskwidth/2.
# yctr=rlzy[j]+maskheight/2.
# xlinsp=np.linspace(-xctr,rainprop.subdimensions[1]-xctr,rainprop.subdimensions[1])
# ylinsp=np.linspace(-yctr,rainprop.subdimensions[0]-yctr,rainprop.subdimensions[0])
#
# ingridx,ingridy=np.meshgrid(xlinsp,ylinsp)
# ingridx=ingridx.flatten()
# ingridy=ingridy.flatten()
# outgrid=np.column_stack((ingridx,ingridy))
#
# for k in range(0,inrain.shape[0]):
# interp=sp.interpolate.LinearNDInterpolator(delarray[unqstm[i]][rlzanglebin[j]-1],inrain[k,:].flatten(),fill_value=0.)
# inrain[k,:]=np.reshape(interp(outgrid),rainprop.subdimensions)
#
# if spin==True and flexspin==True:
# temprain=np.concatenate((np.squeeze(precat[np.random.choice(pretimeind, 1),:,(shiftprey[ctr]) : (shiftprey[ctr]+maskheight) , (shiftprex[ctr]) : (shiftprex[ctr]+maskwidth)],axis=0),inrain[:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)]),axis=0)
# elif spin==True and flexspin==False:
# temprain=np.concatenate((np.squeeze(precat[np.random.choice(pretimeind, 1),:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)],axis=0),inrain[:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)]),axis=0)
# elif spin==False:
# temprain=inrain[:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)]
# else:
# sys.exit("what else is there?")
# ctr=ctr+1
# if intense_data!=False:
# outrain[j,:]=np.multiply(temprain,multipliermask)
# else:
# outrain[j,:]=np.multiply(temprain,trimmask)
# return outrain
#==============================================================================
# LOOP FOR KERNEL BASED STORM TRANSPOSITION
# THIS FINDS THE TRANSPOSITION LOCATION FOR EACH REALIZATION IF YOU ARE USING THE KERNEL-BASED RESAMPLER
# IF I CONFIGURE THE SCRIPT SO THE USER CAN PROVIDE A CUSTOM RESAMPLING SCHEME, THIS WOULD PROBABLY WORK FOR THAT AS WELL
#==============================================================================
#def weavekernel(rndloc,cumkernel):
# nlocs=len(rndloc)
# nrows=cumkernel.shape[0]
# ncols=cumkernel.shape[1]
# tempx=np.empty((len(rndloc)),dtype="int32")
# tempy=np.empty((len(rndloc)),dtype="int32")
# code= """
# #include <stdio.h>
# int i,x,y,brklp;
# double prevprob;
# for (i=0;i<nlocs;i++) {
# prevprob=0.0;
# brklp=0;
# for (y=0; y<nrows; y++) {
# for (x=0; x<ncols; x++) {
# if ( (rndloc(i)<=cumkernel(y,x)) && (rndloc(i)>prevprob) ) {
# tempx(i)=x;
# tempy(i)=y;
# prevprob=cumkernel(y,x);
# brklp=1;
# break;
# }
# }
# if (brklp==1) {
# break;
# }
# }
# }
# """
# vars=['rndloc','cumkernel','nlocs','nrows','ncols','tempx','tempy']
# sp.weave.inline(code,vars,type_converters=converters.blitz,compiler='gcc')
# return tempx,tempy
def pykernel(rndloc,cumkernel):
nlocs=len(rndloc)
ncols=cumkernel.shape[1]
tempx=np.empty((len(rndloc)),dtype="int32")
tempy=np.empty((len(rndloc)),dtype="int32")
flatkern=np.append(0.,cumkernel.flatten())
for i in range(0,nlocs):
x=rndloc[i]-flatkern
x[np.less(x,0.)]=1000.
whereind = np.argmin(x)
y=whereind//ncols
x=whereind-y*ncols
tempx[i]=x
tempy[i]=y
return tempx,tempy
@jit
def numbakernel(rndloc,cumkernel,tempx,tempy,ncols):
nlocs=len(rndloc)
#ncols=xdim
flatkern=np.append(0.,cumkernel.flatten())
#x=np.zeros_like(rndloc,dtype='float64')
for i in np.arange(0,nlocs):
x=rndloc[i]-flatkern
x[np.less(x,0.)]=10.
whereind=np.argmin(x)
y=whereind//ncols
x=whereind-y*ncols
tempx[i]=x
tempy[i]=y
return tempx,tempy
@jit
def numbakernel_fast(rndloc,cumkernel,tempx,tempy,ncols):
nlocs=int32(len(rndloc))
ncols=int32(cumkernel.shape[1])
flatkern=np.append(0.,cumkernel.flatten())
return kernelloop(nlocs,rndloc,flatkern,ncols,tempx,tempy)
#@jit(nopython=True,fastmath=True,parallel=True)
@jit(nopython=True,fastmath=True)
def kernelloop(nlocs,rndloc,flatkern,ncols,tempx,tempy):
for i in prange(nlocs):
diff=rndloc[i]-flatkern
diff[np.less(diff,0.)]=10.
whereind=np.argmin(diff)
y=whereind//ncols
x=whereind-y*ncols
tempx[i]=x
tempy[i]=y
return tempx,tempy
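# Illustration of the kernel-based transposition sampling above: the flattened
# cumulative kernel is inverted with a uniform random number to pick a
# (row, column) transposition location in proportion to the kernel density.
# The 2x2 kernel below is illustrative only.
def _example_kernel_sampling(nsamples=5):
    kernel = np.array([[0.1, 0.2],
                       [0.3, 0.4]])                      # sums to 1.0
    cumkernel = np.cumsum(kernel).reshape(kernel.shape)
    rndloc = np.random.random_sample(nsamples)
    tempx, tempy = pykernel(rndloc, cumkernel)
    return tempx, tempy   # column and row indices, weighted by the kernel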
#==============================================================================
# FIND THE BOUNDARY INDICIES AND COORDINATES FOR THE USER-DEFINED SUBAREA
# NOTE THAT subind ARE THE MATRIX INDICIES OF THE SUBBOX, STARTING FROM UPPER LEFT CORNER OF DOMAIN AS (0,0)
# NOTE THAT subcoord ARE THE COORDINATES OF THE OUTSIDE BORDER OF THE SUBBOX
# THEREFORE THE DISTANCE FROM THE WESTERN (SOUTHERN) BOUNDARY TO THE EASTERN (NORTHERN) BOUNDARY IS NCOLS (NROWS) +1 TIMES THE EAST-WEST (NORTH-SOUTH) RESOLUTION
#==============================================================================
def findsubbox(inarea,rainprop):
outind=np.empty([4],dtype='int')
outextent=np.empty([4])
outdim=np.empty([2])
inbox=deepcopy(inarea)
rangex=np.arange(rainprop.bndbox[0],rainprop.bndbox[1]-rainprop.spatialres[0]/1000,rainprop.spatialres[0])
rangey=np.arange(rainprop.bndbox[3],rainprop.bndbox[2]+rainprop.spatialres[1]/1000,-rainprop.spatialres[1])
if rangex.shape[0]<rainprop.dimensions[1]:
rangex=np.append(rangex,rangex[-1])
if rangey.shape[0]<rainprop.dimensions[0]:
rangey=np.append(rangey,rangey[-1])
if rangex.shape[0]>rainprop.dimensions[1]:
rangex=rangex[0:-1]
if rangey.shape[0]>rainprop.dimensions[0]:
rangey=rangey[0:-1]
outextent=inbox
# "SNAP" output extent to grid
outind[0]=np.abs(rangex-outextent[0]).argmin()
outind[1]=np.abs(rangex-outextent[1]).argmin()-1
outind[2]=np.abs(rangey-outextent[2]).argmin()-1
outind[3]=np.abs(rangey-outextent[3]).argmin()
outextent[0]=rangex[outind[0]]
outextent[1]=rangex[outind[1]+1]
outextent[2]=rangey[outind[2]+1]
outextent[3]=rangey[outind[3]]
outdim[1]=np.shape(np.arange(outind[0],outind[1]+1))[0]
outdim[0]=np.shape(np.arange(outind[3],outind[2]+1))[0]
outdim=np.array(outdim,dtype='int32')
return outextent,outind,outdim
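# Worked illustration of the snapping above: a requested analysis box is
# snapped to the precipitation grid, returning matrix indices (from the upper
# left of the domain) plus the snapped outer-edge extent and subgrid size.
# The mock rainprop below carries only the attributes findsubbox touches and
# uses made-up values.
def _example_findsubbox():
    from types import SimpleNamespace
    rainprop = SimpleNamespace(bndbox=[-100., -90., 30., 40.],   # W, E, S, N
                               spatialres=[0.1, 0.1],
                               dimensions=np.array([100, 100], dtype='int32'))
    inarea = [-97.03, -95.52, 33.48, 35.01]                      # W, E, S, N
    extent, ind, dim = findsubbox(inarea, rainprop)
    return extent, ind, dim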
#==============================================================================
# THIS RETURNS A LOGICAL GRID THAT CAN THEN BE APPLIED TO THE GLOBAL GRID TO EXTRACT
# A USEER-DEFINED SUBGRID
# THIS HELPS TO KEEP ARRAY SIZES SMALL
#==============================================================================
def creategrids(rainprop):
globrangex=np.arange(0,rainprop.dimensions[1],1)
globrangey=np.arange(0,rainprop.dimensions[0],1)
subrangex=np.arange(rainprop.subind[0],rainprop.subind[1]+1,1)
subrangey=np.arange(rainprop.subind[3],rainprop.subind[2]+1,1)
subindx=np.logical_and(globrangex>=subrangex[0],globrangex<=subrangex[-1])
subindy=np.logical_and(globrangey>=subrangey[0],globrangey<=subrangey[-1])
gx,gy=np.meshgrid(subindx,subindy)
outgrid=np.logical_and(gx==True,gy==True)
return outgrid,subindx,subindy
#==============================================================================
# FUNCTION TO CREATE A MASK ACCORDING TO A USER-DEFINED POLYGON SHAPEFILE AND PROJECTION
# THIS USES GDAL COMMANDS FROM THE OS TO RASTERIZE
#==============================================================================
def rastermaskGDAL(shpname,shpproj,rainprop,masktype,fullpath,gdalpath=False):
bndbox=np.array(rainprop.subind)
bndcoords=np.array(rainprop.subextent)
if rainprop.projection==GEOG:
xdim=np.shape(np.linspace(bndcoords[0],bndcoords[1],rainprop.subind[1]-rainprop.subind[0]+1))[0]
ydim=np.shape(np.linspace(bndcoords[2],bndcoords[3],rainprop.subind[2]-rainprop.subind[3]+1))[0]
else:
sys.exit("unrecognized projection!")
rastertemplate=np.zeros((ydim,xdim),dtype='float32')
if masktype=='simple':
print('creating simple mask (0s and 1s)')
#os.system('gdal_rasterize -at -burn 1.0 -te '+str(rainprop.subextent[0])+' '+str(rainprop.subextent[2])+' '+str(rainprop.subextent[1])+' '+str(rainprop.subextent[3])+' -tr '+str(rainprop.spatialres[0])+' '+str(rainprop.spatialres[1])+' -ts '+str(np.int(rainprop.subdimensions[1]))+' '+str(np.int(rainprop.subdimensions[0]))+' -ot Float32 '+shpname+' '+fullpath+'/temp.tiff');
if gdalpath!=False:
rasterizecmd=gdalpath+'/gdal_rasterize -at -burn 1.0 -te '+"%.9f"%(rainprop.subextent[0])+' '+"%.9f"%(rainprop.subextent[2])+' '+"%.9f"%(rainprop.subextent[1])+' '+"%.9f"%(rainprop.subextent[3])+' -tr '+"%.9f"%(rainprop.spatialres[0])+' '+"%.9f"%(rainprop.spatialres[1])+' -ts '+"%.9f"%(np.int(rainprop.subdimensions[1]))+' '+"%.9f"%(np.int(rainprop.subdimensions[0]))+' -ot Float32 '+shpname+' '+fullpath+'/temp.tiff'
else:
rasterizecmd='gdal_rasterize -at -burn 1.0 -te '+"%.9f"%(rainprop.subextent[0])+' '+"%.9f"%(rainprop.subextent[2])+' '+"%.9f"%(rainprop.subextent[1])+' '+"%.9f"%(rainprop.subextent[3])+' -tr '+"%.9f"%(rainprop.spatialres[0])+' '+"%.9f"%(rainprop.spatialres[1])+' -ts '+"%.9f"%(np.int(rainprop.subdimensions[1]))+' '+"%.9f"%(np.int(rainprop.subdimensions[0]))+' -ot Float32 '+shpname+' '+fullpath+'/temp.tiff'
os.system(rasterizecmd)
ds=rasterio.open(fullpath+'/temp.tiff')
rastertemplate=ds.read(1)
os.system('rm '+fullpath+'/temp.tiff')
elif masktype=="fraction":
print('creating fractional mask (range from 0.0-1.0)')
#os.system('gdal_rasterize -at -burn 1.0 -te '+str(rainprop.subextent[0])+' '+str(rainprop.subextent[2])+' '+str(rainprop.subextent[1])+' '+str(rainprop.subextent[3])+' -tr '+str(rainprop.spatialres[0]/10.)+' '+str(rainprop.spatialres[1]/10.)+' -ts '+str(np.int(rainprop.subdimensions[1])*10)+' '+str(np.int(rainprop.subdimensions[0])*10)+' -ot Float32 '+shpname+' '+fullpath+'/temp.tiff');
#os.system('gdalwarp -r average -te '+str(rainprop.subextent[0])+' '+str(rainprop.subextent[2])+' '+str(rainprop.subextent[1])+' '+str(rainprop.subextent[3])+' -ts '+str(np.int(rainprop.subdimensions[1]))+' '+str(np.int(rainprop.subdimensions[0]))+' -overwrite '+fullpath+'/temp.tiff '+fullpath+'/tempAGG.tiff');
if gdalpath!=False:
rasterizecmd=gdalpath+'/gdal_rasterize -at -burn 1.0 -te '+"%.9f"%(rainprop.subextent[0])+' '+"%.9f"%(rainprop.subextent[2])+' '+"%.9f"%(rainprop.subextent[1])+' '+"%.9f"%(rainprop.subextent[3])+' -tr '+"%.9f"%(rainprop.spatialres[0]/10.)+' '+"%.9f"%(rainprop.spatialres[1]/10.)+' -ts '+"%.9f"%(np.int(rainprop.subdimensions[1])*10)+' '+"%.9f"%(np.int(rainprop.subdimensions[0])*10)+' -ot Float32 '+shpname+' '+fullpath+'/temp.tiff'
else:
rasterizecmd='gdal_rasterize -at -burn 1.0 -te '+"%.9f"%(rainprop.subextent[0])+' '+"%.9f"%(rainprop.subextent[2])+' '+"%.9f"%(rainprop.subextent[1])+' '+"%.9f"%(rainprop.subextent[3])+' -tr '+"%.9f"%(rainprop.spatialres[0]/10.)+' '+"%.9f"%(rainprop.spatialres[1]/10.)+' -ts '+"%.9f"%(np.int(rainprop.subdimensions[1])*10)+' '+"%.9f"%(np.int(rainprop.subdimensions[0])*10)+' -ot Float32 '+shpname+' '+fullpath+'/temp.tiff'
os.system(rasterizecmd)
if gdalpath!=False:
warpcmd=gdalpath+'/gdalwarp -r average -te '+"%.9f"%(rainprop.subextent[0])+' '+"%.9f"%(rainprop.subextent[2])+' '+"%.9f"%(rainprop.subextent[1])+' '+"%.9f"%(rainprop.subextent[3])+' -ts '+"%.9f"%(np.int(rainprop.subdimensions[1]))+' '+"%.9f"%(np.int(rainprop.subdimensions[0]))+' -overwrite '+fullpath+'/temp.tiff '+fullpath+'/tempAGG.tiff'
else:
warpcmd='gdalwarp -r average -te '+"%.9f"%(rainprop.subextent[0])+' '+"%.9f"%(rainprop.subextent[2])+' '+"%.9f"%(rainprop.subextent[1])+' '+"%.9f"%(rainprop.subextent[3])+' -ts '+"%.9f"%(np.int(rainprop.subdimensions[1]))+' '+"%.9f"%(np.int(rainprop.subdimensions[0]))+' -overwrite '+fullpath+'/temp.tiff '+fullpath+'/tempAGG.tiff'
os.system(warpcmd)
ds=rasterio.open(fullpath+'/tempAGG.tiff')
rastertemplate=ds.read(1)
os.system('rm '+fullpath+'/temp.tiff')
os.system('rm '+fullpath+'/tempAGG.tiff')
else:
sys.exit("You entered an incorrect mask type, options are 'simple' or 'fraction'")
rastertemplate=np.array(rastertemplate[:])
return rastertemplate
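# Hypothetical call sketch for the rasteriser above: the shapefile and scratch
# directory paths are placeholders, and a configured rainprop (with subextent,
# subind, subdimensions, spatialres and projection set) is assumed to exist.
def _example_watershed_mask(rainprop):
    shpname = '/path/to/watershed.shp'     # placeholder shapefile
    fullpath = '/path/to/workdir'          # placeholder scratch directory
    # 'simple' burns a 0/1 mask; 'fraction' gives areal fractions in [0, 1]
    return rastermaskGDAL(shpname, GEOG, rainprop, 'fraction', fullpath)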
#==============================================================================
# WRITE SCENARIOS TO NETCDF ONE REALIZATION AT A TIME
#==============================================================================
def writerealization(rlz,nrealizations,writename,outrain,writemax,writestorm,writeperiod,writex,writey,writetimes,latrange,lonrange,whichorigstorm):
# SAVE outrain AS NETCDF FILE
dataset=Dataset(writename, 'w', format='NETCDF4')
# create dimensions
outlats=dataset.createDimension('outlat',len(latrange))
outlons=dataset.createDimension('outlon',len(lonrange))
time=dataset.createDimension('time',writetimes.shape[1])
nyears=dataset.createDimension('nyears',len(writeperiod))
# create variables
times=dataset.createVariable('time',np.float64, ('nyears','time'))
latitudes=dataset.createVariable('latitude',np.float32, ('outlat'))
longitudes=dataset.createVariable('longitude',np.float32, ('outlon'))
rainrate=dataset.createVariable('rainrate',np.float32,('nyears','time','outlat','outlon'),zlib=True,complevel=4,least_significant_digit=2)
basinrainfall=dataset.createVariable('basinrainfall',np.float32,('nyears'))
xlocation=dataset.createVariable('xlocation',np.int32,('nyears'))
ylocation=dataset.createVariable('ylocation',np.int32,('nyears'))
returnperiod=dataset.createVariable('returnperiod',np.float32,('nyears'))
stormnumber=dataset.createVariable('stormnumber',np.int32,('nyears'))
original_stormnumber=dataset.createVariable('original_stormnumber',np.int32,('nyears'))
#stormtimes=dataset.createVariable('stormtimes',np.float64,('nyears'))
# Global Attributes
dataset.description = 'SST Rainfall Scenarios Realization: '+str(rlz+1)+' of '+str(nrealizations)
dataset.history = 'Created ' + str(datetime.now())
dataset.source = 'Storm Catalog for (FILL IN THE BLANK)'
    # Variable Attributes (time stored as minutes since 1970-01-01 00:00)
latitudes.units = 'degrees north'
longitudes.units = 'degrees east'
rainrate.units = 'mm/h'
times.units = 'minutes since 1970-01-01 00:00.0'
times.calendar = 'gregorian'
#print dataset.description
#print dataset.history
# fill the netcdf file
latitudes[:]=latrange
longitudes[:]=lonrange
rainrate[:]=outrain
basinrainfall[:]=writemax
times[:]=writetimes
xlocation[:]=writex
ylocation[:]=writey
stormnumber[:]=writestorm
returnperiod[:]=writeperiod
original_stormnumber[:]=whichorigstorm
#stormtimes[:]=writetimes
dataset.close()
#==============================================================================
# WRITE The maximized storm
#==============================================================================
def writemaximized(writename,outrain,writemax,write_ts,writex,writey,writetimes,latrange,lonrange):
# SAVE outrain AS NETCDF FILE
dataset=Dataset(writename, 'w', format='NETCDF4')
# create dimensions
outlats=dataset.createDimension('outlat',len(latrange))
outlons=dataset.createDimension('outlon',len(lonrange))
time=dataset.createDimension('time',len(writetimes))
# create variables
times=dataset.createVariable('time',np.float64, ('time'))
latitudes=dataset.createVariable('latitude',np.float32, ('outlat'))
longitudes=dataset.createVariable('longitude',np.float32, ('outlon'))
rainrate=dataset.createVariable('rainrate',np.float32,('time','outlat','outlon'),zlib=True,complevel=4,least_significant_digit=2)
basinrainfall=dataset.createVariable('basinrainfall',np.float32)
xlocation=dataset.createVariable('xlocation',np.int32)
ylocation=dataset.createVariable('ylocation',np.int32)
#stormtimes=dataset.createVariable('stormtimes',np.float64,('nyears'))
# Global Attributes
dataset.description = 'SST Rainfall Maximum Storm'
dataset.history = 'Created ' + str(datetime.now())
dataset.source = 'Storm Catalog for (FILL IN THE BLANK)'
    # Variable Attributes (time stored as minutes since 1970-01-01 00:00)
latitudes.units = 'degrees north'
longitudes.units = 'degrees east'
rainrate.units = 'mm/h'
times.units = 'minutes since 1970-01-01 00:00.0'
times.calendar = 'gregorian'
#print dataset.description
#print dataset.history
# fill the netcdf file
latitudes[:]=latrange
longitudes[:]=lonrange
rainrate[:]=outrain
basinrainfall[:]=writemax
times[:]=writetimes
xlocation[:]=writex
ylocation[:]=writey
dataset.close()
#==============================================================================
# READ RAINFALL FILE FROM NETCDF
#==============================================================================
def readnetcdf(rfile,inbounds=False):
infile=Dataset(rfile,'r')
if np.any(inbounds!=False):
outrain=np.array(infile.variables['rainrate'][:,inbounds[3]:inbounds[2]+1,inbounds[0]:inbounds[1]+1])
outlatitude=np.array(infile.variables['latitude'][inbounds[3]:inbounds[2]+1])
outlongitude=np.array(infile.variables['longitude'][inbounds[0]:inbounds[1]+1])
else:
outrain=np.array(infile.variables['rainrate'][:])
outlatitude=np.array(infile.variables['latitude'][:])
outlongitude=np.array(infile.variables['longitude'][:])
outtime=np.array(infile.variables['time'][:],dtype='datetime64[m]')
infile.close()
return outrain,outtime,outlatitude,outlongitude
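# Usage sketch: read one input rainfall netCDF file, optionally windowed to the
# transposition subdomain.  The file name is a placeholder; inbounds, if used,
# takes the subdomain indices returned by findsubbox.
def _example_read_rainfall():
    rain, times, lats, lons = readnetcdf('/path/to/rainfall_20050101.nc')
    return rain.shape, times.shape, lats.shape, lons.shape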
#==============================================================================
# READ RAINFALL FILE FROM NETCDF
#==============================================================================
def readcatalog(rfile):
infile=Dataset(rfile,'r')
outrain=np.array(infile.variables['rainrate'][:])
outtime=np.array(infile.variables['time'][:],dtype='datetime64[m]')
outlatitude=np.array(infile.variables['latitude'][:])
outlongitude=np.array(infile.variables['longitude'][:])
outlocx=np.array(infile.variables['xlocation'][:])
outlocy=np.array(infile.variables['ylocation'][:])
outmax=np.array(infile.variables['basinrainfall'][:])
outmask=np.array(infile.variables['gridmask'][:])
domainmask= | np.array(infile.variables['domainmask'][:]) | numpy.array |
# -*- coding: utf-8 -*-
import numpy as np
from scipy import stats, interpolate
import matplotlib.pyplot as plt
from ReflectivitySolver import ReflectivitySolver
from sourcefunction import SourceFunctionGenerator
from utils import create_timevector, create_frequencyvector
def plot_PT_summary(samplers, burn_in=0):
n_temps = len(samplers)
burn_in = round(burn_in * samplers[0].masteriter)
plt.figure(num=2), plt.clf()
for t in range(n_temps):
plt.semilogy(samplers[t].betas)
if burn_in > 0:
min_temp = np.min(samplers[-1].betas)
plt.plot(np.array([burn_in, burn_in]), np.array([min_temp, 1]), 'k-', linewidth=2)
plt.xlabel('Iteration')
plt.ylabel('Beta')
plt.title('Inverse temperatures (betas) of the samplers')
def plot_chains(sampler, burn_in=0):
bounds = sampler.posterior_cls.priormodel.layer_bounds
burn_in = round(burn_in * sampler.masteriter)
par_names = sampler.posterior_cls.priormodel.par_names
par_units = sampler.posterior_cls.priormodel.par_units
k = stats.mode(sampler.master_model_iter[burn_in:, 0])[0][0]
maxk = np.max(sampler.master_model_iter[:, 0])
n_iter = sampler.masteriter
# Find the first sample of model k after the burn-in period
first_k_after_burn_in = np.argmax(sampler.master_model_iter[burn_in:, 0] == k)
k_start_iter = sampler.master_model_iter[burn_in + first_k_after_burn_in, 1]
minPost = np.min(sampler.log_posts[int(0.01 * n_iter):])
maxPost = np.max(sampler.log_posts[int(0.01 * n_iter):])
minsigma = np.min(sampler.noise_samples[int(0.01 * n_iter):])
maxsigma = np.max(sampler.noise_samples[int(0.01 * n_iter):])
min_src = np.min(sampler.source_samples[int(0.01 * n_iter):])
max_src = np.max(sampler.source_samples[int(0.01 * n_iter):])
mrkrsize = 0.5
plt.figure(num=1); plt.clf()
plt.subplot(3,4,1)
plt.plot(sampler.log_posts[int(0.01 * n_iter):],'.', markersize=mrkrsize)
plt.plot(np.array([burn_in, burn_in]), np.array([minPost, maxPost]), 'k-', linewidth=2)
plt.title("Log posterior")
plt.subplot(3,4,2)
plt.plot(sampler.master_model_iter[:, 0], '.', markersize=mrkrsize)
plt.plot(np.array([burn_in, burn_in]), np.array([0, maxk]), 'k-', linewidth=2)
plt.title("Model index (vert. line = burn in)")
plt.subplot(3,4,3)
plt.plot(sampler.layer_samples[k][0::6, :].T, '.', markersize=mrkrsize)
plt.plot(np.array([k_start_iter, k_start_iter]),
np.array([bounds[0, 0], bounds[0, 1]]), 'k-', linewidth=2)
plt.title(par_names[0])
plt.ylabel(par_units[0])
plt.subplot(3,4,4)
plt.plot(sampler.layer_samples[k][1::6, :].T, '.', markersize=mrkrsize)
plt.plot(np.array([k_start_iter, k_start_iter]),
np.array([bounds[1, 0], bounds[1, 1]]), 'k-', linewidth=2)
plt.title(par_names[1])
plt.ylabel(par_units[1])
plt.subplot(3,4,5)
plt.semilogy(sampler.noise_proposal.AM_factors, 'k--')
plt.semilogy(sampler.src_proposal.AM_factors, 'g--')
nmodels = len(sampler.iter)
for ii in range(nmodels):
if sampler.iter[ii] > -1:
plt.semilogy(sampler.layer_proposal[ii].AM_factors)
plt.title("Proposal scale factors")
plt.subplot(3,4,6)
n_min = sampler.posterior_cls.priormodel.n_layers_min
plt.hist(
n_min + sampler.master_model_iter[burn_in:, 0],
bins=np.arange(
n_min,
sampler.posterior_cls.priormodel.n_layers_max + 1
) + 0.5,
edgecolor='white',
linewidth=2,
density=True
    )
plt.title("Layer number probabilities (after burn-in)")
plt.subplot(3,4,7)
plt.plot(sampler.layer_samples[k][2::6, :].T, '.', markersize=mrkrsize)
plt.plot(np.array([k_start_iter, k_start_iter]),
np.array([bounds[2, 0], bounds[2, 1]]), 'k-', linewidth=2)
plt.title(par_names[2])
plt.ylabel(par_units[2])
plt.subplot(3,4,8)
plt.plot(sampler.layer_samples[k][3::6, :].T, '.', markersize=mrkrsize)
plt.plot(np.array([k_start_iter, k_start_iter]),
np.array([bounds[3, 0], bounds[3, 1]]), 'k-', linewidth=2)
plt.title(par_names[3])
plt.ylabel(par_units[3])
plt.subplot(3,4,9)
plt.plot(sampler.noise_samples[int(0.01 * n_iter):], '.', markersize=mrkrsize)
plt.plot(np.array([burn_in, burn_in]), np.array([minsigma, maxsigma]), 'k-', linewidth=2)
plt.title(par_names[6])
plt.ylabel(par_units[6])
plt.subplot(3,4,10)
plt.plot(sampler.source_samples[int(0.01 * n_iter):], '.', markersize=mrkrsize)
plt.plot(np.array([burn_in, burn_in]), np.array([min_src, max_src]), 'k-', linewidth=2)
plt.title(par_names[7])
plt.ylabel(par_units[7])
plt.subplot(3,4,11)
plt.plot(sampler.layer_samples[k][4::6, :].T, '.', markersize=mrkrsize)
plt.plot(np.array([k_start_iter, k_start_iter]),
np.array([bounds[4, 0], bounds[4, 1]]), 'k-', linewidth=2)
plt.title(par_names[4])
plt.ylabel(par_units[4])
plt.subplot(3,4,12)
depths = thickness_to_depth(sampler.layer_samples[k][5::6, :].T)
if depths.shape[1] > 1:
plt.plot(depths[:, :-1], '.', markersize=mrkrsize) # Don't plot the last layer 'depth'
plt.plot(np.array([k_start_iter, k_start_iter]),
np.array([bounds[5, 0], bounds[5, 1]]), 'k-', linewidth=2)
plt.title('Layer depth')
plt.ylabel(par_units[5])
plt.show(block=False)
def thickness_to_depth(thicknesses):
n_layers = thicknesses.shape[1]
depths = np.zeros_like(thicknesses)
depths[:, 0] = thicknesses[:, 0]
for i in range(1, n_layers):
depths[:, i] = depths[:, i - 1] + thicknesses[:, i] # cumulative sum
return depths
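# The loop above is a running sum along the layer axis, i.e. equivalent to
# np.cumsum(thicknesses, axis=1); a tiny check with illustrative values:
def _example_thickness_to_depth():
    thicknesses = np.array([[2., 3., 5.],
                            [1., 1., 1.]])
    depths = thickness_to_depth(thicknesses)
    assert np.allclose(depths, np.cumsum(thicknesses, axis=1))
    return depths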
def plot_shotgather(datamatrix, timevec, receivers, **kwargs):
"""
Plot a common shot gather.
Parameters
----------
datamatrix : (n_timesamples x n_receivers)-sized np.ndarray
timevec : timevector of the measurements
receivers : receiver locations corresponding to the datamatrix
**kwargs :
fignum = Number of the figure you want to plot in.
plstyle = Style of the lines in the plot.
normcoeff = Coefficient with which you normalise the seismograms (so
that you can plot several seismograms with comparable
amplitudes). The default is that the largest amplitude in
the shotgather is normalised to one.
Returns
-------
None.
"""
options = {
'fignum' : None,
'pltstyle' : 'k-',
'normcoeff' : None,
'clf' : False,
'title' : None,
'alpha' : 1,
'linewidth' : 1}
options.update(kwargs)
if options['fignum'] is not None:
plt.figure(num=options['fignum'])
else:
plt.figure()
if options['normcoeff'] is not None:
norm_coeff = options['normcoeff']
else:
norm_coeff = np.max(abs(datamatrix[:]))
if options['clf']:
plt.clf()
n_rec = datamatrix.shape[1]
assert(len(receivers) == n_rec)
if len(receivers) > 1:
rec_dist = np.mean(np.diff(receivers)) * 1
else:
rec_dist = 1
for rec in range(n_rec):
seismogram_normalised = datamatrix[:, rec] / norm_coeff * rec_dist
plt.plot(receivers[rec] + seismogram_normalised, timevec, options['pltstyle'], alpha=options['alpha'])
plt.grid('on')
plt.axis('tight')
plt.ylim(timevec[0], timevec[-1])
plt.gca().invert_yaxis()
plt.title(options['title'])
plt.ylabel('Time (s)')
plt.xlabel('Receiver location and measurement (m)')
plt.show()
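# Usage sketch (hypothetical arrays u_z, u_z_model, timevec, receivers): plot two
# gathers with a shared normalisation so their amplitudes stay comparable.
#     norm = np.max(np.abs(u_z))
#     plot_shotgather(u_z, timevec, receivers, fignum=1, normcoeff=norm)
#     plot_shotgather(u_z_model, timevec, receivers, fignum=2, normcoeff=norm, pltstyle='b-')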
def posterior_predictive_distribution(sampler, burn_in=0):
receivers = sampler.posterior_cls.measurement.receivers
n_rec = len(receivers)
burn_in = round(burn_in * sampler.masteriter)
normarg = np.max(np.abs(sampler.posterior_cls.measurement.u_z))
plot_shotgather(
sampler.posterior_cls.measurement.u_z,
sampler.posterior_cls.measurement.time,
receivers,
fignum=101, normcoeff=normarg, clf=True,
title='Measured seismogram and 95 % credible intervals'
)
T_max_plot = sampler.posterior_cls.measurement.T_max
# Increase this for a smaller dt in the plot
f_max_plot = 1 * sampler.posterior_cls.measurement.f_max
freq_plot, dt_plot = create_frequencyvector(T_max_plot, f_max_plot)
n_f_plot = len(freq_plot)
plot_timevec = create_timevector(T_max_plot, dt_plot)
ReflectivitySolver.terminate()
ReflectivitySolver.initialize(
freq_plot,
receivers,
sampler.posterior_cls.priormodel.cP_max,
sampler.posterior_cls.priormodel.cS_min
)
source_generator = SourceFunctionGenerator(freq_plot)
n_realizations = 400
u_z_samples = np.zeros((n_realizations, 2 * (n_f_plot - 1), n_rec))
for i in range(n_realizations):
idx = np.random.randint(burn_in, sampler.masteriter)
k, k_iter = sampler.master_model_iter[idx]
randsample = sampler.layer_samples[k][:, k_iter]
randsample = np.asfortranarray(randsample.reshape(-1,6))
srcsample = sampler.source_samples[idx]
# source = source_generator.Ricker(srcsample[0], srcsample[1])
source = source_generator.Ricker(sampler.posterior_cls.priormodel.src_ampl, srcsample[0])
u_z_samples[i] = ReflectivitySolver.compute_timedomain_src(randsample, source)
u_z_samples[i] += sampler.noise_samples[idx] \
* np.random.randn(2 * (n_f_plot - 1), n_rec)
# # Uncomment this to plot some model realisations
# if( i < 2 ):
# plot_shotgather(
# u_z_samples[i], plot_timevec, receivers, fignum=101, normcoeff=normarg,
# pltstyle='b-', alpha=0.1
# )
ReflectivitySolver.terminate()
if len(receivers) > 1:
rec_dist = np.mean(np.diff(receivers)) * 1
else:
rec_dist = 1
# Percentiles (c.f. standard deviations when the distribution is normal)
pr1 = 50 + 68.27/2
pr2 = 50 + 95.45/2
pr3 = 50 + 99.73/2
for i in range(n_rec):
percentiles = np.percentile(
u_z_samples[:, :, i], (100-pr3, 100-pr2, 100-pr1, pr1, pr2, pr3), axis=0
)
plt.fill_betweenx(
plot_timevec,
receivers[i] + percentiles[1, :] / normarg * rec_dist,
receivers[i] + percentiles[4, :] / normarg * rec_dist,
color='C0',
alpha=0.3
)
plt.show(block=False)
def marginal_posterior_densities(sampler, normalize=False, burn_in=0):
n_z = 300 # number of pixels in the depth direction
n_samples_plot = int(2e4) # number of samples used to create the plots
burn_in = round(burn_in * sampler.masteriter)
bounds = sampler.posterior_cls.priormodel.layer_bounds
maxdepth = bounds[5, 1]
z_vector = np.linspace(0, maxdepth, n_z)
n_params = 5
oneD_CDF_plot = np.zeros(sampler.posterior_cls.priormodel.n_layers_max * n_samples_plot)
twoD_CDF_plot = np.zeros((n_params, 2, n_z * n_samples_plot))
counter = 0
for ii in range(n_samples_plot):
idx = np.random.randint(burn_in, sampler.masteriter)
k, k_iter = sampler.master_model_iter[idx]
thicknesses = sampler.layer_samples[k][5::6, k_iter]
depths = np.cumsum(thicknesses[:-1])
params = sampler.layer_samples[k][:, k_iter].reshape(-1, 6)[:, :-1]
if len(thicknesses) > 1:
n_new_vals = len(depths)
oneD_CDF_plot[counter : counter + n_new_vals] = depths
counter += n_new_vals
        pltdepths = np.concatenate([[0], np.repeat(depths, 2)])
"""Unit-test for stmetrics."""
def test_getmetrics():
import numpy
import stmetrics
series = numpy.array([0.157327502966,0.168894290924,0.141409546137,
0.113800831139,0.0922891944647,0.0747280195355,
0.0537555813789,0.0660935789347,0.0770644843578,
0.0739007592201,0.0983928665519,0.192401319742,
0.286366194487,0.367539167404,0.420437157154,
0.418041080236,0.413386583328,0.375436246395,
0.335108757019,0.307270467281,0.250428706408,
0,1,0,
0.103006377816,0.115561470389,0.114221975207,
0.172464296222,0.284338653088,0.386188000441,
0.45704460144,0.571164608002,0.707974851131,
0.648853778839,0.580699682236,0.566288888454,
0.547502994537,0.500209212303,0.447707682848,
0.39193546772,0.357513874769,0.290982276201,
0.217830166221,0.148564651608,0.101060912013,
0.111838668585,0.121473513544,0.113883294165,
0.114351868629,0.116994164884,0.0982540994883,
0.0843055993319,0.0827744230628,0.0758764594793,
0.0936531722546,0.0942907482386,0.172556817532])
metrics = {'basics': {'max_ts': 0.707974,
'min_ts': 0.0,
'mean_ts': 0.237823,
'std_ts': 0.183005,
'sum_ts': 13.318112,
'amplitude_ts': 0.707974,
'mse_ts': 5.042865,
'fslope_ts': 0.250428,
'skew_ts': 0.795801,
'amd_ts': 0.043546,
'abs_sum_ts': 13.318112,
'iqr_ts': 0.28086,
'fqr_ts': 0.096272,
'tqr_ts': 0.380812,
'sqr_ts': 0.158729},
'polar': {'ecc_metric': 0.987689,
'gyration_radius': 0.378319,
'area_ts': 0.276252,
'polar_balance': 0.069048,
'angle': 3.541431,
'area_q1': 0.046879,
'area_q2': 0.033173,
'area_q3': 0.186429,
'area_q4': 0.00977,
'csi': 2.658336},
'fractal': {'dfa_fd': 2.053765, 'hurst_exp': 0.87168, 'katz_fd': 1.437053}}
out = stmetrics.metrics.get_metrics(series,nodata=0.157327502966)
assert metrics == out
def test_basics():
import stmetrics
import numpy
basicas = {'max_ts': 1.0,
'min_ts': 1.0,
'mean_ts': 1.0,
'std_ts': 0.0,
'sum_ts': 360.0,
'amplitude_ts': 0.0,
'mse_ts': 360.0,
'fslope_ts': 0.0,
'skew_ts': 0.0,
'amd_ts': 0.0,
'abs_sum_ts': 360.0,
'iqr_ts': 0.0,
'fqr_ts': 1.0,
'tqr_ts': 1.0,
'sqr_ts': 1.0}
bmetrics = stmetrics.basics.ts_basics(numpy.ones((1,360)).T)
assert basicas == bmetrics
def test_fractal():
import stmetrics
import numpy
    fractais = {'dfa_fd': numpy.nan,
                'hurst_exp': numpy.nan,
                'katz_fd': numpy.nan}
    bmetrics = stmetrics.fractal.ts_fractal(numpy.ones((1,360)).T)
    assert fractais == bmetrics
import tempfile, os, glob
from scipy.stats import norm as ndist
from traitlets import (HasTraits,
Integer,
Unicode,
Float,
Integer,
Instance,
Dict,
Bool,
default)
import numpy as np
import regreg.api as rr
from selection.algorithms.lasso import lasso, lasso_full, lasso_full_modelQ
from selection.algorithms.sqrt_lasso import choose_lambda
from selection.truncated.gaussian import truncated_gaussian_old as TG
from selection.randomized.lasso import lasso as random_lasso_method, form_targets
from selection.randomized.modelQ import modelQ as randomized_modelQ
from utils import BHfilter
from selection.randomized.base import restricted_estimator
# Rpy
import rpy2.robjects as rpy
from rpy2.robjects import numpy2ri
methods = {}
class generic_method(HasTraits):
need_CV = False
selectiveR_method = False
wide_ok = True # ok for p>= n?
# Traits
q = Float(0.2)
method_name = Unicode('Generic method')
model_target = Unicode()
@classmethod
def setup(cls, feature_cov):
cls.feature_cov = feature_cov
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
(self.X,
self.Y,
self.l_theory,
self.l_min,
self.l_1se,
self.sigma_reid) = (X,
Y,
l_theory,
l_min,
l_1se,
sigma_reid)
def select(self):
raise NotImplementedError('abstract method')
@classmethod
def register(cls):
methods[cls.__name__] = cls
def selected_target(self, active, beta):
C = self.feature_cov[active]
Q = C[:,active]
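        # Selected-model target: project the full coefficient vector onto the active
        # set, i.e. (Sigma_{A,A})^{-1} Sigma_{A,.} beta, using the known feature covariance.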
return np.linalg.inv(Q).dot(C.dot(beta))
def full_target(self, active, beta):
return beta[active]
def get_target(self, active, beta):
if self.model_target not in ['selected', 'full']:
raise ValueError('Gaussian methods only have selected or full targets')
if self.model_target == 'full':
return self.full_target(active, beta)
else:
return self.selected_target(active, beta)
# Knockoff selection
class knockoffs_mf(generic_method):
method_name = Unicode('Knockoffs')
knockoff_method = Unicode('Second order')
model_target = Unicode("full")
def select(self):
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('Y', self.Y)
rpy.r.assign('q', self.q)
rpy.r('V=knockoff.filter(X, Y, fdr=q)$selected')
rpy.r('if (length(V) > 0) {V = V-1}')
V = rpy.r('V')
numpy2ri.deactivate()
return np.asarray(V, np.int), np.asarray(V, np.int)
except:
return [], []
knockoffs_mf.register()
class knockoffs_sigma(generic_method):
factor_method = 'asdp'
method_name = Unicode('Knockoffs')
knockoff_method = Unicode("ModelX (asdp)")
model_target = Unicode("full")
@classmethod
def setup(cls, feature_cov):
cls.feature_cov = feature_cov
numpy2ri.activate()
# see if we've factored this before
have_factorization = False
if not os.path.exists('.knockoff_factorizations'):
os.mkdir('.knockoff_factorizations')
factors = glob.glob('.knockoff_factorizations/*npz')
for factor_file in factors:
factor = np.load(factor_file)
feature_cov_f = factor['feature_cov']
if ((feature_cov_f.shape == feature_cov.shape) and
(factor['method'] == cls.factor_method) and
np.allclose(feature_cov_f, feature_cov)):
have_factorization = True
print('found factorization: %s' % factor_file)
cls.knockoff_chol = factor['knockoff_chol']
if not have_factorization:
print('doing factorization')
cls.knockoff_chol = factor_knockoffs(feature_cov, cls.factor_method)
numpy2ri.deactivate()
def select(self):
numpy2ri.activate()
rpy.r.assign('chol_k', self.knockoff_chol)
rpy.r('''
knockoffs = function(X) {
mu = rep(0, ncol(X))
mu_k = X # sweep(X, 2, mu, "-") %*% SigmaInv_s
X_k = mu_k + matrix(rnorm(ncol(X) * nrow(X)), nrow(X)) %*%
chol_k
return(X_k)
}
''')
numpy2ri.deactivate()
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('Y', self.Y)
rpy.r.assign('q', self.q)
rpy.r('V=knockoff.filter(X, Y, fdr=q, knockoffs=knockoffs)$selected')
rpy.r('if (length(V) > 0) {V = V-1}')
V = rpy.r('V')
numpy2ri.deactivate()
return np.asarray(V, np.int), np.asarray(V, np.int)
except:
return [], []
knockoffs_sigma.register()
def factor_knockoffs(feature_cov, method='asdp'):
numpy2ri.activate()
rpy.r.assign('Sigma', feature_cov)
rpy.r.assign('method', method)
rpy.r('''
# Compute the Cholesky -- from create.gaussian
diag_s = diag(switch(method, equi = create.solve_equi(Sigma),
sdp = create.solve_sdp(Sigma), asdp = create.solve_asdp(Sigma)))
if (is.null(dim(diag_s))) {
diag_s = diag(diag_s, length(diag_s))
}
SigmaInv_s = solve(Sigma, diag_s)
Sigma_k = 2 * diag_s - diag_s %*% SigmaInv_s
chol_k = chol(Sigma_k)
''')
knockoff_chol = np.asarray(rpy.r('chol_k'))
SigmaInv_s = np.asarray(rpy.r('SigmaInv_s'))
diag_s = np.asarray(rpy.r('diag_s'))
np.savez('.knockoff_factorizations/%s.npz' % (os.path.split(tempfile.mkstemp()[1])[1],),
method=method,
feature_cov=feature_cov,
knockoff_chol=knockoff_chol)
return knockoff_chol
class knockoffs_sigma_equi(knockoffs_sigma):
knockoff_method = Unicode('ModelX (equi)')
factor_method = 'equi'
knockoffs_sigma_equi.register()
class knockoffs_orig(generic_method):
    wide_ok = False # requires at least n>p
method_name = Unicode("Knockoffs")
knockoff_method = Unicode('Candes & Barber')
model_target = Unicode('full')
def select(self):
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('Y', self.Y)
rpy.r.assign('q', self.q)
rpy.r('V=knockoff.filter(X, Y, statistic=stat.glmnet_lambdadiff, fdr=q, knockoffs=create.fixed)$selected')
rpy.r('if (length(V) > 0) {V = V-1}')
V = rpy.r('V')
numpy2ri.deactivate()
V = np.asarray(V, np.int)
return V, V
except:
return [], []
knockoffs_orig.register()
class knockoffs_fixed(generic_method):
    wide_ok = False # requires at least n>p
method_name = Unicode("Knockoffs")
knockoff_method = Unicode('Fixed')
model_target = Unicode('full')
def select(self):
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('Y', self.Y)
rpy.r.assign('q', self.q)
rpy.r('V=knockoff.filter(X, Y, fdr=q, knockoffs=create.fixed)$selected')
rpy.r('if (length(V) > 0) {V = V-1}')
V = rpy.r('V')
numpy2ri.deactivate()
return np.asarray(V, np.int), np.asarray(V, np.int)
except:
return [], []
knockoffs_fixed.register()
# Liu, Markovic, Tibs selection
class parametric_method(generic_method):
confidence = Float(0.95)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
generic_method.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self._fit = False
def select(self):
if not self._fit:
self.method_instance.fit()
self._fit = True
active_set, pvalues = self.generate_pvalues()
if len(pvalues) > 0:
selected = [active_set[i] for i in BHfilter(pvalues, q=self.q)]
return selected, active_set
else:
return [], active_set
class liu_theory(parametric_method):
sigma_estimator = Unicode('relaxed')
method_name = Unicode("Liu")
lambda_choice = Unicode("theory")
model_target = Unicode("full")
dispersion = Float(0.)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
parametric_method.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
n, p = X.shape
if n < p:
self.method_name = 'ROSI'
self.lagrange = l_theory * np.ones(X.shape[1])
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = lasso_full.gaussian(self.X, self.Y, self.lagrange * np.sqrt(n))
return self._method_instance
def generate_summary(self, compute_intervals=False):
if not self._fit:
self.method_instance.fit()
self._fit = True
X, Y, lagrange, L = self.X, self.Y, self.lagrange, self.method_instance
n, p = X.shape
if len(L.active) > 0:
if self.sigma_estimator == 'reid' and n < p:
dispersion = self.sigma_reid**2
elif self.dispersion != 0:
dispersion = self.dispersion
else:
dispersion = None
S = L.summary(compute_intervals=compute_intervals, dispersion=dispersion)
return S
def generate_pvalues(self):
S = self.generate_summary(compute_intervals=False)
if S is not None:
active_set = np.array(S['variable'])
pvalues = np.asarray(S['pval'])
return active_set, pvalues
else:
return [], []
def generate_intervals(self):
S = self.generate_summary(compute_intervals=True)
if S is not None:
active_set = np.array(S['variable'])
lower, upper = np.asarray(S['lower_confidence']), np.asarray(S['upper_confidence'])
return active_set, lower, upper
else:
return [], [], []
liu_theory.register()
class liu_aggressive(liu_theory):
lambda_choice = Unicode("aggressive")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
liu_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1]) * 0.8
liu_aggressive.register()
class liu_modelQ_pop_aggressive(liu_aggressive):
method_name = Unicode("Liu (ModelQ population)")
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
            self._method_instance = lasso_full_modelQ(self.feature_cov * n, self.X, self.Y, self.lagrange * np.sqrt(n))
        return self._method_instance
import env_utils as envu
import numpy as np
from time import time
class Dynamics_model(object):
"""
    The dynamics model takes a lander model object (and later an obstacle object) and modifies
    the state of the lander.
    The lander object instantiates an engine model that maps body frame thrust and torque to
    the inertial frame. Note that each lander can have its own inertial frame which can be
    centered on the lander's target.
    Currently this model does not include environmental dynamics; these will be added later.
The lander model maintains a state vector:
position [0:3]
velocity [3:6]
mass [7]
"""
    def __init__(self, h=0.5, w_o=2*np.pi/2000, M=5e10, noise_u=np.zeros(3), noise_sd=np.zeros(3)):
        # Assumed constructor body (the original was truncated): store the model parameters.
        self.h = h
        self.w_o = w_o
        self.M = M
        self.noise_u = noise_u
        self.noise_sd = noise_sd
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Class of variational Gaussian Mixture Image models.
It serves as a baseline for a hidden Potts-MRF for Bayesian unsupervised image
segmentation.
Author: <NAME>
Date: 29-11-2018
"""
import numpy as np
import numpy.random as rnd
from numpy.linalg import inv, cholesky
from scipy.special import logsumexp
from scipy.special import betaln, digamma, gammaln
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
from vis import plot_posteriors
class VariationalMixture(object):
"""
Superclass of variational mixture models.
Methods are functions common to all submixture models.
"""
def log_multivariate_gamma(self, n, p):
"""
Logarithmic multivariate gamma function.
This function is necessary for expectations and partition functions of
Wishart distributions. See also:
https://en.wikipedia.org/wiki/Multivariate_gamma_function
Parameters
----------
nu : float
Degrees of freedom.
p : int
Dimensionality.
Returns
-------
Gp : float
p-th order multivariate gamma function.
"""
# Check for appropriate degree of freedom
if not n > (p-1):
raise ValueError('Degrees of freedom too low for dimensionality.')
# Preallocate
Gp = 0
# Product from d=1 to p
for d in range(1, p+1):
# Gamma function of degrees of freedom and dimension
Gp += gammaln((n + 1 - d)/2)
return (p * (p-1) / 4)*np.log(np.pi) + Gp
def multivariate_digamma(self, n, p):
"""
Multivariate digamma function.
This function is necessary for expectations and partition functions of
Wishart distributions. See also:
https://en.wikipedia.org/wiki/Multivariate_gamma_function
Parameters
----------
nu : float
Degrees of freedom.
p : int
Dimensionality.
Returns
-------
Pp : float
p-th order multivariate digamma function.
"""
# Check for appropriate degree of freedom
if not n > (p-1):
raise ValueError('Degrees of freedom too low for dimensionality.')
# Preallocate
Pp = 0
# Sum from d=1 to D
for d in range(1, p+1):
# Digamma function of degrees of freedom and dimension
Pp += digamma((n + 1 - d)/2)
return Pp
def log_partition_Wishart(self, W, n):
"""
Logarithmic partition function of the Wishart distribution.
To compute variational expectations, the partition of the Wishart
distribution is sometimes needed. The current computation follows
Appendix B, equations B.78 to B.82 from Bishop's "Pattern Recognition &
Machine Learning."
Parameters
----------
W : array
Positive definite, symmetric precision matrix.
nu : int
Degrees of freedom.
Returns
-------
B : float
Partition of Wishart distribution.
"""
# Extract dimensionality
D, D_ = W.shape
# Check for symmetric matrix
if not D == D_:
raise ValueError('Matrix is not symmetric.')
# Check for appropriate degree of freedom
if not n > D-1:
raise ValueError('Degrees of freedom too low for dimensionality.')
# Compute log-multivariate gamma
lmG = self.log_multivariate_gamma(n, D)
# Compute partition function
B = (-n/2)*self.log_det(W) - (n*D/2)*np.log(2) - lmG
return B
def entropy_Wishart(self, W, n):
"""
Entropy of the Wishart distribution.
To compute variational expectations, the entropy of the Wishart
distribution is sometimes needed. The current computation follows
Appendix B, equations B.78 to B.82 from Bishop's "Pattern Recognition &
Machine Learning."
Parameters
----------
W : array
Positive definite, symmetric precision matrix.
nu : int
Degrees of freedom.
Returns
-------
H : float
Entropy of Wishart distribution.
"""
# Extract dimensionality
D, D_ = W.shape
# Check for symmetric matrix
if not D == D_:
raise ValueError('Matrix is not symmetric.')
# Check for appropriate degree of freedom
if not n > D-1:
raise ValueError('Degrees of freedom too low for dimensionality.')
# Expected log-determinant of precision matrix
E = self.multivariate_digamma(n, D) + D*np.log(2) + self.log_det(W)
# Entropy
H = -self.log_partition_Wishart(W, n) - (n - D - 1)/2 * E + n*D/2
return H
def log_det(self, A):
"""
Numerically stable computation of log determinant of a matrix.
Parameters
----------
A : array
Expecting a positive definite, symmetric matrix.
Returns
-------
float
Log-determinant of given matrix.
"""
# Perform cholesky decomposition
L = cholesky(A)
# Stable log-determinant
return np.sum(2*np.log(np.diag(L)))
def distW(self, X, S):
"""
Compute weighted distance.
Parameters
----------
X : array
Vectors (N by D) or (H by W by D).
W : array
Weights (D by D).
Returns
-------
array
Weighted distance for each vector.
"""
if not S.shape[0] == S.shape[1]:
raise ValueError('Weight matrix not symmetric.')
if not X.shape[-1] == S.shape[0]:
raise ValueError('Dimensionality of data and weights mismatch.')
if len(X.shape) == 2:
# Shapes
N, D = X.shape
# Preallocate
A = np.zeros((N,))
# Loop over samples
for n in range(N):
# Compute weighted inner product between vectors
A[n] = X[n, :] @ S @ X[n, :].T
elif len(X.shape) == 3:
# Shape
H, W, D = X.shape
# Preallocate
A = np.zeros((H, W))
# Loop over samples
for h in range(H):
for w in range(W):
# Compute weighted inner product between vectors
A[h, w] = X[h, w, :] @ S @ X[h, w, :].T
return A
def one_hot(self, A):
"""
Map array to pages with binary encodings.
Parameters
----------
A : array
2-dimensional array of integers
Returns
-------
B : array (height by width by number of unique integers in A)
3-dimensional array with each page as an indicator of value in A.
"""
# Unique values
labels = np.unique(A)
# Preallocate new array
B = np.zeros((*A.shape, len(labels)))
# Loop over unique values
for i, label in enumerate(labels):
B[:, :, i] = (A == label)
return B
class UnsupervisedGaussianMixture(VariationalMixture):
"""
Variational Gaussian Mixture Image model.
This implementation multivariate images (height by width by channel).
It is based on the RPubs note by <NAME>:
https://rpubs.com/cakapourani/variational-bayes-gmm
"""
def __init__(self, num_channels=1,
num_components=2,
init_params='nn',
max_iter=10,
tol=1e-5):
"""
Model-specific constructors.
Parameters
----------
num_channels : int
Number of channels of image (def: 1).
num_components : int
Number of components (def: 2).
        init_params : str
            Parameter initialization method: 'random' or 'kmeans'.
max_iter : int
Maximum number of iterations to run for (def: 10).
tol : float
Tolerance on change in x-value (def: 1e-5).
Returns
-------
None
"""
# Store data dimensionality
if num_channels >= 1:
self.D = num_channels
else:
raise ValueError('Number of channels must be larger than 0.')
# Store model parameters
if num_components >= 2:
self.K = num_components
else:
raise ValueError('Too few components specified')
# Optimization parameters
self.init_params = init_params
self.max_iter = max_iter
self.tol = tol
# Set prior hyperparameters
self.set_prior_hyperparameters(D=num_channels,
K=num_components)
def set_prior_hyperparameters(self, D, K,
a0=np.array([0.1]),
b0=np.array([0.1]),
n0=np.array([2.0]),
m0=np.array([0.0]),
W0=np.array([1.0])):
"""
Set hyperparameters of prior distributions.
Default prior hyperparameters are minimally informative symmetric
parameters.
Parameters
----------
D : int
Dimensionality of data.
K : int
Number of components.
a0 : float / array (components by None)
Hyperparameters of Dirichlet distribution on component weights.
b0 : float / array (components by None)
Scale parameters for hypermean normal distribution.
n0 : array (components by None)
Degrees of freedom for Wishart precision prior.
m0 : array (components by dimensions)
Hypermeans.
W0 : array (dimensions by dimensions by components)
Wishart precision parameters.
Returns
-------
theta : tuple
"""
# Expand alpha's if necessary
if not a0.shape[0] == K:
a0 = np.tile(a0[0], (K,))
# Expand beta's if necessary
if not b0.shape[0] == K:
b0 = np.tile(b0[0], (K,))
# Expand nu's if necessary
if not n0.shape[0] == K:
# Check for sufficient degrees of freedom
if n0[0] < D:
print('Cannot set Wishart degrees of freedom lower than data \
dimensionality.\n Setting it to data dim.')
n0 = np.tile(D, (K,))
else:
n0 = np.tile(n0[0], (K,))
# Expand hypermeans if necessary
if not np.all(m0.shape == (K, D)):
# If mean vector given, replicate to each component
if len(m0.shape) == 2:
if m0.shape[1] == D:
m0 = np.tile(m0, (K, 1))
else:
m0 = np.tile(m0[0], (K, D))
        # Expand hyperprecisions if necessary
        if not np.all(W0.shape == (D, D, K)):
            # If a single precision matrix is given, replicate it to each component
            if len(W0.shape) == 2 and np.all(W0.shape == (D, D)):
                W0_ = np.tile(W0[:, :, np.newaxis], (1, 1, K))
            else:
                W0_ = np.zeros((D, D, K))
                for k in range(K):
                    W0_[:, :, k] = W0[0]*np.eye(D)
        else:
            W0_ = W0
# Store tupled parameters as model attribute
self.theta0 = (a0, b0, n0, m0, W0_)
def initialize_posteriors(self, X):
"""
Initialize posterior hyperparameters
Parameters
----------
X : array
Observed image (height by width by channels)
Returns
-------
theta : tuple
Set of parameters.
"""
# Current shape
H, W, D = X.shape
# Reshape arrays
X = X.reshape((H*W, D))
if self.init_params == 'random':
# Dirichlet concentration hyperparameters
at = np.ones((self.K,))*(H*W)/2
# Normal precision-scale hyperparameters
bt = np.ones((self.K,))*(H*W)/2
# Wishart degrees of freedom
nt = np.ones((self.K,))*(H*W)/2
mt = np.zeros((self.K, D))
Wt = np.zeros((D, D, self.K))
for k in range(self.K):
# Hypermeans
mt[k, :] = np.mean(X, axis=0) + rnd.randn(1, D)*.1
# Hyperprecisions
Wt[:, :, k] = np.eye(D)
# Initialize variational posterior responsibilities
rho = np.ones((H, W, self.K)) / self.K
elif self.init_params in ('kmeans', 'k-means'):
# Fit k-means to data and obtain cluster assignment
label = KMeans(n_clusters=self.K, n_init=1).fit(X).labels_
# Set rho based on cluster labels
rho = np.zeros((H*W, self.K))
rho[np.arange(H*W), label] = 1
# Dirichlet concentration hyperparameters
at = np.sum(rho, axis=0)
# Normal precision-scale hyperparameters
bt = np.sum(rho, axis=0)
# Wishart degrees of freedom
nt = np.sum(rho, axis=0)
mt = np.zeros((self.K, D))
Wt = np.zeros((D, D, self.K))
for k in range(self.K):
# Hypermeans
mt[k, :] = np.sum(rho[:, [k]] * X, axis=0) / np.sum(rho[:, k])
# Hyperprecisions
Wt[:, :, k] = np.eye(D)
else:
raise ValueError('Provided method not recognized.')
return (at, bt, nt, mt, Wt), rho
def free_energy(self, X, rho, thetat, report=True):
"""
Compute free energy term to monitor progress.
Parameters
----------
X : array
Observed image (height by width by channels).
rho : array
Array of variational parameters (height by width by channels).
thetat : array
Parameters of variational posteriors.
theta0 : array
Parameters of variational priors.
report : bool
Print value of free energy function.
Returns
-------
        F : float
            Free energy lower bound for the current variational posterior.
"""
# Shapes
H, W, D = X.shape
# Reshape arrays
X = X.reshape((H*W, D))
rho = rho.reshape((H*W, self.K))
# Unpack parameter sets
a0, b0, n0, m0, W0 = self.theta0
at, bt, nt, mt, Wt = thetat
# Preallocate terms for energy function
E1 = 0
E2 = 0
E3 = 0
E4 = 0
E5 = 0
E6 = 0
E7 = 0
# Loop over classes
for k in range(self.K):
''' Convenience variables '''
# Proportion assigned to each component
Nk = np.sum(rho[:, k], axis=0)
# Responsibility-weighted mean
xk = np.sum(rho[:, [k]] * X, axis=0) / Nk
# Reponsibility-weighted variance
Sk = ((X - xk) * rho[:, [k]]).T @ (X - xk) / Nk
# Mahalanobis distance from hypermean
mWm = (mt[k, :] - m0[k, :]).T @ Wt[:, :, k] @ (mt[k, :] - m0[k, :])
# Mahalanobis distance from responsibility-weighted mean
xWx = (xk - mt[k, :]) @ Wt[:, :, k] @ (xk - mt[k, :]).T
# Entropy-based terms
Elog_pik = digamma(at[k]) - digamma(np.sum(at))
Elog_Lak = (D*np.log(2) +
self.log_det(Wt[:, :, k]) +
self.multivariate_digamma(nt[k], D))
''' Energy function '''
# First term
E1 += Nk/2*(Elog_Lak - D / bt[k] -
nt[k]*(np.trace(Sk @ Wt[:, :, k]) + xWx) -
D*np.log(2*np.pi))
# Second term
E2 += np.sum(rho[:, k] * Elog_pik, axis=0)
# Third term
E3 += (a0[k] - 1)*Elog_pik + (gammaln(np.sum(a0)) -
np.sum(gammaln(a0))) / self.K
# Fourth term
E4 += 1/2*(D*np.log(b0[k] / (2*np.pi)) +
Elog_Lak -
D*b0[k]/bt[k] -
b0[k]*nt[k]*mWm +
(n0[k] - D - 1)*Elog_Lak -
2*self.log_partition_Wishart(Wt[:, :, k], nt[k]) +
nt[k]*np.trace(inv(W0[:, :, k])*Wt[:, :, k]))
# Ignore underflow error from log rho
            with np.errstate(under='ignore', divide='ignore'):
# Set -inf to most negative number
lrho = np.maximum(np.log(rho[:, k]), np.finfo(rho.dtype).min)
# Fifth term
E5 += np.sum(rho[:, k] * lrho, axis=0)
# Sixth term
E6 += (at[k] - 1)*Elog_pik + (gammaln(np.sum(at)) -
np.sum(gammaln(at))) / self.K
# Seventh term
E7 += (Elog_Lak/2 +
D/2*np.log(bt[k] / (2*np.pi)) -
D/2 - self.entropy_Wishart(Wt[:, :, k], nt[k]))
# Compute free energy term
F = E1 + E2 + E3 + E4 - E5 - E6 - E7
# Print free energy
if report:
print('Free energy = ' + str(F))
return F
def expectation_step(self, X, thetat, savefn=''):
"""
Perform expectation step.
Parameters
----------
X : array
Observed image (height by width by channels).
thetat : array
Current iteration of parameters of variational posteriors.
Returns
-------
rho : array
Updated array of variational parameters / responsibilities.
"""
# Shape of variational parameter array
H, W, D = X.shape
# Reshape arrays
X = X.reshape((H*W, D))
# Unpack tuple of hyperparameters
at, bt, nt, mt, Wt = thetat
# Initialize logarithmic rho
log_rho = np.zeros((H*W, self.K))
for k in range(self.K):
# Compute expected log mixing coefficient
            E1 = digamma(at[k]) - digamma(np.sum(at))
ENABLE_MULTIPROCESSING = True
from dsl import cpp_trace_param_automata
def generate_public_submission():
import numpy as np
import pandas as pd
import os
import json
from pathlib import Path
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
from xgboost import XGBClassifier
import pdb
# data_path = Path('.')
data_path = Path('.')
if not (data_path / 'test').exists():
data_path = Path('../input/abstraction-and-reasoning-challenge')
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
def plot_result(test_input, test_prediction,
input_shape):
"""
        Plots the actual target and the model prediction side by side,
        using same color scheme as the ARC app
"""
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
fig, axs = plt.subplots(1, 2, figsize=(15, 15))
test_input = test_input.reshape(input_shape[0], input_shape[1])
axs[0].imshow(test_input, cmap=cmap, norm=norm)
axs[0].axis('off')
axs[0].set_title('Actual Target')
test_prediction = test_prediction.reshape(input_shape[0], input_shape[1])
axs[1].imshow(test_prediction, cmap=cmap, norm=norm)
axs[1].axis('off')
axs[1].set_title('Model Prediction')
plt.tight_layout()
plt.show()
def plot_test(test_prediction, task_name):
"""
        Plots a single test prediction,
        using same color scheme as the ARC app
"""
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
fig, axs = plt.subplots(1, 1, figsize=(15, 15))
axs.imshow(test_prediction, cmap=cmap, norm=norm)
axs.axis('off')
axs.set_title(f'Test Prediction {task_name}')
plt.tight_layout()
plt.show()
# https://www.kaggle.com/inversion/abstraction-and-reasoning-starter-notebook
def flattener(pred):
str_pred = str([row for row in pred])
str_pred = str_pred.replace(', ', '')
str_pred = str_pred.replace('[[', '|')
str_pred = str_pred.replace('][', '|')
str_pred = str_pred.replace(']]', '|')
return str_pred
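    # Example (same encoding): [[1, 2], [3, 4]] flattens to the string '|12|34|'.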
sample_sub1 = pd.read_csv(data_path / 'sample_submission.csv')
sample_sub1 = sample_sub1.set_index('output_id')
sample_sub1.head()
def get_moore_neighbours(color, cur_row, cur_col, nrows, ncols):
if cur_row <= 0:
top = -1
else:
top = color[cur_row - 1][cur_col]
if cur_row >= nrows - 1:
bottom = -1
else:
bottom = color[cur_row + 1][cur_col]
if cur_col <= 0:
left = -1
else:
left = color[cur_row][cur_col - 1]
if cur_col >= ncols - 1:
right = -1
else:
right = color[cur_row][cur_col + 1]
return top, bottom, left, right
def get_tl_tr(color, cur_row, cur_col, nrows, ncols):
if cur_row == 0:
top_left = -1
top_right = -1
else:
if cur_col == 0:
top_left = -1
else:
top_left = color[cur_row - 1][cur_col - 1]
if cur_col == ncols - 1:
top_right = -1
else:
top_right = color[cur_row - 1][cur_col + 1]
return top_left, top_right
def make_features(input_color, nfeat):
nrows, ncols = input_color.shape
feat = np.zeros((nrows * ncols, nfeat))
cur_idx = 0
for i in range(nrows):
for j in range(ncols):
feat[cur_idx, 0] = i
feat[cur_idx, 1] = j
feat[cur_idx, 2] = input_color[i][j]
feat[cur_idx, 3:7] = get_moore_neighbours(input_color, i, j, nrows, ncols)
feat[cur_idx, 7:9] = get_tl_tr(input_color, i, j, nrows, ncols)
feat[cur_idx, 9] = len(np.unique(input_color[i, :]))
feat[cur_idx, 10] = len(np.unique(input_color[:, j]))
feat[cur_idx, 11] = (i + j)
feat[cur_idx, 12] = len(np.unique(input_color[i - local_neighb:i + local_neighb,
j - local_neighb:j + local_neighb]))
cur_idx += 1
return feat
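    # Feature layout assumed above (nfeat = 13 columns): 0-1 pixel row/column, 2 pixel
    # colour, 3-6 direct neighbour colours (top, bottom, left, right), 7-8 top-left and
    # top-right colours, 9-10 number of unique colours in the pixel's row/column,
    # 11 row + column, 12 number of unique colours in a (2*local_neighb)-wide window.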
def features(task, mode='train'):
num_train_pairs = len(task[mode])
feat, target = [], []
global local_neighb
for task_num in range(num_train_pairs):
input_color = np.array(task[mode][task_num]['input'])
target_color = task[mode][task_num]['output']
nrows, ncols = len(task[mode][task_num]['input']), len(task[mode][task_num]['input'][0])
target_rows, target_cols = len(task[mode][task_num]['output']), len(task[mode][task_num]['output'][0])
if (target_rows != nrows) or (target_cols != ncols):
print('Number of input rows:', nrows, 'cols:', ncols)
print('Number of target rows:', target_rows, 'cols:', target_cols)
not_valid = 1
return None, None, 1
imsize = nrows * ncols
# offset = imsize*task_num*3 #since we are using three types of aug
feat.extend(make_features(input_color, nfeat))
target.extend(np.array(target_color).reshape(-1, ))
return np.array(feat), np.array(target), 0
# mode = 'eval'
mode = 'test'
if mode == 'eval':
task_path = evaluation_path
elif mode == 'train':
task_path = training_path
elif mode == 'test':
task_path = test_path
all_task_ids = sorted(os.listdir(task_path))
nfeat = 13
local_neighb = 5
valid_scores = {}
model_accuracies = {'ens': []}
pred_taskids = []
for task_id in all_task_ids:
task_file = str(task_path / task_id)
with open(task_file, 'r') as f:
task = json.load(f)
feat, target, not_valid = features(task)
if not_valid:
print('ignoring task', task_file)
print()
not_valid = 0
continue
xgb = XGBClassifier(n_estimators=10, n_jobs=-1)
xgb.fit(feat, target, verbose=-1)
# training on input pairs is done.
# test predictions begins here
num_test_pairs = len(task['test'])
for task_num in range(num_test_pairs):
cur_idx = 0
input_color = np.array(task['test'][task_num]['input'])
nrows, ncols = len(task['test'][task_num]['input']), len(
task['test'][task_num]['input'][0])
feat = make_features(input_color, nfeat)
print('Made predictions for ', task_id[:-5])
preds = xgb.predict(feat).reshape(nrows, ncols)
if (mode == 'train') or (mode == 'eval'):
ens_acc = (np.array(task['test'][task_num]['output']) == preds).sum() / (nrows * ncols)
model_accuracies['ens'].append(ens_acc)
pred_taskids.append(f'{task_id[:-5]}_{task_num}')
# print('ensemble accuracy',(np.array(task['test'][task_num]['output'])==preds).sum()/(nrows*ncols))
# print()
preds = preds.astype(int).tolist()
# plot_test(preds, task_id)
sample_sub1.loc[f'{task_id[:-5]}_{task_num}',
'output'] = flattener(preds)
if (mode == 'train') or (mode == 'eval'):
df = pd.DataFrame(model_accuracies, index=pred_taskids)
print(df.head(10))
print(df.describe())
for c in df.columns:
print(f'for {c} no. of complete tasks is', (df.loc[:, c] == 1).sum())
df.to_csv('ens_acc.csv')
sample_sub1.head()
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
training_tasks = sorted(os.listdir(training_path))
eval_tasks = sorted(os.listdir(evaluation_path))
T = training_tasks
Trains = []
for i in range(400):
task_file = str(training_path / T[i])
task = json.load(open(task_file, 'r'))
Trains.append(task)
E = eval_tasks
Evals = []
for i in range(400):
task_file = str(evaluation_path / E[i])
task = json.load(open(task_file, 'r'))
Evals.append(task)
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
# 0:black, 1:blue, 2:red, 3:greed, 4:yellow,
# 5:gray, 6:magenta, 7:orange, 8:sky, 9:brown
plt.figure(figsize=(5, 2), dpi=200)
plt.imshow([list(range(10))], cmap=cmap, norm=norm)
plt.xticks(list(range(10)))
plt.yticks([])
# plt.show()
def plot_task(task):
n = len(task["train"]) + len(task["test"])
fig, axs = plt.subplots(2, n, figsize=(4 * n, 8), dpi=50)
plt.subplots_adjust(wspace=0, hspace=0)
fig_num = 0
for i, t in enumerate(task["train"]):
t_in, t_out = np.array(t["input"]), np.array(t["output"])
axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
axs[0][fig_num].set_title(f'Train-{i} in')
axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
axs[1][fig_num].set_title(f'Train-{i} out')
axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
fig_num += 1
for i, t in enumerate(task["test"]):
t_in, t_out = np.array(t["input"]), np.array(t["output"])
axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
axs[0][fig_num].set_title(f'Test-{i} in')
axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
axs[1][fig_num].set_title(f'Test-{i} out')
axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
fig_num += 1
plt.tight_layout()
plt.show()
def plot_picture(x):
plt.imshow(np.array(x), cmap=cmap, norm=norm)
plt.show()
def Defensive_Copy(A):
n = len(A)
k = len(A[0])
L = np.zeros((n, k), dtype=int)
for i in range(n):
for j in range(k):
L[i, j] = 0 + A[i][j]
return L.tolist()
def Create(task, task_id=0):
n = len(task['train'])
Input = [Defensive_Copy(task['train'][i]['input']) for i in range(n)]
Output = [Defensive_Copy(task['train'][i]['output']) for i in range(n)]
Input.append(Defensive_Copy(task['test'][task_id]['input']))
return Input, Output
def Recolor(task):
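        # Searches for a periodic recolouring rule: for each period pair (Q1, Q2) and each of
        # the four corners (v) used as the distance origin, it tries to learn a consistent map
        # (row % Q1, col % Q2, input colour) -> output colour from the training pairs and, if
        # one reproduces all training outputs, applies it to the test grid; returns -1 otherwise.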
Input = task[0]
Output = task[1]
Test_Picture = Input[-1]
Input = Input[:-1]
N = len(Input)
for x, y in zip(Input, Output):
if len(x) != len(y) or len(x[0]) != len(y[0]):
return -1
Best_Dict = -1
Best_Q1 = -1
Best_Q2 = -1
Best_v = -1
# v ranges from 0 to 3. This gives an extra flexibility of measuring distance from any of the 4 corners
Pairs = []
for t in range(15):
for Q1 in range(1, 8):
for Q2 in range(1, 8):
if Q1 + Q2 == t:
Pairs.append((Q1, Q2))
for Q1, Q2 in Pairs:
for v in range(4):
if Best_Dict != -1:
continue
possible = True
Dict = {}
for x, y in zip(Input, Output):
n = len(x)
k = len(x[0])
for i in range(n):
for j in range(k):
if v == 0 or v == 2:
p1 = i % Q1
else:
p1 = (n - 1 - i) % Q1
if v == 0 or v == 3:
p2 = j % Q2
else:
p2 = (k - 1 - j) % Q2
color1 = x[i][j]
color2 = y[i][j]
if color1 != color2:
rule = (p1, p2, color1)
if rule not in Dict:
Dict[rule] = color2
elif Dict[rule] != color2:
possible = False
if possible:
# Let's see if we actually solve the problem
for x, y in zip(Input, Output):
n = len(x)
k = len(x[0])
for i in range(n):
for j in range(k):
if v == 0 or v == 2:
p1 = i % Q1
else:
p1 = (n - 1 - i) % Q1
if v == 0 or v == 3:
p2 = j % Q2
else:
p2 = (k - 1 - j) % Q2
color1 = x[i][j]
rule = (p1, p2, color1)
if rule in Dict:
color2 = 0 + Dict[rule]
else:
color2 = 0 + y[i][j]
if color2 != y[i][j]:
possible = False
if possible:
Best_Dict = Dict
Best_Q1 = Q1
Best_Q2 = Q2
Best_v = v
if Best_Dict == -1:
return -1 # meaning that we didn't find a rule that works for the traning cases
# Otherwise there is a rule: so let's use it:
n = len(Test_Picture)
k = len(Test_Picture[0])
answer = np.zeros((n, k), dtype=int)
for i in range(n):
for j in range(k):
if Best_v == 0 or Best_v == 2:
p1 = i % Best_Q1
else:
p1 = (n - 1 - i) % Best_Q1
if Best_v == 0 or Best_v == 3:
p2 = j % Best_Q2
else:
p2 = (k - 1 - j) % Best_Q2
color1 = Test_Picture[i][j]
rule = (p1, p2, color1)
if (p1, p2, color1) in Best_Dict:
answer[i][j] = 0 + Best_Dict[rule]
else:
answer[i][j] = 0 + color1
return answer.tolist()
sample_sub2 = pd.read_csv(data_path / 'sample_submission.csv')
sample_sub2.head()
def flattener(pred):
str_pred = str([row for row in pred])
str_pred = str_pred.replace(', ', '')
str_pred = str_pred.replace('[[', '|')
str_pred = str_pred.replace('][', '|')
str_pred = str_pred.replace(']]', '|')
return str_pred
example_grid = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# display(example_grid)
print(flattener(example_grid))
Solved = []
Problems = sample_sub2['output_id'].values
Proposed_Answers = []
test_paths_my = {task.stem: json.load(task.open()) for task in test_path.iterdir()}
test_task_ids = np.sort(list(test_paths_my.keys()))
print(Problems, len(Problems))
task_number_my = dict(zip(test_task_ids, np.arange(100)))
for i in range(len(Problems)):
output_id = Problems[i]
task_id = output_id.split('_')[0]
pair_id = int(output_id.split('_')[1])
f = str(test_path / str(task_id + '.json'))
with open(f, 'r') as read_file:
task = json.load(read_file)
n = len(task['train'])
Input = [Defensive_Copy(task['train'][j]['input']) for j in range(n)]
Output = [Defensive_Copy(task['train'][j]['output']) for j in range(n)]
Input.append(Defensive_Copy(task['test'][pair_id]['input']))
solution = Recolor([Input, Output])
pred = ''
if solution != -1:
Solved.append(i)
pred1 = flattener(solution)
pred = pred + pred1 + ' '
if pred == '':
pred = flattener(example_grid)
Proposed_Answers.append(pred)
sample_sub2['output'] = Proposed_Answers
sample_sub1 = sample_sub1.reset_index()
sample_sub1 = sample_sub1.sort_values(by="output_id")
sample_sub2 = sample_sub2.sort_values(by="output_id")
out1 = sample_sub1["output"].astype(str).values
out2 = sample_sub2["output"].astype(str).values
merge_output = []
for o1, o2 in zip(out1, out2):
o = o1.strip().split(" ")[:1] + o2.strip().split(" ")[:2]
o = " ".join(o[:3])
merge_output.append(o)
sample_sub1["output"] = merge_output
sample_sub1["output"] = sample_sub1["output"].astype(str)
# test_paths_my = { task.stem: json.load(task.open()) for task in test_path.iterdir() }
# test_task_ids = np.sort(list(test_paths_my.keys()))
# task_number_my = dict(zip(test_task_ids, np.arange(100)))
submission = sample_sub1.copy()
submission.to_csv("public_submission.csv", index=False)
#generate_public_submission()
import numpy as np
from tqdm.notebook import tqdm
from PIL import Image, ImageDraw
import time
from collections import defaultdict
import os
import json
import random
import copy
import networkx as nx
from pathlib import Path
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from itertools import product
import pandas as pd
import multiprocessing
import subprocess
# from moviepy.editor import ImageSequenceClip
# from moviepy.editor import clips_array, CompositeVideoClip
# from moviepy.video.io.html_tools import html_embed, HTML2
# def display_vid(vid, verbose=False, **html_kw):
# """
# Display a moviepy video clip, useful for removing loadbars
# """
# rd_kwargs = {
# 'fps': 10, 'verbose': verbose
# }
# if not verbose:
# rd_kwargs['logger'] = None
# return HTML2(html_embed(vid, filetype=None, maxduration=60,
# center=True, rd_kwargs=rd_kwargs, **html_kw))
data_path = Path('../input/abstraction-and-reasoning-challenge/')
# data_path = Path('.') # Artyom: it's better use symlinks locally
cmap_lookup = [
'#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'
]
cmap_lookup = [np.array([int(x[1:3], 16), int(x[3:5], 16), int(x[5:], 16)]) for x in cmap_lookup]
def cmap(x):
"""
Translate a task matrix to a color coded version
arguments
x : a h x w task matrix
returns
a h x w x 3 matrix with colors instead of numbers
"""
y = np.zeros((*x.shape, 3))
y[x < 0, :] = np.array([112, 128, 144])
y[x > 9, :] = np.array([255, 248, 220])
for i, c in enumerate(cmap_lookup):
y[x == i, :] = c
return y
def draw_one(x, k=20):
"""
Create a PIL image from a task matrix, the task will be
drawn using the default color coding with grid lines
arguments
x : a task matrix
k = 20 : an up scaling factor
returns
a PIL image
"""
img = Image.fromarray(cmap(x).astype(np.uint8)).resize((x.shape[1] * k, x.shape[0] * k), Image.NEAREST)
draw = ImageDraw.Draw(img)
for i in range(x.shape[0]):
draw.line((0, i * k, img.width, i * k), fill=(80, 80, 80), width=1)
for j in range(x.shape[1]):
draw.line((j * k, 0, j * k, img.height), fill=(80, 80, 80), width=1)
return img
def vcat_imgs(imgs, border=10):
"""
Concatenate images vertically
arguments:
imgs : an array of PIL images
border = 10 : the size of space between images
returns:
a PIL image
"""
h = max(img.height for img in imgs)
w = sum(img.width for img in imgs)
res_img = Image.new('RGB', (w + border * (len(imgs) - 1), h), color=(255, 255, 255))
offset = 0
for img in imgs:
res_img.paste(img, (offset, 0))
offset += img.width + border
return res_img
def plot_task(task):
n = len(task["train"]) + len(task["test"])
fig, axs = plt.subplots(2, n, figsize=(n * 4, 8))
plt.subplots_adjust(wspace=0, hspace=0)
fig_num = 0
def go(ax, title, x):
ax.imshow(draw_one(x), interpolation='nearest')
ax.set_title(title)
ax.set_yticks([])
ax.set_xticks([])
for i, t in enumerate(task["train"]):
go(axs[0][fig_num], f'Train-{i} in', t["input"])
go(axs[1][fig_num], f'Train-{i} out', t["output"])
fig_num += 1
for i, t in enumerate(task["test"]):
go(axs[0][fig_num], f'Test-{i} in', t["input"])
try:
go(axs[1][fig_num], f'Test-{i} out', t["output"])
except:
go(axs[1][fig_num], f'Test-{i} out', np.zeros_like(t["input"]))
fig_num += 1
plt.tight_layout()
plt.show()
def real_trace_param_automata(input, params, n_iter, n_hidden):
"""
Execute an automata and return all the intermediate states
arguments:
step_fn : transition rule function, should take two arguments `input` and `hidden_i`,
should return an output grid an a new hidden hidden grid
n_iter : num of iteration to perform
n_hidden: number of hidden grids, if set to 0 `hidden_i` will be set to None
laodbar = True: weather display loadbars
returns:
an array of tuples if output and hidden grids
"""
# hidden = np.zeros((n_hidden, *input.shape)) if n_hidden > 0 else None
#
# global_rules, ca_rules = params
#
# trace = [(input, hidden)]
#
# for rule in global_rules:
#
# output, hidden = apply_rule(input, hidden, rule)
# trace.append((output, hidden))
# input = output
#
# its = range(n_iter)
#
# for i_it in its:
# output, hidden = compute_parametrized_automata(input, hidden, ca_rules)
# trace.append((output, hidden))
#
# if (input.shape == output.shape) and (output == input).all():
# break
# input = output
hidden = np.zeros((n_hidden, *input.shape)) if n_hidden > 0 else None
global_rules, ca_rules, split_rule, merge_rule = params
grids = apply_split_rule(input, hidden, split_rule)
#print(grids[0][0])
for rule in global_rules:
for i, (inp, hid) in enumerate(grids):
if rule['macro_type'] == 'global_rule':
if rule['apply_to'] == 'all' or \
(rule['apply_to'] == 'index' and i == rule['apply_to_index']%len(grids) or
(rule['apply_to'] == 'last' and i == len(grids) - 1)):
grids[i] = apply_rule(inp, hid, rule)
elif rule['macro_type'] == 'global_interaction_rule':
grids = apply_interaction_rule(grids, rule)
#print(grids[0][0])
#1/0
for i, (input, hidden) in enumerate(grids):
for _ in range(n_iter):
output, hidden = compute_parametrized_automata(input, hidden, ca_rules)
if np.array_equal(input, output):
break
input = output
grids[i] = (output, hidden)
output = apply_merge_rule(grids, merge_rule, split_rule)
return output
def apply_interaction_rule(grids, rule):
if rule['type'] == 'align_pattern':
# index_from = rule['index_from'] % len(grids)
# index_to = rule['index_to'] % len(grids)
# allow_rotation = rule['allow_rotation']
if len(grids) > 5:
return grids
for index_from in range(len(grids)):
for index_to in range(index_from+1, len(grids)):
input_i = grids[index_from][0]
input_j = grids[index_to][0]
# print(np.max(input_i>0, axis=1))
# print(np.max(input_i>0, axis=1).shape)
# print(np.arange(input_i.shape[0]).shape)
#1/0
i_nonzero_rows = np.arange(input_i.shape[0])[np.max(input_i>0, axis=1)]
i_nonzero_columns = np.arange(input_i.shape[1])[np.max(input_i>0, axis=0)]
j_nonzero_rows = np.arange(input_j.shape[0])[np.max(input_j>0, axis=1)]
j_nonzero_columns = np.arange(input_j.shape[1])[np.max(input_j>0, axis=0)]
if i_nonzero_rows.shape[0] == 0 or i_nonzero_columns.shape[0] == 0 or \
j_nonzero_rows.shape[0] == 0 or j_nonzero_columns.shape[0] == 0:
continue
i_minrow = np.min(i_nonzero_rows)
i_mincol = np.min(i_nonzero_columns)
i_maxrow = np.max(i_nonzero_rows) + 1
i_maxcol = np.max(i_nonzero_columns) + 1
j_minrow = np.min(j_nonzero_rows)
j_mincol = np.min(j_nonzero_columns)
j_maxrow = np.max(j_nonzero_rows) + 1
j_maxcol = np.max(j_nonzero_columns) + 1
figure_to_align = input_i[i_minrow:i_maxrow, i_mincol:i_maxcol]
figure_target = input_j[j_minrow:j_maxrow, j_mincol:j_maxcol]
best_fit = 0
best_i_fit, best_j_fit = -1, -1
#print(figure_to_align)
#print(figure_target)
if figure_to_align.shape[0] < figure_target.shape[0] or figure_to_align.shape[1] < figure_target.shape[1]:
continue
#1/0
else:
for i_start in range((figure_to_align.shape[0] - figure_target.shape[0])+1):
for j_start in range((figure_to_align.shape[1] - figure_target.shape[1])+1):
fig_1 = figure_to_align[i_start:(i_start + figure_target.shape[0]), j_start:(j_start + figure_target.shape[1])]
if np.logical_and(np.logical_and(figure_target > 0, figure_target!=rule['allow_color']), figure_target != fig_1).any():
continue
fit = np.sum(figure_target==fig_1)
if fit > best_fit:
best_i_fit, best_j_fit = i_start, j_start
best_fit = fit
if best_fit == 0:
continue
imin = j_minrow-best_i_fit
imax = j_minrow-best_i_fit + figure_to_align.shape[0]
jmin = j_mincol - best_j_fit
jmax = j_mincol - best_j_fit + figure_to_align.shape[1]
begin_i = max(imin, 0)
begin_j = max(jmin, 0)
end_i = min(imax, input_j.shape[0])
end_j = min(jmax, input_j.shape[1])
i_fig_begin = (begin_i-imin)
i_fig_end = figure_to_align.shape[0]-(imax-end_i)
j_fig_begin = (begin_j-jmin)
j_fig_end = figure_to_align.shape[1]-(jmax-end_j)
if rule['fill_with_color'] == 0:
input_j[begin_i:end_i, begin_j:end_j] = figure_to_align[i_fig_begin:i_fig_end, j_fig_begin:j_fig_end]
else:
for i, j in product(range(end_i-begin_i + 1), range(end_j-begin_j + 1)):
if input_j[begin_i + i, begin_j + j] == 0:
input_j[begin_i + i, begin_j + j] = rule['fill_with_color'] * (figure_to_align[i_fig_begin + i, j_fig_begin + j])
return grids
def trace_param_automata(input, params, n_iter, n_hidden):
# expected = real_trace_param_automata(input, params, n_iter, n_hidden)
#
# testcase = {'input': input, 'params': params}
# print(str(testcase).replace('\'', '"').replace('array(', '').replace(')', ''))
output = cpp_trace_param_automata(input, params, n_iter)
# if not np.array_equal(expected, output):
# print('cpp result is wrong')
# print('input:')
# print(input)
# print('expected:')
# print(expected)
# print('got:')
# print(output)
#
# diff = [[str(g) if e != g else '-' for e, g in zip(exp_row, got_row)]
# for exp_row, got_row in zip(expected, output)]
# diff_lines = [' '.join(line) for line in diff]
# diff_str = '[[' + ']\n ['.join(diff_lines)
#
# print('diff:')
# print(diff_str)
# print('rules')
# print(params)
#
# assert False
return [[output]]
# def vis_automata_trace(states, loadbar=False, prefix_image=None):
# """
# Create a video from an array of automata states
#
# arguments:
# states : array of automata steps, returned by `trace_automata()`
# loadbar = True: weather display loadbars
# prefix_image = None: image to add to the beginning of each frame
# returns
# a moviepy ImageSequenceClip
# """
# frames = []
# if loadbar:
# states = tqdm(states, desc='Frame')
# for i, (canvas, hidden) in enumerate(states):
#
# frame = []
# if prefix_image is not None:
# frame.append(prefix_image)
# frame.append(draw_one(canvas))
# frames.append(vcat_imgs(frame))
#
# return ImageSequenceClip(list(map(np.array, frames)), fps=10)
# def vis_automata_paramed_task(tasks, parameters, n_iter, n_hidden, vis_only_ix=None):
# """
# Visualize the automata steps during the task solution
# arguments:
# tasks : the task to be solved by the automata
# step_fn : automata transition function as passed to `trace_automata()`
# n_iter : number of iterations to perform
# n_hidden : number of hidden girds
# """
#
# n_vis = 0
#
# def go(task, n_vis, test=False):
#
# if vis_only_ix is not None and vis_only_ix != n_vis:
# return
# trace = trace_param_automata(task['input'], parameters, n_iter, n_hidden)
# if not test:
# vid = vis_automata_trace(trace, prefix_image=draw_one(task['output']))
# else:
# vid = vis_automata_trace(trace, prefix_image=draw_one(np.zeros_like(task['input'])))
#
# # display(display_vid(vid))
#
# for task in (tasks['train']):
# n_vis += 1
# go(task, n_vis)
#
# for task in (tasks['test']):
# n_vis += 1
# go(task, n_vis, True)
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
training_tasks = sorted(os.listdir(training_path))
evaluation_tasks = sorted(os.listdir(evaluation_path))
test_tasks = sorted(os.listdir(test_path))
def load_data(p, phase=None):
"""
Load task data
"""
if phase in {'training', 'test', 'evaluation'}:
p = data_path / phase / p
task = json.loads(Path(p).read_text())
dict_vals_to_np = lambda x: {k: np.array(v) for k, v in x.items()}
assert set(task) == {'test', 'train'}
res = dict(test=[], train=[])
for t in task['train']:
assert set(t) == {'input', 'output'}
res['train'].append(dict_vals_to_np(t))
for t in task['test']:
if phase == 'test':
assert set(t) == {'input'}
else:
assert set(t) == {'input', 'output'}
res['test'].append(dict_vals_to_np(t))
return res
nbh = lambda x, i, j: {
(ip, jp) : x[i+ip, j+jp]
for ip, jp in product([1, -1, 0], repeat=2)
if 0 <= i+ip < x.shape[0] and 0 <= j+jp < x.shape[1] and (not (ip==0 and jp==0))
}
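# nbh returns the up-to-8 Moore neighbours of cell (i, j) of grid x as a dict keyed
# by the (row, col) offset, skipping offsets that fall outside the grid.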
def get_random_split_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
rule = {}
rule['type'] = random.choice(['nothing', 'color_figures', 'figures', 'macro_multiply'])
if rule['type'] in ['color_figures', 'figures']:
rule['sort'] = random.choice(['biggest', 'smallest'])
if rule['type'] == 'macro_multiply':
rule['k1'] = np.random.randint(config['mink1'], config['maxk1']+1)
rule['k2'] = np.random.randint(config['mink2'], config['maxk2']+1)
return rule
def get_random_merge_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
rule = {}
rule['type'] = random.choice(['cellwise_or', 'output_first', 'output_last'])
return rule
def apply_split_rule(input, hidden, split_rule):
if split_rule['type'] == 'nothing':
return [(input, hidden)]
if split_rule['type'] == 'macro_multiply':
ks = split_rule['k1'] * split_rule['k2']
grids = [(np.copy(input), np.copy(hidden)) for _ in range(ks)]
return grids
#split_rule['type'] = 'figures'
dif_c_edge = split_rule['type'] == 'figures'
communities = get_connectivity_info(input, ignore_black=True, edge_for_difcolors=dif_c_edge)
if len(communities) > 0:
if split_rule['sort'] == 'biggest':
communities = communities[::-1]
grids = [(np.zeros_like(input), np.zeros_like(hidden)) for _ in range(len(communities))]
for i in range(len(communities)):
for point in communities[i]:
grids[i][0][point] = input[point]
else:
grids = [(input, hidden)]
return grids
def apply_merge_rule(grids, merge_rule, split_rule):
if split_rule['type'] == 'macro_multiply':
shape_base = grids[0][0].shape
shapes = [arr[0].shape for arr in grids]
if not np.array([shape_base == sh for sh in shapes]).all():
return np.zeros((1, 1), dtype=np.int)
ks_1 = split_rule['k1']
ks_2 = split_rule['k2']
output = np.zeros((shape_base[0] * ks_1, shape_base[1] * ks_2), dtype=np.int8)
for k1 in range(ks_1):
for k2 in range(ks_2):
output[(k1*shape_base[0]):((k1+1) * shape_base[0]), (k2*shape_base[1]):((k2+1) * shape_base[1])] = grids[k1*ks_2 + k2][0]
return output
if merge_rule['type'] == 'cellwise_or':
output = np.zeros_like(grids[0][0])
for i in np.arange(len(grids))[::-1]:
if grids[i][0].shape == output.shape:
output[grids[i][0]>0] = grids[i][0][grids[i][0]>0]
return output
elif merge_rule['type'] == 'output_first':
output = grids[0][0]
elif merge_rule['type'] == 'output_last':
output = grids[-1][0]
return output
def get_random_ca_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
types_possible = \
[
'copy_color_by_direction',
'direct_check',
'indirect_check',
'nbh_check',
'corner_check',
'color_distribution',
]
ca_rules = []
best_candidates_items = list(best_candidates.items())
if len(best_candidates_items) > 0:
for best_score, best_candidates_score in best_candidates_items:
for best_c in best_candidates_score:
gl, ca, _, _ = best_c
ca_rules += [c['type'] for c in ca]
type_counts = dict(zip(types_possible, np.zeros(len(types_possible))))
rules, counts = np.unique(ca_rules, return_counts=True)
for i in range(rules.shape[0]):
type_counts[rules[i]] += counts[i]
counts = np.array(list(type_counts.values()))
if np.sum(counts) > 0:
counts /= np.sum(counts)
else:
counts = np.ones(counts.shape[0]) / counts.shape[0]
uniform = np.ones(counts.shape[0]) / counts.shape[0]
probs = temp * counts + (1 - temp) * uniform
else:
probs = np.ones(len(types_possible)) / len(types_possible)
colors = all_colors[1:]
type_probs = np.ones(len(types_possible)) / len(types_possible)
if r_type is None:
random_type = types_possible[np.random.choice(len(types_possible), p=probs)]
else:
random_type = r_type
def get_random_out_color():
possible_colors = config['possible_colors_out']
return np.random.choice(possible_colors)
def get_random_ignore_colors():
if config['possible_ignore_colors'].shape[0] > 0:
possible_colors = config['possible_ignore_colors']
return possible_colors[np.random.randint(2, size=possible_colors.shape[0]) == 1]
else:
return []
def get_random_all_colors():
return all_colors[np.random.randint(2, size=all_colors.shape[0]) == 1]
def get_random_colors():
return get_random_all_colors()
def get_random_all_color():
return np.random.choice(all_colors)
def get_random_color():
return get_random_all_color()
rule = {}
rule['type'] = random_type
rule['macro_type'] = 'ca_rule'
rule['ignore_colors'] = list(config['ignore_colors'])
if np.random.rand() < 0.5 and config['possible_ignore_colors'].shape[0]:
rule['ignore_colors'] += [random.choice(config['possible_ignore_colors'])]
if random_type == 'copy_color_by_direction':
rule['direction'] = random.choice(['everywhere'])
rule['copy_color'] = [get_random_out_color()]
rule['look_back_color'] = rule['copy_color'][0]
elif random_type == 'corner_check':
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'direct_check':
rule['nbh_check_sum'] = np.random.randint(4)
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'indirect_check':
rule['nbh_check_sum'] = np.random.randint(4)
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'nbh_check':
rule['nbh_check_sum'] = np.random.randint(8)
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'color_distribution':
rule['direction'] = random.choice(
['top', 'bottom', 'left', 'right', 'top_left', 'bottom_left', 'top_right', 'bottom_right'])
rule['check_in_empty'] = np.random.randint(2)
rule['color_out'] = get_random_out_color()
if rule['check_in_empty'] == 0:
rule['color_in'] = rule['color_out']
else:
rule['color_in'] = get_random_all_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['color_out']]))
return rule
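# Illustrative CA-rule dict (field values are randomised above), e.g. for 'nbh_check':
# {'type': 'nbh_check', 'macro_type': 'ca_rule', 'ignore_colors': [0, 4],
#  'nbh_check_colors': [2], 'nbh_check_sum': 3, 'nbh_check_out': 4}
# meaning: recolour a cell to 4 when more than 3 of its 8 neighbours have colour 2.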
def get_random_global_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
types_possible = \
[
'distribute_colors',
'unity',
'color_for_inners',
'map_color',
'draw_lines',
'draw_line_to',
'gravity',
'make_holes',
'distribute_from_border',
'align_pattern',
'rotate',
'flip'
]
if config['allow_make_smaller']:
types_possible += \
[
'crop_empty',
'crop_figure',
'split_by_H',
'split_by_W',
'reduce'
]
# if config['allow_make_bigger']:
# types_possible += \
# [
# 'macro_multiply_by',
# 'micro_multiply_by',
# 'macro_multiply_k',
# ]
gl_rules = []
best_candidates_items = list(best_candidates.items())
if len(best_candidates_items) > 0:
for best_score, best_candidates_score in best_candidates_items:
for best_c in best_candidates_score:
gl, ca, _, _ = best_c
gl_rules += [c['type'] for c in gl]
type_counts = dict(zip(types_possible, np.zeros(len(types_possible))))
rules, counts = np.unique(gl_rules, return_counts=True)
for i in range(rules.shape[0]):
type_counts[rules[i]] += counts[i]
counts = np.array(list(type_counts.values()))
if np.sum(counts) > 0:
counts /= np.sum(counts)
else:
counts = np.ones(counts.shape[0]) / counts.shape[0]
uniform = np.ones(counts.shape[0]) / counts.shape[0]
probs = temp * counts + (1 - temp) * uniform
else:
probs = np.ones(len(types_possible)) / len(types_possible)
colors = all_colors[1:]
type_probs = np.ones(len(types_possible)) / len(types_possible)
if r_type is None:
random_type = types_possible[np.random.choice(len(types_possible), p=probs)]
else:
random_type = r_type
def get_random_all_colors():
return all_colors[np.random.randint(2, size=all_colors.shape[0]) == 1]
def get_random_colors():
return all_colors[np.random.randint(2, size=all_colors.shape[0]) == 1]
def get_random_all_color():
return np.random.choice(all_colors)
def get_random_color():
return get_random_all_color()
def get_random_out_color():
possible_colors = config['possible_colors_out']
return np.random.choice(possible_colors)
rule = {}
rule['type'] = random_type
rule['macro_type'] = 'global_rule'
rule['apply_to'] = random.choice(['all', 'index'])
if np.random.rand()<0.2:
rule['apply_to'] = 'last'
if rule['apply_to'] == 'index':
rule['apply_to_index'] = np.random.choice(10)
if random_type == 'macro_multiply_k':
rule['k'] = (np.random.randint(1, 4), np.random.randint(1, 4))
elif random_type == 'flip':
rule['how'] = random.choice(['ver', 'hor'])
elif random_type == 'rotate':
rule['rotations_count'] = np.random.randint(1, 4)
elif random_type == 'micro_multiply_by':
rule['how_many'] = random.choice([2, 3, 4, 5, 'size'])
elif random_type == 'macro_multiply_by':
rule['how_many'] = random.choice(['both', 'hor', 'ver'])
        rule['rotates'] = [np.random.randint(1) for _ in range(4)]  # note: np.random.randint(1) always returns 0, so no extra rotation is applied here
rule['flips'] = [random.choice(['hor', 'ver', 'horver', 'no']) for _ in range(4)]
elif random_type == 'distribute_from_border':
rule['colors'] = list(np.unique([get_random_out_color(), get_random_all_color()]))
elif random_type == 'draw_lines':
rule['direction'] = random.choice(['everywhere', 'horizontal', 'vertical', 'horver', 'diagonal'])
# 'top', 'bottom', 'left', 'right',
# 'top_left', 'bottom_left', 'top_right', 'bottom_right'])
rule['not_stop_by_color'] = 0 # get_random_all_color()
rule['start_by_color'] = get_random_all_color()
rule['with_color'] = get_random_out_color()
elif random_type == 'reduce':
rule['skip_color'] = get_random_all_color()
elif random_type == 'draw_line_to':
#rule['direction_type'] = random.choice(['border'])
rule['direction_color'] = get_random_all_color()
rule['not_stop_by_color'] = 0
if np.random.rand() < 0.5:
rule['not_stop_by_color_and_skip'] = get_random_all_color()
else:
rule['not_stop_by_color_and_skip'] = 0
rule['start_by_color'] = get_random_all_color()
rule['with_color'] = get_random_out_color()
elif random_type == 'distribute_colors':
rule['colors'] = list(np.unique([get_random_out_color(), get_random_all_color()]))
rule['horizontally'] = np.random.randint(2)
rule['vertically'] = np.random.randint(2)
rule['intersect'] = get_random_out_color()
elif random_type == 'color_for_inners':
rule['color_out'] = get_random_out_color()
elif random_type == 'crop_figure':
rule['mode'] = random.choice(['smallest', 'biggest'])
rule['dif_c_edge'] = random.choice([True, False])
elif random_type == 'unity':
rule['mode'] = random.choice(['diagonal', 'horizontal', 'vertical', 'horver'])
# rule['inner'] = np.random.choice(2)
rule['ignore_colors'] = [0]
if np.random.rand() < 0.5:
rule['ignore_colors'] += [get_random_all_color()]
rule['with_color'] = random.choice([get_random_out_color(), 0])
elif random_type == 'map_color':
rule['color_in'] = get_random_all_color()
rule['color_out'] = get_random_out_color()
elif random_type == 'gravity':
rule['gravity_type'] = random.choice(['figures', 'cells'])
rule['steps_limit'] = np.random.choice(2)
rule['look_at_what_to_move'] = np.random.choice(2)
if rule['look_at_what_to_move'] == 1:
rule['color_what'] = get_random_out_color()
rule['direction_type'] = random.choice(['border', 'color'])
if rule['direction_type'] == 'border':
rule['direction_border'] = random.choice(['top', 'bottom', 'left', 'right'])
else:
rule['direction_color'] = get_random_color()
elif random_type == 'split_by_H' or random_type == 'split_by_W':
rule['merge_rule'] = random.choice(['and', 'equal', 'or', 'xor'])
elif random_type == 'align_pattern':
rule['macro_type'] = 'global_interaction_rule'
# rule['allow_rotation'] = False
rule['allow_color'] = get_random_all_color()
rule['fill_with_color'] = 0 #random.choice([0, get_random_all_color()])
return rule
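# Illustrative global-rule dict (field values are randomised above), e.g. for 'map_color':
# {'type': 'map_color', 'macro_type': 'global_rule', 'apply_to': 'all',
#  'color_in': 3, 'color_out': 8} - recolour every cell of colour 3 to colour 8.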
def get_task_metadata(task):
colors = []
shapes_input = [[], []]
shapes_output = [[], []]
for part in ['train']:
for uni_task in task[part]:
inp = uni_task['input']
colors += list(np.unique(inp))
out = uni_task['output']
colors += list(np.unique(out))
shapes_input[0].append(inp.shape[0])
shapes_input[1].append(inp.shape[1])
shapes_output[0].append(out.shape[0])
shapes_output[1].append(out.shape[1])
all_colors = np.unique(colors)
min_k1 = int(np.floor(np.min(np.array(shapes_output[0])/np.array(shapes_input[0]))))
min_k2 = int(np.floor(np.min(np.array(shapes_output[1])/np.array(shapes_input[1]))))
max_k1 = int(np.ceil(np.max(np.array(shapes_output[0])/np.array(shapes_input[0]))))
max_k2 = int(np.ceil(np.max(np.array(shapes_output[1])/np.array(shapes_input[1]))))
max_shape = np.max([shapes_input])
config = {}
config['mink1'] = max(1, min(min(min_k1, 30//max_shape), 3))
config['mink2'] = max(1, min(min(min_k2, 30//max_shape), 3))
config['maxk1'] = max(1, min(min(max_k1, 30//max_shape), 3))
config['maxk2'] = max(1, min(min(max_k2, 30//max_shape), 3))
config['allow_make_smaller'] = False
config['allow_make_bigger'] = False
for uni_task in task['train']:
if uni_task['input'].shape[0] > uni_task['output'].shape[0] or \
uni_task['input'].shape[1] > uni_task['output'].shape[1]:
config['allow_make_smaller'] = True
if uni_task['input'].shape[0] < uni_task['output'].shape[0] or \
uni_task['input'].shape[1] < uni_task['output'].shape[1]:
config['allow_make_bigger'] = True
colors_out = []
changed_colors = []
inp_colors = []
for uni_task in task['train']:
inp = uni_task['input']
out = uni_task['output']
for i in range(min(inp.shape[0], out.shape[0])):
for j in range(min(inp.shape[1], out.shape[1])):
inp_colors.append(inp[i, j])
if out[i, j] != inp[i, j]:
colors_out.append(out[i, j])
changed_colors.append(inp[i, j])
inp_colors = np.unique(inp_colors)
changed_colors = np.unique(changed_colors)
config['ignore_colors'] = [c for c in inp_colors if not c in changed_colors]
config['possible_ignore_colors'] = np.array([c for c in all_colors if not c in config['ignore_colors']])
if len(colors_out) == 0:
colors_out = [0]
config['possible_colors_out'] = np.unique(colors_out)
return all_colors, config
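# get_task_metadata summary: all_colors is the sorted set of colours seen in the train
# inputs/outputs; config carries the macro_multiply bounds (mink1..maxk2), whether the grid
# may shrink/grow, colours that never change ('ignore_colors'), the remaining candidate
# colours ('possible_ignore_colors') and the observed output colours ('possible_colors_out').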
def compute_parametrized_automata(input, hidden_i, rules):
output = np.zeros_like(input, dtype=int)
hidden_o = np.copy(hidden_i)
for i, j in product(range(input.shape[0]), range(input.shape[1])):
i_c = input[i, j]
i_nbh = nbh(input, i, j)
        # cells adjacent to the current one
i_direct_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 0), (-1, 0), (0, 1), (0, -1)}}
i_indirect_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 1), (-1, -1), (-1, 1), (1, -1)}}
is_top_b, is_bottom_b = i == 0, i == input.shape[0] - 1
is_left_b, is_right_b = j == 0, j == input.shape[1] - 1
is_b = is_top_b or is_bottom_b or is_left_b or is_right_b
if i_c > 0:
output[i, j] = i_c
for rule in rules:
if i_c in rule['ignore_colors']:
continue
if rule['type'] == 'copy_color_by_direction':
if rule['direction'] == 'bottom' or rule['direction'] == 'everywhere':
if not is_top_b and input[i - 1, j] in rule['copy_color'] and \
(i == 1 or input[i - 2, j] == rule['look_back_color']):
output[i, j] = input[i - 1, j]
break
if rule['direction'] == 'top' or rule['direction'] == 'everywhere':
if not is_bottom_b and input[i + 1, j] in rule['copy_color'] and \
(i == input.shape[0] - 2 or input[i + 2, j] == rule['look_back_color']):
output[i, j] = input[i + 1, j]
break
if rule['direction'] == 'right' or rule['direction'] == 'everywhere':
if not is_left_b and input[i, j - 1] in rule['copy_color'] and \
(j == 1 or input[i, j - 2] == rule['look_back_color']):
output[i, j] = input[i, j - 1]
break
if rule['direction'] == 'left' or rule['direction'] == 'everywhere':
if not is_right_b and input[i, j + 1] in rule['copy_color'] and \
(j == input.shape[1] - 2 or input[i, j + 2] == rule['look_back_color']):
output[i, j] = input[i, j + 1]
break
elif rule['type'] == 'corner_check':
color_nbh = rule['nbh_check_colors']
sum_nbh = 3
out_nbh = rule['nbh_check_out']
i_uplecorner_nbh = {k: v for k, v in i_nbh.items() if k in {(-1, -1), (-1, 0), (0, -1)}}
i_upricorner_nbh = {k: v for k, v in i_nbh.items() if k in {(-1, 1), (-1, 0), (0, 1)}}
i_dolecorner_nbh = {k: v for k, v in i_nbh.items() if k in {(1, -1), (1, 0), (0, -1)}}
i_doricorner_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 1), (1, 0), (0, 1)}}
if sum(1 for v in i_nbh.values() if v in color_nbh) < 3:
continue
did_something = False
for corner_idx in [i_uplecorner_nbh, i_upricorner_nbh, i_dolecorner_nbh, i_doricorner_nbh]:
for color in color_nbh:
if sum(1 for v in corner_idx.values() if v == color) == sum_nbh:
output[i, j] = out_nbh
did_something = True
break
if did_something:
break
if did_something:
break
elif rule['type'] == 'nbh_check':
color_nbh = rule['nbh_check_colors']
sum_nbh = rule['nbh_check_sum']
out_nbh = rule['nbh_check_out']
proper_nbhs = i_nbh.values()
if sum(1 for v in proper_nbhs if v in color_nbh) > sum_nbh:
output[i, j] = out_nbh
break
elif rule['type'] == 'direct_check':
color_nbh = rule['nbh_check_colors']
sum_nbh = rule['nbh_check_sum']
out_nbh = rule['nbh_check_out']
proper_nbhs = i_direct_nbh.values()
if sum(1 for v in proper_nbhs if v in color_nbh) > sum_nbh:
output[i, j] = out_nbh
break
elif rule['type'] == 'indirect_check':
color_nbh = rule['nbh_check_colors']
sum_nbh = rule['nbh_check_sum']
out_nbh = rule['nbh_check_out']
proper_nbhs = i_indirect_nbh.values()
if sum(1 for v in proper_nbhs if v in color_nbh) > sum_nbh:
output[i, j] = out_nbh
break
elif rule['type'] == 'color_distribution':
directions = ['top', 'bottom', 'left', 'right', 'top_left', 'bottom_left', 'top_right', 'bottom_right']
not_border_conditions = \
[
not is_top_b,
not is_bottom_b,
not is_left_b,
not is_right_b,
not is_top_b and not is_left_b,
not is_bottom_b and not is_left_b,
not is_top_b and not is_right_b,
not is_bottom_b and not is_right_b
]
index_from = \
[
(i - 1, j),
(i + 1, j),
(i, j - 1),
(i, j + 1),
(i - 1, j - 1),
(i + 1, j - 1),
(i - 1, j + 1),
(i + 1, j + 1)
]
did_something = False
for i_dir, direction in enumerate(directions):
if rule['direction'] == direction:
if not_border_conditions[i_dir]:
if (rule['check_in_empty'] == 1 and input[index_from[i_dir]] > 0) or \
(rule['check_in_empty'] == 0 and input[index_from[i_dir]] == rule['color_in']):
output[i, j] = rule['color_out']
did_something = True
break
if did_something:
break
return output, hidden_o
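# One CA step, in words: every non-zero cell keeps its colour, rules whose 'ignore_colors'
# contain the cell colour are skipped, and the first matching rule (in list order) decides
# the new colour; the hidden grid is passed through unchanged.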
def get_connectivity_info(color: np.array, ignore_black = False, von_neumann_only = False, edge_for_difcolors = False):
    # A union-find (disjoint-set) structure lets us find all connected areas in near-linear time.
class UnionFind:
def __init__(self) -> None:
self.area = np.ones(color.size)
self.parent = np.arange(color.size)
def find(self, x: int) -> int:
if self.parent[x] != x:
self.parent[x] = self.find(self.parent[x])
return self.parent[x]
def union(self, u: int, v: int) -> None:
root_u, root_v = self.find(u), self.find(v)
if root_u != root_v:
area_u, area_v = self.area[root_u], self.area[root_v]
if area_u < area_v:
root_u, root_v = root_v, root_u
self.parent[root_v] = root_u
self.area[root_u] = area_u + area_v
union_find = UnionFind()
neighbours = [[-1, 0], [0, -1], [1, 0], [0, 1]]
if not von_neumann_only:
neighbours.extend([[-1, -1], [1, -1], [1, 1], [-1, 1]])
nrows, ncols = color.shape
for i in range(nrows):
for j in range(ncols):
for s, t in neighbours:
u, v = i + s, j + t
if u >= 0 and u < nrows and v >= 0 and v < ncols and \
(color[u, v] == color[i, j] or (edge_for_difcolors and (color[u, v]>0) == (color[i, j]>0))):
union_find.union(u * ncols + v, i * ncols + j)
    # for every (non-ignored) cell: append it to the component keyed by its union-find root
communities = defaultdict(list)
for i, j in product(range(nrows), range(ncols)):
if not ignore_black or color[i, j] > 0:
communities[union_find.find(i * ncols + j)].append((i, j))
# the result is always sorted for consistency
communities = sorted(communities.values(), key = lambda area: (len(area), area))
return communities
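# Illustrative example (not executed): get_connectivity_info(np.array([[1, 0], [0, 0]]),
# ignore_black=True) returns [[(0, 0)]] - a single one-cell component; components are
# always returned smallest-first.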
def get_graph_communities(im, ignore_black=False):
G = nx.Graph()
I, J = im.shape
for i in range(I):
for j in range(J):
if ignore_black and im[i, j] == 0:
continue
G.add_node((i, j))
edges = []
if j >= 1:
if im[i, j] == im[i, j - 1]:
edges.append(((i, j), (i, j - 1)))
if j < J - 1:
if im[i, j] == im[i, j + 1]:
edges.append(((i, j), (i, j + 1)))
if i >= 1:
if im[i, j] == im[i - 1, j]:
edges.append(((i, j), (i - 1, j)))
if j >= 1:
if im[i, j] == im[i - 1, j - 1]:
edges.append(((i, j), (i - 1, j - 1)))
if j < J - 1:
if im[i, j] == im[i - 1, j + 1]:
edges.append(((i, j), (i - 1, j + 1)))
if i < I - 1:
if im[i, j] == im[i + 1, j]:
edges.append(((i, j), (i + 1, j)))
if j >= 1:
if im[i, j] == im[i + 1, j - 1]:
edges.append(((i, j), (i + 1, j - 1)))
if j < J - 1:
if im[i, j] == im[i + 1, j + 1]:
edges.append(((i, j), (i + 1, j + 1)))
G.add_edges_from(edges)
communities = list(nx.community.k_clique_communities(G, 2))
communities = [list(com) for com in communities]
for i in range(I):
for j in range(J):
i_nbh = nbh(im, i, j)
if sum(1 for v in i_nbh.values() if v == im[i, j]) == 0:
communities.append([(i, j)])
return communities
def apply_rule(input, hidden_i, rule):
output = np.zeros_like(input, dtype=int)
# print(type(input))
# print(input.shape)
hidden = np.zeros_like(input)
output[:, :] = input[:, :]
if rule['type'] == 'macro_multiply_k':
output = np.tile(output, rule['k'])
elif rule['type'] == 'flip':
if rule['how'] == 'ver':
output = output[::-1, :]
elif rule['how'] == 'hor':
output = output[:, ::-1]
elif rule['type'] == 'reduce':
skip_row = np.zeros(input.shape[0])
for i in range(1, input.shape[0]):
skip_row[i] = (input[i] == input[i-1]).all() or (input[i] == rule['skip_color']).all()
if (input[0] == rule['skip_color']).all():
skip_row[0] = 1
if np.sum(skip_row==0)>0:
output = input[skip_row == 0]
skip_column = np.zeros(input.shape[1])
for i in range(1, input.shape[1]):
skip_column[i] = (input[:, i] == input[:, i-1]).all() or (input[:, i] == rule['skip_color']).all()
if (input[:, 0] == rule['skip_color']).all():
skip_column[0] = 1
if np.sum(skip_column==0)>0:
output = output[:, skip_column == 0]
elif rule['type'] == 'rotate':
output = np.rot90(output, rule['rotations_count'])
elif rule['type'] == 'micro_multiply_by':
if rule['how_many'] == 'size':
k = output.shape[0]
else:
k = rule['how_many']
output = np.repeat(output, k, axis=0)
output = np.repeat(output, k, axis=1)
elif rule['type'] == 'macro_multiply_by':
if rule['how_many'] == 'both':
k = (2, 2)
elif rule['how_many'] == 'hor':
k = (1, 2)
elif rule['how_many'] == 'ver':
k = (2, 1)
output = np.tile(output, k)
if input.shape[0] == input.shape[1]:
for i in range(k[0]):
for j in range(k[1]):
sub = output[i * input.shape[0]: (i + 1) * input.shape[0],
j * input.shape[1]: (j + 1) * input.shape[1]]
sub_rotated = np.rot90(sub, rule['rotates'][i * 2 + j])
output[i * input.shape[0]: (i + 1) * input.shape[0],
j * input.shape[1]: (j + 1) * input.shape[1]] = sub_rotated
for i in range(k[0]):
for j in range(k[1]):
sub = output[i * input.shape[0]: (i + 1) * input.shape[0], j * input.shape[1]: (j + 1) * input.shape[1]]
if 'ver' in rule['flips'][i * 2 + j]:
sub = sub[::-1, :]
if 'hor' in rule['flips'][i * 2 + j]:
sub = sub[:, ::-1]
output[i * input.shape[0]: (i + 1) * input.shape[0], j * input.shape[1]: (j + 1) * input.shape[1]] = sub
elif rule['type'] == 'distribute_from_border':
hidden = np.zeros_like(input)
for i in range(1, input.shape[0] - 1):
if output[i, 0] in rule['colors']:
if not output[i, input.shape[1] - 1] in rule['colors'] or output[i, input.shape[1] - 1] == output[i, 0]:
output[i] = output[i, 0]
for j in range(1, input.shape[1] - 1):
if output[0, j] in rule['colors']:
if not output[input.shape[0] - 1, j] in rule['colors'] or output[input.shape[0] - 1, j] == output[0, j]:
output[:, j] = output[0, j]
elif rule['type'] == 'color_for_inners':
hidden = np.zeros_like(input)
changed = 1
while changed == 1:
changed = 0
for i, j in product(range(input.shape[0]), range(input.shape[1])):
i_c = input[i, j]
if i_c > 0 or hidden[i, j] == 1:
continue
if i == 0 or i == input.shape[0] - 1 or j == 0 or j == input.shape[1] - 1:
hidden[i, j] = 1
changed = 1
continue
i_nbh = nbh(hidden, i, j)
                # cells adjacent to the current one
i_direct_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 0), (-1, 0), (0, 1), (0, -1)}}
if sum(1 for v in i_direct_nbh.values() if v == 1) > 0:
hidden[i, j] = 1
changed = 1
output[((hidden == 0).astype(np.int) * (input == 0).astype(np.int)) == 1] = rule['color_out']
hidden = np.copy(hidden)
elif rule['type'] == 'draw_lines':
hidden = np.zeros_like(input)
if rule['direction'] == 'everywhere':
directions = ['top', 'bottom', 'left', 'right', 'top_left', 'bottom_left', 'top_right', 'bottom_right']
elif rule['direction'] == 'horizontal':
directions = ['left', 'right']
elif rule['direction'] == 'vertical':
directions = ['top', 'bottom']
elif rule['direction'] == 'horver':
directions = ['top', 'bottom', 'left', 'right']
elif rule['direction'] == 'diagonal':
directions = ['top_left', 'bottom_left', 'top_right', 'bottom_right']
else:
directions = [rule['direction']]
possible_directions = ['top', 'bottom', 'left', 'right',
'top_left', 'bottom_left', 'top_right', 'bottom_right']
index_change = \
[
[-1, 0],
[1, 0],
(0, -1),
(0, 1),
(-1, -1),
(+1, -1),
(-1, +1),
(+1, +1)
]
for i_dir, direction in enumerate(possible_directions):
if direction in directions:
idx_ch = index_change[i_dir]
for i in range(input.shape[0]):
for j in range(input.shape[1]):
if input[i, j] == rule['start_by_color']:
tmp_i = i + idx_ch[0]
tmp_j = j + idx_ch[1]
while 0 <= tmp_i < input.shape[0] and \
0 <= tmp_j < input.shape[1] and \
input[tmp_i, tmp_j] == rule['not_stop_by_color']:
output[tmp_i, tmp_j] = rule['with_color']
tmp_i += idx_ch[0]
tmp_j += idx_ch[1]
elif rule['type'] == 'draw_line_to':
hidden = np.zeros_like(input)
index_change = \
[
[-1, 0],
[1, 0],
(0, -1),
(0, 1),
]
for i, j in product(range(input.shape[0]), range(input.shape[1])):
if input[i, j] != rule['start_by_color']:
continue
number_0 = np.sum(output[:i] == rule['direction_color'])
number_1 = np.sum(output[(i + 1):] == rule['direction_color'])
number_2 = np.sum(output[:, :j] == rule['direction_color'])
number_3 = np.sum(output[:, (j + 1):] == rule['direction_color'])
i_dir = np.argmax([number_0, number_1, number_2, number_3])
# print([number_0, number_1, number_2, number_3])
# 1/0
idx_ch = index_change[i_dir]
tmp_i = i + idx_ch[0]
tmp_j = j + idx_ch[1]
while 0 <= tmp_i < input.shape[0] and \
0 <= tmp_j < input.shape[1] and \
(input[tmp_i, tmp_j] in [rule['not_stop_by_color'], rule['not_stop_by_color_and_skip']]):
skip_color = rule['not_stop_by_color_and_skip']
if skip_color == 0 or input[tmp_i, tmp_j] != skip_color:
output[tmp_i, tmp_j] = rule['with_color']
tmp_i += idx_ch[0]
tmp_j += idx_ch[1]
elif rule['type'] == 'distribute_colors':
non_zero_rows = []
non_zero_columns = []
color_for_row = np.zeros(input.shape[0])
color_for_column = np.zeros(input.shape[1])
for i in range(input.shape[0]):
row = input[i]
colors, counts = np.unique(row, return_counts=True)
good_colors = np.array([c in rule['colors'] for c in colors])
if not good_colors.any():
continue
colors = colors[good_colors]
counts = counts[good_colors]
best_color = colors[np.argmax(counts)]
color_for_row[i] = best_color
non_zero_rows.append(i)
for j in range(input.shape[1]):
row = input[:, j]
colors, counts = np.unique(row, return_counts=True)
good_colors = np.array([c in rule['colors'] for c in colors])
if not good_colors.any():
continue
colors = colors[good_colors]
counts = counts[good_colors]
best_color = colors[np.argmax(counts)]
color_for_column[j] = best_color
non_zero_columns.append(j)
if rule['horizontally'] == 1:
for i in non_zero_rows:
output[i] = color_for_row[i]
if rule['vertically'] == 1:
for j in non_zero_columns:
output[:, j] = color_for_column[j]
for i in non_zero_rows:
for j in non_zero_columns:
if input[i, j] == 0:
output[i, j] = rule['intersect']
hidden = np.copy(hidden_i)
elif rule['type'] == 'unity':
hidden = np.copy(hidden_i)
if rule['mode'] == 'vertical':
for j in range(input.shape[1]):
last_color_now = np.zeros(10, dtype=np.int) - 1
for i in range(input.shape[0]):
if not input[i, j] in rule['ignore_colors'] and last_color_now[input[i, j]] >= 0:
if rule['with_color'] == 0:
output[(last_color_now[input[i, j]] + 1):i, j] = input[i, j]
else:
output[(last_color_now[input[i, j]] + 1):i, j] = rule['with_color']
last_color_now[input[i, j]] = i
elif not input[i, j] in rule['ignore_colors']:
last_color_now[input[i, j]] = i
elif rule['mode'] == 'horizontal':
for i in range(input.shape[0]):
last_color_now = np.zeros(10, dtype=np.int) - 1
for j in range(input.shape[1]):
if not input[i, j] in rule['ignore_colors'] and last_color_now[input[i, j]] >= 0:
if rule['with_color'] == 0:
output[i, (last_color_now[input[i, j]] + 1):j] = input[i, j]
else:
output[i, (last_color_now[input[i, j]] + 1):j] = rule['with_color']
last_color_now[input[i, j]] = j
elif not input[i, j] in rule['ignore_colors']:
last_color_now[input[i, j]] = j
elif rule['mode'] == 'horver':
for j in range(input.shape[1]):
last_color_now = np.zeros(10, dtype=np.int) - 1
for i in range(input.shape[0]):
if not input[i, j] in rule['ignore_colors'] and last_color_now[input[i, j]] >= 0:
if rule['with_color'] == 0:
output[(last_color_now[input[i, j]] + 1):i, j] = input[i, j]
else:
output[(last_color_now[input[i, j]] + 1):i, j] = rule['with_color']
last_color_now[input[i, j]] = i
elif not input[i, j] in rule['ignore_colors']:
last_color_now[input[i, j]] = i
for i in range(input.shape[0]):
last_color_now = np.zeros(10, dtype=np.int) - 1
for j in range(input.shape[1]):
if not input[i, j] in rule['ignore_colors'] and last_color_now[input[i, j]] >= 0:
if rule['with_color'] == 0:
output[i, (last_color_now[input[i, j]] + 1):j] = input[i, j]
else:
output[i, (last_color_now[input[i, j]] + 1):j] = rule['with_color']
last_color_now[input[i, j]] = j
elif not input[i, j] in rule['ignore_colors']:
last_color_now[input[i, j]] = j
elif rule['mode'] == 'diagonal':
for diag_id in range(-input.shape[0] - 1, input.shape[1] + 1):
last_color_now_x = np.zeros(10, dtype=np.int) - 1
last_color_now_y = np.zeros(10, dtype=np.int) - 1
for i, j in zip(np.arange(input.shape[0]), diag_id + np.arange(input.shape[0])):
if 0 <= i < input.shape[0] and 0 <= j < input.shape[1]:
if not input[i, j] in rule['ignore_colors'] and last_color_now_x[input[i, j]] >= 0:
if rule['with_color'] == 0:
output[np.arange(last_color_now_x[input[i, j]] + 1, i), np.arange(
last_color_now_y[input[i, j]] + 1, j)] = input[i, j]
else:
output[np.arange(last_color_now_x[input[i, j]] + 1, i), np.arange(
last_color_now_y[input[i, j]] + 1, j)] = rule[
'with_color']
last_color_now_x[input[i, j]] = i
last_color_now_y[input[i, j]] = j
elif not input[i, j] in rule['ignore_colors']:
last_color_now_x[input[i, j]] = i
last_color_now_y[input[i, j]] = j
reflected_input = input[:, ::-1]
output = output[:, ::-1]
for diag_id in range(-reflected_input.shape[0] - 1, reflected_input.shape[1] + 1):
last_color_now_x = np.zeros(10, dtype=np.int) - 1
last_color_now_y = np.zeros(10, dtype=np.int) - 1
for i, j in zip(np.arange(reflected_input.shape[0]), diag_id + np.arange(reflected_input.shape[0])):
if 0 <= i < reflected_input.shape[0] and 0 <= j < reflected_input.shape[1]:
if not reflected_input[i, j] in rule['ignore_colors'] and last_color_now_x[
reflected_input[i, j]] >= 0:
if rule['with_color'] == 0:
output[np.arange(last_color_now_x[reflected_input[i, j]] + 1, i), np.arange(
last_color_now_y[reflected_input[i, j]] + 1, j)] = reflected_input[i, j]
else:
output[np.arange(last_color_now_x[reflected_input[i, j]] + 1, i), np.arange(
last_color_now_y[reflected_input[i, j]] + 1, j)] = rule[
'with_color']
last_color_now_x[reflected_input[i, j]] = i
last_color_now_y[reflected_input[i, j]] = j
elif not reflected_input[i, j] in rule['ignore_colors']:
last_color_now_x[reflected_input[i, j]] = i
last_color_now_y[reflected_input[i, j]] = j
output = output[:, ::-1]
elif rule['type'] == 'split_by_H':
hidden = np.copy(hidden_i)
if output.shape[0] >= 2:
part1 = output[:int(np.floor(output.shape[0] / 2))]
part2 = output[int(np.ceil(output.shape[0] / 2)):]
output = np.zeros_like(part1)
if rule['merge_rule'] == 'or':
output[part1 > 0] = part1[part1 > 0]
output[part2 > 0] = part2[part2 > 0]
elif rule['merge_rule'] == 'equal':
idx = np.logical_and(np.logical_and(part1 > 0, part2 > 0), part1 == part2)
output[idx] = part1[idx]
elif rule['merge_rule'] == 'and':
idx = np.logical_and(part1 > 0, part2 > 0)
output[idx] = part1[idx]
elif rule['merge_rule'] == 'xor':
idx = np.logical_xor(part1 > 0, part2 > 0)
output[idx] = part1[idx]
elif rule['type'] == 'split_by_W':
hidden = np.copy(hidden_i)
if output.shape[1] >= 2:
part1 = output[:, :int(np.floor(output.shape[1] / 2))]
part2 = output[:, int(np.ceil(output.shape[1] / 2)):]
output = np.zeros_like(part1)
if rule['merge_rule'] == 'or':
output[part1 > 0] = part1[part1 > 0]
output[part2 > 0] = part2[part2 > 0]
elif rule['merge_rule'] == 'equal':
idx = np.logical_and(np.logical_and(part1 > 0, part2 > 0), part1 == part2)
output[idx] = part1[idx]
elif rule['merge_rule'] == 'and':
idx = np.logical_and(part1 > 0, part2 > 0)
output[idx] = part1[idx]
elif rule['merge_rule'] == 'xor':
idx = np.logical_xor(part1 > 0, part2 > 0)
output[idx] = part1[idx]
elif rule['type'] == 'map_color':
hidden = np.copy(hidden_i)
output[output == rule['color_in']] = rule['color_out']
elif rule['type'] == 'crop_empty':
hidden = np.copy(hidden_i)
nonzerosi = np.max((output != 0).astype(np.int), axis=1)
nonzerosj = np.max((output != 0).astype(np.int), axis=0)
# print(nonzerosi)
# print(nonzerosj)
if np.max(nonzerosi) == 0 or np.max(nonzerosj) == 0:
output = output * 0
else:
mini = np.min(np.arange(output.shape[0])[nonzerosi == 1])
maxi = np.max(np.arange(output.shape[0])[nonzerosi == 1])
minj = np.min(np.arange(output.shape[1])[nonzerosj == 1])
maxj = np.max(np.arange(output.shape[1])[nonzerosj == 1])
output = output[mini:(maxi + 1), minj:(maxj + 1)]
elif rule['type'] == 'crop_figure':
hidden = np.copy(hidden_i)
communities = get_connectivity_info(output, ignore_black=True, edge_for_difcolors=rule['dif_c_edge'])
if len(communities) == 0:
output = np.zeros_like(output)
else:
if rule['mode'] == 'biggest':
biggest = list(communities[np.argmax([len(list(com)) for com in communities])])
else:
biggest = list(communities[np.argmin([len(list(com)) for com in communities])])
biggest = np.array(biggest)
min_bx = np.min(biggest[:, 0])
min_by = np.min(biggest[:, 1])
biggest[:, 0] -= min_bx
biggest[:, 1] -= min_by
output = np.zeros((np.max(biggest[:, 0]) + 1, np.max(biggest[:, 1]) + 1), dtype=np.int)
for i in range(biggest.shape[0]):
output[tuple(biggest[i])] = input[(min_bx + biggest[i][0], min_by + biggest[i][1])]
elif rule['type'] == 'make_holes':
hidden = np.copy(hidden_i)
for i in range(output.shape[0]):
for j in range(output.shape[1]):
i_nbh = nbh(output, i, j)
proper_nbhs = i_nbh.values()
for color in range(1, 10):
if sum(1 for v in proper_nbhs if v == color) == 8:
output[i, j] = 0
break
elif rule['type'] == 'gravity':
changed_smth = 1
hidden = np.copy(hidden_i)
im = output
if rule['gravity_type'] == 'figures':
communities = get_connectivity_info(im, ignore_black=True)
else:
communities = []
for i in range(output.shape[0]):
for j in range(output.shape[1]):
if output[i, j] > 0:
communities.append([[i, j]])
directions = []
for com in communities:
community = list(com)
color_fig = output[community[0][0], community[0][1]]
if rule['look_at_what_to_move'] == 1 and color_fig != rule['color_what']:
directions.append('None')
continue
xs = [p[0] for p in community]
ys = [p[1] for p in community]
if rule['direction_type'] == 'border':
direction = rule['direction_border']
elif rule['direction_type'] == 'color':
color = rule['direction_color']
xmin, xmax = np.min(xs), np.max(xs)
ymin, ymax = np.min(ys), np.max(ys)
number_0 = np.sum(output[:xmin] == color)
number_1 = np.sum(output[(xmax + 1):] == color)
number_2 = np.sum(output[:, :ymin] == color)
number_3 = np.sum(output[:, (ymax + 1):] == color)
direction = ['top', 'bottom', 'left', 'right'][np.argmax([number_0, number_1, number_2, number_3])]
directions.append(direction)
already_moved = np.zeros(len(communities))
while changed_smth > 0:
changed_smth = 0
for i, com in enumerate(communities):
community = list(com)
color_fig = output[community[0][0], community[0][1]]
xs = [p[0] for p in community]
ys = [p[1] for p in community]
direction = directions[i]
if direction == 'top':
toper = np.array([[p[0] - 1, p[1]] for p in community if (p[0] - 1, p[1]) not in community])
xs = np.array([p[0] for p in toper])
ys = np.array([p[1] for p in toper])
if np.min(xs) < 0:
continue
if (output[xs, ys] == 0).all() and (rule['steps_limit']==1 or already_moved[i]==0):
changed_smth = 1
already_moved[i]=1
com_xs = np.array([p[0] for p in community])
com_ys = np.array([p[1] for p in community])
output[com_xs, com_ys] = 0
output[com_xs - 1, com_ys] = color_fig
communities[i] = [(p[0] - 1, p[1]) for p in community]
if direction == 'bottom':
toper = np.array([[p[0] + 1, p[1]] for p in community if (p[0] + 1, p[1]) not in community])
xs = np.array([p[0] for p in toper])
ys = np.array([p[1] for p in toper])
if np.max(xs) == input.shape[0]:
continue
if (output[xs, ys] == 0).all() and (rule['steps_limit']==1 or already_moved[i]==0):
changed_smth = 1
already_moved[i]=1
com_xs = np.array([p[0] for p in community])
com_ys = np.array([p[1] for p in community])
output[com_xs, com_ys] = 0
output[com_xs + 1, com_ys] = color_fig
communities[i] = [(p[0] + 1, p[1]) for p in community]
if direction == 'left':
toper = np.array([[p[0], p[1] - 1] for p in community if (p[0], p[1] - 1) not in community])
xs = np.array([p[0] for p in toper])
ys = np.array([p[1] for p in toper])
if np.min(ys) < 0:
continue
if (output[xs, ys] == 0).all() and (rule['steps_limit']==1 or already_moved[i]==0):
changed_smth = 1
already_moved[i]=1
com_xs = np.array([p[0] for p in community])
com_ys = np.array([p[1] for p in community])
output[com_xs, com_ys] = 0
output[com_xs, com_ys - 1] = color_fig
communities[i] = [(p[0], p[1] - 1) for p in community]
if direction == 'right':
toper = np.array([[p[0], p[1] + 1] for p in community if (p[0], p[1] + 1) not in community])
xs = np.array([p[0] for p in toper])
ys = np.array([p[1] for p in toper])
if np.max(ys) == input.shape[1]:
continue
if (output[xs, ys] == 0).all() and (rule['steps_limit']==1 or already_moved[i]==0):
changed_smth = 1
already_moved[i]=1
com_xs = np.array([p[0] for p in community])
com_ys = np.array([p[1] for p in community])
output[com_xs, com_ys] = 0
output[com_xs, com_ys + 1] = color_fig
communities[i] = [(p[0], p[1] + 1) for p in community]
return output, hidden
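# apply_rule is the interpreter for a single global rule: it starts from a copy of the
# input, applies the transformation named by rule['type'] and returns (output, hidden).
# Minimal illustrative case: {'type': 'flip', 'how': 'ver'} reverses the row order.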
def compute_metrics(prediction_grid, answer_grid):
n_metrics = 11
def get_metrics(prediction, answer):
prediction_empty = (prediction == 0).astype(np.int)
answer_empty = (answer == 0).astype(np.int)
right = (prediction == answer).astype(np.int)
# empty_right = (prediction_empty == answer_empty).astype(np.int)
#
accuracy = np.mean(right)
# accuracy_empty = np.mean(empty_right)
# precision = 1 - np.mean((1 - prediction_empty) * (1 - right))
# recall = 1 - np.mean((1 - answer_empty) * (1 - right))
# precision_empty = 1 - np.mean((1 - prediction_empty) * (1 - empty_right))
# recall_empty = 1 - np.mean((1 - answer_empty) * (1 - empty_right))
# return [accuracy,
# accuracy_empty,
# precision, recall,
# precision_empty, recall_empty
# ][:n_metrics]
color_rights = []
for color in range(10):
idx = answer != color
# print(idx.astype(np.int))
color_right = float((np.logical_or(idx, right).all() and not (prediction[idx]==color).any()))
color_rights.append(color_right)
#print(color_rights)
#print(color_rights)
#1/0
# right = (prediction == answer).astype(np.int)
# empty_right = (prediction_empty == answer_empty).astype(np.int)
#
# accuracy = np.mean(right)
# accuracy_empty = np.mean(empty_right)
# precision = 1 - np.mean((1 - prediction_empty) * (1 - right))
# recall = 1 - np.mean((1 - answer_empty) * (1 - right))
# precision_empty = 1 - np.mean((1 - prediction_empty) * (1 - empty_right))
# recall_empty = 1 - np.mean((1 - answer_empty) * (1 - empty_right))
return [accuracy] + color_rights
#print(prediction_grid.shape, answer_grid.shape)
if prediction_grid.shape == answer_grid.shape:
# print(prediction_grid)
# print(answer_grid)
mets = get_metrics(prediction_grid, answer_grid) + [1]
#print(mets)
return mets
# elif prediction_grid.shape[0] >= answer_grid.shape[0] and prediction_grid.shape[1] >= answer_grid.shape[1]:
# metrics = np.zeros((prediction_grid.shape[0] - answer_grid.shape[0] + 1,
# prediction_grid.shape[1] - answer_grid.shape[1] + 1, n_metrics))
# for i in range(prediction_grid.shape[0] - answer_grid.shape[0] + 1):
# for j in range(prediction_grid.shape[1] - answer_grid.shape[1] + 1):
# prediction = prediction_grid[i:(i + answer_grid.shape[0]), j:(j + answer_grid.shape[1])]
# metrics[i, j] = get_metrics(prediction, answer_grid)
#
# maxi, maxj = np.unravel_index(metrics[:, :, 0].argmax(), metrics[:, :, 0].shape)
# # mean_metrics = list(np.mean(np.mean(metrics, axis=0), axis=0)/2 + np.array(metrics[maxi, maxj])/2)
# size_proportion = answer_grid.shape[0] * answer_grid.shape[1] / prediction_grid.shape[0] / \
# prediction_grid.shape[1]
# metrics = metrics[maxi, maxj]
# return list(metrics) + [size_proportion]
#
# elif prediction_grid.shape[0] <= answer_grid.shape[0] and prediction_grid.shape[1] <= answer_grid.shape[1]:
# metrics = np.zeros((answer_grid.shape[0] - prediction_grid.shape[0] + 1,
# answer_grid.shape[1] - prediction_grid.shape[1] + 1, n_metrics))
# for i in range(answer_grid.shape[0] - prediction_grid.shape[0] + 1):
# for j in range(answer_grid.shape[1] - prediction_grid.shape[1] + 1):
# answer = answer_grid[i:(i + prediction_grid.shape[0]), j:(j + prediction_grid.shape[1])]
# metrics[i, j] = get_metrics(prediction_grid, answer)
#
# maxi, maxj = np.unravel_index(metrics[:, :, 0].argmax(), metrics[:, :, 0].shape)
# # mean_metrics = list(np.mean(np.mean(metrics, axis=0), axis=0)/2 + np.array(metrics[maxi, maxj])/2)
# size_proportion = answer_grid.shape[0] * answer_grid.shape[1] / prediction_grid.shape[0] / \
# prediction_grid.shape[1]
# metrics = metrics[maxi, maxj]
# return list(metrics) + [1/size_proportion]
# elif prediction_grid.shape[0] >= answer_grid.shape[0] and prediction_grid.shape[1] >= answer_grid.shape[1]:
# maxi, maxj = 0, 0
# maxcommon = 0
#
# for i in range(prediction_grid.shape[0] - answer_grid.shape[0] + 1):
# for j in range(prediction_grid.shape[1] - answer_grid.shape[1] + 1):
# for i_check, j_check in product(range(answer_grid.shape[0]), range(answer_grid.shape[1])):
# if prediction_grid[i + i_check, j + j_check] != answer_grid[i_check, j_check]:
# common = i_check * j_check
# break
# if i_check == answer_grid.shape[0] - 1 and j_check == answer_grid.shape[1] - 1:
# common = i_check * j_check
#
# if common > maxcommon:
# maxi = i
# maxj = j
# maxcommon = common
# if common == answer_grid.shape[0] * answer_grid.shape[1]:
# break
#
# metrics = get_metrics(prediction_grid[maxi:(maxi + answer_grid.shape[0]),
# maxj:(maxj + answer_grid.shape[1])], answer_grid)
#
# modified_pred = np.zeros_like(prediction_grid)
# modified_pred[:] = prediction_grid[:]
# modified_pred[maxi:(maxi + answer_grid.shape[0]), maxj:(maxj + answer_grid.shape[1])] = 0
# size_proportion = answer_grid.shape[0] * answer_grid.shape[1] / prediction_grid.shape[0] / prediction_grid.shape[1]
# #print(np.mean(modified_pred==0))
# return list(size_proportion*np.array(metrics)) + [1.0]
#
# elif prediction_grid.shape[0] <= answer_grid.shape[0] and prediction_grid.shape[1] <= answer_grid.shape[1]:
# maxi, maxj = 0, 0
# maxcommon = 0
#
# for i in range(answer_grid.shape[0] - prediction_grid.shape[0] + 1):
# for j in range(answer_grid.shape[1] - prediction_grid.shape[1] + 1):
# for i_check, j_check in product(range(prediction_grid.shape[0]), range(prediction_grid.shape[1])):
# #print(i_check, j_check)
# if answer_grid[i + i_check, j + j_check] != prediction_grid[i_check, j_check]:
# common = i_check * j_check
# break
# if i_check == prediction_grid.shape[0] - 1 and j_check == prediction_grid.shape[1] - 1:
# common = i_check * j_check
#
# if common > maxcommon:
# maxi = i
# maxj = j
# maxcommon = common
# if common == prediction_grid.shape[0] * prediction_grid.shape[1]:
# break
#
# metrics = get_metrics(answer_grid[maxi:(maxi + prediction_grid.shape[0]),
# maxj:(maxj + prediction_grid.shape[1])], prediction_grid)
#
# modified_pred = np.zeros_like(answer_grid)
# modified_pred[:] = answer_grid[:]
# modified_pred[maxi:(maxi + prediction_grid.shape[0]), maxj:(maxj + prediction_grid.shape[1])] = 0
# size_proportion = prediction_grid.shape[0] * prediction_grid.shape[1] / answer_grid.shape[0] / answer_grid.shape[1]
# return list(size_proportion*np.array(metrics)) + [1.0]
return list(np.array(get_metrics(answer_grid, answer_grid)) * 0) + [0]
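# Metric vector layout (length 12): [cell accuracy, one 0/1 flag per colour 0..9 marking
# that colour perfectly reproduced, shape-match flag]; mismatched shapes yield all zeros
# with a final 0.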
def validate_automata(task_global, params, n_iter_max, n_hidden):
def validate(task):
inp = task['input']
out = trace_param_automata(inp, params, n_iter_max, n_hidden)[-1][0]
metrics = compute_metrics(out, task['output'])
return metrics
metrics = []
for task in task_global['train']:
metrics.append(validate(task))
mean_metrics = list(np.round(np.mean(metrics, axis=0), 3))
min_metrics = list(np.round(np.min(metrics, axis=0), 3))
return tuple(mean_metrics + list(np.array(metrics)[:, 0].reshape(-1)))#tuple(mean_metrics + min_metrics)
def product_better(a, b):
""" Return True iff the two tuples a and b respect a<b for the partial order. """
    a = np.array(a)
    b = np.array(b)
    return (a >= b).all() and (a > b).any()
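# Pareto-dominance check, e.g. product_better((1.0, 0.5), (1.0, 0.4)) is True while
# product_better((1.0, 0.4), (0.9, 0.5)) is False (those two tuples are incomparable).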
def generate_random_ca(all_colors, best_candidates, temp, config, length=1):
rules = []
for _ in range(length):
rules.append(get_random_ca_rule(all_colors, best_candidates, temp, config))
return rules
def generate_random_global(all_colors, best_candidates, temp, config, length=1):
rules = []
for _ in range(length):
rules.append(get_random_global_rule(all_colors, best_candidates, temp, config))
return rules
def generate_population(all_colors, config, size=64, length=1):
population = []
for i in range(size):
split_rule = get_random_split_rule(all_colors, {}, 0, config)
merge_rule = get_random_merge_rule(all_colors, {}, 0, config)
global_rules = generate_random_global(all_colors, {}, 0, config, np.random.choice(2, p=[0.2, 0.8]))
ca_rules = generate_random_ca(all_colors, {}, 0, config, np.random.choice(2, p=[0.2, 0.8]))
population.append([global_rules, ca_rules, split_rule, merge_rule])
return population
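# Each individual in the population is a 4-element list
# [global_rules, ca_rules, split_rule, merge_rule]; with the probabilities above each rule
# list starts either empty (p=0.2) or with a single rule (p=0.8).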
from pathlib import Path
import json
train_path = data_path / 'training'
valid_path = data_path / 'evaluation'
test_path = data_path / 'test'
submission_path = data_path / 'public_submission.csv'
train_tasks = { task.stem: json.load(task.open()) for task in train_path.iterdir() }
valid_tasks = { task.stem: json.load(task.open()) for task in valid_path.iterdir() }
test_path = { task.stem: json.load(task.open()) for task in test_path.iterdir() }  # note: test_path is rebound here from a Path to the dict of loaded test tasks
train_task_ids = np.sort(list(train_tasks.keys()))
valid_task_ids = np.sort(list(valid_tasks.keys()))
test_task_ids = np.sort(list(test_path.keys()))
from functools import partial
from itertools import product
from sklearn.preprocessing import MinMaxScaler
def change_color(colors_in, colors_out, grid):
out_grid = np.zeros_like(grid)
out_grid[:] = grid[:]
for i in range(grid.shape[0]):
for j in range(grid.shape[1]):
for color_in, color_out in zip(colors_in, colors_out):
if grid[i, j] == color_in:
out_grid[i, j] = color_out
break
return out_grid
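# Illustrative example (not executed): change_color([1, 2], [2, 1], grid) swaps colours 1
# and 2 cell-wise; colours not listed in colors_in are left untouched.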
def reduce_grid(grid_rows, grid_columns, color, grid):
out_grid = np.zeros((len(grid_rows), len(grid_columns)), dtype=np.int)
for i, j in product(range(len(grid_rows)), range(len(grid_columns))):
out_grid[i, j] = grid[grid_rows[i][0], grid_columns[j][0]]
return out_grid
def unreduce_grid(line_rows, line_columns, n, m, grid_rows, grid_columns, color, grid):
out_grid = np.zeros((n, m), dtype=np.int)
for i in range(len(line_rows)):
out_grid[line_rows[i]] = color
for j in range(len(line_columns)):
out_grid[:, line_columns[j]] = color
for i, j in product(range(len(grid_rows)), range(len(grid_columns))):
if grid[i, j] != 0:
for i_gr_row in list(grid_rows[i]):
for j_gr_col in list(grid_columns[j]):
out_grid[i_gr_row, j_gr_col] = grid[i, j]
return out_grid
def get_color_features(input_grid):
colors = np.unique(input_grid)
colors_numbers = np.array([np.mean(input_grid == color) for color in colors]).reshape((-1, 1))
# communities_1 = get_graph_communities(input_grid)
#
# communities_2 = get_connectivity_info(input_grid)
#
# communities_1 = sorted([sorted(com) for com in communities_1])
# communities_2 = sorted([sorted(com) for com in communities_2])
#
# assert all((a == b) for a, b in zip(communities_1, communities_2))
# colors_communities = [np.sum([input_grid[list(com)[0]] == color for com in communities]) / len(communities) for
# color in colors]
#colors_communities = np.array(colors_communities).reshape((-1, 1))
colors_borders = np.array([np.mean(input_grid[0] == color) for color in colors]).reshape((-1, 1))
colors_borders += np.array([np.mean(input_grid[-1] == color) for color in colors]).reshape((-1, 1))
colors_borders += np.array([np.mean(input_grid[:, 0] == color) for color in colors]).reshape((-1, 1))
colors_borders += np.array([np.mean(input_grid[:, -1] == color) for color in colors]).reshape((-1, 1))
colors_borders /= np.sum(colors_borders)
colors_features = np.concatenate([colors_numbers, colors_borders], axis=1)
return colors_features, colors
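# get_color_features returns an (n_colors, 2) matrix per grid: column 0 is the fraction of
# cells of each colour, column 1 the share of border cells of that colour (normalised
# across colours), together with the array of colours in the same order.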
def get_train_color_features(task):
colors_in_train = []
colors_in_each_train = []
for uni_task in task['train']:
inp = uni_task['input']
colors_unique, color_numbers = np.unique(inp, return_counts=True)
colors_in_train += list(colors_unique)
colors_in_each_train.append(colors_unique)
max_color_task = np.argmax([clrs.shape[0] for clrs in colors_in_each_train])
colors = colors_in_each_train[max_color_task]
input_grid = task['train'][max_color_task]['input']
train_colors_features, _ = get_color_features(input_grid)
scaler = MinMaxScaler()
train_colors_features = scaler.fit_transform(train_colors_features)
sums = np.sum(train_colors_features, axis=1)
train_colors_features = train_colors_features[np.argsort(sums)[::-1]]
return train_colors_features, scaler, np.unique(colors_in_train)
def build_mapping(task, config):
reverse_functions = []
for part in ['train', 'test']:
for uni_task in task[part]:
if part == 'test':
reverse_functions.append({})
if config['reduce_grid']:
can_reduce_grid = True
for uni_task in task['train']:
if uni_task['input'].shape != uni_task['output'].shape:
can_reduce_grid = False
break
inp = uni_task['input']
colors_rows = []
line_rows = []
for i in range(inp.shape[0]):
if (inp[i] == inp[i][0]).all():
colors_rows.append(inp[i][0])
line_rows.append(i)
row_colors, row_counts = np.unique(colors_rows, return_counts=True)
colors_columns = []
line_columns = []
for i in range(inp.shape[1]):
if (inp[:, i] == inp[0, i]).all():
colors_columns.append(inp[0, i])
line_columns.append(i)
column_colors, column_counts = np.unique(colors_columns, return_counts=True)
if row_colors.shape[0] != 1 or column_colors.shape[0] != 1 or \
row_counts[0] < 2 or column_counts[0] < 2:
can_reduce_grid = False
break
line_rows.append(inp.shape[0])
line_rows = [-1] + line_rows
line_columns.append(inp.shape[1])
line_columns = [-1] + line_columns
for i in range(len(line_rows) - 1):
if (line_rows[i] + 1) < line_rows[i + 1]:
for j in range(len(line_columns) - 1):
if (line_columns[j] + 1) < line_columns[j + 1]:
color = inp[line_rows[i] + 1][line_columns[j] + 1]
if not (inp[(line_rows[i] + 1):(line_rows[i + 1]),
(line_columns[j] + 1):(line_columns[j + 1])] == color).all():
can_reduce_grid = False
break
for i in range(1, len(line_rows) - 1):
if not (uni_task['input'][line_rows[i]] == uni_task['output'][line_rows[i]]).all():
can_reduce_grid = False
break
for j in range(1, len(line_columns) - 1):
if not (uni_task['input'][:, line_columns[j]] == uni_task['output'][:, line_columns[j]]).all():
can_reduce_grid = False
break
if not can_reduce_grid:
break
if can_reduce_grid:
for part in ['train', 'test']:
for i_task, uni_task in enumerate(task[part]):
inp = uni_task['input']
colors_rows = []
line_rows = []
for i in range(inp.shape[0]):
if (inp[i] == inp[i][0]).all():
colors_rows.append(inp[i][0])
line_rows.append(i)
row_colors, row_counts = np.unique(colors_rows, return_counts=True)
colors_columns = []
line_columns = []
for i in range(inp.shape[1]):
if (inp[:, i] == inp[0, i]).all():
colors_columns.append(inp[0, i])
line_columns.append(i)
column_colors, column_counts = np.unique(colors_columns, return_counts=True)
line_rows.append(inp.shape[0])
line_rows = [-1] + line_rows
line_columns.append(inp.shape[1])
line_columns = [-1] + line_columns
grid_rows = []
grid_columns = []
for i in range(len(line_rows) - 1):
if (line_rows[i] + 1) < line_rows[i + 1]:
grid_rows.append(np.arange(line_rows[i] + 1, line_rows[i + 1]))
for j in range(len(line_columns) - 1):
if (line_columns[j] + 1) < line_columns[j + 1]:
grid_columns.append(np.arange(line_columns[j] + 1, line_columns[j + 1]))
uni_task['input'] = reduce_grid(grid_rows, grid_columns, row_colors[0], inp)
if part == 'train':
uni_task['output'] = reduce_grid(grid_rows, grid_columns, row_colors[0], uni_task['output'])
if part == 'test':
reverse_functions[i_task]['unreduce_grid'] = partial(unreduce_grid, line_rows[1:-1],
line_columns[1:-1], inp.shape[0],
inp.shape[1],
grid_rows, grid_columns, row_colors[0])
if config['map_color']:
        go_map_color = True  # note: initialised True, so the colour mapping below runs whenever config['map_color'] is set
train_colors_features, scaler, unique_train_colors = get_train_color_features(task)
for uni_task in task['test']:
inp = uni_task['input']
colors_test = list(np.unique(inp))
for color in colors_test:
if not color in unique_train_colors:
go_map_color = True
if go_map_color:
colors_in_all = [[], []]
colors_out_all = [[], []]
for i_part, part in enumerate(['train', 'test']):
for i_task, uni_task in enumerate(task[part]):
input_grid = uni_task['input']
colors_features, colors = get_color_features(input_grid)
proper_colors = list(np.arange(train_colors_features.shape[0]))
colors_features = scaler.transform(colors_features)
colors_in = []
colors_out = []
for i, color in enumerate(colors):
color_features = colors_features[i].reshape((1, -1))
distances = np.sum(np.power(train_colors_features - color_features, 2), axis=1)
closests = list(np.argsort(distances))
for closest in closests:
if closest in proper_colors:
proper_colors.remove(closest)
colors_in.append(color)
colors_out.append(closest)
break
if part == 'train':
colors_in_all[i_part].append(colors_in)
colors_out_all[i_part].append(colors_out)
if part == 'test':
colors_in_all[i_part].append(colors_out)
colors_out_all[i_part].append(colors_in)
reverse_functions[i_task]['train_colors_in'] = colors_out
reverse_functions[i_task]['train_colors_out'] = colors_in
unique_test_colors = []
for i_task, uni_task in enumerate(task['train']):
output_grid = uni_task['output']
colors = np.unique(output_grid)
for color in colors:
if not color in unique_train_colors:
unique_test_colors.append(color)
unique_test_colors = np.unique(unique_test_colors)
colors_out = 9 - np.arange(unique_test_colors.shape[0])
for part in ['train', 'test']:
for i_task, uni_task in enumerate(task[part]):
if part == 'train':
uni_task['input'] = change_color(colors_in_all[0][i_task], colors_out_all[0][i_task],
uni_task['input'])
colors_in_all[0][i_task] += list(unique_test_colors)
colors_out_all[0][i_task] += list(colors_out)
uni_task['output'] = change_color(colors_in_all[0][i_task], colors_out_all[0][i_task],
uni_task['output'])
if part == 'test':
reverse_functions[i_task]['test_colors_in'] = list(colors_out)
reverse_functions[i_task]['test_colors_out'] = list(unique_test_colors)
if config['find_wall']:
for i_part, part in enumerate(['train', 'test']):
for i_task, uni_task in enumerate(task[part]):
input_grid = uni_task['input']
colors_features, colors = get_color_features(input_grid)
sums = np.sum(colors_features, axis=1)
color_wall = colors[np.argsort(sums)[::-1][0]]
#print(color_wall)
if color_wall == 0:
continue
colors_in = [0, color_wall]
colors_out = [color_wall, 0]
uni_task['input'] = change_color(colors_in, colors_out, input_grid)
if part == 'train':
uni_task['output'] = change_color(colors_in, colors_out, uni_task['output'])
if part == 'test':
reverse_functions[i_task]['return_wall'] = partial(change_color, colors_out,
colors_in)
return task, reverse_functions
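# build_mapping returns the (possibly rewritten) task plus one dict of reverse functions per
# test case; the keys used downstream are 'unreduce_grid', 'train_colors_in'/'train_colors_out',
# 'test_colors_in'/'test_colors_out' and 'return_wall', and they undo the preprocessing on
# the predictions.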
def update_pool(task, best_candidates, candidate, num_params):
start = time.time()
score = validate_automata(task, candidate, 25, 1)
is_uncomp = True
updated_keys = False
best_candidates_items = list(best_candidates.items())
for best_score, best_candidates_score in best_candidates_items:
if product_better(score, best_score):
# Remove previous best candidate and add the new one
del best_candidates[best_score]
best_candidates[score] = [candidate]
is_uncomp = False # The candidates are comparable
updated_keys = True
if product_better(best_score, score):
is_uncomp = False # The candidates are comparable
if is_uncomp: # The two candidates are uncomparable
best_candidates[score].append(candidate)
best_candidates[score] = sorted(best_candidates[score], key=lambda x: len(x[0]) + len(x[1]))
if len(best_candidates[score]) > num_params:
best_candidates[score] = [cand for cand in best_candidates[score] if
(len(cand[0]) + len(cand[1])) <= len(best_candidates[score][0][0]) + len(best_candidates[score][0][1]) + 2]
# best_candidates[score] = best_candidates[score][:num_params]
return updated_keys
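# update_pool keeps a Pareto front of candidates keyed by their score tuple: a new candidate
# evicts every key it dominates, joins an existing key when incomparable, and within a key
# the shortest programs (fewest global + CA rules) are preferred.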
def generate_asexual_part(best_candidates, temp, part, generate_func, all_colors, config, alpha_mutate_rule_same_type):
if type(part) == list:
if np.random.rand() < (1 / (len(part) + 1))**0.75:
part.append(generate_func(all_colors, best_candidates, temp, config))
else:
index = np.random.randint(len(part))
if np.random.rand() < 0.3:
part = part[:index] + part[(index + 1):]
else:
r_type = None
if np.random.rand() < alpha_mutate_rule_same_type:
r_type = part[index]['type']
if np.random.rand() < 0.5:
part[index] = generate_func(all_colors, best_candidates, temp, config, r_type)
else:
part = part[:index] + [generate_func(all_colors, best_candidates, temp, config, r_type)] + part[index:]
else:
part = generate_func(all_colors, best_candidates, temp, config)
return part
def generate_sexual_part(best_candidates, temp, first, second, generate_func, all_colors, config, alpha_sexual_mutate,
alpha_mutate_rule_same_type, alpha_mutate_rule_same_type_one_parameter):
if type(first) == list:
if len(first) == 0 and len(second) == 0:
child = []
elif len(first) == 0:
split2 = np.random.randint(len(second))
if np.random.rand() <= 0.5:
child = second[split2:]
else:
child = second[:split2]
elif len(second) == 0:
split1 = np.random.randint(len(first))
if np.random.rand() <= 0.5:
child = first[split1:]
else:
child = first[:split1]
else:
split1 = np.random.randint(len(first))
split2 = np.random.randint(len(second))
if np.random.rand() <= 0.5:
child = first[:split1] + second[split2:]
else:
child = second[:split2] + first[split1:]
if np.random.rand() < alpha_sexual_mutate:
index = np.random.randint(len(child) + 1)
if index == len(child):
child.append(generate_func(all_colors, best_candidates, temp, config))
else:
r_type = None
same_type = np.random.rand() < alpha_mutate_rule_same_type
one_param_modification = np.random.rand() < alpha_mutate_rule_same_type_one_parameter
if same_type:
r_type = child[index]['type']
same_type_rule = generate_func(all_colors, best_candidates, temp, config, r_type)
if not one_param_modification:
child[index] = same_type_rule
else:
key = random.choice(list(child[index].keys()))
child[index][key] = same_type_rule[key]
else:
if np.random.rand() < 0.5:
child[index] = generate_func(all_colors, best_candidates, temp, config)
else:
child = child[:index] + [generate_func(all_colors, best_candidates, temp, config, r_type)] + child[
index:]
else:
if np.random.rand() < 0.5:
child = copy.deepcopy(first)
else:
child = copy.deepcopy(second)
return child
def generate_asexual_child(best_candidates, temp, parent, all_colors, config, alpha_mutate_rule_same_type):
child = copy.deepcopy(parent)
gen_functions = [get_random_global_rule, get_random_ca_rule, get_random_split_rule, get_random_merge_rule]
idx_to_mutate = np.random.choice(len(child), p =[0.4, 0.4, 0.1, 0.1])
child[idx_to_mutate] = generate_asexual_part(best_candidates, temp, child[idx_to_mutate], gen_functions[idx_to_mutate],
all_colors, config, alpha_mutate_rule_same_type)
return child
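# The asexual child above mutates exactly one of the four program parts,
# chosen with weights [0.4, 0.4, 0.1, 0.1], so global rules and CA rules are
# mutated far more often than the split and merge rules.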
def generate_sexual_child(best_candidates, temp, first, second, all_colors, config, alpha_sexual_mutate,
alpha_mutate_rule_same_type, alpha_mutate_rule_same_type_one_parameter):
gen_functions = [get_random_global_rule, get_random_ca_rule, get_random_split_rule, get_random_merge_rule]
what_to_mutate = np.random.choice(len(gen_functions), p=[0.5, 0.5, 0.0, 0.0])
child = []
for idx_to_mutate, gen_func in enumerate(gen_functions):
child.append(generate_sexual_part(best_candidates, temp, first[idx_to_mutate], second[idx_to_mutate],
gen_func, all_colors, config,
(what_to_mutate==idx_to_mutate) * alpha_sexual_mutate, alpha_mutate_rule_same_type,
alpha_mutate_rule_same_type_one_parameter))
return child
def post_solved_process(task, solved, all_colors, config, reverse_functions, config_mapping):
test_preds = []
best_candidates = defaultdict(list)
update_pool(task, best_candidates, solved, 1)
start_time = time.time()
while time.time() - start_time < 30:
best_scores = list(best_candidates.keys())
first_score = random.choice(best_scores)
idx = np.random.choice(len(list(best_candidates[first_score])))
first = list(best_candidates[first_score])[idx]
child = generate_asexual_child(best_candidates, 0.5, first, all_colors, config, 0.)
update_pool(task, best_candidates, child, 1)
train_colors_features, scaler, _ = get_train_color_features(task)
print(list(best_candidates.values())[0][0])
for i_task, uni_task in enumerate(task['test']):
predictions = []
for solved in list(best_candidates.values())[0]:
if reverse_functions[i_task].get('train_colors_in', None):
inp = uni_task['input']
colors_unique, color_numbers = np.unique(inp, return_counts=True)
input_grid = uni_task['input']
colors_features, colors = get_color_features(input_grid)
colors_features = scaler.transform(colors_features)
colors_in = []
colors_out = []
if colors_unique.shape[0] <= train_colors_features.shape[0]:
proper_colors = list(np.arange(train_colors_features.shape[0]))
for i, color in enumerate(colors):
color_features = colors_features[i].reshape((1, -1))
distances = np.sum(np.power(train_colors_features - color_features, 2), axis=1)
closests = list(np.argsort(distances))
for closest in closests:
if closest in proper_colors:
proper_colors.remove(closest)
colors_in.append(color)
colors_out.append(closest)
break
colors_in += list(reverse_functions[i_task]['train_colors_out'])
colors_out += list(reverse_functions[i_task]['train_colors_in'])
input_task = change_color(colors_in, colors_out, uni_task['input'])
trace = trace_param_automata(input_task, solved, 25, 0)
t_pred = trace[-1][0]
if not reverse_functions[i_task].get('unreduce_grid', None) is None:
t_pred = reverse_functions[i_task]['unreduce_grid'](t_pred)
if not reverse_functions[i_task].get('train_colors_in', None) is None:
colors_in = reverse_functions[i_task]['train_colors_in'] + reverse_functions[i_task][
'test_colors_in']
colors_out = reverse_functions[i_task]['train_colors_out'] + reverse_functions[i_task][
'test_colors_out']
t_pred = change_color(colors_in, colors_out, t_pred)
predictions.append(t_pred)
else:
closests_to = [[] for _ in range(train_colors_features.shape[0])]
for i, color in enumerate(colors):
color_features = colors_features[i].reshape((1, -1))
distances = np.sum(np.power(train_colors_features - color_features, 2), axis=1)
closest = np.argsort(distances)[0]
closests_to[closest].append(color)
for i in range(len(closests_to)):
if len(closests_to[i]) == 0:
closests_to[i] = [-1]
answers = []
for color_map in product(*closests_to):
input_task = np.zeros_like(uni_task['input'])
for i, color in enumerate(list(color_map)):
input_task[uni_task['input'] == color] = i
colors_in = np.array(list(color_map) + reverse_functions[i_task]['test_colors_out'])
colors_out = list(np.arange(colors_in.shape[0])) + reverse_functions[i_task]['test_colors_in']
trace = trace_param_automata(input_task, solved, 25, 0)
t_pred = trace[-1][0]
t_pred = change_color(colors_out, colors_in, t_pred)
if not reverse_functions[i_task].get('unreduce_grid', None) is None:
t_pred = reverse_functions[i_task]['unreduce_grid'](t_pred)
answers.append(t_pred)
shapes = [ans.shape for ans in answers]
diff_shapes, counts = np.unique(shapes, return_counts=True, axis=0)
best_shape = diff_shapes[np.argmax(counts)]
answers = [ans for ans in answers if ans.shape == tuple(best_shape)]
final_answer = np.zeros((10, best_shape[0], best_shape[1]))
for i in range(10):
for ans in answers:
final_answer[i][ans == i] += 1
final_answer = np.argmax(final_answer, axis=0)
predictions.append(final_answer)
else:
inp = uni_task['input']
trace = trace_param_automata(inp, solved, 25, 0)
t_pred = trace[-1][0]
if not reverse_functions[i_task].get('unreduce_grid', None) is None:
t_pred = reverse_functions[i_task]['unreduce_grid'](t_pred)
if not reverse_functions[i_task].get('return_wall', None) is None:
t_pred = reverse_functions[i_task]['return_wall'](t_pred)
predictions.append(t_pred)
shapes = [ans.shape for ans in predictions]
diff_shapes, counts = np.unique(shapes, return_counts=True, axis=0)
best_shape = diff_shapes[np.argmax(counts)]
predictions = [ans for ans in predictions if ans.shape == tuple(best_shape)]
unique_preds, nums = np.unique(np.array(predictions), return_counts=True, axis=0)
indexes = np.argsort(nums)[::-1]
preds = unique_preds[indexes[:3]]
preds = [pr for pr in preds]
test_preds.append(preds)
return test_preds
def train_model(name, task, params, time_for_task, config_mapping):
alpha_asexual_mutation = params['alpha_asexual_mutation']
alpha_sexual_mutate = params['alpha_sexual_mutate']
alpha_mutate_rule_same_type = params['alpha_mutate_rule_same_type']
alpha_mutate_rule_same_type_one_parameter = params['alpha_mutate_rule_same_type_one_parameter']
add_random = params['add_random']
num_params = params['num_params']
start_time = time.time()
param_name = str([alpha_asexual_mutation,
alpha_sexual_mutate,
alpha_mutate_rule_same_type,
alpha_mutate_rule_same_type_one_parameter,
add_random])
task, reverse_functions = build_mapping(task, config_mapping)
all_colors, config = get_task_metadata(task)
print(f'Trying to solve {name}... {param_name}')
best_candidates = defaultdict(list)
test_preds = []
population = generate_population(all_colors, config, size=2500)
mode = 'test'
# #
# cand = [[{'type': 'flip', 'macro_type': 'global_rule', 'apply_to': 'index', 'apply_to_index': 5, 'how': 'hor'}],
# [], {'type': 'macro_multiply', 'k': (3, 3)}, {'type': 'cellwise_or'}]
# #
#update_pool(task, best_candidates, cand, num_params)
# 1/0
for cand in population:
update_pool(task, best_candidates, cand, num_params)
# print('Population generated')
i_iteration = 0
updated = 0
num_successful_asexuals = 0
num_asexuals = 0
num_successful_sexuals = 0
num_sexuals = 0
while True:
was_asexual = False
was_sexual = False
temp = min(0.9, (time.time() - start_time) / 500)
if np.random.rand() < add_random:
split_rule = get_random_split_rule(all_colors, {}, 0, config)
merge_rule = get_random_merge_rule(all_colors, {}, 0, config)
child = [generate_random_global(all_colors, best_candidates, temp, config),
generate_random_ca(all_colors, best_candidates, temp, config), split_rule, merge_rule]
else:
best_scores = list(best_candidates.keys())
first_score = random.choice(best_scores)
first = random.choice(list(best_candidates[first_score]))
if np.random.rand() < alpha_asexual_mutation:
child = generate_asexual_child(best_candidates, temp, first, all_colors, config,
alpha_mutate_rule_same_type)
was_asexual = True
else:
second_score = random.choice(best_scores)
second = random.choice(list(best_candidates[second_score]))
child = generate_sexual_child(best_candidates, temp, first, second, all_colors, config,
alpha_sexual_mutate,
alpha_mutate_rule_same_type,
alpha_mutate_rule_same_type_one_parameter)
was_sexual = True
#print(was_asexual, was_sexual)
#print(child)
updated_keys = update_pool(task, best_candidates, child, num_params)
if was_asexual:
num_asexuals += 1
if updated_keys:
num_successful_asexuals += 1
elif was_sexual:
num_sexuals += 1
if updated_keys:
num_successful_sexuals += 1
if i_iteration % 100 == 0:
solved = None
max_scores = np.zeros(len(list(best_candidates.keys())[0]))
for score, params in best_candidates.items():
max_scores = np.maximum(max_scores, score)

"""
Gradient estimators to numerically approximate gradients.
"""
import logging
import warnings
import numpy as np
from .utils import batch_crossentropy
from . import nprng
from abc import abstractmethod, ABCMeta
class GradientEstimatorBase:
__metaclass__ = ABCMeta
@abstractmethod
def estimate_one(self, pred_fn, x, label, bounds):
raise NotImplementedError()
def estimate(self, pred_fn, xs, labels, bounds):
assert len(xs) == len(labels)
gradients = []
for x, label in zip(xs, labels):
gradients.append(self.estimate_one(pred_fn, x, label, bounds))
gradients = np.array(gradients)
return gradients
class CoordinateWiseGradientEstimator(GradientEstimatorBase):
"""Implements a simple gradient-estimator using
the coordinate-wise finite-difference method.
"""
def __init__(self, epsilon, clip=True):
self._epsilon = epsilon
self.clip = clip
def _get_noise(self, shape, dtype):
N = np.prod(shape)
noise = np.eye(N, N, dtype=dtype)
noise = noise.reshape((N,) + shape)
noise = np.concatenate([noise, -noise])
return noise
def estimate_one(self, pred_fn, x, label, bounds):
noise = self._get_noise(x.shape, x.dtype)
N = len(noise)
min_, max_ = bounds
scaled_epsilon = self._epsilon * (max_ - min_)
theta = x + scaled_epsilon * noise
if self.clip:
theta = np.clip(theta, min_, max_)
"""
DESCRIPTION
Preprocesses audio data before sending to Neural Network
See demo in in main()
MIT License
Copyright (c) 2018 The-Instrumental-Specialists
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import neuralnet_02 as NN
import numpy as np
import os
import glob
import json
import time
import scipy
import matplotlib.pylab as plt
import scipy.io.wavfile as wavfile
import scipy.fftpack
from scipy.fftpack import dct
def getMax(array_list):
"""Returns a tuple (index,value) of the maximum in an 1D array or list"""
m = array_list[0]
m_index = 0
for i,value in enumerate(array_list):
if value > m:
m = value
m_index = i
return (m_index,m)
def processFile(filename,length = 256,q=1,fs_in=8000,divide=4,plot=False):
"""returns one sided FFT amplitudes of filename
filename (string): ex) 'sax.wav'
length (int): Number of datapoints of one-sided fft (must be even, preferably a power of 2)
q (int): (optional argument) Downsampling Rate
fs_in (int): (optional argument) throw ValueError if fs of filename != fs_in
divide (int): (optional argument) 1/divide*Nsamples is taken from FFT (preferably even)
plot (bool): (optional argument) plots the one sided FFT if True, otherwise does not plot
Note: length < total_time*fs//(2*q*divide)
Ex) length = 256 < (0.25sec)*(44100Hz)//(2*4*2) = 689
"""
length = length*divide
#fs = sample rate, sound = multichannel sound signal
try:
fs1, sound = wavfile.read(filename)
except ValueError:
print(str(filename) + ' failed to process')
return 'failed'
if fs1 != fs_in:
raise ValueError('Sampling rate should be ' + str(fs_in) + ' for: ' + filename)
sig1 = sound[:,0] #left channel
pre_emphasis = 0.97
sig1 = np.append(sig1[0], sig1[1:] - pre_emphasis * sig1[:-1])
fs2, sig2 = downsample(sig1,fs1,q)
N2 = len(sig2)
sig3 = sig2[N2//2-length:N2//2+length]
#print(len(sig3))
FFT = abs(scipy.fft(sig3))
FFT_side = FFT[range(len(FFT)//2)]
#freqs = scipy.fftpack.fftfreq(sig3.size, 1/fs2)
#plt.plot(freqs,FFT)
if len(FFT_side) != length:
print('ERROR MESSAGE DETAILS')
print('filename: ' + filename)
print('length = ' + str(length))
print('fs_in = ' + str(fs_in))
print('q = ' + str(q))
print('divide = ' + str(divide))
total_time = len(sig1)/fs1
print('total_time = ' + str(total_time))
print('Please check: length < total_time*fs//(2*q)')
print('Check: ' + str(length) + ' < ' + str(total_time*fs1//(2*q)))
raise ValueError('Length FFT_side != length: ' + str(len(FFT_side)) + ' != ' + str(length))
FFT_log = []
# log-scale the FFT magnitudes (normalized to the maximum below)
for value in FFT_side:
value = np.log(value)
FFT_log.append(value)
max_val = getMax(FFT_log)[1]
FFT_norm = []
for value in FFT_log:
FFT_norm.append(value/max_val)
FFT_side = np.array(FFT_norm)
import datetime as dt
from unittest import SkipTest
import numpy as np
from holoviews.core import NdOverlay
from holoviews.core.options import Cycle
from holoviews.core.util import pd
from holoviews.element import Points
from holoviews.streams import Stream
from .testplot import TestBokehPlot, bokeh_renderer
from ..utils import ParamLogStream
try:
from bokeh.models import FactorRange, LinearColorMapper, CategoricalColorMapper
from bokeh.models import Scatter
except:
pass
class TestPointPlot(TestBokehPlot):
def test_points_colormapping(self):
points = Points(np.random.rand(10, 4), vdims=['a', 'b']).opts(plot=dict(color_index=3))
self._test_colormapping(points, 3)
def test_points_colormapping_with_nonselection(self):
opts = dict(plot=dict(color_index=3),
style=dict(nonselection_color='red'))
points = Points(np.random.rand(10, 4), vdims=['a', 'b']).opts(**opts)
self._test_colormapping(points, 3)
def test_points_colormapping_categorical(self):
points = Points([(i, i*2, i*3, chr(65+i)) for i in range(10)],
vdims=['a', 'b']).opts(plot=dict(color_index='b'))
plot = bokeh_renderer.get_plot(points)
plot.initialize_plot()
cmapper = plot.handles['color_mapper']
self.assertIsInstance(cmapper, CategoricalColorMapper)
self.assertEqual(cmapper.factors, list(points['b']))
def test_points_color_selection_nonselection(self):
opts = dict(color='green', selection_color='red', nonselection_color='blue')
points = Points([(i, i*2, i*3, chr(65+i)) for i in range(10)],
vdims=['a', 'b']).opts(style=opts)
plot = bokeh_renderer.get_plot(points)
glyph_renderer = plot.handles['glyph_renderer']
self.assertEqual(glyph_renderer.glyph.fill_color, 'green')
self.assertEqual(glyph_renderer.glyph.line_color, 'green')
self.assertEqual(glyph_renderer.selection_glyph.fill_color, 'red')
self.assertEqual(glyph_renderer.selection_glyph.line_color, 'red')
self.assertEqual(glyph_renderer.nonselection_glyph.fill_color, 'blue')
self.assertEqual(glyph_renderer.nonselection_glyph.line_color, 'blue')
def test_points_alpha_selection_nonselection(self):
opts = dict(alpha=0.8, selection_alpha=1.0, nonselection_alpha=0.2)
points = Points([(i, i*2, i*3, chr(65+i)) for i in range(10)],
vdims=['a', 'b']).opts(style=opts)
plot = bokeh_renderer.get_plot(points)
glyph_renderer = plot.handles['glyph_renderer']
self.assertEqual(glyph_renderer.glyph.fill_alpha, 0.8)
self.assertEqual(glyph_renderer.glyph.line_alpha, 0.8)
self.assertEqual(glyph_renderer.selection_glyph.fill_alpha, 1)
self.assertEqual(glyph_renderer.selection_glyph.line_alpha, 1)
self.assertEqual(glyph_renderer.nonselection_glyph.fill_alpha, 0.2)
self.assertEqual(glyph_renderer.nonselection_glyph.line_alpha, 0.2)
def test_points_alpha_selection_partial(self):
opts = dict(selection_alpha=1.0, selection_fill_alpha=0.2)
points = Points([(i, i*2, i*3, chr(65+i)) for i in range(10)],
vdims=['a', 'b']).opts(style=opts)
plot = bokeh_renderer.get_plot(points)
glyph_renderer = plot.handles['glyph_renderer']
self.assertEqual(glyph_renderer.glyph.fill_alpha, 1.0)
self.assertEqual(glyph_renderer.glyph.line_alpha, 1.0)
self.assertEqual(glyph_renderer.selection_glyph.fill_alpha, 0.2)
self.assertEqual(glyph_renderer.selection_glyph.line_alpha, 1)
def test_batched_points(self):
overlay = NdOverlay({i: Points(np.arange(i)
import logging
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
from hdrbp._util import (
basic_repr,
basic_str,
compute_correlation,
compute_diversification_ratio,
compute_drawdowns,
compute_gini,
compute_prices,
compute_risk_contributions,
compute_turnover,
compute_variance,
count_dates_per_year,
count_years,
)
logger = logging.getLogger(__name__)
@basic_str
@basic_repr
class MetricCalculator(ABC):
@property
def name(self):
return repr(self)
@abstractmethod
def calculate(self, result: pd.DataFrame) -> float:
pass
class GeometricMeanReturn(MetricCalculator):
def __init__(self, annualized: bool = False) -> None:
self._annualized = annualized
def calculate(self, result: pd.DataFrame) -> float:
logger.debug(f"{self}: Calculating metric")
result = _filter_valid_returns(result)
returns = result["return"].values
log_returns = np.log1p(returns)
import os
import numpy as np
from sklearn.cluster import KMeans
from scipy.stats import norm
from matplotlib import pyplot as plt
import pickle as pkl
class NDB:
def __init__(self, training_data=None, number_of_bins=100, significance_level=0.05, z_threshold=None,
whitening=False, max_dims=None, cache_folder=None):
"""
NDB Evaluation Class
:param training_data: Optional - the training samples - array of m x d floats (m samples of dimension d)
:param number_of_bins: Number of bins (clusters) default=100
:param significance_level: The statistical significance level for the two-sample test
:param z_threshold: Allow defining a threshold in terms of difference/SE for defining a bin as statistically different
:param whitening: Perform data whitening - subtract mean and divide by per-dimension std
:param max_dims: Max dimensions to use in K-means. By default derived automatically from d
:param bins_file: Optional - file to write / read-from the clusters (to avoid re-calculation)
"""
self.number_of_bins = number_of_bins
self.significance_level = significance_level
self.z_threshold = z_threshold
self.whitening = whitening
self.ndb_eps = 1e-6
self.training_mean = 0.0
self.training_std = 1.0
self.max_dims = max_dims
self.cache_folder = cache_folder
self.bin_centers = None
self.bin_proportions = None
self.ref_sample_size = None
self.used_d_indices = None
self.results_file = None
self.test_name = 'ndb_{}_bins_{}'.format(self.number_of_bins, 'whiten' if self.whitening else 'orig')
self.cached_results = {}
if self.cache_folder:
self.results_file = os.path.join(cache_folder, self.test_name+'_results.pkl')
if os.path.isfile(self.results_file):
# print('Loading previous results from', self.results_file, ':')
self.cached_results = pkl.load(open(self.results_file, 'rb'))
# print(self.cached_results.keys())
if training_data is not None or cache_folder is not None:
bins_file = None
if cache_folder:
os.makedirs(cache_folder, exist_ok=True)
bins_file = os.path.join(cache_folder, self.test_name+'.pkl')
self.construct_bins(training_data, bins_file)
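# Typical usage (sketch; both arrays are m x d):
#   ndb = NDB(training_data=train_samples, number_of_bins=100, whitening=True)
#   results = ndb.evaluate(generated_samples, model_label='model-A')
#   results['NDB'], results['JS']
# train_samples / generated_samples / 'model-A' are placeholder names.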
def construct_bins(self, training_samples, bins_file):
"""
Performs K-means clustering of the training samples
:param training_samples: An array of m x d floats (m samples of dimension d)
"""
if self.__read_from_bins_file(bins_file):
return
n, d = training_samples.shape
k = self.number_of_bins
if self.whitening:
self.training_mean = np.mean(training_samples, axis=0)
self.training_std = np.std(training_samples, axis=0) + self.ndb_eps
if self.max_dims is None and d > 1000:
# To ran faster, perform binning on sampled data dimension (i.e. don't use all channels of all pixels)
self.max_dims = d//6
whitened_samples = (training_samples-self.training_mean)/self.training_std
d_used = d if self.max_dims is None else min(d, self.max_dims)
self.used_d_indices = np.random.choice(d, d_used, replace=False)
print('Performing K-Means clustering of {} samples in dimension {} / {} to {} clusters ...'.format(n, d_used, d, k))
print('Can take a couple of minutes...')
if n//k > 1000:
print('Training data size should be ~500 times the number of bins (for reasonable speed and accuracy)')
clusters = KMeans(n_clusters=k, max_iter=100, n_jobs=-1).fit(whitened_samples[:, self.used_d_indices])
bin_centers = np.zeros([k, d])
for i in range(k):
bin_centers[i, :] = np.mean(whitened_samples[clusters.labels_ == i, :], axis=0)
# Organize bins by size
label_vals, label_counts = np.unique(clusters.labels_, return_counts=True)
bin_order = np.argsort(-label_counts)
self.bin_proportions = label_counts[bin_order] / np.sum(label_counts)
self.bin_centers = bin_centers[bin_order, :]
self.ref_sample_size = n
self.__write_to_bins_file(bins_file)
print('Done.')
def evaluate(self, query_samples, model_label=None):
"""
Assign each sample to the nearest bin center (in L2). Pre-whiten if required. and calculate the NDB
(Number of statistically Different Bins) and JS divergence scores.
:param query_samples: An array of m x d floats (m samples of dimension d)
:param model_label: optional label string for the evaluated model, allows plotting results of multiple models
:return: results dictionary containing NDB and JS scores and array of labels (assigned bin for each query sample)
"""
n = query_samples.shape[0]
query_bin_proportions, query_bin_assignments = self.__calculate_bin_proportions(query_samples)
# print(query_bin_proportions)
different_bins = NDB.two_proportions_z_test(self.bin_proportions, self.ref_sample_size, query_bin_proportions,
n, significance_level=self.significance_level,
z_threshold=self.z_threshold)
ndb = np.count_nonzero(different_bins)
js = NDB.jensen_shannon_divergence(self.bin_proportions, query_bin_proportions)
results = {'NDB': ndb,
'JS': js,
'Proportions': query_bin_proportions,
'N': n,
'Bin-Assignment': query_bin_assignments,
'Different-Bins': different_bins}
if model_label:
print('Results for {} samples from {}: '.format(n, model_label), end='')
self.cached_results[model_label] = results
if self.results_file:
# print('Storing result to', self.results_file)
pkl.dump(self.cached_results, open(self.results_file, 'wb'))
print('NDB =', ndb, 'NDB/K =', ndb/self.number_of_bins, ', JS =', js)
return results
def print_results(self):
print('NDB results (K={}{}):'.format(self.number_of_bins, ', data whitening' if self.whitening else ''))
for model in sorted(list(self.cached_results.keys())):
res = self.cached_results[model]
print('%s: NDB = %d, NDB/K = %.3f, JS = %.4f' % (model, res['NDB'], res['NDB']/self.number_of_bins, res['JS']))
def plot_results(self, models_to_plot=None):
"""
Plot the binning proportions of different methods
:param models_to_plot: optional list of model labels to plot
"""
K = self.number_of_bins
w = 1.0 / (len(self.cached_results)+1)
assert K == self.bin_proportions.size
assert self.cached_results
# Used for plotting only
def calc_se(p1, n1, p2, n2):
p = (p1 * n1 + p2 * n2) / (n1 + n2)
return np.sqrt(p * (1 - p) * (1/n1 + 1/n2))
if not models_to_plot:
models_to_plot = sorted(list(self.cached_results.keys()))
# Visualize the standard errors using the train proportions and size and query sample size
train_se = calc_se(self.bin_proportions, self.ref_sample_size,
self.bin_proportions, self.cached_results[models_to_plot[0]]['N'])
plt.bar(np.arange(0, K)+0.5, height=train_se*2.0, bottom=self.bin_proportions-train_se,
width=1.0, label='Train$\pm$SE', color='gray')
ymax = 0.0
for i, model in enumerate(models_to_plot):
results = self.cached_results[model]
label = '%s (%i : %.4f)' % (model, results['NDB'], results['JS'])
ymax = max(ymax, np.max(results['Proportions']))
if K <= 70:
plt.bar(np.arange(0, K)+(i+1.0)*w, results['Proportions'], width=w, label=label)
else:
plt.plot(np.arange(0, K)+0.5, results['Proportions'], '--*', label=label)
plt.legend(loc='best')
plt.ylim((0.0, min(ymax, np.max(self.bin_proportions)*4.0)))
plt.grid(True)
plt.title('Binning Proportions Evaluation Results for {} bins (NDB : JS)'.format(K))
plt.show()
def __calculate_bin_proportions(self, samples):
if self.bin_centers is None:
print('First run construct_bins on samples from the reference training data')
assert samples.shape[1] == self.bin_centers.shape[1]
n, d = samples.shape
k = self.bin_centers.shape[0]
D = np.zeros([n, k], dtype=samples.dtype)
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pandas as pd
import scipy.sparse as sp
from typing import List, Dict
from GridCal.Engine.basic_structures import Logger
import GridCal.Engine.Core.topology as tp
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.basic_structures import BranchImpedanceMode
from GridCal.Engine.basic_structures import BusMode
from GridCal.Engine.Simulations.PowerFlow.jacobian_based_power_flow import Jacobian
from GridCal.Engine.Core.common_functions import compile_types, find_different_states
class OpfTimeCircuit:
def __init__(self, nbus, nline, ntr, nvsc, nhvdc, nload, ngen, nbatt, nshunt, nstagen, ntime, sbase, time_array,
apply_temperature=False, branch_tolerance_mode: BranchImpedanceMode = BranchImpedanceMode.Specified):
"""
:param nbus: number of buses
:param nline: number of lines
:param ntr: number of transformers
:param nvsc:
:param nhvdc:
:param nload:
:param ngen:
:param nbatt:
:param nshunt:
"""
self.nbus = nbus
self.nline = nline
self.ntr = ntr
self.nvsc = nvsc
self.nhvdc = nhvdc
self.nload = nload
self.ngen = ngen
self.nbatt = nbatt
self.nshunt = nshunt
self.nstagen = nstagen
self.ntime = ntime
self.Sbase = sbase
self.apply_temperature = apply_temperature
self.branch_tolerance_mode = branch_tolerance_mode
self.time_array = time_array
# bus ----------------------------------------------------------------------------------------------------------
self.bus_names = np.empty(nbus, dtype=object)
self.bus_types = np.empty(nbus, dtype=int)
self.bus_installed_power = np.zeros(nbus, dtype=float)
self.bus_active = np.ones((ntime, nbus), dtype=int)
self.Vbus = np.ones((ntime, nbus), dtype=complex)
# branch common ------------------------------------------------------------------------------------------------
self.nbr = nline + ntr + nvsc # exclude the HVDC model since it is not a real branch
self.branch_names = np.empty(self.nbr, dtype=object)
self.branch_active = np.zeros((ntime, self.nbr), dtype=int)
self.F = np.zeros(self.nbr, dtype=int) # indices of the "from" buses
self.T = np.zeros(self.nbr, dtype=int) # indices of the "to" buses
self.branch_rates = np.zeros((ntime, self.nbr), dtype=float)
self.branch_cost = np.zeros((ntime, self.nbr), dtype=float)
self.branch_R = np.zeros(self.nbr, dtype=float)
self.branch_X = np.zeros(self.nbr, dtype=float)
self.C_branch_bus_f = sp.lil_matrix((self.nbr, nbus), dtype=int) # connectivity branch with their "from" bus
self.C_branch_bus_t = sp.lil_matrix((self.nbr, nbus), dtype=int) # connectivity branch with their "to" bus
# lines --------------------------------------------------------------------------------------------------------
self.line_names = np.zeros(nline, dtype=object)
self.line_R = np.zeros(nline, dtype=float)
self.line_X = np.zeros(nline, dtype=float)
self.line_B = np.zeros(nline, dtype=float)
self.line_temp_base = np.zeros(nline, dtype=float)
self.line_temp_oper = np.zeros(nline, dtype=float)
self.line_alpha = np.zeros(nline, dtype=float)
self.line_impedance_tolerance = np.zeros(nline, dtype=float)
self.C_line_bus = sp.lil_matrix((nline, nbus), dtype=int) # this ons is just for splitting islands
# transformer 2W + 3W ------------------------------------------------------------------------------------------
self.tr_names = np.zeros(ntr, dtype=object)
self.tr_R = np.zeros(ntr, dtype=float)
self.tr_X = np.zeros(ntr, dtype=float)
self.tr_G = np.zeros(ntr, dtype=float)
self.tr_B = np.zeros(ntr)
self.tr_tap_f = np.ones(ntr) # tap generated by the difference in nominal voltage at the form side
self.tr_tap_t = np.ones(ntr) # tap generated by the difference in nominal voltage at the to side
self.tr_tap_mod = np.ones(ntr) # normal tap module
self.tr_tap_ang = np.zeros(ntr) # normal tap angle
self.C_tr_bus = sp.lil_matrix((ntr, nbus), dtype=int) # this ons is just for splitting islands
# hvdc line ----------------------------------------------------------------------------------------------------
self.hvdc_names = np.zeros(nhvdc, dtype=object)
self.hvdc_active = np.zeros((ntime, nhvdc), dtype=bool)
self.hvdc_rate = np.zeros((ntime, nhvdc), dtype=float)
self.hvdc_Pf = np.zeros((ntime, nhvdc))
self.hvdc_Pt = np.zeros((ntime, nhvdc))
self.C_hvdc_bus_f = sp.lil_matrix((nhvdc, nbus), dtype=int) # this ons is just for splitting islands
self.C_hvdc_bus_t = sp.lil_matrix((nhvdc, nbus), dtype=int) # this ons is just for splitting islands
# vsc converter ------------------------------------------------------------------------------------------------
self.vsc_names = np.zeros(nvsc, dtype=object)
self.vsc_R1 = np.zeros(nvsc)
self.vsc_X1 = np.zeros(nvsc)
self.vsc_Gsw = np.zeros(nvsc)
self.vsc_Beq = np.zeros(nvsc)
self.vsc_m = np.zeros(nvsc)
self.vsc_theta = np.zeros(nvsc)
self.C_vsc_bus = sp.lil_matrix((nvsc, nbus), dtype=int) # this ons is just for splitting islands
# load ---------------------------------------------------------------------------------------------------------
self.load_names = np.empty(nload, dtype=object)
self.load_active = np.zeros((ntime, nload), dtype=bool)
self.load_s = np.zeros((ntime, nload), dtype=complex)
self.load_cost = np.zeros((ntime, nload))
self.C_bus_load = sp.lil_matrix((nbus, nload), dtype=int)
# static generators --------------------------------------------------------------------------------------------
self.static_generator_names = np.empty(nstagen, dtype=object)
self.static_generator_active = np.zeros((ntime, nstagen), dtype=bool)
self.static_generator_s = np.zeros((ntime, nstagen), dtype=complex)
self.C_bus_static_generator = sp.lil_matrix((nbus, nstagen), dtype=int)
# battery ------------------------------------------------------------------------------------------------------
self.battery_names = np.empty(nbatt, dtype=object)
self.battery_controllable = np.zeros(nbatt, dtype=bool)
self.battery_dispatchable = np.zeros(nbatt, dtype=bool)
self.battery_pmin = np.zeros(nbatt)
self.battery_pmax = np.zeros(nbatt)
self.battery_enom = np.zeros(nbatt)
self.battery_min_soc = np.zeros(nbatt)
self.battery_max_soc = np.zeros(nbatt)
self.battery_soc_0 = np.zeros(nbatt)
self.battery_charge_efficiency = np.zeros(nbatt)
self.battery_discharge_efficiency = np.zeros(nbatt)
self.battery_installed_p = np.zeros(nbatt)
self.battery_active = np.zeros((ntime, nbatt), dtype=bool)
self.battery_p = np.zeros((ntime, nbatt))
self.battery_pf = np.zeros((ntime, nbatt))
self.battery_v = np.zeros((ntime, nbatt))
self.battery_cost = np.zeros((ntime, nbatt))
self.C_bus_batt = sp.lil_matrix((nbus, nbatt), dtype=int)
# generator ----------------------------------------------------------------------------------------------------
self.generator_names = np.empty(ngen, dtype=object)
self.generator_controllable = np.zeros(ngen, dtype=bool)
self.generator_dispatchable = np.zeros(ngen, dtype=bool)
self.generator_installed_p = np.zeros(ngen)
self.generator_pmin = np.zeros(ngen)
self.generator_pmax = np.zeros(ngen)
self.generator_active = np.zeros((ntime, ngen), dtype=bool)
self.generator_p = np.zeros((ntime, ngen))
self.generator_pf = np.zeros((ntime, ngen))
self.generator_v = np.zeros((ntime, ngen))
self.generator_cost = np.zeros((ntime, ngen))
self.C_bus_gen = sp.lil_matrix((nbus, ngen), dtype=int)
# shunt --------------------------------------------------------------------------------------------------------
self.shunt_names = np.empty(nshunt, dtype=object)
self.shunt_active = np.zeros((ntime, nshunt), dtype=bool)
self.shunt_admittance = np.zeros((ntime, nshunt), dtype=complex)
self.C_bus_shunt = sp.lil_matrix((nbus, nshunt), dtype=int)
# --------------------------------------------------------------------------------------------------------------
# Arrays for the simulation
# --------------------------------------------------------------------------------------------------------------
self.Sbus = np.zeros((self.nbus, ntime), dtype=complex)
self.Ibus = np.zeros((self.nbus, ntime), dtype=complex)
self.Yshunt_from_devices = np.zeros((self.nbus, ntime), dtype=complex)
self.Qmax_bus = np.zeros((self.nbus, ntime))
self.Qmin_bus = np.zeros((self.nbus, ntime))
# only one Y matrix per time island, that is the guarantee we get by splitting the TimeCircuit in TimeIslands
self.Ybus = None
self.Yf = None
self.Yt = None
self.Yseries = None
self.Yshunt = None
# self.Ysh_helm = None
self.B1 = None
self.B2 = None
self.Bpqpv = None
self.Bref = None
self.original_time_idx = np.arange(self.ntime)
self.original_bus_idx = np.arange(self.nbus)
self.original_branch_idx = np.arange(self.nbr)
self.original_tr_idx = np.arange(self.ntr)
self.original_gen_idx = np.arange(self.ngen)
self.original_bat_idx = np.arange(self.nbatt)
self.pq = list()
self.pv = list()
self.vd = list()
self.pqpv = list()
self.available_structures = ['Vbus', 'Sbus', 'Ibus', 'Ybus', 'Yshunt', 'Yseries',
"B'", "B''", 'Types', 'Jacobian', 'Qmin', 'Qmax']
def consolidate(self):
"""
Consolidates the information of this object
:return:
"""
self.C_branch_bus_f = self.C_branch_bus_f.tocsc()
self.C_branch_bus_t = self.C_branch_bus_t.tocsc()
self.C_line_bus = self.C_line_bus.tocsc()
self.C_tr_bus = self.C_tr_bus.tocsc()
self.C_hvdc_bus_f = self.C_hvdc_bus_f.tocsc()
self.C_hvdc_bus_t = self.C_hvdc_bus_t.tocsc()
self.C_vsc_bus = self.C_vsc_bus.tocsc()
self.C_bus_load = self.C_bus_load.tocsr()
self.C_bus_batt = self.C_bus_batt.tocsr()
self.C_bus_gen = self.C_bus_gen.tocsr()
self.C_bus_shunt = self.C_bus_shunt.tocsr()
self.C_bus_static_generator = self.C_bus_static_generator.tocsr()
self.bus_installed_power = self.C_bus_gen * self.generator_installed_p
self.bus_installed_power += self.C_bus_batt * self.battery_installed_p
def get_power_injections(self):
"""
Compute the power
:return: Array of power injections
"""
# load
Sbus = - self.C_bus_load * (self.load_s * self.load_active).T # MW
# static generators
Sbus += self.C_bus_static_generator * (self.static_generator_s * self.static_generator_active).T # MW
# generators
Sbus += self.C_bus_gen * (self.generator_p * self.generator_active).T
# battery
Sbus += self.C_bus_batt * (self.battery_p * self.battery_active).T
# HVDC forced power
if self.nhvdc:
Sbus += ((self.hvdc_active * self.hvdc_Pf) * self.C_hvdc_bus_f).T
Sbus += ((self.hvdc_active * self.hvdc_Pt) * self.C_hvdc_bus_t).T
Sbus /= self.Sbase
return Sbus
def R_corrected(self):
"""
Returns temperature corrected resistances (numpy array) based on a formula
provided by: NFPA 70-2005, National Electrical Code, Table 8, footnote #2; and
https://en.wikipedia.org/wiki/Electrical_resistivity_and_conductivity#Linear_approximation
(version of 2019-01-03 at 15:20 EST).
"""
return self.line_R * (1.0 + self.line_alpha * (self.line_temp_oper - self.line_temp_base))
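# Example for R_corrected above: with line_alpha = 0.0039 1/degC (a typical
# value for copper or aluminium conductors), line_temp_base = 20 degC and
# line_temp_oper = 50 degC, the factor is 1 + 0.0039 * 30 = 1.117, i.e. the
# resistance is increased by roughly 12%.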
def compute_admittance_matrices(self):
"""
Compute the admittance matrices
:return: Ybus, Yseries, Yshunt
"""
t = self.original_time_idx[0]
# form the connectivity matrices with the states applied -------------------------------------------------------
br_states_diag = sp.diags(self.branch_active[t, :])
Cf = br_states_diag * self.C_branch_bus_f
Ct = br_states_diag * self.C_branch_bus_t
# Declare the empty primitives ---------------------------------------------------------------------------------
# The composition order is and will be: Pi model, HVDC, VSC
Ytt = np.empty(self.nbr, dtype=complex)
Yff = np.empty(self.nbr, dtype=complex)
Yft = np.empty(self.nbr, dtype=complex)
Ytf = np.empty(self.nbr, dtype=complex)
# Branch primitives in vector form, for Yseries
Ytts = np.empty(self.nbr, dtype=complex)
Yffs = np.empty(self.nbr, dtype=complex)
Yfts = np.empty(self.nbr, dtype=complex)
Ytfs = np.empty(self.nbr, dtype=complex)
ysh_br = np.empty(self.nbr, dtype=complex)
# line ---------------------------------------------------------------------------------------------------------
a = 0
b = self.nline
# use the specified of the temperature-corrected resistance
if self.apply_temperature:
line_R = self.R_corrected()
else:
line_R = self.line_R
# modify the branches impedance with the lower, upper tolerance values
if self.branch_tolerance_mode == BranchImpedanceMode.Lower:
line_R *= (1 - self.line_impedance_tolerance / 100.0)
elif self.branch_tolerance_mode == BranchImpedanceMode.Upper:
line_R *= (1 + self.line_impedance_tolerance / 100.0)
Ys_line = 1.0 / (line_R + 1.0j * self.line_X)
Ysh_line = 1.0j * self.line_B
Ys_line2 = Ys_line + Ysh_line / 2.0
# branch primitives in vector form for Ybus
Ytt[a:b] = Ys_line2
Yff[a:b] = Ys_line2
Yft[a:b] = - Ys_line
Ytf[a:b] = - Ys_line
# branch primitives in vector form, for Yseries
Ytts[a:b] = Ys_line
Yffs[a:b] = Ys_line
Yfts[a:b] = - Ys_line
Ytfs[a:b] = - Ys_line
ysh_br[a:b] = Ysh_line / 2.0
# transformer models -------------------------------------------------------------------------------------------
a = self.nline
b = a + self.ntr
Ys_tr = 1.0 / (self.tr_R + 1.0j * self.tr_X)
Ysh_tr = 1.0j * self.tr_B
Ys_tr2 = Ys_tr + Ysh_tr / 2.0
tap = self.tr_tap_mod * np.exp(1.0j * self.tr_tap_ang)
# branch primitives in vector form for Ybus
Ytt[a:b] = Ys_tr2 / (self.tr_tap_t * self.tr_tap_t)
Yff[a:b] = Ys_tr2 / (self.tr_tap_f * self.tr_tap_f * tap * np.conj(tap))
Yft[a:b] = - Ys_tr / (self.tr_tap_f * self.tr_tap_t * np.conj(tap))
Ytf[a:b] = - Ys_tr / (self.tr_tap_t * self.tr_tap_f * tap)
# branch primitives in vector form, for Yseries
Ytts[a:b] = Ys_tr
Yffs[a:b] = Ys_tr / (tap * np.conj(tap))
Yfts[a:b] = - Ys_tr / np.conj(tap)
Ytfs[a:b] = - Ys_tr / tap
ysh_br[a:b] = Ysh_tr / 2.0
# VSC MODEL ----------------------------------------------------------------------------------------------------
a = self.nline + self.ntr
b = a + self.nvsc
Y_vsc = 1.0 / (self.vsc_R1 + 1.0j * self.vsc_X1) # Y1
Yff[a:b] = Y_vsc
Yft[a:b] = -self.vsc_m * np.exp(1.0j * self.vsc_theta) * Y_vsc
Ytf[a:b] = -self.vsc_m * np.exp(-1.0j * self.vsc_theta) * Y_vsc
Ytt[a:b] = self.vsc_Gsw + self.vsc_m * self.vsc_m * (Y_vsc + 1.0j * self.vsc_Beq)
Yffs[a:b] = Y_vsc
Yfts[a:b] = -self.vsc_m * np.exp(1.0j * self.vsc_theta) * Y_vsc
Ytfs[a:b] = -self.vsc_m * np.exp(-1.0j * self.vsc_theta) * Y_vsc
Ytts[a:b] = self.vsc_m * self.vsc_m * (Y_vsc + 1.0j)
# HVDC LINE MODEL ----------------------------------------------------------------------------------------------
# does not apply since the HVDC-line model is the simplistic 2-generator model
# SHUNT --------------------------------------------------------------------------------------------------------
self.Yshunt_from_devices = self.C_bus_shunt * (self.shunt_admittance * self.shunt_active / self.Sbase).T
# form the admittance matrices ---------------------------------------------------------------------------------
self.Yf = sp.diags(Yff) * Cf + sp.diags(Yft) * Ct
self.Yt = sp.diags(Ytf) * Cf + sp.diags(Ytt) * Ct
self.Ybus = sp.csc_matrix(Cf.T * self.Yf + Ct.T * self.Yt)
# form the admittance matrices of the series and shunt elements ------------------------------------------------
Yfs = sp.diags(Yffs) * Cf + sp.diags(Yfts) * Ct
Yts = sp.diags(Ytfs) * Cf + sp.diags(Ytts) * Ct
self.Yseries = sp.csc_matrix(Cf.T * Yfs + Ct.T * Yts)
self.Yshunt = Cf.T * ysh_br + Ct.T * ysh_br
def get_generator_injections(self):
"""
Compute the active and reactive power of non-controlled generators (assuming all)
:return:
"""
pf2 = np.power(self.generator_pf, 2.0)
pf_sign = (self.generator_pf + 1e-20) / np.abs(self.generator_pf + 1e-20)
Q = pf_sign * self.generator_p * np.sqrt((1.0 - pf2) / (pf2 + 1e-20))
return self.generator_p + 1.0j * Q
def get_battery_injections(self):
"""
Compute the active and reactive power of non-controlled batteries (assuming all)
:return:
"""
pf2 = np.power(self.battery_pf, 2.0)
pf_sign = (self.battery_pf + 1e-20) / np.abs(self.battery_pf + 1e-20)
Q = pf_sign * self.battery_p * np.sqrt((1.0 - pf2) / (pf2 + 1e-20))
return self.battery_p + 1.0j * Q
def compute_injections(self):
"""
Compute the power
:return: nothing, the results are stored in the class
"""
# load
self.Sbus = - self.C_bus_load * (self.load_s * self.load_active).T # MW
# static generators
self.Sbus += self.C_bus_static_generator * (self.static_generator_s * self.static_generator_active).T # MW
# generators
self.Sbus += self.C_bus_gen * (self.get_generator_injections() * self.generator_active).T
# battery
self.Sbus += self.C_bus_batt * (self.get_battery_injections() * self.battery_active).T
# HVDC forced power
if self.nhvdc:
self.Sbus += ((self.hvdc_active * self.hvdc_Pf) * self.C_hvdc_bus_f).T
self.Sbus += ((self.hvdc_active * self.hvdc_Pt) * self.C_hvdc_bus_t).T
self.Sbus /= self.Sbase
def consolidate(self):
"""
Computes the parameters given the filled-in information
:return:
"""
self.compute_injections()
self.vd, self.pq, self.pv, self.pqpv = compile_types(Sbus=self.Sbus[:, 0], types=self.bus_types)
self.compute_admittance_matrices()
def get_structure(self, structure_type) -> pd.DataFrame:
"""
Get a DataFrame with the input.
Arguments:
**structure_type** (str): 'Vbus', 'Sbus', 'Ibus', 'Ybus', 'Yshunt', 'Yseries' or 'Types'
Returns:
pandas DataFrame
"""
if structure_type == 'Vbus':
df = pd.DataFrame(data=self.Vbus, columns=['Voltage (p.u.)'], index=self.bus_names)
elif structure_type == 'Sbus':
df = pd.DataFrame(data=self.Sbus, columns=['Power (p.u.)'], index=self.bus_names)
elif structure_type == 'Ibus':
df = pd.DataFrame(data=self.Ibus, columns=['Current (p.u.)'], index=self.bus_names)
elif structure_type == 'Ybus':
df = pd.DataFrame(data=self.Ybus.toarray(), columns=self.bus_names, index=self.bus_names)
elif structure_type == 'Yshunt':
df = pd.DataFrame(data=self.Yshunt, columns=['Shunt admittance (p.u.)'], index=self.bus_names)
elif structure_type == 'Yseries':
df = pd.DataFrame(data=self.Yseries.toarray(), columns=self.bus_names, index=self.bus_names)
elif structure_type == "B'":
df = pd.DataFrame(data=self.B1.toarray(), columns=self.bus_names, index=self.bus_names)
elif structure_type == "B''":
df = pd.DataFrame(data=self.B2.toarray(), columns=self.bus_names, index=self.bus_names)
elif structure_type == 'Types':
df = pd.DataFrame(data=self.bus_types, columns=['Bus types'], index=self.bus_names)
elif structure_type == 'Qmin':
df = pd.DataFrame(data=self.Qmin_bus, columns=['Qmin'], index=self.bus_names)
elif structure_type == 'Qmax':
df = pd.DataFrame(data=self.Qmax_bus, columns=['Qmax'], index=self.bus_names)
elif structure_type == 'Jacobian':
J = Jacobian(self.Ybus, self.Vbus, self.Ibus, self.pq, self.pqpv)
"""
J11 = dS_dVa[array([pvpq]).T, pvpq].real
J12 = dS_dVm[array([pvpq]).T, pq].real
J21 = dS_dVa[array([pq]).T, pvpq].imag
J22 = dS_dVm[array([pq]).T, pq].imag
"""
npq = len(self.pq)
npv = len(self.pv)
npqpv = npq + npv
cols = ['dS/dVa'] * npqpv + ['dS/dVm'] * npq
rows = cols
df = pd.DataFrame(data=J.toarray(), columns=cols, index=rows)
else:
raise Exception('PF input: structure type not found')
return df
def get_opf_time_island(self, bus_idx, time_idx) -> "OpfTimeCircuit":
"""
Get the island corresponding to the given buses
:param bus_idx: array of bus indices
:param time_idx: array of time indices
:return: OpfTimeCircuit restricted to the island
"""
# find the indices of the devices of the island
line_idx = tp.get_elements_of_the_island(self.C_line_bus, bus_idx)
tr_idx = tp.get_elements_of_the_island(self.C_tr_bus, bus_idx)
vsc_idx = tp.get_elements_of_the_island(self.C_vsc_bus, bus_idx)
hvdc_idx = tp.get_elements_of_the_island(self.C_hvdc_bus_f + self.C_hvdc_bus_t, bus_idx)
br_idx = tp.get_elements_of_the_island(self.C_branch_bus_f + self.C_branch_bus_t, bus_idx)
load_idx = tp.get_elements_of_the_island(self.C_bus_load.T, bus_idx)
stagen_idx = tp.get_elements_of_the_island(self.C_bus_static_generator.T, bus_idx)
gen_idx = tp.get_elements_of_the_island(self.C_bus_gen.T, bus_idx)
batt_idx = tp.get_elements_of_the_island(self.C_bus_batt.T, bus_idx)
shunt_idx = tp.get_elements_of_the_island(self.C_bus_shunt.T, bus_idx)
nc = OpfTimeCircuit(nbus=len(bus_idx),
nline=len(line_idx),
ntr=len(tr_idx),
nvsc=len(vsc_idx),
nhvdc=len(hvdc_idx),
nload=len(load_idx),
ngen=len(gen_idx),
nbatt=len(batt_idx),
nshunt=len(shunt_idx),
nstagen=len(stagen_idx),
ntime=len(time_idx),
sbase=self.Sbase,
time_array=self.time_array[time_idx],
apply_temperature=self.apply_temperature,
branch_tolerance_mode=self.branch_tolerance_mode)
nc.original_time_idx = time_idx
nc.original_bus_idx = bus_idx
nc.original_branch_idx = br_idx
nc.original_tr_idx = tr_idx
nc.original_gen_idx = gen_idx
nc.original_bat_idx = batt_idx
# bus ----------------------------------------------------------------------------------------------------------
nc.bus_names = self.bus_names[bus_idx]
nc.bus_types = self.bus_types[bus_idx]
nc.bus_installed_power = self.bus_installed_power[bus_idx]
nc.bus_active = self.bus_active[np.ix_(time_idx, bus_idx)]
nc.Vbus = self.Vbus[np.ix_(time_idx, bus_idx)]
# branch common ------------------------------------------------------------------------------------------------
nc.branch_names = self.branch_names[br_idx]
nc.branch_active = self.branch_active[np.ix_(time_idx, br_idx)]
nc.branch_rates = self.branch_rates[np.ix_(time_idx, br_idx)]
nc.branch_cost = self.branch_cost[np.ix_(time_idx, br_idx)]
nc.branch_R = self.branch_R[br_idx]
nc.branch_X = self.branch_X[br_idx]
nc.F = self.F[br_idx]
nc.T = self.T[br_idx]
nc.C_branch_bus_f = self.C_branch_bus_f[np.ix_(br_idx, bus_idx)]
nc.C_branch_bus_t = self.C_branch_bus_t[np.ix_(br_idx, bus_idx)]
# lines --------------------------------------------------------------------------------------------------------
nc.line_names = self.line_names[line_idx]
nc.line_R = self.line_R[line_idx]
nc.line_X = self.line_X[line_idx]
nc.line_B = self.line_B[line_idx]
nc.line_temp_base = self.line_temp_base[line_idx]
nc.line_temp_oper = self.line_temp_oper[line_idx]
nc.line_alpha = self.line_alpha[line_idx]
nc.line_impedance_tolerance = self.line_impedance_tolerance[line_idx]
nc.C_line_bus = self.C_line_bus[np.ix_(line_idx, bus_idx)]
# transformer 2W + 3W ------------------------------------------------------------------------------------------
nc.tr_names = self.tr_names[tr_idx]
nc.tr_R = self.tr_R[tr_idx]
nc.tr_X = self.tr_X[tr_idx]
nc.tr_G = self.tr_G[tr_idx]
nc.tr_B = self.tr_B[tr_idx]
nc.tr_tap_f = self.tr_tap_f[tr_idx]
nc.tr_tap_t = self.tr_tap_t[tr_idx]
nc.tr_tap_mod = self.tr_tap_mod[tr_idx]
nc.tr_tap_ang = self.tr_tap_ang[tr_idx]
nc.C_tr_bus = self.C_tr_bus[np.ix_(tr_idx, bus_idx)]
# hvdc line ----------------------------------------------------------------------------------------------------
nc.hvdc_names = self.hvdc_names[hvdc_idx]
nc.hvdc_active = self.hvdc_active[np.ix_(time_idx, hvdc_idx)]
nc.hvdc_rate = self.hvdc_rate[np.ix_(time_idx, hvdc_idx)]
nc.hvdc_Pf = self.hvdc_Pf[np.ix_(time_idx, hvdc_idx)]
nc.hvdc_Pt = self.hvdc_Pt[np.ix_(time_idx, hvdc_idx)]
nc.C_hvdc_bus_f = self.C_hvdc_bus_f[np.ix_(hvdc_idx, bus_idx)]
nc.C_hvdc_bus_t = self.C_hvdc_bus_t[np.ix_(hvdc_idx, bus_idx)]
# vsc converter ------------------------------------------------------------------------------------------------
nc.vsc_names = self.vsc_names[vsc_idx]
nc.vsc_R1 = self.vsc_R1[vsc_idx]
nc.vsc_X1 = self.vsc_X1[vsc_idx]
nc.vsc_Gsw = self.vsc_Gsw[vsc_idx]
nc.vsc_Beq = self.vsc_Beq[vsc_idx]
nc.vsc_m = self.vsc_m[vsc_idx]
nc.vsc_theta = self.vsc_theta[vsc_idx]
nc.C_vsc_bus = self.C_vsc_bus[np.ix_(vsc_idx, bus_idx)]
# load ---------------------------------------------------------------------------------------------------------
nc.load_names = self.load_names[load_idx]
nc.load_active = self.load_active[np.ix_(time_idx, load_idx)]
nc.load_s = self.load_s[np.ix_(time_idx, load_idx)]
nc.load_cost = self.load_cost[np.ix_(time_idx, load_idx)]
nc.C_bus_load = self.C_bus_load[np.ix_(bus_idx, load_idx)]
# static generators --------------------------------------------------------------------------------------------
nc.static_generator_names = self.static_generator_names[stagen_idx]
nc.static_generator_active = self.static_generator_active[np.ix_(time_idx, stagen_idx)]
nc.static_generator_s = self.static_generator_s[np.ix_(time_idx, stagen_idx)]
nc.C_bus_static_generator = self.C_bus_static_generator[np.ix_(bus_idx, stagen_idx)]
# battery ------------------------------------------------------------------------------------------------------
nc.battery_names = self.battery_names[batt_idx]
nc.battery_controllable = self.battery_controllable[batt_idx]
nc.battery_dispatchable = self.battery_dispatchable[batt_idx]
nc.battery_installed_p = self.battery_installed_p[batt_idx]
nc.battery_enom = self.battery_enom[batt_idx]
nc.battery_min_soc = self.battery_min_soc[batt_idx]
nc.battery_max_soc = self.battery_max_soc[batt_idx]
nc.battery_soc_0 = self.battery_soc_0[batt_idx]
nc.battery_charge_efficiency = self.battery_charge_efficiency[batt_idx]
nc.battery_discharge_efficiency = self.battery_discharge_efficiency[batt_idx]
nc.battery_active = self.battery_active[np.ix_(time_idx, batt_idx)]
nc.battery_p = self.battery_p[np.ix_(time_idx, batt_idx)]
nc.battery_pf = self.battery_pf[np.ix_(time_idx, batt_idx)]
nc.battery_v = self.battery_v[np.ix_(time_idx, batt_idx)]
nc.battery_cost = self.battery_cost[np.ix_(time_idx, batt_idx)]
nc.battery_pmin = self.battery_pmin[batt_idx]
nc.battery_pmax = self.battery_pmax[batt_idx]
nc.C_bus_batt = self.C_bus_batt[np.ix_(bus_idx, batt_idx)]
# generator ----------------------------------------------------------------------------------------------------
nc.generator_names = self.generator_names[gen_idx]
nc.generator_controllable = self.generator_controllable[gen_idx]
nc.generator_dispatchable = self.generator_dispatchable[gen_idx]
nc.generator_installed_p = self.generator_installed_p[gen_idx]
nc.generator_active = self.generator_active[np.ix_(time_idx, gen_idx)]
nc.generator_p = self.generator_p[np.ix_(time_idx, gen_idx)]
nc.generator_pf = self.generator_pf[np.ix_(time_idx, gen_idx)]
nc.generator_v = self.generator_v[np.ix_(time_idx, gen_idx)]
nc.generator_cost = self.generator_cost[np.ix_(time_idx, gen_idx)]
nc.generator_pmin = self.generator_pmin[gen_idx]
nc.generator_pmax = self.generator_pmax[gen_idx]
nc.C_bus_gen = self.C_bus_gen[np.ix_(bus_idx, gen_idx)]
# shunt --------------------------------------------------------------------------------------------------------
nc.shunt_names = self.shunt_names[shunt_idx]
nc.shunt_active = self.shunt_active[np.ix_(time_idx, shunt_idx)]
#!/usr/bin/env python3
"""Specificity"""
import numpy as np
def specificity(confusion):
"""specificity: calculates the specificity for each class in a
confusion matrix
Args:
confusion is a confusion numpy.ndarray of shape (classes, classes)
where row indices represent the correct labels and column
indices represent the predicted labels
classes is the number of classes
Returns:
a numpy.ndarray of shape (classes,) containing
the specificity of each class
"""
classes, classes = confusion.shape
specificity = np.zeros(shape=(classes,))
for i in range(classes):
specificity[i] = (
np.sum(confusion) - np.sum(confusion, axis=1)[i]
- np.sum(confusion, axis=0)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.table import Table
from regions import CircleSkyRegion
from gammapy.catalog import SourceCatalog3FHL
from gammapy.data import GTI
from gammapy.datasets import Datasets, MapDataset, MapDatasetOnOff
from gammapy.datasets.map import MapEvaluator, RAD_AXIS_DEFAULT
from gammapy.irf import (
EDispKernelMap,
EDispMap,
EnergyDispersion2D,
EffectiveAreaTable2D,
EnergyDependentMultiGaussPSF,
PSFMap,
PSFKernel,
)
from gammapy.makers.utils import make_map_exposure_true_energy, make_psf_map
from gammapy.maps import (
Map,
MapAxis,
WcsGeom,
WcsNDMap,
RegionGeom,
RegionNDMap,
HpxGeom,
)
from gammapy.modeling import Fit
from gammapy.modeling.models import (
FoVBackgroundModel,
GaussianSpatialModel,
Models,
PointSpatialModel,
PowerLawSpectralModel,
SkyModel,
ConstantSpectralModel,
DiskSpatialModel,
)
from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency
from gammapy.utils.gauss import Gauss2DPDF
@pytest.fixture
def geom_hpx():
axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=3)
energy_axis_true = MapAxis.from_energy_bounds(
"1 TeV", "10 TeV", nbin=4, name="energy_true"
)
geom = HpxGeom.create(nside=32, axes=[axis], frame="galactic")
return {"geom": geom, "energy_axis_true": energy_axis_true}
@pytest.fixture
def geom_hpx_partial():
axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=3)
energy_axis_true = MapAxis.from_energy_bounds(
"1 TeV", "10 TeV", nbin=4, name="energy_true"
)
geom = HpxGeom.create(
nside=32, axes=[axis], frame="galactic", region="DISK(110.,75.,10.)"
)
return {"geom": geom, "energy_axis_true": energy_axis_true}
@pytest.fixture
def geom():
axis = MapAxis.from_energy_bounds("0.1 TeV", "10 TeV", nbin=2)
return WcsGeom.create(
skydir=(266.40498829, -28.93617776),
binsz=0.02,
width=(2, 2),
frame="icrs",
axes=[axis],
)
@pytest.fixture
def geom_etrue():
axis = MapAxis.from_energy_bounds("0.1 TeV", "10 TeV", nbin=3, name="energy_true")
return WcsGeom.create(
skydir=(266.40498829, -28.93617776),
binsz=0.02,
width=(2, 2),
frame="icrs",
axes=[axis],
)
@pytest.fixture
def geom_image():
energy = np.logspace(-1.0, 1.0, 2)
axis = MapAxis.from_edges(energy, name="energy", unit=u.TeV, interp="log")
return WcsGeom.create(
skydir=(0, 0), binsz=0.02, width=(2, 2), frame="galactic", axes=[axis]
)
def get_exposure(geom_etrue):
filename = (
"$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
)
aeff = EffectiveAreaTable2D.read(filename, hdu="EFFECTIVE AREA")
exposure_map = make_map_exposure_true_energy(
pointing=SkyCoord(1, 0.5, unit="deg", frame="galactic"),
livetime=1 * u.hr,
aeff=aeff,
geom=geom_etrue,
)
return exposure_map
def get_psf():
filename = (
"$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
)
psf = EnergyDependentMultiGaussPSF.read(filename, hdu="POINT SPREAD FUNCTION")
geom = WcsGeom.create(
skydir=(0, 0),
frame="galactic",
binsz=2,
width=(2, 2),
axes=[RAD_AXIS_DEFAULT, psf.axes["energy_true"]],
)
return make_psf_map(
psf=psf,
pointing=SkyCoord(0, 0.5, unit="deg", frame="galactic"),
geom=geom,
exposure_map=Map.from_geom(geom.squash("rad"), unit="cm2 s"),
)
@requires_data()
def get_edisp(geom, geom_etrue):
filename = "$GAMMAPY_DATA/hess-dl3-dr1/data/hess_dl3_dr1_obs_id_020136.fits.gz"
edisp2d = EnergyDispersion2D.read(filename, hdu="EDISP")
energy = geom.axes["energy"].edges
energy_true = geom_etrue.axes["energy_true"].edges
edisp_kernel = edisp2d.to_edisp_kernel(
offset="1.2 deg", energy=energy, energy_true=energy_true
)
edisp = EDispKernelMap.from_edisp_kernel(edisp_kernel)
return edisp
@pytest.fixture
def sky_model():
spatial_model = GaussianSpatialModel(
lon_0="0.2 deg", lat_0="0.1 deg", sigma="0.2 deg", frame="galactic"
)
spectral_model = PowerLawSpectralModel(
index=3, amplitude="1e-11 cm-2 s-1 TeV-1", reference="1 TeV"
)
return SkyModel(
spatial_model=spatial_model, spectral_model=spectral_model, name="test-model"
)
def get_map_dataset(geom, geom_etrue, edisp="edispmap", name="test", **kwargs):
    """Returns a MapDataset"""
# define background model
background = Map.from_geom(geom)
background.data += 0.2
psf = get_psf()
exposure = get_exposure(geom_etrue)
e_reco = geom.axes["energy"]
e_true = geom_etrue.axes["energy_true"]
if edisp == "edispmap":
edisp = EDispMap.from_diagonal_response(energy_axis_true=e_true)
data = exposure.get_spectrum(geom.center_skydir).data
edisp.exposure_map.data = np.repeat(data, 2, axis=-1)
elif edisp == "edispkernelmap":
edisp = EDispKernelMap.from_diagonal_response(
energy_axis=e_reco, energy_axis_true=e_true
)
data = exposure.get_spectrum(geom.center_skydir).data
edisp.exposure_map.data = np.repeat(data, 2, axis=-1)
else:
edisp = None
# define fit mask
center = SkyCoord("0.2 deg", "0.1 deg", frame="galactic")
circle = CircleSkyRegion(center=center, radius=1 * u.deg)
mask_fit = geom.region_mask([circle])
models = FoVBackgroundModel(dataset_name=name)
return MapDataset(
models=models,
exposure=exposure,
background=background,
psf=psf,
edisp=edisp,
mask_fit=mask_fit,
name=name,
**kwargs,
)
@requires_data()
def test_map_dataset_str(sky_model, geom, geom_etrue):
dataset = get_map_dataset(geom, geom_etrue)
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
dataset.counts = dataset.npred()
dataset.mask_safe = dataset.mask_fit
assert "MapDataset" in str(dataset)
assert "(frozen)" in str(dataset)
assert "background" in str(dataset)
dataset.mask_safe = None
assert "MapDataset" in str(dataset)
def test_map_dataset_str_empty():
dataset = MapDataset()
assert "MapDataset" in str(dataset)
@requires_data()
def test_fake(sky_model, geom, geom_etrue):
"""Test the fake dataset"""
dataset = get_map_dataset(geom, geom_etrue)
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
npred = dataset.npred()
assert np.all(npred.data >= 0) # npred must be positive
dataset.counts = npred
real_dataset = dataset.copy()
dataset.fake(314)
assert real_dataset.counts.data.shape == dataset.counts.data.shape
assert_allclose(real_dataset.counts.data.sum(), 9525.299054, rtol=1e-5)
assert_allclose(dataset.counts.data.sum(), 9711)
@requires_data()
def test_different_exposure_unit(sky_model, geom):
energy_range_true = np.logspace(2, 4, 3)
axis = MapAxis.from_edges(
energy_range_true, name="energy_true", unit="GeV", interp="log"
)
geom_gev = geom.to_image().to_cube([axis])
dataset = get_map_dataset(geom, geom_gev, edisp="None")
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
npred = dataset.npred()
assert_allclose(npred.data[0, 50, 50], 6.086019, rtol=1e-2)
@pytest.mark.parametrize(("edisp_mode"), ["edispmap", "edispkernelmap"])
@requires_data()
def test_to_spectrum_dataset(sky_model, geom, geom_etrue, edisp_mode):
dataset_ref = get_map_dataset(geom, geom_etrue, edisp=edisp_mode)
bkg_model = FoVBackgroundModel(dataset_name=dataset_ref.name)
dataset_ref.models = [sky_model, bkg_model]
dataset_ref.counts = dataset_ref.npred_background() * 0.0
dataset_ref.counts.data[1, 50, 50] = 1
dataset_ref.counts.data[1, 60, 50] = 1
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset_ref.gti = gti
on_region = CircleSkyRegion(center=geom.center_skydir, radius=0.05 * u.deg)
spectrum_dataset = dataset_ref.to_spectrum_dataset(on_region)
spectrum_dataset_corrected = dataset_ref.to_spectrum_dataset(
on_region, containment_correction=True
)
mask = np.ones_like(dataset_ref.counts, dtype="bool")
mask[1, 40:60, 40:60] = 0
dataset_ref.mask_safe = Map.from_geom(dataset_ref.counts.geom, data=mask)
spectrum_dataset_mask = dataset_ref.to_spectrum_dataset(on_region)
assert np.sum(spectrum_dataset.counts.data) == 1
assert spectrum_dataset.data_shape == (2, 1, 1)
assert spectrum_dataset.background.geom.axes[0].nbin == 2
assert spectrum_dataset.exposure.geom.axes[0].nbin == 3
assert spectrum_dataset.exposure.unit == "m2s"
energy_axis = geom.axes["energy"]
assert (
spectrum_dataset.edisp.get_edisp_kernel(energy_axis=energy_axis)
.axes["energy"]
.nbin
== 2
)
assert (
spectrum_dataset.edisp.get_edisp_kernel(energy_axis=energy_axis)
.axes["energy_true"]
.nbin
== 3
)
assert_allclose(spectrum_dataset.edisp.exposure_map.data[1], 3.070917e09, rtol=1e-5)
assert np.sum(spectrum_dataset_mask.counts.data) == 0
assert spectrum_dataset_mask.data_shape == (2, 1, 1)
assert spectrum_dataset_corrected.exposure.unit == "m2s"
assert_allclose(spectrum_dataset.exposure.data[1], 3.070884e09, rtol=1e-5)
assert_allclose(spectrum_dataset_corrected.exposure.data[1], 2.05201e09, rtol=1e-5)
@requires_data()
def test_info_dict(sky_model, geom, geom_etrue):
dataset = get_map_dataset(geom, geom_etrue)
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
dataset.counts = dataset.npred()
info_dict = dataset.info_dict()
assert_allclose(info_dict["counts"], 9526, rtol=1e-3)
assert_allclose(info_dict["background"], 4000.0005, rtol=1e-3)
assert_allclose(info_dict["npred_background"], 4000.0, rtol=1e-3)
assert_allclose(info_dict["excess"], 5525.756, rtol=1e-3)
assert_allclose(info_dict["exposure_min"].value, 8.32e8, rtol=1e-3)
assert_allclose(info_dict["exposure_max"].value, 1.105e10, rtol=1e-3)
assert info_dict["exposure_max"].unit == "m2 s"
assert info_dict["name"] == "test"
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset.gti = gti
info_dict = dataset.info_dict()
assert_allclose(info_dict["counts"], 9526, rtol=1e-3)
assert_allclose(info_dict["background"], 4000.0005, rtol=1e-3)
assert_allclose(info_dict["npred_background"], 4000.0, rtol=1e-3)
assert_allclose(info_dict["sqrt_ts"], 74.024180, rtol=1e-3)
assert_allclose(info_dict["excess"], 5525.756, rtol=1e-3)
assert_allclose(info_dict["ontime"].value, 3600)
assert info_dict["name"] == "test"
def get_fermi_3fhl_gc_dataset():
counts = Map.read("$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-counts-cube.fits.gz")
background = Map.read(
"$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-background-cube.fits.gz"
)
bkg_model = FoVBackgroundModel(dataset_name="fermi-3fhl-gc")
exposure = Map.read(
"$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-exposure-cube.fits.gz"
)
return MapDataset(
counts=counts,
background=background,
models=[bkg_model],
exposure=exposure,
name="fermi-3fhl-gc",
)
@requires_data()
def test_resample_energy_3fhl():
dataset = get_fermi_3fhl_gc_dataset()
new_axis = MapAxis.from_edges([10, 35, 100] * u.GeV, interp="log", name="energy")
grouped = dataset.resample_energy_axis(energy_axis=new_axis)
assert grouped.counts.data.shape == (2, 200, 400)
assert grouped.counts.data[0].sum() == 28581
assert_allclose(
grouped.npred_background().data.sum(axis=(1, 2)),
[25074.366386, 2194.298612],
rtol=1e-5,
)
assert_allclose(grouped.exposure.data, dataset.exposure.data, rtol=1e-5)
axis = grouped.counts.geom.axes[0]
npred = dataset.npred()
npred_grouped = grouped.npred()
assert_allclose(npred.resample_axis(axis=axis).data.sum(), npred_grouped.data.sum())
@requires_data()
def test_to_image_3fhl():
dataset = get_fermi_3fhl_gc_dataset()
dataset_im = dataset.to_image()
assert dataset_im.counts.data.sum() == dataset.counts.data.sum()
assert_allclose(dataset_im.npred_background().data.sum(), 28548.625, rtol=1e-5)
assert_allclose(dataset_im.exposure.data, dataset.exposure.data, rtol=1e-5)
npred = dataset.npred()
npred_im = dataset_im.npred()
assert_allclose(npred.data.sum(), npred_im.data.sum())
def test_to_image_mask_safe():
axis = MapAxis.from_energy_bounds("0.1 TeV", "10 TeV", nbin=2)
geom = WcsGeom.create(
skydir=(0, 0), binsz=0.5, width=(1, 1), frame="icrs", axes=[axis]
)
dataset = MapDataset.create(geom)
    # Check mask_safe handling
data = np.array([[[False, True], [True, True]], [[False, False], [True, True]]])
dataset.mask_safe = WcsNDMap.from_geom(geom=geom, data=data)
dataset_im = dataset.to_image()
assert dataset_im.mask_safe.data.dtype == bool
desired = np.array([[False, True], [True, True]])
assert (dataset_im.mask_safe.data == desired).all()
# Check that missing entries in the dataset do not break
dataset_copy = dataset.copy()
dataset_copy.exposure = None
dataset_im = dataset_copy.to_image()
assert dataset_im.exposure is None
dataset_copy = dataset.copy()
dataset_copy.counts = None
dataset_im = dataset_copy.to_image()
assert dataset_im.counts is None
@requires_data()
def test_downsample():
dataset = get_fermi_3fhl_gc_dataset()
downsampled = dataset.downsample(2)
assert downsampled.counts.data.shape == (11, 100, 200)
assert downsampled.counts.data.sum() == dataset.counts.data.sum()
assert_allclose(
downsampled.npred_background().data.sum(axis=(1, 2)),
dataset.npred_background().data.sum(axis=(1, 2)),
rtol=1e-5,
)
assert_allclose(downsampled.exposure.data[5, 50, 100], 3.318082e11, rtol=1e-5)
with pytest.raises(ValueError):
dataset.downsample(2, axis_name="energy")
@requires_data()
def test_map_dataset_fits_io(tmp_path, sky_model, geom, geom_etrue):
dataset = get_map_dataset(geom, geom_etrue)
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
dataset.counts = dataset.npred()
dataset.mask_safe = dataset.mask_fit
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset.gti = gti
hdulist = dataset.to_hdulist()
actual = [hdu.name for hdu in hdulist]
desired = [
"PRIMARY",
"COUNTS",
"COUNTS_BANDS",
"EXPOSURE",
"EXPOSURE_BANDS",
"BACKGROUND",
"BACKGROUND_BANDS",
"EDISP",
"EDISP_BANDS",
"EDISP_EXPOSURE",
"EDISP_EXPOSURE_BANDS",
"PSF",
"PSF_BANDS",
"PSF_EXPOSURE",
"PSF_EXPOSURE_BANDS",
"MASK_SAFE",
"MASK_SAFE_BANDS",
"MASK_FIT",
"MASK_FIT_BANDS",
"GTI",
]
assert actual == desired
dataset.write(tmp_path / "test.fits")
dataset_new = MapDataset.read(tmp_path / "test.fits")
assert dataset_new.mask.data.dtype == bool
assert_allclose(dataset.counts.data, dataset_new.counts.data)
assert_allclose(
dataset.npred_background().data, dataset_new.npred_background().data
)
assert_allclose(dataset.edisp.edisp_map.data, dataset_new.edisp.edisp_map.data)
assert_allclose(dataset.psf.psf_map.data, dataset_new.psf.psf_map.data)
assert_allclose(dataset.exposure.data, dataset_new.exposure.data)
assert_allclose(dataset.mask_fit.data, dataset_new.mask_fit.data)
assert_allclose(dataset.mask_safe.data, dataset_new.mask_safe.data)
assert dataset.counts.geom == dataset_new.counts.geom
assert dataset.exposure.geom == dataset_new.exposure.geom
assert_allclose(dataset.exposure.meta["livetime"], 1 * u.h)
assert dataset.npred_background().geom == dataset_new.npred_background().geom
assert dataset.edisp.edisp_map.geom == dataset_new.edisp.edisp_map.geom
assert_allclose(
dataset.gti.time_sum.to_value("s"), dataset_new.gti.time_sum.to_value("s")
)
# To test io of psf and edisp map
stacked = MapDataset.create(geom)
stacked.write(tmp_path / "test-2.fits", overwrite=True)
stacked1 = MapDataset.read(tmp_path / "test-2.fits")
assert stacked1.psf.psf_map is not None
assert stacked1.psf.exposure_map is not None
assert stacked1.edisp.edisp_map is not None
assert stacked1.edisp.exposure_map is not None
assert stacked.mask.data.dtype == bool
assert_allclose(stacked1.psf.psf_map, stacked.psf.psf_map)
assert_allclose(stacked1.edisp.edisp_map, stacked.edisp.edisp_map)
@requires_dependency("iminuit")
@requires_dependency("matplotlib")
@requires_data()
def test_map_fit(sky_model, geom, geom_etrue):
dataset_1 = get_map_dataset(geom, geom_etrue, name="test-1")
dataset_2 = get_map_dataset(geom, geom_etrue, name="test-2")
datasets = Datasets([dataset_1, dataset_2])
models = Models(datasets.models)
models.insert(0, sky_model)
models["test-1-bkg"].spectral_model.norm.value = 0.5
models["test-model"].spatial_model.sigma.frozen = True
datasets.models = models
dataset_2.counts = dataset_2.npred()
dataset_1.counts = dataset_1.npred()
models["test-1-bkg"].spectral_model.norm.value = 0.49
models["test-2-bkg"].spectral_model.norm.value = 0.99
fit = Fit()
result = fit.run(datasets=datasets)
result = result["optimize_result"]
assert result.success
assert "minuit" in repr(result)
npred = dataset_1.npred().data.sum()
assert_allclose(npred, 7525.790688, rtol=1e-3)
assert_allclose(result.total_stat, 21625.845714, rtol=1e-3)
pars = result.parameters
assert_allclose(pars["lon_0"].value, 0.2, rtol=1e-2)
assert_allclose(pars["lon_0"].error, 0.002244, rtol=1e-2)
assert_allclose(pars["index"].value, 3, rtol=1e-2)
assert_allclose(pars["index"].error, 0.024277, rtol=1e-2)
assert_allclose(pars["amplitude"].value, 1e-11, rtol=1e-2)
assert_allclose(pars["amplitude"].error, 4.216154e-13, rtol=1e-2)
# background norm 1
assert_allclose(pars[8].value, 0.5, rtol=1e-2)
assert_allclose(pars[8].error, 0.015811, rtol=1e-2)
# background norm 2
assert_allclose(pars[11].value, 1, rtol=1e-2)
assert_allclose(pars[11].error, 0.02147, rtol=1e-2)
# test mask_safe evaluation
dataset_1.mask_safe = geom.energy_mask(energy_min=1 * u.TeV)
dataset_2.mask_safe = geom.energy_mask(energy_min=1 * u.TeV)
stat = datasets.stat_sum()
assert_allclose(stat, 14823.772744, rtol=1e-5)
region = sky_model.spatial_model.to_region()
initial_counts = dataset_1.counts.copy()
with mpl_plot_check():
dataset_1.plot_residuals(kwargs_spectral=dict(region=region))
# check dataset has not changed
assert initial_counts == dataset_1.counts
# test model evaluation outside image
dataset_1.models[0].spatial_model.lon_0.value = 150
dataset_1.npred()
assert not dataset_1.evaluators["test-model"].contributes
@requires_dependency("iminuit")
@requires_data()
def test_map_fit_one_energy_bin(sky_model, geom_image):
energy_axis = geom_image.axes["energy"]
geom_etrue = geom_image.to_image().to_cube([energy_axis.copy(name="energy_true")])
dataset = get_map_dataset(geom_image, geom_etrue)
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [sky_model, bkg_model]
sky_model.spectral_model.index.value = 3.0
sky_model.spectral_model.index.frozen = True
dataset.models[f"{dataset.name}-bkg"].spectral_model.norm.value = 0.5
dataset.counts = dataset.npred()
# Move a bit away from the best-fit point, to make sure the optimiser runs
sky_model.parameters["sigma"].value = 0.21
dataset.models[f"{dataset.name}-bkg"].parameters["norm"].frozen = True
fit = Fit()
result = fit.run(datasets=[dataset])
result = result["optimize_result"]
assert result.success
npred = dataset.npred().data.sum()
assert_allclose(npred, 16538.124036, rtol=1e-3)
assert_allclose(result.total_stat, -34844.125047, rtol=1e-3)
pars = result.parameters
assert_allclose(pars["lon_0"].value, 0.2, rtol=1e-2)
assert_allclose(pars["lon_0"].error, 0.001689, rtol=1e-2)
assert_allclose(pars["sigma"].value, 0.2, rtol=1e-2)
assert_allclose(pars["sigma"].error, 0.00092, rtol=1e-2)
assert_allclose(pars["amplitude"].value, 1e-11, rtol=1e-2)
assert_allclose(pars["amplitude"].error, 8.127593e-14, rtol=1e-2)
def test_create():
# tests empty datasets created
rad_axis = MapAxis(nodes=np.linspace(0.0, 1.0, 51), unit="deg", name="rad")
e_reco = MapAxis.from_edges(
np.logspace(-1.0, 1.0, 3), name="energy", unit=u.TeV, interp="log"
)
e_true = MapAxis.from_edges(
np.logspace(-1.0, 1.0, 4), name="energy_true", unit=u.TeV, interp="log"
)
geom = WcsGeom.create(binsz=0.02, width=(2, 2), axes=[e_reco])
empty_dataset = MapDataset.create(
geom=geom, energy_axis_true=e_true, rad_axis=rad_axis
)
assert empty_dataset.counts.data.shape == (2, 100, 100)
assert empty_dataset.exposure.data.shape == (3, 100, 100)
assert empty_dataset.psf.psf_map.data.shape == (3, 50, 10, 10)
assert empty_dataset.psf.exposure_map.data.shape == (3, 1, 10, 10)
assert isinstance(empty_dataset.edisp, EDispKernelMap)
assert empty_dataset.edisp.edisp_map.data.shape == (3, 2, 10, 10)
assert empty_dataset.edisp.exposure_map.data.shape == (3, 1, 10, 10)
assert_allclose(empty_dataset.edisp.edisp_map.data.sum(), 300)
assert_allclose(empty_dataset.gti.time_delta, 0.0 * u.s)
def test_create_with_migra(tmp_path):
# tests empty datasets created
migra_axis = MapAxis(nodes=np.linspace(0.0, 3.0, 51), unit="", name="migra")
rad_axis = MapAxis(nodes=np.linspace(0.0, 1.0, 51), unit="deg", name="rad")
e_reco = MapAxis.from_edges(
np.logspace(-1.0, 1.0, 3), name="energy", unit=u.TeV, interp="log"
)
e_true = MapAxis.from_edges(
np.logspace(-1.0, 1.0, 4), name="energy_true", unit=u.TeV, interp="log"
)
geom = WcsGeom.create(binsz=0.02, width=(2, 2), axes=[e_reco])
empty_dataset = MapDataset.create(
geom=geom, energy_axis_true=e_true, migra_axis=migra_axis, rad_axis=rad_axis
)
empty_dataset.write(tmp_path / "test.fits")
dataset_new = MapDataset.read(tmp_path / "test.fits")
assert isinstance(empty_dataset.edisp, EDispMap)
assert empty_dataset.edisp.edisp_map.data.shape == (3, 50, 10, 10)
assert empty_dataset.edisp.exposure_map.data.shape == (3, 1, 10, 10)
assert_allclose(empty_dataset.edisp.edisp_map.data.sum(), 5000)
assert_allclose(empty_dataset.gti.time_delta, 0.0 * u.s)
assert isinstance(dataset_new.edisp, EDispMap)
assert dataset_new.edisp.edisp_map.data.shape == (3, 50, 10, 10)
def test_stack(sky_model):
axis = MapAxis.from_energy_bounds("0.1 TeV", "10 TeV", nbin=3)
geom = WcsGeom.create(
skydir=(266.40498829, -28.93617776),
binsz=0.05,
width=(2, 2),
frame="icrs",
axes=[axis],
)
axis_etrue = MapAxis.from_energy_bounds(
"0.1 TeV", "10 TeV", nbin=5, name="energy_true"
)
geom_etrue = WcsGeom.create(
skydir=(266.40498829, -28.93617776),
binsz=0.05,
width=(2, 2),
frame="icrs",
axes=[axis_etrue],
)
edisp = EDispKernelMap.from_diagonal_response(
energy_axis=axis, energy_axis_true=axis_etrue, geom=geom
)
edisp.exposure_map.quantity = (
1e0 * u.m ** 2 * u.s * np.ones(edisp.exposure_map.data.shape)
)
bkg1 = Map.from_geom(geom)
bkg1.data += 0.2
cnt1 = Map.from_geom(geom)
cnt1.data = 1.0 * np.ones(cnt1.data.shape)
exp1 = Map.from_geom(geom_etrue)
exp1.quantity = 1e7 * u.m ** 2 * u.s * np.ones(exp1.data.shape)
mask1 = Map.from_geom(geom)
mask1.data = np.ones(mask1.data.shape, dtype=bool)
mask1.data[0][:][5:10] = False
dataset1 = MapDataset(
counts=cnt1,
background=bkg1,
exposure=exp1,
mask_safe=mask1,
name="dataset-1",
edisp=edisp,
meta_table=Table({"OBS_ID": [0]}),
)
bkg2 = Map.from_geom(geom)
bkg2.data = 0.1 * np.ones(bkg2.data.shape)
cnt2 = Map.from_geom(geom)
cnt2.data = 1.0 * np.ones(cnt2.data.shape)
exp2 = Map.from_geom(geom_etrue)
exp2.quantity = 1e7 * u.m ** 2 * u.s * np.ones(exp2.data.shape)
mask2 = Map.from_geom(geom)
mask2.data = np.ones(mask2.data.shape, dtype=bool)
mask2.data[0][:][5:10] = False
mask2.data[1][:][10:15] = False
dataset2 = MapDataset(
counts=cnt2,
background=bkg2,
exposure=exp2,
mask_safe=mask2,
name="dataset-2",
edisp=edisp,
meta_table=Table({"OBS_ID": [1]}),
)
background_model2 = FoVBackgroundModel(dataset_name="dataset-2")
background_model1 = FoVBackgroundModel(dataset_name="dataset-1")
dataset1.models = [background_model1, sky_model]
dataset2.models = [background_model2, sky_model]
stacked = MapDataset.from_geoms(**dataset1.geoms)
stacked.stack(dataset1)
stacked.stack(dataset2)
stacked.models = [sky_model]
npred_b = stacked.npred()
assert_allclose(npred_b.data.sum(), 1459.985035, 1e-5)
assert_allclose(stacked.npred_background().data.sum(), 1360.00, 1e-5)
assert_allclose(stacked.counts.data.sum(), 9000, 1e-5)
assert_allclose(stacked.mask_safe.data.sum(), 4600)
assert_allclose(stacked.exposure.data.sum(), 1.6e11)
assert_allclose(stacked.meta_table["OBS_ID"][0], [0, 1])
@requires_data()
def test_npred_sig(sky_model, geom, geom_etrue):
dataset = get_map_dataset(geom, geom_etrue)
pwl = PowerLawSpectralModel()
gauss = GaussianSpatialModel(
lon_0="0.0 deg", lat_0="0.0 deg", sigma="0.5 deg", frame="galactic"
)
model1 = SkyModel(pwl, gauss, name="m1")
bkg = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [bkg, sky_model, model1]
assert_allclose(dataset.npred().data.sum(), 9676.047906, rtol=1e-3)
assert_allclose(dataset.npred_signal().data.sum(), 5676.04790, rtol=1e-3)
assert_allclose(
dataset.npred_signal(model_name=model1.name).data.sum(), 150.7487, rtol=1e-3
)
with pytest.raises(
KeyError, match="m2",
):
dataset.npred_signal(model_name="m2")
def test_stack_npred():
pwl = PowerLawSpectralModel()
gauss = GaussianSpatialModel(sigma="0.2 deg")
model = SkyModel(pwl, gauss)
axis = MapAxis.from_energy_bounds("0.1 TeV", "10 TeV", nbin=5)
axis_etrue = MapAxis.from_energy_bounds(
"0.1 TeV", "10 TeV", nbin=11, name="energy_true"
)
geom = WcsGeom.create(
skydir=(0, 0), binsz=0.05, width=(2, 2), frame="icrs", axes=[axis],
)
dataset_1 = MapDataset.create(
geom,
energy_axis_true=axis_etrue,
name="dataset-1",
gti=GTI.create("0 min", "30 min"),
)
dataset_1.psf = None
dataset_1.exposure.data += 1
dataset_1.mask_safe = geom.energy_mask(energy_min=1 * u.TeV)
dataset_1.background.data += 1
bkg_model_1 = FoVBackgroundModel(dataset_name=dataset_1.name)
dataset_1.models = [model, bkg_model_1]
dataset_2 = MapDataset.create(
geom,
energy_axis_true=axis_etrue,
name="dataset-2",
gti=GTI.create("30 min", "60 min"),
)
dataset_2.psf = None
dataset_2.exposure.data += 1
dataset_2.mask_safe = geom.energy_mask(energy_min=0.2 * u.TeV)
dataset_2.background.data += 1
bkg_model_2 = FoVBackgroundModel(dataset_name=dataset_2.name)
dataset_2.models = [model, bkg_model_2]
npred_1 = dataset_1.npred()
npred_1.data[~dataset_1.mask_safe.data] = 0
npred_2 = dataset_2.npred()
npred_2.data[~dataset_2.mask_safe.data] = 0
stacked_npred = Map.from_geom(geom)
stacked_npred.stack(npred_1)
stacked_npred.stack(npred_2)
stacked = MapDataset.create(geom, energy_axis_true=axis_etrue, name="stacked")
stacked.stack(dataset_1)
stacked.stack(dataset_2)
npred_stacked = stacked.npred()
assert_allclose(npred_stacked.data, stacked_npred.data)
def to_cube(image):
    # introduce a fake energy axis for now
axis = MapAxis.from_edges([1, 10] * u.TeV, name="energy")
geom = image.geom.to_cube([axis])
return WcsNDMap.from_geom(geom=geom, data=image.data)
@pytest.fixture
def images():
    """Load some `counts`, `counts_off`, `acceptance`, `acceptance_off` images"""
filename = "$GAMMAPY_DATA/tests/unbundled/hess/survey/hess_survey_snippet.fits.gz"
return {
"counts": to_cube(WcsNDMap.read(filename, hdu="ON")),
"counts_off": to_cube(WcsNDMap.read(filename, hdu="OFF")),
"acceptance": to_cube(WcsNDMap.read(filename, hdu="ONEXPOSURE")),
"acceptance_off": to_cube(WcsNDMap.read(filename, hdu="OFFEXPOSURE")),
"exposure": to_cube(WcsNDMap.read(filename, hdu="EXPGAMMAMAP")),
"background": to_cube(WcsNDMap.read(filename, hdu="BACKGROUND")),
}
def test_npred_psf_after_edisp():
energy_axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=3)
energy_axis_true = MapAxis.from_energy_bounds(
"0.8 TeV", "15 TeV", nbin=6, name="energy_true"
)
geom = WcsGeom.create(width=4 * u.deg, binsz=0.02, axes=[energy_axis])
dataset = MapDataset.create(geom=geom, energy_axis_true=energy_axis_true)
dataset.background.data += 1
dataset.exposure.data += 1e12
dataset.mask_safe.data += True
dataset.psf = PSFMap.from_gauss(
energy_axis_true=energy_axis_true, sigma=0.2 * u.deg
)
model = SkyModel(
spectral_model=PowerLawSpectralModel(),
spatial_model=PointSpatialModel(),
name="test-model",
)
model.apply_irf["psf_after_edisp"] = True
bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
dataset.models = [bkg_model, model]
npred = dataset.npred()
assert_allclose(npred.data.sum(), 129553.858658)
def get_map_dataset_onoff(images, **kwargs):
"""Returns a MapDatasetOnOff"""
mask_geom = images["counts"].geom
mask_data = np.ones(images["counts"].data.shape, dtype=bool)
mask_safe = Map.from_geom(mask_geom, data=mask_data)
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
energy_axis = mask_geom.axes["energy"]
energy_axis_true = energy_axis.copy(name="energy_true")
psf = PSFMap.from_gauss(
energy_axis_true=energy_axis_true, sigma=0.2 * u.deg, geom=mask_geom.to_image()
)
edisp = EDispKernelMap.from_diagonal_response(
energy_axis=energy_axis, energy_axis_true=energy_axis_true, geom=mask_geom
)
return MapDatasetOnOff(
counts=images["counts"],
counts_off=images["counts_off"],
acceptance=images["acceptance"],
acceptance_off=images["acceptance_off"],
exposure=images["exposure"],
mask_safe=mask_safe,
psf=psf,
edisp=edisp,
gti=gti,
**kwargs,
)
@requires_data()
def test_map_dataset_on_off_fits_io(images, tmp_path):
dataset = get_map_dataset_onoff(images)
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset.gti = gti
hdulist = dataset.to_hdulist()
actual = [hdu.name for hdu in hdulist]
desired = [
"PRIMARY",
"COUNTS",
"COUNTS_BANDS",
"EXPOSURE",
"EXPOSURE_BANDS",
"EDISP",
"EDISP_BANDS",
"EDISP_EXPOSURE",
"EDISP_EXPOSURE_BANDS",
"PSF",
"PSF_BANDS",
"PSF_EXPOSURE",
"PSF_EXPOSURE_BANDS",
"MASK_SAFE",
"MASK_SAFE_BANDS",
"GTI",
"COUNTS_OFF",
"COUNTS_OFF_BANDS",
"ACCEPTANCE",
"ACCEPTANCE_BANDS",
"ACCEPTANCE_OFF",
"ACCEPTANCE_OFF_BANDS",
]
assert actual == desired
dataset.write(tmp_path / "test.fits")
dataset_new = MapDatasetOnOff.read(tmp_path / "test.fits")
assert dataset_new.mask.data.dtype == bool
assert_allclose(dataset.counts.data, dataset_new.counts.data)
assert_allclose(dataset.counts_off.data, dataset_new.counts_off.data)
assert_allclose(dataset.acceptance.data, dataset_new.acceptance.data)
assert_allclose(dataset.acceptance_off.data, dataset_new.acceptance_off.data)
assert_allclose(dataset.exposure.data, dataset_new.exposure.data)
assert_allclose(dataset.mask_safe, dataset_new.mask_safe)
    assert np.all(dataset.mask_safe.data == dataset_new.mask_safe.data)
assert dataset.mask_safe.geom == dataset_new.mask_safe.geom
assert dataset.counts.geom == dataset_new.counts.geom
assert dataset.exposure.geom == dataset_new.exposure.geom
assert_allclose(
dataset.gti.time_sum.to_value("s"), dataset_new.gti.time_sum.to_value("s")
)
assert dataset.psf.psf_map == dataset_new.psf.psf_map
assert dataset.psf.exposure_map == dataset_new.psf.exposure_map
assert dataset.edisp.edisp_map == dataset_new.edisp.edisp_map
assert dataset.edisp.exposure_map == dataset_new.edisp.exposure_map
def test_create_onoff(geom):
# tests empty datasets created
migra_axis = MapAxis(nodes=np.linspace(0.0, 3.0, 51), unit="", name="migra")
rad_axis = MapAxis(nodes=np.linspace(0.0, 1.0, 51), unit="deg", name="rad")
energy_axis = geom.axes["energy"].copy(name="energy_true")
empty_dataset = MapDatasetOnOff.create(geom, energy_axis, migra_axis, rad_axis)
assert_allclose(empty_dataset.counts.data.sum(), 0.0)
assert_allclose(empty_dataset.counts_off.data.sum(), 0.0)
assert_allclose(empty_dataset.acceptance.data.sum(), 0.0)
assert_allclose(empty_dataset.acceptance_off.data.sum(), 0.0)
assert empty_dataset.psf.psf_map.data.shape == (2, 50, 10, 10)
assert empty_dataset.psf.exposure_map.data.shape == (2, 1, 10, 10)
assert empty_dataset.edisp.edisp_map.data.shape == (2, 50, 10, 10)
assert empty_dataset.edisp.exposure_map.data.shape == (2, 1, 10, 10)
assert_allclose(empty_dataset.edisp.edisp_map.data.sum(), 3333.333333)
assert_allclose(empty_dataset.gti.time_delta, 0.0 * u.s)
@requires_data()
def test_map_dataset_onoff_str(images):
dataset = get_map_dataset_onoff(images)
assert "MapDatasetOnOff" in str(dataset)
@requires_data()
def test_stack_onoff(images):
dataset = get_map_dataset_onoff(images)
stacked = dataset.copy()
stacked.stack(dataset)
assert_allclose(stacked.counts.data.sum(), 2 * dataset.counts.data.sum())
assert_allclose(stacked.counts_off.data.sum(), 2 * dataset.counts_off.data.sum())
assert_allclose(
stacked.acceptance.data.sum(), dataset.data_shape[1] * dataset.data_shape[2]
)
assert_allclose(np.nansum(stacked.acceptance_off.data), 2.925793e08, rtol=1e-5)
assert_allclose(stacked.exposure.data, 2.0 * dataset.exposure.data)
def test_dataset_cutout_aligned(geom):
dataset = MapDataset.create(geom)
kwargs = {"position": geom.center_skydir, "width": 1 * u.deg}
geoms = {name: geom.cutout(**kwargs) for name, geom in dataset.geoms.items()}
cutout = MapDataset.from_geoms(**geoms, name="cutout")
assert dataset.counts.geom.is_aligned(cutout.counts.geom)
assert dataset.exposure.geom.is_aligned(cutout.exposure.geom)
assert dataset.edisp.edisp_map.geom.is_aligned(cutout.edisp.edisp_map.geom)
assert dataset.psf.psf_map.geom.is_aligned(cutout.psf.psf_map.geom)
def test_stack_onoff_cutout(geom_image):
# Test stacking of cutouts
energy_axis_true = MapAxis.from_energy_bounds(
"1 TeV", "10 TeV", nbin=3, name="energy_true"
)
dataset = MapDatasetOnOff.create(geom_image, energy_axis_true=energy_axis_true)
dataset.gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
kwargs = {"position": geom_image.center_skydir, "width": 1 * u.deg}
geoms = {name: geom.cutout(**kwargs) for name, geom in dataset.geoms.items()}
dataset_cutout = MapDatasetOnOff.from_geoms(**geoms, name="cutout-dataset")
dataset_cutout.gti = GTI.create(
[0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00"
)
dataset_cutout.mask_safe.data += True
dataset_cutout.counts.data += 1
dataset_cutout.counts_off.data += 1
dataset_cutout.exposure.data += 1
dataset.stack(dataset_cutout)
assert_allclose(dataset.counts.data.sum(), 2500)
assert_allclose(dataset.counts_off.data.sum(), 2500)
assert_allclose(dataset.alpha.data.sum(), 0)
assert_allclose(dataset.exposure.data.sum(), 7500)
assert dataset_cutout.name == "cutout-dataset"
def test_datasets_io_no_model(tmpdir):
axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=2)
geom = WcsGeom.create(npix=(5, 5), axes=[axis])
dataset_1 = MapDataset.create(geom, name="dataset_1")
dataset_2 = MapDataset.create(geom, name="dataset_2")
datasets = Datasets([dataset_1, dataset_2])
datasets.write(filename=tmpdir / "datasets.yaml")
filename_1 = tmpdir / "dataset_1.fits"
assert filename_1.exists()
filename_2 = tmpdir / "dataset_2.fits"
assert filename_2.exists()
@requires_data()
def test_map_dataset_on_off_to_spectrum_dataset(images):
dataset = get_map_dataset_onoff(images)
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset.gti = gti
on_region = CircleSkyRegion(
center=dataset.counts.geom.center_skydir, radius=0.1 * u.deg
)
spectrum_dataset = dataset.to_spectrum_dataset(on_region)
assert spectrum_dataset.counts.data[0] == 8
assert spectrum_dataset.data_shape == (1, 1, 1)
assert spectrum_dataset.counts_off.data[0] == 33914
assert_allclose(spectrum_dataset.alpha.data[0], 0.0002143, atol=1e-7)
excess_map = images["counts"] - images["background"]
excess_true = excess_map.get_spectrum(on_region, np.sum).data[0]
excess = spectrum_dataset.excess.data[0]
assert_allclose(excess, excess_true, rtol=1e-3)
assert spectrum_dataset.name != dataset.name
@requires_data()
def test_map_dataset_on_off_to_spectrum_dataset_weights():
e_reco = MapAxis.from_bounds(1, 10, nbin=3, unit="TeV", name="energy")
geom = WcsGeom.create(
skydir=(0, 0), width=(2.5, 2.5), binsz=0.5, axes=[e_reco], frame="galactic"
)
counts = Map.from_geom(geom)
counts.data += 1
counts_off = Map.from_geom(geom)
counts_off.data += 2
acceptance = Map.from_geom(geom)
acceptance.data += 1
acceptance_off = Map.from_geom(geom)
acceptance_off.data += 4
weights = Map.from_geom(geom, dtype="bool")
weights.data[1:, 2:4, 2] = True
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset = MapDatasetOnOff(
counts=counts,
counts_off=counts_off,
acceptance=acceptance,
acceptance_off=acceptance_off,
mask_safe=weights,
gti=gti,
)
on_region = CircleSkyRegion(
center=dataset.counts.geom.center_skydir, radius=1.5 * u.deg
)
spectrum_dataset = dataset.to_spectrum_dataset(on_region)
assert_allclose(spectrum_dataset.counts.data[:, 0, 0], [0, 2, 2])
assert_allclose(spectrum_dataset.counts_off.data[:, 0, 0], [0, 4, 4])
assert_allclose(spectrum_dataset.acceptance.data[:, 0, 0], [0, 0.08, 0.08])
assert_allclose(spectrum_dataset.acceptance_off.data[:, 0, 0], [0, 0.32, 0.32])
assert_allclose(spectrum_dataset.alpha.data[:, 0, 0], [0, 0.25, 0.25])
@requires_data()
def test_map_dataset_on_off_cutout(images):
dataset = get_map_dataset_onoff(images)
gti = GTI.create([0 * u.s], [1 * u.h], reference_time="2010-01-01T00:00:00")
dataset.gti = gti
cutout_dataset = dataset.cutout(
images["counts"].geom.center_skydir, ["1 deg", "1 deg"]
)
assert cutout_dataset.counts.data.shape == (1, 50, 50)
assert cutout_dataset.counts_off.data.shape == (1, 50, 50)
assert cutout_dataset.acceptance.data.shape == (1, 50, 50)
assert cutout_dataset.acceptance_off.data.shape == (1, 50, 50)
assert cutout_dataset.name != dataset.name
def test_map_dataset_on_off_fake(geom):
rad_axis = MapAxis(nodes=np.linspace(0.0, 1.0, 51), unit="deg", name="rad")
energy_true_axis = geom.axes["energy"].copy(name="energy_true")
empty_dataset = MapDatasetOnOff.create(geom, energy_true_axis, rad_axis=rad_axis)
empty_dataset.acceptance.data = 1.0
empty_dataset.acceptance_off.data = 10.0
empty_dataset.acceptance_off.data[0, 50, 50] = 0
background_map = Map.from_geom(geom, data=1)
empty_dataset.fake(background_map, random_state=42)
assert_allclose(empty_dataset.counts.data[0, 50, 50], 0)
assert_allclose(empty_dataset.counts.data.mean(), 0.99445, rtol=1e-3)
assert_allclose(empty_dataset.counts_off.data.mean(), 10.00055, rtol=1e-3)
@requires_data()
def test_map_dataset_on_off_to_image():
axis = MapAxis.from_energy_bounds(1, 10, 2, unit="TeV")
geom = WcsGeom.create(npix=(10, 10), binsz=0.05, axes=[axis])
counts = Map.from_geom(geom, data=np.ones((2, 10, 10)))
    counts_off = Map.from_geom(geom, data=np.ones((2, 10, 10)))
# Classify images, based on training data
#
# Usage:
# 1. create folder with:
# - folder with training data (one folder for each type)
# - folder with images to be classified
# - this script
# 2. set required parameters:
#   - data_dir = (relative) folder with training/validation images ('document_images')
# - epoch = number of passes of the entire training dataset in the machine learning algorithm ('10')
# - path = (relative) folder with images that need to be predicted ('test')
# 3. in terminal: '$ python document_classifier_keras.py -d data_dir -p path [-e 10] '
# 4. results are written to csv file 'predicted_image_types.csv'
# see https://www.tensorflow.org/tutorials/images/classification
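#
# Example layout (illustrative only; class and file names below are placeholders):
#   document_classifier_keras.py
#   document_images/            <- training/validation data, one subfolder per class
#       invoice/   img001.jpg ...
#       letter/    img001.jpg ...
#   test/                       <- images to be classified
#       scan001.jpg ...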
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf
import pathlib
import argparse
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--data_dir", default="document_images",
    help="path to training images")
ap.add_argument("-p", "--path", default="test",
help="path to input images")
ap.add_argument("-e", "--epoch", default="10", type=int,
help="number of epochs")
args = vars(ap.parse_args())
path = args["path"]
data_dir = args["data_dir"]
epoch = args["epoch"]
data_dir = pathlib.Path(data_dir)
subfolders = os.listdir(data_dir)
num_classes = len(subfolders)
# Check if files are valid JPEGs
print("Reading and checking files from subfolders: ", subfolders, " in ", data_dir)
print("no. of subfolders: ",num_classes)
# Filter out corrupted images
# Change folder names accordingly
num_skipped = 0
for folder_name in subfolders:
folder_path = os.path.join(data_dir, folder_name)
for fname in os.listdir(folder_path):
fpath = os.path.join(folder_path, fname)
try:
fobj = open(fpath, "rb")
is_jfif = tf.compat.as_bytes("JFIF") in fobj.peek(10)
finally:
fobj.close()
if not is_jfif:
num_skipped += 1
# Delete corrupted image
os.remove(fpath)
print("- Deleted file ", fpath)
print("Deleted %d images" % num_skipped)
# list no. of files
image_count = len(list(data_dir.glob('*/*.jpg')))
print("Total no of images: ", image_count)
# Create a dataset
# Define some parameters for the loader
batch_size = 32
img_height = 180
img_width = 180
# Create a validation split: 80% of the images for training, and 20% for validation.
train_ds = tf.keras.utils.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="training",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
val_ds = tf.keras.utils.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
class_names = train_ds.class_names
print("class_names: ", class_names)
# Configure the dataset for performance
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
# Standardize the data
# Create the model
model = Sequential([
layers.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
layers.Conv2D(16, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes)
])
# Compile the model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.summary()
# Train the model
epochs = epoch  # number of training epochs, taken from the -e argument
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs
)
# Visualize training results
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# No further optimization applied here; see the tutorial if it is needed (e.g. to reduce overfitting)
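# Hedged sketch (added; not part of the original script): if the curves above show
# overfitting, the tutorial's usual remedies are data augmentation and dropout. The
# block below only defines an augmentation pipeline and does not change the model
# that was already trained; the layer names assume TF >= 2.6 (older versions expose
# them under tf.keras.layers.experimental.preprocessing).
data_augmentation = Sequential([
    layers.RandomFlip("horizontal", input_shape=(img_height, img_width, 3)),
    layers.RandomRotation(0.1),
    layers.RandomZoom(0.1),
])
# To use it, put `data_augmentation` as the first layer of the model, add
# `layers.Dropout(0.2)` before Flatten, then recompile and refit.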
# Predict on new data
# images to classify are read from the folder given by the -p argument (parsed above)
files = os.listdir(path)
# Create csv with predictions
csv = open('predicted_image_types.csv','w')
for f in files:
f = path+'/'+f
img = keras.preprocessing.image.load_img(
f, target_size=(img_height, img_width)
)
img_array = tf.keras.utils.img_to_array(img)
img_array = tf.expand_dims(img_array, 0) # Create a batch
predictions = model.predict(img_array)
score = tf.nn.softmax(predictions[0])
print(
"Image {} most likely belongs to {} with a {:.2f} percent confidence."
.format(f, class_names[np.argmax(score)], 100 * np.max(score))
)
# write result per image
csv.write(str(f))
csv.write(";")
    csv.write(class_names[np.argmax(score)])
    csv.write("\n")  # one row per image
csv.close()
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 7 10:34:49 2019
@author: Zoletnik
Spectral analysis tools for FLAP
"""
import math
import numpy as np
import flap.config
import flap.coordinate
#from .coordinate import *
from scipy import signal
import copy
#import matplotlib.pyplot as plt
def _spectral_calc_interval_selection(d, ref, coordinate,intervals,interval_n):
""" Helper function for spectral and correlation calculation.
        Determines the processing intervals and returns them in a
flap.Intervals object. The intervals will have identical length.
INPUT:
d, ref: flap.DataObjects
If ref is set it is assumed that the selection coordinate step size is identical in d and ref.
coordinate: Coordinate name (string)
intervals: Information of processing intervals.
If dictionary with a single key: {selection coordinate: description})
Key is a coordinate name which can be different from the calculation
coordinate.
Description can be flap.Intervals, flap.DataObject or
a list of two numbers. If it is a data object with data name identical to
the coordinate the error ranges of the data object will be used for
interval. If the data name is not the same as coordinate a coordinate with the
same name will be searched for in the data object and the value_ranges
                   will be used from it to set the intervals.
                   If not a dictionary and not None it is interpreted as the interval
description, the selection coordinate is taken the same as
coordinate.
If None, the whole data interval will be used as a single interval.
interval_n: Minimum number of intervals to use for the processing. These are identical
length intervals inserted into the input interval list.
Returns:
intervals, index_intervals
intervals: The intervals in the coordinate unit (Intervals object)
index_intervals: The index intervals in the data array (Intervals object)
"""
if (type(intervals) is dict):
sel_coordinate = list(intervals.keys())[0]
else:
sel_coordinate = coordinate
if (sel_coordinate != coordinate):
        raise ValueError("At present for spectral calculation the interval selection coordinate should be the same as the calculation coordinate.")
try:
coord = d.get_coordinate_object(coordinate)
except Exception as e:
raise e
try:
calc_int, calc_int_ind, sel_int, sel_int_ind = d.proc_interval_limits(coordinate, intervals=intervals)
except Exception as e:
raise e
intervals_low = sel_int[0]
intervals_high = sel_int[1]
    # This part is commented out as we assume identical coordinate for d and ref
# d_intervals_low = sel_int[0]
# d_intervals_high = sel_int[1]
#
# if (ref is not None):
# try:
# calc_int, calc_int_ind, sel_int, sel_int_ind = ref.proc_interval_limits(coordinate, intervals=intervals)
# except Exception as e:
# raise e
# ref_intervals_low = sel_int[0]
# ref_intervals_high = sel_int[1]
# intervals_low = []
# intervals_high = []
# d_int_low_min = np.amin(d_intervals_low)
# d_int_low_max = np.amax(d_intervals_low)
# for i in range(len(ref_intervals_low)):
# if ((ref_intervals_low[i] >= d_int_low_min) and
# (ref_intervals_low <= d_int_low_max)):
# intervals_low.append(ref_intervals_low[i])
# intervals_high.append(ref_intervals_high[i])
# ref_coord = ref.get_coordinate_object(coordinate)
# if ((math.fabs(ref_coord.start - coord.start) > math.fabs(ref_coord.step[0] ) / 10)
# or (math.fabs(ref_coord.step[0] - coord.step[0]) / math.fabs(ref_coord.step[0]) > 1e-4)
# ):
# raise ValueError("The start and step of the calculating coordinates in the two data objects should be identical.")
# else:
# intervals_low = d_intervals_low
# intervals_high = d_intervals_high
if (len(intervals_low) > 1):
        # Ensuring that the intervals are in ascending order
sort_ind = np.argsort(intervals_low)
intervals_low = intervals_low[sort_ind]
intervals_high = intervals_high[sort_ind]
ind_overlap = np.nonzero(intervals_high[0:-2] > intervals_low[1:-1])[0]
if (len(ind_overlap) != 0):
raise ValueError("Intervals overlap, not suitable for calculation.")
intervals_length = intervals_high - intervals_low
# Determining how many different intervals are available
int_lens = np.ndarray(0,dtype=intervals_low.dtype)
int_num = np.ndarray(0,dtype=np.int32)
margin = abs(coord.step[0])
i = 0
while (True):
ind_new = np.nonzero(intervals_length > 0)[0]
if (len(ind_new) == 0):
break
ind = np.nonzero(np.abs(intervals_length - intervals_length[ind_new[0]]) < margin)[0]
int_lens = np.append(int_lens, intervals_length[ind_new[0]])
int_num = np.append(int_num, len(ind))
intervals_length[ind] = -1
# Sorting in reverse order according to interval length
sort_ind = np.argsort(int_lens)
int_num = np.flip(int_num[sort_ind])
int_lens = np.flip(int_lens[sort_ind])
# Dropping too small intervals
ind_small = np.nonzero(int_lens < int_lens[0] / 2)[0]
if (len(ind_small) != 0):
int_lens = int_lens[0:ind_small[0]]
int_num = int_num[0:ind_small[0]]
# Trying to use the shortest interval as processing length
proc_len = int_lens[-1]
ind = np.nonzero(int_lens >= proc_len)[0]
proc_n = np.sum(int_num[ind])
if (proc_n < interval_n):
# If this is not successful splitting the intervals smaller and smaller
proc_len_start = proc_len
for n_split in range(2,interval_n):
proc_len = proc_len_start / n_split
proc_n = 0
for j in range(len(int_lens)):
proc_n += (int_lens[j] // proc_len) * int_num[j]
if (proc_n >= interval_n):
break
else:
raise ValueError("Could not find "+str(interval_n)+" processing intervals.")
proc_interval_start = np.ndarray(0,dtype=intervals_low.dtype)
proc_interval_end = np.ndarray(0,dtype=intervals_high.dtype)
for i in range(len(intervals_low)):
st = intervals_low[i]
while (st + proc_len <= intervals_high[i] + margin):
proc_interval_start = np.append(proc_interval_start, st)
proc_interval_end = np.append(proc_interval_end, st + proc_len)
st += proc_len
if (proc_interval_start.size < interval_n):
raise RuntimeError("Internal error in finding processing intervals.")
proc_interval_len = proc_len
else:
proc_interval_len = (intervals_high[0] - intervals_low[0]) / interval_n
proc_interval_start = np.arange(interval_n) * proc_interval_len + intervals_low[0]
proc_interval_end = proc_interval_start + proc_interval_len
if (coord.step[0] > 0):
proc_interval_index_start = np.round((proc_interval_start - coord.start) / coord.step[0]).astype(np.int32) + 1
proc_interval_index_len = int(np.round(proc_interval_len / coord.step[0])) - 2
proc_interval_index_end = proc_interval_index_start + proc_interval_index_len
else:
step = -coord.step[0]
#npoint = d.shape[coord.dimension_list[0]] #UNUSED VARIABLE
proc_interval_index_len = int(round(proc_interval_len / step)) - 2
proc_interval_index_start = np.round((proc_interval_end - coord.start) / coord.step[0]).astype(np.int32) + 1
proc_interval_index_end = proc_interval_index_start + proc_interval_index_len
return flap.coordinate.Intervals(proc_interval_start, proc_interval_end), \
flap.coordinate.Intervals(proc_interval_index_start, proc_interval_index_end),
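# Illustrative usage sketch (added; not part of the original FLAP module). `d` is assumed
# to be a flap.DataObject with an equidistant 'Time' coordinate; the interval limits are
# placeholder values in the time unit of `d`.
def _example_interval_selection(d):
    # dictionary form: {selection coordinate: flap.Intervals built from start/stop arrays}
    intervals = {'Time': flap.coordinate.Intervals(np.array([0.0, 0.2]),
                                                   np.array([0.1, 0.3]))}
    # a plain [start, stop] list would also be accepted; the selection coordinate is then
    # taken to be the calculation coordinate itself
    return _spectral_calc_interval_selection(d, None, 'Time', intervals, 8)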
def trend_removal_func(d,ax, trend, x=None, return_trend=False, return_poly=False):
""" This function makes the _trend_removal internal function public
"""
return _trend_removal(d, ax, trend, x=x, return_trend=return_trend, return_poly=return_poly)
def _trend_removal(d, ax, trend, x=None, return_trend=False, return_poly=False):
"""
Removes the trend from the data. Operates on one axis between two indices of the data array.
INPUT:
d: Data array (Numpy array)
ax: The axis along which to operate (0...)
trend: Trend removal description. A list, string or None.
None: Don't remove trend.
Strings:
'Mean': subtract mean
Lists:
['Poly', n]: Fit an n order polynomial to the data and subtract.
x: X axis. If not used equidistant will be assumed.
return_trend: If True the trend data is returned
return_poly: Return polynomial coefficients for poly trend removal. The coefficients will
be in axis ax.
RETURN value:
If return_trend == True the trend data is returned
If (return_poly == True) and polyfit then return polynomial parameters
Otherwise return None
The input array is modified.
"""
if (trend is None):
return
if ((type(trend) is list) and (trend[0] == 'Poly')):
pass
else:
if (return_poly):
raise ValueError("Polynomial trend fit parameters can be returned only for polynomial trend removal.")
if (type(trend) is str):
if (trend == 'Mean'):
d[:] = signal.detrend(d, axis=ax, type='constant')
return
else:
raise ValueError("Unknown trend removal method: "+trend)
elif (type(trend) is list):
if ((len(trend) == 2) and (trend[0] == 'Poly')):
try:
order = int(trend[1])
except ValueError:
raise ValueError("Bad order in polynomial trend removal.")
# This is a simple solution but not very effective.
# Flattens all dimensions except x and handles all functions one-by-one
# Finally rearranges the data back to the original shape
if (x is None):
_x = np.arange(d.shape[ax],dtype=float)
else:
_x = copy.deepcopy(x)
if ((_x.dtype.kind == 'i') or (_x.dtype.kind == 'u')):
_x = np.asarray(_x,'float')
if (d.ndim > 1):
if (ax != 0):
d = np.swapaxes(d,ax,0)
orig_shape = d.shape
if (d.ndim > 2):
new_shape = tuple([d.shape[0], d.size // d.shape[0]])
d = d.reshape(new_shape,order='F')
if (return_trend):
trend_data = np.ndarray(new_shape,dtype=d.dtype)
else:
new_shape = d.shape
xx = np.zeros((new_shape[0],order), dtype=float)
for i in range(order):
xx[:,i] = _x ** (i + 1)
p = np.polynomial.polynomial.polyfit(_x,d,order)
for i in range(new_shape[1]):
tr = p[0,i]
for j in range(order):
tr = tr + p[j + 1, i] * xx[:,j]
d[:,i] = d[:,i] - tr.astype(d.dtype)
if (return_trend):
trend_data[:,i] = tr.astype(d.dtype)
if (len(orig_shape) > 2):
d = d.reshape(orig_shape,order='F')
if (return_trend):
trend_data = trend_data.reshape(orig_shape,order='F')
if (return_poly):
p_shape = list(orig_shape)
p_shape[0] = p.shape[0]
p = p.reshape(p_shape,order='F')
if (ax != 0):
d = np.swapaxes(d,ax,0)
if (return_trend):
trend_data = np.swapaxes(trend_data,ax,0)
if (return_poly):
p = np.swapaxes(p,ax,0)
if (return_trend):
return trend_data
elif(return_poly):
return p
else:
return
else:
p = np.polynomial.polynomial.polyfit(_x,d,order)
d = d - p[0]
for i in range(order):
d = d - p[i + 1] * _x ** (i + 1)
return
raise ValueError("Unknown trend removal method.")
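# Illustrative usage sketch (added; not part of the original FLAP module). The synthetic
# 2-D array below stands in for (time, channel) data; trend removal works in place
# along axis 0 for multi-dimensional input.
def _example_trend_removal():
    t = np.linspace(0.0, 1.0, 1000)
    data = np.stack([3.0 + 2.0 * t - 5.0 * t ** 2 + 0.1 * np.random.randn(t.size),
                     1.0 - t + 2.0 * t ** 2 + 0.1 * np.random.randn(t.size)], axis=1)
    # ['Poly', 2] fits and subtracts a 2nd order polynomial along axis 0
    trend_removal_func(data, 0, ['Poly', 2], x=t)
    return data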
def _apsd(d, coordinate=None, intervals=None, options=None):
"""
    Auto power Spectral Density calculation for the data object d.
Returns a data object with the coordinate replaced by frequency or wavenumber.
The power spectrum is calculated in multiple intervals (described by slicing)
and the mean and variance will be returned.
INPUT:
d: A flap.DataObject.
coordinate: The name of the coordinate (string) along which to calculate APSD.
This coordinate should change only along one data dimension and should be equidistant.
                    This and all other coordinates changing along the data dimension of
this coordinate will be removed. A new coordinate with name
Frequency/Wavenumber will be added. The unit will be
derived from the unit of the coordinate (e.g., Hz cm-1, m-1)
intervals: Information of processing intervals.
If dictionary with a single key: {selection coordinate: description})
Key is a coordinate name which can be different from the calculation
coordinate.
Description can be flap.Intervals, flap.DataObject or
a list of two numbers. If it is a data object with data name identical to
the coordinate the error ranges of the data object will be used for
interval. If the data name is not the same as coordinate a coordinate with the
same name will be searched for in the data object and the value_ranges
                   will be used from it to set the intervals.
If not a dictionary and not None it is interpreted as the interval
description, the selection coordinate is taken the same as
coordinate.
If None, the whole data interval will be used as a single interval.
options: Dictionary. (Keys can be abbreviated)
'Wavenumber' : True/False. Will use 2*Pi*f for the output coordinate scale, this is useful for
wavenumber calculation.
'Resolution': Output resolution in the unit of the output coordinate.
'Range': Output range in the unit of the output coordinate.
'Logarithmic': True/False. If True will create logarithmic frequency binning.
'Interval_n': Minimum number of intervals to use for the processing. These are identical
length intervals inserted into the input interval list. Default is 8.
'Error calculation' : True/False. Calculate or not error. Omitting error calculation
increases speed. If Interval_n is 1 no error calculation is done.
'Trend removal': Trend removal description (see also _trend_removal()). A list, string or None.
None: Don't remove trend.
Strings:
'Mean': subtract mean
Lists:
['Poly', n]: Fit an n order polynomial to the data and subtract.
Trend removal will be applied to each interval separately.
'Hanning': True/False Use a Hanning window.
"""
if (d.data is None):
raise ValueError("Cannot do spectral analysis without data.")
default_options = {'Wavenumber': False,
'Resolution': None,
'Range': None,
'Logarithmic': False,
'Interval_n': 8,
'Trend removal': ['Poly', 2],
'Error calculation': True,
'Hanning' : True
}
_options = flap.config.merge_options(default_options, options, data_source=d.data_source, section='PS')
if (coordinate is None):
c_names = d.coordinate_names()
try:
c_names.index('Time')
_coordinate = 'Time'
except ValueError:
raise ValueError("No coordinate is given for spectrum calculation and no Time coordinate found.")
else:
_coordinate = coordinate
trend = _options['Trend removal']
wavenumber = _options['Wavenumber']
interval_n = _options['Interval_n']
log_scale = _options['Logarithmic']
hanning = _options['Hanning']
try:
coord_obj = d.get_coordinate_object(_coordinate)
except Exception as e:
raise e
if (len(coord_obj.dimension_list) != 1):
raise ValueError("Spectrum calculation is possible only along coordinates changing along one dimension.")
if (not coord_obj.mode.equidistant):
raise ValueError("Spectrum calculation is possible only along equidistant coordinates.")
try:
intervals, index_intervals = _spectral_calc_interval_selection(d,None,_coordinate,intervals,interval_n)
except Exception as e:
raise e
interval_n, start_ind = intervals.interval_number()
calc_error = _options['Error calculation']
if (interval_n < 2):
calc_error = False
int_low, int_high = intervals.interval_limits()
res_nat = 1./(int_high[0] - int_low[0])
range_nat = [0., 1./float(coord_obj.step[0])/2]
index_int_low, index_int_high = index_intervals.interval_limits()
interval_sample_n = (index_int_high[0] - index_int_low[0]) + 1
# Determining the output array shape.
out_shape = list(d.shape)
# Determining two pairs of index tuples for copying the data after PS calculation
# of one time interval. For complex data we need to copy in two steps
proc_dim = coord_obj.dimension_list[0]
if (d.data.dtype.kind == 'c' ):
# For complex data negative frequencies are also valuable
# n_apsd is the number of valuable points in the spectrum after rearrangement but before
        # the range and resolution transformation
n_apsd = index_int_high[0] - index_int_low[0] + 1
# These will be tuples used in reorganizing the raw FFT spectra into continuous
# frequency scale. We need this as for complex data the negative frequencies are
# in the second half of the array
ind_in1 = [slice(0,d) for d in d.shape]
ind_in2 = copy.deepcopy(ind_in1)
ind_out1 = copy.deepcopy(ind_in1)
ind_out2 = copy.deepcopy(ind_in1)
ind_in1[proc_dim] = slice(0,int(n_apsd/2))
ind_out1[proc_dim] = slice(n_apsd-int(n_apsd/2), n_apsd)
# zero_ind is the index where the 0 frequency will be after rearranging the spectrum
zero_ind = n_apsd - int(n_apsd/2)
ind_in2[proc_dim] = slice(int(n_apsd/2),n_apsd)
ind_out2[proc_dim] = slice(0, n_apsd-int(n_apsd/2))
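        # Illustrative note (added): e.g. for n_apsd = 8 the raw FFT bin order
        # [0, 1, 2, 3, -4, -3, -2, -1] (in units of the frequency step) becomes
        # [-4, -3, -2, -1, 0, 1, 2, 3] after the copy steps using these index tuples,
        # so zero_ind = 4 points at the f = 0 bin.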
else:
n_apsd = int((index_int_high[0] - index_int_low[0] + 1) / 2)
ind_in1 = [slice(0,d) for d in d.shape]
ind_in1[proc_dim] = slice(0,n_apsd)
zero_ind = 0
ind_out1 = None
ind_in2 = None
ind_out2 = None
# Calculating the binning boxes from the resolution and range and related indices
ind_bin, ind_slice, out_data_num, ind_nonzero, index_nonzero, ind_zero, nf_out, f_cent, \
fcent_index_range, res = _spectrum_binning_indices(wavenumber,
n_apsd,
_options,
zero_ind,
res_nat,
range_nat,
log_scale,
out_shape,
proc_dim)
out_shape[proc_dim] = nf_out
# These arrays will collect the data and the square of the data to enable error calculation
out_data = np.zeros(tuple(out_shape), dtype=float)
if (calc_error):
out_data_square = np.zeros(tuple(out_shape), dtype=float)
# This is a tuple to index into the original data array to get data for processing
ind_proc = [slice(0,d) for d in d.shape]
# Number of processing intervals
n_proc_int = len(int_low)
if (hanning):
hanning_window = np.hanning(index_int_high[0] - index_int_low[0] + 1)
hanning_window /= math.sqrt(3./8)
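        # Note (added): the mean of the squared Hann window is 3/8, so dividing by
        # sqrt(3/8) keeps the average signal power unchanged after windowing.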
if (len(d.shape) > 1):
han_sh = [1] * len(d.shape)
han_sh[proc_dim] = hanning_window.size
hanning_window = hanning_window.reshape(han_sh)
# We need to determine a shape to which the out_data_num array will be broadcasted to
# allow dividing all spectra. bs is this shape
if (ind_nonzero is not None):
bs = [1]*out_data.ndim
bs[proc_dim] = len(index_nonzero)
bs= tuple(bs)
else:
bs = [1]*out_data.ndim
bs[proc_dim] = out_data.shape[proc_dim]
bs = tuple(bs)
for i_int in range(n_proc_int):
# Setting the index range of the actual interval
ind_proc[proc_dim] = slice(index_int_low[i_int], index_int_high[i_int] + 1)
# Getting the data for processing, this might be multi-dim
data_proc = copy.deepcopy(d.data[tuple(ind_proc)])
if (trend is not None):
try:
_trend_removal(data_proc,proc_dim,trend)
except Exception as e:
raise e
if (hanning):
data_proc *= hanning_window
# Calculating APS on natural resolution, full frequency scale
dfft = np.fft.fft(data_proc,axis=proc_dim)
dps = (dfft.conjugate() * dfft).real
# Rearranging the negative frequencies
if (ind_in2 is not None):
dps1 = np.empty(dps.shape,dtype=dps.dtype)
dps1[tuple(ind_out1)] = dps[tuple(ind_in1)]
dps1[tuple(ind_out2)] = dps[tuple(ind_in2)]
dps = dps1
else:
dps = dps[tuple(ind_in1)]
# Cutting the range
if ((ind_slice) is not None):
dps = dps[tuple(ind_slice)]
# Binning the spectrum and summing up the time intervals
if (ind_bin is not None):
out_data_interval = np.zeros(tuple(out_shape), dtype=float)
np.add.at(out_data_interval, tuple(ind_bin), dps)
else:
out_data_interval = dps
# Dividing by the number of points in each bin
if (ind_nonzero is not None):
out_data_interval[tuple(ind_nonzero)] /= out_data_num[index_nonzero].reshape(bs)
else:
out_data_interval /= out_data_num.reshape(bs)
out_data_interval /= interval_sample_n
out_data += out_data_interval
if (calc_error):
out_data_square += out_data_interval ** 2
out_data /= n_proc_int
if (calc_error):
out_err = np.sqrt(np.clip(out_data_square / n_proc_int - out_data ** 2,
0,None)) / math.sqrt(n_proc_int)
else:
out_err = None
# If there are frequency bins without data setting them to np.NaN
if (ind_nonzero is not None):
out_data[tuple(ind_zero)] = np.NaN
# We create the new data object with this trick as data_object.py cannot be imported
d_out = type(d)(data_array=out_data,
error=out_err,
coordinates=d.coordinates,
exp_id=d.exp_id,
data_unit=flap.coordinate.Unit("Spectral density"))
if (wavenumber):
out_name = 'Wavenumber'
out_unit = '1/'+coord_obj.unit.unit
res *= 2 * math.pi
#fcent *= 2* math.pi #UNUSED VARIABLE
else:
out_name = 'Frequency'
out_unit = 'Hz'
# Finding all coordinates which have common dimension with the converted one.
# These will be deleted.
del_coord_list = []
for c in d_out.coordinates:
try:
c.dimension_list.index(proc_dim)
del_coord_list.append(c.unit.name)
except ValueError:
pass
for c in del_coord_list:
d_out.del_coordinate(c)
if (log_scale):
c = flap.coordinate.Coordinate(name = out_name,
unit = out_unit,
mode = flap.coordinate.CoordinateMode(equidistant=False),
shape = [f_cent.size],
values = f_cent,
dimension_list=[proc_dim])
else:
c = flap.coordinate.Coordinate(name = out_name,
unit = out_unit,
mode = flap.coordinate.CoordinateMode(equidistant=True),
shape = [],
start = (fcent_index_range[0] - zero_ind) * res,
step = res,
dimension_list=[proc_dim])
d_out.add_coordinate_object(c,index=0)
return d_out
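# A minimal usage sketch (assuming the public flap.apsd() wrapper forwards its
# arguments to the routine above; the data object name is hypothetical):
#
#   spec = flap.apsd('TESTDATA',
#                    coordinate='Time',
#                    options={'Resolution': 1e3,      # in output coordinate units
#                             'Range': [1e3, 1e6],
#                             'Logarithmic': True,
#                             'Interval_n': 8,
#                             'Hanning': True})
#
# The returned object carries a 'Frequency' (or 'Wavenumber') coordinate added
# at the end of the routine above.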
def _spectrum_binning_indices(wavenumber, n_apsd, _options, zero_ind, res_nat, range_nat, log_scale, out_shape, proc_dim):
""" Helper routine for apsd and cpsd for calculating numbers and indices
for processing the spectra.
Returns: ind_bin, ind_slice, out_data_num, ind_nonzero, index_nonzero, ind_zero, nf_out,
f_cent, fcent_index_range, res
"""
# Calculating the binning boxes from the resolution and range
fscale_nat = (np.arange(n_apsd,dtype=float) - zero_ind) * res_nat
if (log_scale):
if (_options['Range'] is not None):
rang = _options['Range']
if ((type(rang) is not list) or (len(rang) != 2)):
raise ValueError("Invalid spectrum range setting.")
if (_options['Resolution'] is not None):
res = _options['Resolution']
else:
res = rang[0] / 10
else:
if (_options['Resolution'] is not None):
res = _options['Resolution']
rang = [res, range_nat[1]]
else:
res = res_nat
rang = [res_nat * 10, range_nat[1]]
if (rang[0] >= rang[1]):
raise ValueError("Illegal frequency range.")
if (rang[0] <= 0):
raise ValueError("Illegal frequency range for logarithmic frequency resolution.")
# Setting the lower and upper limit of the first box so as f_high-f_low=res and
# (log10(f_low)+log10(f_high))/2 = log10(range[0])
f_low = (-res + math.sqrt(res ** 2 + 4 * rang[0] ** 2))/ 2
f_high = rang[0] ** 2 / f_low
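# (These two expressions follow from solving f_high - f_low = res together with
#  f_low * f_high = rang[0] ** 2, i.e. the quadratic f_low ** 2 + res * f_low - rang[0] ** 2 = 0.)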
# Setting up a box list which is linear on the log scale
delta = math.log10(f_high/f_low)
nf_out = (math.log10(rang[1]) - math.log10(f_low)) // delta + 2
f_box = 10 ** (math.log10(f_low) + np.arange(nf_out) * delta)
# if (f_box[-1] > range_nat[1]):
# f_box[-1] = range_nat[1]
# Box index for the original spectrum points
apsd_index = np.digitize(fscale_nat, f_box) - 1
ind_out_low = np.nonzero(apsd_index < 0)[0]
ind_out_high = np.nonzero(apsd_index >= f_box.size - 1)[0]
if ((ind_out_low.size != 0) or (ind_out_high.size != 0)):
if (ind_out_low.size == 0):
slice_start = 0
else:
slice_start = ind_out_low[-1]+1
if (ind_out_high.size == 0):
slice_end = fscale_nat.size
else:
slice_end = ind_out_high[0]
apsd_slice = slice(slice_start, slice_end)
ind_slice = [slice(0,d) for d in out_shape]
ind_slice[proc_dim] = apsd_slice
apsd_index = apsd_index[apsd_slice]
else:
apsd_slice = None
ind_slice = None
f_cent = np.sqrt(f_box[0:-1] * f_box[1:])
nf_out = f_cent.size
out_data_num = np.zeros(nf_out,dtype=np.int32)
np.add.at(out_data_num, apsd_index, np.int32(1))
index_nonzero = np.nonzero(out_data_num != 0)[0]
if (index_nonzero.size == out_data_num.size):
ind_nonzero = None
ind_zero = None
else:
ind_nonzero = [slice(0,d) for d in out_shape]
ind_nonzero[proc_dim] = index_nonzero
index_zero = np.nonzero(out_data_num == 0)[0]
ind_zero = [slice(0,d) for d in out_shape]
ind_zero[proc_dim] = index_zero
ind_bin = [slice(0,d) for d in out_shape]
ind_bin[proc_dim] = apsd_index
fcent_index_range = None
else:
# Linear scale
if (_options['Resolution'] is not None):
if (wavenumber):
res = _options['Resolution']/2/math.pi
else:
res = _options['Resolution']
if (res > range_nat[1] / 2):
raise ValueError("Requested resolution is too coarse.")
else:
res = res_nat
res_bin = int(round(res / res_nat))
if (res_bin < 1):
res_bin = 1
res = res_nat * res_bin
# Determining the number of bins
nf_out = int(n_apsd / res_bin)
# The index range in the apsd array where the central frequencies are available
if (res_bin == 1):
fcent_index_range = [0, n_apsd - 1]
nf_out = n_apsd
else:
fcent_index_range = [zero_ind % res_bin,
(n_apsd - 1 - (zero_ind % res_bin)) // res_bin * res_bin
+ zero_ind % res_bin]
nf_out = (fcent_index_range[1] - fcent_index_range[0]) // res_bin + 1
if (_options['Range'] is not None):
rang = _options['Range']
if ((type(rang) is not list) or (len(rang) != 2)):
raise ValueError("Invalid spectrum range setting.")
if (rang[0] >= rang[1]):
raise ValueError("Illegal frequency range.")
if (fcent_index_range[0] < rang[0] / res_nat + zero_ind):
fcent_index_range[0] = int(round(rang[0] / res)) * res_bin + zero_ind
if (fcent_index_range[1] > rang[1] / res_nat + zero_ind):
fcent_index_range[1] = int(round(rang[1] / res)) * res_bin + zero_ind
nf_out = (fcent_index_range[1] - fcent_index_range[0]) // res_bin + 1
if (nf_out < 3):
raise ValueError("Too coarse spectrum resolution.")
if ((fcent_index_range[0] < 0) or (fcent_index_range[0] > n_apsd - 1) \
or (fcent_index_range[1] < 0) or (fcent_index_range[1] > n_apsd - 1)):
raise ValueError("Spectrum axis range is outside of natural ranges.")
# This slice will cut the necessary part from the raw APSD spectrum
apsd_slice = slice(fcent_index_range[0] - res_bin // 2,
fcent_index_range[1] + (res_bin - res_bin // 2))
# A full box start is this number of apsd spectrum points before apsd_slice.start
start_shift = 0
if (apsd_slice.start < 0):
start_shift = - apsd_slice.start
apsd_slice = slice(0, apsd_slice.stop)
if (apsd_slice.stop > n_apsd):
apsd_slice = slice(apsd_slice.start, n_apsd)
# This index array will contain the box index for each APSD spectral point remaining after
# the above slice
if (res_bin != 1):
apsd_index = (np.arange(apsd_slice.stop - apsd_slice.start,dtype=np.int32)
+ start_shift) // res_bin
else:
apsd_index = None
if (apsd_slice is not None):
ind_slice = [slice(0,d) for d in out_shape]
ind_slice[proc_dim] = apsd_slice
else:
ind_slice = None
if (apsd_index is not None):
ind_bin = [slice(0,d) for d in out_shape]
ind_bin[proc_dim] = apsd_index
out_data_num = np.zeros(nf_out,dtype=np.int32)
np.add.at(out_data_num, apsd_index, np.int32(1))
else:
ind_bin = None
out_data_num = np.zeros(nf_out,dtype=np.int32) + 1
ind_nonzero = None
index_nonzero = None
ind_zero = None
f_cent = None
return ind_bin, ind_slice, out_data_num, ind_nonzero, index_nonzero, ind_zero, nf_out, \
f_cent,fcent_index_range, res
def _cpsd(d, ref=None, coordinate=None, intervals=None, options=None):
"""
Complex Cross Power Spectrum calculation for the data object d taking d_ref as reference.
If ref is not set d is used as reference, that is all spectra are calculated within d.
Calculates all spectra between all signals in ref and d, but not inside d and ref.
d and ref both should have the same equidistant coordinate with equal sampling points.
Returns a data object with dimension number d.dim+ref.dim-1. The coordinate is replaced
by frequency or wavenumber.
The spectrum is calculated in multiple intervals (described by slicing)
and the mean and variance will be returned.
INPUT:
d: A flap.DataObject.
ref: Another flap.DataObject
coordinate: The name of the coordinate (string) along which to calculate CPSD.
This coordinate should change only along one data dimension and should be equidistant.
This and all other cordinates changing along the data dimension of
this coordinate will be removed. A new coordinate with name
Frequency/Wavenumber will be added. The unit will be
derived from the unit of the coordinate (e.g., Hz cm-1, m-1)
intervals: Information of processing intervals.
If dictionary with a single key: {selection coordinate: description}
Key is a coordinate name which can be different from the calculation
coordinate.
Description can be flap.Intervals, flap.DataObject or
a list of two numbers. If it is a data object with data name identical to
the coordinate the error ranges of the data object will be used for
interval. If the data name is not the same as coordinate a coordinate with the
same name will be searched for in the data object and the value_ranges
will be used from it to set the intervals.
If not a dictionary and not None it is interpreted as the interval
description, the selection coordinate is taken the same as
coordinate.
If None, the whole data interval will be used as a single interval.
options: Dictionary. (Keys can be abbreviated)
'Wavenumber' : True/False. Will use 2*Pi*f for the output coordinate scale, this is useful for
wavenumber calculation.
'Resolution': Output resolution in the unit of the output coordinate.
'Range': Output range in the unit of the output coordinate.
'Logarithmic': True/False. If True will create logarithmic frequency binning.
'Interval_n': Minimum number of intervals to use for the processing. These are identical
length intervals inserted into the input interval list. Default is 8.
'Error calculation' : True/False. Calculate or not error. Omitting error calculation
increases speed. If Interval_n is 1 no error calculation is done.
'Trend removal': Trend removal description (see also _trend_removal()). A list, string or None.
None: Don't remove trend.
Strings:
'mean': subtract mean
Lists:
['Poly', n]: Fit an n order polynomial to the data and subtract.
Trend removal will be applied to each interval separately.
'Hanning': True/False Use a Hanning window.
'Normalize': True/False. Normalize the crosspower spectrum, that is, return the coherency.
Return value:
Three data objects:
spectrum, phase, confidence
spectrum: The complex power spectrum or coherency if options['Normalize'] is True
The error will contain the confidence level.
"""
if (d.data is None):
raise ValueError("Cannot do spectral analysis without data.")
default_options = {'Wavenumber': False,
'Resolution': None,
'Range': None,
'Logarithmic': False,
'Interval_n': 8,
'Trend removal': ['Poly', 2],
'Hanning' : True,
'Error calculation': True,
'Normalize': False
}
_options = flap.config.merge_options(default_options, options, data_source=d.data_source, section='PS')
if (coordinate is None):
c_names = d.coordinate_names()
try:
c_names.index('Time')
_coordinate = 'Time'
except ValueError:
raise ValueError("No coordinate is given for spectrum calculation and no Time coordinate found.")
else:
_coordinate = coordinate
trend = _options['Trend removal']
wavenumber = _options['Wavenumber']
interval_n = _options['Interval_n']
log_scale = _options['Logarithmic']
hanning = _options['Hanning']
norm = _options['Normalize']
error_calc = _options['Error calculation']
try:
coord_obj = d.get_coordinate_object(_coordinate)
except Exception as e:
raise e
if (len(coord_obj.dimension_list) != 1):
raise ValueError("Spectrum calculation is possible only along coordinates changing in one dimension.")
if (not coord_obj.mode.equidistant):
raise ValueError("Spectrum calculation is possible only along equidistant coordinates.")
if (ref is None):
_ref = d
ref_coord_obj = coord_obj
try:
intervals, index_intervals = _spectral_calc_interval_selection(d,
None,
_coordinate,
intervals,
interval_n)
except Exception as e:
raise e
else:
_ref = ref
try:
ref_coord_obj = _ref.get_coordinate_object(_coordinate)
except Exception as e:
raise e
if (len(ref_coord_obj.dimension_list) != 1):
raise ValueError("Spectrum calculation is possible only along coordinates changing in one dimension (ref).")
if (not ref_coord_obj.mode.equidistant):
raise ValueError("Spectrum calculation is possible only along equidistant coordinates (ref).")
if (math.fabs(ref_coord_obj.step[0] - coord_obj.step[0]) * d.shape[coord_obj.dimension_list[0]] \
> math.fabs(ref_coord_obj.step[0])):
raise ValueError("Incompatible coordinate step sizes." )
if (math.fabs(ref_coord_obj.start - coord_obj.start) > math.fabs(coord_obj.step[0])):
raise ValueError("Incompatible coordinate start values." )
try:
intervals, index_intervals = _spectral_calc_interval_selection(d,
_ref,
_coordinate,
intervals,
interval_n)
except Exception as e:
raise e
interval_n, start_ind = intervals.interval_number()
int_low, int_high = intervals.interval_limits()
res_nat = 1./(int_high[0] - int_low[0])
range_nat = [0., 1./float(coord_obj.step[0])/2]
index_int_low, index_int_high = index_intervals.interval_limits()
interval_sample_n = (index_int_high[0] - index_int_low[0]) + 1
# The processing dimensions in the two objects
proc_dim = coord_obj.dimension_list[0]
proc_dim_ref = ref_coord_obj.dimension_list[0]
# Determining the output array shape. First the d object dimensions will come, the
# spectrum scale will be proc_dim. Then the ref dimensions will come with proc_dim_ref removed.
# The size of the output array in the processing dimension will be entered later
out_shape = list(d.shape)
out_shape_add = list(_ref.shape)
del out_shape_add[proc_dim_ref]
out_shape += out_shape_add
proc_dim_out = proc_dim
# Flag to show whether the APSDs should be calculated
aps_calc = error_calc or norm
# Determining two pairs of index tuples for copying the data after PS calculation
# of one time interval. For complex data we need to copy in two steps
if ((d.data.dtype.kind == 'c' ) or (_ref.data.dtype.kind == 'c')):
# For complex data negative frequencies are also valuable
# n_apsd is the number of valuable points in the spectrum after rearrangement but before
# the range and resolution transformation
n_apsd = index_int_high[0] - index_int_low[0] + 1 #THIS WAS n_cpsd BEFORE, WOULD HAVE CAUSED AN ERROR
# These will be tuples used in reorganizing the raw FFT spectra into continuous
# frequency scale. We need this as for complex data the negative frequencies are
# in the second half of the array
ind_in1 = [slice(0,d) for d in out_shape]
ind_in2 = copy.deepcopy(ind_in1)
ind_out1 = copy.deepcopy(ind_in1)
ind_out2 = copy.deepcopy(ind_in1)
ind_in1[proc_dim_out] = slice(0,int(n_apsd/2))
ind_out1[proc_dim_out] = slice(n_apsd-int(n_apsd/2), n_apsd)
# zero_ind is the index where the 0 frequency will be after rearranging the spectrum
zero_ind = n_apsd - int(n_apsd/2)
ind_in2[proc_dim_out] = slice(int(n_apsd/2),n_apsd)
ind_out2[proc_dim_out] = slice(0, n_apsd-int(n_apsd/2))
if (aps_calc):
ind_in1_apsd = [slice(0,ds) for ds in d.shape]
ind_in2_apsd = copy.deepcopy(ind_in1_apsd)
ind_out1_apsd = copy.deepcopy(ind_in1_apsd)
ind_out2_apsd = copy.deepcopy(ind_in1_apsd)
ind_in1_apsd[proc_dim] = slice(0,int(n_apsd/2))
ind_in2_apsd[proc_dim] = slice(int(n_apsd/2),n_apsd)
ind_out1_apsd[proc_dim] = slice(n_apsd-int(n_apsd/2), n_apsd)
ind_out2_apsd[proc_dim] = slice(0, n_apsd-int(n_apsd/2))
ind_in1_apsd_ref = [slice(0,ds) for ds in _ref.shape]
ind_in2_apsd_ref = copy.deepcopy(ind_in1_apsd_ref)
ind_out1_apsd_ref = copy.deepcopy(ind_in1_apsd_ref)
ind_out2_apsd_ref = copy.deepcopy(ind_in1_apsd_ref)
ind_in1_apsd_ref[proc_dim_ref] = slice(0,int(n_apsd/2))
ind_in2_apsd_ref[proc_dim_ref] = slice(int(n_apsd/2),n_apsd)
ind_out1_apsd_ref[proc_dim_ref] = slice(n_apsd-int(n_apsd/2), n_apsd)
ind_out2_apsd_ref[proc_dim_ref] = slice(0, n_apsd-int(n_apsd/2))
else:
n_apsd = int((index_int_high[0] - index_int_low[0] + 1) / 2)
ind_in1 = [slice(0,ds) for ds in out_shape]
ind_in1[proc_dim_out] = slice(0,n_apsd)
zero_ind = 0
ind_out1 = None
ind_in2 = None
ind_out2 = None
if (aps_calc):
ind_in1_apsd = [slice(0,ds) for ds in d.shape]
ind_in1_apsd[proc_dim] = slice(0,n_apsd)
ind_in1_apsd_ref = [slice(0,ds) for ds in _ref.shape]
ind_in1_apsd_ref[proc_dim_ref] = slice(0,n_apsd)
ind_bin, ind_slice, out_data_num, ind_nonzero, index_nonzero, ind_zero, nf_out, f_cent, \
fcent_index_range, res = _spectrum_binning_indices(wavenumber, n_apsd,
_options,
zero_ind,
res_nat,
range_nat,
log_scale,
out_shape,
proc_dim_out)
if (aps_calc):
if (ind_slice is not None):
ind_slice_apsd = [slice(0,ds) for ds in d.shape]
ind_slice_apsd[proc_dim] = ind_slice[proc_dim_out]
ind_slice_apsd_ref = [slice(0,ds) for ds in _ref.shape]
ind_slice_apsd_ref[proc_dim_ref] = ind_slice[proc_dim_out]
else:
ind_slice_apsd = None
ind_slice_apsd_ref = None
if (ind_bin is not None):
ind_bin_apsd = [slice(0,ds) for ds in d.shape]
ind_bin_apsd[proc_dim] = ind_bin[proc_dim_out]
ind_bin_apsd_ref = [slice(0,ds) for ds in _ref.shape]
ind_bin_apsd_ref[proc_dim_ref] = ind_bin[proc_dim_out]
else:
ind_bin_apsd = None
ind_bin_apsd_ref = None
if (ind_nonzero is not None):
ind_nonzero_apsd = [slice(0,ds) for ds in d.shape]
ind_nonzero_apsd[proc_dim] = index_nonzero
ind_nonzero_apsd_ref = [slice(0,ds) for ds in _ref.shape]
ind_nonzero_apsd_ref[proc_dim_ref] = index_nonzero
else:
ind_nonzero_apsd = None
ind_nonzero_apsd_ref = None
out_shape[proc_dim_out] = nf_out
# This will collect the output data
out_data = np.zeros(tuple(out_shape), dtype=complex)
# These will collect the autospectra
if (aps_calc):
apsd_shape = list(d.shape)
apsd_shape[proc_dim] = nf_out
apsd_ref_shape = list(_ref.shape)
apsd_ref_shape[proc_dim_ref] = nf_out
apsd = np.zeros(apsd_shape, dtype=float)
apsd_ref = np.zeros(apsd_ref_shape, dtype=float)
# This is a tuple to index into the original data arrays to get data for processing
ind_proc = [slice(0,ds) for ds in d.shape]
ind_proc_ref = [slice(0,ds) for ds in _ref.shape]
# Number of processing intervals
n_proc_int = len(int_low)
if (hanning):
hanning_window = np.hanning(index_int_high[0] - index_int_low[0] + 1)
hanning_window /= math.sqrt(3./8)
if (len(d.shape) > 1):
han_sh = [1] * len(d.shape)
han_sh[proc_dim] = hanning_window.size
hanning_window = hanning_window.reshape(han_sh)
if (len(_ref.shape) > 1):
han_sh = [1] * len(_ref.shape)
han_sh[proc_dim_ref] = hanning_window.size
hanning_window_ref = hanning_window.reshape(han_sh)
# We need to determine a shape to which the out_data_num array will be broadcasted to
# allow dividing all spectra. bs is this shape
if (ind_nonzero is not None):
bs = [1]*out_data.ndim
bs[proc_dim_out] = len(index_nonzero)
bs= tuple(bs)
if (aps_calc):
bs_apsd = [1]*d.data.ndim
bs_apsd[proc_dim] = len(index_nonzero)
bs_apsd_ref = [1]*_ref.data.ndim
bs_apsd_ref[proc_dim_ref] = len(index_nonzero)
else:
bs = [1]*out_data.ndim
bs[proc_dim_out] = out_data.shape[proc_dim_out]
bs = tuple(bs)
if (aps_calc):
bs_apsd = [1]*d.data.ndim
bs_apsd[proc_dim] = out_data.shape[proc_dim_out]
bs_apsd_ref = [1]*_ref.data.ndim
bs_apsd_ref[proc_dim_ref] = out_data.shape[proc_dim_out]
for i_int in range(n_proc_int):
# Setting the index range of the actual interval
ind_proc[proc_dim] = slice(index_int_low[i_int], index_int_high[i_int] + 1)
ind_proc_ref[proc_dim_ref] = slice(index_int_low[i_int], index_int_high[i_int] + 1)
# Getting the data for processing, this might be multi-dim
data_proc = copy.deepcopy(d.data[tuple(ind_proc)])
data_proc_ref = copy.deepcopy(_ref.data[tuple(ind_proc_ref)])
if (trend is not None):
try:
_trend_removal(data_proc,proc_dim,trend)
_trend_removal(data_proc_ref,proc_dim_ref,trend)
except Exception as e:
raise e
if (hanning):
data_proc *= hanning_window.astype(data_proc.dtype)
data_proc_ref *= hanning_window_ref.astype(data_proc_ref.dtype)
# Calculating FFT
dfft = np.fft.fft(data_proc,axis=proc_dim)
dfft_ref = np.fft.fft(data_proc_ref,axis=proc_dim_ref)
dps, axis_source, axis_number = flap.tools.multiply_along_axes(dfft,
dfft_ref.conjugate(),
[proc_dim, proc_dim_ref])
if (aps_calc):
dfft_aps = (dfft * dfft.conjugate()).real
dfft_aps_ref = (dfft_ref * dfft_ref.conjugate()).real
# Rearranging the negative frequencies
if (ind_in2 is not None):
dps1 = np.empty(dps.shape,dtype=dps.dtype)
dps1[tuple(ind_out1)] = dps[tuple(ind_in1)]
dps1[tuple(ind_out2)] = dps[tuple(ind_in2)]
dps = dps1
if (aps_calc):
dfft_aps1 = np.empty(dfft_aps.shape,dtype=dfft_aps.dtype) #THIS USED TO BE dfft_aps1 WHICH IS UNDEFINED
dfft_aps1[tuple(ind_out1_apsd)] = dfft_aps[tuple(ind_in1_apsd)]
dfft_aps1[tuple(ind_out2_apsd)] = dfft_aps[tuple(ind_in2_apsd)]
dfft_aps = dfft_aps1
dfft_aps1 = np.empty(dfft_aps_ref.shape,dtype=dfft_aps_ref.dtype)
dfft_aps1[tuple(ind_out1_apsd_ref)] = dfft_aps_ref[tuple(ind_in1_apsd_ref)]
dfft_aps1[tuple(ind_out2_apsd_ref)] = dfft_aps_ref[tuple(ind_in2_apsd_ref)]
dfft_aps_ref = dfft_aps1
else:
dps = dps[tuple(ind_in1)]
if (aps_calc):
dfft_aps = dfft_aps[tuple(ind_in1_apsd)]
dfft_aps_ref = dfft_aps_ref[tuple(ind_in1_apsd_ref)]
# Cutting the range
if ((ind_slice) is not None):
dps = dps[tuple(ind_slice)]
if (aps_calc):
dfft_aps = dfft_aps[tuple(ind_slice_apsd)]
dfft_aps_ref = dfft_aps_ref[tuple(ind_slice_apsd_ref)]
# Binning the spectrum and summing up the time intervals
if (ind_bin is not None):
out_data_interval = np.zeros(tuple(out_shape), dtype=complex)
np.add.at(out_data_interval, tuple(ind_bin), dps)
if (aps_calc):
apsd_interval = np.zeros(tuple(apsd_shape), dtype=float)
np.add.at(apsd_interval, tuple(ind_bin_apsd), dfft_aps)
apsd_ref_interval = np.zeros(tuple(apsd_ref_shape), dtype=float)
np.add.at(apsd_ref_interval, tuple(ind_bin_apsd_ref), dfft_aps_ref)
else:
out_data_interval = dps
if (aps_calc):
apsd_interval = dfft_aps
apsd_ref_interval = dfft_aps_ref
# Dividing by the number of points in each bin
if (ind_nonzero is not None):
out_data_interval[tuple(ind_nonzero)] /= out_data_num[index_nonzero].reshape(bs)
if (aps_calc):
apsd_interval[tuple(ind_nonzero_apsd)] /= out_data_num[index_nonzero].reshape(bs_apsd)
apsd_ref_interval[tuple(ind_nonzero_apsd_ref)] /= out_data_num[index_nonzero].reshape(bs_apsd_ref)
else:
out_data_interval /= out_data_num.reshape(bs)
if (aps_calc):
apsd_interval /= out_data_num.reshape(bs_apsd)
apsd_ref_interval /= out_data_num.reshape(bs_apsd_ref)
out_data_interval /= interval_sample_n
out_data += out_data_interval
if (aps_calc):
apsd_interval /= interval_sample_n
apsd_ref_interval /= interval_sample_n
apsd += apsd_interval
apsd_ref += apsd_ref_interval
out_data /= n_proc_int
if (aps_calc):
apsd_norm, axis_source, axis_number = flap.tools.multiply_along_axes(apsd,
apsd_ref,
[proc_dim, proc_dim_ref])
apsd_norm /= n_proc_int ** 2
if (norm):
if (ind_nonzero is not None):
out_data[tuple(ind_nonzero)] /= np.sqrt(apsd_norm[tuple(ind_nonzero)])
else:
out_data /= np.sqrt(apsd_norm)
# If there are frequency bins without data setting them to np.NaN
if (ind_nonzero is not None):
out_data[tuple(ind_zero)] = np.NaN
# Putting significance into error
error = np.full(tuple(out_shape), np.NaN, dtype = float)
error_arr = np.full(out_data_num.shape, np.NaN, dtype=float)
if (ind_nonzero is not None):
error_arr[index_nonzero] = 1./np.sqrt(out_data_num[index_nonzero] * n_proc_int)
import numpy as np
import numpy.testing as npt
import pytest
from quara.objects import matrix_basis
from quara.objects.composite_system import CompositeSystem
from quara.objects.elemental_system import ElementalSystem
from quara.objects.mprocess import (
MProcess,
convert_var_index_to_mprocess_index,
convert_mprocess_index_to_var_index,
convert_hss_to_var,
convert_var_to_hss,
)
from quara.objects.mprocess_typical import generate_mprocess_from_name
from quara.settings import Settings
from quara.objects.povm_typical import generate_povm_from_name
class TestMProcess:
def test_init_error(self):
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
# Test that HS must be square matrix
hs = np.array(
[[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.float64,
)
with pytest.raises(ValueError):
MProcess(c_sys, [hs])
# Test that dim of HS must be square number
hs = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]], dtype=np.float64)
with pytest.raises(ValueError):
MProcess(c_sys, [hs])
# Test that HS must be real matrix
hs = np.array(
[[1j, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.complex128,
)
with pytest.raises(ValueError):
MProcess(c_sys, [hs])
# Test that dim of HS equals dim of CompositeSystem
hs = np.array(
[
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
],
dtype=np.float64,
)
with pytest.raises(ValueError):
MProcess(c_sys, [hs])
# Test shape
hs_0 = (1 / 2) * np.array(
[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]
)
hs_1 = (1 / 2) * np.array(
[[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]
)
hss = [hs_0, hs_1]
with pytest.raises(ValueError):
MProcess(c_sys, hss, shape=(1,))
# Test
e_sys = ElementalSystem(0, matrix_basis.get_comp_basis())
c_sys = CompositeSystem([e_sys])
# Test that c_sys.is_orthonormal_hermitian_0thprop_identity == False
hs = np.array(
[[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.float64,
)
with pytest.raises(ValueError):
MProcess(c_sys, [hs])
def test_init_is_physicality_required(self):
e_sys = ElementalSystem(1, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
# gate is not TP
hs_0 = np.array(
[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]], dtype=np.float64
)
hs_1 = np.array(
[[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]], dtype=np.float64
)
hs_not_tp = [hs_0, hs_1]
with pytest.raises(ValueError):
MProcess(c_sys, hs_not_tp)
with pytest.raises(ValueError):
MProcess(c_sys, hs_not_tp, is_physicality_required=True)
# gate is not CP
hs_0 = (1 / 2) * np.array(
[[2, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]], dtype=np.float64
)
hs_1 = (1 / 2) * np.array(
[[-1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]],
dtype=np.float64,
)
hs_not_cp = [hs_0, hs_1]
with pytest.raises(ValueError):
MProcess(c_sys, hs_not_cp)
with pytest.raises(ValueError):
MProcess(c_sys, hs_not_cp, is_physicality_required=True)
# case: when is_physicality_required is False, ValueError is not raised
MProcess(c_sys, hs_not_tp, is_physicality_required=False)
MProcess(c_sys, hs_not_cp, is_physicality_required=False)
def test_access_dim(self):
# Arrange
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
mprocess = generate_mprocess_from_name(c_sys, "z-type1")
# Act
actual = mprocess.dim
# Assert
expected = 2
assert actual == expected
# Test that "dim" cannot be updated
with pytest.raises(AttributeError):
mprocess.dim = 100
def test_access_num_outcomes(self):
# Arrange
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
mprocess = generate_mprocess_from_name(c_sys, "z-type1")
# Act
actual = mprocess.num_outcomes
# Assert
expected = 2
assert actual == expected
# Test that "num_outcomes" cannot be updated
with pytest.raises(AttributeError):
mprocess.num_outcomes = 100
def test_access_hss(self):
# Arrange
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
mprocess = generate_mprocess_from_name(c_sys, "z-type1")
# Act
actual = mprocess.hss
# Assert
hs_0 = (1 / 2) * np.array(
[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]
)
hs_1 = (1 / 2) * np.array(
[[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]
)
expected = [hs_0, hs_1]
for a, e in zip(actual, expected):
npt.assert_almost_equal(a, e, decimal=15)
# Test that "num_outcomes" cannot be updated
with pytest.raises(AttributeError):
mprocess.hss = 100
def test_hs(self):
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
# case 1: one-dimensional
mprocess = generate_mprocess_from_name(c_sys, "z-type1")
actual = mprocess.hs(0)
expected = (1 / 2) * np.array(
[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]
)
npt.assert_almost_equal(actual, expected, decimal=15)
actual = mprocess.hs(1)
expected = (1 / 2) * np.array(
[[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]
)
npt.assert_almost_equal(actual, expected, decimal=15)
# case 2: multi-dimensional
hs = np.zeros((4, 4), dtype=np.float64)
hss = []
for index in range(6):
tmp_hs = hs.copy()
tmp_hs[0][0] = index
hss.append(tmp_hs)
mprocess = MProcess(c_sys, hss, shape=(2, 3), is_physicality_required=False)
assert mprocess.hs((0, 0))[0][0] == 0
assert mprocess.hs((0, 1))[0][0] == 1
assert mprocess.hs((0, 2))[0][0] == 2
assert mprocess.hs((1, 0))[0][0] == 3
assert mprocess.hs((1, 1))[0][0] == 4
assert mprocess.hs((1, 2))[0][0] == 5
def test_access_shape(self):
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
# case 1: one-dimensional
mprocess = generate_mprocess_from_name(c_sys, "z-type1")
actual = mprocess.shape
expected = (2,)
assert actual == expected
# case 2: multi-dimensional
hs_0 = (1 / 2) * np.array(
[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]
)
hs_1 = (1 / 2) * np.array(
[[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]
)
hss = [hs_0, hs_1]
mprocess = MProcess(c_sys, hss, shape=(1, 2))
actual = mprocess.shape
expected = (1, 2)
assert actual == expected
# Test that "shape" cannot be updated
mprocess = generate_mprocess_from_name(c_sys, "z-type1")
with pytest.raises(AttributeError):
mprocess.shape = (2,)
def test_access_mode_sampling(self):
# Arrange
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
hs_0 = (1 / 2) * np.array(
[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]
)
hs_1 = (1 / 2) * np.array(
[[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]
)
hss = [hs_0, hs_1]
# case 1: default(False)
mprocess = MProcess(c_sys, hss)
assert mprocess.mode_sampling == False
# case 2: mode_sampling=False
mprocess = MProcess(c_sys, hss, mode_sampling=False)
assert mprocess.mode_sampling == False
# case 3: mode_sampling=True
mprocess = MProcess(c_sys, hss, mode_sampling=True)
assert mprocess.mode_sampling == True
# Test that "mode_sampling" cannot be updated
with pytest.raises(AttributeError):
mprocess.mode_sampling = False
def test_access_random_seed_or_generator(self):
# Arrange
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
hs_0 = (1 / 2) * np.array(
[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]
)
hs_1 = (1 / 2) * np.array(
[[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]
)
hss = [hs_0, hs_1]
# case 1: default(None)
mprocess = MProcess(c_sys, hss)
assert mprocess.random_seed_or_generator == None
# case 2: random_seed_or_generator=1
mprocess = MProcess(c_sys, hss, mode_sampling=True, random_seed_or_generator=1)
assert mprocess.random_seed_or_generator == 1
# Test that "random_seed_or_generator" cannot be updated
with pytest.raises(AttributeError):
mprocess.random_seed_or_generator = 1
def test_set_mode_sampling(self):
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
mprocess = generate_mprocess_from_name(c_sys, "z-type1")
# case 1: mode_sampling=True, random_seed_or_generator=None
mprocess.set_mode_sampling(True)
assert mprocess.mode_sampling == True
assert mprocess.random_seed_or_generator == None
# case 2: mode_sampling=True, random_seed_or_generator=1
mprocess.set_mode_sampling(True, random_seed_or_generator=1)
assert mprocess.mode_sampling == True
assert mprocess.random_seed_or_generator == 1
# case 3: mode_sampling=True -> mode_sampling=False
mprocess.set_mode_sampling(True, random_seed_or_generator=1)
mprocess.set_mode_sampling(False)
assert mprocess.mode_sampling == False
assert mprocess.random_seed_or_generator == None
# case 4: mode_sampling=False, random_seed_or_generator is not None
with pytest.raises(ValueError):
mprocess.set_mode_sampling(False, random_seed_or_generator=1)
def test_access_eps_zero(self):
# Arrange
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
hs_0 = (1 / 2) * np.array(
[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]
)
hs_1 = (1 / 2) * np.array(
[[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]
)
hss = [hs_0, hs_1]
# case 1: default(10 ** -8)
mprocess = MProcess(c_sys, hss)
assert mprocess.eps_zero == 10 ** -8
# case 2: eps_zero=10 ** -5
mprocess = MProcess(c_sys, hss, eps_zero=10 ** -5)
assert mprocess.eps_zero == 10 ** -5
# Test that "eps_zero" cannot be updated
with pytest.raises(AttributeError):
mprocess.eps_zero = 1
def test_is_eq_constraint_satisfied(self):
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
# case 1: is_eq_constraint_satisfied=True
hs_0 = (1 / 2) * np.array(
[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]
)
hs_1 = (1 / 2) * np.array(
[[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]
)
hss = [hs_0, hs_1]
mprocess = MProcess(c_sys, hss, is_physicality_required=False)
assert mprocess.is_eq_constraint_satisfied() == True
# case 2: is_eq_constraint_satisfied=False
hs_0 = np.array(
[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]], dtype=np.float64
)
hs_1 = np.array(
[[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]], dtype=np.float64
)
hss = [hs_0, hs_1]
mprocess = MProcess(c_sys, hss, is_physicality_required=False)
assert mprocess.is_eq_constraint_satisfied() == False
# case 3: atol=1e-1
hs_0 = (1 / 2) * np.array(
[[1, 0, 0, 1.1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]
)
hs_1 = (1 / 2) * np.array(
[[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]
)
hss = [hs_0, hs_1]
mprocess = MProcess(c_sys, hss, is_physicality_required=False)
assert mprocess.is_eq_constraint_satisfied(atol=1e-1) == True
def test_is_ineq_constraint_satisfied(self):
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
# case 1: is_ineq_constraint_satisfied=True
hs_0 = (1 / 2) * np.array(
[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]
)
hs_1 = (1 / 2) * np.array(
[[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]
)
hss = [hs_0, hs_1]
mprocess = MProcess(c_sys, hss, is_physicality_required=False)
assert mprocess.is_ineq_constraint_satisfied() == True
# case 2: is_ineq_constraint_satisfied=False
hs_0 = (1 / 2) * np.array(
[[2, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]], dtype=np.float64
)
hs_1 = (1 / 2) * np.array(
[[-1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]],
dtype=np.float64,
)
hss = [hs_0, hs_1]
mprocess = MProcess(c_sys, hss, is_physicality_required=False)
assert mprocess.is_ineq_constraint_satisfied() == False
# case 3: atol=1e-1
hs_0 = (1 / 2) * np.array(
[[1, 0, 0, 1.1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]
)
hs_1 = (1 / 2) * np.array(
[[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]
)
hss = [hs_0, hs_1]
mprocess = MProcess(c_sys, hss, is_physicality_required=False)
assert mprocess.is_ineq_constraint_satisfied(atol=1e-1) == True
def test_set_zero(self):
# Arrange
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
mprocess = generate_mprocess_from_name(c_sys, "z-type1")
# Act
mprocess.set_zero()
actual = mprocess.hss
# Assert
expected = [
np.zeros((4, 4), dtype=np.float64),
np.zeros((4, 4), dtype=np.float64),
]
for a, e in zip(actual, expected):
npt.assert_almost_equal(a, e, decimal=15)
assert mprocess.dim == 2
assert mprocess.shape == (2,)
assert mprocess.mode_sampling == False
assert mprocess.is_physicality_required == False
assert mprocess.is_estimation_object == True
assert mprocess.on_para_eq_constraint == True
assert mprocess.on_algo_eq_constraint == True
assert mprocess.on_algo_ineq_constraint == True
assert mprocess.eps_proj_physical == Settings.get_atol() / 10.0
def test_generate_zero_obj(self):
# Arrange
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
z = generate_mprocess_from_name(c_sys, "z-type1")
# Act
mprocess = z.generate_zero_obj()
actual = mprocess.hss
# Assert
expected = [
np.array(
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.float64,
),
np.array(
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.float64,
),
]
for a, e in zip(actual, expected):
npt.assert_almost_equal(a, e, decimal=15)
assert mprocess.dim == 2
assert mprocess.shape == (2,)
assert mprocess.mode_sampling == False
assert mprocess.is_physicality_required == False
assert mprocess.is_estimation_object == False
assert mprocess.on_para_eq_constraint == True
assert mprocess.on_algo_eq_constraint == True
assert mprocess.on_algo_ineq_constraint == True
assert mprocess.eps_proj_physical == Settings.get_atol() / 10.0
def test_generate_origin_obj(self):
# Arrange
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
z = generate_mprocess_from_name(c_sys, "z-type1")
# Act
mprocess = z.generate_origin_obj()
actual = mprocess.hss
# Assert
expected = [
np.array(
[[1 / 2, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.float64,
),
np.array(
[[1 / 2, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.float64,
),
]
for a, e in zip(actual, expected):
npt.assert_almost_equal(a, e, decimal=15)
assert mprocess.dim == 2
assert mprocess.shape == (2,)
assert mprocess.mode_sampling == False
assert mprocess.is_physicality_required == False
assert mprocess.is_estimation_object == False
assert mprocess.on_para_eq_constraint == True
assert mprocess.on_algo_eq_constraint == True
assert mprocess.on_algo_ineq_constraint == True
assert mprocess.eps_proj_physical == Settings.get_atol() / 10.0
def test_to_var(self):
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
hs_0 = (1 / 2) * np.array(
[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]
)
hs_1 = (1 / 2) * np.array(
[[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]
)
hss = [hs_0, hs_1]
# case 1: on_para_eq_constraint=default(True)
mprocess = MProcess(c_sys, hss)
actual = mprocess.to_var()
expected = (1 / 2) * np.array(
[
[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[-1, 0, 0, 1],
]
).flatten()
npt.assert_almost_equal(actual, expected, decimal=15)
# case 2: on_para_eq_constraint=True
mprocess = MProcess(c_sys, hss, on_para_eq_constraint=True)
actual = mprocess.to_var()
expected = (1 / 2) * np.array(
[
[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[-1, 0, 0, 1],
]
).flatten()
npt.assert_almost_equal(actual, expected, decimal=15)
# case 3: on_para_eq_constraint=False
mprocess = MProcess(c_sys, hss, on_para_eq_constraint=False)
actual = mprocess.to_var()
expected = (1 / 2) * np.array(
[
[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 1],
[1, 0, 0, -1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[-1, 0, 0, 1],
]
).flatten()
npt.assert_almost_equal(actual, expected, decimal=15)
def test_to_stacked_vector(self):
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
hs_0 = (1 / 2) * np.array(
[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]
)
hs_1 = (1 / 2) * np.array(
[[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]
)
hss = [hs_0, hs_1]
expected = (1 / 2) * np.array(
[
[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 1],
[1, 0, 0, -1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[-1, 0, 0, 1],
]
).flatten()
# case 1: on_para_eq_constraint=default(True)
mprocess = MProcess(c_sys, hss)
actual = mprocess.to_stacked_vector()
npt.assert_almost_equal(actual, expected, decimal=15)
# case 2: on_para_eq_constraint=True
mprocess = MProcess(c_sys, hss, on_para_eq_constraint=True)
actual = mprocess.to_stacked_vector()
npt.assert_almost_equal(actual, expected, decimal=15)
# case 3: on_para_eq_constraint=False
mprocess = MProcess(c_sys, hss, on_para_eq_constraint=False)
actual = mprocess.to_stacked_vector()
npt.assert_almost_equal(actual, expected, decimal=15)
def test_calc_gradient(self):
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
hs_0 = (1 / 2) * np.array(
[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]
)
hs_1 = (1 / 2) * np.array(
[[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]
)
hss = [hs_0, hs_1]
expected = (1 / 2) * np.array(
[
[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 1],
[1, 0, 0, -1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[-1, 0, 0, 1],
]
).flatten()
# case 1: on_para_eq_constraint=default(True)
mprocess = MProcess(c_sys, hss)
# var_index = 0
actual = mprocess.calc_gradient(0)
expected = [
np.zeros((4, 4), dtype=np.float64),
np.zeros((4, 4), dtype=np.float64),
]
expected[0][0][0] = 1
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
# var_index = 1
actual = mprocess.calc_gradient(1)
expected = [
np.zeros((4, 4), dtype=np.float64),
np.zeros((4, 4), dtype=np.float64),
]
expected[0][0][1] = 1
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
# var_index = 4
actual = mprocess.calc_gradient(4)
expected = [
np.zeros((4, 4), dtype=np.float64),
np.zeros((4, 4), dtype=np.float64),
]
expected[0][1][0] = 1
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
# var_index = 16
actual = mprocess.calc_gradient(16)
expected = [
np.zeros((4, 4), dtype=np.float64),
np.zeros((4, 4), dtype=np.float64),
]
expected[1][1][0] = 1
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
# var_index = 27
actual = mprocess.calc_gradient(27)
expected = [
np.zeros((4, 4), dtype=np.float64),
np.zeros((4, 4), dtype=np.float64),
]
expected[1][3][3] = 1
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
## case 2: on_para_eq_constraint=True
mprocess = MProcess(c_sys, hss, on_para_eq_constraint=True)
# var_index = 0
actual = mprocess.calc_gradient(0)
expected = [
np.zeros((4, 4), dtype=np.float64),
np.zeros((4, 4), dtype=np.float64),
]
expected[0][0][0] = 1
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
# var_index = 1
actual = mprocess.calc_gradient(1)
expected = [
np.zeros((4, 4), dtype=np.float64),
np.zeros((4, 4), dtype=np.float64),
]
expected[0][0][1] = 1
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
# var_index = 4
actual = mprocess.calc_gradient(4)
expected = [
np.zeros((4, 4), dtype=np.float64),
np.zeros((4, 4), dtype=np.float64),
]
expected[0][1][0] = 1
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
# var_index = 16
actual = mprocess.calc_gradient(16)
expected = [
np.zeros((4, 4), dtype=np.float64),
np.zeros((4, 4), dtype=np.float64),
]
expected[1][1][0] = 1
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
# var_index = 27
actual = mprocess.calc_gradient(27)
expected = [
np.zeros((4, 4), dtype=np.float64),
np.zeros((4, 4), dtype=np.float64),
]
expected[1][3][3] = 1
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
## case 3: on_para_eq_constraint=False
mprocess = MProcess(c_sys, hss, on_para_eq_constraint=False)
# var_index = 0
actual = mprocess.calc_gradient(0)
expected = [
np.zeros((4, 4), dtype=np.float64),
np.zeros((4, 4), dtype=np.float64),
]
expected[0][0][0] = 1
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
# var_index = 1
actual = mprocess.calc_gradient(1)
expected = [
np.zeros((4, 4), dtype=np.float64),
np.zeros((4, 4), dtype=np.float64),
]
expected[0][0][1] = 1
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
# var_index = 4
actual = mprocess.calc_gradient(4)
expected = [
np.zeros((4, 4), dtype=np.float64),
np.zeros((4, 4), dtype=np.float64),
]
expected[0][1][0] = 1
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
# var_index = 16
actual = mprocess.calc_gradient(16)
expected = [
np.zeros((4, 4), dtype=np.float64),
np.zeros((4, 4), dtype=np.float64),
]
expected[1][0][0] = 1
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
# var_index = 31
actual = mprocess.calc_gradient(31)
expected = [
np.zeros((4, 4), dtype=np.float64),
np.zeros((4, 4), dtype=np.float64),
]
expected[1][3][3] = 1
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
def test_calc_proj_eq_constraint(self):
## case 1: z
# Arrange
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
mprocess = generate_mprocess_from_name(c_sys, "z-type1")
# Act
actual = mprocess.calc_proj_eq_constraint()
# Assert
expected = [
(1 / 2)
* np.array([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]),
(1 / 2)
* np.array([[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]),
]
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
assert mprocess.dim == 2
assert mprocess.shape == (2,)
assert mprocess.mode_sampling == False
assert mprocess.is_physicality_required == True
assert mprocess.is_estimation_object == True
assert mprocess.on_para_eq_constraint == True
assert mprocess.on_algo_eq_constraint == True
assert mprocess.on_algo_ineq_constraint == True
assert mprocess.eps_proj_physical == Settings.get_atol() / 10.0
## case 2:
# Arrange
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
hss = [
(1 / 2)
* np.array([[1, 1, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]),
(1 / 2)
* np.array([[1, 1, 1, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]),
]
mprocess = MProcess(c_sys, hss, is_physicality_required=False)
# Act
actual = mprocess.calc_proj_eq_constraint()
# Assert
expected = [
(1 / 2)
* np.array([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]),
(1 / 2)
* np.array([[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]),
]
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
def test_calc_proj_eq_constraint_with_var(self):
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
mprocess = generate_mprocess_from_name(c_sys, "z-type1")
# case 1: on_para_eq_constraint=default(True)
actual = mprocess.calc_proj_eq_constraint_with_var(c_sys, mprocess.to_var())
expected = (1 / 2) * np.array(
[
[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[-1, 0, 0, 1],
]
).flatten()
npt.assert_almost_equal(actual, expected, decimal=15)
# case 2: on_para_eq_constraint=True
actual = mprocess.calc_proj_eq_constraint_with_var(
c_sys, mprocess.to_var(), on_para_eq_constraint=True
)
expected = (1 / 2) * np.array(
[
[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[-1, 0, 0, 1],
]
).flatten()
npt.assert_almost_equal(actual, expected, decimal=15)
# case 3: on_para_eq_constraint=False
actual = mprocess.calc_proj_eq_constraint_with_var(
c_sys, mprocess.to_stacked_vector(), on_para_eq_constraint=False
)
expected = (1 / 2) * np.array(
[
[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 1],
[1, 0, 0, -1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[-1, 0, 0, 1],
]
).flatten()
npt.assert_almost_equal(actual, expected, decimal=15)
def test_calc_proj_ineq_constraint(self):
## case 1: z
# Arrange
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
mprocess = generate_mprocess_from_name(c_sys, "z-type1")
# Act
actual = mprocess.calc_proj_ineq_constraint()
# Assert
expected = [
(1 / 2)
* np.array([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]),
(1 / 2)
* np.array([[1, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 0, 0, 1]]),
]
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
assert mprocess.dim == 2
assert mprocess.shape == (2,)
assert mprocess.mode_sampling == False
assert mprocess.is_physicality_required == True
assert mprocess.is_estimation_object == True
assert mprocess.on_para_eq_constraint == True
assert mprocess.on_algo_eq_constraint == True
assert mprocess.on_algo_ineq_constraint == True
assert mprocess.eps_proj_physical == Settings.get_atol() / 10.0
## case 2:
# Arrange
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
hss = [
(1 / 2)
* np.array([[0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]),
"""
For a session where there is DLC already computed,
load DLC traces to cut video ROIs and then
compute motion energy for these ROIS.
bodyCamera: cut ROI such that mouse body but not wheel motion is in ROI
left(right)Camera: cut whisker pad region
"""
import time
import numpy as np
import pandas as pd
import cv2
import logging
from ibllib.io.video import get_video_frames_preload, label_from_path
from ibllib.io.extractors.camera import get_video_length
_log = logging.getLogger('ibllib')
def grayscale(x):
return cv2.cvtColor(x, cv2.COLOR_BGR2GRAY)
def get_dlc_midpoints(dlc_pqt, target):
# Load dataframe
dlc_df = pd.read_parquet(dlc_pqt)
# Set values to nan if likelihood is too low and calculate midpoints
idx = dlc_df.loc[dlc_df[f'{target}_likelihood'] < 0.9].index
dlc_df.loc[idx, [f'{target}_x', f'{target}_y']] = np.nan
if all(np.isnan(dlc_df[f'{target}_x'])) or all(np.isnan(dlc_df[f'{target}_y'])):
raise ValueError(f'Failed to calculate midpoint, {target} all NaN in {dlc_pqt}')
else:
mloc = [int(np.nanmean(dlc_df[f'{target}_x'])), int(np.nanmean(dlc_df[f'{target}_y']))]
return mloc
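# A minimal usage sketch (the path and target label are hypothetical; the pqt file
# must contain <target>_x, <target>_y and <target>_likelihood columns):
#
#   x_mid, y_mid = get_dlc_midpoints('alf/_ibl_leftCamera.dlc.pqt', 'nose_tip')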
def motion_energy(file_mp4, dlc_pqt, frames=10000):
"""
Compute motion energy on cropped frames of a single video
:param file_mp4: Video file to run motion energy for
:param dlc_pqt: Path to dlc result in pqt file format.
:param frames: Number of frames to load into memory at once. If None all frames are loaded.
:return me_file: Path to numpy file containing motion energy.
:return me_roi: Path to numpy file containing ROI coordinates.
The frames parameter determines how many cropped frames per camera are loaded into memory at
once and should be set depending on available RAM. Some approximate numbers for orientation,
assuming 90 min video and frames set to:
1 : 152 KB (body), 54 KB (left), 15 KB (right)
50000 : 7.6 GB (body), 2.7 GB (left), 0.75 GB (right)
None : 25 GB (body), 17.5 GB (left), 12.5 GB (right)
"""
start_T = time.time()
label = label_from_path(dlc_pqt)
# Crop ROI
if label == 'body':
tail_mid = get_dlc_midpoints(dlc_pqt, 'tail_start')
anchor = np.array(tail_mid)
w, h = int(anchor[0] * 3 / 5), 210
x, y = int(anchor[0] - anchor[0] * 3 / 5), int(anchor[1] - 120)
else:
nose_mid = get_dlc_midpoints(dlc_pqt, 'nose_tip')
# Go through the different pupil points to see if any has not all NaNs
try:
pupil_mid = get_dlc_midpoints(dlc_pqt, 'pupil_top_r')
except ValueError:
try:
pupil_mid = get_dlc_midpoints(dlc_pqt, 'pupil_left_r')
except ValueError:
try:
pupil_mid = get_dlc_midpoints(dlc_pqt, 'pupil_right_r')
except ValueError:
pupil_mid = get_dlc_midpoints(dlc_pqt, 'pupil_bottom_r')
anchor = np.mean([nose_mid, pupil_mid], axis=0)
dist = np.sqrt(np.sum((np.array(nose_mid) - np.array(pupil_mid))**2, axis=0))
w, h = int(dist / 2), int(dist / 3)
x, y = int(anchor[0] - dist / 4), int(anchor[1])
# Check if the mask has negative values (sign that the midpoint location is off)
if any(i < 0 for i in [x, y, w, h]) is True:
raise ValueError(f"ROI for motion energy on {label}Camera could not be computed. "
f"Check for issues with the raw video or dlc output.")
# Note that x and y are flipped when loading with cv2, therefore:
mask = np.s_[y:y + h, x:x + w]
# save ROI coordinates
roi = np.asarray([w, h, x, y])
alf_path = file_mp4.parent.parent.joinpath('alf')
alf_path.mkdir(exist_ok=True)
roi_file = alf_path.joinpath(f'{label}ROIMotionEnergy.position.npy')
np.save(roi_file, roi)
frame_count = get_video_length(file_mp4)
me = np.zeros(frame_count,)
cap = cv2.VideoCapture(str(file_mp4))
if frames:
n, keep_reading = 0, True
while keep_reading:
# Set the frame numbers to the next #frames, with 1 frame overlap
frame_numbers = range(n * (frames - 1), n * (frames - 1) + frames)
# Make sure not to load empty frames
if np.max(frame_numbers) >= frame_count:
frame_numbers = range(frame_numbers.start, frame_count)
keep_reading = False
# Load, crop and grayscale frames.
cropped_frames = get_video_frames_preload(cap, frame_numbers=frame_numbers,
mask=mask, func=grayscale,
quiet=True).astype(np.float32)
# Calculate motion energy for those frames and append to big array
me[frame_numbers[:-1]] = np.mean(np.abs(np.diff(cropped_frames, axis=0)), axis=(1, 2))
from __future__ import absolute_import, division, print_function
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from numbers import Number
from typing import List, Union
import numpy as np
import tensorflow as tf
from six import add_metaclass
from tensorflow.python.keras.callbacks import Callback
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.distributions import Distribution
from odin.bay.distributions import ZeroInflated
from odin.utils import catch_warnings_ignore
from sisua.analysis.imputation_benchmarks import (correlation_scores,
imputation_mean_score,
imputation_score,
imputation_std_score)
from sisua.analysis.latent_benchmarks import clustering_scores
from sisua.data import SingleCellOMIC
from sisua.models import SingleCellModel
from sisua.models.base import _to_sco
__all__ = [
'SingleCellMetric', 'NegativeLogLikelihood', 'ImputationError',
'CorrelationScores', 'ClusteringScores'
]
def _preprocess_output_distribution(y_pred):
r""" In case of zero inflated distribution, extract the underlying count
distribution """
if isinstance(y_pred, tfd.Independent) and \
isinstance(y_pred.distribution, ZeroInflated):
y_pred = tfd.Independent(
y_pred.distribution.count_distribution,
reinterpreted_batch_ndims=y_pred.reinterpreted_batch_ndims)
return y_pred
def _to_binary(protein):
labels = protein.X
if 'X_prob' in protein.obsm:
labels = protein.obsm['X_prob']
elif 'X_bin' in protein.obsm:
labels = protein.obsm['X_bin']
if labels.ndim == 2:
labels = np.argmax(labels, axis=1)
elif labels.ndim > 2:
raise RuntimeError("protein labels has %d dimensions, no support" %
labels.ndim)
return labels
_CORRUPTED_INPUTS = {}
# ===========================================================================
# Base class
# ===========================================================================
@add_metaclass(ABCMeta)
class SingleCellMetric(Callback):
r""" Single cell metrics for evaluating the imputation and latent space
during training
Parameters
----------
inputs : {`SingleCellOMIC`, `numpy.ndarray`}
extras : None
extras object (e.g. protein) used for calculating the metric
sample_shape : `int` (default=`1`)
number of MCMC samples for prediction
batch_size : `int` (default=`64`)
freq : `int` (default=`3`)
frequency of evaluating the metric; some metrics are very computationally
intensive and could slow down the training progress significantly
"""
def __init__(self,
inputs: Union[SingleCellOMIC, List[SingleCellOMIC], np.
ndarray, List[np.ndarray], None] = None,
extras=None,
sample_shape=1,
batch_size=64,
freq=3,
name=None,
**kwargs):
super(SingleCellMetric, self).__init__(**kwargs)
self.sample_shape = sample_shape
self.batch_size = batch_size
self.inputs = inputs
self.extras = extras
self.freq = int(freq)
self._name = name
# store the last epoch that the metric was calculated
self._last_epoch = 0
assert self.freq > 0
@property
def name(self):
return self.__class__.__name__.lower() if self._name is None else self._name
def set_model(self, model: SingleCellModel):
assert isinstance(
model, SingleCellModel), "This callback only support SingleCellModel"
self.model = model
return self
@abstractmethod
def call(self, y_true: List[SingleCellOMIC], y_crpt: List[SingleCellOMIC],
y_pred: List[Distribution], latents: List[Distribution], extras):
raise NotImplementedError
def __call__(self, inputs=None, sample_shape=None):
if inputs is None:
inputs = self.inputs
if sample_shape is None:
sample_shape = self.sample_shape
model = self.model
if not isinstance(inputs, (tuple, list)):
inputs = [inputs]
inputs = _to_sco(inputs, model.omic_outputs)
if model.corruption_rate is not None:
corruption_text = str(model.corruption_dist) + str(model.corruption_rate)
inputs_corrupt = [
(data.corrupt(corruption_rate=model.corruption_rate,
corruption_dist=model.corruption_dist,
inplace=False) \
if str(id(data)) + corruption_text not in _CORRUPTED_INPUTS else
_CORRUPTED_INPUTS[str(id(data)) + corruption_text]) \
if idx == 0 else data
for idx, data in enumerate(inputs)
]
_CORRUPTED_INPUTS[str(id(inputs[0])) +
corruption_text] = inputs_corrupt[0]
else:
inputs_corrupt = inputs
outputs, latents = model.predict(inputs_corrupt,
sample_shape=self.sample_shape,
batch_size=self.batch_size,
verbose=0,
apply_corruption=False)
if not isinstance(outputs, (tuple, list)):
outputs = [outputs]
if not isinstance(latents, (tuple, list)):
latents = [latents]
metrics = self.call(y_true=inputs,
y_pred=outputs,
y_crpt=inputs_corrupt,
latents=latents,
extras=self.extras)
if metrics is None:
metrics = {}
elif tf.is_tensor(metrics) or \
isinstance(metrics, np.ndarray) or \
isinstance(metrics, Number):
metrics = {self.name: metrics}
assert isinstance(metrics, dict), \
"Return metrics must be a dictionary mapping metric name to scalar value"
metrics = {
i: j.numpy() if tf.is_tensor(j) else j for i, j in metrics.items()
}
return metrics
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
Subclasses should override for any actions to run. This function should only
be called during TRAIN mode.
Arguments:
epoch: integer, index of epoch.
logs: dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result keys
are prefixed with `val_`.
"""
if epoch % self.freq == 0 and logs is not None:
self._last_epoch = epoch
# calculating the metric
try:
metrics = self()
except Exception as e:
print("Error:", e)
metrics = {}
# update the log
for key, val in metrics.items():
logs[key] = val
logs[key + '_epoch'] = epoch
def on_train_end(self, logs=None):
if self.model.epochs != self._last_epoch:
self._last_epoch = self.model.epochs
# calculating the metric
try:
metrics = self()
except Exception as e:
print("Error:", e)
metrics = {}
# update the log
history = self.model.history.history
for key, val in metrics.items():
if key in history:
history[key].append(val)
history[key + '_epoch'].append(self._last_epoch)
# ===========================================================================
# Losses
# ===========================================================================
class NegativeLogLikelihood(SingleCellMetric):
""" Log likelihood metric
Parameters
----------
inputs : {`SingleCellOMIC`, `numpy.ndarray`}
extras : None
extras object (e.g. protein) used for calculating the metric
sample_shape : `int` (default=`1`)
number of MCMC samples for prediction
batch_size : `int` (default=`64`)
freq : `int` (default=`3`)
    frequency of evaluating the metric; some metrics are very computationally
intensive and could slow down the training progress significantly
Returns
-------
dict:
'nllk%d' for each tuple of input and output
"""
def call(self, y_true: List[SingleCellOMIC], y_crpt: List[SingleCellOMIC],
y_pred: List[Distribution], latents: List[Distribution], extras):
nllk = {}
for idx, (t, p) in enumerate(zip(y_true, y_pred)):
nllk['nllk%d' % idx] = -tf.reduce_mean(p.log_prob(t.X))
return nllk
class ImputationError(SingleCellMetric):
""" Imputation error
Parameters
----------
inputs : {`SingleCellOMIC`, `numpy.ndarray`}
extras : None
extras object (e.g. protein) used for calculating the metric
sample_shape : `int` (default=`1`)
number of MCMC samples for prediction
batch_size : `int` (default=`64`)
freq : `int` (default=`3`)
    frequency of evaluating the metric; some metrics are very computationally
intensive and could slow down the training progress significantly
  Returns
  -------
dict :
'imp_med'
'imp_mean'
"""
def call(self, y_true: List[SingleCellOMIC], y_crpt: List[SingleCellOMIC],
y_pred: List[Distribution], latents: List[Distribution], extras):
# only care about the first data input
y_true = y_true[0]
y_crpt = y_crpt[0]
y_pred = y_pred[0]
y_pred = _preprocess_output_distribution(y_pred)
y_pred = y_pred.mean()
if y_pred.shape.ndims == 3:
y_pred = tf.reduce_mean(y_pred, axis=0)
return {
'imp_med':
imputation_score(original=y_true.X, imputed=y_pred),
'imp_mean':
imputation_mean_score(original=y_true.X,
corrupted=y_crpt.X,
imputed=y_pred)
}
class CorrelationScores(SingleCellMetric):
""" (1 - correlation_coefficients) to represent the loss
Parameters
----------
inputs : {`SingleCellOMIC`, `numpy.ndarray`}
extras : {`SingleCellOMIC`, `numpy.ndarray`}
the protein array
sample_shape : `int` (default=`1`)
number of MCMC samples for prediction
batch_size : `int` (default=`64`)
freq : `int` (default=`3`)
    frequency of evaluating the metric; some metrics are very computationally
intensive and could slow down the training progress significantly
Returns
-------
dict :
'pearson_mean': np.mean(pearson),
'spearman_mean': np.mean(spearman),
'pearson_med': np.median(pearson),
'spearman_med': np.median(spearman),
Example
-------
>>> CorrelationScores(extras=y_train, freq=1)
"""
def call(self, y_true: List[SingleCellOMIC], y_crpt: List[SingleCellOMIC],
y_pred: List[Distribution], latents: List[Distribution], extras):
y_true = y_true[0]
y_crpt = y_crpt[0]
y_pred = y_pred[0]
assert isinstance(extras, SingleCellOMIC), \
"protein data must be provided as extras in form of SingleCellOMIC"
protein = extras[y_true.indices]
y_true.assert_matching_cells(protein)
y_pred = _preprocess_output_distribution(y_pred)
y_pred = y_pred.mean()
if y_pred.shape.ndims == 3:
y_pred = tf.reduce_mean(y_pred, axis=0)
scores = correlation_scores(X=y_pred,
y=protein.X,
gene_name=y_true.var['geneid'],
protein_name=protein.var['protid'],
return_series=False)
if len(scores) == 0:
return {}
spearman = []
pearson = []
for _, (s, p) in scores.items():
spearman.append(-s)
pearson.append(-p)
return {
'pearson_mean': np.mean(pearson),
'spearman_mean': np.mean(spearman),
'pearson_med': np.median(pearson),
'spearman_med': np.median(spearman),
}
class ClusteringScores(SingleCellMetric):
"""
Parameters
----------
inputs : {`SingleCellOMIC`, `numpy.ndarray`}
extras : {`SingleCellOMIC`, `numpy.ndarray`}
the protein array
sample_shape : `int` (default=`1`)
number of MCMC samples for prediction
batch_size : `int` (default=`64`)
freq : `int` (default=`3`)
    frequency of evaluating the metric; some metrics are very computationally
intensive and could slow down the training progress significantly
Returns
-------
dict :
silhouette_score (higher is better, best is 1, worst is -1)
adjusted_rand_score (higher is better)
normalized_mutual_info_score (higher is better)
unsupervised_clustering_accuracy (higher is better)
Example
-------
>>> ClusteringScores(extras=y_train, freq=1)
"""
def call(self, y_true: List[SingleCellOMIC], y_crpt: List[SingleCellOMIC],
y_pred: List[Distribution], latents: List[Distribution], extras):
y_true = y_true[0]
y_crpt = y_crpt[0]
y_pred = y_pred[0]
assert isinstance(extras, SingleCellOMIC), \
"protein data must be provided as extras in form of SingleCellOMIC"
protein = extras[y_true.indices]
y_true.assert_matching_cells(protein)
labels = _to_binary(protein)
scores = {}
scores_avg = defaultdict(list)
# support multiple latents also
for idx, z in enumerate(latents):
for key, val in clustering_scores(latent=z.mean().numpy(),
labels=labels,
n_labels=protein.var.shape[0]).items():
# since all score higher is better, we want them as loss value
val = -val
scores['%s_%d' % (key, idx)] = val
scores_avg[key].append(val)
# average scores
scores.update({i: | np.mean(j) | numpy.mean |
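# --- Illustrative aside (not part of the source above) ---------------------
# A small standalone sketch of the sign convention used by ClusteringScores:
# every score is "higher is better", so it is negated to behave like a loss
# before per-latent values are averaged. The score values below are made up.
import numpy as np
from collections import defaultdict

scores_per_latent = [{'silhouette': 0.42, 'ari': 0.31},
                     {'silhouette': 0.38, 'ari': 0.29}]
scores, scores_avg = {}, defaultdict(list)
for idx, latent_scores in enumerate(scores_per_latent):
    for key, val in latent_scores.items():
        val = -val                                   # treat the score as a loss
        scores['%s_%d' % (key, idx)] = val
        scores_avg[key].append(val)
scores.update({k: np.mean(v) for k, v in scores_avg.items()})
print(scores)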
import numpy as np
from datetime import datetime
cnn_layer_types = ["CONV", "MAXPOOL"]
# ( layer type , x_length , y_length , zero_padding, no of mask ) zero_padding and no of mask not applicable for MAXPOOL
cnn_layer_info = []
ndelst = inpt_dim = [] # contains the node numbers in FC layer
mask_depth = [] # contains the mask depths of each layer
epoch_itr = optLyr = hydLyr = 0
lrn_rate = nrm_fac = 0.0
read_wt = 0
instructions_file = "instructions.txt"
data_input_file = "data_input_train.txt"
data_output_file = "data_output_train.txt"
weight_file = ""
f_ins = open(instructions_file, "r")
lns = f_ins.readlines()
# reading the instructions from the instruction files
try:
lrn_rate = float(lns[0].strip(' \n')) # first line should be learning rate
epoch_itr = int(lns[1].strip(' \n')) # second line should contain no of iterations
inpt_dim = lns[2].strip(' \n').split(' ') # third line should contain the input matrix dimensions
inpt_dim = [int(inpt_dim[i]) for i in range(len(inpt_dim))]
if (len(inpt_dim) == 3):
mask_depth.append(inpt_dim[2])
else:
mask_depth.append(1)
optLyr = int(lns[3].strip(' \n')) # fourth line should contain no of nodes in output layer
nrm_fac = float(lns[4].strip(' \n')) # fifth line should contain normalization factor
hydLyr = int(lns[5].strip(' \n')) # sixth line should contain no of hidden layer
ndelst.extend(
[int(x) for x in lns[6].strip(' \n').split(' ')]) # seventh line should contain no of nodes in hidden layer
ndelst.append(optLyr)
read_wt_ln = lns[7].strip(' \n')
if (int(read_wt_ln[0]) == 1):
weight_file = (read_wt_ln.split(' '))[1]
read_wt = 1
for i in range(8, len(lns)): # From eighth line the convolutions and pooling instructions are given
intgs = lns[i].strip(' \n').split(' ')
operate = cnn_layer_types.index(intgs[0])
if (operate == 0): # check for convolution or pooling
cnn_layer_info.append((operate, int(intgs[1]), int(intgs[2]), int(intgs[3]), int(intgs[4])))
mask_depth.append(int(intgs[4]))
else:
cnn_layer_info.append((operate, int(intgs[1]), int(intgs[2])))
mask_depth.append(mask_depth[-1])
except:
print("Wrong Instruction list .. Exitting code")
exit(1)
f_ins.close()
# checking whether convolution operations are correct or not
def check_input():
row, col = inpt_dim[0], inpt_dim[1]
for i in range(len(cnn_layer_info)):
pad = 0 # the pad applied
if (cnn_layer_info[i][0] == 0):
pad = cnn_layer_info[i][3]
row = row - cnn_layer_info[i][1] + 2 * pad + 1
col = col - cnn_layer_info[i][2] + 2 * pad + 1
return row, col
row, col = check_input()
if (row <= 0 or col <= 0): # row and column should be positive to be valid
print("Invalid Convolution and pooling layers .. Exitting code")
exit(1)
inpLyr = row * col * mask_depth[-1] # no of input nodes for the fully connected layer
ndelst.insert(0, inpLyr)
# printing the layer informations
print(" Learn Rate = " + str(lrn_rate))
print(" No of epoch iterations = " + str(epoch_itr))
print(" No of input layer node = " + str(inpLyr))
print(" No of output layer node = " + str(optLyr))
print(" No of normalization = " + str(nrm_fac))
for i in range(len(cnn_layer_info)):
pad = 0
no_mask = None
if (cnn_layer_info[i][0] == 0):
pad = cnn_layer_info[i][3]
no_mask = cnn_layer_info[i][4]
print(" " + cnn_layer_types[cnn_layer_info[i][0]] + " " + str(cnn_layer_info[i][1]) + "X" + str(
cnn_layer_info[i][2]) + " pad " + str(pad) + " no of masks " + str(no_mask))
print(" No of Hidden layers = " + str(hydLyr))
print(" No of nodes in the hidden layers = ", end="")
for i in range(1, len(ndelst) - 1):
print(str(ndelst[i]), end=" ")
print("")
train_input = []
train_input_data = []
train_output = []
no_of_input_data = 0
# accepting input in the specified format and also the output
f_in = open(data_input_file, "r")
f_out = open(data_output_file, "r")
for lns in f_in:
intgs = [(float(x)) for x in lns.strip(' \n').split()]
if (len(intgs) == 0):
train_input.append(np.array(train_input_data))
train_input_data = []
no_of_input_data += 1
continue
train_input_data.append(np.multiply(1.0 / nrm_fac, intgs))
f_in.close()
for lns in f_out:
intgs = [float(x) for x in lns.split()]
train_output.append(intgs)
f_out.close()
def make_conv_mask(dep, row, col): # creating the mask for the convolution
return np.random.rand(dep, row, col) - .5 * np.ones(shape=(dep, row, col), dtype=float)
def make_max_pool(dep, row, col): # creating a dummy mask of same shape -- no use
return np.zeros(shape=(dep, row, col), dtype=float)
# for max pool, the positions of the maximum wrt to the weight mask is stored
def create_masks(): # returning the masks for the convolution
    cnn_masks = []  # contains all the correlation masks for each layer
    func_dict = {0: make_conv_mask, 1: make_max_pool}  # mask-creation function for each layer type
for i in range(len(cnn_layer_info)):
lyr_cnn_msk = [] # contains the mask for each layers
if (cnn_layer_info[i][0] != 1): # create masks for CONV Pool
for k in range(mask_depth[i + 1]): # creating specified no of masks in each layer
lyr_cnn_msk.append(
func_dict[cnn_layer_info[i][0]](mask_depth[i], cnn_layer_info[i][1], cnn_layer_info[i][2]))
else:
lyr_cnn_msk.append(
func_dict[cnn_layer_info[i][0]](mask_depth[i], cnn_layer_info[i][1], cnn_layer_info[i][2]))
cnn_masks.append(lyr_cnn_msk)
return cnn_masks
#read weights and masks from a file
def read_masks_wts():
f_wt = open(weight_file, "r")
lns = f_wt.readlines()
c = 0
wtmtx = [] # the array of the corresponding weight matrices
masks_list = [] # the convolution masks
for i in range(len(cnn_layer_info)):
if( cnn_layer_info[i][0] == 0 ):
masks_list_lyr = []
for j in range(cnn_layer_info[i][-1]):
masks = np.zeros(shape=(mask_depth[i],cnn_layer_info[i][1],cnn_layer_info[i][2]), dtype=float)
for row in range(len(masks[0])):
row_ln = [x for x in lns[c].strip(' \n').split('\t')]
c+=1
for dep in range(len(masks)):
mtx_row = [(float(x)) for x in row_ln[dep].strip(' \n').split(' ')]
for col in range(len(masks[0][0])):
masks[dep][row][col] = mtx_row[col]
masks_list_lyr.append(masks)
c+=1
c+=1
else:
masks_list_lyr = []
masks = np.zeros(shape=(mask_depth[i], cnn_layer_info[i][1], cnn_layer_info[i][2]), dtype=float)
c = c + 3 + len(masks)
masks_list_lyr.append(masks)
masks_list.append(masks_list_lyr)
c+=1
for i in range(hydLyr + 1):
wt = [] # the weights
for j in range(0, ndelst[i + 1]):
intgs = [(float(x)) for x in lns[c].split()]
wt.append(np.array(intgs))
c += 1
wtmtx.append(np.array(wt))
c += 2
f_wt.close()
return wtmtx, masks_list
# creates the initial weights for the FC layer
def create_initial_wts():
wtmtx = [] # initial weight matrix list
for i in range(1, len(ndelst), 1):
# creating zero-centered weights
wtmtx.append(
np.random.rand(ndelst[i], ndelst[i - 1]) - .5 * np.ones(shape=(ndelst[i], ndelst[i - 1]), dtype=float))
return wtmtx
# used for adding zero pad as necessary
def add_padding(inpt, p):
opt_arr = np.zeros((len(inpt), len(inpt[0]) + 2 * p, len(inpt[0][0]) + 2 * p), dtype=float)
opt_arr[:, p:len(inpt[0]) + p, p:len(inpt[0][0]) + p] = inpt
return opt_arr
# used for removing the pad
def remove_pad(inpt, p):
return inpt[:, p:len(inpt[0]) - p, p:len(inpt[0][0]) - p]
def sigmoid(z):
# sigmoid function
return 1 / (1 + np.exp(-z))
def sigmoidPrime(z):
# gradient of sigmoid function
return np.exp(-z) / ((1 + np.exp(-z)) ** 2)
# used for applying convolution for CONV layers
def convolute(mask, inpt, opt_dep):
row = len(inpt[0]) - len(mask[0][0]) + 1
col = len(inpt[0][0]) - len(mask[0][0][0]) + 1
result = np.zeros(shape=(opt_dep, row, col), dtype=float)
for k in range(opt_dep):
for i in range(row):
for j in range(col):
result[k][i][j] = np.sum(
np.multiply(mask[k], inpt[:, i:(i + len(mask[0][0])), j:j + len(mask[0][0][0])]))
return result
# used for applying MAX Pool layers
def convolute_max_pool(mask, inpt, dep):
row = len(inpt[0]) - len(mask[0]) + 1
col = len(inpt[0][0]) - len(mask[0][0]) + 1
# print("row "+str(row))
# print("col " + str(col))
max_pos = np.zeros(shape=(dep, row, col), dtype=float)
result = np.zeros(shape=(dep, row, col), dtype=float)
for k in range(dep):
for i in range(row):
for j in range(col):
a = inpt[k, i:i + len(mask[0]), j:j + len(mask[0][0])]
pos = np.unravel_index(np.argmax(a, axis=None), a.shape)
                max_pos[k][i][j] = len(mask[0][0]) * pos[0] + pos[1]  # flattened (row, col) index of the window maximum
result[k][i][j] = np.amax(a)
return max_pos, result
# performs the forward pass of the CONV and MAXPOOL layers
def forword_cnn(inpt, cnn_masks):
inpt_list = []
for i in range(len(cnn_layer_info)):
if (cnn_layer_info[i][0] == 1): # special treatment for MAXPOOL layers
# print(str(len(inpt[0])) + " in forward_cnn1")
inpt_list.append(inpt)
cnn_masks[i][0] = make_max_pool(mask_depth[i], cnn_layer_info[i][1], cnn_layer_info[i][2])
cnn_masks[i][0], inpt = convolute_max_pool(cnn_masks[i][0], inpt, mask_depth[i])
# print(str(len(inpt[0])) + " in forward_cnn2")
else:
if (cnn_layer_info[i][0] == 0): # adding padding for CONV layers
inpt = add_padding(inpt, cnn_layer_info[i][-2])
inpt_list.append(inpt)
inpt = convolute(cnn_masks[i], inpt, mask_depth[i + 1])
inpt_list.append(inpt)
return inpt_list, cnn_masks
# performs the forward pass of the FC layer
def forward_pass(wtmtx, lyrs):
lyrs_list = [] # the layers contained in a list
lyrs_list_no_sgm = [] # the layers before the sigmoid is applied
lyrs_list.append(lyrs)
lyrs_list_no_sgm.append(lyrs)
for i in range(0, len(ndelst) - 1):
lyrs_list_no_sgm.append(np.matmul(wtmtx[i], lyrs))
lyrs = sigmoid(lyrs_list_no_sgm[-1])
lyrs_list.append(lyrs)
return lyrs_list, lyrs_list_no_sgm
# calculating mask gradient for CONV
def calc_mask_grad(mask, opt_lyr_grad, inpt_lyr):
mask_grad = np.zeros(shape=(len(mask), len(mask[0]), len(mask[0][0])), dtype=float)
for k in range(len(inpt_lyr)): # calculating mask gradient layer-wise
grad_2d = np.zeros(shape=(len(mask[0]), len(mask[0][0])), dtype=float)
for i in range(len(mask[0])):
for j in range(len(mask[0][0])):
grad_2d[i][j] = np.sum(
np.multiply(opt_lyr_grad, inpt_lyr[k, i:i + len(opt_lyr_grad), j:j + len(opt_lyr_grad[0])]))
mask_grad[k, :, :] = grad_2d
return mask_grad
# calculating layer gradients at each position for CONV
def jugar_grad(mask, opt_grad, i1, j1):
res = 0.0
for i in range(i1, i1 - len(mask), -1):
for j in range(j1, j1 - len(mask[0]), -1):
            try:  # skip indices beyond the highest valid length
                if (i < 0 or j < 0):  # skip negative indices
continue
res += opt_grad[i][j] * mask[i1 - i][j1 - j]
except:
pass
return res
# calculating layer gradients for CONV
def cnn_lyr_grad(mask_list, opt_lyr_grad, inpt_lyr):
inpt_lyr_grad = np.zeros(shape=(len(inpt_lyr), len(inpt_lyr[0]), len(inpt_lyr[0][0])), dtype=float)
for k in range(len(mask_list)):
mask = mask_list[k]
opt_grad = opt_lyr_grad[k]
for k1 in range(len(inpt_lyr)):
for i1 in range(len(inpt_lyr[0])):
for j1 in range(len(inpt_lyr[0][0])):
inpt_lyr_grad[k1][i1][j1] += jugar_grad(mask[k1], opt_grad, i1, j1)
return inpt_lyr_grad
# calculating layer gradients for MAX_POOL
def jugar_grad_max_pool(pos_mask, opt_grad, i1, j1, row_mask, col_mask):
res = 0.0
for i in range(i1, i1 - row_mask, -1):
for j in range(j1, j1 - col_mask, -1):
            try:  # skip indices beyond the highest valid length
                if (i < 0 or j < 0):  # skip negative indices
continue
mask = np.zeros(shape=(row_mask, col_mask), dtype=float)
                rw = int(pos_mask[i1 - i][j1 - j]) // col_mask
                cl = int(pos_mask[i1 - i][j1 - j]) % col_mask
mask[rw][cl] = 1.0
res += opt_grad[i][j] * mask[i1 - i][j1 - j]
except:
pass
return res
# calculating layer gradients for MAX_POOL
def cnn_lyr_grad_max_pool(pos_mask_list, opt_lyr_grad, inpt_lyr):
inpt_lyr_grad = np.zeros(shape=(len(inpt_lyr), len(inpt_lyr[0]), len(inpt_lyr[0][0])), dtype=float)
row_mask = len(inpt_lyr[0]) - len(opt_lyr_grad[0]) + 1
col_mask = len(inpt_lyr[0][0]) - len(opt_lyr_grad[0][0]) + 1
for k1 in range(len(inpt_lyr)):
pos_mask = pos_mask_list[k1]
opt_grad = opt_lyr_grad[k1]
for i1 in range(len(inpt_lyr[0])):
for j1 in range(len(inpt_lyr[0][0])):
inpt_lyr_grad[k1][i1][j1] = jugar_grad_max_pool(pos_mask, opt_grad, i1, j1, row_mask, col_mask)
return inpt_lyr_grad
# calculates the backward pass of the CONV and MAXPOOL layers
def backward_cnn(inpt_list, cnn_masks, last_lyr_grad):
mask_grad_list = []
layer_grad_list = []
layer_grad_list.append(last_lyr_grad)
for i in range(1, len(cnn_masks) + 1):
if (cnn_layer_info[-1 * i][0] == 0):
mask_grad_lyr = []
for j in range(len(cnn_masks[-1 * i])):
mask_grad_lyr.append(
calc_mask_grad(cnn_masks[-1 * i][j], layer_grad_list[-1][j], inpt_list[-1 * i - 1]))
mask_grad_list.append(mask_grad_lyr)
lyr_grad = cnn_lyr_grad(cnn_masks[-1 * i], layer_grad_list[-1], inpt_list[-1 * i - 1])
layer_grad_list.append(remove_pad(lyr_grad, cnn_layer_info[-1 * i][-2]))
inpt_list[-1 * i - 1] = remove_pad(inpt_list[-1 * i - 1], cnn_layer_info[-1 * i][-2])
elif (cnn_layer_info[-1 * i][0] == 1):
layer_grad_list.append(
cnn_lyr_grad_max_pool(cnn_masks[-1 * i][0], layer_grad_list[-1], inpt_list[-1 * i - 1]))
mask_grad_list.append(cnn_masks[-1 * i]) # adding dummy gradients to maintain indices
mask_grad_list = mask_grad_list[::-1]
layer_grad_list = layer_grad_list[::-1]
return mask_grad_list, layer_grad_list
# performs the cost function of the entire network
def cost_func(final_lyr, label):
for i in range(len(final_lyr)):
final_lyr[i] = final_lyr[i] - label[i] # difference between the required labels
err = np.linalg.norm(final_lyr) ** 2 # taking the squares
return final_lyr, err
# performs the backpropagation of the FC layer
def backprop(wtmtx, lyrs, lyrs_list_no_sgm):
lyr_grad = [] # gradient for the corresponding layers
wt_grad = [] # gradient for the weight matrices
opt_lyr = np.multiply(2, lyrs[-1]) # gradient from the error function
x = sigmoidPrime(np.array(lyrs_list_no_sgm[-1])) # gradient while passing the sigmoid layer
opt_lyr = np.multiply(opt_lyr, x) # final output layer gradient with weights multiplied
lyr_grad.append(opt_lyr)
for i in range(2, len(lyrs) + 1):
x = np.matmul(lyr_grad[-1], np.transpose(lyrs[-1 * i]))
wt_grad.append(x)
opt_lyr = np.matmul(np.transpose(wtmtx[1 - i]), lyr_grad[-1])
opt_lyr = np.multiply(opt_lyr, sigmoidPrime(np.array(lyrs_list_no_sgm[-1 * i])))
lyr_grad.append(opt_lyr)
wt_grad = wt_grad[::-1] # reversing the array
lyr_grad = lyr_grad[::-1] # reversing the array
return wt_grad, lyr_grad
# update the CONV and the MAXPOOL layers masks
def cnn_update_masks(masks, masks_grad):
global lrn_rate
new_masks = []
for i in range(len(masks)):
if (cnn_layer_info[i][0] == 1):
new_masks.append(masks[i])
else:
new_masks_lyr = []
for j in range(len(masks[i])):
new_masks_lyr.append(masks[i][j] + np.multiply(lrn_rate * (-1), masks_grad[i][j]))
new_masks.append(new_masks_lyr)
return new_masks
# updating the new weight matrix as per gradient of the FC layer
def wt_update(wtx_grad_dt_pts, wtx):
global lrn_rate
return np.add(wtx, np.multiply(lrn_rate * (-1), wtx_grad_dt_pts[0]))
#used for calculating gradients over all the data points
def run(cnn_masks, wtmx, k):
mask_grad_dt_pts = []
wt_grad_dt_pts = []
err_total = 0.0
for i in range(no_of_input_data):
inptt = np.array(train_input[i]).reshape(mask_depth[0], len(train_input[i]), len(train_input[i][0]))
inp, msk = forword_cnn(inptt, cnn_masks)
inp_last = np.array(inp[-1])
sgm, no_sgm = forward_pass(wtmx, inp_last.reshape(inpLyr, 1))
sgm[-1], err = cost_func(sgm[-1], train_output[i])
err_total += err # taking up for the total error
wt_grad, lyrs_grad = backprop(wtmx, sgm, no_sgm)
fst_lyr_grad = np.array(lyrs_grad[0]).reshape(inp_last.shape)
msk_grad, inp_grad = backward_cnn(inp, msk, fst_lyr_grad)
wt_grad_dt_pts.append(wt_grad)
mask_grad_dt_pts.append(msk_grad)
if (i != 0):
wt_grad_dt_pts[0] = np.add(wt_grad_dt_pts[0], wt_grad_dt_pts[1]) # the zeroth element is the sum
wt_grad_dt_pts = wt_grad_dt_pts[:1] # discarding the next element, the grad weight for that data point
for i in range(len(mask_grad_dt_pts[0])):
for j in range(len(mask_grad_dt_pts[0][i])):
mask_grad_dt_pts[0][i][j] = np.add(mask_grad_dt_pts[0][i][j], mask_grad_dt_pts[1][i][j])
mask_grad_dt_pts = mask_grad_dt_pts[:1] # discarding the next element, the grad mask for that data point
wtmx = wt_update(wt_grad_dt_pts, wtmx)
cnn_masks = cnn_update_masks(cnn_masks, mask_grad_dt_pts[0])
print("The error for the epoch " + str(k) + " " + str(err_total), end="")
return wtmx, cnn_masks, err_total
# used for copying CNN masks
def copy_cnn_mask(cnn_masks):
mask_new = []
for i in range(len(cnn_masks)):
mask_lyr_new = []
for j in range(len(cnn_masks[i])):
mask_lyr_new.append( | np.copy(cnn_masks[i][j]) | numpy.copy |
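# --- Illustrative aside (not part of the source above) ---------------------
# The convolute() routine above computes a "valid" cross-correlation by sliding
# the mask over the input and summing element-wise products. A tiny worked
# example with made-up values:
import numpy as np

inpt = np.arange(16, dtype=float).reshape(1, 4, 4)   # depth 1, 4x4 input
mask = np.ones((1, 1, 3, 3))                         # one 3x3 mask of depth 1
out = np.zeros((1, 2, 2))
for k in range(out.shape[0]):
    for i in range(out.shape[1]):
        for j in range(out.shape[2]):
            out[k, i, j] = np.sum(mask[k] * inpt[:, i:i + 3, j:j + 3])
print(out)   # each entry is the sum over one 3x3 window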
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import json
import os, sys
mod_path = os.path.abspath(os.path.join('..','Model'))
sys.path.append(mod_path)
from oo_Parameters import *
from MorphologyData import *
#start_scope()
######################################################
## Load Morpho
######################################################
#morph = '../Model/Branco2010_Morpho.swc'
#morph_data = BrancoData
morph = '../Model/Acker2008.swc'
morph_data = AckerData
loc1 = 'basal' #'tuft','apical','basal'
print('loc1: ',loc1)
if loc1 == 'tuft':
distComps = distal_Acker_tuft
proxComps = proximal_Acker_tuft
elif loc1 == 'apical':
distComps = distal_Acker_apical
proxComps = proximal_Acker_apical
elif loc1 == 'basal':
distComps = distal_Acker_basal
proxComps = proximal_Acker_basal
else:
print('Error!')
sys.exit(1)
branchNr = len(proxComps)
print('branchNr: ',branchNr)
d_compartm = proxComps+distComps
nrIn = len(d_compartm)
hz_array = np.array([1.,3.,5.,10.,20.,30.,40.,50.])
nrHz = hz_array.size
synmodel = 'Chen' # synmodel = 'Chen' , synmodel = 'Clopath', synmodel = 'nonPlast'
print('synmodel: ',synmodel)
ME_Ascale = 4.0
nr_clst = 1
init_weight = 0.5
ME_A = 0.02
ME_Vrhigh = -60*mV
ME_Ar = 0.2
MEmaxRatio = 175.0
MEtau = 2.0*second
ChenW = np.zeros((nrIn,nrHz))
ChenEr = np.zeros((nrIn,nrHz))
ChenEf = np.zeros((nrIn,nrHz))
ChenMEdamp = np.zeros((nrIn,nrHz))
ChenMEmax = np.zeros((nrIn,nrHz))
ChenPE = np.zeros((nrIn,nrHz))
for zzz in range(nrIn):
titlestr = 'DataPoissonInput/'+synmodel+'_'+loc1+'_'+str(ME_Ascale)+'_'+str(nr_clst)+'_'+str(init_weight)+'_'+str(ME_A)+'_'+str(ME_Vrhigh/mV)+'_'+str(ME_Ar)+'_'+str(MEmaxRatio)+'_'+str(MEtau/second)+'_'+str(d_compartm[zzz])
data1 = open(titlestr+'_w1.txt','r')
ChenW[zzz,:] = json.load(data1)
data1.close()
data1 = open(titlestr+'_Er1.txt','r')
ChenEr[zzz,:] = json.load(data1)
data1.close()
data1 = open(titlestr+'_Ef1.txt','r')
ChenEf[zzz,:] = json.load(data1)
data1.close()
data1 = open(titlestr+'_MEdamp1.txt','r')
ChenMEdamp[zzz,:] = json.load(data1)
data1.close()
data1 = open(titlestr+'_MEmax1.txt','r')
ChenMEmax[zzz,:] = json.load(data1)
data1.close()
data1 = open(titlestr+'_PE1.txt','r')
ChenPE[zzz,:] = json.load(data1)
data1.close()
ChenWmean = 100.0*np.mean(ChenW,axis=0)/init_weight
ChenWstd = 100.0*np.std(ChenW,axis=0) #/np.sqrt(ChenW.shape[0])
ChenErmean = np.mean(ChenEr,axis=0)
ChenErstd = np.std(ChenEr,axis=0) #/np.sqrt(ChenEr.shape[0])
ChenEfmean = np.mean(ChenEf,axis=0)
ChenEfstd = np.std(ChenEf,axis=0) #/np.sqrt(ChenEf.shape[0])
ChenMEdampmean = np.mean(ChenMEdamp,axis=0)
ChenMEdampstd = | np.std(ChenMEdamp,axis=0) | numpy.std |
# -*- coding: utf-8 -*-
#Created on Sun Dec 12 03:35:29 2021
#@author: maout
### calculate score function from empirical distribution
### uses RBF kernel
import math
import numpy as np
from functools import reduce
from scipy.spatial.distance import cdist
import numba
__all__ = ["my_cdist", "score_function_multid_seperate",
"score_function_multid_seperate_all_dims",
"score_function_multid_seperate_old" ]
#%%
@numba.njit(parallel=True,fastmath=True)
def my_cdist(r,y, output,dist='euclidean'):
"""
Fast computation of pairwise distances between data points in r and y matrices.
Stores the distances in the output array.
    Available distances: 'euclidean', 'sqeuclidean' and 'l1'
Parameters
----------
r : NxM array
First set of N points of dimension M.
y : N2xM array
Second set of N2 points of dimension M.
output : NxN2 array
Placeholder for storing the output of the computed distances.
dist : type of distance, optional
        Select 'euclidean', 'sqeuclidean' or 'l1' for Euclidean, squared Euclidean
        or Manhattan distances. The default is 'euclidean'.
Returns
-------
None. (The result is stored in place in the provided array "output").
"""
N, M = r.shape
N2, M2 = y.shape
    #assert( M == M2, 'The two inputs have different second dimension! Input should be N1xM and N2xM')
if dist == 'euclidean':
for i in numba.prange(N):
for j in numba.prange(N2):
tmp = 0.0
for k in range(M):
tmp += (r[i, k] - y[j, k])**2
output[i,j] = math.sqrt(tmp)
elif dist == 'sqeuclidean':
for i in numba.prange(N):
for j in numba.prange(N2):
tmp = 0.0
for k in range(M):
tmp += (r[i, k] - y[j, k])**2
output[i,j] = tmp
elif dist == 'l1':
for i in numba.prange(N):
for j in numba.prange(N2):
tmp = 0.0
for k in range(M):
                    tmp += abs(r[i, k] - y[j, k])  # L1: sum of absolute differences
                output[i,j] = tmp
return 0
def score_function_multid_seperate(X,Z,func_out=False, C=0.001,kern ='RBF',l=1,which=1,which_dim=1):
"""
Sparse kernel based estimation of multidimensional logarithmic gradient of empirical density represented
by samples X across dimension "which_dim" only.
- When `funct_out == False`: computes grad-log at the sample points.
    - When `func_out == False`: computes grad-log-p at the sample points.
    - When `func_out == True`: returns a function for the grad-log-p to be
the logarithmic gradient in the vicinity of the samples.
For estimation across all dimensions simultaneously see also
See also
----------
score_function_multid_seperate_all_dims
Parameters
----------
X: N x dim array ,
       N samples from the density (N x dim), where dim>=2 is the dimensionality of the system.
Z: M x dim array,
       inducing points (M x dim).
func_out : Boolean,
True returns function, if False return grad-log-p on data points.
l: float or array-like,
lengthscale of rbf kernel (scalar or vector of size dim).
C: float,
weighting constant (leave it at default value to avoid
unreasonable contraction of deterministic trajectories).
    which: (deprecated),
do not use.
which_dim: int,
which gradient of log density we want to compute
(starts from 1 for the 0-th dimension).
Returns
-------
    res1: array with the logarithmic gradient of the density along the given dimension (N_s x 1), or a function
          that accepts as input 2-dimensional arrays of dimension (K x dim), where K>=1.
"""
if kern=='RBF':
"""
<EMAIL>(parallel=True,fastmath=True)
def Knumba(x,y,l,res,multil=False): #version of kernel in the numba form when the call already includes the output matrix
if multil:
for ii in range(len(l)):
tempi = np.zeros((x[:,ii].size, y[:,ii].size ), dtype=np.float64)
##puts into tempi the cdist result
my_cdist(x[:,ii:ii+1], y[:,ii:ii+1],tempi,'sqeuclidean')
res = np.multiply(res,np.exp(-tempi/(2*l[ii]*l[ii])))
else:
tempi = np.zeros((x.shape[0], y.shape[0] ), dtype=np.float64)
my_cdist(x, y,tempi,'sqeuclidean') #this sets into the array tempi the cdist result
res = np.exp(-tempi/(2*l*l))
#return 0
"""
def K(x,y,l,multil=False):
if multil:
res = np.ones((x.shape[0],y.shape[0]))
for ii in range(len(l)):
#tempi = np.zeros((x[:,ii].size, y[:,ii].size ))
##puts into tempi the cdist result
#my_cdist(x[:,ii:ii+1], y[:,ii:ii+1],tempi,'sqeuclidean')
tempi = cdist(x[:,ii:ii+1], y[:,ii:ii+1],'sqeuclidean')
res = np.multiply(res, np.exp(-tempi/(2*l[ii]*l[ii])))
return res
else:
tempi = np.zeros((x.shape[0], y.shape[0] ))
my_cdist(x, y,tempi,'sqeuclidean') #this sets into the array tempi the cdist result
return | np.exp(-tempi/(2*l*l)) | numpy.exp |
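# --- Illustrative aside (not part of the source above) ---------------------
# The K() helper above builds a product RBF kernel with one lengthscale per
# dimension. The same result can be obtained directly with scipy's cdist, as in
# this small sketch with random data:
import numpy as np
from scipy.spatial.distance import cdist

x, y = np.random.randn(5, 2), np.random.randn(4, 2)
l = np.array([0.5, 2.0])                    # per-dimension lengthscales
K = np.ones((x.shape[0], y.shape[0]))
for d, ld in enumerate(l):
    sq = cdist(x[:, d:d + 1], y[:, d:d + 1], 'sqeuclidean')
    K *= np.exp(-sq / (2.0 * ld * ld))
print(K.shape)                              # (5, 4)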
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from collections import OrderedDict
from tqdm import tqdm
from config import get_config
from agent import get_agent
import numpy as np
import random
from joblib import Parallel, delayed
import pymesh
import torch
import struct
RESOLUTION = 33
TOTAL_POINTS = RESOLUTION * RESOLUTION * RESOLUTION
SPLIT_SIZE = int(np.ceil(TOTAL_POINTS / 50000.0 ))
NUM_SAMPLE_POINTS = int(np.ceil(TOTAL_POINTS / SPLIT_SIZE))
def main():
config = get_config('test')
print(config.exp_dir)
# create network and training agent
tr_agent = get_agent(config)
if config.ckpt:
tr_agent.load_ckpt(config.ckpt)
extra_pts = np.zeros((1, SPLIT_SIZE * NUM_SAMPLE_POINTS - TOTAL_POINTS, 3), dtype=np.float32)
batch_points = np.zeros((SPLIT_SIZE, 0, NUM_SAMPLE_POINTS, 3), dtype=np.float32)
num_sp_point = 6
for b in range(config.batch_size):
sdf_params = [-1.0,-1.0,-1.0,1.0,1.0,1.0]
x_ = np.linspace(sdf_params[0], sdf_params[3], num=RESOLUTION)
y_ = np.linspace(sdf_params[1], sdf_params[4], num=RESOLUTION)
z_ = np.linspace(sdf_params[2], sdf_params[5], num=RESOLUTION)
z, y, x = np.meshgrid(z_, y_, x_, indexing='ij')
x = np.expand_dims(x, 3)
y = np.expand_dims(y, 3)
z = np.expand_dims(z, 3)
all_pts = np.concatenate((x, y, z), axis=3).astype(np.float32)
all_pts = all_pts.reshape(1, -1, 3)
all_pts = np.concatenate((all_pts, extra_pts), axis=1).reshape(SPLIT_SIZE, 1, -1, 3)
batch_points = np.concatenate((batch_points, all_pts), axis=1)
pred_affs_all = np.zeros((SPLIT_SIZE, config.batch_size, NUM_SAMPLE_POINTS, 3*num_sp_point))
for sp in range(SPLIT_SIZE):
tr_agent.net.eval()
with torch.no_grad():
pred_affs = tr_agent.net.module.get_aff(torch.tensor(batch_points[sp]).cuda())
pred_affs_all[sp, :, :, :] = pred_affs.detach().cpu().numpy()
pred_affs_all = np.swapaxes(pred_affs_all, 0, 1) # B, S, NUM SAMPLE, 1 or 2
pred_affs_all = pred_affs_all.reshape((config.batch_size, -1, 3*num_sp_point))[:, :TOTAL_POINTS, :]
batch_points = np.swapaxes(batch_points, 0, 1) # B, S, NUM SAMPLE, 3
batch_points = batch_points.reshape((config.batch_size, -1, 3))[:, :TOTAL_POINTS, :]
fixed_affs_global = np.concatenate((
np.concatenate((batch_points[:, :, 0:2], -batch_points[:, :, 2:3]), axis=2),
np.concatenate((-batch_points[:, :, 0:1], batch_points[:, :, 1:3]), axis=2),
np.concatenate((batch_points[:, :, 0:1], -batch_points[:, :, 1:2], batch_points[:, :, 2:3]), axis=2),
| np.concatenate((-batch_points[:, :, 0:2], batch_points[:, :, 2:3]), axis=2) | numpy.concatenate |
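# --- Illustrative aside (not part of the source above) ---------------------
# The block above builds a RESOLUTION**3 grid of query points and pads it so it
# splits evenly into fixed-size chunks. A compact sketch of the same idea:
import numpy as np

res, chunk = 33, 5000
x_ = np.linspace(-1.0, 1.0, num=res)
z, y, x = np.meshgrid(x_, x_, x_, indexing='ij')
pts = np.stack([x, y, z], axis=-1).reshape(-1, 3)          # (res**3, 3)
n_chunks = int(np.ceil(pts.shape[0] / chunk))
pad = n_chunks * chunk - pts.shape[0]
pts = np.concatenate([pts, np.zeros((pad, 3))], axis=0).reshape(n_chunks, chunk, 3)
print(pts.shape)                                           # (8, 5000, 3)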
import sys
import numpy as np
import scipy.integrate
import scipy.special
from ._dblquad import dblquad
HAVE_PYGSL = False
try:
import pygsl.integrate
import pygsl.sf
HAVE_PYGSL = True
except ImportError:
pass
class BinEB(object):
def __init__(
self, tmin, tmax, Nb, windows=None, linear=False, useArcmin=True, fname=None
):
if fname is not None:
self.read_data(fname)
else:
# set basic params
if useArcmin:
am2r = np.pi / 180.0 / 60.0
else:
am2r = 1.0
self.Nb = Nb
self.L = tmin * am2r
self.H = tmax * am2r
if linear:
self.Lb = (self.H - self.L) / Nb * np.arange(Nb) + self.L
self.Hb = (self.H - self.L) / Nb * (np.arange(Nb) + 1.0) + self.L
else:
self.Lb = np.exp(np.log(self.H / self.L) / Nb * np.arange(Nb)) * self.L
self.Hb = (
np.exp(np.log(self.H / self.L) / Nb * ( | np.arange(Nb) | numpy.arange |
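# --- Illustrative aside (not part of the source above) ---------------------
# The non-linear branch above produces contiguous, logarithmically spaced bin
# edges between L and H. A tiny numeric example (values are arbitrary):
import numpy as np

L, H, Nb = 1.0, 100.0, 5
Lb = np.exp(np.log(H / L) / Nb * np.arange(Nb)) * L
Hb = np.exp(np.log(H / L) / Nb * (np.arange(Nb) + 1.0)) * L
print(Lb)   # lower edges: 1, 10**0.4, 10**0.8, ...
print(Hb)   # upper edges: each bin's upper edge equals the next bin's lower edge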
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import numpy as np
import theano.tensor as tt
from scipy.special import logsumexp
from scipy.stats import multivariate_normal
from scipy.optimize import approx_fprime
from theano import function as theano_function
import arviz as az
from pymc3.backends.ndarray import NDArray
from pymc3.model import Point, modelcontext
from pymc3.sampling import sample_prior_predictive
from pymc3.theanof import (
floatX,
inputvars,
join_nonshared_inputs,
make_shared_replacements,
gradient,
)
from pymc3.sinf.GIS import GIS
import torch
class NF_SMC:
"""Sequential Monte Carlo with normalizing flow based sampling."""
def __init__(
self,
draws=2000,
start=None,
threshold=0.5,
model=None,
random_seed=-1,
chain=0,
frac_validate=0.1,
iteration=None,
alpha=(0,0),
k_trunc=0.5,
pareto=False,
epsilon=1e-3,
local_thresh=3,
local_step_size=0.1,
local_grad=True,
nf_local_iter=0,
max_line_search=2,
verbose=False,
n_component=None,
interp_nbin=None,
KDE=True,
bw_factor=0.5,
edge_bins=None,
ndata_wT=None,
MSWD_max_iter=None,
NBfirstlayer=True,
logit=False,
Whiten=False,
batchsize=None,
nocuda=False,
patch=False,
shape=[28,28,1],
):
self.draws = draws
self.start = start
self.threshold = threshold
self.model = model
self.random_seed = random_seed
self.chain = chain
self.frac_validate = frac_validate
self.iteration = iteration
self.alpha = alpha
self.k_trunc = k_trunc
self.pareto = pareto
self.epsilon = epsilon
self.local_thresh = local_thresh
self.local_step_size = local_step_size
self.local_grad = local_grad
self.nf_local_iter = nf_local_iter
self.max_line_search = max_line_search
self.verbose = verbose
self.n_component = n_component
self.interp_nbin = interp_nbin
self.KDE = KDE
self.bw_factor = bw_factor
self.edge_bins = edge_bins
self.ndata_wT = ndata_wT
self.MSWD_max_iter = MSWD_max_iter
self.NBfirstlayer = NBfirstlayer
self.logit = logit
self.Whiten = Whiten
self.batchsize = batchsize
self.nocuda = nocuda
self.patch = patch
self.shape = shape
self.model = modelcontext(model)
if self.random_seed != -1:
np.random.seed(self.random_seed)
self.beta = 0
self.variables = inputvars(self.model.vars)
self.weights = np.ones(self.draws) / self.draws
#self.sinf_logq = np.array([])
self.log_marginal_likelihood = 0
def initialize_population(self):
"""Create an initial population from the prior distribution."""
population = []
var_info = OrderedDict()
if self.start is None:
init_rnd = sample_prior_predictive(
self.draws,
var_names=[v.name for v in self.model.unobserved_RVs],
model=self.model,
)
else:
init_rnd = self.start
init = self.model.test_point
for v in self.variables:
var_info[v.name] = (init[v.name].shape, init[v.name].size)
for i in range(self.draws):
point = Point({v.name: init_rnd[v.name][i] for v in self.variables}, model=self.model)
population.append(self.model.dict_to_array(point))
self.nf_samples = np.array(floatX(population))
#self.posterior = np.copy(self.nf_samples)
self.var_info = var_info
def setup_logp(self):
"""Set up the likelihood logp function based on the chosen kernel."""
shared = make_shared_replacements(self.variables, self.model)
self.prior_logp_func = logp_forw([self.model.varlogpt], self.variables, shared)
self.likelihood_logp_func = logp_forw([self.model.datalogpt], self.variables, shared)
self.posterior_logp_func = logp_forw([self.model.logpt], self.variables, shared)
self.posterior_dlogp_func = logp_forw([gradient(self.model.logpt, self.variables)], self.variables, shared)
self.prior_dlogp_func = logp_forw([gradient(self.model.varlogpt, self.variables)], self.variables, shared)
self.likelihood_dlogp_func = logp_forw([gradient(self.model.datalogpt, self.variables)], self.variables, shared)
def get_nf_logp(self):
"""Get the prior, likelihood and tempered posterior log probabilities, for the current NF samples."""
priors = [self.prior_logp_func(sample) for sample in self.nf_samples]
likelihoods = [self.likelihood_logp_func(sample) for sample in self.nf_samples]
self.nf_prior_logp = np.array(priors).squeeze()
self.nf_likelihood_logp = np.array(likelihoods).squeeze()
self.nf_posterior_logp = self.nf_prior_logp + self.nf_likelihood_logp * self.beta
def get_full_logp(self):
"""Get the prior, likelihood and tempered posterior log probabilities, for the full sample set."""
priors = [self.prior_logp_func(sample) for sample in self.posterior]
likelihoods = [self.likelihood_logp_func(sample) for sample in self.posterior]
self.prior_logp = np.array(priors).squeeze()
self.likelihood_logp = np.array(likelihoods).squeeze()
self.posterior_logp = self.prior_logp + self.likelihood_logp * self.beta
def eval_prior_logp(self, param_vals):
"""Evaluates the prior logp for given parameter values."""
prior_logps = [self.prior_logp_func(val) for val in param_vals]
return np.array(prior_logps).squeeze()
def eval_prior_dlogp(self, param_vals):
"""Evaluates the gradient of the prior logp for given parameter values."""
prior_dlogps = [self.prior_dlogp_func(val) for val in param_vals]
return np.array(prior_dlogps).squeeze()
def sinf_logq(self, param_vals):
"""Function for evaluating the SINF gradient."""
sinf_logq = self.nf_model.evaluate_density(torch.from_numpy(param_vals.astype(np.float32))).numpy().astype(np.float64)
return sinf_logq.item()
def target_logp(self, param_vals):
"""Evaluates logp of the target distribution for given parameter values."""
logps = [self.posterior_logp_func(val) for val in param_vals]
return np.array(logps).squeeze()
def tempered_logp(self, param_vals):
"""Evaluates the tempered logp of the target distribution for given parameter values."""
logps = [self.prior_logp_func(val) + self.beta * self.likelihood_logp_func(val) for val in param_vals]
return np.array(logps).squeeze()
def target_dlogp(self, param_vals):
"""Evaluates the gradient of the target distribution logp for given parameter values."""
dlogps = [self.posterior_dlogp_func(val) for val in param_vals]
return np.array(dlogps).squeeze()
def tempered_dlogp(self, param_vals):
"""Evaluates the gradient of the temepered target distribution for given parameter values."""
dlogps = [self.prior_dlogp_func(val) + self.beta * self.likelihood_dlogp_func(val) for val in param_vals]
return np.array(dlogps).squeeze()
def regularize_weights(self):
"""Either performs Pareto-smoothing of the IW, or applies clipping."""
if self.pareto:
psiw = az.psislw(self.log_sinf_weights)
self.log_sinf_weights = psiw[0]
self.sinf_weights = np.exp(self.log_sinf_weights)
elif not self.pareto:
self.log_sinf_weights = np.clip(self.log_sinf_weights, a_min=None,
a_max=logsumexp(self.log_sinf_weights) + (self.k_trunc - 1) * np.log(len(self.log_sinf_weights)))
self.log_sinf_weights = self.log_sinf_weights - logsumexp(self.log_sinf_weights)
self.sinf_weights = np.exp(self.log_sinf_weights)
def local_exploration(self, logq_func=None, dlogq_func=None):
"""Perform local exploration."""
self.high_iw_idx = np.where(self.log_sinf_weights >= | np.log(self.local_thresh) | numpy.log |
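# --- Illustrative aside (not part of the source above) ---------------------
# regularize_weights() above caps the log importance weights at
# logsumexp(log_w) + (k_trunc - 1) * log(N) and then renormalizes. A standalone
# sketch of that clipping step with random stand-in weights:
import numpy as np
from scipy.special import logsumexp

log_w = np.random.randn(1000)                # made-up unnormalized log-weights
k_trunc = 0.5
cap = logsumexp(log_w) + (k_trunc - 1) * np.log(len(log_w))
log_w = np.clip(log_w, a_min=None, a_max=cap)
log_w -= logsumexp(log_w)                    # renormalize
print(np.exp(log_w).sum())                   # ~1.0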
import numpy as np
import sys,os
import torch
from torchvision import transforms
from PIL import Image
import cv2
TAG_CHAR = np.array([202021.25], np.float32)
def make_color_wheel():
"""
Generate color wheel according Middlebury color code
:return: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3])
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.transpose(np.floor(255 * np.arange(0, RY) / RY))
col += RY
# YG
colorwheel[col:col + YG, 0] = 255 - np.transpose(np.floor(255 * np.arange(0, YG) / YG))
colorwheel[col:col + YG, 1] = 255
col += YG
# GC
colorwheel[col:col + GC, 1] = 255
colorwheel[col:col + GC, 2] = np.transpose(np.floor(255 * np.arange(0, GC) / GC))
col += GC
# CB
colorwheel[col:col + CB, 1] = 255 - np.transpose(np.floor(255 * np.arange(0, CB) / CB))
colorwheel[col:col + CB, 2] = 255
col += CB
# BM
colorwheel[col:col + BM, 2] = 255
colorwheel[col:col + BM, 0] = np.transpose(np.floor(255 * np.arange(0, BM) / BM))
    col += BM
# MR
colorwheel[col:col + MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))
colorwheel[col:col + MR, 0] = 255
return colorwheel
def compute_color(u, v):
"""
compute optical flow color map
:param u: horizontal optical flow
:param v: vertical optical flow
:return:
"""
height, width = u.shape
img = np.zeros((height, width, 3))
NAN_idx = np.isnan(u) | np.isnan(v)
u[NAN_idx] = v[NAN_idx] = 0
colorwheel = make_color_wheel()
ncols = np.size(colorwheel, 0)
rad = np.sqrt(u ** 2 + v ** 2)
a = np.arctan2(-v, -u) / np.pi
fk = (a + 1) / 2 * (ncols - 1) + 1
k0 = np.floor(fk).astype(int)
k1 = k0 + 1
k1[k1 == ncols + 1] = 1
f = fk - k0
for i in range(0, np.size(colorwheel, 1)):
tmp = colorwheel[:, i]
col0 = tmp[k0 - 1] / 255
col1 = tmp[k1 - 1] / 255
col = (1 - f) * col0 + f * col1
idx = rad <= 1
col[idx] = 1 - rad[idx] * (1 - col[idx])
notidx = np.logical_not(idx)
col[notidx] *= 0.75
img[:, :, i] = np.uint8(np.floor(255 * col * (1 - NAN_idx)))
return img
def getPerspectiveTransformMatrix(p1, p2):
matrixIndex = 0
A=[]
for i in range(0, len(p1)):
x, y = p1[i][0], p1[i][1]
u, v = p2[i][0], p2[i][1]
A.append( [-x, -y, -1, 0, 0, 0, u * x, u * y, u])
for i in range(0, len(p1)):
x, y = p1[i][0], p1[i][1]
u, v = p2[i][0], p2[i][1]
A.append([0, 0, 0, -x, -y, -1, v*x, v*y, v])
A = np.asarray(A)
U, S, Vh = np.linalg.svd(A)
np.set_printoptions(suppress=True)
#print(Vh)
L = Vh[-1,:]
H = np.reshape(L,(3, 3))
H=H/H[0,0]
return H
def readFlow(fn):
""" Read .flo file in Middlebury format"""
with open(fn, 'rb') as f:
magic = np.fromfile(f, np.float32, count=1)
if 202021.25 != magic:
print('Magic number incorrect. Invalid .flo file')
return None
else:
w = np.fromfile(f, np.int32, count=1)
h = np.fromfile(f, np.int32, count=1)
#print('Reading %d x %d flo file\n' % (w, h))
data = np.fromfile(f, np.float32, count=2*int(w)*int(h))
# Reshape data into 3D array (columns, rows, bands)
# The reshape here is for visualization, the original code is (w,h,2)
x=np.resize(data, (int(h), int(w), 2))
return x
def homography(flow_filename):
flow_data = readFlow(flow_filename)
u = flow_data[:, :,0]
v = flow_data[:, :,1]
# u = cv2.normalize(flow_data[..., 0], None, -10, 10, cv2.NORM_MINMAX)
# v = cv2.normalize(flow_data[..., 1], None, -10, 10, cv2.NORM_MINMAX)
print(np.mean(u))
print(np.mean(v))
print(np.std(u))
print(np.std(v))
print(np.max(u))
print(np.min(u))
print(np.max(v))
print(np.min(v))
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = max(-1, np.max(rad))
u = u / maxrad + np.finfo(float).eps
v = v / maxrad + np.finfo(float).eps
img = compute_color(u, v)
out = "KTrial.png"
print("Saving aligned image : ", out)
cv2.imwrite(out, img)
dx=np.zeros((37,37))
dy = np.zeros((37, 37))
a=0
for i in range(9,190,5):
b=0
for j in range(9,190,5):
dx[a,b]=u[i,j]
dy[a,b]=v[i,j]
b=b+1
a=a+1
#print(dx)
sy, sx = np.mgrid[10:191:5, 10:191:5]
tx=sx+dx;
ty=sy+dy;
aa = sx.flatten('F')
bb = sy.flatten('F')
cc = tx.flatten('F')
dd = ty.flatten('F')
p1=np.column_stack((aa, bb))
p2=np.column_stack((cc, dd))
p1 = np.round_(p1, 4)
p2=np.round_(p2,4)
    np.set_printoptions(suppress=True)
H=getPerspectiveTransformMatrix(p1,p2)
return H
if __name__ == '__main__':
H = homography("/home/nudlesoup/Research/flownet2-pytorch/rangetest/kdata/k.flo")
np.set_printoptions(suppress=True)
| np.round_(H, 4) | numpy.round_ |
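# --- Illustrative aside (not part of the source above) ---------------------
# getPerspectiveTransformMatrix() above is the standard DLT: stack two linear
# constraints per correspondence and take the SVD null vector. A quick check
# with made-up points related by a known translation:
import numpy as np

p1 = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.], [2., 3.]])
p2 = p1 + np.array([2.0, -1.0])              # ground-truth H is a translation
A = []
for (x, y), (u, v) in zip(p1, p2):
    A.append([-x, -y, -1, 0, 0, 0, u * x, u * y, u])
    A.append([0, 0, 0, -x, -y, -1, v * x, v * y, v])
_, _, Vh = np.linalg.svd(np.asarray(A))
H = Vh[-1].reshape(3, 3)
print(np.round(H / H[0, 0], 4))              # ~[[1, 0, 2], [0, 1, -1], [0, 0, 1]]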
from __future__ import division
import glob
import cv2
import numpy as np
from tqdm import tqdm
def safe_ln(x, minval=0.0000000001):
return np.log(x.clip(min=minval))
def __normalize_staining(I=None):
I = I.astype(np.float64)
Io = 240
beta = 0.15
alpha = 1
HERef = np.array([[0.5626, 0.2159], [0.7201, 0.8012], [0.4062, 0.5581]])
maxCRef = np.array([1.9705, 1.0308])
(h, w, c) = np.shape(I)
I = np.reshape(I, (h * w, c), order='F')
# Step 1. Convert RGB to OD.
OD = - np.log((I + 1) / Io) # optical density where each channel in the image is normalized to values between
# [0, 1]
# Step 2. Remove data with OD intensity less than beta
ODhat = (OD[(np.logical_not((OD < beta).any(axis=1))), :])
# Step 3. Calculate SVD on the OD tuples
cov = np.cov(ODhat, rowvar=False)
(W, V) = np.linalg.eig(cov)
# Step 4. create plane from the SVD directions
# corresponding to the two largest singular values
Vec = - np.transpose(np.array([V[:, 1], V[:, 0]]))
# Step 5. Project data onto the plane and normalize to unit Length
That = np.dot(ODhat, Vec)
# Step 6. Calculate angle of each point w.r.t the first SVD direction
phi = np.arctan2(That[:, 1], That[:, 0])
    # Step 7. Find robust extremes (the alpha-th and (100 - alpha)-th percentiles) of the angle
minPhi = np.percentile(phi, alpha)
maxPhi = np.percentile(phi, 100 - alpha)
vMin = np.dot(Vec, np.array([np.cos(minPhi), np.sin(minPhi)]))
vMax = np.dot(Vec, np.array([np.cos(maxPhi), np.sin(maxPhi)]))
if vMin[0] > vMax[0]:
HE = np.array([vMin, vMax])
else:
HE = np.array([vMax, vMin])
HE = np.transpose(HE)
# Step 8. Convert extreme values back to OD space
Y = np.transpose(np.reshape(OD, (h * w, c)))
C = np.linalg.lstsq(HE, Y)
maxC = | np.percentile(C[0], 99, axis=1) | numpy.percentile |
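# --- Illustrative aside (not part of the source above) ---------------------
# Steps 1-2 above convert RGB intensities to optical density via Beer-Lambert
# and drop nearly transparent pixels. A two-pixel numeric sketch:
import numpy as np

I = np.array([[200., 120., 180.],     # tissue-like pixel
              [240., 239., 238.]])    # nearly white (background) pixel
Io, beta = 240, 0.15
OD = -np.log((I + 1) / Io)                            # optical density
ODhat = OD[~(OD < beta).any(axis=1)]                  # keep only "stained" pixels
print(ODhat.shape)                                    # (1, 3): background removed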
'''
Methods which sonify annotations for "evaluation by ear".
All functions return a raw signal at the specified sampling rate.
'''
import numpy as np
from numpy.lib.stride_tricks import as_strided
from scipy.interpolate import interp1d
from . import util
from . import chord
def clicks(times, fs, click=None, length=None):
"""Returns a signal with the signal 'click' placed at each specified time
Parameters
----------
times : np.ndarray
times to place clicks, in seconds
fs : int
desired sampling rate of the output signal
click : np.ndarray
click signal, defaults to a 1 kHz blip
length : int
desired number of samples in the output signal,
defaults to ``times.max()*fs + click.shape[0] + 1``
Returns
-------
click_signal : np.ndarray
Synthesized click signal
"""
# Create default click signal
if click is None:
# 1 kHz tone, 100ms
click = np.sin(2*np.pi*np.arange(fs*.1)*1000/(1.*fs))
# Exponential decay
click *= np.exp(-np.arange(fs*.1)/(fs*.01))
# Set default length
if length is None:
length = int(times.max()*fs + click.shape[0] + 1)
# Pre-allocate click signal
click_signal = np.zeros(length)
# Place clicks
for time in times:
# Compute the boundaries of the click
start = int(time*fs)
end = start + click.shape[0]
# Make sure we don't try to output past the end of the signal
if start >= length:
break
if end >= length:
click_signal[start:] = click[:length - start]
break
# Normally, just add a click here
click_signal[start:end] = click
return click_signal
def time_frequency(gram, frequencies, times, fs, function=np.sin, length=None,
n_dec=1):
"""Reverse synthesis of a time-frequency representation of a signal
Parameters
----------
gram : np.ndarray
``gram[n, m]`` is the magnitude of ``frequencies[n]``
from ``times[m]`` to ``times[m + 1]``
Non-positive magnitudes are interpreted as silence.
frequencies : np.ndarray
array of size ``gram.shape[0]`` denoting the frequency of
each row of gram
times : np.ndarray, shape= ``(gram.shape[1],)`` or ``(gram.shape[1], 2)``
Either the start time of each column in the gram,
or the time interval corresponding to each column.
fs : int
desired sampling rate of the output signal
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``times[-1]*fs``
n_dec : int
        the number of decimals used to approximate each sonified frequency.
Defaults to 1 decimal place. Higher precision will be slower.
Returns
-------
output : np.ndarray
synthesized version of the piano roll
"""
# Default value for length
if times.ndim == 1:
# Convert to intervals
times = util.boundaries_to_intervals(times)
if length is None:
length = int(times[-1, 1] * fs)
times, _ = util.adjust_intervals(times, t_max=length)
# Truncate times so that the shape matches gram
n_times = gram.shape[1]
times = times[:n_times]
def _fast_synthesize(frequency):
"""A faster way to synthesize a signal.
Generate one cycle, and simulate arbitrary repetitions
using array indexing tricks.
"""
# hack so that we can ensure an integer number of periods and samples
# rounds frequency to 1st decimal, s.t. 10 * frequency will be an int
frequency = np.round(frequency, n_dec)
# Generate 10*frequency periods at this frequency
# Equivalent to n_samples = int(n_periods * fs / frequency)
# n_periods = 10*frequency is the smallest integer that guarantees
# that n_samples will be an integer, since assuming 10*frequency
# is an integer
n_samples = int(10.0**n_dec * fs)
short_signal = function(2.0 * np.pi * np.arange(n_samples) *
frequency / fs)
# Calculate the number of loops we need to fill the duration
n_repeats = int(np.ceil(length/float(short_signal.shape[0])))
# Simulate tiling the short buffer by using stride tricks
long_signal = as_strided(short_signal,
shape=(n_repeats, len(short_signal)),
strides=(0, short_signal.itemsize))
# Use a flatiter to simulate a long 1D buffer
return long_signal.flat
def _const_interpolator(value):
"""Return a function that returns `value`
no matter the input.
"""
def __interpolator(x):
return value
return __interpolator
# Threshold the tfgram to remove non-positive values
gram = np.maximum(gram, 0)
# Pre-allocate output signal
output = np.zeros(length)
time_centers = np.mean(times, axis=1) * float(fs)
for n, frequency in enumerate(frequencies):
# Get a waveform of length samples at this frequency
wave = _fast_synthesize(frequency)
# Interpolate the values in gram over the time grid
if len(time_centers) > 1:
gram_interpolator = interp1d(
time_centers, gram[n, :],
kind='linear', bounds_error=False,
fill_value=0.0)
# If only one time point, create constant interpolator
else:
gram_interpolator = _const_interpolator(gram[n, 0])
# Scale each time interval by the piano roll magnitude
for m, (start, end) in enumerate((times * fs).astype(int)):
# Clip the timings to make sure the indices are valid
start, end = max(start, 0), min(end, length)
# add to waveform
output[start:end] += (
wave[start:end] * gram_interpolator(np.arange(start, end)))
# Normalize, but only if there's non-zero values
norm = np.abs(output).max()
if norm >= np.finfo(output.dtype).tiny:
output /= norm
return output
def pitch_contour(times, frequencies, fs, amplitudes=None, function=np.sin,
length=None, kind='linear'):
'''Sonify a pitch contour.
Parameters
----------
times : np.ndarray
time indices for each frequency measurement, in seconds
frequencies : np.ndarray
frequency measurements, in Hz.
Non-positive measurements will be interpreted as un-voiced samples.
fs : int
desired sampling rate of the output signal
amplitudes : np.ndarray
        amplitude measurements, nonnegative
defaults to ``np.ones((length,))``
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``max(times)*fs``
kind : str
Interpolation mode for the frequency and amplitude values.
See: ``scipy.interpolate.interp1d`` for valid settings.
Returns
-------
output : np.ndarray
synthesized version of the pitch contour
'''
fs = float(fs)
if length is None:
length = int(times.max() * fs)
# Squash the negative frequencies.
# wave(0) = 0, so clipping here will un-voice the corresponding instants
frequencies = np.maximum(frequencies, 0.0)
# Build a frequency interpolator
f_interp = interp1d(times * fs, 2 * np.pi * frequencies / fs, kind=kind,
fill_value=0.0, bounds_error=False, copy=False)
# Estimate frequency at sample points
f_est = f_interp( | np.arange(length) | numpy.arange |
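# --- Illustrative aside (not part of the source above) ---------------------
# What clicks() produces, written out as a standalone sketch: a decaying 1 kHz
# blip copied into a silent buffer at each requested time.
import numpy as np

fs = 8000
times = np.array([0.5, 1.0, 1.5])
click = np.sin(2 * np.pi * np.arange(fs * .1) * 1000 / (1. * fs))
click *= np.exp(-np.arange(fs * .1) / (fs * .01))
signal = np.zeros(int(times.max() * fs + click.shape[0] + 1))
for t in times:
    start = int(t * fs)
    signal[start:start + click.shape[0]] = click
print(signal.shape)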
from mahotas import interpolate
import numpy as np
from nose.tools import raises
def test_spline_filter1d_smoke():
f = (np.arange(64*64, dtype=np.intc) % 64).reshape((64,64)).astype(np.float64)
f2 =interpolate.spline_filter1d(f,2,0)
assert f.shape == f2.shape
def test_spline_filter_smoke():
f = (np.arange(64*64, dtype=np.intc) % 64).reshape((64,64)).astype(np.float64)
f2 = interpolate.spline_filter(f,3)
assert f.shape == f2.shape
def test_zoom_ratio():
f = np.zeros((128,128))
f[32:64,32:64] = 128
for z in [.7,.5,.2,.1]:
output = interpolate.zoom(f,z)
ratio = output.sum()/f.sum()
assert np.abs(ratio - z*z) < .1
def test_zoom_ratio_2():
f = | np.zeros((128,128)) | numpy.zeros |
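# --- Illustrative aside (not part of the source above) ---------------------
# The zoom-ratio test above checks that shrinking an image by a factor z scales
# its pixel sum by roughly z**2. The same property can be seen with
# scipy.ndimage.zoom (used here only for illustration, not by mahotas):
import numpy as np
from scipy import ndimage

f = np.zeros((128, 128))
f[32:64, 32:64] = 128
out = ndimage.zoom(f, 0.5)
print(out.sum() / f.sum())   # close to 0.25 == 0.5**2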
#Approximate_Randomization code by <NAME>
import numpy as np
def meandiff(sample1,sample2):
mean1 = np.mean(sample1)
mean2 = np.mean(sample2)
diff = abs(mean1-mean2)
return diff
def meangt(sample1,sample2):
mean1 = np.mean(sample1)
mean2 = np.mean(sample2)
diff = mean1-mean2
return diff
def meanlt(sample1,sample2):
mean1 = np.mean(sample1)
mean2 = np.mean(sample2)
diff = mean2-mean1
return diff
#Return the likelihood that sample1's mean is greater than sample2's merely by chance
def chanceByChance(sample1,sample2,comparer=None,pairwise=True,repetitions=10000):
if not comparer:
comparer = meangt
true_diff = comparer(sample1,sample2)
n = len(sample1)
m = len(sample2)
if pairwise and n != m:
raise Exception("samples must be same size for pairwise. Got sample sizes {} and {}".format(n,m))
combined = np.concatenate([sample1,sample2])
def run_test(_):
np.random.shuffle(combined)
diff = comparer(combined[:n],combined[n:])
return diff > true_diff
def run_pairwise_test(_):
swapper = | np.random.rand(n) | numpy.random.rand |
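# --- Illustrative aside (not part of the source above) ---------------------
# The non-pairwise branch of chanceByChance() is a plain permutation test:
# pool both samples, reshuffle, and count how often the shuffled difference
# beats the observed one. A standalone sketch with synthetic data:
import numpy as np

rng = np.random.default_rng(0)
sample1 = rng.normal(0.5, 1.0, size=30)
sample2 = rng.normal(0.0, 1.0, size=30)
true_diff = sample1.mean() - sample2.mean()
combined = np.concatenate([sample1, sample2])
n, reps, count = len(sample1), 2000, 0
for _ in range(reps):
    rng.shuffle(combined)
    count += (combined[:n].mean() - combined[n:].mean()) > true_diff
print(count / reps)          # approximate one-sided p-value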
# -*- coding: utf-8 -*-
"""
Created on Wed May 08 10:39:48 2019
@author: Darin
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import PolyCollection
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import Shape_Functions
import scipy.sparse as sparse
import scipy.sparse.linalg as spla
import Material
import pyamg
from time import time
import cvxopt; import cvxopt.cholmod
class it_counter(object):
def __init__(self, disp=False):
self._disp = disp
self.it = 0
def __call__(self, rk=None):
self.it += 1
if self._disp:
print('iter %3i\trk = %s' % (self.it, str(rk)))
class FEM:
"""Provides functionality to solve the beam QC problem
"""
def __init__(self):
"""Create a 1-element rectangular mesh by default
Parameters
----------
None
Notes
-----
The proper calling order of functions is
1 - CreateRecMesh
2 - AddBc, AddLoad, and AddSpring; in any order
3 - SetMaterial
4 - Initialize
5 - ConstructSystem
6 - SolveSystem
An example of this process is at the end of the file
"""
self.elements = np.array([[0, 1, 3, 2]])
self.nElem = 1
self.nodes = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
self.nNode, self.nDof = self.nodes.shape
self.edgeElems = np.array([[0, 1], [0, 1], [0, 1], [0, 1]])
self.edgeLengths = | np.ones(4) | numpy.ones |
"""Utility functions for ICA-AROMA."""
import os
import os.path as op
import shutil
import nibabel as nib
import numpy as np
from nilearn import image, masking
def runICA(fsl_dir, in_file, out_dir, mel_dir_in, mask, dim, TR):
"""Run MELODIC and merge the thresholded ICs into a single 4D nifti file.
Parameters
----------
fsl_dir : str
Full path of the bin-directory of FSL
in_file : str
Full path to the fMRI data file (nii.gz) on which MELODIC
should be run
out_dir : str
Full path of the output directory
mel_dir_in : str or None
Full path of the MELODIC directory in case it has been run
before, otherwise None.
mask : str
Full path of the mask to be applied during MELODIC
dim : int
Dimensionality of ICA
TR : float
TR (in seconds) of the fMRI data
Output
------
melodic.ica/: MELODIC directory
melodic_IC_thr.nii.gz: Merged file containing the mixture modeling
thresholded Z-statistical maps located in
melodic.ica/stats/
"""
# Define the 'new' MELODIC directory and predefine some associated files
mel_dir = op.join(out_dir, 'melodic.ica')
mel_IC = op.join(mel_dir, 'melodic_IC.nii.gz')
mel_IC_mix = op.join(mel_dir, 'melodic_mix')
mel_IC_thr = op.join(out_dir, 'melodic_IC_thr.nii.gz')
# When a MELODIC directory is specified,
# check whether all needed files are present.
# Otherwise... run MELODIC again
if (mel_dir_in and op.isfile(op.join(mel_dir_in, 'melodic_IC.nii.gz'))
and op.isfile(op.join(mel_dir_in, 'melodic_FTmix'))
and op.isfile(op.join(mel_dir_in, 'melodic_mix'))):
print(' - The existing/specified MELODIC directory will be used.')
# If a 'stats' directory is present (contains thresholded spatial maps)
# create a symbolic link to the MELODIC directory.
# Otherwise create specific links and
# run mixture modeling to obtain thresholded maps.
if op.isdir(op.join(mel_dir_in, 'stats')):
os.symlink(mel_dir_in, mel_dir)
else:
print(" - The MELODIC directory does not contain the required "
"'stats' folder. Mixture modeling on the Z-statistical "
"maps will be run.")
# Create symbolic links to the items in the specified melodic
# directory
os.makedirs(mel_dir)
for item in os.listdir(mel_dir_in):
os.symlink(op.join(mel_dir_in, item),
op.join(mel_dir, item))
# Run mixture modeling
            melodic_command = ("{0} --in={1} --ICs={1} --mix={2} --outdir={3} "
                               "--Ostats --mmthresh=0.5").format(
op.join(fsl_dir, 'melodic'),
mel_IC,
mel_IC_mix,
mel_dir,
)
os.system(melodic_command)
else:
# If a melodic directory was specified, display that it did not
# contain all files needed for ICA-AROMA (or that the directory
# does not exist at all)
if mel_dir_in:
if not op.isdir(mel_dir_in):
print(' - The specified MELODIC directory does not exist. '
'MELODIC will be run separately.')
else:
print(' - The specified MELODIC directory does not contain '
'the required files to run ICA-AROMA. MELODIC will be '
'run separately.')
# Run MELODIC
melodic_command = ("{0} --in={1} --outdir={2} --mask={3} --dim={4} "
"--Ostats --nobet --mmthresh=0.5 --report "
"--tr={5}").format(
op.join(fsl_dir, 'melodic'),
in_file,
mel_dir,
mask,
dim,
TR
)
os.system(melodic_command)
# Get number of components
mel_IC_img = nib.load(mel_IC)
nr_ICs = mel_IC_img.shape[3]
# Merge mixture modeled thresholded spatial maps. Note! In case that
# mixture modeling did not converge, the file will contain two spatial
# maps. The latter being the results from a simple null hypothesis test.
# In that case, this map will have to be used (first one will be empty).
zstat_imgs = []
for i in range(1, nr_ICs + 1):
# Define thresholded zstat-map file
z_temp = op.join(mel_dir, "stats", "thresh_zstat{0}.nii.gz".format(i))
# Get number of volumes in component's thresholded image
z_temp_img = nib.load(z_temp)
if z_temp_img.ndim == 4:
len_IC = z_temp_img.shape[3]
# Extract last spatial map within the thresh_zstat file
zstat_img = image.index_img(z_temp_img, len_IC - 1)
else:
zstat_img = z_temp_img
zstat_imgs.append(zstat_img)
# Merge to 4D
zstat_4d_img = image.concat_imgs(zstat_imgs)
# Apply the mask to the merged image (in case a melodic-directory was
# predefined and run with a different mask)
zstat_4d_img = image.math_img(
"stat * mask[:, :, :, None]", stat=zstat_4d_img, mask=mask
)
zstat_4d_img.to_filename(mel_IC_thr)
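# Hedged usage sketch (editor's addition; the paths below are hypothetical):
#   runICA('/usr/local/fsl/bin', 'func_smoothed.nii.gz', '/out/aroma',
#          None, 'mask_melodic.nii.gz', dim=0, TR=2.0)
# runs MELODIC from scratch (no pre-existing MELODIC directory) and leaves
# 'melodic_IC_thr.nii.gz' in the output directory.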
def register2MNI(fsl_dir, in_file, out_file, affmat, warp):
"""Register an image (or time-series of images) to MNI152 T1 2mm.
If no affmat is defined, it only warps (i.e. it assumes that the data has
been registered to the structural scan associated with the warp-file
already). If no warp is defined either, it only resamples the data to 2mm
isotropic if needed (i.e. it assumes that the data has been registered to
a MNI152 template). In case only an affmat file is defined, it assumes that
the data has to be linearly registered to MNI152 (i.e. the user has a
reason not to use non-linear registration on the data).
Parameters
----------
fsl_dir : str
Full path of the bin-directory of FSL
in_file : str
Full path to the data file (nii.gz) which has to be registerd to
MNI152 T1 2mm
out_file : str
Full path of the output file
affmat : str
Full path of the mat file describing the linear registration (if data
is still in native space)
warp : str
Full path of the warp file describing the non-linear registration (if
data has not been registered to MNI152 space yet)
Output
------
melodic_IC_mm_MNI2mm.nii.gz : merged file containing the mixture modeling
thresholded Z-statistical maps registered to
MNI152 2mm
"""
# Define the MNI152 T1 2mm template
fslnobin = fsl_dir.rsplit('/', 2)[0]
ref = op.join(fslnobin, 'data', 'standard', 'MNI152_T1_2mm_brain.nii.gz')
# If the no affmat- or warp-file has been specified, assume that the data
# is already in MNI152 space. In that case only check if resampling to
# 2mm is needed
if not affmat and not warp:
in_img = nib.load(in_file)
# Get 3D voxel size
pixdim1, pixdim2, pixdim3 = in_img.header.get_zooms()[:3]
# If voxel size is not 2mm isotropic, resample the data, otherwise
# copy the file
if (pixdim1 != 2) or (pixdim2 != 2) or (pixdim3 != 2):
os.system(' '.join([op.join(fsl_dir, 'flirt'),
' -ref ' + ref,
' -in ' + in_file,
' -out ' + out_file,
' -applyisoxfm 2 -interp trilinear']))
else:
            shutil.copyfile(in_file, out_file)
# If only a warp-file has been specified, assume that the data has already
# been registered to the structural scan. In that case apply the warping
# without a affmat
elif not affmat and warp:
# Apply warp
os.system(' '.join([op.join(fsl_dir, 'applywarp'),
'--ref=' + ref,
'--in=' + in_file,
'--out=' + out_file,
'--warp=' + warp,
'--interp=trilinear']))
# If only a affmat-file has been specified perform affine registration to
# MNI
elif affmat and not warp:
os.system(' '.join([op.join(fsl_dir, 'flirt'),
'-ref ' + ref,
'-in ' + in_file,
'-out ' + out_file,
'-applyxfm -init ' + affmat,
'-interp trilinear']))
# If both a affmat- and warp-file have been defined, apply the warping
# accordingly
else:
os.system(' '.join([op.join(fsl_dir, 'applywarp'),
'--ref=' + ref,
'--in=' + in_file,
'--out=' + out_file,
'--warp=' + warp,
'--premat=' + affmat,
'--interp=trilinear']))
def cross_correlation(a, b):
"""Perform cross-correlations between columns of two matrices.
Parameters
----------
    a : (X x M) array_like
        First array to cross-correlate; rows are the X shared observations,
        columns are the M variables.
    b : (X x N) array_like
        Second array to cross-correlate; rows are the X shared observations,
        columns are the N variables.
Returns
-------
correlations : (M x N) array_like
Cross-correlations of columns of a against columns of b.
"""
assert a.ndim == b.ndim == 2
_, ncols_a = a.shape
    # N.B. variables are in columns rather than rows, hence the transpose
# extract just the cross terms between cols in a and cols in b
return np.corrcoef(a.T, b.T)[:ncols_a, ncols_a:]
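# Hedged usage sketch (editor's addition): both inputs must share the number
# of rows (observations); the result has one row per column of `a` and one
# column per column of `b`. For example, with hypothetical data:
#   >>> mix = np.random.rand(120, 5)   # 120 time points, 5 component courses
#   >>> rp = np.random.rand(120, 6)    # 120 time points, 6 motion parameters
#   >>> cross_correlation(mix, rp).shape
#   (5, 6)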
def classification(out_dir, max_RP_corr, edge_fract, HFC, csf_fract):
"""Classify components as motion or non-motion based on four features.
The four features used for classification are: maximum RP correlation,
high-frequency content, edge-fraction, and CSF-fraction.
Parameters
----------
out_dir : str
Full path of the output directory
max_RP_corr : (C,) array_like
Array of the 'maximum RP correlation' feature scores of the components
edge_fract : (C,) array_like
Array of the 'edge fraction' feature scores of the components
HFC : (C,) array_like
Array of the 'high-frequency content' feature scores of the components
csf_fract : (C,) array_like
Array of the 'CSF fraction' feature scores of the components
Returns
-------
motion_ICs : array_like
Array containing the indices of the components identified as motion
components
Output
------
classified_motion_ICs.txt : A text file containing the indices of the
components identified as motion components
"""
# Classify the ICs as motion or non-motion
# Define criteria needed for classification (thresholds and
# hyperplane-parameters)
thr_csf = 0.10
thr_HFC = 0.35
hyp = np.array([-19.9751070082159, 9.95127547670627, 24.8333160239175])
# Project edge & max_RP_corr feature scores to new 1D space
x = np.array([max_RP_corr, edge_fract])
proj = hyp[0] + np.dot(x.T, hyp[1:])
# Classify the ICs
motion_ICs = np.squeeze(
np.array(
np.where(
(proj > 0)
+ (csf_fract > thr_csf)
+ (HFC > thr_HFC)
)
)
)
# Put the feature scores in a text file
np.savetxt(op.join(out_dir, 'feature_scores.txt'),
np.vstack((max_RP_corr, edge_fract, HFC, csf_fract)).T)
# Put the indices of motion-classified ICs in a text file
with open(op.join(out_dir, 'classified_motion_ICs.txt'), 'w') as fo:
if motion_ICs.size > 1:
fo.write(','.join(['{:.0f}'.format(num) for num in
(motion_ICs + 1)]))
elif motion_ICs.size == 1:
fo.write('{:.0f}'.format(motion_ICs + 1))
# Create a summary overview of the classification
with open(op.join(out_dir, 'classification_overview.txt'), 'w') as fo:
fo.write('\t'.join(['IC',
'Motion/noise',
'maximum RP correlation',
'Edge-fraction',
'High-frequency content',
'CSF-fraction']))
fo.write('\n')
for i in range(0, len(csf_fract)):
if (proj[i] > 0) or (csf_fract[i] > thr_csf) or (HFC[i] > thr_HFC):
classif = "True"
else:
classif = "False"
fo.write('\t'.join(['{:d}'.format(i + 1),
classif,
'{:.2f}'.format(max_RP_corr[i]),
'{:.2f}'.format(edge_fract[i]),
'{:.2f}'.format(HFC[i]),
'{:.2f}'.format(csf_fract[i])]))
fo.write('\n')
return motion_ICs
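# Hedged illustration (editor's addition) of the decision rule above: a
# component is flagged as motion when its projected (max_RP_corr, edge_fract)
# score is positive, or csf_fract > 0.10, or HFC > 0.35. For instance, the
# hypothetical feature values max_RP_corr = 0.9 and edge_fract = 0.6 give
# proj = -19.98 + 0.9 * 9.95 + 0.6 * 24.83 ≈ 3.9 > 0, i.e. a motion component.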
def denoising(fsl_dir, in_file, out_dir, mixing, den_type, den_idx):
"""Remove noise components from fMRI data.
Parameters
----------
fsl_dir : str
Full path of the bin-directory of FSL
in_file : str
Full path to the data file (nii.gz) which has to be denoised
out_dir : str
Full path of the output directory
mixing : str
Full path of the melodic_mix text file
den_type : {"aggr", "nonaggr", "both"}
Type of requested denoising ('aggr': aggressive, 'nonaggr':
non-aggressive, 'both': both aggressive and non-aggressive
den_idx : array_like
Index of the components that should be regressed out
Output
------
denoised_func_data_<den_type>.nii.gz : The denoised fMRI data
"""
# Check if denoising is needed (i.e. are there motion components?)
motion_components_found = den_idx.size > 0
nonaggr_denoised_file = op.join(out_dir, "denoised_func_data_nonaggr.nii.gz")
aggr_denoised_file = op.join(out_dir, "denoised_func_data_aggr.nii.gz")
if motion_components_found:
mixing = | np.loadtxt(mixing) | numpy.loadtxt |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. All rights reserved.
from math import sqrt
import numpy as np
def get_deltas(mat1, mat2):
mat1 = np.vstack((mat1, np.array([0, 0, 0, 1])))
mat2 = np.vstack((mat2, np.array([0, 0, 0, 1])))
dMat = np.matmul(np.linalg.inv(mat1), mat2)
dtrans = dMat[0:3, 3] ** 2
dtrans = sqrt(dtrans.sum())
origVec = np.array([[0], [0], [1]])
rotVec = np.matmul(dMat[0:3, 0:3], origVec)
arccos = (rotVec * origVec).sum() / sqrt((rotVec ** 2).sum())
dAngle = | np.arccos(arccos) | numpy.arccos |
import numpy
import six.moves
import cellprofiler_core.image
import cellprofiler_core.measurement
from cellprofiler_core.constants.measurement import (
M_LOCATION_CENTER_X,
M_LOCATION_CENTER_Y,
FF_COUNT,
FF_PARENT,
COLTYPE_FLOAT,
M_NUMBER_OBJECT_NUMBER,
COLTYPE_INTEGER,
FF_CHILDREN_COUNT,
)
import cellprofiler.modules.splitormergeobjects
import cellprofiler_core.object
import cellprofiler_core.pipeline
import cellprofiler_core.workspace
import tests.modules
INPUT_OBJECTS_NAME = "inputobjects"
OUTPUT_OBJECTS_NAME = "outputobjects"
IMAGE_NAME = "image"
OUTLINE_NAME = "outlines"
def test_load_v5():
file = tests.modules.get_test_resources_directory("splitormergeobjects/v5.pipeline")
with open(file, "r") as fd:
data = fd.read()
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
pipeline.add_listener(callback)
pipeline.loadtxt(six.moves.StringIO(data))
module = pipeline.modules()[0]
assert module.objects_name.value == "IdentifyPrimaryObjects"
assert module.output_objects_name.value == "SplitOrMergeObjects"
assert module.relabel_option.value == "Merge"
assert module.distance_threshold.value == 0
assert not module.wants_image.value
assert module.image_name.value == "None"
assert module.minimum_intensity_fraction.value == 0.9
assert module.where_algorithm.value == "Closest point"
assert module.merge_option.value == "Distance"
assert module.parent_object.value == "None"
assert module.merging_method.value == "Disconnected"
def test_load_v4():
file = tests.modules.get_test_resources_directory("splitormergeobjects/v4.pipeline")
with open(file, "r") as fd:
data = fd.read()
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
pipeline.add_listener(callback)
pipeline.loadtxt(six.moves.StringIO(data))
assert len(pipeline.modules()) == 2
module = pipeline.modules()[0]
assert isinstance(
module, cellprofiler.modules.splitormergeobjects.SplitOrMergeObjects
)
assert module.objects_name == "blobs"
assert module.output_objects_name == "RelabeledBlobs"
assert (
module.relabel_option == cellprofiler.modules.splitormergeobjects.OPTION_MERGE
)
assert module.distance_threshold == 2
assert not module.wants_image
assert module.image_name == "Guide"
assert module.minimum_intensity_fraction == 0.8
assert (
module.where_algorithm
== cellprofiler.modules.splitormergeobjects.CA_CLOSEST_POINT
)
assert module.merge_option == cellprofiler.modules.splitormergeobjects.UNIFY_PARENT
assert module.parent_object == "Nuclei"
assert (
module.merging_method == cellprofiler.modules.splitormergeobjects.UM_CONVEX_HULL
)
module = pipeline.modules()[1]
assert (
module.relabel_option == cellprofiler.modules.splitormergeobjects.OPTION_SPLIT
)
assert module.wants_image
assert (
module.where_algorithm == cellprofiler.modules.splitormergeobjects.CA_CENTROIDS
)
assert (
module.merge_option == cellprofiler.modules.splitormergeobjects.UNIFY_DISTANCE
)
assert (
module.merging_method
== cellprofiler.modules.splitormergeobjects.UM_DISCONNECTED
)
def rruunn(
input_labels,
relabel_option,
merge_option=cellprofiler.modules.splitormergeobjects.UNIFY_DISTANCE,
unify_method=cellprofiler.modules.splitormergeobjects.UM_DISCONNECTED,
distance_threshold=5,
minimum_intensity_fraction=0.9,
where_algorithm=cellprofiler.modules.splitormergeobjects.CA_CLOSEST_POINT,
image=None,
parent_object="Parent_object",
parents_of=None,
):
"""Run the SplitOrMergeObjects module
returns the labels matrix and the workspace.
"""
module = cellprofiler.modules.splitormergeobjects.SplitOrMergeObjects()
module.set_module_num(1)
module.objects_name.value = INPUT_OBJECTS_NAME
module.output_objects_name.value = OUTPUT_OBJECTS_NAME
module.relabel_option.value = relabel_option
module.merge_option.value = merge_option
module.merging_method.value = unify_method
module.parent_object.value = parent_object
module.distance_threshold.value = distance_threshold
module.minimum_intensity_fraction.value = minimum_intensity_fraction
module.wants_image.value = image is not None
module.where_algorithm.value = where_algorithm
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.RunException)
pipeline.add_listener(callback)
pipeline.add_module(module)
image_set_list = cellprofiler_core.image.ImageSetList()
image_set = image_set_list.get_image_set(0)
if image is not None:
img = cellprofiler_core.image.Image(image)
image_set.add(IMAGE_NAME, img)
module.image_name.value = IMAGE_NAME
object_set = cellprofiler_core.object.ObjectSet()
o = cellprofiler_core.object.Objects()
o.segmented = input_labels
object_set.add_objects(o, INPUT_OBJECTS_NAME)
workspace = cellprofiler_core.workspace.Workspace(
pipeline,
module,
image_set,
object_set,
cellprofiler_core.measurement.Measurements(),
image_set_list,
)
if parents_of is not None:
m = workspace.measurements
ftr = FF_PARENT % parent_object
m[INPUT_OBJECTS_NAME, ftr] = parents_of
module.run(workspace)
output_objects = workspace.object_set.get_objects(OUTPUT_OBJECTS_NAME)
return output_objects.segmented, workspace
def test_split_zero():
labels, workspace = rruunn(
numpy.zeros((10, 20), int),
cellprofiler.modules.splitormergeobjects.OPTION_SPLIT,
)
assert numpy.all(labels == 0)
assert labels.shape[0] == 10
assert labels.shape[1] == 20
assert isinstance(workspace, cellprofiler_core.workspace.Workspace)
m = workspace.measurements
    assert isinstance(m, cellprofiler_core.measurement.Measurements)
count = m.get_current_image_measurement(FF_COUNT % OUTPUT_OBJECTS_NAME)
assert count == 0
for feature_name in (
M_LOCATION_CENTER_X,
M_LOCATION_CENTER_Y,
):
values = m.get_current_measurement(OUTPUT_OBJECTS_NAME, feature_name)
assert len(values) == 0
module = workspace.module
assert isinstance(
module, cellprofiler.modules.splitormergeobjects.SplitOrMergeObjects
)
columns = module.get_measurement_columns(workspace.pipeline)
assert len(columns) == 6
for object_name, feature_name, coltype in (
(OUTPUT_OBJECTS_NAME, M_LOCATION_CENTER_X, COLTYPE_FLOAT,),
(OUTPUT_OBJECTS_NAME, M_LOCATION_CENTER_Y, COLTYPE_FLOAT,),
(OUTPUT_OBJECTS_NAME, M_NUMBER_OBJECT_NUMBER, COLTYPE_INTEGER,),
(INPUT_OBJECTS_NAME, FF_CHILDREN_COUNT % OUTPUT_OBJECTS_NAME, COLTYPE_INTEGER,),
(OUTPUT_OBJECTS_NAME, FF_PARENT % INPUT_OBJECTS_NAME, COLTYPE_INTEGER,),
("Image", FF_COUNT % OUTPUT_OBJECTS_NAME, COLTYPE_INTEGER,),
):
assert any(
[
object_name == c[0] and feature_name == c[1] and coltype == c[2]
for c in columns
]
)
categories = module.get_categories(workspace.pipeline, "Image")
assert len(categories) == 1
assert categories[0] == "Count"
categories = module.get_categories(workspace.pipeline, OUTPUT_OBJECTS_NAME)
assert len(categories) == 3
assert any(["Location" in categories])
assert any(["Parent" in categories])
assert any(["Number" in categories])
categories = module.get_categories(workspace.pipeline, INPUT_OBJECTS_NAME)
assert len(categories) == 1
assert categories[0] == "Children"
f = module.get_measurements(workspace.pipeline, "Image", "Count")
assert len(f) == 1
assert f[0] == OUTPUT_OBJECTS_NAME
f = module.get_measurements(workspace.pipeline, OUTPUT_OBJECTS_NAME, "Location")
assert len(f) == 2
assert all([any([x == y for y in f]) for x in ("Center_X", "Center_Y")])
f = module.get_measurements(workspace.pipeline, OUTPUT_OBJECTS_NAME, "Parent")
assert len(f) == 1
assert f[0] == INPUT_OBJECTS_NAME
f = module.get_measurements(workspace.pipeline, OUTPUT_OBJECTS_NAME, "Number")
assert len(f) == 1
assert f[0] == "Object_Number"
f = module.get_measurements(workspace.pipeline, INPUT_OBJECTS_NAME, "Children")
assert len(f) == 1
assert f[0] == "%s_Count" % OUTPUT_OBJECTS_NAME
def test_split_one():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels_out, workspace = rruunn(
labels, cellprofiler.modules.splitormergeobjects.OPTION_SPLIT
)
assert numpy.all(labels == labels_out)
assert isinstance(workspace, cellprofiler_core.workspace.Workspace)
m = workspace.measurements
    assert isinstance(m, cellprofiler_core.measurement.Measurements)
count = m.get_current_image_measurement(FF_COUNT % OUTPUT_OBJECTS_NAME)
assert count == 1
for feature_name, value in (
(M_LOCATION_CENTER_X, 5),
(M_LOCATION_CENTER_Y, 3),
(FF_PARENT % INPUT_OBJECTS_NAME, 1),
):
values = m.get_current_measurement(OUTPUT_OBJECTS_NAME, feature_name)
assert len(values) == 1
assert round(abs(values[0] - value), 7) == 0
values = m.get_current_measurement(
INPUT_OBJECTS_NAME, FF_CHILDREN_COUNT % OUTPUT_OBJECTS_NAME,
)
assert len(values) == 1
assert values[0] == 1
def test_split_one_into_two():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 1
labels_out, workspace = rruunn(
labels, cellprofiler.modules.splitormergeobjects.OPTION_SPLIT
)
index = numpy.array([labels_out[3, 5], labels_out[3, 15]])
assert index[0] != index[1]
assert all([x in index for x in (1, 2)])
expected = numpy.zeros((10, 20), int)
expected[2:5, 3:8] = index[0]
expected[2:5, 13:18] = index[1]
assert numpy.all(labels_out == expected)
m = workspace.measurements
values = m.get_current_measurement(
OUTPUT_OBJECTS_NAME, FF_PARENT % INPUT_OBJECTS_NAME,
)
assert len(values) == 2
assert numpy.all(values == 1)
values = m.get_current_measurement(
INPUT_OBJECTS_NAME, FF_CHILDREN_COUNT % OUTPUT_OBJECTS_NAME,
)
assert len(values) == 1
assert values[0] == 2
def test_unify_zero():
labels, workspace = rruunn(
numpy.zeros((10, 20), int),
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
)
assert numpy.all(labels == 0)
assert labels.shape[0] == 10
assert labels.shape[1] == 20
def test_unify_one():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels_out, workspace = rruunn(
labels, cellprofiler.modules.splitormergeobjects.OPTION_MERGE
)
assert numpy.all(labels == labels_out)
def test_unify_two_to_one():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 2
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
distance_threshold=6,
)
assert numpy.all(labels_out[labels != 0] == 1)
assert numpy.all(labels_out[labels == 0] == 0)
def test_unify_two_stays_two():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 2
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
distance_threshold=4,
)
assert numpy.all(labels_out == labels)
def test_unify_image_centroids():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 2
image = numpy.ones((10, 20)) * (labels > 0) * 0.5
image[3, 8:13] = 0.41
image[3, 5] = 0.6
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
distance_threshold=6,
image=image,
minimum_intensity_fraction=0.8,
where_algorithm=cellprofiler.modules.splitormergeobjects.CA_CENTROIDS,
)
assert numpy.all(labels_out[labels != 0] == 1)
assert numpy.all(labels_out[labels == 0] == 0)
def test_dont_unify_image_centroids():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 2
image = numpy.ones((10, 20)) * labels * 0.5
image[3, 8:12] = 0.41
image[3, 5] = 0.6
image[3, 15] = 0.6
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
distance_threshold=6,
image=image,
minimum_intensity_fraction=0.8,
where_algorithm=cellprofiler.modules.splitormergeobjects.CA_CENTROIDS,
)
assert numpy.all(labels_out == labels)
def test_unify_image_closest_point():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 2
image = numpy.ones((10, 20)) * (labels > 0) * 0.6
image[2, 8:13] = 0.41
image[2, 7] = 0.5
image[2, 13] = 0.5
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
distance_threshold=6,
image=image,
minimum_intensity_fraction=0.8,
where_algorithm=cellprofiler.modules.splitormergeobjects.CA_CLOSEST_POINT,
)
assert numpy.all(labels_out[labels != 0] == 1)
assert numpy.all(labels_out[labels == 0] == 0)
def test_dont_unify_image_closest_point():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 2
image = numpy.ones((10, 20)) * labels * 0.6
image[3, 8:12] = 0.41
image[2, 7] = 0.5
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
distance_threshold=6,
image=image,
minimum_intensity_fraction=0.8,
where_algorithm=cellprofiler.modules.splitormergeobjects.CA_CLOSEST_POINT,
)
assert numpy.all(labels_out == labels)
def test_unify_per_parent():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 2
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
merge_option=cellprofiler.modules.splitormergeobjects.UNIFY_PARENT,
parent_object="Parent_object",
parents_of=numpy.array([1, 1]),
)
assert numpy.all(labels_out[labels != 0] == 1)
def test_unify_convex_hull():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 2
expected = numpy.zeros(labels.shape, int)
expected[2:5, 3:18] = 1
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
merge_option=cellprofiler.modules.splitormergeobjects.UNIFY_PARENT,
unify_method=cellprofiler.modules.splitormergeobjects.UM_CONVEX_HULL,
parent_object="Parent_object",
parents_of=numpy.array([1, 1]),
)
assert numpy.all(labels_out == expected)
def test_unify_nothing():
labels = numpy.zeros((10, 20), int)
for um in (
cellprofiler.modules.splitormergeobjects.UM_DISCONNECTED,
cellprofiler.modules.splitormergeobjects.UM_CONVEX_HULL,
):
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
merge_option=cellprofiler.modules.splitormergeobjects.UNIFY_PARENT,
unify_method=cellprofiler.modules.splitormergeobjects.UM_CONVEX_HULL,
parent_object="Parent_object",
parents_of= | numpy.zeros(0, int) | numpy.zeros |
# Copyright 2019-2020 Toyota Research Institute. All rights reserved.
"""
Defines a new XAS Spectrum object built on top of Pymatgen's
Spectrum object.
"""
import os
import numpy as np
from pymatgen.core.structure import Structure
from trixs.spectra.core import XAS_Spectrum, XAS_Collation
from trixs.spectra.spectrum_io import parse_spectrum
from copy import deepcopy
from numpy import eye
from pytest import fixture, raises
from json import loads, dumps
TEST_DIR = os.path.dirname(__file__)
TEST_FILE_DIR = os.path.join(TEST_DIR, 'test_files')
@fixture
def fake_structure():
lattice = eye(3)
species = ['H']
coords = np.array([[0, 0, 0]])
yield Structure(lattice, species, coords)
@fixture
def fake_spectrum(fake_structure):
x = np.random.uniform(size=100)
y = np.random.uniform(size=100)
return XAS_Spectrum(x, y, structure=fake_structure,
absorbing_site=0)
def test_instantiate_XAS_spectra(fake_structure):
x = np.random.uniform(size=100)
y = np.random.uniform(size=100)
absorbing_site = 0
spec = XAS_Spectrum(x, y, fake_structure, absorbing_site)
assert isinstance(spec, XAS_Spectrum)
def test_XAS_full_spec_attributes():
x = np.random.uniform(size=100)
y = np.random.uniform(size=100)
structure = Structure.from_file(os.path.join(TEST_FILE_DIR, 'Cu_structure.cif'))
absorbing_site = 0
full_spectrum = np.random.uniform(size=(100, 6))
spec = XAS_Spectrum(x, y, structure, absorbing_site, full_spectrum=full_spectrum)
assert isinstance(spec, XAS_Spectrum)
assert np.array_equal(spec.E, full_spectrum[:, 0])
assert np.array_equal(spec.Enorm, full_spectrum[:, 1])
assert np.array_equal(spec.k, full_spectrum[:, 2])
assert np.array_equal(spec.mu, full_spectrum[:, 3])
assert np.array_equal(spec.mu0, full_spectrum[:, 4])
assert np.array_equal(spec.chi, full_spectrum[:, 5])
assert spec.abs_idx == 0
assert isinstance(spec.as_dict(), dict)
def test_exceptions(fake_spectrum):
with raises(ValueError):
fake_spectrum.E()
with raises(ValueError):
fake_spectrum.mu()
with raises(ValueError):
fake_spectrum.Enorm()
with raises(ValueError):
fake_spectrum.mu0()
with raises(ValueError):
fake_spectrum.k()
with raises(ValueError):
fake_spectrum.chi()
with raises(ValueError):
fake_spectrum.shifted_Enorm(shift=0)
with raises(NotImplementedError):
fake_spectrum.normalize('zappa')
def test_load_from_doc_and_object():
with open(os.path.join(TEST_FILE_DIR, 'sample_spectrum_e.txt'), 'r') as f:
data = loads(f.readline())
spec1 = XAS_Spectrum.from_atomate_document(data)
spec2 = XAS_Spectrum.load_from_object(data)
line = dumps(data)
spec3 = XAS_Spectrum.load_from_object(line)
for spec in [spec1, spec2, spec3]:
assert isinstance(spec,XAS_Spectrum)
assert spec.has_full_spectrum()
assert spec.E[0] == 8334.08
assert spec.Enorm[0] == -9.293
assert spec.k[0] == -0.8
assert spec.mu[0] == 0.0519168
assert spec.mu0[0] == 0.0795718
assert spec.chi[0] == -0.027655
assert len(spec.E) == 100
assert len(spec.Enorm) == 100
assert len(spec.mu) == 100
assert len(spec.mu0) == 100
assert len(spec.k) == 100
assert len(spec.chi) == 100
enorm = spec1.Enorm
sub_enorm = | np.add(enorm,1) | numpy.add |
from flask import Flask
from flask import render_template
from flask import Flask, flash, request, redirect, url_for
from werkzeug.utils import secure_filename
import os
import numpy as np
import tensorflow as tf
import PIL
from tensorflow import keras
#backend instantiation
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = "static/upload_folder"
#loading ai model
model = tf.keras.models.load_model('ai/fingernail_model')
class_names = ['long', 'short']
@app.route('/')
def home(name=None):
return render_template("index.html")
@app.route("/upload", methods = ['POST'])
def upload():
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file:
filename = secure_filename(file.filename)
file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(file_path)
img_array = tf.keras.preprocessing.image.load_img(file_path, target_size = (64, 64))
img_array = tf.expand_dims(img_array, 0)
predictions = model.predict(img_array)
score = tf.nn.softmax(predictions)
statement = "I am {:.2f} percent confident that your fingernails are {}".format(100 * np.max(score), class_names[ | np.argmax(score) | numpy.argmax |
# -*- coding: utf-8 -*-
"""
Support functions for reading or calculating DISPATCH2 grid and geometry information.
"""
import numpy as np
class GeometricFactors(dict):
"""Calculate and store the geometric factors used by curvilinear grids."""
def __init__(self, patch):
"""Constructor."""
# Define geometric factors with the same notation as in `mesh_mod` ("c"
# for zone-centred and "f" for face-centred).
self['h2c'] = None
self['h2f'] = None
self['h31c'] = None
self['h31f'] = None
self['h32c'] = None
self['h32f'] = None
self['dx1c'] = None
self['dx1f'] = None
self['dx2c'] = None
self['dx2f'] = None
self['dx3c'] = None
self['dx3f'] = None
self['dvol1c'] = None
self['dvol1f'] = None
self['dvol2c'] = None
self['dvol2f'] = None
self['dvol3c'] = None
self['dvol3f'] = None
self['dar1c'] = None
self['dar1f'] = None
self['dar2c'] = None
self['dar2f'] = None
self['dar31c'] = None
self['dar31f'] = None
self['dar32c'] = None
self['dar32f'] = None
# initialize the grid
self.init_grid(patch)
def init_grid(self, p):
"""Initialise geometric factors based on coord. type."""
if p.mesh_type == 'Cartesian': self.init_Cartesian(p)
elif p.mesh_type == 'cylindrical': self.init_cylindrical(p)
elif p.mesh_type == 'spherical': self.init_spherical(p)
def init_Cartesian(self, p):
"""Initialise geometric factors for a Cartesian coord. system."""
n1, n2, n3 = p.ncell
# 1-direction
self['h2c'] = np.ones(n1)
self['h2f'] = np.ones(n1)
self['h31c'] = self['h2c'].view()
self['h31f'] = self['h2f'].view()
# 2-direction
self['h32c'] = np.ones(n2)
self['h32f'] = self['h32c'].view()
# linear size elements
self['dx1c'] = np.ones(n1) * p.ds[0]
self['dx1f'] = np.ones(n1) * p.ds[0]
self['dx2c'] = np.ones(n2) * p.ds[1]
self['dx2f'] = np.ones(n2) * p.ds[1]
self['dx3c'] = np.ones(n3) * p.ds[2]
self['dx3f'] = np.ones(n3) * p.ds[2]
# volume elements
self['dvol1c'] = np.ones(n1) * p.ds[0]
self['dvol1f'] = np.ones(n1) * p.ds[0]
self['dvol2c'] = np.ones(n2) * p.ds[1]
self['dvol2f'] = np.ones(n2) * p.ds[1]
self['dvol3c'] = np.ones(n3) * p.ds[2]
self['dvol3f'] = np.ones(n3) * p.ds[2]
# area elements
self['dar1c'] = self['h31c'] * self['h2c']
self['dar1f'] = self['h31f'] * self['h2f']
self['dar2c'] = self['h31f'] * p.ds[0] / self['dvol1c']
self['dar2f'] = self['h31c'] * p.ds[0] / self['dvol1f']
self['dar31c'] = self['h2f'] * p.ds[0] / self['dvol1c']
self['dar31f'] = self['h2c'] * p.ds[0] / self['dvol1f']
self['dar32c'] = p.ds[1] / self['dvol2c']
self['dar32f'] = p.ds[1] / self['dvol2f']
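        # Editor's note (hedged): with all scale factors (h2, h31, h32) equal
        # to one, the linear and volume elements above are just the constant
        # cell sizes p.ds[0..2], and every area factor dar* evaluates to unity.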
def init_cylindrical(self, p):
"""Initialise geometric factors for a cylindrical coord. system."""
n1, n2, n3 = p.ncell
# 1-direction
self['h2c'] = np.ones(n1)
self['h2f'] = np.ones(n1)
self['h31c'] = self['h2c'].view()
self['h31f'] = self['h2f'].view()
# 2-direction
pos_c = np.array(p.y )
pos_f = np.array(p.ys)
self['h32c'] = abs(pos_c)
self['h32f'] = abs(pos_f)
# linear size elements
self['dx1c'] = np.ones(n1) * p.ds[0]
self['dx1f'] = np.ones(n1) * p.ds[0]
self['dx2c'] = np.ones(n2) * p.ds[1]
self['dx2f'] = np.ones(n2) * p.ds[1]
self['dx3c'] = np.ones(n3) * p.ds[2]
self['dx3f'] = | np.ones(n3) | numpy.ones |
#!/usr/bin/env python
# Copyright (c) 2020 IBM Corp. - <NAME> <<EMAIL>>
# Based on: masked_language_modeling.py
# https://keras.io/examples/nlp/masked_language_modeling/
# Fixed spelling errors in messages and comments.
# Preparation on dyce2:
# virtualenv --system-site-packages tf-nightly
# source tf-nightly/bin/activate
# pip install tf-nightly
# pip install dataclasses
# pip install pandas
# pip install pydot
# Results in TF 2.5.0 using the available CUDA 11
import os
#0 = all messages are logged (default behavior)
#1 = INFO messages are not printed
#2 = INFO and WARNING messages are not printed
#3 = INFO, WARNING, and ERROR messages are not printed
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
from dataclasses import dataclass
import pandas as pd
import numpy as np
import glob
import re
from pprint import pprint
@dataclass
class Config:
MAX_LEN = 256 # length of each input sample in tokens
BATCH_SIZE = 32 # batch size
LR = 0.001 # learning rate
VOCAB_SIZE = 512 # max number of words in vocabulary
EMBED_DIM = 128 # word embedding vector size
NUM_HEAD = 8 # used in bert model
FF_DIM = 128 # feedforward; used in bert model
NUM_LAYERS = 1 # number of BERT module layers
config = Config()
# Every sample file contains a single line of text.
# Returns these lines as a list of strings.
def get_text_list_from_files(files):
text_list = []
for name in files:
with open(name) as f:
for line in f:
text_list.append(line)
return text_list
# Compose the full path names to the token files.
# Creates and returns a dataframe.
# Frame has single key "tokens".
def get_data_from_text_files(folder_name):
files = glob.glob(folder_name + "/*.toks")
texts = get_text_list_from_files(files)
df = pd.DataFrame({"tokens": texts})
df = df.sample(len(df)).reset_index(drop=True)
return df
all_data = get_data_from_text_files("train")
#print("all_data:", all_data)
# Part of TF dataflow graph.
def custom_standardization(input_data):
# No special prep.
return input_data
def get_vectorize_layer(texts, vocab_size, max_seq):
"""Build Text vectorization layer
Args:
texts (list): List of string, i.e., input texts
vocab_size (int): vocab size
max_seq (int): Maximum sequence length.
Returns:
layers.Layer: Return TextVectorization Keras Layer
"""
vectorize_layer = TextVectorization(
max_tokens=vocab_size,
output_mode="int",
standardize=custom_standardization,
output_sequence_length=max_seq,
)
vectorize_layer.adapt(texts)
# Insert mask token in vocabulary
vocab = vectorize_layer.get_vocabulary()
#print("len(vocab):", len(vocab)) #177
#vocab: ['', '[UNK]', 'the', 'and', 'a', 'of', ...] all lower-case
#GJ20: where do the empty string and [UNK] come from?
# they are created by adapt() as words 0 and 1
# '' is padding token; [UNK] is OOV token
vocab = vocab[2:len(vocab)-1] + ["[mask]"]
#print("len(vocab):", len(vocab)) #175
#GJ20: anyway first 2 words removed and '[mask]' added at the end
vectorize_layer.set_vocabulary(vocab)
# '' and [UNK] are back in
#vocab = vectorize_layer.get_vocabulary()
#print("len(vocab):", len(vocab)) #177
# '[mask]' has been added as last (least frequent) word in the vocab
return vectorize_layer
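# Hedged usage sketch (editor's addition): once adapted, the layer maps raw
# strings to fixed-length integer id sequences, zero-padded to MAX_LEN, with
# out-of-vocabulary words mapped to index 1 ('[UNK]'), e.g.
#   >>> vectorize_layer(["some input tokens"]).shape
#   TensorShape([1, 256])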
vectorize_layer = get_vectorize_layer(
all_data.tokens.values.tolist(),
config.VOCAB_SIZE,
config.MAX_LEN,
)
# Serialize vocabulary and dump to file:
import pickle
with open("vocabulary.pkl", "wb") as out:
pickle.dump(vectorize_layer.get_vocabulary(), out)
# Get mask token id for masked language model
mask_token_id = vectorize_layer(["[mask]"]).numpy()[0][0]
#print("mask_token_id:", mask_token_id) #176 (always last index in vocab)
# Encodes the token strings by int vocab indices.
def encode(texts):
encoded_texts = vectorize_layer(texts)
return encoded_texts.numpy()
# Randomly replace tokens by the [mask] and keep replaced token as label.
def get_masked_input_and_labels(encoded_texts):
# These numbers come from something called "BERT recipe":
# 15% used for prediction. 80% of that is masked. 10% is random token,
# 10% is just left as is.
# 15% BERT masking
#print("encoded_texts.shape:", encoded_texts.shape) #(50000, 256)
inp_mask = np.random.rand(*encoded_texts.shape) < 0.15
#print("inp_mask:", inp_mask) #[[False False True ...] ...]
# Do not mask special tokens
# GJ20: what are these special tokens? 0 and 1! But why <= 2? Mistake?
inp_mask[encoded_texts < 2] = False
# Set targets to -1 by default, it means ignore
labels = -1 * np.ones(encoded_texts.shape, dtype=int)
# Set labels for masked tokens
labels[inp_mask] = encoded_texts[inp_mask]
# False positions -> -1, True -> encoded word (vocab index)
#print("labels:", labels) #[[10 -1 -1 ...] [-1 -1 -1 994 ...] ... ]
# Prepare input
encoded_texts_masked = np.copy(encoded_texts)
    # Replace the input with [MASK] at 90% of the selected positions;
    # the remaining 10% keep their original token.
inp_mask_2mask = inp_mask & (np.random.rand(*encoded_texts.shape) < 0.90)
# mask token is the last in the dict
encoded_texts_masked[inp_mask_2mask] = mask_token_id
# Set 10% to a random token
inp_mask_2random = inp_mask_2mask & (np.random.rand(*encoded_texts.shape) < 1 / 9)
#GJ20: why 3 and not 2?
encoded_texts_masked[inp_mask_2random] = np.random.randint(
2, mask_token_id, inp_mask_2random.sum()
)
# Prepare sample_weights to pass to .fit() method
sample_weights = np.ones(labels.shape)
sample_weights[labels == -1] = 0
# y_labels would be same as encoded_texts, i.e., input tokens
y_labels = np.copy(encoded_texts)
return encoded_texts_masked, y_labels, sample_weights
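# Hedged illustration (editor's addition): roughly 15% of the non-special
# positions receive sample_weight 1 (only those positions contribute to the
# loss); of these, ~90% are shown to the model as the [mask] id and about a
# ninth of the masked ones are overwritten with a random vocabulary id, e.g.
#   >>> xm, y, w = get_masked_input_and_labels(encode(all_data.tokens.values[:4]))
#   >>> xm.shape == y.shape == w.shape
#   True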
# Prepare data for masked language model
x_all_tokens = encode(all_data.tokens.values)
#print("x_all_tokens.shape:", x_all_tokens.shape) #(50000, 256)
# Encoding and masking step:
x_masked_train, y_masked_labels, sample_weights = get_masked_input_and_labels(
x_all_tokens
)
mlm_ds = (
tf.data.Dataset.from_tensor_slices(
(x_masked_train, y_masked_labels, sample_weights))
.shuffle(1000)
.batch(config.BATCH_SIZE)
)
# i is layer number 0,1,2...
def bert_module(query, key, value, i):
# Multi headed self-attention
attention_output = layers.MultiHeadAttention(
num_heads=config.NUM_HEAD,
key_dim=config.EMBED_DIM // config.NUM_HEAD,
name="encoder_{}/multiheadattention".format(i),
)(query, key, value)
attention_output = layers.Dropout(0.1, name="encoder_{}/att_dropout".format(i))(attention_output)
attention_output = layers.LayerNormalization(
epsilon=1e-6, name="encoder_{}/att_layernormalization".format(i)
)(query + attention_output)
# Feed-forward layer
ffn = keras.Sequential(
[
layers.Dense(config.FF_DIM, activation="relu"),
layers.Dense(config.EMBED_DIM),
],
name="encoder_{}/ffn".format(i),
)
ffn_output = ffn(attention_output)
ffn_output = layers.Dropout(0.1, name="encoder_{}/ffn_dropout".format(i))(
ffn_output
)
sequence_output = layers.LayerNormalization(
epsilon=1e-6, name="encoder_{}/ffn_layernormalization".format(i)
)(attention_output + ffn_output)
return sequence_output
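# Editor's note (hedged): bert_module() is a standard post-layer-norm
# Transformer encoder block -- multi-head self-attention plus a residual
# connection and LayerNorm, followed by a two-layer feed-forward network
# (FF_DIM -> EMBED_DIM) with its own residual connection and LayerNorm.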
def get_pos_encoding_matrix(max_len, d_emb):
pos_enc = np.array(
[
[pos / np.power(10000, 2 * (j // 2) / d_emb) for j in range(d_emb)]
if pos != 0
else np.zeros(d_emb)
for pos in range(max_len)
]
)
#pos_enc.shape = (512, 128)
    # 0::2 means start at 0 and step 2 (all even columns)
pos_enc[1:, 0::2] = np.sin(pos_enc[1:, 0::2]) # dim 2i
pos_enc[1:, 1::2] = np.cos(pos_enc[1:, 1::2]) # dim 2i+1
return pos_enc
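# Editor's note (hedged): the matrix built above is the sinusoidal positional
# encoding of Vaswani et al., "Attention Is All You Need":
#   PE(pos, 2i)   = sin(pos / 10000**(2 * i / d_emb))
#   PE(pos, 2i+1) = cos(pos / 10000**(2 * i / d_emb))
# with the pos == 0 row left as zeros, exactly as in the loop above.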
loss_fn = keras.losses.SparseCategoricalCrossentropy(
reduction=tf.keras.losses.Reduction.NONE
)
loss_tracker = tf.keras.metrics.Mean(name="loss")
class MaskedLanguageModel(tf.keras.Model):
def train_step(self, inputs):
if len(inputs) == 3:
features, labels, sample_weight = inputs
else:
features, labels = inputs
sample_weight = None
with tf.GradientTape() as tape:
predictions = self(features, training=True)
loss = loss_fn(labels, predictions, sample_weight=sample_weight)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Compute our own metrics
loss_tracker.update_state(loss, sample_weight=sample_weight)
# Return a dict mapping metric names to current value
return {"loss": loss_tracker.result()}
@property
def metrics(self):
# We list our `Metric` objects here so that `reset_states()` can be
# called automatically at the start of each epoch
# or at the start of `evaluate()`.
# If you don't implement this property, you have to call
# `reset_states()` yourself at the time of your choosing.
return [loss_tracker]
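# Editor's note (hedged): because train_step() and the metrics property are
# overridden above, the model returned by create_masked_language_bert_model()
# below can be trained with a plain fit(mlm_ds, epochs=...) call, and the
# reported "loss" is the sample-weight-averaged sparse categorical
# cross-entropy over the selected token positions.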
def create_masked_language_bert_model():
inputs = layers.Input((config.MAX_LEN,), dtype=tf.int64)
word_embeddings = layers.Embedding(
input_dim=config.VOCAB_SIZE,
output_dim=config.EMBED_DIM,
name="word_embedding"
)(inputs)
# GJ20: what does this do? Positional embedding part of transformer.
position_embeddings = layers.Embedding(
input_dim=config.MAX_LEN,
output_dim=config.EMBED_DIM,
weights=[get_pos_encoding_matrix(config.MAX_LEN, config.EMBED_DIM)],
name="position_embedding",
)(tf.range(start=0, limit=config.MAX_LEN, delta=1))
embeddings = word_embeddings + position_embeddings
encoder_output = embeddings
for i in range(config.NUM_LAYERS):
encoder_output = bert_module(encoder_output, encoder_output, encoder_output, i)
mlm_output = layers.Dense(config.VOCAB_SIZE, name="mlm_cls", activation="softmax")(encoder_output)
mlm_model = MaskedLanguageModel(inputs, mlm_output, name="masked_bert_model")
optimizer = keras.optimizers.Adam(learning_rate=config.LR)
mlm_model.compile(optimizer=optimizer)
return mlm_model
# token<->id mappings as dicts:
id2token = dict(enumerate(vectorize_layer.get_vocabulary()))
token2id = {y: x for x, y in id2token.items()}
class MaskedTextGenerator(keras.callbacks.Callback):
def __init__(self, sample_tokens, top_k=5):
        # encoded token sequence to monitor after each epoch
self.sample_tokens = sample_tokens
self.k = top_k
def decode(self, tokens):
return " ".join([id2token[t] for t in tokens if t != 0])
def convert_ids_to_tokens(self, id):
return id2token[id]
def on_epoch_end(self, epoch, logs=None):
prediction = self.model.predict(self.sample_tokens)
# index of token2id['[mask]'] in list:
masked_index = | np.where(self.sample_tokens == mask_token_id) | numpy.where |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for polynomial models."""
# pylint: disable=invalid-name
import os
import unittest.mock as mk
import warnings
from itertools import product
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import wcs
from astropy.io import fits
from astropy.modeling import fitting
from astropy.modeling.functional_models import Linear1D
from astropy.modeling.mappings import Identity
from astropy.modeling.polynomial import (SIP, Chebyshev1D, Chebyshev2D, Hermite1D, Hermite2D,
Legendre1D, Legendre2D, OrthoPolynomialBase, Polynomial1D,
Polynomial2D, PolynomialBase)
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
linear1d = {
Chebyshev1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': True}}
},
Hermite1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': True}}
},
Legendre1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': True}}
},
Polynomial1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': True}}
},
Linear1D: {
'args': (),
'kwargs': {},
'parameters': {'intercept': 1.2, 'slope': 23.1},
'constraints': {'fixed': {'intercept': True}}
}
}
linear2d = {
Chebyshev2D: {
'args': (1, 1),
'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
'constraints': {'fixed': {'c0_0': True}}
},
Hermite2D: {
'args': (1, 1),
'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
'constraints': {'fixed': {'c0_0': True}}
},
Legendre2D: {
'args': (1, 1),
'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
'constraints': {'fixed': {'c0_0': True}}
},
Polynomial2D: {
'args': (1,),
'kwargs': {},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3},
'constraints': {'fixed': {'c0_0': True}}
}
}
@pytest.mark.skipif('not HAS_SCIPY')
class TestFitting:
"""Test linear fitter with polynomial models."""
def setup_class(self):
self.N = 100
self.M = 100
self.x1 = np.linspace(1, 10, 100)
self.y2, self.x2 = np.mgrid[:100, :83]
rsn = np.random.default_rng(0)
self.n1 = rsn.standard_normal(self.x1.size) * .1
self.n2 = rsn.standard_normal(self.x2.size)
self.n2.shape = self.x2.shape
self.linear_fitter = fitting.LinearLSQFitter()
self.non_linear_fitter = fitting.LevMarLSQFitter()
# TODO: Most of these test cases have some pretty repetitive setup that we
# could probably factor out
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear1d, key=str), (False, True))))
def test_linear_fitter_1D(self, model_class, constraints):
"""Test fitting with LinearLSQFitter"""
model_args = linear1d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
y1 = model(self.x1)
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', message=r'The fit may be poorly conditioned',
category=AstropyUserWarning)
model_lin = self.linear_fitter(model, self.x1, y1 + self.n1)
if constraints:
# For the constraints tests we're not checking the overall fit,
# just that the constraint was maintained
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_lin, param).value == expected
else:
assert_allclose(model_lin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear1d, key=str), (False, True))))
def test_non_linear_fitter_1D(self, model_class, constraints):
"""Test fitting with non-linear LevMarLSQFitter"""
model_args = linear1d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
y1 = model(self.x1)
with pytest.warns(AstropyUserWarning,
match='Model is linear in parameters'):
model_nlin = self.non_linear_fitter(model, self.x1, y1 + self.n1)
if constraints:
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_nlin, param).value == expected
else:
assert_allclose(model_nlin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear2d, key=str), (False, True))))
def test_linear_fitter_2D(self, model_class, constraints):
"""Test fitting with LinearLSQFitter"""
model_args = linear2d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
z = model(self.x2, self.y2)
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', message=r'The fit may be poorly conditioned',
category=AstropyUserWarning)
model_lin = self.linear_fitter(model, self.x2, self.y2, z + self.n2)
if constraints:
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_lin, param).value == expected
else:
assert_allclose(model_lin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear2d, key=str), (False, True))))
def test_non_linear_fitter_2D(self, model_class, constraints):
"""Test fitting with non-linear LevMarLSQFitter"""
model_args = linear2d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
z = model(self.x2, self.y2)
with pytest.warns(AstropyUserWarning,
match='Model is linear in parameters'):
model_nlin = self.non_linear_fitter(model, self.x2, self.y2,
z + self.n2)
if constraints:
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_nlin, param).value == expected
else:
assert_allclose(model_nlin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize('model_class',
[cls for cls in list(linear1d) + list(linear2d)])
def test_polynomial_init_with_constraints(model_class):
"""
Test that polynomial models can be instantiated with constraints, but no
parameters specified.
Regression test for https://github.com/astropy/astropy/issues/3606
"""
# Just determine which parameter to place a constraint on; it doesn't
# matter which parameter it is to exhibit the problem so long as it's a
# valid parameter for the model
if '1D' in model_class.__name__:
param = 'c0'
else:
param = 'c0_0'
if issubclass(model_class, Linear1D):
param = 'intercept'
if issubclass(model_class, OrthoPolynomialBase):
degree = (2, 2)
else:
degree = (2,)
m = model_class(*degree, fixed={param: True})
assert m.fixed[param] is True
assert getattr(m, param).fixed is True
if issubclass(model_class, OrthoPolynomialBase):
assert repr(m) ==\
f"<{model_class.__name__}(2, 2, c0_0=0., c1_0=0., c2_0=0., c0_1=0., c1_1=0., c2_1=0., c0_2=0., c1_2=0., c2_2=0.)>"
assert str(m) ==\
f"Model: {model_class.__name__}\n" +\
"Inputs: ('x', 'y')\n" +\
"Outputs: ('z',)\n" +\
"Model set size: 1\n" +\
"X_Degree: 2\n" +\
"Y_Degree: 2\n" +\
"Parameters:\n" +\
" c0_0 c1_0 c2_0 c0_1 c1_1 c2_1 c0_2 c1_2 c2_2\n" +\
" ---- ---- ---- ---- ---- ---- ---- ---- ----\n" +\
" 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0"
else:
if model_class.__name__ == 'Polynomial2D':
assert repr(m) ==\
"<Polynomial2D(2, c0_0=0., c1_0=0., c2_0=0., c0_1=0., c0_2=0., c1_1=0.)>"
assert str(m) ==\
"Model: Polynomial2D\n" +\
"Inputs: ('x', 'y')\n" +\
"Outputs: ('z',)\n" +\
"Model set size: 1\n" +\
"Degree: 2\n" +\
"Parameters:\n" +\
" c0_0 c1_0 c2_0 c0_1 c0_2 c1_1\n" +\
" ---- ---- ---- ---- ---- ----\n" +\
" 0.0 0.0 0.0 0.0 0.0 0.0"
elif model_class.__name__ == 'Linear1D':
assert repr(m) ==\
"<Linear1D(slope=2., intercept=0.)>"
assert str(m) ==\
"Model: Linear1D\n" +\
"Inputs: ('x',)\n" +\
"Outputs: ('y',)\n" +\
"Model set size: 1\n" +\
"Parameters:\n" +\
" slope intercept\n" +\
" ----- ---------\n" +\
" 2.0 0.0"
else:
assert repr(m) ==\
f"<{model_class.__name__}(2, c0=0., c1=0., c2=0.)>"
assert str(m) ==\
f"Model: {model_class.__name__}\n" +\
"Inputs: ('x',)\n" +\
"Outputs: ('y',)\n" +\
"Model set size: 1\n" +\
"Degree: 2\n" +\
"Parameters:\n" +\
" c0 c1 c2\n" +\
" --- --- ---\n" +\
" 0.0 0.0 0.0"
def test_sip_hst():
"""Test SIP against astropy.wcs"""
test_file = get_pkg_data_filename(os.path.join('data', 'hst_sip.hdr'))
hdr = fits.Header.fromtextfile(test_file)
crpix1 = hdr['CRPIX1']
crpix2 = hdr['CRPIX2']
wobj = wcs.WCS(hdr)
a_pars = dict(**hdr['A_*'])
b_pars = dict(**hdr['B_*'])
a_order = a_pars.pop('A_ORDER')
b_order = b_pars.pop('B_ORDER')
sip = SIP([crpix1, crpix2], a_order, b_order, a_pars, b_pars)
coords = [1, 1]
rel_coords = [1 - crpix1, 1 - crpix2]
astwcs_result = wobj.sip_pix2foc([coords], 1)[0] - rel_coords
assert_allclose(sip(1, 1), astwcs_result)
# Test changing of inputs and calling it with keyword argumenrts.
sip.inputs = ("r", "t")
assert_allclose(sip(r=1, t=1), astwcs_result)
assert_allclose(sip(1, t=1), astwcs_result)
# Test representations
assert repr(sip) ==\
"<SIP([<Shift(offset=-2048.)>, <Shift(offset=-1024.)>, " +\
"<_SIP1D(4, 'A', A_2_0=0.00000855, A_3_0=-0., A_4_0=0., A_0_2=0.00000217, " +\
"A_0_3=0., A_0_4=0., A_1_1=-0.0000052, A_1_2=-0., A_1_3=-0., " +\
"A_2_1=-0., A_2_2=0., A_3_1=0.)>, " +\
"<_SIP1D(4, 'B', B_2_0=-0.00000175, B_3_0=0., B_4_0=-0., B_0_2=-0.00000722, " +\
"B_0_3=-0., B_0_4=-0., B_1_1=0.00000618, B_1_2=-0., B_1_3=0., " +\
"B_2_1=-0., B_2_2=-0., B_3_1=-0.)>])>"
assert str(sip) ==\
"Model: SIP\n" +\
" Model: Shift\n" +\
" Inputs: ('x',)\n" +\
" Outputs: ('y',)\n" +\
" Model set size: 1\n" +\
" Parameters:\n" +\
" offset\n" +\
" -------\n" +\
" -2048.0\n" +\
"\n" +\
" Model: Shift\n" +\
" Inputs: ('x',)\n" +\
" Outputs: ('y',)\n" +\
" Model set size: 1\n" +\
" Parameters:\n" +\
" offset\n" +\
" -------\n" +\
" -1024.0\n" +\
"\n" +\
" Model: _SIP1D\n" +\
" Inputs: ('x', 'y')\n" +\
" Outputs: ('z',)\n" +\
" Model set size: 1\n" +\
" Order: 4\n" +\
" Coeff. Prefix: A\n" +\
" Parameters:\n" +\
" A_2_0 A_3_0 ... A_3_1 \n" +\
" --------------------- ---------------------- ... ---------------------\n" +\
" 8.551277582556502e-06 -4.730444829222791e-10 ... 1.971022971660309e-15\n" +\
"\n" +\
" Model: _SIP1D\n" +\
" Inputs: ('x', 'y')\n" +\
" Outputs: ('z',)\n" +\
" Model set size: 1\n" +\
" Order: 4\n" +\
" Coeff. Prefix: B\n" +\
" Parameters:\n" +\
" B_2_0 B_3_0 ... B_3_1 \n" +\
" ---------------------- --------------------- ... ----------------------\n" +\
" -1.746491877058669e-06 8.567635427816317e-11 ... -3.779506805487476e-15\n"
# Test get num of coeffs
assert sip.sip1d_a.get_num_coeff(1) == 6
# Test error
message = "Degree of polynomial must be 2< deg < 9"
sip.sip1d_a.order = 1
with pytest.raises(ValueError) as err:
sip.sip1d_a.get_num_coeff(1)
assert str(err.value) == message
sip.sip1d_a.order = 10
with pytest.raises(ValueError) as err:
sip.sip1d_a.get_num_coeff(1)
assert str(err.value) == message
def test_sip_irac():
"""Test forward and inverse SIP against astropy.wcs"""
test_file = get_pkg_data_filename(os.path.join('data', 'irac_sip.hdr'))
hdr = fits.Header.fromtextfile(test_file)
crpix1 = hdr['CRPIX1']
crpix2 = hdr['CRPIX2']
wobj = wcs.WCS(hdr)
a_pars = dict(**hdr['A_*'])
b_pars = dict(**hdr['B_*'])
ap_pars = dict(**hdr['AP_*'])
bp_pars = dict(**hdr['BP_*'])
a_order = a_pars.pop('A_ORDER')
b_order = b_pars.pop('B_ORDER')
ap_order = ap_pars.pop('AP_ORDER')
bp_order = bp_pars.pop('BP_ORDER')
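    # A_DMAX/B_DMAX record the maximum distortion, not polynomial coefficients, so drop them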
del a_pars['A_DMAX']
del b_pars['B_DMAX']
pix = [200, 200]
rel_pix = [200 - crpix1, 200 - crpix2]
sip = SIP([crpix1, crpix2], a_order, b_order, a_pars, b_pars,
ap_order=ap_order, ap_coeff=ap_pars, bp_order=bp_order,
bp_coeff=bp_pars)
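    # Round-trip through astropy.wcs: forward SIP (sip_pix2foc) and inverse SIP (sip_foc2pix)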
foc = wobj.sip_pix2foc([pix], 1)
newpix = wobj.sip_foc2pix(foc, 1)[0]
assert_allclose(sip(*pix), foc[0] - rel_pix)
assert_allclose(sip.inverse(*foc[0]) +
foc[0] - rel_pix, newpix - pix)
# Test inverse representations
assert repr(sip.inverse) ==\
"<InverseSIP([<Polynomial2D(2, c0_0=0., c1_0=0.0000114, c2_0=0.00002353, " +\
"c0_1=-0.00000546, c0_2=-0.00000667, c1_1=-0.00001801)>, " +\
"<Polynomial2D(2, c0_0=0., c1_0=-0.00001495, c2_0=0.00000122, c0_1=0.00001975, " +\
"c0_2=-0.00002601, c1_1=0.00002944)>])>"
assert str(sip.inverse) ==\
"Model: InverseSIP\n" +\
" Model: Polynomial2D\n" +\
" Inputs: ('x', 'y')\n" +\
" Outputs: ('z',)\n" +\
" Model set size: 1\n" +\
" Degree: 2\n" +\
" Parameters:\n" +\
" c0_0 c1_0 c2_0 c0_1 c0_2 c1_1 \n" +\
" ---- -------- --------- ---------- ---------- ----------\n" +\
" 0.0 1.14e-05 2.353e-05 -5.463e-06 -6.666e-06 -1.801e-05\n" +\
"\n" +\
" Model: Polynomial2D\n" +\
" Inputs: ('x', 'y')\n" +\
" Outputs: ('z',)\n" +\
" Model set size: 1\n" +\
" Degree: 2\n" +\
" Parameters:\n" +\
" c0_0 c1_0 c2_0 c0_1 c0_2 c1_1 \n" +\
" ---- ---------- --------- --------- ---------- ---------\n" +\
" 0.0 -1.495e-05 1.225e-06 1.975e-05 -2.601e-05 2.944e-05\n"
def test_sip_no_coeff():
sip = SIP([10, 12], 2, 2)
assert_allclose(sip.sip1d_a.parameters, [0., 0., 0])
assert_allclose(sip.sip1d_b.parameters, [0., 0., 0])
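    # No AP_*/BP_* coefficients were supplied, so the inverse transform is undefined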
with pytest.raises(NotImplementedError):
sip.inverse
# Test model set
sip = SIP([10, 12], 2, 2, n_models=2)
assert sip.sip1d_a.model_set_axis == 0
assert sip.sip1d_b.model_set_axis == 0
@pytest.mark.parametrize('cls', (Polynomial1D, Chebyshev1D, Legendre1D,
Polynomial2D, Chebyshev2D, Legendre2D))
def test_zero_degree_polynomial(cls):
"""
A few tests that degree=0 polynomials are correctly evaluated and
fitted.
Regression test for https://github.com/astropy/astropy/pull/3589
"""
message = "Degree of polynomial must be positive or null"
if cls.n_inputs == 1: # Test 1D polynomials
p1 = cls(degree=0, c0=1)
assert p1(0) == 1
assert np.all(p1(np.zeros(5)) == np.ones(5))
x = np.linspace(0, 1, 100)
# Add a little noise along a straight line
y = 1 + np.random.uniform(0, 0.1, len(x))
p1_init = cls(degree=0)
fitter = fitting.LinearLSQFitter()
p1_fit = fitter(p1_init, x, y)
        # The fit won't be exact of course, but it should land within the
        # 0.10 tolerance (the added noise is uniform on [0, 0.1])
assert_allclose(p1_fit.c0, 1, atol=0.10)
# Error from negative degree
with pytest.raises(ValueError) as err:
cls(degree=-1)
assert str(err.value) == message
elif cls.n_inputs == 2: # Test 2D polynomials
if issubclass(cls, OrthoPolynomialBase):
p2 = cls(x_degree=0, y_degree=0, c0_0=1)
# different shaped x and y inputs
a = np.array([1, 2, 3])
b = np.array([1, 2])
with mk.patch.object(PolynomialBase, 'prepare_inputs', autospec=True,
return_value=((a, b), mk.MagicMock())):
with pytest.raises(ValueError) as err:
p2.prepare_inputs(mk.MagicMock(), mk.MagicMock())
assert str(err.value) ==\
"Expected input arrays to have the same shape"
# Error from negative degree
with pytest.raises(ValueError) as err:
cls(x_degree=-1, y_degree=0)
assert str(err.value) == message
with pytest.raises(ValueError) as err:
cls(x_degree=0, y_degree=-1)
assert str(err.value) == message
else:
p2 = cls(degree=0, c0_0=1)
# Error from negative degree
with pytest.raises(ValueError) as err:
cls(degree=-1)
assert str(err.value) == message
assert p2(0, 0) == 1
assert np.all(p2(np.zeros(5), np.zeros(5)) == np.ones(5))
y, x = np.mgrid[0:1:100j, 0:1:100j]
z = (1 + np.random.uniform(0, 0.1, x.size)).reshape(100, 100)
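        # A degree-0 fit should recover the constant surface level (~1) within the noise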
if issubclass(cls, OrthoPolynomialBase):
p2_init = cls(x_degree=0, y_degree=0)
else:
p2_init = cls(degree=0)
fitter = fitting.LinearLSQFitter()
p2_fit = fitter(p2_init, x, y, z)
assert_allclose(p2_fit.c0_0, 1, atol=0.10)
@pytest.mark.skipif('not HAS_SCIPY')
def test_2d_orthopolynomial_in_compound_model():
"""
Ensure that OrthoPolynomialBase (ie. Chebyshev2D & Legendre2D) models get
evaluated & fitted correctly when part of a compound model.
Regression test for https://github.com/astropy/astropy/pull/6085.
"""
y, x = np.mgrid[0:5, 0:5]
z = x + y
fitter = fitting.LevMarLSQFitter()
simple_model = Chebyshev2D(2, 2)
with pytest.warns(AstropyUserWarning,
match='Model is linear in parameters'):
simple_fit = fitter(simple_model, x, y, z)
fitter = fitting.LevMarLSQFitter() # re-init to compare like with like
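    # Mark the compound model fittable and linear by hand so the fitter accepts it
    # and emits the expected linear-model warning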
compound_model = Identity(2) | Chebyshev2D(2, 2)
compound_model.fittable = True
compound_model.linear = True
with pytest.warns(AstropyUserWarning,
match='Model is linear in parameters'):
compound_fit = fitter(compound_model, x, y, z)
assert_allclose(simple_fit(x, y), compound_fit(x, y), atol=1e-15)
def test_Hermite1D_clenshaw():
model = Hermite1D(degree=2)
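    # clenshaw evaluates the (physicists') Hermite series sum(c_k * H_k(x)) at x;
    # at x = 1: H0 = 1, H1 = 2, H2 = 2, H3 = -4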
assert model.clenshaw(1, [3]) == 3
assert model.clenshaw(1, [3, 4]) == 11
assert model.clenshaw(1, [3, 4, 5]) == 21
assert model.clenshaw(1, [3, 4, 5, 6]) == -3
def test__fcache():
model = OrthoPolynomialBase(x_degree=2, y_degree=2)
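    # The base class leaves _fcache unimplemented; the 2-D subclasses cache their own
    # basis-function values keyed by term index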
with pytest.raises(NotImplementedError) as err:
model._fcache(np.asanyarray(1), np.asanyarray(1))
assert str(err.value) == "Subclasses should implement this"
model = Hermite2D(x_degree=2, y_degree=2)
assert model._fcache(np.asanyarray(1), np.asanyarray(1)) ==\
{
0: np.asanyarray(1),
1: 2,
3: np.asanyarray(1),
4: 2,
2: 2.0,
5: -4.0
}
model = Legendre2D(x_degree=2, y_degree=2)
assert model._fcache(np.asanyarray(1), np.asanyarray(1)) ==\
{
0: np.asanyarray(1),
1: np.asanyarray(1),
2: 1.0,
3: np.asanyarray(1),
4: np.asanyarray(1),
5: 1.0
}
model = Chebyshev2D(x_degree=2, y_degree=2)
assert model._fcache(np.asanyarray(1), np.asanyarray(1)) ==\
{
0: np.asanyarray(1),
1: np.asanyarray(1),
2: 1.0,
3: np.asanyarray(1),
4: np.asanyarray(1),
5: 1.0
}
def test_fit_deriv_shape_error():
model = Hermite2D(x_degree=2, y_degree=2)
with pytest.raises(ValueError) as err:
        # x and y deliberately have mismatched shapes to trigger the error
        # (any pair of differently shaped arrays works here)
        model.fit_deriv(np.array([1, 2]), np.array([1, 2, 3]))