| prompt (stringlengths 19 to 879k) | completion (stringlengths 3 to 53.8k) | api (stringlengths 8 to 59) |
|---|---|---|
#!/usr/bin/env python
import unittest
import numpy
import ctf
import os
import sys
def allclose(a, b):
return abs(ctf.to_nparray(a) - ctf.to_nparray(b)).sum() < 1e-14
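# Note: this helper compares the *total* absolute deviation against 1e-14,
# which is stricter than an element-wise numpy.allclose for larger arrays.
# Hypothetical usage sketch:
#   allclose(ctf.zeros((3,)), numpy.zeros(3))                # True
#   allclose(ctf.astensor(numpy.ones(3)), numpy.zeros(3))    # False (total deviation 3.0)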
class KnowValues(unittest.TestCase):
def test_abs(self):
a0 = numpy.arange(2., 5.)
a1 = ctf.from_nparray(a0)
self.assertTrue(ctf.all(ctf.abs(a1) == ctf.abs(a0)))
self.assertTrue(ctf.all(ctf.abs(a1) == numpy.abs(a0)))
try:
a1 = a1 + 1j
self.assertAlmostEqual(ctf.abs(a1).sum(), numpy.abs(a1.to_nparray()).sum(), 14)
except AttributeError:
pass
def test_eq(self):
a0 = numpy.arange(6).reshape(2,3)
a1 = ctf.array(a0)
a2 = ctf.array(a0)
self.assertTrue(ctf.all(a1==a2))
self.assertTrue(ctf.all(a0==a1))
a1[:] = 0
self.assertTrue(ctf.all(a1==0))
def test_conj(self):
a0 = ctf.zeros((2,3))
self.assertTrue(ctf.conj(a0).dtype == numpy.double)
self.assertTrue(a0.conj().dtype == numpy.double)
a0 = ctf.zeros((2,3), dtype=numpy.complex128)
self.assertTrue(ctf.conj(a0).dtype == numpy.complex128)
self.assertTrue(a0.conj().dtype == numpy.complex128)
a0[:] = 1j
a0 = a0.conj()
self.assertTrue(ctf.all(a0 == -1j))
def test__mul__(self):
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0)
self.assertTrue(allclose(a1*.5, a0*.5))
a2 = ctf.astensor(a0*.2+1j)
self.assertTrue(allclose(a1*a2, a0*(a0*.2+1j)))
self.assertTrue(allclose(a1*a0, a0*a0))
a2 = numpy.arange(6.).reshape(3,2)
self.assertTrue(allclose(a1*a2, a0*a2))
a0 = ctf.astensor(numpy.arange(4.))
a1 = ctf.astensor(numpy.arange(3.))
self.assertTrue((a0.reshape(4,1)*a1).shape == (4,3))
self.assertTrue((a1*a0.reshape(4,1)).shape == (4,3))
self.assertTrue((a1.reshape(1,3)*a0.reshape(4,1)).shape == (4,3))
self.assertTrue((a1.reshape(1,1,3)*a0.reshape(4,1)).shape == (1,4,3))
self.assertTrue((a1.reshape(1,1,3)*a0.reshape(4,1,1)).shape == (4,1,3))
def test__add__(self):
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0)
self.assertTrue(allclose(a1+.5, a0+.5))
a2 = ctf.astensor(a0*.2+1j)
self.assertTrue(allclose(a1+a2, a0+(a0*.2+1j)))
self.assertTrue(allclose(a1+a0, a0+a0))
a2 = numpy.arange(6.).reshape(3,2)
self.assertTrue(allclose(a1+a2, a0+a2))
a0 = ctf.astensor(numpy.arange(4.))
a1 = ctf.astensor(numpy.arange(3.))
self.assertTrue((a0.reshape(4,1)+a1).shape == (4,3))
self.assertTrue((a1+a0.reshape(4,1)).shape == (4,3))
self.assertTrue((a1.reshape(1,3)+a0.reshape(4,1)).shape == (4,3))
self.assertTrue((a0.reshape(4,1)+a1.reshape(1,3)).shape == (4,3))
self.assertTrue((a1.reshape(1,1,3)+a0.reshape(4,1)).shape == (1,4,3))
self.assertTrue((a1.reshape(1,1,3)+a0.reshape(4,1,1)).shape == (4,1,3))
def test__sub__(self):
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0)
self.assertTrue(allclose(a1-.5, a0-.5))
a2 = ctf.astensor(a0*.2+1j)
self.assertTrue(allclose(a1-a2, a0-(a0*.2+1j)))
self.assertTrue(allclose(a1-a0, a0-a0))
def test__div__(self):
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0)
self.assertTrue(allclose(a1/.5, a0/.5))
a2 = ctf.astensor(a0*.2+1j)
self.assertTrue(allclose(a1/a2, a0/(a0*.2+1j)))
self.assertTrue(allclose(a1/a0, a0/a0))
def test_power(self):
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0)
self.assertTrue(allclose(ctf.power(a1, .5), a0**.5))
a2 = ctf.astensor(a0*.2+1j)
self.assertTrue(allclose(ctf.power(a1, a2), a0**(a0*.2+1j)))
self.assertTrue(allclose(ctf.power(a1, a0), a0**a0))
def test__pow__(self):
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0)
self.assertTrue(allclose(a1**.5, a0**.5))
a2 = ctf.astensor(a0*.2+1j)
self.assertTrue(allclose(a1**a2, a0**(a0*.2+1j)))
self.assertTrue(allclose(a1**a0, a0**a0))
def test__imul__(self):
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0).copy()
a1 *= .5
self.assertTrue(allclose(a1, a0*.5))
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0).copy()
with self.assertRaises(TypeError):
a1 *= a0*.2+1j
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0).copy()
a2 = numpy.arange(6.).reshape(3,2)
a1 *= a2
self.assertTrue(allclose(a1, a0*a2))
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0).copy()
a2 = numpy.arange(2.)
a1 *= a2
self.assertTrue(allclose(a1, a0*a2))
def test__iadd__(self):
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0).copy()
a1 += .5
self.assertTrue(allclose(a1, a0+.5))
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0).copy()
with self.assertRaises(TypeError):
a1 += a0*.2+1j
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0).copy()
a2 = numpy.arange(6.).reshape(3,2)
a1 += a2
self.assertTrue(allclose(a1, a0+a2))
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0).copy()
a2 = numpy.arange(2.)
a1 += a2
self.assertTrue(allclose(a1, a0+a2))
def test__isub__(self):
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0).copy()
a1 -= .5
self.assertTrue(allclose(a1, a0-.5))
def test__idiv__(self):
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0).copy()
a1 /= .5
self.assertTrue(allclose(a1, a0/.5))
def test__ipow__(self):
a0 = numpy.arange(24.).reshape(4,3,2) + .4
a1 = ctf.astensor(a0).copy()
a1 **= .5
self.assertTrue(allclose(a1, a0**.5))
def test_set_item(self):
a0 = numpy.arange(24.).reshape(4,3,2) + 400.
b0 = numpy.arange(6.).reshape(3,2)
a1 = ctf.astensor(a0).copy()
b1 = ctf.astensor(b0).copy()
a0[:] = b0
a1[:] = b1
self.assertTrue(allclose(a1, a0))
a0 = numpy.arange(24.).reshape(4,3,2) + 400.
b0 = numpy.arange(6.).reshape(3,2)
a1 = ctf.astensor(a0).copy()
b1 = ctf.astensor(b0).copy()
a0[1:,1] = b0
a1[1:,1] = b1
self.assertTrue(allclose(a1, a0))
a0 = numpy.arange(24.).reshape(4,3,2) + 400.
b0 = numpy.arange(6.).reshape(3,2)
#!/usr/bin/python
import numpy as np
import sys
import json
USE_TF_FB = False
if USE_TF_FB:
import tensorflow as tf
with open("MFCC_params.json", "r") as f:
params = json.load(f)
with open("MFCC_params.h", "w") as f:
for k, v in params.items():
f.write("#define\t{:16}\t{}\n".format(k, int(v) if k != "PREEMP_FACTOR" else float(v)))
LUT_FILE = "BUILD_MODEL/LUT.def"
FB_FILE = "BUILD_MODEL/MFCC_FB.def"
WINDOW = "HANNING"
FFT_TWIDDLE_DYN = 15
MFCC_COEFF_DYN = 10
def FP2FIX(Val, Prec):
try:
return (Val * ((1 << Prec) - 1)).astype(np.int32)
except:
return int(Val * ((1 << Prec) - 1))
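# Worked example of the fixed-point conversion above:
#   FP2FIX(0.5, 15) == int(0.5 * ((1 << 15) - 1)) == int(16383.5) == 16383
# numpy inputs take the `try` branch and are cast element-wise to int32 instead.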
def SetupTwiddlesLUT(Nfft, Inverse=False):
Phi = (np.pi * 2 / Nfft) * np.arange(0, Nfft)
if Inverse:
Twiddles_cos = np.round(np.cos(Phi) * ((1<<FFT_TWIDDLE_DYN)-1))
Twiddles_sin = np.round(np.sin(Phi) * ((1<<FFT_TWIDDLE_DYN)-1))
else:
Twiddles_cos = np.round(np.cos(-Phi) * ((1<<FFT_TWIDDLE_DYN)-1))
Twiddles_sin = np.round(np.sin(-Phi) * ((1<<FFT_TWIDDLE_DYN)-1))
return Twiddles_cos, Twiddles_sin
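# Minimal sketch of the Q15 twiddle LUT for a toy 4-point forward FFT
# (FFT_TWIDDLE_DYN = 15, so the scale factor is 32767):
#   cos_lut, sin_lut = SetupTwiddlesLUT(4)
#   cos_lut ~ [ 32767,      0, -32767,      0]
#   sin_lut ~ [     0, -32767,      0,  32767]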
def SetupSwapTable(Ni):
log2 = int(np.log2(Ni))
iL = Ni // 2
iM = 1
SwapTable = np.zeros(Ni)
for i in range(log2):
for j in range(iM):
SwapTable[j + iM] = SwapTable[j] + iL
iL //= 2
iM *= 2
return SwapTable
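# SetupSwapTable builds the standard bit-reversal permutation used to reorder
# FFT inputs/outputs. Worked example for Ni = 8 (log2 = 3):
#   SetupSwapTable(8) -> [0, 4, 2, 6, 1, 5, 3, 7]
# i.e. entry i holds the value whose 3-bit binary representation is the
# reverse of i's (1 = 0b001 -> 0b100 = 4).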
def SetupDCTTable(Ndct):
DCT_Coeff = np.zeros((Ndct, Ndct))
for k in range(Ndct):
for i in range(Ndct):
DCT_Coeff[k, i] = np.round(np.cos(np.pi / Ndct * k * (i + 0.5)) * ((1<<FFT_TWIDDLE_DYN)-1))
return DCT_Coeff
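# SetupDCTTable stores a DCT-II basis scaled to Q15:
#   DCT_Coeff[k, i] = round(cos(pi/Ndct * k * (i + 0.5)) * 32767)
# Worked example for Ndct = 2:
#   row k=0 -> [ 32767,  32767]
#   row k=1 -> [ 23170, -23170]   (cos(pi/4) ~ 0.7071 -> 23170)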
def SetupLiftCoeff(L, N, Qdyn=11):
Lift_Coeff = np.zeros(N)
for i in range(N):
Lift_Coeff[i] = ((1.0 + (L / 2.0) * np.sin(np.pi * i / L)) * (1 << Qdyn))
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
"""Test the ops module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import os
import unittest
import numpy as np
from dragon.core.util import nest
from dragon.core.testing.unittest.common_utils import run_tests
from dragon.vm import torch
# Fix the duplicate linked omp runtime
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# Fix the numpy seed
np.random.seed(1337)
class OpTestCase(unittest.TestCase):
"""The base test case."""
precision = 1e-5
def __init__(self, method_name='runTest'):
super(OpTestCase, self).__init__(method_name)
def assertEqual(
self,
first,
second,
msg=None,
prec=None,
):
if prec is None:
prec = self.precision
inputs = nest.flatten(first)
num_first = len(inputs)
inputs += nest.flatten(second)
num_second = len(inputs) - num_first
for i, input in enumerate(inputs):
if isinstance(input, torch.Tensor):
inputs[i] = input.numpy()
first = inputs[:num_first] if num_first > 1 else inputs[0]
second = inputs[num_first:len(inputs)] if num_second > 1 else inputs[num_first]
if isinstance(first, np.ndarray) and isinstance(second, np.ndarray):
super(OpTestCase, self).assertEqual(first.shape, second.shape)
if first.dtype == bool and second.dtype == bool:
diff = first ^ second
num_unique = len(np.unique(diff))
self.assertLessEqual(num_unique, 1, msg)
else:
diff = np.abs(first - second)
max_err = diff.max()
self.assertLessEqual(max_err, prec, msg)
elif nest.is_sequence(first) and nest.is_sequence(second):
for a, b in zip(first, second):
self.assertEqual(a, b, msg, prec)
else:
super(OpTestCase, self).assertEqual(first, second, msg)
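# Illustration of the comparison rule implemented by OpTestCase.assertEqual:
# torch tensors are converted to numpy arrays, shapes must match exactly, and
# the maximum element-wise absolute difference must not exceed `precision`
# (1e-5 by default), so 1.000004 vs 1.0 passes while 1.0001 vs 1.0 fails.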
class TestTensorOps(OpTestCase):
"""Test the tensor ops."""
# Testing shapes for unary ops
unary_test_shapes = [(2,)]
# Testing shapes for binary ops
binary_test_shapes = [((2,), (2,)), ((2, 3), (3,)), ((2, 3), (2, 1))]
def test_abs(self):
data = np.array([-1., 0., 1.], 'float32')
x = new_tensor(data)
self.assertEqual(x.abs(), np.abs(data))
def test_add(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = arange(a_shape), arange(b_shape, 1)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a + b, data1 + data2)
self.assertEqual(1 + a, 1 + data1)
a += b
self.assertEqual(a, data1 + data2)
def test_addmm(self):
entries = [((2, 3), (3, 4), (2, 4))]
for a_shape, b_shape, c_shape in entries:
data1, data2 = arange(a_shape), arange(b_shape)
data3 = arange(c_shape)
a, b = new_tensor(data1), new_tensor(data2)
c = new_tensor(data3)
y = c.addmm(a, b)
self.assertEqual(y, np.matmul(data1, data2) + data3)
def test_argmax(self):
entries = [(0, True), (0, False), (1, True), (1, False)]
for axis, keepdims in entries:
data = arange((2, 3))
x = new_tensor(data)
result = np.argmax(data, axis)
if keepdims:
result = np.expand_dims(result, axis)
self.assertEqual(x.argmax(axis, keepdims), result)
def test_argmin(self):
entries = [(0, True), (0, False), (1, True), (1, False)]
for axis, keepdims in entries:
data = arange((2, 3))
x = new_tensor(data)
result = np.argmin(data, axis)
if keepdims:
result = np.expand_dims(result, axis)
self.assertEqual(x.argmin(axis, keepdims), result)
def test_atan2(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = arange(a_shape), arange(b_shape, 1)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a.atan2(b), np.arctan2(data1, data2))
def test_baddbmm(self):
entries = [((2, 2, 3), (2, 3, 4), (2, 2, 4))]
for a_shape, b_shape, c_shape in entries:
data1, data2 = arange(a_shape), arange(b_shape)
data3 = arange(c_shape)
a, b = new_tensor(data1), new_tensor(data2)
c = new_tensor(data3)
y = c.baddbmm(a, b)
self.assertEqual(y, np.matmul(data1, data2) + data3)
c.baddbmm_(a, b)
self.assertEqual(c, np.matmul(data1, data2) + data3)
def test_bitwise_and(self):
for a_shape, b_shape in self.binary_test_shapes:
data1 = arange(a_shape, dtype='int32')
data2 = arange(b_shape, 1, dtype='int32')
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a & b, np.bitwise_and(data1, data2))
a &= b
self.assertEqual(a, np.bitwise_and(data1, data2))
def test_bitwise_not(self):
for shape in self.unary_test_shapes:
data = np.random.binomial(1, 0.5, shape).astype('bool')
x = new_tensor(data)
self.assertEqual(~x, np.invert(data))
x.bitwise_not_()
self.assertEqual(x, np.invert(data))
def test_bitwise_or(self):
for a_shape, b_shape in self.binary_test_shapes:
data1 = arange(a_shape, dtype='int32')
data2 = arange(b_shape, 1, dtype='int32')
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a | b, np.bitwise_or(data1, data2))
a |= b
self.assertEqual(a, np.bitwise_or(data1, data2))
def test_bitwise_xor(self):
for a_shape, b_shape in self.binary_test_shapes:
data1 = arange(a_shape, dtype='int32')
data2 = arange(b_shape, 1, dtype='int32')
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a ^ b, np.bitwise_xor(data1, data2))
a ^= b
self.assertEqual(a, np.bitwise_xor(data1, data2))
def test_bmm(self):
test_shapes = [((1, 2, 3), (2, 3, 4)),
((2, 2, 3), (1, 3, 4)),
((2, 2, 3), (2, 3, 4)),
((2, 1, 2, 3), (2, 3, 4)),
((1, 2, 3), (2, 2, 3, 4)),
((2, 1, 2, 3), (1, 2, 3, 4))]
for a_shape, b_shape in test_shapes:
data1, data2 = arange(a_shape), arange(b_shape, 1)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a.bmm(b), np.matmul(data1, data2))
def test_ceil(self):
data = np.array([1.4, 1.7, 2.0])
x = new_tensor(data)
self.assertEqual(x.ceil(), np.ceil(data))
x.ceil_()
self.assertEqual(x, np.ceil(data))
def test_chunk(self):
data = arange((2, 3))
x = new_tensor(data)
y = x.chunk(2, 1)
self.assertEqual(y, [np.split(data, (2,), axis=1)])
def test_clamp(self):
entries = [(None, None), (2, None), (None, 4), (2, 4)]
for low, high in entries:
data = arange((6,))
x = new_tensor(data)
result = np.clip(data, low, high) if low or high else data
self.assertEqual(x.clamp(low, high), result)
x.clamp_(low, high)
self.assertEqual(x, result)
def test_copy(self):
data1, data2 = arange((2,)), arange((2, 3))
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a.copy_(b), data2)
def test_cos(self):
data = np.array([0., math.pi * 0.5, math.pi], 'float32')
x = new_tensor(data)
self.assertEqual(x.cos(), np.cos(data))
def test_cum_sum(self):
data = arange((6,), 1)
x = new_tensor(data)
self.assertEqual(x.cumsum(0), np.cumsum(data, 0))
def test_div(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = arange(a_shape), arange(b_shape, 1)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a / b, data1 / data2)
a /= b
self.assertEqual(a, data1 / data2)
def test_equal(self):
for a_shape, b_shape in self.binary_test_shapes:
data1 = uniform(a_shape)
data2 = dropout(data1, drop_ratio=0.5)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a == b, np.equal(data1, data2))
def test_exp(self):
data = np.array([0., 1., 2.], 'float32')
x = new_tensor(data)
self.assertEqual(x.exp(), np.exp(data))
x.exp_()
self.assertEqual(x, np.exp(data))
def test_expand(self):
entries = [(2, 2, 3, 1),
(1, 2, 3, 2),
(2, 2, 3, 2),
(2, 1, 2, 3, 1)]
for shape in entries:
data = np.arange(6).astype('float32').reshape((1, 2, 3, 1))
x = new_tensor(data)
self.assertEqual(x.expand(shape), np.broadcast_to(data, shape))
self.assertEqual(x.expand_as(x.expand(shape)), np.broadcast_to(data, shape))
def test_eye(self):
entries = [(2,), (2, 2), (2, 3), (3, 2)]
for shape in entries:
x = torch.eye(*shape, dtype='float32')
self.assertEqual(x, np.eye(*shape, dtype='float32'))
def test_fill(self):
entries = [((2, 3), 1), ((2, 3), 1.)]
for shape, value in entries:
data = np.zeros(shape)
x = new_tensor(data)
x.fill_(value)
data.fill(value)
self.assertEqual(x, data)
def test_full(self):
entries = [((2, 3), 1), ((2, 3), 1.)]
for shape, value in entries:
data = np.zeros(shape)
x = torch.full((1,), 0).new_full(shape, value)
data.fill(value)
self.assertEqual(x, data)
self.assertEqual(torch.empty(1).new_ones(shape), np.ones(shape))
self.assertEqual(torch.empty(1).new_zeros(shape), np.zeros(shape))
self.assertEqual(torch.full_like(x, 0), np.zeros(shape))
def test_flatten(self):
data = arange((1, 2, 3))
x = new_tensor(data)
self.assertEqual(x.flatten(), data.flatten())
x.flatten_(-3, -2)
self.assertEqual(x, data.reshape((2, 3)))
def test_flip(self):
data = arange((2, 3, 4))
x = new_tensor(data)
self.assertEqual(x.flip((1, 2)), np.flip(data, (1, 2)))
self.assertEqual(x.fliplr(), np.fliplr(data))
self.assertEqual(x.flipud(), np.flipud(data))
def test_floor(self):
data = np.array([0.9, 1.4, 1.9])
x = new_tensor(data)
self.assertEqual(x.floor(), np.floor(data))
x.floor_()
self.assertEqual(x, np.floor(data))
def test_gather(self):
for axis in range(0, 1):
data1 = arange((2, 4))
data2 = np.array([[0, 1, 1, 0], [1, 1, 0, 0]])
x, index = new_tensor(data1), new_tensor(data2)
y = x.gather(axis, index)
result = np.zeros_like(data2)
for i, j in itertools.product(*[range(d) for d in data2.shape]):
if axis == 0:
result[i, j] = data1[data2[i, j], j]
else:
result[i, j] = data1[i, data2[i, j]]
self.assertEqual([y], [result])
def test_getitem(self):
data1, data2 = arange((2, 3)), arange((2,), dtype='int64')
x, index = new_tensor(data1), new_tensor(data2)
self.assertEqual(x[x > 2], data1[data1 > 2])
entries = [0,
slice(None, None, None),
slice(0, None, None),
slice(0, 0, None),
slice(0, 1, None),
slice(0, 1, 1)]
for item in entries:
try:
self.assertEqual(x.__getitem__(item), data1.__getitem__(item))
except (NotImplementedError, ValueError):
pass
self.assertEqual(x[index], data1[data2])
self.assertEqual(x[:, index], data1[:, data2])
entries = [x,
(slice(1, None, None), index),
(1, index),
(index, index)]
for item in entries:
try:
x.__getitem__(item)
except (TypeError, NotImplementedError):
pass
def test_greater(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = uniform(a_shape), uniform(b_shape)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a > b, np.greater(data1, data2))
def test_greater_equal(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = uniform(a_shape), uniform(b_shape)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a >= b, np.greater_equal(data1, data2))
def test_index_select(self):
entries = [1, (1, 2)]
for axis in entries:
data = arange((1, 2, 3, 4))
index = np.array([0, 1, 1], dtype='int64')
axes = nest.flatten(axis)
if len(axes) > 1:
flatten_shape = \
data.shape[:axes[0]] + \
(int(np.prod(data.shape[axes[0]:axes[-1] + 1])),) + \
data.shape[axes[-1] + 1:]
else:
flatten_shape = data.shape[:]
for i in index:
slices = [slice(None, None, None)] * (len(flatten_shape) - 1)
slices.insert(axes[0], i)
x = new_tensor(data)
x_index = new_tensor(index, False)
y = x.index_select(axis, x_index)
self.assertEqual(
y, np.take(data.reshape(flatten_shape), index, axis=axes[0]))
def test_isfinite(self):
data = np.array([0., float('nan'), float('inf')])
x = new_tensor(data)
self.assertEqual(x.isfinite(), np.isfinite(data))
def test_isinf(self):
data = np.array([0., 1., float('inf')])
x = new_tensor(data)
self.assertEqual(x.isinf(), np.isinf(data))
def test_isnan(self):
data = np.array([0., 1., float('nan')])
x = new_tensor(data)
self.assertEqual(x.isnan(), np.isnan(data))
def test_less(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = uniform(a_shape), uniform(b_shape)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a < b, np.less(data1, data2))
def test_less_equal(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = uniform(a_shape), uniform(b_shape)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a <= b, np.less_equal(data1, data2))
def test_log(self):
data = np.array([1., 2., 3.], 'float32')
x = new_tensor(data)
self.assertEqual(x.log(), np.log(data))
x.log_()
self.assertEqual(x, np.log(data))
def test_logical_and(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = arange(a_shape), arange(b_shape, 1)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a.logical_and(b), np.logical_and(data1, data2))
def test_logical_not(self):
for shape in self.unary_test_shapes:
data = arange(shape)
x = new_tensor(data)
self.assertEqual(x.logical_not(), np.logical_not(data))
def test_logical_or(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = arange(a_shape), arange(b_shape, 1)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a.logical_or(b), np.logical_or(data1, data2))
def test_logical_xor(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = arange(a_shape), arange(b_shape, 1)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a.logical_xor(b), np.logical_xor(data1, data2))
def test_log_sum_exp(self):
data = np.array([1., 2., 3.], 'float32')
x = new_tensor(data)
self.assertEqual(x.logsumexp(0), np.log(np.sum(np.exp(data))))
def test_masked_fill(self):
data = arange((2, 3))
x = new_tensor(data)
mask = x > 2
y = x.masked_fill(mask, 0)
x.masked_fill_(mask, 0)
data[data > 2] = 0
self.assertEqual(x, data)
self.assertEqual(y, data)
def test_matmul(self):
test_shapes = [((2,), (2,)),
((2,), (2, 3)),
((2, 3), (3,)),
((2, 3), (3, 4)),
((2,), (4, 2, 3)),
((4, 2, 3), (3,)),
((1, 2, 3), (2, 3, 4)),
((2, 2, 3), (1, 3, 4)),
((2, 2, 3), (2, 3, 4)),
((2, 1, 2, 3), (2, 3, 4)),
((1, 2, 3), (2, 2, 3, 4)),
((2, 1, 2, 3), (1, 2, 3, 4))]
for a_shape, b_shape in test_shapes:
data1, data2 = arange(a_shape), arange(b_shape, 1)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a.__matmul__(b), np.matmul(data1, data2))
def test_max(self):
entries = [(0, True), (0, False),
(1, True), (1, False),
((0, 1), True), ((0, 1), False)]
for axis, keepdims in entries:
data = arange((2, 3))
x = new_tensor(data)
y = x.max(axis, keepdim=keepdims)
result = np.max(data, axis, keepdims=keepdims)
self.assertEqual(y, result)
def test_maximum(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = uniform(a_shape), uniform(b_shape)
a, b = new_tensor(data1), new_tensor(data2)
y = a.maximum(b)
self.assertEqual(y, np.maximum(data1, data2))
def test_mean(self):
entries = [(0, True), (0, False),
(1, True), (1, False),
((0, 1), True), ((0, 1), False)]
for axis, keepdims in entries:
data = arange((2, 3))
x = new_tensor(data)
y = x.mean(axis, keepdim=keepdims)
result = np.mean(data, axis, keepdims=keepdims)
self.assertEqual(y, result)
def test_min(self):
entries = [(0, True), (0, False),
(1, True), (1, False),
((0, 1), True), ((0, 1), False)]
for axis, keepdims in entries:
data = arange((2, 3))
x = new_tensor(data)
y = x.min(axis, keepdim=keepdims)
result = np.min(data, axis, keepdims=keepdims)
self.assertEqual(y, result)
def test_minimum(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = uniform(a_shape), uniform(b_shape)
a, b = new_tensor(data1), new_tensor(data2)
y = a.minimum(b)
self.assertEqual(y, np.minimum(data1, data2))
def test_mm(self):
entries = [((2, 3), (3, 4))]
for a_shape, b_shape in entries:
data1, data2 = arange(a_shape), arange(b_shape)
a, b = new_tensor(data1), new_tensor(data2)
y = a.mm(b)
self.assertEqual(y, np.matmul(data1, data2))
def test_mul(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = arange(a_shape), arange(b_shape, 1)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a * b, data1 * data2)
a *= b
self.assertEqual(a, data1 * data2)
def test_multinomial(self):
data = np.array([[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]])
x = new_tensor(data)
y = x.multinomial(2)
self.assertEqual(y.shape, (2, 2))
def test_narrow(self):
data = arange((2, 3))
x = new_tensor(data)
self.assertEqual(x.narrow(0, 1, 1), data[1:2, :])
def test_not_equal(self):
for a_shape, b_shape in self.binary_test_shapes:
data1 = uniform(a_shape)
data2 = dropout(data1, drop_ratio=0.5)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a != b, np.not_equal(data1, data2))
def test_neg(self):
data = np.array([-1., 0., 1.], 'float32')
x = new_tensor(data)
self.assertEqual(-x, -data)
x.neg_()
self.assertEqual(x, -data)
def test_non_zero(self):
data = arange((2, 3))
x = new_tensor(data)
self.assertEqual((x > 2).nonzero(), np.stack(np.nonzero(data > 2), axis=1))
def test_norm(self):
entries = [(0, True), (0, False),
(1, True), (1, False),
((0, 1), True), ((0, 1), False)]
for axis, keepdims in entries:
for ord in (1, 2, 'fro', None):
data = arange((2, 3))
x = new_tensor(data)
y = x.norm(ord, axis, keepdim=keepdims)
if ord == 1:
result = np.sum(np.abs(data), axis=axis, keepdims=keepdims)
elif ord == 2 or ord == 'fro':
result = np.sum(np.square(data), axis=axis, keepdims=keepdims)
result = np.sqrt(result)
else:
result = np.linalg.norm(data, ord, axis, keepdims=keepdims)
self.assertEqual(y, result)
def test_normal(self):
data = arange((2, 3))
x = new_tensor(data)
x.normal_()
def test_permute(self):
entries = [(0, 2, 1), None]
for perm in entries:
data = arange((2, 3, 4))
x = new_tensor(data)
if perm is None:
self.assertEqual(x.permute(), np.transpose(data))
self.assertEqual(x.T, data.T)
x.permute_()
self.assertEqual(x, np.transpose(data))
else:
self.assertEqual(x.permute(*perm), np.transpose(data, perm))
x.permute_(*perm)
self.assertEqual(x, np.transpose(data, perm))
entries = [(0, 1), (0, 2), (1, 2)]
for dim0, dim1 in entries:
data = arange((2, 3, 4))
x = new_tensor(data)
perm = list(range(len(data.shape)))
perm[dim0], perm[dim1] = perm[dim1], perm[dim0]
self.assertEqual(x.transpose(dim0, dim1), np.transpose(data, perm))
x.transpose_(dim0, dim1)
self.assertEqual(x, np.transpose(data, perm))
def test_pow(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = arange(a_shape, 1), arange(b_shape)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a.pow(b), np.power(data1, data2))
def test_reciprocal(self):
data = np.array([1., 2., 3.], 'float32')
x = new_tensor(data)
self.assertEqual(x.reciprocal(), np.reciprocal(data))
x.reciprocal_()
self.assertEqual(x, np.reciprocal(data))
def test_repeat(self):
entries = [(2,), (1, 1), (1, 2), (2, 1), (2, 2)]
for repeats in entries:
data = arange((2, 2))
x = new_tensor(data)
y = x.repeat(repeats)
repeats = (1,) * (len(data.shape) - len(repeats)) + repeats
self.assertEqual(y, np.tile(data, repeats))
def test_reshape(self):
entries = [(0, 0), (0, -1)]
for shape in entries:
data = arange((2, 3))
x = new_tensor(data)
y = x.reshape(shape)
self.assertEqual(y, data.reshape(y.shape))
x.reshape_(shape)
self.assertEqual(x, data.reshape(y.shape))
self.assertEqual(x.view(data.shape), data)
x.view_(data.shape)
self.assertEqual(x, data)
self.assertEqual(x.view_as(x), data)
def test_roll(self):
entries = [(0, 0), ((0, 0), (0, 1)), ((-1, 1), (0, 1)), (1, None)]
for shift, axis in entries:
data = arange((2, 3))
x = new_tensor(data)
y = x.roll(shift, axis)
self.assertEqual(y, np.roll(data, shift, axis))
def test_round(self):
data = np.array([0.9, 1.4, 1.9], 'float32')
x = new_tensor(data)
self.assertEqual(x.round(), np.round(data))
x.round_()
self.assertEqual(x, np.round(data))
def test_rsqrt(self):
data = np.array([4., 9., 16], 'float32')
x = new_tensor(data)
result = 1. / np.sqrt(data)
self.assertEqual(x.rsqrt(), result)
x.rsqrt_()
self.assertEqual(x, result)
def test_scatter(self):
for axis in range(0, 1):
data1 = arange((4, 4))
data2 = np.array([[0, 1, 2, 3], [1, 2, 3, 0],
[2, 3, 0, 1], [3, 0, 1, 2]])
data3 = arange((4, 4), 100)
x, index = new_tensor(data1), new_tensor(data2)
v = new_tensor(data3)
y = x.scatter(axis, index, v)
result = data1.copy()
for i, j in itertools.product(*[range(d) for d in data2.shape]):
if axis == 0:
result[data2[i, j], j] = data3[i, j]
else:
result[i, data2[i, j]] = data3[i, j]
self.assertEqual(y, result)
x.scatter_(axis, index, v)
self.assertEqual(x, result)
def test_scatter_add(self):
for axis in range(0, 1):
data1 = arange((4, 4))
data2 = np.array([[0, 0], [0, 0]])
data3 = arange((4, 4), 100)
x, index = new_tensor(data1), new_tensor(data2)
v = new_tensor(data3)
y = x.scatter_add(axis, index, v)
result = data1.copy()
for i, j in itertools.product(*[range(d) for d in data2.shape]):
if axis == 0:
result[data2[i, j], j] += data3[i, j]
else:
result[i, data2[i, j]] += data3[i, j]
self.assertEqual(y, result)
x.scatter_(axis, index, v, reduce='add')
self.assertEqual(x, result)
def test_scatter_mul(self):
for axis in range(0, 1):
data1 = arange((4, 4))
data2 = np.array([[0, 1, 2, 3], [1, 2, 3, 0],
[2, 3, 0, 1], [3, 0, 1, 2]])
x, index = new_tensor(data1), new_tensor(data2)
result = data1.copy()
for i, j in itertools.product(*[range(d) for d in data2.shape]):
if axis == 0:
result[data2[i, j], j] *= 2.33
else:
result[i, data2[i, j]] *= 2.33
x.scatter_(axis, index, 2.33, reduce='multiply')
self.assertEqual(x, result)
def test_setitem(self):
data = arange((2, 3))
x = new_tensor(data)
x[x > 2] = 0
data[data > 2] = 0
self.assertEqual(x, data)
entries = [0,
slice(None, None, None),
slice(0, None, None),
slice(0, 0, None),
slice(0, 1, None),
slice(0, 1, 1),
data,
(data, data)]
for item in entries:
try:
x.__setitem__(item, 0)
data.__setitem__(item, 0)
self.assertEqual(x, data)
except (NotImplementedError, ValueError, TypeError):
pass
def test_sign(self):
data = np.array([-1., 0., 1.], 'float32')
x = new_tensor(data)
self.assertEqual(x.sign(), np.sign(data))
x.sign_()
self.assertEqual(x, np.sign(data))
def test_sin(self):
data = np.array([0., math.pi * 0.5, math.pi], 'float32')
x = new_tensor(data)
self.assertEqual(x.sin(), np.sin(data))
def test_sort(self):
entries = [(None, True),
(0, True),
(-1, True),
(0, False),
(-1, False)]
for axis, descending in entries:
data = uniform((5, 10))
x = new_tensor(data)
val, idx1 = x.sort(axis, descending)
idx2 = x.argsort(axis, descending)
axis = axis if axis is not None else -1
result_val = np.sort(-data if descending else data, axis=axis)
result_val = -result_val if descending else result_val
result_idx = np.argsort(-data if descending else data, axis=axis)
result_idx = np.take(result_idx, np.arange(data.shape[axis]), axis=axis)
self.assertEqual(val, result_val)
self.assertEqual(idx1, result_idx)
self.assertEqual(idx2, result_idx)
def test_split(self):
entries = [((2, 4), 2, 1),
((2, 3), 2, 1),
((2, 3), (2, 1), 1)]
for shape, size_or_sections, dim in entries:
data = arange(shape)
x = new_tensor(data)
y = x.split(size_or_sections, dim)
self.assertEqual(y, np.split(data, (2,), axis=1))
def test_sqrt(self):
data = np.array([4., 9., 16], 'float32')
x = new_tensor(data)
self.assertEqual(x.sqrt(), np.sqrt(data))
x.sqrt_()
self.assertEqual(x, np.sqrt(data))
def test_square(self):
data = np.array([2., 3., 4], 'float32')
x = new_tensor(data)
self.assertEqual(x.square(), np.square(data))
def test_squeeze(self):
entries = [((2, 1, 3), 1), ((1, 2, 1, 3), (0, 2)), ((3, 1, 2, 1), (1,))]
for shape, axis in entries:
data = arange(shape)
x = new_tensor(data)
self.assertEqual(x.squeeze(axis), np.squeeze(data, axis))
x.squeeze_(axis)
self.assertEqual(x, np.squeeze(data, axis))
def test_sub(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = arange(a_shape), arange(b_shape, 1)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a - b, data1 - data2)
a -= b
self.assertEqual(a, data1 - data2)
def test_tril(self):
entries = [(3, 3), (3, 4,), (4, 3), (2, 3, 3)]
for shape in entries:
data = arange(shape, 1)
for k in range(-max(shape), max(shape) + 1):
x = new_tensor(data)
y = x.tril(k)
self.assertEqual(y, np.tril(data, k))
x.tril_(k)
self.assertEqual(x, np.tril(data, k))
def test_triu(self):
entries = [(3, 3), (3, 4,), (4, 3), (2, 3, 3)]
for shape in entries:
data = arange(shape, 1)
for k in range(-max(shape), max(shape) + 1):
x = new_tensor(data)
y = x.triu(k)
self.assertEqual(y, np.triu(data, k))
x.triu_(k)
self.assertEqual(x, np.triu(data, k))
def test_topk(self):
entries = [(2, None, True),
(2, 0, True),
(2, -1, True),
(2, 0, False),
(2, -1, False)]
for k, axis, largest in entries:
data = uniform((5, 10))
x = new_tensor(data)
y = x.topk(k, axis, largest)[1]
axis = axis if axis is not None else -1
result = np.argsort(-data if largest else data, axis=axis)
result = np.take(result, np.arange(k), axis=axis)
self.assertEqual(y, result)
def test_type(self):
entries = [('bool', 'bool'),
('byte', 'uint8'),
('char', 'int8'),
('double', 'float64'),
('float', 'float32'),
('half', 'float16'),
('int', 'int32'),
('long', 'int64')]
for name, dtype in entries:
data = arange((2, 3))
x = new_tensor(data)
self.assertEqual(getattr(x, name)(), data.astype(dtype))
getattr(x, name + '_')()
self.assertEqual(x, data.astype(dtype))
y = x.type(dtype)
self.assertEqual(y.type(), dtype)
def test_unbind(self):
entries = [0, 1]
for axis in entries:
data = arange((2, 3))
num = data.shape[axis]
grad = np.ones(data.shape, 'float32')
grad[tuple(slice(0, 1) if i == axis else
slice(None) for i in range(data.ndim))] = 0
x = new_tensor(data)
y = x.unbind(axis)
result = [x.squeeze(axis) for x in np.split(data, num, axis)]
self.assertEqual(y, result)
def test_uniform(self):
data = arange((2, 3))
x = new_tensor(data)
x.uniform_()
def test_unique(self):
data = np.array([1, 1, 3, 5, 5, 7, 9])
entries = [(False, False),
(True, False),
(False, True),
(True, True)]
for return_inverse, return_counts in entries:
x = new_tensor(data)
y = x.unique(return_inverse=return_inverse,
return_counts=return_counts,
sorted=True)
result = np.unique(
data,
return_inverse=return_inverse,
return_counts=return_counts)
self.assertEqual(y, result)
def test_unsqueeze(self):
entries = [1, -1]
for axis in entries:
data = arange((2, 3, 4))
x = new_tensor(data)
self.assertEqual(x.unsqueeze(axis), np.expand_dims(data, axis=axis))
x.unsqueeze_(axis)
self.assertEqual(x, np.expand_dims(data, axis=axis))
def test_where(self):
entries = [((6,), (6,))]
for a_shape, b_shape in entries:
data1, data2 = arange(a_shape), arange(b_shape, 1)
data3 = data1 > 1
a, b = new_tensor(data1, False), new_tensor(data2, False)
c = new_tensor(data3, False)
self.assertEqual(a.where(c, b), np.where(data3, data1, data2))
class TestTorchOps(OpTestCase):
"""Test the builtin torch ops."""
def test_arange(self):
entries = [([5], {'dtype': 'int64'}),
([0, 5], {'dtype': 'int64'}),
([0, 5, 2], {'dtype': 'int64'}),
([0., 1., 0.2], {'dtype': 'float32'})]
for (args, kwargs) in entries:
data = np.arange(*args, **kwargs)
x = torch.arange(*args, **kwargs)
self.assertEqual(x, data)
def test_cat(self):
entries = [0, 1]
for axis in entries:
data = arange((2, 2))
x = new_tensor(data)
y = torch.cat([x, x], dim=axis)
self.assertEqual(y, np.concatenate([data, data], axis=axis))
def test_linspace(self):
entries = [([[0., 5.], [10., 40.], 5], {'dim': 0, 'dtype': 'float32'}),
([[0., 5.], [10., 40.], 5], {'dim': 1, 'dtype': 'float32'}),
([[0., 5.], [10., 40.], 5], {'dim': -1, 'dtype': 'float32'}),
([[0.], [10.], 5], {'dim': 0, 'dtype': 'float32'}),
([[0.], [10.], 5], {'dim': -1, 'dtype': 'float32'}),
([0., 10., 5], {'dim': 0, 'dtype': 'float32'}),
([0., 10., 5], {'dim': 0, 'dtype': 'int64'})]
for (args, kwargs) in entries:
x = torch.linspace(*args, **kwargs)
kwargs['axis'] = kwargs.pop('dim')
data = np.linspace(*args, **kwargs)
self.assertEqual(x, data)
def test_ones_like(self):
data = np.ones((2, 3), dtype='float32')
"""
functions for image segmentation and splitting of training/test
dataset
"""
import time
import numpy as np
import matplotlib.colors as colors
import matplotlib as mpl
# my modules
import chmap.utilities.datatypes.datatypes as datatypes
# machine learning modules
import tensorflow as tf
import matplotlib.pyplot as plt
from IPython.display import clear_output
from sklearn.cluster import KMeans
from skimage import measure
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import *
def normalize(input_image):
"""
normalizes image
:param input_image:
:param input_mask:
:return:
"""
input_image = tf.cast(input_image, tf.float32) / 255.0
# input_mask -= 1
return input_image
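# Example: a uint8 pixel of 255 maps to 1.0 and 0 maps to 0.0, so the network
# sees inputs in the [0, 1] range, e.g.
#   normalize(tf.constant([[0, 127, 255]]))   # ~ [[0.0, 0.498, 1.0]]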
def load_image_train(datapoint, size):
input_image = tf.image.resize(datapoint, size)
# input_image = tf.image.resize(datapoint, (128, 128))
# if datapoint['segmentation_mask']:
# input_mask = tf.image.resize(datapoint['segmentation_mask'], size)
# # input_mask = tf.image.resize(segmentation_mask, (128, 128))
if tf.random.uniform(()) > 0.5:
input_image = tf.image.flip_left_right(input_image)
# if input_mask:
# input_mask = tf.image.flip_left_right(input_mask)
# input_image = normalize(input_image)
return input_image
def load_image_val(datapoint):
input_image = tf.image.resize(datapoint['image'], (128, 128))
input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
input_image = normalize(input_image)
return input_image, input_mask
def display_sample(display_list):
plt.figure(figsize=(15, 15))
title = ['Input Image', 'True Mask', 'Predicted Mask']
for i in range(len(display_list)):
plt.subplot(1, len(display_list), i + 1)
plt.title(title[i])
plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))
plt.axis('off')
plt.show()
def create_mask(pred_mask: tf.Tensor) -> tf.Tensor:
"""Return a filter mask with the top 1 predictions
only.
Parameters
----------
pred_mask : tf.Tensor
A [IMG_SIZE, IMG_SIZE, N_CLASS] tensor. For each pixel we have
N_CLASS values (vector) which represents the probability of the pixel
being these classes. Example: A pixel with the vector [0.0, 0.0, 1.0]
has been predicted class 2 with a probability of 100%.
Returns
-------
tf.Tensor
A [IMG_SIZE, IMG_SIZE, 1] mask with top 1 predictions
for each pixel.
"""
# pred_mask -> [IMG_SIZE, SIZE, N_CLASS]
# 1 prediction for each class but we want the highest score only
# so we use argmax
pred_mask = tf.argmax(pred_mask, axis=-1)
# pred_mask becomes [IMG_SIZE, IMG_SIZE]
# but matplotlib needs [IMG_SIZE, IMG_SIZE, 1]
pred_mask = tf.expand_dims(pred_mask, axis=-1)
return pred_mask
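# Worked example of create_mask: for a pixel whose class vector is
# [0.1, 0.2, 0.7], tf.argmax selects class 2, and expand_dims restores the
# trailing channel so the result has shape [IMG_SIZE, IMG_SIZE, 1].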
def show_predictions(sample_image=None, sample_mask=None, dataset=None, model=None, num=1):
"""Show a sample prediction.
Parameters
----------
dataset : tf.data.Dataset, optional
Input dataset, by default None
num : int, optional
Number of samples to show, by default 1
"""
if dataset:
for image, mask in dataset.take(num):
pred_mask = model.predict(image)
display_sample([image, mask, create_mask(pred_mask)])
else:
# The model is expecting a tensor of the size
# [BATCH_SIZE, IMG_SIZE, IMG_SIZE, 3]
# but sample_image[0] is [IMG_SIZE, IMG_SIZE, 3]
# and we want only 1 inference to be faster
# so we add an additional dimension [1, IMG_SIZE, IMG_SIZE, 3]
one_img_batch = sample_image[0][tf.newaxis, ...]
# one_img_batch -> [1, IMG_SIZE, IMG_SIZE, 3]
inference = model.predict(one_img_batch)
# inference -> [1, IMG_SIZE, IMG_SIZE, N_CLASS]
pred_mask = create_mask(inference)
# pred_mask -> [1, IMG_SIZE, IMG_SIZE, 1]
display_sample([sample_image[0], sample_mask[0],
pred_mask[0]])
#### more advanced training
class DisplayCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
clear_output(wait=True)
# show_predictions()
print('\nSample Prediction after epoch {}\n'.format(epoch + 1))
#### apply detection
def ml_chd(model, iit_list, los_list, use_indices, inst_list):
start = time.time()
chd_image_list = [datatypes.CHDImage()] * len(inst_list)
for inst_ind, instrument in enumerate(inst_list):
if iit_list[inst_ind] is not None:
# define CHD parameters
image_data = iit_list[inst_ind].iit_data
use_chd = use_indices[inst_ind]
# ML CHD
# create correct data format
scalarMap = mpl.cm.ScalarMappable(norm=colors.LogNorm(vmin=1.0, vmax=np.max(image_data)),
cmap='sohoeit195')
colorVal = scalarMap.to_rgba(image_data, norm=True)
data_x = colorVal[:, :, :3]
# apply ml algorithm
ml_output = model.predict(data_x[tf.newaxis, ...], verbose=1)
result = (ml_output[0] > 0.1).astype(np.uint8)
# use_chd = np.logical_and(image_data != -9999, result.squeeze() > 0)
pred = np.zeros(shape=result.squeeze().shape)
pred[use_chd] = result.squeeze()[use_chd]
# pred = np.zeros(shape=ml_output.squeeze().shape)
# pred[use_chd] = ml_output.squeeze()[use_chd]
# chd_result = np.logical_and(pred == 1, use_chd == 1)
# chd_result = chd_result.astype(int)
# binary_result = np.logical_and(binary_output == 1, use_chd == 1)
# binary_result = binary_result.astype(int)
# create CHD image
chd_image_list[inst_ind] = datatypes.create_chd_image(los_list[inst_ind], pred)
chd_image_list[inst_ind].get_coordinates()
# chd_binary_list[inst_ind] = datatypes.create_chd_image(los_list[inst_ind], binary_result)
# chd_binary_list[inst_ind].get_coordinates()
end = time.time()
print("Coronal Hole Detection algorithm implemented in", end - start, "seconds.")
return chd_image_list
def conv2d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):
"""Function to add 2 convolutional layers with the parameters passed to it"""
# first layer
x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer='he_normal',
padding='same')(input_tensor)
if batchnorm:
x = BatchNormalization()(x)
x = Activation('relu')(x)
# second layer
x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size),
kernel_initializer='he_normal', padding='same')(x)
if batchnorm:
x = BatchNormalization()(x)
x = Activation('relu')(x)
return x
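# Minimal usage sketch (shapes only, assuming a 128x128 RGB input):
#   inp = Input((128, 128, 3))
#   out = conv2d_block(inp, n_filters=16)   # shape (None, 128, 128, 16)
# 'same' padding keeps the spatial size; only the channel count changes.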
def get_unet(input_img, n_filters=16, dropout=0.1, batchnorm=True):
# Contracting Path
c1 = conv2d_block(input_img, n_filters * 1, kernel_size=3, batchnorm=batchnorm)
p1 = MaxPooling2D((2, 2))(c1)
p1 = Dropout(dropout)(p1)
c2 = conv2d_block(p1, n_filters * 2, kernel_size=3, batchnorm=batchnorm)
p2 = MaxPooling2D((2, 2))(c2)
p2 = Dropout(dropout)(p2)
c3 = conv2d_block(p2, n_filters * 4, kernel_size=3, batchnorm=batchnorm)
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(dropout)(p3)
c4 = conv2d_block(p3, n_filters * 8, kernel_size=3, batchnorm=batchnorm)
p4 = MaxPooling2D((2, 2))(c4)
p4 = Dropout(dropout)(p4)
c5 = conv2d_block(p4, n_filters=n_filters * 16, kernel_size=3, batchnorm=batchnorm)
# Expansive Path
u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides=(2, 2), padding='same')(c5)
u6 = concatenate([u6, c4])
u6 = Dropout(dropout)(u6)
c6 = conv2d_block(u6, n_filters * 8, kernel_size=3, batchnorm=batchnorm)
u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides=(2, 2), padding='same')(c6)
u7 = concatenate([u7, c3])
u7 = Dropout(dropout)(u7)
c7 = conv2d_block(u7, n_filters * 4, kernel_size=3, batchnorm=batchnorm)
u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides=(2, 2), padding='same')(c7)
u8 = concatenate([u8, c2])
u8 = Dropout(dropout)(u8)
c8 = conv2d_block(u8, n_filters * 2, kernel_size=3, batchnorm=batchnorm)
u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2), padding='same')(c8)
u9 = concatenate([u9, c1])
u9 = Dropout(dropout)(u9)
c9 = conv2d_block(u9, n_filters * 1, kernel_size=3, batchnorm=batchnorm)
outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)
model = Model(inputs=[input_img], outputs=[outputs])
return model
def load_model(model_h5, IMG_SIZE=2048, N_CHANNELS=3):
"""
function to load keras model from hdf5 file
:param model_h5:
:param IMG_SIZE:
:param N_CHANNELS:
:return:
"""
input_img = Input((IMG_SIZE, IMG_SIZE, N_CHANNELS), name='img')
model = get_unet(input_img, n_filters=16, dropout=0.05, batchnorm=True)
model.compile(optimizer=Adam(), loss="binary_crossentropy", metrics=["accuracy"])
model.load_weights(model_h5)
return model
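# Hypothetical usage sketch (the weights filename is an assumption):
#   model = load_model("model_unet.h5", IMG_SIZE=2048, N_CHANNELS=3)
#   chd_images = ml_chd(model, iit_list, los_list, use_indices, inst_list)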
def cluster_brightness(clustered_img, org_img, n_clusters):
# create average color array
avg_color = []
for i in range(0, n_clusters):
cluster_indices = np.where(clustered_img == i)
# average per row
average_color_per_row = np.average(org_img[cluster_indices], axis=0)
# find average across average per row
avg_color.append(average_color_per_row)
return avg_color
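# Example: with n_clusters=2, if cluster 0 covers dark pixels averaging 5.0
# and cluster 1 covers bright pixels averaging 200.0, this returns
# [5.0, 200.0]; np.argsort of that list orders the clusters dark-to-bright,
# which is how kmeans_detection below picks the darkest (coronal hole) clusters.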
def kmeans_detection(org_map, use_data, arr, N_CLUSTERS, IMG_HEIGHT, IMG_WIDTH, map_x, map_y):
optimalk = KMeans(n_clusters=N_CLUSTERS, random_state=0, init='k-means++').fit(arr)
labels = optimalk.labels_
pred_clustered = labels.reshape(IMG_HEIGHT, IMG_WIDTH)
# get cluster brightnesses
avg_color = cluster_brightness(pred_clustered, use_data, N_CLUSTERS)
color_order = np.argsort(avg_color)
### CH Detection
chd_clustered = pred_clustered + 1
chd_clustered = np.where(np.logical_or(chd_clustered == color_order[0] + 1, chd_clustered == color_order[1] + 1), N_CLUSTERS + 1, 0)
chd_clustered = np.where(chd_clustered == N_CLUSTERS + 1, 1, 0)
# area constraint
chd_labeled = measure.label(chd_clustered, connectivity=2, background=0, return_num=True)
# get area
chd_area = [props.area for props in measure.regionprops(chd_labeled[0])]
# remove coronal holes with an area of 25 pixels or fewer
chd_good_area = np.where(np.array(chd_area) > 25)
indices = []
chd_plot = np.zeros(chd_labeled[0].shape)
for val in chd_good_area[0]:
val_label = val + 1
indices.append(np.logical_and(chd_labeled[0] == val_label, val in chd_good_area[0]))
try:
import os, errno
from datetime import datetime
import pandas as pd
import numpy as np
from pathlib import Path
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.linear_model import BayesianRidge
from sklearn import preprocessing
import seaborn as sns
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
from scipy import stats
sns.set_theme()
except:
pass
from environmental_data_modules import AurnPostProcessor
class AurnImputationTest(AurnPostProcessor):
"""
Class for testing the imputation of the data that is extracted from the AURN server.
"""
# testing default values
DEFAULT_DATA_LOST = 0.5
DEFAULT_DATA_LOSS_POSITION = 'end'
DEFAULT_CHECK_SITES = False
DEFAULT_STAT_DIR = './'
BASE_IMPUTED_STATS_PDF_FILE = '{}/{}_{}_imputed_comparison.pdf'
BASE_IMPUTED_STATS_CSV_FILE = '{}/aurn_{}_correlation_stats.csv'
DEFAULT_FLOAT_FORMAT = '%.4f'
def __init__(self, metadata_filename=AurnPostProcessor.DEFAULT_METADATA_FILE, metadata_url=AurnPostProcessor.DEFAULT_METADATA_URL,
out_dir=AurnPostProcessor.DEFAULT_OUT_DIR, verbose=AurnPostProcessor.DEFAULT_VERBOSE, stat_dir=DEFAULT_STAT_DIR):
""" Initialise instance of the AurnImputationTest class.
Initialises the private class variables
Args:
metadata_filename: filename of the metadata used in Aurn data extraction
metadata_url: alternative source of AURN metadata, if metadata_filename is None
out_dir: (string) directory to be used for all outputs
verbose: (integer) level of verbosity in output.
Returns:
Initialised instance of AurnImputationTest
"""
super(AurnImputationTest, self).__init__(metadata_filename,metadata_url,out_dir,verbose)
self._data_lost = AurnImputationTest.DEFAULT_DATA_LOST
self._data_loss_position = AurnImputationTest.DEFAULT_DATA_LOSS_POSITION
self.check_sites = AurnImputationTest.DEFAULT_CHECK_SITES
self.rng = np.random.RandomState(0)
self.stat_dir = stat_dir
@property
def stat_dir(self):
return self.__stat_dir
@stat_dir.setter
def stat_dir(self, dir_name):
try:
dir_name = str(dir_name)
except ValueError as err:
raise err
try:
os.makedirs(dir_name)
except OSError as e:
if e.errno != errno.EEXIST:
raise ValueError("Directory name {} cannot be created.".format(dir_name))
self.__stat_dir = dir_name
@property
def data_lost(self):
return self._data_lost
@data_lost.setter
def data_lost(self, data_lost):
if type(data_lost) == float and 0 <= data_lost < 1:
self._data_lost = data_lost
else:
raise Exception('data_lost is {}, but should be float from 0 upto (not including) 1'.format(data_lost))
@property
def data_loss_position(self):
return self._data_loss_position
@data_loss_position.setter
def data_loss_position(self, data_loss_position):
if type(data_loss_position) == str and data_loss_position in ['start','middle','end','random']:
self._data_loss_position = data_loss_position
else:
raise Exception("data_loss_position is {}, but should be string matching one of these: ['start','middle','end','random']".format(data_loss_position))
def imputation_test(self, in_file, date_range=None,
site_list=AurnPostProcessor.DEFAULT_SITE_LIST,
emep_filename=AurnPostProcessor.DEFAULT_EMEP_FILENAME,
min_years_reference=AurnPostProcessor.DEFAULT_MIN_YEARS_REFERENCE,
min_years=AurnPostProcessor.DEFAULT_MIN_YEARS,
data_lost=DEFAULT_DATA_LOST,
data_loss_position=DEFAULT_DATA_LOSS_POSITION,
save_to_csv=AurnPostProcessor.DEFAULT_SAVE_TO_CSV,
outfile_suffix='',check_sites=DEFAULT_CHECK_SITES,
species_list=AurnPostProcessor.SPECIES_LIST_EXTRACTED):
""" Testing the imputation methods used for filling in missing data. Replicates the
methods used in 'process' function, but for the defined sites will remove a
given amount of data from the dataframe before imputation, then compare the
imputed data with the original data, to determine accuracy of the method.
The stations to be tested need to meet the requirements for 'reference' sites.
Args:
in_file: (str) The file spec of the input file (required)
date_range: (list of 2 datetime) The date range of interest
site_list: (list of string/number) Site IDs of interest
emep_filename: (str) The file spec of the EMEP file to be used to help calculate #Todo Doug
min_years_reference: (float) The minimum number of years of data for any site that we are going to
use as a reference site later. (this cannot be less than min_years)
min_years: (float) The minimum number of years of data that a site must have
data_lost: (float) The fraction of each dataset to remove
data_loss_position: (str) where to lose the data (start,middle,end,random)
save_to_csv: (boolean) Whether to save the output dateframes to CSV file(s)
outfile_suffix: (str) The suffix to appended to the end of output file names.
check_sites: (boolean) If True then routine will list appropriate stations
to use for the imputation tests, then exit
species_list: (list of strings) list of chemical species to test
Returns:
daily_dataframe: daily dataset, for all measurements, as pandas.Dataframe
Required MultiIndex:
'time_stamp' (datetime object): date (only) (e.g. 2017-06-01)
'sensor_name' (string): ID string for site (e.g. 'LIN3 [AQ]')
Required columns:
'O3.max' (float): daily maximum value
'O3.mean' (float): daily mean value
'O3.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'PM10.max' (float): daily maximum value
'PM10.mean' (float): daily mean value
'PM10.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'PM2.5.max' (float): daily maximum value
'PM2.5.mean' (float): daily mean value
'PM2.5.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'NO2.max' (float): daily maximum value
'NO2.mean' (float): daily mean value
'NO2.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'NOXasNO2.max' (float): daily maximum value
'NOXasNO2.mean' (float): daily mean value
'NOXasNO2.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'SO2.max' (float): daily maximum value
'SO2.mean' (float): daily mean value
'SO2.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
"""
# Process inputs
if date_range is not None:
self.date_range = [datetime.strptime(date_range[0], AurnPostProcessor.INPUT_DATE_FORMAT),
datetime.strptime(date_range[1], AurnPostProcessor.INPUT_DATE_FORMAT)]
else:
self.date_range = [self.get_available_start(), self.get_available_end()]
self.file_out = AurnPostProcessor.BASE_FILE_OUT.format(self.out_dir, outfile_suffix)
self._emep_data = self.load_emep_data(emep_filename)
self.min_years = min_years
self.min_years_reference = min_years_reference
self.site_list = site_list
self.data_lost = data_lost
self.data_loss_position = data_loss_position
self.species_list = species_list
self.check_sites = check_sites
self.station_data = self.metadata['AURN_metadata'][['site_id', 'latitude', 'longitude', 'site_name']]
if self.verbose > 1: print('Station data: \n {}'.format(self.station_data))
self.pdf_file_string = AurnImputationTest.BASE_IMPUTED_STATS_PDF_FILE
self.csv_file_string = AurnImputationTest.BASE_IMPUTED_STATS_CSV_FILE
self.float_format = AurnImputationTest.DEFAULT_FLOAT_FORMAT
# load and prepare the hourly dataset
hourly_dataframe = self.load_aurn_data(in_file)
print('filter for minimum data lengths, and reduce dataset to only stations of interest')
hourly_dataframe_filtered, reference_sites, required_sites, site_list_internal = \
self.site_list_and_preparation(hourly_dataframe)
if len(hourly_dataframe_filtered.index) == 0:
print('Exiting post-processing: Metadata is empty after initial filtering processes')
return
print('data preparation, to create the test dataset for imputation')
hourly_test_dataframe = self.data_preparation(hourly_dataframe_filtered,
reference_sites, site_list_internal)
# return
print('imputation of data, returning hourly data')
hourly_imputed_dataframe = self.organise_data_imputation(
hourly_test_dataframe, reference_sites, required_sites, site_list_internal)
print('sorting data (no imputation), returning hourly data')
hourly_reference_dataframe = self.organise_data(hourly_dataframe_filtered, site_list_internal)
# calculate the daily max and mean for each station
daily_reference_dataframe = self.combine_and_organise_mean_max(hourly_reference_dataframe)
daily_imputed_dataframe = self.combine_and_organise_mean_max(hourly_imputed_dataframe)
# calculate the stats for the hourly and daily data, printing out graphs of these
self.imputation_hourly_analysis(hourly_imputed_dataframe,hourly_reference_dataframe,site_list_internal)
self.imputation_daily_analysis(daily_imputed_dataframe,daily_reference_dataframe,site_list_internal)
#if save_to_csv:
# # write this dataset to file
# daily_dataframe.to_csv(self.file_out, index=True, header=True, float_format='%.2f')
#return daily_dataframe
def site_list_and_preparation(self,hourly_dataframe):
"""
Wrapper for the list_required_and_reference_sites routine. This will list sites
based on the given minimum and reference year requirements, then determine which
sites for all species of interest fit the reference year requirements. If none do,
or if 'check_sites' flag is True, then potential sites of use will be listed, and
the program exited.
Args:
hourly_dataframe: hourly dataset, for all measurements, as pandas.Dataframe
Index: none
Required Columns:
timestamp (datetime object):
site_id (string):
Optional Columns:
O3 (float):
PM10 (float):
PM2.5 (float):
NO2 (float):
NOXasNO2 (float):
SO2 (float):
Returns:
hourly_dataframe_filtered: pandas dataframe, as above, containing hourly dataset for only
the reference station datasets
reference_sites_out: (dict, keys are species):
items: (list of strings) the site_id's for our reference sites for each `spc`
minus the sites in the `site_working` list
required_sites: (dict, keys are species):
items: (list of strings) required sites for `spc` (not used later?)
site_working_list: (list, strings) a single list of required sites for the imputation tests
"""
df_part_filtered, reference_sites, required_sites, site_list_internal = \
self.list_required_and_reference_sites(hourly_dataframe)
# create a list of the sites that we can use for the imputation tests for all requested species
site_all = site_list_internal.copy()
combined_reference_site_list = []
for spc in self.species_list:
site_all = set(site_all).intersection(reference_sites[spc])
combined_reference_site_list = combined_reference_site_list + required_sites[spc]
# trim down the database to cover only stations which are references for at least one species
combined_reference_site_list = list(dict.fromkeys(combined_reference_site_list))
hourly_dataframe_filtered = df_part_filtered[df_part_filtered[self._site_string].isin(combined_reference_site_list)]
# get the list of required sites from what is available, and what was requested
site_working_list = set(site_all).intersection(self.site_list)
# checks on what sites are available, and if we use them or not
if self.check_sites or len(site_working_list) == 0:
for spc in self.species_list:
print('for species {} there are {} sites suitable for testing imputation'.
format(spc,len(reference_sites[spc])))
print(reference_sites[spc])
print('there are {} sites suitable for all requested species'.format(len(site_all)))
print(site_all)
if not self.check_sites:
print('Requested sites were: {}'.format(self.site_list))
print('We are exiting because none of the requested sites were suitable for imputation tests for all requested species; see the messages above.')
return pd.DataFrame(), [], [], []
# derive new lists of reference stations, excluding the sites we will use for imputation tests
reference_sites_out = {}
for spc in self.species_list:
reference_sites_out[spc] = [site for site in reference_sites[spc] if site not in site_working_list]
if len(reference_sites_out[spc]) == 0:
print('there are no reference sites for species {}, please set a reduced site list'.format(spc))
return pd.DataFrame(), [], [], []
# success! return the filtered dataframe, and our lists of sites
return hourly_dataframe_filtered, reference_sites_out, required_sites, site_working_list
def data_preparation(self, hourly_dataframe, reference_sites, site_list_internal):
"""
Prepare test data for imputation, by removing the specified amount of data from the test sites.
Args:
hourly_dataframe: hourly dataset, for all measurements, as pandas.Dataframe
Index: none
Required Columns:
timestamp (datetime object):
site_id (string):
Optional Columns:
O3 (float):
PM10 (float):
PM2.5 (float):
NO2 (float):
NOXasNO2 (float):
SO2 (float):
reference_sites: (dict, keys are species):
items: (list of strings) the site_id's for our reference sites for each `spc`
minus the sites in the `site_working` list
site_list_internal: (list, strings) a single list of required sites
Returns:
hourly_dataframe_out: hourly dataset, for all measurements, as pandas.Dataframe, with the required data removed
Index: none
Required Columns:
timestamp (datetime object):
site_id (string):
Optional Columns:
O3 (float):
PM10 (float):
PM2.5 (float):
NO2 (float):
NOXasNO2 (float):
SO2 (float):
"""
# get list of reference sites, which should exclude the sites for testing
reference_site_list = []
for spc in self.species_list:
reference_site_list = reference_site_list + reference_sites[spc]
reference_site_list = list(dict.fromkeys(reference_site_list))
# create dataframe with reference sites only
hourly_dataframe_out = hourly_dataframe[hourly_dataframe[self._site_string].isin(reference_site_list)]
for site in site_list_internal:
print(' filtering site {}'.format(site))
working_dataframe = hourly_dataframe[hourly_dataframe[self._site_string]==site].copy()
data_length = len(working_dataframe)
print('index length is {}'.format(data_length))
            if self.data_loss_position == 'end':
                # data is removed from the end of the record, so keep the first (1 - data_lost) fraction
                start_point = 0
                end_point = int(np.floor(data_length * (1.0 - self.data_lost)))
                working_dataframe = working_dataframe.iloc[start_point:end_point]
elif self.data_loss_position == 'middle':
half_data_retain = (1-self.data_lost)/2
start_point = int(np.floor(data_length * half_data_retain))
end_point = data_length - start_point
working_dataframe_start = working_dataframe.iloc[0:start_point]
working_dataframe_end = working_dataframe.iloc[end_point:data_length]
working_dataframe = working_dataframe_start.append(working_dataframe_end)
            elif self.data_loss_position == 'start':
                # data is removed from the start of the record, so keep the last (1 - data_lost) fraction
                start_point = int(np.ceil(data_length * self.data_lost))
                end_point = data_length
                working_dataframe = working_dataframe.iloc[start_point:end_point]
elif self.data_loss_position == 'random':
data_points_lost = int(np.floor(data_length * self.data_lost))
keeping_samples = np.hstack((
                    np.zeros(data_points_lost, dtype=bool),
                    np.ones(data_length - data_points_lost, dtype=bool)
))
self.rng.shuffle(keeping_samples)
print(keeping_samples)
working_dataframe = working_dataframe.iloc[np.where(keeping_samples)[0]]
else:
print('{} data loss method not implemented yet, keeping all data'.format(self.data_loss_position))
start_point = 0
end_point = data_length
hourly_dataframe_out = hourly_dataframe_out.append(working_dataframe)
return hourly_dataframe_out
def organise_data(self, hourly_dataframe_filtered, site_list_internal):
"""
Function for organising the required datasets. This mirrors the imputation function.
Args:
hourly_dataframe_filtered: hourly dataset, for all measurements, as pandas.Dataframe
Index: none
Required Columns:
timestamp (datetime object):
site_id (string):
Optional Columns:
O3 (float):
PM10 (float):
PM2.5 (float):
NO2 (float):
NOXasNO2 (float):
SO2 (float):
site_list_internal (list, string or int): combined list of sites to retain
Returns:
hourly_dataframe: hourly dataset, for all measurements, as pandas.Dataframe
Required Index:
timestamp (datetime object):
site_id (string):
Optional Columns:
O3 (float):
PM10 (float):
PM2.5 (float):
NO2 (float):
NOXasNO2 (float):
SO2 (float):
O3_flag (int): flag indicating imputed data (0=original,1=imputed)
PM10_flag (int):
PM2.5_flag (int):
NO2_flag (int):
NOXasNO2_flag (int):
SO2_flag (int):
"""
date_index = pd.date_range(start=self.start, end=self.end, freq='1H', name=self._timestamp_string)
output_dataframe = pd.DataFrame()
hourly_dataframe_internal = hourly_dataframe_filtered.set_index(self._timestamp_string)
spc_list = self.species_list
if self.verbose > 1: print('1. Site list internal: ', site_list_internal)
for site in site_list_internal:
if self.verbose > 1: print('2. Site: ', site)
# create new dataframe, with the dates that we are interested in
working_hourly_dataframe = pd.DataFrame([], index=date_index)
working_hourly_dataframe[self._site_string] = site
# copy these to a new dataframe
working_hourly_dataframe[spc_list] = \
hourly_dataframe_internal[hourly_dataframe_internal[self._site_string] == site][spc_list]
            # flag the data points that are missing in the original data (these are the points that would be imputed)
for spc in spc_list:
working_hourly_dataframe['{}_flag'.format(spc)] = 0
working_hourly_dataframe.loc[working_hourly_dataframe[spc].isna(),'{}_flag'.format(spc)] = 1
# append data to the output dataframe
output_dataframe = output_dataframe.append(working_hourly_dataframe)
output_dataframe = output_dataframe.reset_index().set_index([self._timestamp_string,self._site_string])
return(output_dataframe)
def imputation_hourly_analysis(self,hourly_imputed_dataframe,hourly_reference_dataframe,site_list_internal):
"""
Statistical analysis of the hourly results for the imputation of AURN data
Args:
hourly_imputed_dataframe: hourly dataset, for all measurements, as pandas.Dataframe
Index: none
Required Columns:
timestamp (datetime object):
site_id (string):
Optional Columns:
O3 (float):
PM10 (float):
PM2.5 (float):
NO2 (float):
NOXasNO2 (float):
SO2 (float):
O3_flag (int): flag indicating imputed data (0=original,1=imputed)
PM10_flag (int):
PM2.5_flag (int):
NO2_flag (int):
NOXasNO2_flag (int):
SO2_flag (int):
hourly_reference_dataframe: hourly dataset, for all measurements, as pandas.Dataframe
Index: none
Required Columns:
timestamp (datetime object):
site_id (string):
Optional Columns:
O3 (float):
PM10 (float):
PM2.5 (float):
NO2 (float):
NOXasNO2 (float):
SO2 (float):
O3_flag (int): flag indicating imputed data (0=original,1=imputed)
PM10_flag (int): (all these flags should be zero)
PM2.5_flag (int):
NO2_flag (int):
NOXasNO2_flag (int):
SO2_flag (int):
site_list_internal: (list, strings) a single list of required sites
Returns: None
"""
sns.set_style('ticks')
sns.set_context('paper')
sns.despine()
note="""
Analysis of the reliability of the imputation of AURN datasets
at the site {}. This is the original hourly data.
The configuration used for this test is:
start date: {}
end date: {}
fraction of data removed: {}
position in timeseries for data removal: {}
"""
mul_ind = pd.MultiIndex.from_product([site_list_internal,self.species_list],names=[self._site_string,'spc'])
col_headers = ['kendalltau_corr','spearmanr_corr','pearsonr_corr','slope','r_squared','p_value','std_err']
        hourly_stat_dataset = pd.DataFrame(index=mul_ind,columns=col_headers,dtype=float)
for site in site_list_internal:
print('working on site: {}'.format(site))
with PdfPages(self.pdf_file_string.format(self.stat_dir,site,'hourly')) as pdf_pages:
firstPage = plt.figure(figsize=(6,6))
firstPage.clf()
firstPage.text(0.5,0.5,note.format(site,self.start,self.end,self.data_lost,self.data_loss_position),
transform=firstPage.transFigure, size=12, ha="center")
pdf_pages.savefig()
plt.close()
for spc in self.species_list:
print('stats for species: {}'.format(spc))
data_imputed = hourly_imputed_dataframe.loc[(slice(None),site),spc]
data_reference = hourly_reference_dataframe.loc[(slice(None),site),spc]
flag_imputed = hourly_imputed_dataframe.loc[(slice(None),site),'{}_flag'.format(spc)]
# keep only the data which has been imputed
data_imputed = data_imputed[flag_imputed==1]
data_reference = data_reference[flag_imputed==1]
# remove datapoints which were NaN in the original data
data_imputed = data_imputed[data_reference.notna()]
data_reference = data_reference[data_reference.notna()]
# plot scatter
data_combined = pd.DataFrame()
data_combined[spc] = data_reference
data_combined['{} (imputed)'.format(spc)] = data_imputed
min_val = min(min(data_imputed),min(data_reference))
max_val = max(max(data_imputed),max(data_reference))
range_val = max_val - min_val
k_corr, k_pval = stats.kendalltau(data_reference,data_imputed)
s_corr, s_pval = stats.spearmanr(data_reference,data_imputed)
p_corr, p_pval = stats.pearsonr(data_reference,data_imputed)
slope, intercept, r_value, p_value, std_err = stats.linregress(data_reference,data_imputed)
hourly_stat_dataset.loc[(site,spc),col_headers] = [k_corr,s_corr,p_corr,slope,r_value**2,p_value,std_err]
sns_plot = sns.jointplot(data=data_combined,x=spc,y='{} (imputed)'.format(spc),kind="reg")
sns_plot.ax_joint.plot(data_combined['{} (imputed)'.format(spc)],data_combined['{} (imputed)'.format(spc)], 'r-', linewidth=1)
sns_plot.ax_joint.set_xlim(min_val-range_val*0.05,max_val+range_val*0.05)
sns_plot.ax_joint.set_ylim(min_val-range_val*0.05,max_val+range_val*0.05)
sns_plot.ax_joint.text(min_val+range_val*0.1,max_val-range_val*0.1,'KendallTau; corr = {0:.2f}; p = {1:.2f}'.format(k_corr,k_pval))
pdf_pages.savefig(sns_plot.fig)
plt.close()
hourly_stat_dataset.to_csv(self.csv_file_string.format(self.stat_dir,'hourly'), index=True, header=True, float_format=self.float_format)
def imputation_daily_analysis(self,daily_imputed_dataframe,daily_reference_dataframe,site_list_internal):
"""
Statistical analysis of the daily results for the imputation of AURN data
Args:
daily_imputed_dataframe: daily dataset, for all measurements, as pandas.Dataframe
Required MultiIndex:
'time_stamp' (datetime object): date (only) (e.g. 2017-06-01)
'sensor_name' (string): ID string for site (e.g. 'LIN3 [AQ]')
Required columns:
'O3.max' (float): daily maximum value
'O3.mean' (float): daily mean value
'O3.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'PM10.max' (float): daily maximum value
'PM10.mean' (float): daily mean value
'PM10.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'PM2.5.max' (float): daily maximum value
'PM2.5.mean' (float): daily mean value
'PM2.5.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'NO2.max' (float): daily maximum value
'NO2.mean' (float): daily mean value
'NO2.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'NOXasNO2.max' (float): daily maximum value
'NOXasNO2.mean' (float): daily mean value
'NOXasNO2.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
'SO2.max' (float): daily maximum value
'SO2.mean' (float): daily mean value
'SO2.flag' (float): flag to indicate fraction of imputed data
(1 = fully imputed, 0 = no imputed values were used)
daily_reference_dataframe: daily dataset, for all measurements, as pandas.Dataframe,
same layout as 'daily_imputed_dataframe'
site_list_internal: (list, strings) a single list of required sites
Returns: None
"""
sns.set_style('ticks')
sns.set_context('paper')
sns.despine()
note="""
Analysis of the reliability of the imputation of AURN datasets
at the site {}. This is the daily mean and maximum data.
The configuration used for this test is:
start date: {}
end date: {}
fraction of data removed: {}
position in timeseries for data removal: {}
"""
stat_list = ['mean','max']
mul_ind = pd.MultiIndex.from_product([site_list_internal,self.species_list,stat_list],names=[self._site_string,'spc','stat'])
col_headers = ['kendalltau_corr','spearmanr_corr','pearsonr_corr','slope','r_squared','p_value','std_err']
        daily_stat_dataset = pd.DataFrame(index=mul_ind,columns=col_headers,dtype=float)
fill_ranges = [0,0.125,0.25,0.5,1.0]
length_ranges = len(fill_ranges)
for site in site_list_internal:
site_string = "{} [AQ]".format(site)
print('working on site: {}'.format(site))
with PdfPages(self.pdf_file_string.format(self.stat_dir,site,'daily')) as pdf_pages:
firstPage = plt.figure(figsize=(6,6))
firstPage.clf()
firstPage.text(0.5,0.5,note.format(site,self.start,self.end,self.data_lost,self.data_loss_position),
transform=firstPage.transFigure, size=12, ha="center")
pdf_pages.savefig()
plt.close()
for spc in self.species_list:
print('stats for species: {}'.format(spc))
for stat in stat_list:
data_imputed = daily_imputed_dataframe.loc[(slice(None),site_string),'{}_{}'.format(spc,stat)]
data_reference = daily_reference_dataframe.loc[(slice(None),site_string),'{}_{}'.format(spc,stat)]
flag_imputed = daily_imputed_dataframe.loc[(slice(None),site_string),'{}_flag'.format(spc)]
flag_already_missing = daily_reference_dataframe.loc[(slice(None),site_string),'{}_flag'.format(spc)]
                        flag_plot = np.floor(1-flag_already_missing)
from scipy.sparse.linalg import svds, eigsh
from scipy.linalg.interpolative import svd as isvd
import os
import numpy as np
import matplotlib.pyplot as plt
import time
from mpi4py import MPI
import h5py
from tqdm import tqdm
def test_function( N, M , noise=0.001):
"""
    Generate some test data we can use. The noise-free signal is a random combination of the
    first four Legendre polynomials, so the resulting matrix has a (numerical) rank of 4.
"""
x = np.linspace(-1,1,M)
p0 = x*0+1.0
p1 = x
p2 = 0.5*(3*x*x-1)
p3 = 0.5*(5*x*x*x-3*x)
result = []
for ii in range(N):
tmp = np.random.uniform(-1,1,4)
tmp = tmp[0]*p0 + tmp[1]*p1 + tmp[2]*p2 + tmp[3]*p3
tmp = tmp + np.random.normal(0,1.0,M)*noise
result.append(tmp)
result = np.vstack(result)
return result
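# Illustrative check (sizes assumed, not part of the original module): because the noise-free
# signal spans only the first four Legendre polynomials, the singular-value spectrum of the
# generated matrix drops sharply after the fourth value.
#
#     data = test_function(500, 100, noise=1e-3)
#     s = np.linalg.svd(data, compute_uv=False)
#     print(s[:6])   # four large singular values followed by noise-level values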
def invert_permutation(p):
"""Given a permutation, provide an array that undoes the permutation.
"""
s = np.empty(p.size, p.dtype)
s[p] = np.arange(p.size)
return s
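# Illustrative check (values assumed): composing a permutation with its inverse recovers the
# identity ordering in both directions.
#
#     p = np.random.permutation(6)
#     s = invert_permutation(p)
#     assert np.array_equal(p[s], np.arange(6))
#     assert np.array_equal(s[p], np.arange(6))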
class batched_SVD(object):
"""Compute an SVD of all data, but in small batches to overcome memory issues.
Parameters:
----------
data: A pointer to a data object, say a N,M matrix.
N experimental observations of dimension M
N_max: The maximum number of entries in sub block of data
k_singular: The number of singular values to consider
randomize : A flag which determines if the data will be split in a random fashion. True by default
Attributes:
-----------
self.order : The order in which the data will be examined
self.inv_order : The inverse of the above array
self.N_split : The number of batches of data
self.parts : A list of selection arrays
self.partial_svd_u : A list of (truncated) svd matrices (U) from individual batches of data
self.partial_svd_s : A list of (truncated) svd matrices (S) from individual batches of data
self.partial_svd_vt : A list of (truncated) svd matrices (V^T) from individual batches of data
self.partial_bases : A list of (truncated) svd matrices (SV^T) from individual batches of data
Examples:
---------
data = test_function(10000,3000,1.1)
bSVD = batched_SVD(data, 2000, 5, randomize=True)
u,s,vt = bSVD.go_svd()
"""
def __init__(self, data, N_max, k_singular, randomize=True):
self.data = data
self.N_max = N_max
self.k_singular = k_singular
self.randomize = randomize
self.order = np.arange( self.data.shape[0] )
if self.randomize:
np.random.shuffle( self.order )
self.N_split = int( np.floor(self.data.shape[0] / self.N_max) ) +1
self.parts = np.array_split( self.order, self.N_split)
# if we have these numbers in order, we can use them as slices in a hdf5 setting
        # this is only required when we randomize the lot
if self.randomize:
tmp = []
for part in self.parts:
part = np.sort(part)
tmp.append(part)
self.parts = tmp
            self.order = np.concatenate(self.parts)
import numpy as np
import random
import os
import pickle
import math
import seaborn as sns
import matplotlib.pyplot as plt
import copy
from definitions import *
################################## measurement ##########################################
def f1_score(TP,FP,FN):
return 2*TP / (2*TP + FP + FN)
def f1_scores(CM):
output=[f1_score(CM[i,i],np.sum(CM[:,i])-CM[i,i],np.sum(CM[i])-CM[i,i]) for i in range(CM.shape[0])]
return output
def accuracy_scores(CM):
output=[CM[i,i]/np.sum(CM[i]) for i in range(CM.shape[0])]
return output
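# Illustrative usage (values assumed): the confusion matrix convention here is rows = true
# class, columns = predicted class, so f1_scores returns the per-class F1 and accuracy_scores
# returns the per-class recall.
#
#     CM = np.array([[50, 10],
#                    [ 5, 35]])
#     f1_scores(CM)         # class 0: 2*50 / (2*50 + 5 + 10)
#     accuracy_scores(CM)   # class 0: 50 / 60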
################################## venn diagramme ##########################################
from matplotlib_venn import venn2, venn2_circles, venn2_unweighted
from matplotlib_venn import venn3, venn3_circles
def venn_2counts(a,b):
ab=np.intersect1d(a, b)
ab=len(ab)
a_minusb=len(a)-ab
b_minusa=len(b)-ab
return a_minusb, b_minusa,ab
def venn_3counts(a,b,c):
ab=np.intersect1d(a, b)
abc=np.intersect1d(ab, c)
abc_len=len(abc)
ab_minusc=len(ab)-abc_len
bc=np.intersect1d(b, c)
bc_minusa=len(bc)-abc_len
ac=np.intersect1d(a, c)
ac_minusb=len(ac)-abc_len
solo_a=len(a)-(ab_minusc+abc_len+ac_minusb)
solo_b=len(b)-(ab_minusc+abc_len+bc_minusa)
solo_c=len(c)-(bc_minusa+abc_len+ac_minusb)
return solo_a,solo_b,ab_minusc,solo_c,ac_minusb,bc_minusa,abc_len
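# Illustrative usage (ID arrays assumed): the tuples returned above are ordered to match the
# `subsets` argument of matplotlib_venn, i.e. (A-only, B-only, A&B) for venn2 and
# (A-only, B-only, A&B-only, C-only, A&C-only, B&C-only, A&B&C) for venn3.
#
#     a, b, c = np.array([1, 2, 3, 4]), np.array([3, 4, 5]), np.array([4, 5, 6])
#     venn2(subsets=venn_2counts(a, b), set_labels=('a', 'b'))
#     venn3(subsets=venn_3counts(a, b, c), set_labels=('a', 'b', 'c'))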
################################## Investigating misclassified features/probs ##########################################
def full_sub_idx_creator(b,sub_b,test_lens):
sub_b_idx=np.array([np.where(b==sub_b[i])[0][0] for i in range(len(sub_b))],dtype='int')
full_idxs=full_idx_creator(test_lens)
return [full_idxs[sub_b_idx[j]] for j in range(len(sub_b_idx))]
def sub_idx_creator(b,sub_b):
sub_b_idx=np.array([np.where(b==sub_b[i])[0][0] for i in range(len(sub_b))],dtype='int')
return sub_b_idx
def full_correct_wrong_idx_features(correct_ids,ids,lens,probs=None,X=None,y=None):
false_ids=np.array([ids[i] for i in range(len(ids)) if ids[i] not in correct_ids],dtype='int')
full_correct_idxs=full_sub_idx_creator(ids,correct_ids,lens)
full_wrong_idxs=full_sub_idx_creator(ids,false_ids,lens)
if probs is not None:
assert (X is not None) & (y is not None)
correct_probs=[probs[full_correct_idxs[i]] for i in range(len(full_correct_idxs))]
wrong_probs=[probs[full_wrong_idxs[i]] for i in range(len(full_wrong_idxs))]
correct_features=[X[full_correct_idxs[i]] for i in range(len(full_correct_idxs))]
wrong_features=[X[full_wrong_idxs[i]] for i in range(len(full_wrong_idxs))]
y_wrong=[np.array(y,dtype='int')[full_wrong_idxs[i]][0] for i in range(len(full_wrong_idxs))]
return full_correct_idxs,full_wrong_idxs,correct_probs,wrong_probs,correct_features,wrong_features,y_wrong
else:
return full_correct_idxs,full_wrong_idxs
################################### introduce new metrics for counting how many severity cases ##################################
def cutoff(data,threds=[5,10]):
num_kind=len(threds)
if num_kind==len(data):
output=np.array([len(np.where(data[i]>threds[i])[0]) for i in range(num_kind)])
if num_kind>2:
output[2]=len(np.where(data[2]<threds[2])[0])
else:
output=np.array([len(np.where(data[:,i]>threds[i])[0]) for i in range(num_kind)])
if num_kind>2:
output[2]=len(np.where(data[:,2]<threds[2])[0])
return output
def cutoff_proportion(data,threds=[5,10]):
num_kind=len(threds)
lens=len(data[0])
if num_kind==len(data):
output=np.array([len(np.where(data[i]>threds[i])[0])/lens for i in range(num_kind)])
if num_kind>2:
output[2]=len(np.where(data[2]<threds[2])[0])/lens
else:
output=np.array([len(np.where(data[:,i]>threds[i])[0])/lens for i in range(num_kind)])
if num_kind>2:
output[2]=len(np.where(data[:,2]<threds[2])[0])/lens
return output
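# Illustrative usage (thresholds assumed): with threds=[5, 10], the first variable is counted
# above 5 and the second above 10; if a third threshold is given, the third variable is counted
# *below* its threshold instead. cutoff_proportion divides the same counts by the series length.
#
#     data = [np.array([6, 4, 7, 1]), np.array([12, 8, 20, 2])]   # one series per variable
#     cutoff(data, threds=[5, 10])              # -> array([2, 2])
#     cutoff_proportion(data, threds=[5, 10])   # -> array([0.5, 0.5])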
def mean_severity_feature(minlen=20,data_type='weekly',job='cla',proportion=True,threds=[5,10]):
mean_severity=[]
pseudo_sliding_mean_severity=[]
y_=[]
job_int=int(job=='reg')
X_Original=load_pickle(DATA_interim+'participants_class_'+data_type+'.pkl')
for j in range(len(X_Original)):
len_=len(X_Original[j].data[0])
if len_>=minlen+job_int:
if proportion:
mean_severity_=cutoff_proportion(X_Original[j].data,threds=threds)
else:
mean_severity_=cutoff(X_Original[j].data)
mean_severity.append(mean_severity_)
y_.append(X_Original[j].diagnosis)
for start in np.arange(len_-minlen-job_int+1):
pseudo_sliding_mean_severity.append(mean_severity_)
mean_severity=np.array(mean_severity)
pseudo_sliding_mean_severity=np.array(pseudo_sliding_mean_severity)
return mean_severity,y_, pseudo_sliding_mean_severity
def sliding_mean_severity_feature(X_original,proportion=True):
if proportion:
sliding_mean_severity=[cutoff_proportion(X_original[j]) for j in range(len(X_original))]
else:
sliding_mean_severity=[cutoff(X_original[j]) for j in range(len(X_original))]
return np.asarray(sliding_mean_severity)
################################## For spectrums on triangles ##########################################
def prob_individual(probs):
score=np.zeros(probs.shape[-1])
preds=np.argmax(probs,axis=1)
for i in range(len(score)):
score[i]+=len(np.where(preds==i)[0])
return score/len(preds)
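# Illustrative usage (probability matrix assumed): each row is one window's class probabilities;
# the returned vector is the fraction of windows whose argmax falls on each class.
#
#     probs = np.array([[0.7, 0.2, 0.1],
#                       [0.1, 0.8, 0.1],
#                       [0.6, 0.3, 0.1],
#                       [0.2, 0.3, 0.5]])
#     prob_individual(probs)   # -> array([0.5, 0.25, 0.25])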
def trianglePoints_generator(lens,probs,y,mental_dict={0:"borderline",1:"healthy",2:"bipolar"}):
full_idxs=full_idx_creator(lens)
preds=np.array([prob_individual(probs[full_idxs[i]]) for i in range(len(lens))])
y_labels=np.array([y[full_idxs[i][0]] for i in range(len(lens))],dtype='int')
trianglePoints={ "borderline": [],
"healthy": [],
"bipolar": []}
for j in range(len(y_labels)):
trianglePoints[mental_dict[y_labels[j]]].append(preds[j])
return trianglePoints
def plotDensityMap(scores,title=None):
"""Plots, given a set of scores, the density map on a triangle.
Parameters
----------
scores : list
List of scores, where each score is a 3-dimensional list.
"""
TRIANGLE = np.array([[math.cos(math.pi*0.5), math.sin(math.pi*0.5)],
[math.cos(math.pi*1.166), math.sin(math.pi*1.166)],
[math.cos(math.pi*1.833), math.sin(math.pi*1.833)]])
# scores1=[]
# for score in scores:
# for element in score:
# scores1.append(element)
pointsX = [score.dot(TRIANGLE)[0] for score in scores]
pointsY = [score.dot(TRIANGLE)[1] for score in scores]
vertices = []
vertices.append(np.array([1,0,0]).dot(TRIANGLE))
vertices.append(np.array([0,1,0]).dot(TRIANGLE))
vertices.append(np.array([0,0,1]).dot(TRIANGLE))
for i in range(3):
p1 = vertices[i]
if i == 2:
p2 = vertices[0]
else:
p2 = vertices[i+1]
c = 0.5 * (p1 + p2)
plt.plot([p1[0], p2[0]], [p1[1], p2[1]], color='k', linestyle='-', linewidth=2)
plt.plot([0, c[0]], [0, c[1]], color='k', linestyle='-', linewidth=1)
ax = plt.gca()
ax.set_xlim([-1.2, 1.32])
ax.set_ylim([-0.7,1.3])
ax.text(0.8, -0.6,"bipolar")
ax.text(-1.1, -0.6, "healthy" )
ax.text(-0.15, 1.05, "borderline")
data = [[pointsX[i], pointsY[i]] for i in range(len(pointsX))]
    H, _, _ = np.histogram2d(pointsX, pointsY, bins=40, density=True)
norm=H.max()-H.min()
contour1=0.75
target1=norm*contour1+H.min()
def objective(limit, target):
w = np.where(H>limit)
count = H[w]
return count.sum() - target
# level1 = scipy.optimize.bisect(objective, H.min(), H.max(), args=(target1,))
# levels = [level1]
sns.kdeplot(np.array(pointsX), np.array(pointsY),shade=True, ax=ax)
    sns.kdeplot(np.array(pointsX), np.array(pointsY), ax=ax)
import pandas as pd
import numpy as np
import math
import tensorflow as tf
from tensorflow import keras
from tensorflow.compat.v1.keras import backend as K
'''
Data preprocessing for ConvLSTM with a rolling window.
Loads rolling, sequenced data and removes the first 12 months of output observations and the last month of input observations.
This allows the network to use months 1-12 of input data to predict the output for month 13, months 2-13 to predict the output for month 14, etc.
'''
# Data loading
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/GitHub/preprocessing/convlstm/Filled Sequence Data/x_train_convlstm_filled_seq_final.npy", "rb") as f:
X_train = np.load(f)
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/GitHub/preprocessing/convlstm/Filled Sequence Data/y_train_convlstm_filled_seq_final.npy", "rb") as f:
y_train = np.load(f)
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/GitHub/preprocessing/convlstm/Filled Sequence Data/x_test_convlstm_filled_seq_final.npy", "rb") as f:
X_test = np.load(f)
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/GitHub/preprocessing/convlstm/Filled Sequence Data/y_test_convlstm_filled_seq_final.npy", "rb") as f:
y_test = np.load(f)
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/plotting/real_ice_extents.npy", "rb") as f:
y_extent = np.load(f)
#Split to ~80%-20% train-test split
train_months=408
# 408 months (34 years) train - 96 months (8 years) test
y_extent_train=y_extent[:train_months]
y_extent_test=y_extent[train_months:504]
# convert nan values to 0
X_train = np.nan_to_num(X_train)
y_train = np.nan_to_num(y_train)
from core.util_classes import robot_predicates
from sco_py.expr import Expr, AffExpr, EqExpr, LEqExpr
from core.util_classes.pr2_sampling import (
ee_reachable_resample,
resample_bp_around_target,
)
import core.util_classes.pr2_constants as const
from collections import OrderedDict
from openravepy import DOFAffine
import numpy as np
"""
This file defines specific PR2-related predicates.
"""
# Attributes used in pr2 domain. (Tuple to avoid changes to the attr_inds)
ATTRMAP = {
"Robot": (
("backHeight", | np.array([0], dtype=np.int) | numpy.array |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by <NAME> from the official pycls codebase in order to add the AL functionality
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
import argparse
import numpy as np
import os
import optuna
import sys
import torch
import pickle
import subprocess as sp
import copy
from pycls.core.config import assert_cfg
from pycls.core.config import dump_cfg
from pycls.core.config import custom_dump_cfg
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from pycls.utils.meters import ValMeter
import pycls.core.losses as losses
import pycls.core.model_builder as model_builder
import pycls.core.optimizer as optim
import pycls.utils.benchmark as bu
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.loader as imagenet_loader
from helper.args_util import get_main_args
from helper.args_util import parse_args
from helper.args_util import get_al_args
from helper.subprocess_utils import vaal_sampling_util
from helper.subprocess_utils import active_sampling
from helper.subprocess_utils import test_net_subprocess_call
from helper.subprocess_utils import SWA_subprocess_call
from helper.path_extractor import get_latest_model_path
from helper.path_extractor import get_best_model_path
from helper.path_extractor import update_lset_uset_paths
logger = lu.get_logger(__name__)
plot_epoch_xvalues = []
plot_epoch_yvalues = []
plot_it_xvalues = []
plot_it_y_values = []
def plot_arrays(cfg, x_vals, y_vals, x_name, y_name, dataset_name, isDebug=False):
"""Basic utility to plot X vs Y line graphs.
Args:
cfg: Reference to the config yaml
x_vals: values on x-axis
y_vals: values on y-axis
x_name: Label on x-axis
y_name: Label on y-axis
dataset_name: Dataset name.
isDebug (bool, optional): Switch for debug mode. Defaults to False.
"""
if not du.is_master_proc(cfg):
return
import matplotlib.pyplot as plt
temp_name = "{}_vs_{}".format(x_name, y_name)
plt.xlabel(x_name)
plt.ylabel(y_name)
plt.title("Dataset: {}; {}".format(dataset_name, temp_name))
plt.plot(x_vals, y_vals)
if isDebug:
print(f"plot_saved at {cfg.OUT_DIR+temp_name}.png")
if cfg.TRAIN.TRANSFER_EXP:
temp_path = (
os.path.join(
"transfer_experiment",
cfg.MODEL.TRANSFER_MODEL_TYPE
+ "_depth_"
+ str(cfg.MODEL.TRANSFER_MODEL_DEPTH),
)
+ "/"
)
plt.savefig(cfg.OUT_DIR + temp_path + temp_name + ".png")
plt.savefig(cfg.OUT_DIR + temp_name + ".png")
plt.close()
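# Illustrative call (values assumed): writes a simple loss-vs-epoch curve into cfg.OUT_DIR.
#
#     plot_arrays(cfg, x_vals=[1, 2, 3], y_vals=[2.3, 1.7, 1.2],
#                 x_name="Epochs", y_name="Loss", dataset_name="CIFAR10")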
def save_plot_values(
cfg, temp_arrays, temp_names, isParallel=True, saveInTextFormat=False, isDebug=True
):
"""Saves arrays provided in the list in npy format"""
# return if not master process
if isParallel:
if not du.is_master_proc(cfg):
return
for i in range(len(temp_arrays)):
temp_arrays[i] = np.array(temp_arrays[i])
temp_dir = cfg.OUT_DIR
if cfg.TRAIN.TRANSFER_EXP:
temp_dir += (
os.path.join(
"transfer_experiment",
cfg.MODEL.TRANSFER_MODEL_TYPE
+ "_depth_"
+ str(cfg.MODEL.TRANSFER_MODEL_DEPTH),
)
+ "/"
)
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
if saveInTextFormat:
if isDebug:
print(
f"Saving {temp_names[i]} at {temp_dir+temp_names[i]}.txt in text format!!"
)
np.savetxt(temp_dir + temp_names[i] + ".txt", temp_arrays[i], fmt="%d")
else:
if isDebug:
print(
f"Saving {temp_names[i]} at {temp_dir+temp_names[i]}.npy in numpy format!!"
)
np.save(temp_dir + temp_names[i] + ".npy", temp_arrays[i])
def is_eval_epoch(cfg, cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or (
cur_epoch + 1
) == cfg.OPTIM.MAX_EPOCH
def log_model_info(model):
"""Logs model info"""
logger.info("Model:\n{}".format(model))
logger.info("Params: {:,}".format(mu.params_count(model)))
logger.info("Flops: {:,}".format(mu.flops_count(model)))
def train_epoch(
train_loader,
model,
loss_fun,
optimizer,
train_meter,
cur_epoch,
cfg,
clf_iter_count,
clf_change_lr_iter,
clf_max_iter,
):
"""Performs one epoch of training."""
if cfg.NUM_GPUS > 1:
train_loader.sampler.set_epoch(cur_epoch)
# Update the learning rate
lr = optim.get_epoch_lr(cfg, cur_epoch)
if cfg.OPTIM.TYPE == "sgd":
optim.set_lr(optimizer, lr)
# Enable training mode
model.train()
train_meter.iter_tic() # This basically notes the start time in timer class defined in utils/timer.py
len_train_loader = len(train_loader)
for cur_iter, (inputs, labels) in enumerate(train_loader):
# ensuring that inputs are floatTensor as model weights are
inputs = inputs.type(torch.cuda.FloatTensor)
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Perform the forward pass
preds = model(inputs)
# Compute the loss
loss = loss_fun(preds, labels)
# Perform the backward pass
optimizer.zero_grad()
loss.backward()
        # Update the parameters
optimizer.step()
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the stats across the GPUs
if cfg.NUM_GPUS > 1:
# Average error and losses across GPUs
            # Also this calls the wait method on reductions so we are ensured
# to obtain synchronized results
loss, top1_err = du.scaled_all_reduce(cfg, [loss, top1_err])
# Copy the stats from GPU to CPU (sync point)
loss, top1_err = loss.item(), top1_err.item()
# #ONLY MASTER PROCESS SHOULD WRITE TO TENSORBOARD
if du.is_master_proc(cfg):
            if cur_iter != 0 and cur_iter % 5 == 0:
# because cur_epoch starts with 0
plot_it_xvalues.append((cur_epoch) * len_train_loader + cur_iter)
plot_it_y_values.append(loss)
save_plot_values(
cfg,
[plot_it_xvalues, plot_it_y_values],
["plot_it_xvalues.npy", "plot_it_y_values.npy"],
isDebug=False,
)
plot_arrays(
cfg,
x_vals=plot_it_xvalues,
y_vals=plot_it_y_values,
x_name="Iterations",
y_name="Loss",
dataset_name=cfg.TRAIN.DATASET,
)
# Compute the difference in time now from start time initialized just before this for loop.
train_meter.iter_toc()
train_meter.update_stats(
top1_err=top1_err, loss=loss, lr=lr, mb_size=inputs.size(0) * cfg.NUM_GPUS
)
train_meter.log_iter_stats(cur_epoch, cur_iter)
train_meter.iter_tic()
# Log epoch stats
train_meter.log_epoch_stats(cur_epoch)
train_meter.reset()
return loss, clf_iter_count
@torch.no_grad()
def test_epoch(cfg, test_loader, model, test_meter, cur_epoch):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
misclassifications = 0.0
totalSamples = 0.0
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
inputs = inputs.type(torch.cuda.FloatTensor)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err = du.scaled_all_reduce(cfg, [top1_err])
# as above returns a list
top1_err = top1_err[0]
# Copy the errors from GPU to CPU (sync point)
top1_err = top1_err.item()
# Multiply by Number of GPU's as top1_err is scaled by 1/Num_GPUs
misclassifications += top1_err * inputs.size(0) * cfg.NUM_GPUS
totalSamples += inputs.size(0) * cfg.NUM_GPUS
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err=top1_err, mb_size=inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
test_meter.log_epoch_stats(cur_epoch)
test_meter.reset()
return misclassifications / totalSamples
def train_model(
best_val_acc,
best_val_epoch,
trainDataset,
valDataset,
dataObj,
cfg,
trial,
isPruning,
):
"""Trains the model."""
global plot_epoch_xvalues
global plot_epoch_yvalues
global plot_it_xvalues
global plot_it_y_values
plot_epoch_xvalues = []
plot_epoch_yvalues = []
plot_it_xvalues = []
plot_it_y_values = []
# Build the model (before the loaders to speed up debugging)
model = model_builder.build_model(
cfg, active_sampling=cfg.ACTIVE_LEARNING.ACTIVATE, isDistributed=True
)
# Define the loss function
if cfg.TRAIN.IMBALANCED:
if cfg.TRAIN.DATASET == "IMAGENET":
raise NotImplementedError
temp_lSet, _, _ = dataObj.loadPartitions(
lSetPath=cfg.ACTIVE_LEARNING.LSET_PATH,
uSetPath=cfg.ACTIVE_LEARNING.USET_PATH,
valSetPath=cfg.ACTIVE_LEARNING.VALSET_PATH,
)
temp_weights = dataObj.getClassWeightsFromDataset(
dataset=trainDataset, index_set=temp_lSet, bs=cfg.TRAIN.BATCH_SIZE
)
# print(f"temp_weights: {temp_weights}")
loss_fun = torch.nn.CrossEntropyLoss(
weight=temp_weights.cuda(torch.cuda.current_device())
)
print("Weighted cross entropy loss chosen as loss function")
print(
"Sum of weights: {} and weights.shape: {}".format(
torch.sum(temp_weights), temp_weights.shape
)
)
else:
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(cfg, model)
print("========OPTIMIZER========")
print("optimizer: {}".format(optimizer))
print("=========================")
start_epoch = 0
# Load initial weights if there are any
if cfg.TRAIN.WEIGHTS:
start_epoch = cu.load_checkpoint(cfg, cfg.TRAIN.WEIGHTS, model, optimizer)
logger.info("=================================")
logger.info("Loaded initial weights from: {}".format(cfg.TRAIN.WEIGHTS))
logger.info("Base LR: {}".format(cfg.OPTIM.BASE_LR))
logger.info("=================================")
# If active learning mode then there has to be some starting point model
if cfg.ACTIVE_LEARNING.ACTIVATE:
if cfg.TRAIN.DATASET in ["CIFAR10", "CIFAR100", "SVHN", "MNIST", "STL10"]:
print("==================================")
print(
"We are not finetuning over the provided dataset {}".format(
cfg.TRAIN.DATASET
)
)
print(
"So Although we can load best model from path: {} -- but we won't do on CIFAR datsets".format(
cfg.ACTIVE_LEARNING.MODEL_LOAD_DIR
)
)
print("Exiting model loafing function")
print("==================================")
else:
cu.load_checkpoint(cfg, cfg.ACTIVE_LEARNING.MODEL_LOAD_DIR, model)
logger.info("=================================")
logger.info(
"Loaded initial weights from: {}".format(
cfg.ACTIVE_LEARNING.MODEL_LOAD_DIR
)
)
logger.info("Base LR: {}".format(cfg.OPTIM.BASE_LR))
logger.info("=================================")
# check if randAug activated
if cfg.RANDAUG.ACTIVATE:
print("==========================================")
print(
"RandAug activated with N: {} and M: {}".format(
cfg.RANDAUG.N, cfg.RANDAUG.M
)
)
print("==========================================")
# Compute precise time
if start_epoch == 0 and cfg.PREC_TIME.ENABLED:
logger.info("Computing precise time...")
bu.compute_precise_time(model, loss_fun)
nu.reset_bn_stats(model)
# Create data loaders
lSet = []
uSet = []
# handles when we pass cifar/svhn datasets
if cfg.TRAIN.DATASET in ["CIFAR10", "CIFAR100", "SVHN", "MNIST", "STL10"]:
# get partitions
lSet, uSet, valSet = dataObj.loadPartitions(
lSetPath=cfg.ACTIVE_LEARNING.LSET_PATH,
uSetPath=cfg.ACTIVE_LEARNING.USET_PATH,
valSetPath=cfg.ACTIVE_LEARNING.VALSET_PATH,
)
print("====== Partitions Loaded =======")
print("lSet: {}, uSet:{}, valSet: {}".format(len(lSet), len(uSet), len(valSet)))
print("================================")
train_loader = dataObj.getDistributedIndexesDataLoader(
cfg=cfg,
indexes=lSet,
batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
data=trainDataset,
n_worker=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=True,
)
valSetLoader = dataObj.getDistributedIndexesDataLoader(
cfg=cfg,
indexes=valSet,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
data=valDataset,
n_worker=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False,
allowRepeat=False,
)
# Loading test partition
logger.info("==== Loading TestDataset ====")
oldmode = dataObj.eval_mode
dataObj.eval_mode = True
testDataset, n_TestDatapts = dataObj.getDataset(
save_dir=cfg.TEST_DIR, isTrain=False, isDownload=True
)
print("Number of testing datapoints: {}".format(n_TestDatapts))
test_loader = dataObj.getDistributedIndexesDataLoader(
cfg=cfg,
indexes=None,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
data=testDataset,
n_worker=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False,
allowRepeat=False,
)
dataObj.eval_mode = oldmode
elif cfg.TRAIN.DATASET == "IMAGENET":
logger.info("==========================")
logger.info("Trying to load imagenet dataset")
logger.info("==========================")
train_loader, valSetLoader = imagenet_loader.get_data_loaders(cfg)
test_loader = imagenet_loader.construct_test_loader(cfg)
else:
logger.info(f"Dataset {cfg.TRAIN.DATASET} currently not supported")
raise NotImplementedError
# Create meters
train_meter = TrainMeter(len(train_loader), cfg)
val_meter = ValMeter(len(valSetLoader), cfg)
test_meter = TestMeter(len(test_loader), cfg)
# Perform the training loop
print("Len(train_loader): {}".format(len(train_loader)))
logger.info("Start epoch: {}".format(start_epoch + 1))
val_set_acc = 0.0
temp_best_val_acc = 0.0
temp_best_val_epoch = 0
##best checkpoint states
best_model_state = None
best_opt_state = None
val_acc_epochs_x = []
val_acc_epochs_y = []
clf_train_iterations = cfg.OPTIM.MAX_EPOCH * int(len(lSet) / cfg.TRAIN.BATCH_SIZE)
clf_change_lr_iter = clf_train_iterations // 25
clf_iter_count = 0
for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH):
# # Train for one epoch
train_loss, clf_iter_count = train_epoch(
train_loader,
model,
loss_fun,
optimizer,
train_meter,
cur_epoch,
cfg,
clf_iter_count,
clf_change_lr_iter,
clf_train_iterations,
)
# Compute precise BN stats
if cfg.BN.USE_PRECISE_STATS:
nu.compute_precise_bn_stats(model, train_loader)
# # Evaluate the model
if is_eval_epoch(cfg, cur_epoch):
# Original code passes on testLoader but we want to compute on val Set
val_set_err = test_epoch(cfg, valSetLoader, model, val_meter, cur_epoch)
val_set_acc = 100.0 - val_set_err
if temp_best_val_acc < val_set_acc:
temp_best_val_acc = val_set_acc
temp_best_val_epoch = cur_epoch + 1
# Save best model and optimizer state for checkpointing
model.eval()
best_model_state = (
model.module.state_dict()
if cfg.NUM_GPUS > 1
else model.state_dict()
)
best_opt_state = optimizer.state_dict()
model.train()
# log if master process
if du.is_master_proc(cfg):
# as we start from 0 epoch
val_acc_epochs_x.append(cur_epoch + 1)
val_acc_epochs_y.append(val_set_acc)
#######################
# Save a checkpoint
######################
if cfg.TRAIN.DATASET == "IMAGENET" and cu.is_checkpoint_epoch(cfg, cur_epoch):
# named_save_checkpoint saves model with cur_epoch+1 in name
checkpoint_file = cu.named_save_checkpoint(
cfg, "valSet_acc_" + str(val_set_acc), model, optimizer, cur_epoch
)
logger.info("Wrote checkpoint to: {}".format(checkpoint_file))
# ##Tensorboard for loss vs epoch
if du.is_master_proc(cfg):
plot_epoch_xvalues.append(cur_epoch)
plot_epoch_yvalues.append(train_loss)
save_plot_values(
cfg,
[
plot_epoch_xvalues,
plot_epoch_yvalues,
plot_it_xvalues,
plot_it_y_values,
val_acc_epochs_x,
val_acc_epochs_y,
],
[
"plot_epoch_xvalues.npy",
"plot_epoch_yvalues.npy",
"plot_it_xvalues.npy",
"plot_it_y_values.npy",
"val_acc_epochs_x",
"val_acc_epochs_y",
],
isDebug=False,
)
logger.info("Successfully logged numpy arrays!!")
##PLOT arrays
plot_arrays(
cfg,
x_vals=plot_epoch_xvalues,
y_vals=plot_epoch_yvalues,
x_name="Epochs",
y_name="Loss",
dataset_name=cfg.TRAIN.DATASET,
)
plot_arrays(
cfg,
x_vals=val_acc_epochs_x,
y_vals=val_acc_epochs_y,
x_name="Epochs",
y_name="Validation accuracy",
dataset_name=cfg.TRAIN.DATASET,
)
print("~~~ isPruning Flag: ", isPruning)
print("~~~ isEvalEpoch: ", is_eval_epoch(cfg, cur_epoch))
if (
isPruning
and cur_epoch != 0
and (cur_epoch % 20 == 0)
and is_eval_epoch(cfg, cur_epoch)
):
print("======================================\n")
print("Inside pruning: -- ", isPruning)
print("======================================\n")
trial.report(val_set_acc, cur_epoch)
if trial.should_prune():
print("======================================\n")
print("Getting pruned!!")
print("======================================\n")
raise optuna.exceptions.TrialPruned()
save_plot_values(
cfg,
[
plot_epoch_xvalues,
plot_epoch_yvalues,
plot_it_xvalues,
plot_it_y_values,
val_acc_epochs_x,
val_acc_epochs_y,
],
[
"plot_epoch_xvalues.npy",
"plot_epoch_yvalues.npy",
"plot_it_xvalues.npy",
"plot_it_y_values.npy",
"val_acc_epochs_x",
"val_acc_epochs_y",
],
)
if du.is_master_proc(cfg):
# update shared variable -- iff process is master process
# if distributed training
if cfg.NUM_GPUS > 1:
best_val_acc.value = temp_best_val_acc
best_val_epoch.value = temp_best_val_epoch
else:
best_val_acc = temp_best_val_acc
best_val_epoch = temp_best_val_epoch
"""
SAVES the best model checkpoint
"""
checkpoint_file = cu.state_save_checkpoint(
cfg=cfg,
info="vlBest_acc_" + str(temp_best_val_acc),
model_state=best_model_state,
optimizer_state=best_opt_state,
epoch=temp_best_val_epoch,
)
logger.info("Wrote checkpoint to: {}".format(checkpoint_file))
if not cfg.NUM_GPUS > 1:
return best_val_acc, best_val_epoch
def single_proc_train(
val_acc, val_epoch, trainDataset, valDataset, dataObj, cfg, trial, isPruning
):
"""Performs single process training."""
# Setup logging
lu.setup_logging(cfg)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Train the model
if cfg.NUM_GPUS > 1:
train_model(
val_acc, val_epoch, trainDataset, valDataset, dataObj, cfg, trial, isPruning
)
else:
return train_model(
val_acc, val_epoch, trainDataset, valDataset, dataObj, cfg, trial, isPruning
)
def ensemble_sampling(
args,
cfg,
main_args,
temp_out_dir,
trainDataset,
valDataset,
noAugDataset,
dataObj,
debug=True,
):
temp_cfg = copy.deepcopy(cfg)
if debug:
logger.info("Inside Ensemble sampling function")
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(main_args)
num_ensembles = args.num_ensembles
ENS_DIR_SUFFIX = "ens_model_"
current_device = 0
# train num_ensemble models
print("==========================")
print(f"Num_Ensembles: {num_ensembles}")
print(f"main_args: {main_args}")
print(f"initial temp_out_dir: {temp_out_dir}")
print(f"cfg.ACTIVE_LEARNING.ACTIVATE: {cfg.ACTIVE_LEARNING.ACTIVATE}")
print(f"cfg.ACTIVE_LEARNING.LSET_PATH: {cfg.ACTIVE_LEARNING.LSET_PATH}")
print(f"cfg.ACTIVE_LEARNING.USET_PATH: {cfg.ACTIVE_LEARNING.USET_PATH}")
print(f"cfg.ACTIVE_LEARNING.VALSET_PATH: {cfg.ACTIVE_LEARNING.VALSET_PATH}")
print(f"cfg.ACTIVE_LEARNING.SAMPLING_FN: {cfg.ACTIVE_LEARNING.SAMPLING_FN}")
print("==========================")
model_paths = []
for i in range(num_ensembles):
print("=== Training ensemble [{}/{}] ===".format(i + 1, num_ensembles))
cfg.defrost() # to make cfg mutable
"""
Switch off any regularization if there is any
"""
print(f"Rand_Aug was switched to {cfg.RANDAUG.ACTIVATE}")
if cfg.RANDAUG.ACTIVATE:
cfg.RANDAUG.ACTIVATE = False
print(f"Setting RandAug to --> {cfg.RANDAUG.ACTIVATE}")
print(f"SWA was switched to {cfg.SWA_MODE.ACTIVATE}")
if cfg.SWA_MODE.ACTIVATE:
cfg.SWA_MODE.ACTIVATE = False
print(f"Setting SWA MODE to --> {cfg.SWA_MODE.ACTIVATE}")
cfg.OPTIM.MAX_EPOCH = args.ens_epochs
print(f"Max epochs for training ensemble: {cfg.OPTIM.MAX_EPOCH}")
cfg.RNG_SEED += i
cfg.ACTIVE_LEARNING.BUDGET_SIZE = args.budget_size
cfg.TEST.BATCH_SIZE = args.test_batch_size
cfg.TEST.DATASET = args.dataset
cfg.TRAIN.BATCH_SIZE = args.train_batch_size
cfg.TRAIN.DATASET = args.dataset
cfg.TRAIN.EVAL_PERIOD = args.eval_period
cfg.TRAIN.CHECKPOINT_PERIOD = args.checkpoint_period
cfg.TRAIN.IMBALANCED = args.isimbalanced
cfg.ENSEMBLE.NUM_MODELS = num_ensembles
cfg.ENSEMBLE.MODEL_TYPE = [str(cfg.MODEL.TYPE)]
print(f"====== Ensemble OPTIM LR: {cfg.OPTIM.BASE_LR}=====")
print("=== SEED: {} ===".format(cfg.RNG_SEED))
cfg.OUT_DIR = temp_out_dir + ENS_DIR_SUFFIX + str(i + 1) + "/"
model_paths.append(cfg.OUT_DIR)
print(f"cfg.OUT_DIR: {cfg.OUT_DIR}")
print(f"cfg.ACTIVE_LEARNING.BUDGET_SIZE: {cfg.ACTIVE_LEARNING.BUDGET_SIZE}")
if os.path.exists(cfg.OUT_DIR):
print(
f"Skipping ensemble {i+1} learning as it already exists: {cfg.OUT_DIR}"
)
else:
al_main(cfg, args, trainDataset, valDataset, dataObj, None, isSkipCfg=True)
cfg.defrost()
if debug:
print(f"[Before] model_paths: {model_paths}")
model_paths = [
get_best_model_path(None, [], 0, "", False, directPath=md_path)
for md_path in model_paths
]
if debug:
print(f"[After] model_paths: {model_paths}")
temp_args = [model_paths, num_ensembles, noAugDataset, dataObj, temp_out_dir]
active_sampling(cfg, ensemble_args=temp_args, debug=False)
# Get original CFG back
cfg = copy.deepcopy(temp_cfg)
return 0
# this calls distributed training
def al_main(
cfg, args, trainDataset, valDataset, dataObj, al_args=None, isSkipCfg=False
):
"""Main function running AL cycles"""
if not isSkipCfg:
# Load config options
cfg.merge_from_file(args.cfg_file)
if al_args is not None:
cfg.merge_from_list(al_args)
assert_cfg()
cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg(cfg)
# Perform training
if cfg.NUM_GPUS > 1:
print("============================")
print("Number of Gpus available for multiprocessing: {}".format(cfg.NUM_GPUS))
print("============================")
best_val_acc, best_val_epoch = mpu.multi_proc_run(
num_proc=cfg.NUM_GPUS,
fun=single_proc_train,
fun_args=(trainDataset, valDataset, dataObj, cfg, 0, True),
)
else:
temp_val_acc = 0.0
temp_val_epoch = 0
# val_acc, val_epoch, trainDataset, valDataset, dataObj, cfg
best_val_acc, best_val_epoch = single_proc_train(
temp_val_acc,
temp_val_epoch,
trainDataset,
valDataset,
dataObj,
cfg,
0,
True,
)
cfg.defrost() # Make cfg mutable for other operations
return best_val_acc, best_val_epoch
def main(cfg):
# Parse cmd line args
args = parse_args()
best_val_accuracies = []
test_accuracies = []
test_model_paths = [] # For verification purposes
best_val_epochs = []
temp_model_path = ""
al_model_phase = args.al_mode
print("== al_model_phase: {} ==".format(al_model_phase))
al_start = args.init_partition
sampling_fn = args.sampling_fn if al_model_phase else None
dataset_name = args.dataset
if al_model_phase:
al_step = args.step_partition
al_stop = al_start + args.al_max_iter * al_step
data_splits = [round(i, 1) for i in np.arange(al_start, al_stop, al_step)]
else:
data_splits = [args.init_partition]
al_max_iter = len(data_splits)
i_start = 1 if al_max_iter > 1 else 0
# compulsory arguments needed irrespective of active learning or not
main_args = get_main_args(args)
temp_out_dir = ""
directory_specific = "vanilla"
if args.isTransferExp:
print(
f"========= [Running Transfer Experiment; DIRECTORY SPECIFIC SET TO {args.transfer_dir_specific}] ========="
)
directory_specific = args.transfer_dir_specific
else:
if args.swa_mode and args.rand_aug:
directory_specific = "swa_rand_aug"
elif args.swa_mode:
directory_specific = "swa"
elif args.rand_aug:
directory_specific = "rand_aug"
else:
print("========= [NO ADVANCED REGULARIZATION TRICK ACTIVATED] =========")
print(f"Directory_specific: {directory_specific}")
# ONLY SWA MODE
# Construct datasets
from al_utils.data import Data as custom_Data
if args.dataset in ["CIFAR10", "CIFAR100", "SVHN", "MNIST", "STL10"]:
dataObj = custom_Data(dataset=args.dataset, israndAug=args.rand_aug, args=args)
logger.info("==== Loading trainDataset ====")
trainDataset, n_TrainDatapts = dataObj.getDataset(
save_dir=args.train_dir, isTrain=True, isDownload=True
)
# To get reference to data which has no transformations applied
oldmode = dataObj.eval_mode
dataObj.eval_mode = True # To remove any transforms
logger.info("==== Loading valDataset ====")
valDataset, _ = dataObj.getDataset(
save_dir=args.train_dir, isTrain=True, isDownload=True
)
logger.info("==== Loading noAugDataset ====")
noAugDataset, _ = dataObj.getDataset(
save_dir=args.train_dir, isTrain=True, isDownload=True
)
dataObj.eval_mode = oldmode
elif args.dataset == "IMAGENET":
trainDataset = None
valDataset = None
noAugDataset = None
dataObj = None
# All these are defined later as they need final values of cfg and yet cfg is not properly set
pass
else:
logger.info(f"{args.dataset} dataset not handled yet.")
raise NotImplementedError
if args.only_swa:
# USAGE: When we only want to run SWA on some model weights
cfg.RANDAUG.ACTIVATE = args.rand_aug
cfg.MODEL.DEPTH = args.model_depth
cfg.MODEL.TYPE = args.model_type
cfg.TRAIN.DATASET = args.dataset
cfg.TRAIN.BATCH_SIZE = args.train_batch_size
cfg.TEST.BATCH_SIZE = args.test_batch_size
# To reflect our cmd arguments and config file changes in cfg
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(main_args)
cfg.ACTIVE_LEARNING.LSET_PATH = args.lSetPath
cfg.ACTIVE_LEARNING.USET_PATH = args.uSetPath
cfg.ACTIVE_LEARNING.VALSET_PATH = args.valSetPath
temp_out_dir = (
args.out_dir
+ dataset_name
+ "/"
+ str(args.init_partition)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
)
logger.info(f"Temp_out_dir: {temp_out_dir}")
if args.only_swa_partition == args.init_partition:
temp_l_SetPath = args.lSetPath
temp_u_SetPath = args.uSetPath
else:
temp_l_SetPath = (
args.out_dir
+ args.dataset
+ "/"
+ str(args.only_swa_partition - args.step_partition)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
+ args.sampling_fn
+ "/lSet.npy"
)
temp_u_SetPath = (
args.out_dir
+ args.dataset
+ "/"
+ str(args.only_swa_partition - args.step_partition)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
+ args.sampling_fn
+ "/uSet.npy"
)
latest_model_path = get_latest_model_path(
dir_path=temp_out_dir + "checkpoints/"
)
print("temp_out_dir: {}".format(temp_out_dir))
print("lsetPath: {}".format(temp_l_SetPath))
print("uSetPath: {}".format(temp_u_SetPath))
print("valSetPath: {}".format(args.valSetPath))
print("latest_model_path: {}".format(latest_model_path))
        args.device_ids = np.arange(cfg.NUM_GPUS)
import numpy as np
from base import selective_loss
from scipy.sparse.csgraph import connected_components
class neighbourhood_selection(selective_loss):
def __init__(self, X,
coef=1.,
offset=None,
quadratic=None,
initial=None):
p = X.shape[1]
selective_loss.__init__(self, p**2 - p,
coef=coef,
offset=offset,
quadratic=quadratic,
initial=initial)
"""
X.shape = (n,p)
"""
self.X = X.copy()
self.off_diagonal = ~np.identity(p, dtype=bool)
def smooth_objective(self, beta, mode='both',
check_feasibility=False):
"""
beta.shape = (p^2 - p,)
"""
resid = self.X - np.dot(self.X, self.reshape(beta))
if mode == 'both':
f = self.scale((resid**2).sum()) / 2.
g = self.scale(-np.dot(self.X.T, resid)[self.off_diagonal])
return f, g
elif mode == 'func':
f = self.scale((resid**2).sum()) / 2.
return f
elif mode == 'grad':
g = self.scale(-np.dot(self.X.T, resid)[self.off_diagonal])
return g
else:
raise ValueError("mode incorrectly specified")
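    # Notational summary (comment only): with B the p x p coefficient matrix whose diagonal is
    # fixed at zero, smooth_objective evaluates f(beta) = coef/2 * ||X - X B||_F^2 and its
    # gradient -coef * X^T (X - X B), both restricted to the off-diagonal entries of B.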
# this is something that regreg does not know about, i.e.
# what is data and what is not...
def reshape(self, beta):
p = self.X.shape[1]
B = np.zeros((p, p))
B[self.off_diagonal] = beta
return B
def gradient(self, data, beta):
"""
Gradient of smooth part restricted to active set
"""
old_data, self.X = self.X, data
g = self.smooth_objective(beta, 'grad')
self.X = old_data
return g
def hessian(self, data, beta):
return np.identity(self.shape[0])
def setup_sampling(self, X, active, quadratic_coef):
self.accept_data = 0
self.total_data = 0
self.quadratic_coef = quadratic_coef
self.data = X
self.edges = self.reshape(active).astype(bool)
self.graph = np.logical_or(self.edges.T, self.edges)
if not hasattr(self, "ncomponents") or not hasattr(self, "labels"):
self.ncomponents, self.labels = \
connected_components(self.graph, directed=False)
def proposal(self, data):
"""
update one column of X at a time
"""
# pick one random column
n, p = data.shape
idx = np.random.choice(range(p))
keep = (self.labels == self.labels[idx])
keep[idx] = False
# compute the projection matrix
if keep.any():
L = data[:, keep]
P = np.dot(L, np.linalg.pinv(L))
else:
P = np.zeros((n, n))
R = np.identity(n) - P
# compute the proposal
residual = np.dot(R, data[:, idx])
eta = np.dot(R, np.random.standard_normal(n))
eta -= np.dot(residual, eta) * residual / (np.linalg.norm(residual)**2)
        eta /= np.linalg.norm(eta)
from pkgutil import get_data
import h5py as h5
import os
import torch
import numpy as np
import matplotlib
# matplotlib.use('TkAgg')
matplotlib.use('Agg')
import matplotlib.pyplot
import matplotlib.pyplot as plt
from PIL import Image
import io
from torch.utils.data import Dataset
class NTU_RGBD(Dataset):
def __init__(self,
h5_file,
keys=None,
n_t=50,
batch_size=1,
device='cpu',
filter_t=False,
n_steps_skip=0):
# Keys are actions
assert os.path.exists(h5_file), f"HDF5 file {h5_file} does not exist"
self._h5_file_handle = h5.File(h5_file, 'r')
self.keys = keys if keys is not None else list(
self._h5_file_handle.keys())
print(self.keys)
self.n_t = n_t
max_seq_len = self._h5_file_handle[f"{self.keys[0]}/pose"].shape[1]
self.t = torch.linspace(0., 1., min(max_seq_len, self.n_t)).to(device)
self._key_start_idx = np.zeros(
len(self.keys),
dtype=np.int64) # starting index for each key in __getitem__
self._key_indices = {
} # element indices for each key in the hdf5 dataset
n_data = 0
for i, k in enumerate(self.keys):
self._key_start_idx[i] = n_data
if not filter_t:
self._key_indices[k] = np.arange(
int(self._h5_file_handle[k].attrs['len']))
else:
seq_len = np.array(self._h5_file_handle[f"{k}/n_frames"])
self._key_indices[k] = np.ravel(np.nonzero(seq_len >= n_t))
n_data += len(self._key_indices[k])
self.max_idx = n_data
self.batch_size = batch_size
self.n_batches = int(np.ceil(self.max_idx / self.batch_size))
self.device = device
self.n_steps_skip = n_steps_skip
def __len__(self):
return self.max_idx
def __getitem__(self, index):
action, idx = self.get_action_idx(index)
if self.n_steps_skip > 0:
pose = torch.tensor(self._h5_file_handle[f"{action}/pose"][
idx, ::self.n_steps_skip]).to(self.device)
pose = pose[:self.
n_t] #because we select specific idx, so first position is time
else:
pose = torch.tensor(
self._h5_file_handle[f"{action}/pose"][idx, :self.n_t]).to(
self.device)
return pose
def get_action_idx(self, global_idx):
_sub = self._key_start_idx - global_idx
action_idx = int(np.nonzero(_sub <= 0)[0][-1])
action = self.keys[action_idx]
idx = self._key_indices[action][-_sub[action_idx]]
return action, idx
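    # Illustrative mapping (offsets assumed): if the per-action start offsets are [0, 100, 250],
    # a global index of 120 falls in the second action with local position 20, so
    # get_action_idx(120) returns (self.keys[1], self._key_indices[self.keys[1]][20]).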
def close(self):
self._h5_file_handle.close()
def visualize(
self,
traj,
concatenate=True,
plot_path=None, #'plots/rotmnist/traj',
img_w=None,
img_h=None,
save_separate=False,
add_frame=True,
**kwargs):
T = len(traj)
try:
# In case of tensor
traj = traj.cpu().numpy()
except:
pass
# traj = (traj * (self.data_max - self.data_min) +\
# self.data_min)
def save_image(data, filename):
im = Image.fromarray(np.transpose(data, (1, 0, 2)))
#im = ImageOps.flip(im)
if img_w is not None and img_h is not None:
n_pics = im.size[0] // im.size[1] #w//h
im = im.resize((img_w * n_pics, img_h))
im.save(filename)
def resize_array(data):
if img_w is not None and img_h is not None:
if len(data.shape) == 2:
# Grey scale
im = Image.fromarray(np.transpose(data))
else:
im = Image.fromarray(np.transpose(data, (1, 0, 2)))
n_pics = im.size[0] // im.size[1] #w//h
im = im.resize((img_w * n_pics, img_h))
if len(data.shape) == 2:
# Grey scale
resized_data = np.transpose(
self.arr_from_img(im)[0].astype(np.uint8))
else:
resized_data = np.transpose(self.arr_from_img(im),
(2, 1, 0)).astype(np.uint8)
return resized_data
else:
return data
def make_frame(img):
img = img.astype(int)
color_placement = -128
width = int(0.01 * img.shape[1])
gap = 0
img_h, img_w = img.shape[:2]
# Horizontal lines
col_ind = list(range(0, img_w))
row_ind = list(range(gap, width+gap)) +\
list(range(img_h - width - gap, img_h - gap))
img[np.ix_(row_ind, col_ind)] = color_placement
# Vertical lines
col_ind = list(range(0, width))+\
list(range(img_w-width, img_w))
row_ind = list(range(gap, img_h - gap))
img[np.ix_(row_ind, col_ind)] = color_placement
RGB = np.zeros((img_h, img_w, 3))
if len(img.shape) < 3:
RGB[img != color_placement] = np.stack(
[img[img != color_placement]] * 3, axis=1)
# Make frame different color
RGB[(img == color_placement)] = [131, 156, 255]
else:
RGB[img != color_placement] = img[img != color_placement]
# Make frame different color
RGB[(img == color_placement)[:, :, 0]] = [131, 156, 255]
return RGB.astype(np.uint8) #Image.fromarray(RGB.astype(np.uint8))
if concatenate:
concat_image = []
if plot_path is not None:
os.makedirs(os.path.dirname(plot_path), exist_ok=True)
for t in range(T):
image = self.get_picture_array(traj[t][None], 0, 0)
if add_frame:
image = make_frame(image)
if concatenate:
concat_image.append(image)
if plot_path is not None and save_separate:
tmp = list(os.path.splitext(plot_path))
print(tmp)
                if tmp[1] == '':
                    # Default extension
                    tmp[1] = '.png'
                save_image(image, tmp[0] + '_%03d' % t + tmp[1])
if concatenate:
            # Concatenate along axis 0, because images are transposed when saved
            # (PIL and numpy use different axis ordering).
concat_image = np.concatenate(concat_image, axis=0)
if plot_path is not None:
tmp = list(os.path.splitext(plot_path))
if tmp[1] == '':
# Extension
tmp[1] = '.png'
save_image(concat_image, tmp[0] + '_concat' + tmp[1])
concat_image = resize_array(concat_image)
return concat_image
def arr_from_img(self, im, shift=0):
w, h = im.size
arr = im.getdata()
#c = int(np.product(arr.size) / (w * h))
c = len(im.getbands())
return np.asarray(arr, dtype=np.float32).reshape(
(h, w, c)).transpose(2, 1, 0) - shift * 255
def get_picture_array(self, X, index, shift=0):
X = self.plot_skeleton(X[0])[None]
ch, w, h = X.shape[1], X.shape[2], X.shape[3]
ret = (X[index] + shift*255).\
reshape(ch, w, h).transpose(2, 1, 0).clip(0, 255).astype(np.uint8)
if ch == 1:
ret = ret.reshape(h, w)
return ret
def plot_skeleton(self, x):
xmax = np.max(x[0]) * 1.1
xmin = np.min(x[0]) * 1.1
ymax = np.max(x[1]) * 1.1
ymin = np.min(x[1]) * 1.1
x = x.transpose() #[25, 3] -> [3, 25]
        # Determine which nodes are connected as bones according to the NTU skeleton structure
        # Note: joint numbering starts at 1, so 1 is subtracted to get 0-based indices
arms = np.array([24, 12, 11, 10, 9, 21, 5, 6, 7, 8, 22]) - 1 #Arms
        rightHand = np.array([12, 25]) - 1  # one's right hand
        leftHand = np.array([8, 23]) - 1  # one's left hand
import cv2
import numpy as np
import dlib
from imutils import face_utils
from keras.models import load_model
detector = dlib.get_frontal_face_detector()
model_simple = load_model('Simple_Autoencoder_face.h5')
model_deep = load_model('Deep_Autoencoder_face.h5')
model_conv = load_model('Convolutional_Autoencoder_face.h5')
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
def keras_predict(image):
processed = keras_process_image(image)
decoded_image_simple = model_simple.predict(processed)
decoded_image_deep = model_deep.predict(processed)
decoded_image_conv = model_conv.predict(np.reshape(processed, (-1, 50, 50, 3)))
return decoded_image_simple, decoded_image_deep, decoded_image_conv
def keras_process_image(img):
image_x = 50
image_y = 50
img = cv2.resize(img, (image_x, image_y))
img = np.array(img, dtype=np.float32)
img = np.reshape(img, (-1, image_x * image_y * 3))
img = img.astype('float32') / 255.
return img
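# Hedged usage sketch (not in the original script): run one BGR frame through the
# three autoencoders; `face_crop` is a hypothetical image array, e.g. a face region
# cut from a cv2.VideoCapture frame.
def _demo_reconstruction(face_crop):
    decoded_simple, decoded_deep, decoded_conv = keras_predict(face_crop)
    return adjust(decoded_simple, decoded_deep, decoded_conv)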
def adjust(decoded_image_simple, decoded_image_deep, decoded_image_conv):
decoded_image_simple = decoded_image_simple.astype('float32') * 255.
decoded_image_deep = decoded_image_deep.astype('float32') * 255.
decoded_image_conv = decoded_image_conv.astype('float32') * 255.
decoded_image_simple = np.reshape(decoded_image_simple, (50, 50, 3))
decoded_image_deep = np.reshape(decoded_image_deep, (50, 50, 3))
    decoded_image_conv = np.reshape(decoded_image_conv, (50, 50, 3))
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common.api import ms_function
from mindspore.ops.composite import GradOperation
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class NetGatherD(nn.Cell):
def __init__(self, dim=1):
super(NetGatherD, self).__init__()
self.gatherd = P.GatherD()
self.dim = int(dim)
def construct(self, x, index):
return self.gatherd(x, self.dim, index)
class NetGatherDGrad(nn.Cell):
def __init__(self, network):
super(NetGatherDGrad, self).__init__()
self.grad = GradOperation(get_all=True, sens_param=True)
self.network = network
@ms_function
def construct(self, inputx, index, output_grad):
return self.grad(self.network)(inputx, index, output_grad)
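# Hedged note (not part of the original test): P.GatherD follows torch.gather semantics,
# e.g. for dim=1 the output satisfies out[i][j][k] = x[i][index[i][j][k]][k], which is
# why `index` below may differ in size from `x` along the gathered axis.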
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_gatherd_grad_fp32():
prop = 100 if np.random.random() > 0.5 else -100
x = np.random.randn(5, 5, 5).astype(np.float32) * prop
index = np.random.randint(0, 5, (5, 3, 5)).astype(np.int32)
dim = 1
gatherd = NetGatherD(dim)
grad = NetGatherDGrad(gatherd)
dout = np.random.randint(0, 5, index.shape).astype(np.float32) * prop
output_grad = grad(Tensor(x), Tensor(index), Tensor(dout))
if isinstance(output_grad, (tuple, list)):
output_grad = output_grad[0]
print(output_grad.asnumpy())
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_gatherd_grad_fp16():
prop = 100 if np.random.random() > 0.5 else -100
x = np.random.randn(5, 5, 5).astype(np.float16) * prop
index = np.random.randint(0, 5, (3, 5, 5)).astype(np.int32)
dim = 0
gatherd = NetGatherD(dim)
grad = NetGatherDGrad(gatherd)
dout = np.random.randint(0, 5, index.shape).astype(np.float16) * prop
output_grad = grad(Tensor(x), Tensor(index), Tensor(dout))
if isinstance(output_grad, (tuple, list)):
output_grad = output_grad[0]
print(output_grad.asnumpy())
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_gatherd_grad_int32():
    prop = 100 if np.random.random() > 0.5 else -100
import numpy as np
from scipy import linalg
import logging
DTYPE = np.double
logger = logging.getLogger("timeseries.distances")
def make_symmetrical(matrix):
"""
Converts an approximation generated by ACA (which isn't symmetrical) to a symmetrical matrix by
taking the average between the entries at i,j and j,i
"""
size = len(matrix)
    result = np.zeros((size, size))
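# Hedged sketch (not from the original source): based on the docstring above, the rest of
# make_symmetrical presumably averages mirrored entries; a vectorized equivalent would be:
def _make_symmetrical_sketch(matrix):
    matrix = np.asarray(matrix, dtype=DTYPE)
    return 0.5 * (matrix + matrix.T)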
#!/usr/bin/env python
# Copyright 2019-2020 AstroLab Software
# Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Display cutouts and lightcurve from a ZTF alert """
import argparse
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from fink_client.avroUtils import AlertReader
from fink_client.visualisation import show_stamps
from fink_client.visualisation import extract_field
# For plots
font = {
'weight': 'bold',
'size': 22
}
matplotlib.rc('font', **font)
# Bands
filter_color = {1: '#1f77b4', 2: '#ff7f0e', 3: '#2ca02c'}
# [
# '#1f77b4', # muted blue
# '#ff7f0e', # safety orange
# '#2ca02c', # cooked asparagus green
# '#d62728', # brick red
# '#9467bd', # muted purple
# '#8c564b', # chestnut brown
# '#e377c2', # raspberry yogurt pink
# '#7f7f7f', # middle gray
# '#bcbd22', # curry yellow-green
# '#17becf' # blue-teal
# ]
filter_name = {1: 'g band', 2: 'r band', 3: 'i band'}
def main():
""" """
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'-filename', type=str, default='',
help="Path to an alert data file (avro format)")
args = parser.parse_args(None)
r = AlertReader(args.filename)
# Display the cutouts contained in the alert
alert = r.to_list(size=1)[0]
print(alert['objectId'])
fig = plt.figure(num=0, figsize=(12, 4))
show_stamps(alert, fig)
# extract current and historical data as one vector
mag = extract_field(alert, 'magpsf')
error = extract_field(alert, 'sigmapsf')
upper = extract_field(alert, "diffmaglim")
# filter bands
fid = extract_field(alert, "fid")
# Rescale dates to end at 0
jd = extract_field(alert, "jd")
dates = np.array([i - jd[0] for i in jd])
# Title of the plot (alert ID)
title = alert["objectId"]
# loop over filters
fig = plt.figure(num=1, figsize=(12, 4))
# Loop over each filter
for filt in filter_color.keys():
        mask = np.where(fid == filt)
import logging
import math
import numpy as np
from mdsea.constants import DTYPE
from scipy import special
from scipy.interpolate import interp1d
log = logging.getLogger(__name__)
MONTECARLO_SPEEDRANGE = np.arange(0, 50, 0.001)
# ======================================================================
# --- Speed Distributions
# ======================================================================
def mb(mass, temp, k_boltzmann) -> np.ndarray:
""" Returns Maxwell-Boltzmann's speed distribution. """
term1 = mass / (2 * math.pi * k_boltzmann * temp)
term2 = 4 * math.pi * MONTECARLO_SPEEDRANGE ** 2
term3 = -mass * MONTECARLO_SPEEDRANGE ** 2 / (2 * k_boltzmann * temp)
return term1 ** 1.5 * term2 * np.exp(term3)
def mb_cdf(mass, temp, k_boltzmann) -> np.ndarray:
"""
Returns a Maxwell-Boltzmann's speed distribution
Cumulative Distribution Function (CDF).
"""
term0 = math.sqrt(k_boltzmann * temp / mass)
term1 = special.erf(MONTECARLO_SPEEDRANGE / (math.sqrt(2) * term0))
term2 = np.sqrt(2 / math.pi) * MONTECARLO_SPEEDRANGE
term3 = np.exp(-(MONTECARLO_SPEEDRANGE ** 2) / (2 * term0 ** 2)) / term0
return term1 - term2 * term3
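# Hedged example (not in the original module): Monte-Carlo speeds can be drawn by inverting
# the CDF with the interp1d import above; the mass/temp/k_boltzmann values are illustrative
# placeholders, not physical defaults.
def _sample_speeds_sketch(n, mass=1.0, temp=1.0, k_boltzmann=1.0, seed=0):
    cdf = mb_cdf(mass, temp, k_boltzmann)
    inv_cdf = interp1d(cdf, MONTECARLO_SPEEDRANGE)
    u = np.random.RandomState(seed).uniform(cdf.min(), cdf.max(), size=n)
    return inv_cdf(u)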
# ======================================================================
# --- Classes
# ======================================================================
class _Gen:
def __init__(self, nparticles: int, ndim: int) -> None:
self.nparticles = nparticles
self.ndim = ndim
# This will change to True once a
# 'generator' is successfully called
self.generated = False
# This will be updated by a 'generator'
self.coords = np.zeros((self.ndim, self.nparticles), dtype=DTYPE)
# ==================================================================
# --- Private methods
# ==================================================================
def _get(self) -> np.ndarray:
"""
Returns numpy ndarray of coordinates
in their dimensional components (x1, x2, x3, ...).
"""
if not self.generated:
log.warning("Coordinates haven't been generated yet!")
return self.coords.astype(DTYPE)
class VelGen(_Gen):
def __init__(self, nparticles: int, ndim: int) -> None:
super().__init__(nparticles, ndim)
self.speeds = None
# ==================================================================
# --- Private methods: Cartesian Coordinates
# ==================================================================
def _mb_coords(self, i, r, thetas):
if i == self.ndim - 1:
cc = r * np.cos(thetas[self.ndim - 2]) * np.prod([np.sin(thetas[j]) for j in range(self.ndim - 2)])
elif i == self.ndim - 2:
            cc = r * np.sin(thetas[self.ndim - 2])
"""
Module: libpcp.dft
Author: <NAME>, International Audio Laboratories Erlangen
License: The MIT license, https://opensource.org/licenses/MIT
This file is part of the PCP Notebooks (https://www.audiolabs-erlangen.de/PCP)
"""
import numpy as np
from matplotlib import pyplot as plt
import libpcp.signal
def plot_inner_product(ax, t, x, y, color_x='k', color_y='r', label_x='x', label_y='y'):
"""Plot inner product
Notebook: PCP_09_dft.ipynb
Args:
ax: Axis handle
t: Time axis
x: Signal x
y: Signal y
color_x: Color of signal x (Default value = 'k')
color_y: Color of signal y (Default value = 'r')
label_x: Label of signal x (Default value = 'x')
label_y: Label of signal y (Default value = 'y')
"""
ax.plot(t, x, color=color_x, linewidth=1.0, linestyle='-', label=label_x)
ax.plot(t, y, color=color_y, linewidth=1.0, linestyle='-', label=label_y)
ax.set_xlim([0, t[-1]])
ax.set_ylim([-1.5, 1.5])
ax.set_xlabel('Time (seconds)')
ax.set_ylabel('Amplitude')
sim = np.vdot(y, x)
ax.set_title(r'$\langle$ %s $|$ %s $\rangle = %.1f$' % (label_x, label_y, sim))
ax.legend(loc='upper right')
def plot_signal_e_k(ax, x, k, show_e=True, show_opt=False):
"""Plot signal and k-th DFT sinusoid
Notebook: PCP_09_dft.ipynb
Args:
ax: Axis handle
x: Signal
k: Index of DFT
show_e: Shows cosine and sine (Default value = True)
show_opt: Shows cosine with optimal phase (Default value = False)
"""
N = len(x)
time_index = np.arange(N)
ax.plot(time_index, x, 'k', marker='.', markersize='10', linewidth=2.0, label='$x$')
plt.xlabel('Time (samples)')
e_k = np.exp(2 * np.pi * 1j * k * time_index / N)
c_k = np.real(e_k)
s_k = np.imag(e_k)
X_k = np.vdot(e_k, x)
plt.title(r'k = %d: Re($X(k)$) = %0.2f, Im($X(k)$) = %0.2f, $|X(k)|$=%0.2f' %
(k, X_k.real, X_k.imag, np.abs(X_k)))
if show_e is True:
ax.plot(time_index, c_k, 'r', marker='.', markersize='5',
linewidth=1.0, linestyle=':', label='$\mathrm{Re}(\overline{\mathbf{u}}_k)$')
ax.plot(time_index, s_k, 'b', marker='.', markersize='5',
linewidth=1.0, linestyle=':', label='$\mathrm{Im}(\overline{\mathbf{u}}_k)$')
if show_opt is True:
phase_k = - np.angle(X_k) / (2 * np.pi)
cos_k_opt = np.cos(2 * np.pi * (k * time_index / N - phase_k))
d_k = np.sum(x * cos_k_opt)
ax.plot(time_index, cos_k_opt, 'g', marker='.', markersize='5',
linewidth=1.0, linestyle=':', label='$\cos_{k, opt}$')
plt.grid()
plt.legend(loc='lower right')
def generate_matrix_dft(N, K):
"""Generate a DFT (discete Fourier transfrom) matrix
Notebook: PCP_09_dft.ipynb
Args:
N: Number of samples
K: Number of frequency bins
Returns:
dft: The DFT matrix
"""
dft = np.zeros((K, N), dtype=np.complex128)
time_index = np.arange(N)
for k in range(K):
dft[k, :] = np.exp(-2j * np.pi * k * time_index / N)
return dft
def dft(x):
"""Compute the discete Fourier transfrom (DFT)
Notebook: PCP_09_dft.ipynb
Args:
x: Signal to be transformed
Returns:
X: Fourier transform of x
"""
x = x.astype(np.complex128)
N = len(x)
dft_mat = generate_matrix_dft(N, N)
return np.dot(dft_mat, x)
def fft(x):
"""Compute the fast Fourier transform (FFT)
Notebook: PCP_09_dft.ipynb
Args:
x: Signal to be transformed
Returns:
X: Fourier transform of x
"""
x = x.astype(np.complex128)
N = len(x)
log2N = np.log2(N)
assert log2N == int(log2N), 'N must be a power of two!'
X = np.zeros(N, dtype=np.complex128)
if N == 1:
return x
else:
this_range = np.arange(N)
A = fft(x[this_range % 2 == 0])
B = fft(x[this_range % 2 == 1])
range_twiddle_k = np.arange(N // 2)
sigma = np.exp(-2j * np.pi * range_twiddle_k / N)
C = sigma * B
X[:N//2] = A + C
X[N//2:] = A - C
return X
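# Hedged sanity check (not part of the original module): the recursive FFT above should
# match NumPy's FFT for any power-of-two length.
def _check_fft_against_numpy(N=64, seed=0):
    x = np.random.RandomState(seed).randn(N)
    return np.allclose(fft(x), np.fft.fft(x))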
def plot_signal_dft(t, x, X, ax_sec=False, ax_Hz=False, freq_half=False, figsize=(10, 2)):
"""Plotting function for signals and its magnitude DFT
Notebook: PCP_09_dft.ipynb
Args:
t: Time axis (given in seconds)
x: Signal
X: DFT
ax_sec: Plots time axis in seconds (Default value = False)
ax_Hz: Plots frequency axis in Hertz (Default value = False)
freq_half: Plots only low half of frequency coefficients (Default value = False)
figsize: Size of figure (Default value = (10, 2))
"""
N = len(x)
if freq_half is True:
K = N // 2
X = X[:K]
else:
K = N
plt.figure(figsize=figsize)
ax = plt.subplot(1, 2, 1)
ax.set_title('$x$ with $N=%d$' % N)
if ax_sec is True:
ax.plot(t, x, 'k', marker='.', markersize='3', linewidth=0.5)
ax.set_xlabel('Time (seconds)')
else:
ax.plot(x, 'k', marker='.', markersize='3', linewidth=0.5)
ax.set_xlabel('Time (samples)')
ax.grid()
ax = plt.subplot(1, 2, 2)
ax.set_title('$|X|$')
if ax_Hz is True:
Fs = 1 / (t[1] - t[0])
ax_freq = Fs * np.arange(K) / N
ax.plot(ax_freq, np.abs(X), 'k', marker='.', markersize='3', linewidth=0.5)
ax.set_xlabel('Frequency (Hz)')
else:
ax.plot(np.abs(X), 'k', marker='.', markersize='3', linewidth=0.5)
ax.set_xlabel('Frequency (index)')
ax.grid()
plt.tight_layout()
plt.show()
def exercise_freq_index(show_result=True):
"""Exercise 1: Interpretation of Frequency Indices
Notebook: PCP_09_dft.ipynb
Args:
show_result: Show result (Default value = True)
"""
if show_result is False:
return
Fs = 64
dur = 2
x, t = libpcp.signal.generate_example_signal(Fs=Fs, dur=dur)
X = fft(x)
print('=== Plot with axes given in indices (Fs=64, dur=2) ===', flush=True)
plot_signal_dft(t, x, X)
print('=== Plot with axes given in seconds and Hertz (Fs=64, dur=2) ===', flush=True)
plot_signal_dft(t, x, X, ax_sec=True, ax_Hz=True, freq_half=True)
Fs = 32
dur = 2
x, t = libpcp.signal.generate_example_signal(Fs=Fs, dur=dur)
X = fft(x)
print('=== Plot with axes given in indices (Fs=32, dur=2) ===', flush=True)
plot_signal_dft(t, x, X)
print('=== Plot with axes given in seconds and Hertz (Fs=32, dur=2) ===', flush=True)
plot_signal_dft(t, x, X, ax_sec=True, ax_Hz=True, freq_half=True)
def exercise_missing_time(show_result=True):
"""Exercise 2: Missing Time Localization
Notebook: PCP_09_dft.ipynb
Args:
show_result: Show result (Default value = True)
"""
if show_result is False:
return
N = 256
T = 6
omega1 = 1
omega2 = 5
amp1 = 1
amp2 = 0.5
t = np.linspace(0, T, N)
t1 = t[:N//2]
t2 = t[N//2:]
x1 = amp1 * np.sin(2*np.pi*omega1*t) + amp2 * np.sin(2*np.pi*omega2*t)
x2 = np.concatenate((amp1 * np.sin(2*np.pi*omega1*t1), amp2 * np.sin(2*np.pi*omega2*t2)))
X1 = fft(x1)
X2 = fft(x2)
print('=== Plot with axes given in indices ===')
plot_signal_dft(t, x1, X1)
plot_signal_dft(t, x2, X2)
plt.show()
print('=== Plot with axes given in seconds and Hertz ===')
plot_signal_dft(t, x1, X1, ax_sec=True, ax_Hz=True, freq_half=True)
plot_signal_dft(t, x2, X2, ax_sec=True, ax_Hz=True, freq_half=True)
plt.show()
def exercise_chirp(show_result=True):
"""Exercise 3: Chirp Signal
Notebook: PCP_09_dft.ipynb
Args:
show_result: Show result (Default value = True)
"""
if show_result is False:
return
def generate_chirp_linear(t0=0, t1=1, N=128):
"""Generation chirp with linear frequency increase
Notebook: PCP_09_dft.ipynb
Args:
t0: Start time in seconds (Default value = 0)
t1: End time in seconds (Default value = 1)
N: Number of samples (Default value = 128)
Returns:
x: Generated chirp signal
t: Time axis (in seconds)
"""
t = np.linspace(t0, t1, N)
x = np.sin(np.pi * t ** 2)
return x, t
def generate_chirp_plot_signal_dft(t0, t1, N):
"""Plot linear chirp signal
Notebook: PCP_09_dft.ipynb
Args:
t0: Start time in seconds
t1: End time in seconds
N: Number of samples
"""
x, t = generate_chirp_linear(t0=t0, t1=t1, N=N)
X = fft(x)
plot_signal_dft(t, x, X, ax_sec=True, ax_Hz=True, freq_half=True)
generate_chirp_plot_signal_dft(t0=0, t1=2, N=128)
generate_chirp_plot_signal_dft(t0=0, t1=4, N=128)
generate_chirp_plot_signal_dft(t0=4, t1=8, N=128)
generate_chirp_plot_signal_dft(t0=4, t1=8, N=256)
def exercise_inverse(show_result=True):
"""Exercise 4: Inverse DFT
Notebook: PCP_09_dft.ipynb
Args:
show_result: Show result (Default value = True)
"""
if show_result is False:
return
def generate_matrix_dft_inv(N, K):
"""Generates an IDFT (inverse discrete Fourier transfrom) matrix
Notebook: PCP_09_dft.ipynb
Args:
N: Number of samples
K: Number of frequency bins
Returns:
dft: The DFT matrix
"""
dft_inv = np.zeros((K, N), dtype=np.complex128)
time_index = np.arange(N)
for k in range(K):
dft_inv[k, :] = np.exp(2j * np.pi * k * time_index / N) / N
return dft_inv
N = 32
dft_mat = generate_matrix_dft(N, N)
dft_inv_mat = generate_matrix_dft_inv(N, N)
A = np.matmul(dft_mat, dft_inv_mat)
B = np.matmul(dft_inv_mat, dft_mat)
I = np.eye(N)
print('Comparison between DFT * DFT_inv and I:', np.allclose(A, I))
    print('Comparison between DFT_inv * DFT and I:', np.allclose(B, I))
# -----------------------------------------------------------------------------
# From Numpy to Python
# Copyright (2017) <NAME> - BSD license
# More information at https://github.com/rougier/numpy-book
# -----------------------------------------------------------------------------
import numpy as np
import numba as nb
@nb.jit(nopython=True, parallel=False, fastmath=True)
def mgrid(xn, yn):
    Xi = np.empty((xn, yn), dtype=np.int64)
"""
University of Minnesota
Aerospace Engineering and Mechanics - UAV Lab
Copyright 2019 Regents of the University of Minnesota
See: LICENSE.md for complete license details
Author: <NAME>
Analysis for Thor RTSM
"""
#%%
# Import Libraries
import numpy as np
import matplotlib.pyplot as plt
# Hack to allow loading the Core package
if __name__ == "__main__" and __package__ is None:
from sys import path, argv
from os.path import dirname, abspath, join
path.insert(0, abspath(join(dirname(argv[0]), "..")))
path.insert(0, abspath(join(dirname(argv[0]), "..", 'Core')))
del path, argv, dirname, abspath, join
from Core import Loader
from Core import OpenData
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.serif": ["Palatino"],
"font.size": 10
})
# Constants
hz2rps = 2 * np.pi
rps2hz = 1 / hz2rps
#%% File Lists
import os.path as path
pathBase = path.join('/home', 'rega0051', 'FlightArchive', 'Thor')
#pathBase = path.join('G:', 'Shared drives', 'UAVLab', 'Flight Data', 'Thor')
fileList = {}
flt = 'FLT126'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Thor' + flt, 'Thor' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Thor' + flt, 'thor.json')
fileList[flt]['def'] = path.join(pathBase, 'Thor' + flt, 'thor_def.json')
flt = 'FLT127'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Thor' + flt, 'Thor' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Thor' + flt, 'thor.json')
fileList[flt]['def'] = path.join(pathBase, 'Thor' + flt, 'thor_def.json')
flt = 'FLT128'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Thor' + flt, 'Thor' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Thor' + flt, 'thor.json')
fileList[flt]['def'] = path.join(pathBase, 'Thor' + flt, 'thor_def.json')
#%%
from Core import FreqTrans
rtsmSegList = [
# {'flt': 'FLT126', 'seg': ('time_us', [875171956 , 887171956], 'FLT126 - RTSM - Nominal Gain, 4 deg amp'), 'color': 'k'},
# {'flt': 'FLT126', 'seg': ('time_us', [829130591 , 841130591], 'FLT126 - RTSM Route - Nominal Gain, 4 deg amp'), 'color': 'k'},
# {'flt': 'FLT127', 'seg': ('time_us', [641655909 , 653655909], 'FLT127 - RTSM Route - Nominal Gain, 4 deg amp'), 'color': 'k'}, # Yaw controller in-op??
# {'flt': 'FLT128', 'seg': ('time_us', [700263746 , 712263746 ], 'FLT128 - RTSM Route - Nominal Gain, 4 deg amp'), 'color': 'k'}, # Interesting Roll Margin vs. Uncertainty
# {'flt': 'FLT128', 'seg': ('time_us', [831753831 , 843753831 ], 'FLT128 - RTSM Route - Nominal Gain, 4 deg amp'), 'color': 'k'},
# {'flt': 'FLT128', 'seg': ('time_us', [ 959859721 , 971859721 ], 'FLT128 - RTSM Route - Nominal Gain, 4 deg amp'), 'color': 'k'}, # Not good
# {'flt': 'FLT126', 'seg': ('time_us', [928833763 , 940833763], 'FLT126 - RTSM Large - Nominal Gain, 8 deg amp'), 'color': 'r'},
# {'flt': 'FLT127', 'seg': ('time_us', [698755386 , 707255278], 'FLT127 - RTSM Large Route - Nominal Gain, 8 deg amp'), 'color': 'r'}, # Yaw controller in-op??
# {'flt': 'FLT128', 'seg': ('time_us', [779830919 , 791830919 ], 'FLT128 - RTSM Large Route - Nominal Gain, 8 deg amp'), 'color': 'r'},
# {'flt': 'FLT128', 'seg': ('time_us', [900237086 , 912237086 ], 'FLT128 - RTSM Large Route - Nominal Gain, 8 deg amp'), 'color': 'r'},
# {'flt': 'FLT126', 'seg': ('time_us', [902952886 , 924952886], 'FLT126 - RTSM Long - Nominal Gain, 4 deg amp'), 'color': 'b'},
# {'flt': 'FLT127', 'seg': ('time_us', [657015836 , 689015836], 'FLT127 - RTSM Long Route - Nominal Gain, 4 deg amp'), 'color': 'b'}, # Yaw controller in-op??
{'flt': 'FLT128', 'seg': ('time_us', [714385469 , 746385469 ], 'FLT128 - RTSM Long Route - Nominal Gain, 4 deg amp'), 'color': 'b'},
{'flt': 'FLT128', 'seg': ('time_us', [847254621 , 879254621 ], 'FLT128 - RTSM Long Route - Nominal Gain, 4 deg amp'), 'color': 'g'}, # Best
# {'flt': 'FLT127', 'seg': ('time_us', [1209355236 , 1221535868], 'FLT127 - RTSM LongLarge Route - Nominal Gain, 8 deg amp'), 'color': 'm'}, # Yaw controller in-op??
{'flt': 'FLT128', 'seg': ('time_us', [794251787 , 826251787 ], 'FLT128 - RTSM LongLarge Route - Nominal Gain, 8 deg amp'), 'color': 'r'},
{'flt': 'FLT128', 'seg': ('time_us', [921438015 , 953438015 ], 'FLT128 - RTSM LongLarge Route - Nominal Gain, 8 deg amp'), 'color': 'm'},
# {'flt': 'FLT126', 'seg': ('time_us', [981115495 , 993115495], 'FLT126 - RTSM - High Gain, 4 deg amp')},
# {'flt': 'FLT126', 'seg': ('time_us', [689907125 , 711907125], 'FLT126 - RTSM Long - High Gain, 4 deg amp')},
# {'flt': 'FLT126', 'seg': ('time_us', [728048050 , 740048050], 'FLT126 - RTSM Large - High Gain, 8 deg amp')},
]
oDataSegs = []
for rtsmSeg in rtsmSegList:
fltNum = rtsmSeg['flt']
fileLog = fileList[fltNum]['log']
fileConfig = fileList[fltNum]['config']
# Load
h5Data = Loader.Load_h5(fileLog) # RAPTRS log data as hdf5
sysConfig = Loader.JsonRead(fileConfig)
oData = Loader.OpenData_RAPTRS(h5Data, sysConfig)
oData['cmdRoll_FF'] = h5Data['Control']['cmdRoll_pidFF']
oData['cmdRoll_FB'] = h5Data['Control']['cmdRoll_pidFB']
oData['cmdPitch_FF'] = h5Data['Control']['cmdPitch_pidFF']
oData['cmdPitch_FB'] = h5Data['Control']['cmdPitch_pidFB']
oData['cmdYaw_FF'] = h5Data['Control']['refPsi_rad']
oData['cmdYaw_FB'] = h5Data['Control']['cmdYaw_damp_rps']
# Segments
rtsmSeg['seg'][1][0] += 1e6
rtsmSeg['seg'][1][1] += -1e6 + 50e3
oDataSegs.append(OpenData.Segment(oData, rtsmSeg['seg']))
#%%
sigExcList = ['cmdRoll_rps', 'cmdPitch_rps', 'cmdYaw_rps']
sigFbList = ['cmdRoll_FB', 'cmdPitch_FB', 'cmdYaw_FB']
sigFfList = ['cmdRoll_FF', 'cmdPitch_FF', 'cmdYaw_FF']
#sigSensList = ['wB_I_rps', 'cmdPitch_FF', 'cmdYaw_FF']
freqExc_rps = []
freqExc_rps.append( np.array(sysConfig['Excitation']['OMS_RTSM_1']['Frequency']))
freqExc_rps.append( np.array(sysConfig['Excitation']['OMS_RTSM_2']['Frequency']))
freqExc_rps.append( np.array(sysConfig['Excitation']['OMS_RTSM_3']['Frequency']))
vCmdList = []
vExcList = []
vFbList = []
vFfList = []
ySensList = []
for iSeg, seg in enumerate(oDataSegs):
vCmd = np.zeros((len(sigExcList), len(seg['time_s'])))
vExc = np.zeros((len(sigExcList), len(seg['time_s'])))
vFb = np.zeros((len(sigExcList), len(seg['time_s'])))
vFf = np.zeros((len(sigExcList), len(seg['time_s'])))
ySens = np.zeros((len(sigExcList), len(seg['time_s'])))
for iSig, sigExc in enumerate(sigExcList):
sigFb = sigFbList[iSig]
sigFf = sigFfList[iSig]
vCmd[iSig] = seg['Control'][sigExc]
vExc[iSig] = seg['Excitation'][sigExc]
vFb[iSig] = -seg[sigFb]
# vFb[iSig][1:-1] = -seg[sigFb][0:-2] # Shift the time of the output into next frame
vFf[iSig] = seg[sigFf]
ySens[iSig] = seg['wB_I_rps'][iSig]
vCmdList.append(vCmd)
vExcList.append(vExc)
vFbList.append(vFb)
vFfList.append(vFf)
ySensList.append(ySens)
plt.plot(oDataSegs[iSeg]['time_s'], oDataSegs[iSeg]['vIas_mps'])
plt.plot(oDataSegs[iSeg]['time_s'], vExcList[iSeg][0])
plt.plot(oDataSegs[iSeg]['time_s'], vExcList[iSeg][1])
plt.plot(oDataSegs[iSeg]['time_s'], vExcList[iSeg][2])
plt.plot(oDataSegs[iSeg]['time_s'], vFbList[iSeg][0])
plt.plot(oDataSegs[iSeg]['time_s'], vFbList[iSeg][1])
plt.plot(oDataSegs[iSeg]['time_s'], vFbList[iSeg][2])
#%% Estimate the frequency response function
# Define the excitation frequencies
freqRate_hz = 50
freqRate_rps = freqRate_hz * hz2rps
optSpec = FreqTrans.OptSpect(dftType = 'czt', freqRate_rps = freqRate_rps, smooth = ('box', 5), winType = 'bartlett', detrendType = 'linear')
# Excited Frequencies per input channel
optSpec.freq_rps = np.asarray(freqExc_rps)
optSpec.freqInterp = np.sort(optSpec.freq_rps.flatten())
# Null Frequencies
freqNull_rps = optSpec.freqInterp[0:-1] + 0.5 * np.diff(optSpec.freqInterp)
optSpec.freqNull = freqNull_rps
optSpec.freqNullInterp = True
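# Hedged note (not in the original script): the null frequencies are the midpoints between
# adjacent excitation frequencies; spectral content found there is attributed to
# noise/turbulence rather than to the multisine excitation.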
# FRF Estimate
TaEstNomList = []
TaEstUncList = []
TaEstCohList = []
SaEstNomList = []
SaEstUncList = []
SaEstCohList = []
LaEstNomList = []
LaEstUncList = []
LaEstCohList = []
for iSeg, seg in enumerate(oDataSegs):
freq_rps, Txy, Cxy, Sxx, Syy, Sxy, TxyUnc, SxxNull, Snn = FreqTrans.FreqRespFuncEstNoise(vExcList[iSeg], vFbList[iSeg], optSpec)
freq_hz = freq_rps * rps2hz
TaEstNom = -Txy # Sa = I - Ta
TaEstUnc = TxyUnc # TxuUnc = np.abs(Sxu / Sxx)
TaEstCoh = Cxy # Cxy = np.abs(Sxu)**2 / (Sxx * Suu)
SaEstNom, SaEstUnc, SaEstCoh = FreqTrans.TtoS(TaEstNom, TaEstUnc, TaEstCoh)
LaEstNom, LaEstUnc, LaEstCoh = FreqTrans.StoL(SaEstNom, SaEstUnc, SaEstCoh)
TaEstNomList.append( TaEstNom )
TaEstUncList.append( TaEstUnc )
TaEstCohList.append( TaEstCoh )
SaEstNomList.append( SaEstNom )
SaEstUncList.append( SaEstUnc )
SaEstCohList.append( SaEstCoh )
LaEstNomList.append( LaEstNom )
LaEstUncList.append( LaEstUnc )
LaEstCohList.append( LaEstCoh )
print(np.sum(SxxNull, axis = -1) / np.sum(Sxx, axis = -1))
T_InputNames = sigExcList
T_OutputNames = sigFbList
# Compute Gain, Phase, Crit Distance
#%% Sigma Plot
svLaEstNomList = []
svLaEstUncList = []
for iSeg in range(0, len(oDataSegs)):
# I3 = np.repeat([np.eye(3)], SaEstNomList.shape[-1], axis=0).T
# svLaEstNom_mag = FreqTrans.Sigma( I3 + LaEstNomList[iSeg] ) # Singular Value Decomp
svLaEstNom_mag = 1 / FreqTrans.Sigma(SaEstNomList[iSeg]) # sv(I + La) = 1 / sv(Sa)
svLaEstUnc_mag = FreqTrans.Sigma( LaEstUncList[iSeg] ) # Singular Value Decomp
svLaEstNomList.append(svLaEstNom_mag)
svLaEstUncList.append(svLaEstUnc_mag)
if True:
fig = None
for iSeg in range(0, len(oDataSegs)):
cohLaEst = LaEstCohList[iSeg]
# cohLaEstMin = np.min(cohLaEst, axis = (0,1))
cohLaEstMin = np.mean(cohLaEst, axis = (0,1))
svNom = svLaEstNomList[iSeg]
svNomMin = np.min(svNom, axis=0)
svUnc = svLaEstUncList[iSeg]
svUncMax = np.max(svUnc, axis=0)
svUncLower = svNomMin - svUncMax
svUncLower[svUncLower < 0] = svNomMin[svUncLower < 0]
fig = FreqTrans.PlotSigma(freq_hz[0], svNomMin, svUnc_mag = svUncLower, coher_nd = cohLaEstMin, fig = fig, color = rtsmSegList[iSeg]['color'], linestyle = '-', label = oDataSegs[iSeg]['Desc'])
fig = FreqTrans.PlotSigma(freq_hz[0], 0.4 * np.ones_like(freq_hz[0]), color = 'r', linestyle = '--', fig = fig)
ax = fig.get_axes()
ax[0].set_xlim(0, 10)
ax[0].set_ylim(0, 1.5)
#%% Vector Margin Plots
inPlot = ['$p_{ex}$', '$q_{ex}$', '$r_{ex}$'] # Elements of sigExcList
outPlot = ['$p_{fb}$', '$q_{fb}$', '$r_{fb}$'] # Elements of sigFbList
vmLaEstNomList_mag = []
vmLaEstUncList_mag = []
for iSeg in range(0, len(oDataSegs)):
vm_mag, vmUnc_mag, vmMin_mag = FreqTrans.VectorMargin(LaEstNomList[iSeg], LaEstUncList[iSeg], typeUnc = 'circle')
vmLaEstNomList_mag.append(vm_mag)
vmLaEstUncList_mag.append(vmUnc_mag)
# vm_mag.append(vmMin_mag)
numOut = len(outPlot); numIn = len(inPlot)
ioArray = np.array(np.meshgrid(np.arange(numOut), np.arange(numIn))).T.reshape(-1, 2)
if False:
for iPlot, [iOut, iIn] in enumerate(ioArray):
fig = 10 + iPlot
for iSeg in range(0, len(oDataSegs)):
vm_mag = vmLaEstNomList_mag[iSeg][iOut, iIn]
vmUnc_mag = vmLaEstUncList_mag[iSeg][iOut, iIn]
fig = FreqTrans.PlotVectorMargin(freq_hz[iIn], vm_mag, vmUnc_mag = vmUnc_mag, coher_nd = LaEstCohList[iSeg][iOut, iIn], fig = fig, color = rtsmSegList[iSeg]['color'], label = oDataSegs[iSeg]['Desc'])
fig = FreqTrans.PlotVectorMargin(freq_hz[iIn], 0.4 * np.ones_like(freq_hz[iIn]), fig = fig, color = 'r', linestyle = '--', label = 'Critical')
fig.suptitle('$L_a$ - ' + inPlot[iIn] + ' to ' + outPlot[iOut])
ax = fig.get_axes()
ax[0].set_ylim(0, 2)
#%% Nyquist Plots
if False:
for iPlot, [iOut, iIn] in enumerate(ioArray):
fig = 20 + iPlot
for iSeg in range(0, len(oDataSegs)):
Tnom = LaEstNomList[iSeg][iOut, iIn]
Tunc = np.abs(LaEstUncList[iSeg][iOut, iIn])
fig = FreqTrans.PlotNyquist(Tnom, Tunc, fig = fig, color = rtsmSegList[iSeg]['color'], marker = '.', label = oDataSegs[iSeg]['Desc'])
fig = FreqTrans.PlotNyquist(np.asarray([-1+ 0j]), TUnc = np.asarray([0.4 + 0.4j]), fig = fig, color = 'r', marker = '+', label = 'Critical Region')
fig.suptitle(inPlot[iIn] + ' to ' + outPlot[iOut])
ax = fig.get_axes()
ax[0].set_xlim(-3, 1)
ax[0].set_ylim(-2, 2)
#%% Bode Plots
gainLaEstNomList_mag = []
gainLaEstUncList_mag = []
phaseLaEstNomList_deg = []
for iSeg in range(0, len(oDataSegs)):
gainLaEstNom_mag, phaseLaEstNom_deg = FreqTrans.GainPhase(LaEstNomList[iSeg], magUnit = 'mag', phaseUnit = 'deg', unwrap = True)
gainLaEstUnc_mag = FreqTrans.Gain(LaEstNomList[iSeg], magUnit = 'mag')
gainLaEstNomList_mag.append(gainLaEstNom_mag)
phaseLaEstNomList_deg.append(phaseLaEstNom_deg)
gainLaEstUncList_mag.append(gainLaEstUnc_mag)
if False:
for iPlot, [iOut, iIn] in enumerate(ioArray):
fig = 20 + iPlot
for iSeg in range(0, len(oDataSegs)):
gain_mag = gainLaEstNomList_mag[iSeg][iOut, iIn]
phase_deg = phaseLaEstNomList_deg[iSeg][iOut, iIn]
coher_nd = LaEstCohList[iSeg][iOut, iIn]
gainUnc_mag = gainLaEstUncList_mag[iSeg][iOut, iIn]
fig = FreqTrans.PlotBode(freq_hz[iIn], gain_mag, phase_deg, coher_nd, gainUnc_mag, fig = fig, dB = True, color = rtsmSegList[iSeg]['color'], label = oDataSegs[iSeg]['Desc'])
# fig.suptitle(inName + ' to ' + outName, size=20)
#%% Turbulence
optSpecE = FreqTrans.OptSpect(dftType = 'czt', freqRate_rps = freqRate_rps, smooth = ('box', 7), winType='bartlett')
optSpecE.freq_rps = np.asarray(freqExc_rps)
optSpecE.freqInterp = np.sort(optSpecE.freq_rps.flatten())
optSpecN = FreqTrans.OptSpect(dftType = 'czt', freqRate_rps = freqRate_rps, smooth = ('box', 7), winType='bartlett')
optSpecN.freq_rps = freqNull_rps
SyyList = []
for iSeg in range(0, len(oDataSegs)):
_, _, SyyNull = FreqTrans.Spectrum(ySensList[iSeg], optSpecN)
SyyList.append(SyyNull)
_, _, Sxx = FreqTrans.Spectrum(vExcList[iSeg], optSpec)
_, _, SxxNull = FreqTrans.Spectrum(vExcList[iSeg], optSpecN)
print(np.sum(SxxNull, axis = -1) / np.sum(Sxx, axis = -1))
from Core import Environment
ft2m = 0.3049
m2ft = 1/ft2m
b_ft = 4
# levelList = ['light', 'moderate', 'severe']
levelList = ['light', 'moderate']
freqTurb_rps = np.sort(freqNull_rps)
if False:
# for iOut, outName in enumerate(outPlot):
for iOut, outName in enumerate(outPlot[0:1]):
plt.figure()
for iSeg in range(0, len(oDataSegs)):
plt.loglog(freqTurb_rps*rps2hz, SyyList[iSeg][iOut], marker='.', linestyle='None', color = rtsmSegList[iSeg]['color'], label = oDataSegs[iSeg]['Desc'])
V_mps = np.mean(seg['vIas_mps'])
V_fps = V_mps * m2ft
h_m = np.mean(seg['altBaro_m'])
h_ft = h_m * m2ft
for iLevel, level in enumerate(levelList):
sigma = Environment.TurbIntensity(h_ft, level = level)
L_ft = Environment.TurbLengthScale(h_ft)
Puvw_Dryden = Environment.TurbSpectDryden(sigma, L_ft, freqTurb_rps / V_fps) * V_fps
Ppqr_Dryden = Environment.TurbSpectRate(Puvw_Dryden, sigma, L_ft, freqTurb_rps, V_fps, b_ft)
# plt.loglog(freqTurb_rps*rps2hz, np.abs(Ppqr_Dryden[iOut]), label = "Dryden - Level: " + level)
Puvw_VonKarman = Environment.TurbSpectVonKarman(sigma, L_ft, freqTurb_rps / V_fps) * V_fps
Ppqr_VonKarman = Environment.TurbSpectRate(Puvw_VonKarman, sigma, L_ft, freqTurb_rps, V_fps, b_ft)
plt.loglog(freqTurb_rps*rps2hz, np.abs(Ppqr_VonKarman[iOut]), label = "VonKarman - Level: " + level)
plt.grid(True)
plt.xlim([0.1, 10])
# plt.title('Disturbance Estimate - ' + outName)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Power Spectrum (dB)')
plt.legend()
#%% Spectrograms of Output and Disturbances
if False:
#%%
iSgnlExc = 0
iSgnlOut = 0
freqRate_rps = 50 * hz2rps
optSpec = FreqTrans.OptSpect(dftType = 'dftmat', freq_rps = freqExc_rps[iSgnlExc], freqRate_rps = freqRate_rps, smooth = ('box', 5), winType = 'bartlett', detrendType = 'linear')
optSpecN = FreqTrans.OptSpect(dftType = 'dftmat', freq_rps = freqNull_rps, freqRate_rps = freqRate_rps, smooth = ('box', 5), winType = 'bartlett', detrendType = 'linear')
for iSeg in range(0, len(oDataSegs)):
t = oDataSegs[iSeg]['time_s']
x = vExcList[iSeg][iSgnlExc]
y = vFbList[iSeg][iSgnlOut]
# Number of time segments and length of overlap, units of samples
#lenSeg = 2**6 - 1
lenSeg = int(1.0 * optSpec.freqRate_rps * rps2hz)
lenOverlap = 1
# Compute Spectrum over time
tSpecY_s, freqSpecY_rps, P_Y_mag = FreqTrans.SpectTime(t, y, lenSeg, lenOverlap, optSpec)
tSpecN_s, freqSpecN_rps, P_N_mag = FreqTrans.SpectTime(t, y, lenSeg, lenOverlap, optSpecN)
# Plot the Spectrogram
fig = FreqTrans.Spectogram(tSpecY_s, freqSpecY_rps * rps2hz, 20 * np.log10(P_Y_mag))
fig.suptitle(oDataSegs[iSeg]['Desc'] + ': Spectrogram - ' + sigFbList[iSgnlOut])
fig = FreqTrans.Spectogram(tSpecN_s, freqSpecN_rps * rps2hz, 20 * np.log10(P_N_mag))
fig.suptitle(oDataSegs[iSeg]['Desc'] + ': Spectrogram Null - ' + sigFbList[iSgnlOut])
#%% Estimate the frequency response function time history
iSeg = 3
optSpec = FreqTrans.OptSpect(dftType = 'czt', freqRate_rps = freqRate_rps, smooth = ('box', 5), winType = 'bartlett', detrendType = 'linear')
# Excited Frequencies per input channel
optSpec.freq_rps = np.asarray(freqExc_rps)
optSpec.freqInterp = np.sort(optSpec.freq_rps.flatten())
# Null Frequencies
freqNull_rps = optSpec.freqInterp[0:-1] + 0.5 * np.diff(optSpec.freqInterp)
optSpec.freqNull = freqNull_rps
optSpec.freqNullInterp = True
# FRF Estimate
time_s = seg['time_s']
lenX = len(time_s)
lenFreq = optSpec.freqInterp.shape[-1]
lenStep = 1
numSec = int((lenX) / lenStep)
numOut = 3
numIn = 3
TaEstNomHist = np.zeros((numSec, numOut, numIn, lenFreq), dtype=complex)
TaEstUncHist = np.zeros((numSec, numOut, numIn, lenFreq), dtype=complex)
TaEstCohHist = np.zeros((numSec, numOut, numIn, lenFreq))
SaEstNomHist = np.zeros((numSec, numOut, numIn, lenFreq), dtype=complex)
SaEstUncHist = np.zeros((numSec, numOut, numIn, lenFreq), dtype=complex)
SaEstCohHist = np.zeros((numSec, numOut, numIn, lenFreq))
LaEstNomHist = np.zeros((numSec, numOut, numIn, lenFreq), dtype=complex)
LaEstUncHist = np.zeros((numSec, numOut, numIn, lenFreq), dtype=complex)
LaEstCohHist = np.zeros((numSec, numOut, numIn, lenFreq))
SxxHist = np.zeros((numSec, numIn, lenFreq))
SzzHist = np.zeros((numSec, numOut, numIn, lenFreq))
SxxNullHist = np.zeros((numSec, numIn, lenFreq))
SnnHist = np.zeros((numSec, numOut, numIn, lenFreq))
""" This module provides the functionality to calculate ephemeris for two bodies problem
also in the case of perturbed methods. More advance pertubed methods will be handled
in other module
"""
# Standard library imports
import logging
from math import isclose
from typing import ForwardRef
# Third party imports
import pandas as pd
import numpy as np
from numpy.linalg import norm
from toolz import pipe
# Local application imports
from myorbit.util.general import my_range, NoConvergenceError, my_isclose
import myorbit.data_catalog as dc
from myorbit.util.timeut import mjd2str_date
from myorbit.planets import g_xyz_equat_sun_j2000
from myorbit.kepler.keplerian import KeplerianStateSolver, ParabolicalStateSolver, EllipticalStateSolver
from myorbit.kepler.ellipitical import calc_rv_for_elliptic_orbit, calc_M
from myorbit.lagrange.lagrange_coeff import calc_rv_from_r0v0
from myorbit.util.general import mu_Sun, calc_eccentricity_vector, angle_between_vectors
from myorbit.pert_cowels import calc_eph_by_cowells
from myorbit.two_body import calc_eph_planet
from myorbit.util.timeut import EQX_B1950, EQX_J2000
from myorbit.ephemeris_input import EphemrisInput
from myorbit.pert_enckes import calc_eph_by_enckes
from myorbit.two_body import calc_eph_twobody
from myorbit.util.constants import *
logger = logging.getLogger(__name__)
def calc_tp(M0, a, epoch):
deltaT = TWOPI*np.sqrt(pow(a,3)/GM)*(1-M0/TWOPI)
return deltaT + epoch
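# Hedged worked example (not in the original module): calc_tp advances the epoch by the
# fraction of the orbital period still remaining until the next perihelion, so for
# M0 = pi (half a period past perihelion) the result is epoch + period/2.
def _calc_tp_example(epoch=59000.0, a=1.0):
    period = TWOPI * np.sqrt(a**3 / GM)
    return np.isclose(calc_tp(np.pi, a, epoch), epoch + 0.5 * period)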
def calc_comets_that_no_converge(delta_days):
"""The orbit of all comets is studied around the perihelion [-days, +days]
Parameters
----------
delta_days : int
[description]
"""
df = dc.DF_COMETS
not_converged=[]
for idx, name in enumerate(df['Name']):
obj = dc.read_comet_elms_for(name,df)
msg = f'Testing Object: {obj.name}'
print (msg)
logger.info(msg)
if hasattr(obj,'M0') :
M_at_epoch = obj.M0
else :
M_at_epoch = None
# from 20 days before perihelion passage to 20 days after 20 days perihelion passage
solver = KeplerianStateSolver.make(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q, epoch=obj.epoch_mjd, M_at_epoch=M_at_epoch)
T0_MJD = obj.tp_mjd-delta_days
r0_xyz, rdot0_xyz, r0, h0_xyz, _ , f0 = solver.calc_rv(T0_MJD)
hs = []
es = []
for dt in range(2,delta_days*2,2):
clock_mjd = T0_MJD + dt
try :
r_xyz, rdot_xyz, h_xyz, f = calc_rv_from_r0v0(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)
hs.append(np.linalg.norm(h_xyz))
es.append(np.linalg.norm(calc_eccentricity_vector(r_xyz, rdot_xyz,h_xyz)))
except NoConvergenceError :
print (f"===== Object {name} doest not converged at {clock_mjd} MJD")
not_converged.append(name)
if not all(isclose(h, hs[0], abs_tol=1e-12) for h in hs):
msg = f'The angular momentum is NOT constant in the orbit'
print (msg)
logger.error(msg)
if not all(isclose(ec, es[0], abs_tol=1e-12) for ec in es):
msg = f'The eccentric vector is NOT constant in the orbit'
print (msg)
logger.error(msg)
print (not_converged)
def test_all_bodies(delta_days):
df = dc.DF_BODIES
not_converged=[]
for idx, name in enumerate(df['Name']):
body = dc.read_body_elms_for(name,df)
msg = f'Testing Object: {body.name}'
solver = KeplerianStateSolver.make(e=body.e, a=body.a, epoch=body.epoch_mjd, M_at_epoch=body.M0)
tp = calc_tp(body.M0, body.a, body.epoch_mjd)
hs = []
try :
for clock_mjd in my_range(tp-delta_days, tp+delta_days, 2):
r_xyz, rdot_xyz, r, h = solver.calc_rv(clock_mjd)
hs.append(h)
if not all(isclose(h, hs[0], abs_tol=1e-12) for h in hs):
msg = f'The angular momentum is NOT constant in the orbit'
print (msg)
logger.error(msg)
except NoConvergenceError :
print (f"===========> NOT converged for object {name}")
not_converged.append(name)
if idx % 1000 == 0 :
print (f"================================================>> {idx}")
print (not_converged)
def test_almost_parabolical(delta_days):
df = dc.DF_COMETS
not_converged=[]
names = ['C/1680 V1', 'C/1843 D1 (Great March comet)', 'C/1882 R1-A (Great September comet)', 'C/1882 R1-B (Great September comet)', 'C/1882 R1-C (Great September comet)', 'C/1882 R1-D (Great September comet)', 'C/1963 R1 (Pereyra)', 'C/1965 S1-A (Ikeya-Seki)', 'C/1965 S1-B (Ikeya-Seki)', 'C/1967 C1 (Seki)', 'C/1970 K1 (White-Ortiz-Bolelli)', 'C/2004 V13 (SWAN)', 'C/2011 W3 (Lovejoy)', 'C/2013 G5 (Catalina)', 'C/2020 U5 (PANSTARRS)']
#names = ['C/2020 U5 (PANSTARRS)']
df = df[df.Name.isin(names)]
for idx, name in enumerate(df['Name']):
if name not in names :
continue
obj = dc.read_comet_elms_for(name,df)
msg = f'Testing Object: {obj.name} with Tp:{mjd2str_date(obj.tp_mjd)}'
print (msg)
logger.info(msg)
if hasattr(obj,'M0') :
M_at_epoch = obj.M0
else :
M_at_epoch = None
# from 20 days before perihelion passage to 20 days after 20 days perihelion passage
#solver = ParabolicalStateSolver(obj.tp_mjd, obj.q, obj.e)
solver = EllipticalStateSolver(q=obj.q, a=obj.a, e=obj.e, tp_mjd=obj.tp_mjd, epoch_mjd=obj.epoch_mjd)
hs = []
for clock_mjd in my_range(obj.tp_mjd-delta_days, obj.tp_mjd+delta_days, 2):
r_xyz, rdot_xyz, r, h_xyz, *others = solver.calc_rv(clock_mjd)
hs.append(h_xyz)
print(mjd2str_date(clock_mjd))
if not all(np.allclose(h_xyz, hs[0], atol=1e-12) for h_xyz in hs):
msg = f'The angular momentum is NOT constant in the orbit'
print (msg)
logger.error(msg)
print (not_converged)
def test_comets_convergence(delta_days=50):
df = dc.DF_COMETS
#FILTERED_OBJS = ['C/1680 V1', 'C/1843 D1 (Great March comet)', 'C/1882 R1-A (Great September comet)', 'C/1882 R1-B (Great September comet)', 'C/1882 R1-C (Great September comet)', 'C/1882 R1-D (Great September comet)', 'C/1963 R1 (Pereyra)', 'C/1965 S1-A (Ikeya-Seki)', 'C/1965 S1-B (Ikeya-Seki)', 'C/1967 C1 (Seki)', 'C/1970 K1 (White-Ortiz-Bolelli)', 'C/2004 V13 (SWAN)', 'C/2011 W3 (Lovejoy)', 'C/2013 G5 (Catalina)', 'C/2020 U5 (PANSTARRS)']
#FILTERED_OBJS=['C/1827 P1 (Pons)']
FILTERED_OBJS=[]
if len(FILTERED_OBJS) != 0:
df = df[df.Name.isin(FILTERED_OBJS)]
result = []
df = df.sort_values('e', ascending=False)
for idx, name in enumerate(df['Name']):
obj = dc.read_comet_elms_for(name,df)
solver = KeplerianStateSolver.make(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q, epoch=obj.epoch_mjd)
T0_MJD = obj.tp_mjd-delta_days
r0_xyz, rdot0_xyz, r0, h0_xyz, _ , f0 = solver.calc_rv(T0_MJD)
kep_nc = uni_nc = 0
#print (f"Object {name} with e={obj.e}")
for dt in range(2,delta_days*2,2):
r1_xyz = rdot1_xyz = f1 = None
try :
r1_xyz, rdot1_xyz, r1, h1_xyz, _ , f1 = solver.calc_rv(T0_MJD+dt)
except NoConvergenceError :
kep_nc += 1
r2_xyz = rdot2_xyz = f2 = None
try :
r2_xyz, rdot2_xyz, h_xyz, f2 = calc_rv_from_r0v0(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)
except NoConvergenceError :
uni_nc += 1
print (f"The noconvergence was with e: {obj.e}")
if (kep_nc >0) or (uni_nc > 0) :
row = {}
row['name'] = name
row['e'] = obj.e
row['kep_nc'] = kep_nc
row['uni_nc'] = uni_nc
result.append(row)
df_out = pd.DataFrame(result)
if len(df_out) > 0:
print (f'There are {len(df_out)} comets with convergence problems')
df_out = df_out.sort_values(by=['uni_nc','kep_nc'],ascending=False)
df_out.to_csv('convergence_problems.csv',index=False,header=True)
else :
print ("Undetected no-convergences")
def test_universal_kepler(delta_days=50):
df = dc.DF_COMETS
FILTERED_OBJS=[]
#FILTERED_OBJS=['C/1933 D1 (Peltier)','C/1989 R1 (Helin-Roman)','C/2007 M5 (SOHO)','C/1988 M1 (SMM)','C/2008 C5 (SOHO)']
#FILTERED_OBJS=['C/2007 M5 (SOHO)']
# C/2000 O1 (Koehn)
# This one has high nonconverence with 500 C/2000 O1 (Koehn)
if len(FILTERED_OBJS) != 0:
df = df[df.Name.isin(FILTERED_OBJS)]
df = df.sort_values('e', ascending=False)
result = []
for idx, name in enumerate(df['Name']):
obj = dc.read_comet_elms_for(name,df)
#print (name)
solver = KeplerianStateSolver.make(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q, epoch=obj.epoch_mjd)
T0_MJD = obj.tp_mjd-delta_days
r0_xyz, rdot0_xyz, r0, h0_xyz, _ , f0 = solver.calc_rv(T0_MJD)
r_failed = v_failed = f_failed = nc_failed= 0
for dt in range(2,delta_days*2,2):
try :
r1_xyz, rdot1_xyz, r1, h1_xyz, _ , f1 = solver.calc_rv(T0_MJD+dt)
r2_xyz, rdot2_xyz, h2_xyz, f2 = calc_rv_from_r0v0(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)
e_xyz = calc_eccentricity_vector(r1_xyz, rdot1_xyz, h1_xyz)
f3 = angle_between_vectors(e_xyz, r1_xyz)
if not isclose(f1,f2,rel_tol=0, abs_tol=1e-03):
f_failed += 1
msg=f"name: {obj.name}, TWOPI - f univ: {TWOPI-f2} f Universal: {f2} f Kepler: {f1} e:{obj.e} f Excentricity: {f3} f Excentricity: {TWOPI-f3}"
logger.error(msg)
if not my_isclose(r1_xyz, r2_xyz, abs_tol=1e-03):
msg = f"name: {obj.name}, e: {obj.e}, diff_rxyz ={np.linalg.norm(r1_xyz- r2_xyz)} diff_rdotxyz: {np.linalg.norm(rdot1_xyz- rdot2_xyz)}"
logger.error(msg)
r_failed += 1
if not my_isclose (rdot1_xyz, rdot2_xyz, abs_tol=1e-03) :
v_failed += 1
except NoConvergenceError :
nc_failed += 1
if (f_failed >0) or (r_failed > 0) or (v_failed > 0) or (nc_failed > 0):
row = {}
row['name'] = name
row['e'] = obj.e
row['f_failed'] = f_failed
row['r_failed'] = r_failed
row['v_failed'] = v_failed
row['nc_failed'] = nc_failed
result.append(row)
df_out = pd.DataFrame(result)
if len(df_out) > 0:
print (f'There are {len(df_out)} comets with convergence problems')
#df_out = df_out.sort_values(by=['uni_nc','kep_nc'],ascending=False)
df_out.to_csv('kepler_universal.csv',index=False,header=True)
print (df_out)
else :
print ("No problems detected")
def test_enckes():
obj= dc.C_2003_M3_SOHO
eph = EphemrisInput(from_date="2001.03.01.0",
to_date = "2005.08.31.0",
step_dd_hh_hhh = "02 00.0",
equinox_name = EQX_J2000)
dfc = calc_eph_by_enckes(obj, eph)
def test_comet(name, delta_days=50):
obj = dc.read_comet_elms_for(name,dc.DF_COMETS)
solver = KeplerianStateSolver.make(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q, epoch=obj.epoch_mjd)
T0_MJD = obj.tp_mjd-delta_days
#print (f"Time interval considered: from:{mjd2str_date(T0_MJD-delta_days)} to {mjd2str_date(T0_MJD+delta_days)}")
r0_xyz, rdot0_xyz, r0, h0_xyz, _ , f0 = solver.calc_rv(T0_MJD)
max_diff_r = 0
for dt in range(2,delta_days*2,2):
try :
print (f"{mjd2str_date(T0_MJD+dt)}")
r1_xyz, rdot1_xyz, r1, h1_xyz, _ , f1 = solver.calc_rv(T0_MJD+dt)
r2_xyz, rdot2_xyz, h2_xyz, f2 = calc_rv_from_r0v0(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)
if not isclose(f1,f2, rel_tol=0, abs_tol=1e-03):
msg=f"{mjd2str_date(T0_MJD+dt)} f Uni:{f2} f Kepler:{f1} TWOPI-f:{TWOPI-f1}"
print (msg)
logger.error(msg)
if not my_isclose(r1_xyz, r2_xyz, abs_tol=1e-07):
diff_rxyz = np.linalg.norm(r1_xyz- r2_xyz)
if diff_rxyz > max_diff_r :
max_diff_r = diff_rxyz
print (f"Maximun distance at time:{mjd2str_date(T0_MJD+dt)}")
msg = f"{mjd2str_date(T0_MJD+dt)}, diff_rxyz ={np.linalg.norm(r1_xyz- r2_xyz)} diff_rdotxyz: { | np.linalg.norm(rdot1_xyz- rdot2_xyz) | numpy.linalg.norm |
# pylint: disable=missing-docstring
import unittest
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
import tf_encrypted as tfe
from tf_encrypted.private_model import PrivateModel
from tf_encrypted.private_model import secure_model
# TODO this is a bit weird
from tf_encrypted.convert.convert_test import read_graph, export_matmul
class TestPrivateModel(unittest.TestCase):
def test_private_model(self):
def provide_input():
return tf.placeholder(dtype=tf.float32, shape=[1, 2], name="api/0")
export_matmul("matmul.pb", [1, 2])
graph_def = read_graph("matmul.pb")
with tfe.protocol.Pond():
c = tfe.convert.convert.Converter(tfe.convert.registry())
y = c.convert(graph_def, 'input-provider', provide_input)
model = PrivateModel(y)
            output = model.private_predict(np.ones([1, 2]))
#!/usr/bin/env python
# ENABLE package for ASL
import os, sys
import glob
import shutil
import math
from optparse import OptionParser, OptionGroup
import nibabel as nib
import numpy as np
import scipy.stats
import scipy.special
from . import fslwrap as fsl
from .image import AslImage, AslOptionParser, AslWorkspace
__version__ = "v0.3.3-4-g88e961e"
__timestamp__ = "Thu Mar 1 11:02:28 2018"
def get_rois(wsp, t1, ref, noise_roi, gm_roi, options, log=sys.stdout):
log.write("Generating ROIs...\n")
# Bet the T1 - we will need this for the noise ROI and the GM ROI
log.write("Brain-extracting T1 image\n")
t1_bet, t1_mask = wsp.bet(t1, args="-B -f 0.3", mask=True)
if gm_roi is None:
# If we need a GM ROI use FAST to segment T1
log.write("Generating GM ROI by segmenting T1 image\n")
fastdir = wsp.sub("fast", imgs=[t1_bet])
gm_roi = fastdir.fast(t1_bet, args="-p")
d = gm_roi.data()
d[np.logical_or(d<1.5, d > 2.5)] = 0
gm_roi = gm_roi.derived(d, suffix="_GM")
wsp.add_img(gm_roi)
options.gm_from_t1 = True
if noise_roi is None:
# If we need a noise ROI, invert the T1 brain mask
log.write("Generating noise ROI by inverting T1 brain mask\n")
noise_roi = t1_mask.derived(1-t1_mask.data(), suffix="_inv")
wsp.add_img(noise_roi)
options.noise_from_t1 = True
if options.noise_from_t1 or options.gm_from_t1:
# Need to register T1 to ASL space so we can apply the transform the the
# either or both of the noise/GM ROIs
log.write("Registering ROIs to ASL space\n")
# Bet the reference image to use as a reg target
ref_bet = wsp.bet(ref)
# This is done to avoid the contrast enhanced rim resulting from low intensity ref image
d = ref_bet.data()
thr_ref = np.percentile(d[d!=0], 10.0)
d[d<thr_ref] = 0
raw_bet = ref_bet.derived(d)
wsp.add_img(raw_bet)
asl2t1, asl2t1_mat, t12asl_mat = wsp.flirt(ref_bet, t1_bet, args="-dof 7",
output_name="ASL_2T1",
output_mat="ASL_2T1.mat",
output_invmat="T1_2ASL.mat")
if options.noise_from_t1:
# Register noise ROI to ASL space since it was defined in T1 space
noise_roi = wsp.apply_xfm(noise_roi, ref_bet, t12asl_mat,
args="-interp nearestneighbour",
output_name="%s_2asl" % noise_roi.iname)
if options.gm_from_t1:
# Register GM ROI to ASL space since it was defined in T1 space
gm_roi = wsp.apply_xfm(gm_roi, ref_bet, t12asl_mat,
args="-interp nearestneighbour",
output_name="%s_2asl" % gm_roi.iname)
log.write("DONE\n\n")
return noise_roi, gm_roi
def tsf(df, t):
"""
Survival function (1-CDF) of the t-distribution
Heuristics to agree with FSL ttologp but not properly justified
"""
if t < 0:
return 1-tsf(df, -t)
elif t > 700:
return scipy.stats.t.sf(t, df)
else:
return 1-scipy.special.stdtr(df, t)
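# Hedged sanity check (not in the original package): away from the extreme-tail heuristics,
# tsf should agree with scipy's t-distribution survival function.
def _check_tsf(df=10, t=2.5):
    return np.isclose(tsf(df, t), scipy.stats.t.sf(t, df))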
def calculate_cnr(asl_data, gm_roi, noise_roi, log=sys.stdout):
"""
Sort ASL images based on CNR (Contrast:Noise ratio).
CNR is defined as mean difference signal divided by nonbrain standard deviation
:param asl_data: Differenced single-TI ASL data
:param gm_roi: Grey-matter mask
:param noise_roi: Noise mask (typically all non-brain parts of the image)
:returns: 4D image in which volumes have been sorted from highest to lowest CNR
"""
log.write("Sort ASL-diff images based on CNR...\n")
tdim = asl_data.shape[3]
cnrs = []
for i in range(tdim):
vol_data = asl_data.data()[:,:,:,i].astype(np.float32)
meanGM = np.mean(vol_data[gm_roi.data() > 0])
noisestd = np.std(vol_data[noise_roi.data() > 0])
cnr = meanGM/noisestd
if cnr < 0:
log.write("WARNING: CNR was negative - are your tag-control pairs the right way round?")
cnrs.append(cnr)
return cnrs
def sort_cnr(asl_data, cnrs, log=sys.stdout):
# Sort by decreasing CNR
sorted_cnrs = sorted(enumerate(cnrs), key=lambda x: x[1], reverse=True)
# Create re-ordered data array
log.write("Images sorted by CNR\n\n")
log.write("Volume\tCNR\n")
sorted_data = np.zeros(asl_data.shape)
for idx, cnr in enumerate(sorted_cnrs):
vol_data = asl_data.data()[:,:,:,cnr[0]].astype(np.float32)
sorted_data[:,:,:,idx] = vol_data
log.write("%i\t%.3f\n" % (cnr[0], cnr[1]))
log.write("DONE\n\n")
sorted_cnrs = [(idx, cnr[0], cnr[1]) for idx, cnr in enumerate(sorted_cnrs)]
return asl_data.derived(sorted_data, suffix="_sorted"), sorted_cnrs
def calculate_quality_measures(asl_data, gm_roi, noise_roi, min_nvols, log=sys.stdout):
"""
Calculate quality measures for ASL data, sorted by CNR
"""
log.write("Calculate quality measures...\n")
if min_nvols < 2:
raise RuntimeError("Need to keep at least 2 volumes to calculate quality measures")
tdim = asl_data.shape[3]
gm_roi = gm_roi.data()
noise_roi = noise_roi.data()
num_gm_voxels = np.count_nonzero(gm_roi)
log.write("Volumes\ttCNR\tDETECT\tCOV\ttSNR\n")
qms = {"tcnr" : [], "detect" : [], "cov" : [], "tsnr" : []}
for i in range(min_nvols, tdim+1, 1):
temp_data = asl_data.data()[:,:,:,:i]
mean = np.mean(temp_data, 3)
std = np.std(temp_data, 3, ddof=1)
# STD = 0 means constant data across volumes, do something sane
std[std == 0] = 1
mean[std == 0] = 0
snr = mean / std
serr = std / math.sqrt(float(i))
tstats = mean / serr
# Annoyingly this is slower than using ttologp. scipy.special.stdtr
# is fast but not accurate enough. Need to understand exactly what
# this is doing, however, because it seems to rely on 'anything below
# float32 minimum == 0'
calc_p = np.vectorize(lambda x: tsf(i, x))
p1 = calc_p(tstats).astype(np.float32)
p1[p1 > 0.05] = 0
sigvox2 = p1
        sigvoxGM2 = np.count_nonzero(sigvox2[gm_roi > 0])
import numpy as np
import os
import warnings
import collections
import copy
import time
class Env(object):
def __init__(self, args, data):
self.args = args
self.rnd = np.random.RandomState(seed= args['random_seed'])
self.input_data = data
self.n_nodes = data.shape[1]
self.v_t = args['v_t']
self.v_d = args['v_d']
self.batch_size = data.shape[0]
# print("Using Not revisiting nodes")
def reset(self):
self.batch_size = self.input_data[:, :, :2].shape[0]
self.input_pnt = self.input_data[:, :, :2]
self.dist_mat = np.zeros([self.batch_size, self.n_nodes, self.n_nodes])
for i in range(self.n_nodes):
for j in range(i+1, self.n_nodes):
self.dist_mat[:, i, j] = ((self.input_pnt[:, i, 0] - self.input_pnt[:, j, 0])**2 + (self.input_pnt[:, i, 1] - self.input_pnt[:, j, 1])**2)**0.5
self.dist_mat[:, j, i] = self.dist_mat[:, i, j]
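        # Hedged note (not in the original code): the pairwise Euclidean distances above could
        # be built without the double loop, e.g.
        # np.linalg.norm(self.input_pnt[:, :, None, :] - self.input_pnt[:, None, :, :], axis=-1)
        # produces the same symmetric (batch, n_nodes, n_nodes) matrix in one shot.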
self.drone_mat = self.dist_mat/self.v_d
avail_actions = np.ones([self.batch_size, self.n_nodes, 2], dtype=np.float32)
avail_actions[:, self.n_nodes-1, :] = np.zeros([self.batch_size, 2])
self.state = np.ones([self.batch_size, self.n_nodes])
self.state[:, self.n_nodes-1] = np.zeros([self.batch_size])
self.sortie = np.zeros(self.batch_size)
self.returned = np.ones(self.batch_size)
self.current_time = np.zeros(self.batch_size)
self.truck_loc = np.ones([self.batch_size], dtype=np.int32)*(self.n_nodes-1)
self.drone_loc = np.ones([self.batch_size], dtype=np.int32)*(self.n_nodes-1)
# self.combined_nodes = np.zeros([self.batch_size, self.n_nodes])
# self.combined_check = np.zeros([self.batch_size, self.n_nodes])
dynamic = np.zeros([self.batch_size, self.n_nodes, 2], dtype=np.float32)
dynamic[:, :, 0] = self.dist_mat[:, self.n_nodes-1]
dynamic[:, :, 1] = self.drone_mat[:, self.n_nodes-1]
return dynamic, avail_actions
def step(self, idx_truck, idx_drone, time_vec_truck, time_vec_drone, terminated):
old_sortie = copy.copy(self.sortie)
# compute which action occurs first
t_truck = self.dist_mat[np.arange(self.batch_size, dtype=np.int64), self.truck_loc, idx_truck]
t_drone = self.drone_mat[np.arange(self.batch_size, dtype=np.int64), self.drone_loc, idx_drone]
# only count nonzero time movements: if trucks/drones stay at the same place, update based on other actions
A = t_truck+ np.equal(t_truck, np.zeros(self.batch_size)).astype(int)*np.ones(self.batch_size)*10000
B = t_drone+ np.equal(t_drone, np.zeros(self.batch_size)).astype(int)*np.ones(self.batch_size)*10000
C = time_vec_truck[:, 1]+ np.equal(time_vec_truck[:, 1], np.zeros(self.batch_size)).astype(int)*np.ones(self.batch_size)*10000
D = time_vec_drone[:, 1]+ np.equal(time_vec_drone[:, 1], np.zeros(self.batch_size)).astype(int)*np.ones(self.batch_size)*10000
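        # e.g. a sample whose truck stays put has t_truck == 0, so A becomes 10000
        # and that stalled action can never determine time_step in the minimum below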
time_step = np.minimum.reduce([A, B, C, D])
b_s = np.where(terminated==1)[0]
time_step[b_s] = np.zeros(len(b_s))
self.time_step = time_step
self.current_time += time_step
time_vec_truck[:, 1]+=np.logical_and(np.equal(time_vec_truck[:, 1], np.zeros(self.batch_size)),
np.greater(t_truck, np.zeros(self.batch_size))).astype(int)*(t_truck-time_step) - \
np.greater(time_vec_truck[:, 1], np.zeros(self.batch_size))*(time_step)
time_vec_drone[:, 1]+=np.logical_and(np.equal(time_vec_drone[:, 1], np.zeros(self.batch_size)),
np.greater(t_drone, np.zeros(self.batch_size))).astype(int)*(t_drone-time_step) - \
np.greater(time_vec_drone[:, 1], np.zeros(self.batch_size))*(time_step)
# self.truck_loc += np.logical_and(np.less_equal(time_step, t_truck), np.equal(time_vec_truck[:, 1], np.zeros(self.batch_size)))*(idx_truck - self.truck_loc)
# self.drone_loc += np.logical_and(np.less_equal(time_step, t_drone), np.equal(time_vec_drone[:, 1], np.zeros(self.batch_size)))*(idx_drone - self.drone_loc)
self.truck_loc += np.equal(time_vec_truck[:, 1], np.zeros(self.batch_size))*(idx_truck - self.truck_loc)
self.drone_loc += np.equal(time_vec_drone[:, 1], np.zeros(self.batch_size))*(idx_drone - self.drone_loc)
time_vec_truck[:, 0] = np.logical_and(np.less(time_step, t_truck), np.greater(time_vec_truck[:, 1], np.zeros(self.batch_size)))*idx_truck
time_vec_drone[:, 0] = np.logical_and(np.less(time_step, t_drone), np.greater(time_vec_drone[:, 1], np.zeros(self.batch_size)))*idx_drone
        # update demand served by truck and drone
b_s = np.where(np.equal(time_vec_truck[:, 1], np.zeros(self.batch_size)))[0]
self.state[b_s, idx_truck[b_s]] = np.zeros(len(b_s))
idx_satis = np.where(np.less(self.sortie-np.equal(time_vec_drone[:, 1], 0), np.zeros(self.batch_size)))[0]
self.state[idx_satis, idx_drone[idx_satis]]-= np.equal(time_vec_drone[idx_satis, 1], np.zeros(len(idx_satis)))*self.state[idx_satis, idx_drone[idx_satis]]
# update sortie if drone served customer
self.sortie[idx_satis] = np.ones(len(idx_satis))
a = np.equal((self.truck_loc==self.drone_loc).astype(int)+(time_vec_drone[:, 1]==0).astype(int)+(time_vec_truck[:, 1]==0).astype(int), 3)
# b = np.equal((self.combined_nodes[np.arange(self.batch_size), self.truck_loc]==1).astype(int) + a.astype(int), 2)
idx_stais = np.where(np.expand_dims(a, 1))[0]
self.sortie[idx_stais] = np.zeros(len(idx_stais))
self.returned = np.ones(self.batch_size) - np.equal((old_sortie==1).astype(int) +(self.sortie==1).astype(int)+(time_vec_drone[:, 1]==0).astype(int), 3)
self.returned[idx_stais] = np.ones(len(idx_stais))
# self.combined_nodes[idx_stais, self.truck_loc[idx_stais]] = 1
# b_s = np.where(b)[0]
# self.combined_check[b_s, idx_truck[b_s]] =1
#######################################################################################
# masking scheme
#######################################################################################
avail_actions = np.zeros([self.batch_size, self.n_nodes, 2], dtype=np.float32)
# for unfinished actions of truck: make only unfinished actions available
b_s = np.where(np.expand_dims(time_vec_truck[:, 1], 1)>0)[0]
idx_fixed = time_vec_truck[b_s, np.zeros(len(b_s), dtype=np.int64)]
avail_actions[b_s, idx_fixed.astype(int), 0] = np.ones(len(b_s))
# for unfinished actions of drone: make only unfinished actions available
b_s_d = np.where(np.expand_dims(time_vec_drone[:, 1], 1)>0)[0]
idx_fixed_d = time_vec_drone[b_s_d, np.zeros(len(b_s_d), dtype=np.int64)]
avail_actions[b_s_d, idx_fixed_d.astype(int), 1] = np.ones(len(b_s_d))
        # otherwise, select any node with unsatisfied demand regardless of sortie value
a = np.equal(np.greater_equal(time_vec_truck[:, 1], 0).astype(int) + np.equal(time_vec_drone[:, 1], 0).astype(int), 2)
b_s = np.where(np.expand_dims(a, 1))[0]
avail_actions[b_s, :, 1] = np.greater(self.state[b_s, :], 0)
# if drone has already selected returning node make it stay there
a =np.equal(np.equal(self.returned, 0).astype(int)+np.equal(time_vec_drone[:, 1], 0).astype(int), 2)
b_s = np.where(np.expand_dims(a, 1))[0]
avail_actions[b_s, :, 1] = 0
avail_actions[b_s, self.drone_loc[b_s], 1] = 1
# for drone if the action is finished and sortie == 1 let the drone select comb nodes
# a = np.equal(np.equal(time_vec_drone[:, 1], 0).astype(int)+np.equal(self.sortie, 1).astype(int)+np.equal(self.returned, 1).astype(int), 3)
# b_s = np.where(a)[0]
# avail_actions[b_s, :, 1] += (self.combined_nodes[b_s] - self.combined_check[b_s])*np.expand_dims(np.greater(self.state[b_s].sum(axis=1), 2).astype(int), 1)*np.ones([len(b_s), self.n_nodes])
# avail_actions[b_s, :, 1] = np.greater(avail_actions[b_s, :, 1], 0)
# for truck that finished action select any node with customer demand
b_s = np.where(np.expand_dims(time_vec_truck[:, 1], 1)==0)[0]
        avail_actions[b_s, :, 0] += np.greater(self.state[b_s, :], 0)
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: RainbowSecret
## Microsoft Research
## <EMAIL>
## Copyright (c) 2018
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import argparse
import scipy
from scipy import ndimage
import torch, cv2
import numpy as np
import sys
import pdb
import sys
sys.path.insert(1, '/home/project/domain-adapt')
from torch.autograd import Variable
import torchvision.models as models
import torch.nn.functional as F
from torch.utils import data
from collections import OrderedDict
from OCNet.data.dataset import get_segmentation_dataset
from OCNet.model.network import get_segmentation_model
from OCNet.model.config import Parameters
import os
import scipy.ndimage as nd
from math import ceil
from PIL import Image as PILImage
import matplotlib.pyplot as plt
import torch.nn as nn
torch_ver = torch.__version__[:3]
def get_palette(num_cls):
""" Returns the color map for visualizing the segmentation mask.
Args:
num_cls: Number of classes
Returns:
The color map
"""
n = num_cls
palette = [0] * (n * 3)
for j in range(0, n):
lab = j
palette[j * 3 + 0] = 0
palette[j * 3 + 1] = 0
palette[j * 3 + 2] = 0
i = 0
while lab:
palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
i += 1
lab >>= 3
return palette
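def colorize_mask_example(mask, num_cls):
    """Illustrative helper (hypothetical name, not part of the original script):
    render a 2D array of class indices as a paletted image using get_palette above."""
    out = PILImage.fromarray(np.asarray(mask, dtype=np.uint8)).convert('P')
    out.putpalette(get_palette(num_cls))
    return out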
def pad_image(img, target_size):
"""Pad an image up to the target size."""
rows_missing = target_size[0] - img.shape[2]
cols_missing = target_size[1] - img.shape[3]
    padded_img = np.pad(img, ((0, 0), (0, 0), (0, rows_missing), (0, cols_missing)), 'constant')
    return padded_img
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# PYTHON VERSION OF MATTHEW LEONAWICZ's DOF/DOT/LOGS R Script
# --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
# Author: <NAME> -- 2019 -- <EMAIL>
# LICENSE: MIT
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def tfg_days( x, err='off' ):
''' calculate DOF/DOT/LOGS for a vector of 12 chronological monthly values '''
import itertools
import numpy as np
# filter the div by zero and comparison with np.nan warnings from numpy
if err == 'off':
np.warnings.filterwarnings( "ignore", category=RuntimeWarning )
x[ x == 0 ] = -0.0001 # need to treat zero as freezing (working with signs)
# positive or negative monthly temps
s1 = np.sign( x )
# products of consecutive months' signs: positive indicates no change; negative indicates a potential freeze or thaw transition
s = s1[:11] * s1[1:]
idx, = np.where( s < 0 )
# may be length zero (no transitions)
ind = np.sort( np.concatenate( [idx, idx+1] ) )
    if np.any( np.isnan( x ) ): # ignore cells with missing data
dot, dof, grow = itertools.repeat( np.array([np.nan]), 3 )
case = 1
elif (len(ind) == 0) & (s1[0] > 0): # no transitions: all positive temps means no freeze day
dot = 0
dof, grow = itertools.repeat( np.array([365]), 2 )
case = 2
elif (len(ind) == 0) & (s1[0] < 0): # no transitions: all negative temps means no thaw day
dot = np.array([365])
dof, grow = itertools.repeat( np.array([0]), 2 )
case = 3
# [ML FIXED]
elif len(ind) == 2: # only one transition during the year, thawing or freezing
# places where we know the ground freezes and thaws,
# but during a specific 12 months we just don't happen to witness both
# only thaw occurs
if x[ ind[0] ] < 0:
            # [ml] note: in ((ind[0]+1)-1), ind[0]+1 is the month number and the -1 steps back to the previous month
# we could make that a call to a months array -- months = range(1, 12+1)
dot = 15 + 30 * ((ind[0]+1)-1) - np.round( x[ ind[0] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
dof = np.array([350]) # 350: we know the ground freezes so we use 350 rather than the special 365
grow = dof - dot
case = 4
# only freeze occurs
if x[ ind[0] ] > 0:
dof = 350 - 30 * (12-ind[1]-1) - np.round( x[ ind[1] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
            dot = np.array([15])
import numpy as np
import matplotlib
import platform
if platform.system() == 'Darwin':
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib import rcParams
import torch
from datetime import datetime
import time
import pickle
import os
import seaborn as sns
import matplotlib.pylab as plt
from scipy.special import softmax
import json
from double_well_model import *
from metropolis import MetropolisHastings
from utils import *
from nflib.MADE import *
from nflib.flows import *
from nflib.spline_flows import NSF_AR, NSF_CL
import itertools
import os
cwd = os.getcwd()
print('current directory', cwd)
def main(params):
# setting device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
params['device'] = device
if torch.cuda.is_available():
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# timing the entire run.
start_time = time.time()
if params['random_seed'] == 0:
params['random_seed'] = np.random.randint(1,100)
# setting the random seeds
torch.manual_seed(params['random_seed'])
np.random.seed(params['random_seed'])
# Creating new directory to save all run outputs in
date_time = str(datetime.now()).replace(' ', '_').replace(':', '_') # ensures there aren't any issues saving this as a file name.
experiment_name = params['exp_base_name']+"_rand_seed-%s_ML_epochs-%s_KL_epochs-%s_learning_rate-%s_MLweight-%s_KLweight-%s_explore%s_temperature-%s_s_time-%s" % (
params['random_seed'], params['MLepochs'], params['KLepochs'],
params['lr'], params['MLweight'], params['KLweight'],
params['explore'], params['temperature'], date_time )
os.mkdir('experiments/'+experiment_name)
experiment_dir = 'experiments/'+ experiment_name+'/'
# write out all of the parameters used into a text file:
with open(experiment_dir+ 'params_used.txt', 'w') as file:
file.write(json.dumps(params, cls=NumpyEncoder))
# loading in the environment class, used to score the evolutionary hamiltonians
well_params = DoubleWell.params_default.copy()
well_params['dim'] = 2
gen_model = DoubleWell(params=well_params)
if params['MCMC'] == True:
nsteps = 20000
x0_left = np.array([[-1.8, 0.0]])
x0_right = np.array([[1.8, 0.0]])
sampler = MetropolisHastings(gen_model, x0=x0_left, noise=0.1,
stride=10, mapper=None,
is_discrete=False)
data1 = sampler.run(nsteps)
sampler = MetropolisHastings(gen_model, x0=x0_right, noise=0.1,
stride=10, mapper=None,
is_discrete=False)
data2 = sampler.run(nsteps)
data = np.concatenate([data1, data2 ], axis=0)
print('amount of concat data', data.shape)
print('the size of all data to be used (train and val)', data.shape)
# make data a torch tensor
data = torch.from_numpy(data).float().to(device)
# prepare transition state
x_ts = np.vstack([np.zeros(1000), (1.0/gen_model.params['k']) * np.random.randn(1000)]).T
# make train test split
rand_inds = np.random.choice(np.arange(data.shape[0]), params['tda'], replace=False)
train_set = rand_inds[: (params['tda']//2) ]
test_set = rand_inds[ (params['tda']//2): ]
x = data[train_set, :]
xval = data[test_set, :]
print('shape of data used for training', x.shape)
# plotting the training and Xval dataset energy histograms:
for dset, name in zip([x, xval], ['Train', 'XVal']):
plt.figure()
scores = gen_model.energy(dset.cpu().detach().numpy())
plt.hist(scores, bins=100)
plt.gcf().savefig(experiment_dir+'Expectation_'+name+'_Data_Hist.png', dpi=100)
plt.close()
# ======= setting up the normalizing flows:
# logistic distribution
# base = TransformedDistribution(Uniform(torch.zeros(gen_model.dim), torch.ones(gen_model.dim)), SigmoidTransform().inv)
base = torch.distributions.multivariate_normal.MultivariateNormal(torch.zeros(gen_model.dim), torch.eye(gen_model.dim))
if params['model_type'] == 'realNVP':
# RealNVP
# used to have 9 layers
flows = [AffineHalfFlow(dim=gen_model.dim, parity=i%2, nh=params['hidden_dim'], block_mask=params['block_mask']) for i in range(params['num_layers'])]
if params['model_type'] == 'NICE':
# NICE
# 4 layers
flows = [AffineHalfFlow(dim=gen_model.dim, parity=i%2, nh=params['hidden_dim'] ,scale=False, block_mask=params['block_mask']) for i in range(params['num_layers'])]
flows.append(AffineConstantFlow(dim=gen_model.dim, shift=False))
if params['model_type'] == 'slowMAF':
#SlowMAF (MAF, but without any parameter sharing for each dimension's scale/shift)
#4 layers
flows = [SlowMAF(dim=gen_model.dim, parity=i%2, nh=params['hidden_dim']) for i in range(params['num_layers'])]
if params['model_type'] == 'MAF':
# MAF (with MADE net, so we get very fast density estimation)
# 4 layers
flows = [MAF(dim=gen_model.dim, parity=i%2, nh=params['hidden_dim']) for i in range(params['num_layers'])]
# Neural splines, coupling
if params['model_type'] == 'neuralSpline':
nfs_flow = NSF_CL if True else NSF_AR
# MAY WANT TO CHANGE THIS HIDDEN_DIM SIZE!
# 3 layers
flows = [nfs_flow(dim=gen_model.dim, K=8, B=3, hidden_dim=params['hidden_dim']) for _ in range(params['num_layers'])]
convs = [Invertible1x1Conv(dim=gen_model.dim) for _ in flows]
# PREVIOUSLY WAS ACTNORM BUT THIS CLEVER INIT DOESNT WORK FOR ONEHOTS
norms = [AffineConstantFlow(dim=gen_model.dim) for _ in flows]
flows = list(itertools.chain(*zip(norms, convs, flows)))
network = NormalizingFlowModel(base, flows, gen_model)
network.flow.to(device)
print('data', data1.shape)
# printing out where the samples are from
plt.figure()
plt.scatter(data1[:,0], data1[:,1], color='blue')
plt.scatter(data2[:,0], data2[:,1], color='red')
plt.gcf().savefig(experiment_dir+'training_data.png', dpi=100)
plt.close()
plt.figure()
plt.hist(data1[:,0], color='blue')
plt.hist(data2[:,0], color='red')
plt.gcf().savefig(experiment_dir+'training_data_hist.png', dpi=100)
plt.close()
if params['MLepochs']>0:
# only ML training.
ML_losses = network.train_flexible(x, xval=xval, lr=params['lr'], std=params['latent_std'], epochs=params['MLepochs'], batch_size=params['MLbatch'],
verbose=params['verbose'], clipnorm=params['gradient_clip'], weight_KL=0.0,
save_partway_inter=params['save_partway_inter'], experiment_dir=experiment_dir)
ML_losses = ML_losses['total_loss']
print('done with ML training')
# TODO: Add in temperature for sampling: temperature=params['temperature']
#exp_energy_x, hard_energy_x = network.sample_energy(num_samples=5000, temperature=params['temperature'] )
plt.figure()
fig, axes = plot_network(network, gen_model, data1, data2, x_ts, weight_cutoff=1e-2)
fig.savefig(experiment_dir+'ML_only_network_plot.png', dpi=100)
plt.close()
plt.figure()
plt.plot(ML_losses, label='training')
#plt.plot(network1.history['val_loss'], label='validation')
plt.legend()
plt.gcf().savefig(experiment_dir+'Post_ML_LossCurves.png', dpi=100)
plt.close()
torch.save(network.flow.state_dict(), experiment_dir+'Model_Post_ML_Training.torch')
pickle.dump(ML_losses, open(experiment_dir+'ML_only_losses_dict.pickle','wb'))
if params['KL_only']:
KL_losses = network.train_flexible(x, weight_ML=0.0, weight_entropy = params['Entropyweight'],
epochs=params['KLepochs'], lr=params['lr'],
batch_size=params['KLbatch'], temperature=params['temperature'],
explore=params['explore'], verbose=params['verbose'],
save_partway_inter=params['save_partway_inter'],
experiment_dir=experiment_dir, clipnorm=params['gradient_clip'])
KL_losses = KL_losses['total_loss']
plt.figure()
plt.plot(KL_losses, label='training')
#plt.plot(KL_losses, label='validation')
plt.legend()
plt.gcf().savefig(experiment_dir+'Post_KL_LossCurves.png', dpi=100)
plt.close()
torch.save(network.flow.state_dict(), experiment_dir+'Model_Post_KL_Training.torch')
pickle.dump(KL_losses, open(experiment_dir+'KL_only_losses_dict.pickle','wb'))
else:
ML_KL_losses = network.train_flexible(x, xval=xval, lr=params['lr'], std=params['latent_std'], epochs=params['KLepochs'], batch_size=params['KLbatch'],
weight_ML=params['MLweight'], weight_KL=params['KLweight'],
temperature=params['temperature'], explore=params['explore'], verbose=params['verbose'],
save_partway_inter=params['save_partway_inter'], clipnorm=params['gradient_clip'],
experiment_dir=experiment_dir, weight_entropy = params['Entropyweight'])
for loss_to_plot in ['total_loss', 'ld_loss', 'kl_loss', 'ml_loss']:
print('to plot', loss_to_plot, len(ML_KL_losses[loss_to_plot]))
plt.figure()
plt.plot(ML_KL_losses[loss_to_plot])
plt.gcf().savefig(experiment_dir+'Post_KL_'+loss_to_plot+'_LossCurve.png', dpi=100)
plt.close()
pickle.dump(ML_KL_losses, open(experiment_dir+'ML_KL_losses_dict.pickle','wb'))
plt.figure()
fig, axes = plot_network(network, gen_model, data1, data2, x_ts, weight_cutoff=1e-2)
fig.savefig(experiment_dir+'MLandKL_network_plot.png', dpi=100)
plt.close()
total_time = time.time() - start_time
print('======== total time for this run in minutes', total_time/60)
with open(experiment_dir+ 'time_taken.txt', 'w') as file:
file.write('Total time taken was: ' + str(total_time))
def plot_network(network, gen_model, traj_left, traj_right, x_ts,
weight_cutoff=1e-2,):
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(16, 3.5))
plt.subplots_adjust(wspace=0.25)
# Plot X distribution
axis = axes[0]
axis.plot(traj_left[:, 0], traj_left[:, 1], linewidth=0, marker='.', markersize=3, color='blue')
axis.plot(x_ts[:, 0], x_ts[:, 1], linewidth=0, marker='.', markersize=3, color='orange')
axis.plot(traj_right[:, 0], traj_right[:, 1], linewidth=0, marker='.', markersize=3, color='red')
axis.set_xlabel('$x_1$')
axis.set_xlim(-3, 3)
axis.set_ylabel('$x_2$', labelpad=-12)
axis.set_ylim(-4, 4)
axis.set_yticks([-4, -2, 0, 2, 4])
# Plot Z distribution
axis = axes[1]
with torch.no_grad():
z_left, _, _ = network.forward(torch.from_numpy(traj_left).float())
z_ts, _, _ = network.forward( torch.from_numpy(x_ts).float())
z_right, _, _ = network.forward( torch.from_numpy(traj_right).float())
axis.plot(z_left[:, 0], z_left[:, 1], linewidth=0, marker='.', markersize=3, color='blue')
axis.plot(z_ts[:, 0], z_ts[:, 1], linewidth=0, marker='.', markersize=3, color='orange')
axis.plot(z_right[:, 0], z_right[:, 1], linewidth=0, marker='.', markersize=3, color='red')
circle = plt.Circle((0, 0), radius=1.0, color='black', alpha=0.4, fill=True)
axis.add_artist(circle)
circle = plt.Circle((0, 0), radius=2.0, color='black', alpha=0.25, fill=True)
axis.add_artist(circle)
circle = plt.Circle((0, 0), radius=3.0, color='black', alpha=0.1, fill=True)
axis.add_artist(circle)
axis.set_xlabel('$z_1$')
axis.set_xlim(-4, 4)
axis.set_ylabel('$z_2$', labelpad=-12)
axis.set_ylim(-4, 4)
axis.set_yticks([-4, -2, 0, 2, 4])
# Plot proposal distribution
# getting samples and histograms.
X1, Y1 = test_sample(network, temperature=1.0, plot=False) # bin means and then negative log of empirical x0 frequencies.
_, W1 = hist_weights(network)
axis = axes[2]
# this is a grid of energies that are plotted as a line. ground truth.
_, E = gen_model.plot_dimer_energy(axis=axis, temperature=1.0)
Y1 = Y1 - Y1.min() + E.min()
Inan = np.where(W1 < weight_cutoff)
Y1[Inan] = np.nan
#Y2 = Y2 - Y2.min() + E.min()
#axis.plot(X2, Y2, color='#FF6600', linewidth=2, label='ML+KL+RC')
axis.plot(X1, Y1, color='orange', linewidth=2, label='ML+KL')
axis.set_xlim(-3, 3)
axis.set_ylim(-12, 5.5)
axis.set_yticks([])
axis.set_xlabel('$x_1$')
axis.set_ylabel('Energy / kT')
#plt.legend(ncol=1, loc=9, fontsize=12, frameon=False)
# Plot reweighted distribution
RX1, RY1, DR1 = test_sample_rew(network, gen_model, temperature=1.0, plot=False)
axis = axes[3]
Ex, E = gen_model.plot_dimer_energy(axis=axis, temperature=1.0)
RY1 = RY1 - RY1[np.isfinite(RY1)].min() + E.min()
RY1[Inan] = np.nan
#RY1[RY1 > -4] = np.nan
#RY2 = RY2 - RY2[np.isfinite(RY2)].min() + E.min()
#axis.errorbar(RX2, RY2, DR2, color='#FF6600', linewidth=2, label='ML+KL+RC')
axis.errorbar(RX1, RY1, DR1, color='orange', linewidth=2, label='ML+KL')
axis.set_xlim(-3, 3)
axis.set_ylim(-12, 5.5)
axis.set_yticks([-12, -10, -8, -6, -4, -2, 0, 2, 4])
axis.set_xlabel('$x_1$')
axis.set_ylabel('')
return fig, axes
def test_sample(network, temperature=1.0, nsample=100000, plot=True):
if nsample <= 100000:
sample_x = network.sample_xs(temperature=temperature, num_samples=nsample)
else:
        # sample in chunks of 100000 to limit memory use, then concatenate
        chunks = []
        for _ in range(int(nsample/100000)):
            chunks.append(network.sample_xs(temperature=temperature, num_samples=100000))
        sample_x = torch.cat(chunks, dim=0)
sample_x = sample_x.detach().numpy()
# xgen = network.Tzx.predict(np.sqrt(temperature) * np.random.randn(100000, 2))
params = DoubleWell.params_default.copy()
params['dim'] = 2
double_well = DoubleWell(params=params)
plt.figure(figsize=(4, 4))
h, b = np.histogram(sample_x[:, 0], bins=100)
# h is the numbers in each bin.
bin_means = (b[:-1] + b[1:])/2
Eh = -np.log(h) / temperature # log of numbers in each. this brings it down from the boltzmann.
if plot:
Ex, E = double_well.plot_dimer_energy(temperature=temperature)
Eh = Eh - Eh.min() + E.min() # from the lowest real energy E, have the increase in energy on a log scale.
plt.plot(bin_means, Eh, color='green', linewidth=2)
return bin_means, Eh
def hist_weights(network):
sample_x, log_w = network.sample_log_w(temperature=1.0, num_samples=100000)
log_w -= log_w.max()
bins = np.linspace(-2.5, 2.5, 100)
bin_means = (bins[:-1] + bins[1:]) /2
sample_x_index = np.digitize(sample_x[:, 0], bins)
whist = np.zeros(len(bins) + 1)
for i in range(len(log_w)):
whist[sample_x_index[i]] += np.exp(log_w[i])
return bin_means, whist[1:-1]
# reweighting
def test_sample_rew(network, gen_model, temperature=1.0, plot=True):
sample_x, log_w = network.sample_log_w(temperature=1.0, num_samples=100000)
log_w -= log_w.max()
bin_means, Es = free_energy_bootstrap(sample_x[:, 0], bins=100, nbootstrap=100, log_weights=log_w)
plt.figure(figsize=(4, 4))
Emean = mean_finite(Es, axis=0)-10.7
Estd = std_finite(Es, axis=0)
var = mean_finite(std_finite(Es, axis=0) ** 2)
if plot:
gen_model.plot_dimer_energy()
plt.errorbar(bin_means, Emean, Estd, linewidth=2, color='green')
# variance
print('Estimator Standard Error: ', np.sqrt(var))
return bin_means, Emean, Estd
def mean_finite_(x, min_finite=1):
""" Computes mean over finite values """
isfin = np.isfinite(x)
if np.count_nonzero(isfin) > min_finite:
return np.mean(x[isfin])
else:
return np.nan
def std_finite_(x, min_finite=2):
""" Computes mean over finite values """
isfin = np.isfinite(x)
if np.count_nonzero(isfin) >= min_finite:
return np.std(x[isfin])
else:
return np.nan
def mean_finite(x, axis=None, min_finite=1):
if axis is None:
return mean_finite_(x)
if axis == 0 or axis == 1:
M = np.zeros((x.shape[axis-1],))
for i in range(x.shape[axis-1]):
if axis == 0:
M[i] = mean_finite_(x[:, i])
else:
M[i] = mean_finite_(x[i])
return M
else:
raise NotImplementedError('axis value not implemented:', axis)
def std_finite(x, axis=None, min_finite=2):
if axis is None:
        return std_finite_(x)
if axis == 0 or axis == 1:
S = np.zeros((x.shape[axis-1],))
for i in range(x.shape[axis-1]):
if axis == 0:
S[i] = std_finite_(x[:, i])
else:
S[i] = std_finite_(x[i])
return S
else:
raise NotImplementedError('axis value not implemented:', axis)
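def _finite_stats_example():
    # Illustrative sketch (not part of the original analysis): the *_finite helpers
    # skip non-finite entries, e.g. empty histogram bins where -log(0) produced inf.
    vals = np.array([1.0, np.inf, 3.0])
    assert mean_finite_(vals) == 2.0
    return mean_finite_(vals), std_finite_(vals)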
def free_energy_bootstrap(D, bins=100, range=None, log_weights=None, bias=None, temperature=1.0,
nbootstrap=100, align_bins=None):
""" Bootstrapped free energy calculation
If D is a single array, bootstraps by sample. If D is a list of arrays, bootstraps by trajectories
Parameters
----------
D : array of list of arrays
Samples in the coordinate in which we compute the free energy
bins : int
Number of bins
range : None or (float, float)
value range for bins, if not given will be chosen by min and max values of D
nbootstrap : int
number of bootstraps
log_weights : None or arrays matching D
sample weights
bias : function
if not None, the given bias will be removed.
align_bins : None or indices
if not None, will shift samples to align at the given bins indices
Returns
-------
bin_means : array((nbins,))
mean positions of bins
Es : array((sample, nbins))
for each bootstrap the free energies of bins.
"""
if range is None:
range = (np.min(D), np.max(D))
bin_edges = None
Es = []
by_traj = isinstance(D, list)
for _ in np.arange(nbootstrap):
Isel = np.random.choice(len(D), size=len(D), replace=True)
if by_traj:
            Dsample = np.concatenate([D[i] for i in Isel])
import itertools
import os
import shutil
import sys
import glob
from collections import defaultdict
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, \
accuracy_score, cohen_kappa_score
from sklearn.metrics import f1_score
from matplotlib import gridspec
import seaborn as sns
CLASSES = ['W', 'N1', 'N2', 'N3', 'REM']
def get_basename_(path):
name = os.path.basename(os.path.normpath(path))
# cut of number for ordering
if len(name)>1 and name[1] == '_':
name = name.split("_")[-1]
return name
def cm_figure_(prediction, truth, classes, configuration_name):
classes = classes.copy()
cm = confusion_matrix(truth, prediction, labels=range(len(classes)))
num_classes = cm.shape[0]
per_class_metrics = np.array(
precision_recall_fscore_support(truth, prediction, beta=1.0,
labels=range(
len(classes)))).T.round(2)
cm_norm = cm.astype('float') * 1 / (cm.sum(axis=1)[:, np.newaxis]+1e-7)
cm_norm = np.nan_to_num(cm_norm, copy=True)
fig = plt.figure(figsize=(3, 2), dpi=320, facecolor='w',
edgecolor='k')
ax = fig.add_subplot(1, 1, 1)
im = ax.imshow(
np.concatenate((cm_norm, np.zeros((len(classes), 4))), axis=1),
cmap='Oranges')
classes += ['PR', 'RE', 'F1', 'S']
xtick_marks = np.arange(len(classes))
ytick_marks = np.arange(len(classes) - 4)
ax.set_xlabel('Predicted', fontsize=5, weight='bold')
ax.set_xticks(xtick_marks)
c = ax.set_xticklabels(classes, fontsize=5, ha='center')
#ax.xaxis.set_label_position('top')
#ax.xaxis.tick_top()
ax.set_ylabel('True Label', fontsize=5, weight='bold')
ax.set_yticks(ytick_marks)
ax.set_yticklabels(classes[:-4], fontsize=5, va='center')
ax.yaxis.set_label_position('left')
ax.yaxis.tick_left()
ax.set_title(configuration_name, fontsize=5, horizontalalignment='center')
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
ax.text(j, i, '{}\n({:.2f})'.format(cm[i, j], cm_norm[i, j]),
horizontalalignment="center", fontsize=5,
verticalalignment='center', color="black")
for i, j in itertools.product(range(cm.shape[0]),
range(cm.shape[1], cm.shape[1] + 4)):
val = per_class_metrics[i, j - num_classes]
ax.text(j, i, val if j != cm.shape[1] + 3 else int(val),
horizontalalignment="center", fontsize=5,
verticalalignment='center', color="black")
return fig
def heatmap(data, row_labels, col_labels, ax=None,
cbar_kw={}, cbarlabel="", xlabel=None, ylabel=None, **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Arguments:
data : A 2D numpy array of shape (N,M)
row_labels : A list or array of length N with the labels
for the rows
col_labels : A list or array of length M with the labels
for the columns
Optional arguments:
ax : A matplotlib.axes.Axes instance to which the heatmap
is plotted. If not provided, use current axes or
create a new one.
cbar_kw : A dictionary with arguments to
:meth:`matplotlib.Figure.colorbar`.
cbarlabel : The label for the colorbar
All other arguments are directly passed on to the imshow call.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# Create colorbar
#cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
#cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=False, bottom=True,
labeltop=False, labelbottom=True)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=0, ha="center",
rotation_mode="anchor")
# Turn spines off and create white grid.
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax.tick_params(which="minor", bottom=False, left=False)
return im
def annotate_heatmap(im, data=None, valfmt="{x:.2f}",
textcolors=["black", "white"],
threshold=None, **textkw):
"""
A function to annotate a heatmap.
Arguments:
im : The AxesImage to be labeled.
Optional arguments:
data : Data used to annotate. If None, the image's data is used.
valfmt : The format of the annotations inside the heatmap.
This should either use the string format method, e.g.
"$ {x:.2f}", or be a :class:`matplotlib.ticker.Formatter`.
textcolors : A list or array of two color specifications. The first is
used for values below a threshold, the second for those
above.
threshold : Value in data units according to which the colors from
textcolors are applied. If None (the default) uses the
middle of the colormap as separation.
Further arguments are passed on to the created text labels.
"""
import matplotlib
if not isinstance(data, (list, np.ndarray)):
data = im.get_array()
# Normalize the threshold to the images color range.
if threshold is not None:
threshold = im.norm(threshold)
else:
threshold = im.norm(data.max())/2.
# Set default alignment to center, but allow it to be
# overwritten by textkw.
kw = dict(horizontalalignment="center",
verticalalignment="center", fontsize=8)
kw.update(textkw)
# Get the formatter in case a string is supplied
if isinstance(valfmt, str):
valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)
# Loop over the data and create a `Text` for each "pixel".
# Change the text's color depending on the data.
texts = []
for i in range(data.shape[0]):
for j in range(data.shape[1]):
if data[i, j] <=1:
kw.update(color=textcolors[im.norm(data[i, j]) > threshold])
text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
else:
text = im.axes.text(j, i, "{:d}".format(int(data[i, j]), None),
**kw)
texts.append(text)
return texts
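def _heatmap_usage_example():
    # Illustrative sketch (not used by the evaluation pipeline): draw a small
    # annotated matrix with the heatmap/annotate_heatmap helpers above.
    data = np.array([[0.9, 0.1], [0.2, 0.8]])
    fig, ax = plt.subplots()
    im = heatmap(data, ['A', 'B'], ['A', 'B'], ax=ax, cmap='YlGn')
    annotate_heatmap(im, valfmt="{x:.2f}")
    return fig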
def table_plot_(table, yticks, xticks, agg_table: bool = True):
num_yticks = len(yticks)
# m.configs]
aggs = np.stack([np.mean(table, 0), np.std(table, 0)], axis=0)
#fig = plt.figure(figsize=(8.27, 11.69), dpi=320, facecolor='w',
# edgecolor='k')
fig = plt.figure(figsize=(len(xticks), .5*len(yticks)), dpi=120,
facecolor='w',
edgecolor='k')
gs = gridspec.GridSpec(num_yticks + 4, len(xticks))
ax1 = fig.add_subplot(gs[:num_yticks, :])
# plt.suptitle(PREFIX, fontsize=12)
# ax1 = plt.subplot(211)#fig.add_subplot(2, 1, 1)
ax1.imshow(table[:num_yticks], cmap='YlGn', aspect="auto")
for i, j in itertools.product(range(num_yticks),
range(table.shape[1])):
ax1.text(j, i, '{:.3f}'.format(table[i, j]),
horizontalalignment="center", fontsize=10,
verticalalignment='center', color="black")
ytick_marks = np.arange(num_yticks)
ax1.set_yticks(ytick_marks)
ax1.set_yticklabels(yticks)
ax1.set_xticklabels([])
if agg_table:
ax2 = fig.add_subplot(gs[num_yticks + 1:, :])
ax2.imshow(aggs, cmap='YlGn', aspect="auto")
# ax2.set_aspect('equal', 'box')
# plt.imshow(table,cmap='Oranges')
for i, j in itertools.product(range(aggs.shape[0]),
range(aggs.shape[1])):
ax2.text(j, i, '{:.3f}'.format(aggs[i, j]),
horizontalalignment="center", fontsize=10,
verticalalignment='center', color="black")
ytick_marks = np.arange(2)
ax2.set_yticks(ytick_marks)
ax2.set_yticklabels(['mean', 'std'])
ax1 = ax2
xtick_marks = np.arange(len(xticks))
ax1.set_xticks(xtick_marks)
ax1.set_xticklabels(xticks, rotation=60)
return fig
def table_plot_folded_(table, yticks, xticks, agg_table: bool = False):
yticks = [y.replace("WESA_","").replace("_MLready.npz","") for y in yticks]
num_yticks = (len(yticks)+1) //2
max_yticks = len(yticks)
xticks = xticks + xticks
# m.configs]
min_val = min([min(t) for t in table])
max_val = max([max(t) for t in table])
aggs = np.stack([np.mean(table, 0), np.std(table, 0)], axis=0)
#fig = plt.figure(figsize=(8.27, 11.69), dpi=320, facecolor='w',
# edgecolor='k')
fig = plt.figure(figsize=(len(xticks), .5*num_yticks), dpi=120,
facecolor='w',
edgecolor='k')
gs = gridspec.GridSpec(num_yticks + 4, len(xticks))
ax1 = fig.add_subplot(gs[:num_yticks, :(len(xticks)//2)])
ax1.imshow(table[:num_yticks], cmap='YlGn', aspect="auto", vmin=min_val, vmax=max_val)
for i, j in itertools.product(range(num_yticks),
range(table.shape[1])):
ax1.text(j, i, '{:.3f}'.format(table[i, j]),
horizontalalignment="center", fontsize=10,
verticalalignment='center', color="black")
ax2 = fig.add_subplot(gs[:num_yticks, (len(xticks)//2):])
ax2.imshow(table[num_yticks:], cmap='YlGn', aspect="auto", vmin=min_val, vmax=max_val)
for i, j in itertools.product(range(num_yticks, max_yticks),
range(table.shape[1])):
ax2.text(j, i-num_yticks, '{:.3f}'.format(table[i, j]),
horizontalalignment="center", fontsize=10,
verticalalignment='center', color="black")
ytick_marks = np.arange(num_yticks)
ax1.set_yticks(ytick_marks)
ax1.set_yticklabels(yticks[:num_yticks])
ax1.set_xticklabels([])
#plt.draw()
#yax = ax1.get_yaxis()
#pad = max(T.label.get_window_extent().width for T in yax.majorTicks)
#yax.set_tick_params(pad=pad)
ytick_marks = np.arange(num_yticks)
ax2.set_yticks(ytick_marks)
ax2.set_yticklabels(yticks[num_yticks:])
ax2.set_xticklabels([])
if agg_table:
ax3 = fig.add_subplot(gs[num_yticks + 1:, :])
ax3.imshow(aggs, cmap='YlGn', aspect="auto")
# ax2.set_aspect('equal', 'box')
# plt.imshow(table,cmap='Oranges')
for i, j in itertools.product(range(aggs.shape[0]),
range(aggs.shape[1])):
ax3.text(j, i, '{:.3f}'.format(aggs[i, j]),
horizontalalignment="center", fontsize=10,
verticalalignment='center', color="black")
ytick_marks = np.arange(2)
ax3.set_yticks(ytick_marks)
ax3.set_yticklabels(['mean', 'std'])
xtick_marks = np.arange(len(xticks) // 2)
ax3.set_xticks(xtick_marks)
ax3.set_xticklabels(xticks, rotation=60)
#ax1 = ax2
xtick_marks = np.arange(len(xticks)//2)
ax1.set_xticks(xtick_marks)
ax1.set_xticklabels(xticks, rotation=60)
ax1.tick_params(labelbottom=False, labeltop=True, labelleft=True, labelright=False,
bottom=False, top=True, left=True, right=False)
ax2.set_xticks(xtick_marks)
ax2.set_xticklabels(xticks, rotation=60)
ax2.tick_params(labelbottom=False, labeltop=True, labelleft=False, labelright=True,
bottom=False, top=True, left=False, right=True)
return fig
class Model(object):
def __init__(self, path):
self.name = get_basename_(path)
self.path = path
print(f"model {self.name}")
self.configs = [Configurations(p) for p in sorted(glob.glob(path + '/*'))]
class Runs(object):
def __init__(self, path):
self.name = get_basename_(path)
print(f"runs: {self.name}")
self.path = path
self.subjects = sorted(glob.glob(path + '/*'))
class Configurations(object):
def __init__(self, path):
self.name = get_basename_(path)
self.path = path
print(f"config: {self.name}")
self.runs = [Runs(p) for p in sorted(glob.glob(path + '/*'))]
class Evaluation(object):
def __init__(self, path):
self.path = path
self.models = [Model(p) for p in sorted(glob.glob(path + '/*'))]
def cm(self):
for i, model in enumerate(self.models):
runs = []
for config in model.configs:
runs.append(config.name)
truth = []
prediction = []
run = config.runs[0]
for path in run.subjects:
result = self.read_subject_file(path)
truth.append(result['y_true'])
prediction.append(result['y_pred'])
truth = list(itertools.chain.from_iterable(truth))
prediction = list(itertools.chain.from_iterable(prediction))
cm = confusion_matrix(truth, prediction,
labels=range(5))
cm_norm = cm.astype('float') * 1 / (
cm.sum(axis=1)[:, np.newaxis] + 1e-7)
cm_norm = np.nan_to_num(cm_norm, copy=True)
fig, (ax2) = plt.subplots(1, 1,
figsize=(2.5,2.5),
dpi=200) #
plt.subplots_adjust(hspace=.05)
fig.suptitle(get_basename_(model.name),
fontsize=8, weight="bold",y=0.93)
per_class_metrics = np.array(
precision_recall_fscore_support(truth, prediction, beta=1.0,
labels=range(
5))).round(
2)
#im = heatmap(per_class_metrics, ['PR', 'RE', 'F1', 'S'],
# ('W', 'N1', 'N2', 'N3', 'REM'),
# ax=ax1, cmap="YlGn", vmin=0,vmax=1e10,
# aspect='auto')
#texts = annotate_heatmap(im, valfmt="{x:.2f} ")
im = heatmap(cm_norm, ('W', 'N1', 'N2', 'N3', 'REM'),
('W', 'N1', 'N2', 'N3', 'REM'),
ax=ax2, cmap="YlGn", aspect='auto',
xlabel="Predicted Label", ylabel="True Label")
texts = annotate_heatmap(im, valfmt="{x:.2f} ")
#ax2.get_shared_x_axes().join(ax1, ax2)
#ax1.tick_params(axis="x", labelbottom=0)
#ax1.tick_params(
# axis='x', # changes apply to the x-axis
# which='both', # both major and minor ticks are affected
# bottom=False, # ticks along the bottom edge are off
# top=False, # ticks along the top edge are off
# labelbottom=False) # labels along the bottom edge are off
try:
plt.savefig("cv_plots/cv_cm_" + model.name + ".eps", dpi=300, transparent=True, bbox_inches="tight")
except:
print("Failed saving plot.")
def boxplot(self, xlabel=None, ymin=.4):
models = []
rows = []
for i, model in enumerate(self.models):
models.append(model.name)
configs = []
for config in model.configs:
configs.append(config.name)
if len(config.runs) == 0: continue
run = config.runs[0]
for path in run.subjects:
result = self.read_subject_file(path)
acc = result['acc']/100
rows.append([get_basename_(path), model.name, config.name,
acc])
df = pd.DataFrame(rows, columns=['subject', 'model', 'config',
'accuracy'])
fig, ax = plt.subplots(figsize=(6,4), dpi=120)
#ax.set_title("Subject-wise accuracy", fontsize=14)
ax = sns.boxplot(x="config", y="accuracy", hue="model", data=df,
#palette="Set3",
order=[c.name for c in self.models[0].configs])
ax.tick_params(labelsize=10)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=10)
else:
ax.set_xlabel("")
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15),
fancybox=True, shadow=True, ncol=5, fontsize=10)
ax.set_axisbelow(True)
ax.yaxis.grid(color='gray', linestyle='dashed')
ax.set_ylim(ymin=ymin, ymax=1)
ax.set_ylabel('subject accuracy', fontsize=10)
def bar(self, xlabel=None, ymin=0.4):
models = []
means = []
stds = []
rows = []
for i, model in enumerate(self.models):
models.append(model.name)
runs = []
model_mean = []
model_std = []
for config in model.configs:
runs.append(config.name)
accs = np.array([])
for j, run in enumerate(config.runs):
truth = []
prediction = []
for path in run.subjects:
result = self.read_subject_file(path)
truth.append(result['y_true'])
prediction.append(result['y_pred'])
truth = list(itertools.chain.from_iterable(truth))
prediction = list(itertools.chain.from_iterable(prediction))
acc = accuracy_score(truth, prediction)
f1m = f1_score(truth, prediction, average='macro')
_, _, f1c, _ = precision_recall_fscore_support(truth,
prediction,
beta=1.0,
labels=range(
5))
kappa = cohen_kappa_score(truth, prediction)
rows.append(
[model.name, config.name, acc, f1m, kappa] + list(f1c))
accs = np.append(accs, acc)
model_mean.append(np.mean(accs))
model_std.append(np.std(accs))
means.append(model_mean)
stds.append(model_std)
cols = ['model', 'config',
'accuracy', 'f1m', 'kappa', 'W',
'N1', 'N2', 'N3', 'R']
df = pd.DataFrame(rows, columns=cols)
fig, ax = plt.subplots(figsize=(6, 4), dpi=120)
res = df.groupby(['model', 'config'], as_index=False)[cols].mean()
print(res.round(3).to_latex())
ax.set_title("Overall accuracy")
ax = sns.barplot(x="config", y="accuracy", hue="model", data=df,
#palette="Set3",
order=[c.name for c in self.models[0].configs])
ax.tick_params(labelsize=10)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=10)
else:
ax.set_xlabel("")
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15),
fancybox=True, shadow=True, ncol=5, fontsize=10)
ax.set_axisbelow(True)
ax.yaxis.grid(color='gray', linestyle='dashed')
ax.set_ylim(ymin=ymin, ymax=1)
ax.set_ylabel('accuracy', fontsize=10)
def hypnogram(self, index=0, models=None, config=None, start=None,
end=None):
models = self.models if models is None else [m for m in self.models
if m.name in models]
if len(models) == 0: raise ValueError("no matching models found!")
f, axarr = plt.subplots(len(models), 1, squeeze=False,
sharex=True, sharey=True,
figsize=(10, 3.5 * len(models)), dpi=320)
plt.yticks(range(5), ['W', 'N1', 'N2', 'N3', 'REM'], fontsize=10)
for i, model in enumerate(models):
cfg = model.configs[0] if config is None else\
next((item for item in model.configs if item.name == config),
None)
if cfg is None:
raise ValueError(f"config {config} not found")
run = cfg.runs[0]
path = run.subjects[index]
subject = get_basename_(path)
f.suptitle(f"{subject}", fontsize=12)
result = self.read_subject_file(path)
# only part of record
if start is None and end is None:
end = len(result['y_pred'])
start = 0
axarr[i, 0].set_xlim(xmin=start, xmax=end)
axarr[i, 0].plot(range(len(result['y_pred'])), result['y_pred'],
label="prediction")
axarr[i, 0].set_ylim(ymin=0.0)
#axarr[i, 0].plot(range(len(result['y_true'])), result[
# 'y_true'], alpha=0.9, label="truth", linestyle=':')
wrong = np.argwhere(np.not_equal(result['y_true'], result[
'y_pred']))
axarr[i, 0].plot(wrong, result['y_true'][wrong], '.',
label="error")
acc = result['acc']
#axarr[i, 0].set_title(f"{model.name} ({cfg.name}) - "
axarr[i, 0].set_title(f"{model.name} [ACC: {acc:.2f}%]",
fontsize=10)
# f"[{acc:.2f}%]", fontsize=10)
if 'attention' in result.keys():
ax2 = axarr[i, 0].twinx()
# same x-axis
color = 'tab:green'
ax2.set_ylabel('attention', color=color, fontsize=10)
attention = result['attention']
ax2.plot(range(len(attention)), attention, color=color)
ax2.tick_params(axis='y', labelcolor=color)
ax2.set_ylim(0.0, 1)
if 'drop' in result.keys():
dropped = np.argwhere(result['drop'])
for d in dropped:
axarr[i, 0].axvspan(d-0.5, d+0.5, alpha=0.2, color='red')
axarr[i, 0].legend(loc='upper center', bbox_to_anchor=(0.5, -0.15),
fancybox=True, shadow=True, ncol=5, fontsize=12)
axarr[i, 0].set_xlabel("epoch", fontsize=10)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
def table(self, folded=False):
table = []
for i, model in enumerate(self.models):
for config in model.configs:
column = []
run = config.runs[0]
for path in run.subjects:
result = self.read_subject_file(path)
column.append(result['acc'])
table.append(column)
table = np.vstack(table).T
subjects = [get_basename_(p) for p in run.subjects]
xticks = [m.name + '-' + r.name for m in self.models for r in m.configs]
if folded:
table_plot_folded_(table, subjects, xticks)
else:
table_plot_(table, subjects, xticks)
try:
plt.savefig("cv_plots/cv_tab_" + model.name + ".eps", dpi=300, transparent=True, bbox_inches="tight")
except:
print("Failed saving plot.")
def att_subject_table(self):
att_models = []
table = []
for i, model in enumerate(self.models):
for config in model.configs:
column = []
run = config.runs[0]
for path in run.subjects:
result = self.read_subject_file(path)
if not 'attention' in result.keys():
continue
column.append(np.mean(result['attention']))
if column != []:
table.append(column)
att_models.append(model.name + f"({config.name})")
table = np.vstack(table).T
subjects = [get_basename_(p) for p in run.subjects]
#xticks = [m.name + '-' + r.name for m in self.models for r in
# m.configs]
table_plot_(table, subjects, att_models)
def att_table(self):
att_models = []
table = []
for i, model in enumerate(self.models):
for config in model.configs:
print(model.name)
column = [[],[],[],[],[]]
run = config.runs[0]
for path in run.subjects:
result = self.read_subject_file(path)
if not 'attention' in result.keys():
continue
att_per_label = zip(result['y_pred'], result['attention'])
assert(not np.isnan(np.min(result['attention'])))
for label, a in att_per_label:
column[label].append(a)
if column != [[],[],[],[],[]]:
column = [np.mean(np.array(av)) if av != [] else 0 for av
in column]
table.append(column)
att_models.append(model.name)
table = np.vstack(table)
table_plot_(table, att_models, ['W', 'N1', "N2", "N3", "REM"],
agg_table=False)
def extract_experts(self):
def get_acc(prediction, truth):
wrong = np.argwhere(np.not_equal(truth, prediction))
acc = 100 * (1 - (len(wrong) / len(truth)))
return acc
for i, model in enumerate(self.models):
configs = []
true_label_dict = None
for config in model.configs:
experts = None
soft_votes_dict = defaultdict(lambda : [])
hard_votes_dict = defaultdict(lambda : [])
true_label_dict = {}
configs.append(config.name)
accs = np.array([])
if len(config.runs) == 0: continue
run = config.runs[0]
# print("run: ", run.name)
for path in run.subjects:
result = self.read_subject_file(path)
subject = get_basename_(path)
expert_base_path = os.path.join(self.path, os.path.basename(
config.path))
if experts is None:
experts = result['expert_channels']
for expert in experts:
os.makedirs(
os.path.join(self.path, 'Expert-' +
expert, os.path.basename(config.path), 'Expert-' +
expert))
voting_models = ['SOFT-V', 'MAJ-V']
for new_model in voting_models:
path = os.path.join(self.path, new_model, os.path.basename(
config.path), os.path.basename(
config.path))
if os.path.exists(path) and os.path.isdir(path):
shutil.rmtree(path)
for new_model in voting_models:
os.makedirs(os.path.join(self.path, new_model, os.path.basename(
config.path), os.path.basename(
config.path)))
for i in range(result['y_experts'].shape[1]):
y_expert_prob = result['y_experts'][:, i, :]
y_expert_pred = np.argmax(y_expert_prob, 1)
expert = result['expert_channels'][i]
y_true = result['y_true']
true_label_dict[subject] = y_true
a = result['a'][:, i]
drop = None
if 'drop_channels' in result.keys():
drop = result['drop_channels'][:, i]
hard_votes_dict[subject].append(y_expert_pred)
soft_votes_dict[subject].append(y_expert_prob)
wrong = np.argwhere(np.not_equal(y_true, y_expert_pred))
acc = 100*(1-wrong.shape[0]/len(y_expert_pred))
savepath = os.path.join(self.path, 'Expert-' +
expert, os.path.basename(config.path), 'Expert-' +
expert, subject)
savedict = {'y_true': y_true, 'y_pred': y_expert_pred,
'acc': acc, 'attention': a}
if drop is not None:
savedict['drop'] = drop
np.savez(savepath, **savedict)
for subject, predictions in soft_votes_dict.items():
soft_votes = np.array(predictions)
soft_vote = np.mean(soft_votes, axis=0)
soft_vote = np.argmax(soft_vote, axis=1)
y_true = true_label_dict[subject]
savepath = os.path.join(self.path, 'SOFT-V', os.path.basename(
config.path), os.path.basename(
config.path), subject)
savedict = {'y_true': y_true, 'y_pred': soft_vote,
'acc': get_acc(soft_vote, y_true)}
np.savez(savepath, **savedict)
for subject, predictions in hard_votes_dict.items():
                    hard_votes = np.array(predictions)
# -*- coding: utf-8 -*-
r""".. _sphara_filtering_eeg:
Spatial SPHARA filtering of EEG data
====================================
.. topic:: Section contents
In this tutorial we show how to use the SPHARA basis functions to
design a spatial low pass filter for application to EEG data. The
FEM discretization of the Laplace-Beltrami operator is used to
   calculate the SPHARA basis functions that are used for the SPHARA
low pass filter. The applicability of the filter is shown using an
   EEG data set that is disturbed by white noise at different noise
levels.
Introduction
------------
The human head as a volume conductor exhibits spatial low-pass filter
properties. For this reason, the potential distribution of the EEG on
the scalp surface can be represented by a few low-frequency SPHARA
basis functions, see :ref:`sphara_analysis_eeg`. In contrast,
single channel dropouts and spatially uncorrelated sensor noise
exhibit an almost equally distributed spatial SPHARA spectrum. This
fact can be exploited for the design of a spatial filter for the
suppression of uncorrelated sensor noise.
"""
######################################################################
# At the beginning we import three modules of the SpharaPy package as
# well as several other packages and single functions from
# packages.
# Code source: <NAME>
# License: BSD 3 clause
# import modules from spharapy package
import spharapy.trimesh as tm
import spharapy.spharafilter as sf
import spharapy.datasets as sd
# import additional modules used in this tutorial
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
######################################################################
# Import the spatial configuration of the EEG sensors and the SEP data
# --------------------------------------------------------------------
# In this tutorial we will apply a spatial SPHARA filter to SEP data
# of a single subject recorded with a 256 channel EEG system with
# equidistant layout. The data set is one of the example data sets
# contained in the SpharaPy toolbox.
# loading the 256 channel EEG dataset from spharapy sample datasets
mesh_in = sd.load_eeg_256_channel_study()
######################################################################
# The dataset includes lists of vertices, triangles, and sensor
# labels, as well as EEG data from previously performed experiment
# addressing the cortical activation related to somatosensory-evoked
# potentials (SEP).
print(mesh_in.keys())
######################################################################
# The triangulation of the EEG sensor setup consists of 256 vertices
# and 480 triangles. The EEG data consists of 256 channels and 369
# time samples, 50 ms before to 130 ms after stimulation. The sampling
# frequency is 2048 Hz.
vertlist = np.array(mesh_in['vertlist'])
trilist = np.array(mesh_in['trilist'])
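# A possible continuation (assumed SpharaPy API and hypothetical parameter values,
# please check the SpharaPy documentation): build the triangular mesh and a
# FEM-based SPHARA low-pass filter that keeps only the first basis functions.
# mesh_eeg = tm.TriMesh(trilist, vertlist)
# sphara_filter_fem = sf.SpharaFilter(mesh_eeg, mode='fem', specification=20)
# eegdata = np.array(mesh_in['eegdata'])
# sphara_filt_eegdata = sphara_filter_fem.filter(eegdata.transpose()).transpose()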
import sys
from typing import List, Tuple, Union
import numpy as np
def detect_vertical_partitions(gray: np.ndarray, threshold: float = 200) -> np.ndarray:
y = []
for row in gray:
y.append(np.mean(row))
return np.where(np.array(y) > threshold)[0]
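def detect_vertical_partitions_vectorized(gray: np.ndarray, threshold: float = 200) -> np.ndarray:
    # Equivalent one-liner (illustrative alternative, not referenced elsewhere in
    # this module): rows whose mean brightness exceeds the threshold.
    return np.where(gray.mean(axis=1) > threshold)[0]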
def detect_lines(
partitions: np.ndarray,
threshold: float = 5
) -> Union[List[int], List[List[int]]]:
last = -1
len_list = []
coordinates_list = []
for i in partitions:
if last == -1:
last = i
elif (i - last) > threshold:
len_list.append(i - last - 1)
coordinates_list.append([last + 1, i - 1])
last = i
return len_list, coordinates_list
def detect_edges(
len_list: List[int],
coordinates_list: List[List[int]],
len_img: int,
w_partitions: int = 5
) -> List[List[int]]:
if len(len_list) != len(coordinates_list):
sys.stderr.write("ERROR: The length of arrays do not match!")
sys.exit()
len_edge = np.median(len_list).astype(int)
edges = []
half_partitions = w_partitions // 2 + 1
z_max = 0
for length, (z1, z2) in zip(len_list, coordinates_list):
if (abs(length - len_edge) / len_edge) < 0.05:
if (z1 - z_max + half_partitions) > len_edge:
edges.append([z1 - len_edge + half_partitions, z1 - half_partitions])
edges.append([z1, z2])
z_max = z2 + half_partitions
if (len_img - z_max + half_partitions) > len_edge:
edges.append([z_max + half_partitions, z_max + len_edge + half_partitions])
return edges
def check_is_shift(
gray: np.ndarray,
len_edge: int,
ver_edges: List[List[int]],
hor_edges: List[List[int]]
) -> bool:
y1, y2 = ver_edges[-1]
on_shift = []
off_shift = []
for i, (x1, x2) in enumerate(hor_edges):
off_shift.append(gray[y1: y2, x2].mean())
on_shift.append(gray[y1: y2, x1 + len_edge//2 + 3].mean())
if i != len(hor_edges) - 1:
on_shift.append(gray[y1: y2, x2 + len_edge//2 + 3].mean())
    return np.mean(off_shift) < np.mean(on_shift)
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""main"""
import argparse
import os
import json
import cv2
import numpy as np
from api.infer import SdkApi
from config.config import config
from tqdm import tqdm
from shapely.geometry import Polygon
def parser_args():
"""parser_args"""
parser = argparse.ArgumentParser(description="siamRPN inference")
parser.add_argument("--img_path",
type=str,
required=False,
default="../data/input/vot2015",
help="image directory.")
parser.add_argument(
"--pipeline_path",
type=str,
required=False,
default="../data/config/siamRPN.pipeline",
help="image file path. The default is '../data/config/siamRPN.pipeline'. ")
parser.add_argument(
"--infer_result_dir",
type=str,
required=False,
default="./result",
help="cache dir of inference result. The default is './result'."
)
arg = parser.parse_args()
return arg
def get_instance_image(img, bbox, size_z, size_x, context_amount, img_mean=None):
cx, cy, w, h = bbox # float type
wc_z = w + context_amount * (w + h)
hc_z = h + context_amount * (w + h)
s_z = np.sqrt(wc_z * hc_z) # the width of the crop box
s_x = s_z * size_x / size_z
instance_img, scale_x = crop_and_pad(img, cx, cy, size_x, s_x, img_mean)
w_x = w * scale_x
h_x = h * scale_x
return instance_img, w_x, h_x, scale_x
def get_exemplar_image(img, bbox, size_z, context_amount, img_mean=None):
cx, cy, w, h = bbox
wc_z = w + context_amount * (w + h)
hc_z = h + context_amount * (w + h)
s_z = np.sqrt(wc_z * hc_z)
scale_z = size_z / s_z
exemplar_img, _ = crop_and_pad(img, cx, cy, size_z, s_z, img_mean)
return exemplar_img, scale_z, s_z
def round_up(value):
return round(value + 1e-6 + 1000) - 1000
def crop_and_pad(img, cx, cy, model_sz, original_sz, img_mean=None):
"""change img size
:param img:rgb
:param cx: center x
:param cy: center y
:param model_sz: changed size
:param original_sz: origin size
:param img_mean: mean of img
:return: changed img ,scale for origin to changed
"""
im_h, im_w, _ = img.shape
xmin = cx - (original_sz - 1) / 2
xmax = xmin + original_sz - 1
ymin = cy - (original_sz - 1) / 2
ymax = ymin + original_sz - 1
left = int(round_up(max(0., -xmin)))
top = int(round_up(max(0., -ymin)))
right = int(round_up(max(0., xmax - im_w + 1)))
bottom = int(round_up(max(0., ymax - im_h + 1)))
xmin = int(round_up(xmin + left))
xmax = int(round_up(xmax + left))
ymin = int(round_up(ymin + top))
ymax = int(round_up(ymax + top))
r, c, k = img.shape
if any([top, bottom, left, right]):
# 0 is better than 1 initialization
te_im = np.zeros((r + top + bottom, c + left + right, k), np.uint8)
te_im[top:top + r, left:left + c, :] = img
if top:
te_im[0:top, left:left + c, :] = img_mean
if bottom:
te_im[r + top:, left:left + c, :] = img_mean
if left:
te_im[:, 0:left, :] = img_mean
if right:
te_im[:, c + left:, :] = img_mean
im_patch_original = te_im[int(ymin):int(
ymax + 1), int(xmin):int(xmax + 1), :]
else:
im_patch_original = img[int(ymin):int(
ymax + 1), int(xmin):int(xmax + 1), :]
if not np.array_equal(model_sz, original_sz):
# zzp: use cv to get a better speed
im_patch = cv2.resize(im_patch_original, (model_sz, model_sz))
else:
im_patch = im_patch_original
scale = model_sz / im_patch_original.shape[0]
return im_patch, scale
def generate_anchors(total_stride, base_size, scales, ratios, score_size):
""" anchor generator function"""
anchor_num = len(ratios) * len(scales)
anchor = np.zeros((anchor_num, 4), dtype=np.float32)
size = base_size * base_size
count = 0
for ratio in ratios:
ws = int(np.sqrt(size / ratio))
hs = int(ws * ratio)
for scale in scales:
wws = ws * scale
hhs = hs * scale
anchor[count, 0] = 0
anchor[count, 1] = 0
anchor[count, 2] = wws
anchor[count, 3] = hhs
count += 1
anchor = np.tile(anchor, score_size * score_size).reshape((-1, 4))
ori = - (score_size // 2) * total_stride
xx, yy = np.meshgrid([ori + total_stride * dx for dx in range(score_size)],
[ori + total_stride * dy for dy in range(score_size)])
xx, yy = np.tile(xx.flatten(), (anchor_num, 1)).flatten(), \
np.tile(yy.flatten(), (anchor_num, 1)).flatten()
anchor[:, 0], anchor[:, 1] = xx.astype(np.float32), yy.astype(np.float32)
return anchor
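# Example (hedged, not from the original file): with settings similar to a typical SiamRPN
# config -- total_stride=8, base_size=8, scales=[8], ratios=[0.33, 0.5, 1, 2, 3], score_size=17 --
# the anchor array has one (cx, cy, w, h) row per anchor per score-map cell:
#
#   anchors = generate_anchors(8, 8, [8], [0.33, 0.5, 1, 2, 3], 17)
#   # anchors.shape == (5 * 17 * 17, 4) == (1445, 4)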
def box_transform_inv(anchors, offset):
"""invert transform box
:param anchors: object
:param offset: object
:return: object
"""
anchor_xctr = anchors[:, :1]
anchor_yctr = anchors[:, 1:2]
anchor_w = anchors[:, 2:3]
anchor_h = anchors[:, 3:]
    offset_x, offset_y, offset_w, offset_h = \
        offset[:, :1], offset[:, 1:2], offset[:, 2:3], offset[:, 3:]
box_cx = anchor_w * offset_x + anchor_xctr
box_cy = anchor_h * offset_y + anchor_yctr
box_w = anchor_w * np.exp(offset_w)
box_h = anchor_h * np.exp(offset_h)
box = np.hstack([box_cx, box_cy, box_w, box_h])
return box
def get_axis_aligned_bbox(region):
""" convert region to (cx, cy, w, h) that represent by axis aligned box
"""
nv = len(region)
region = np.array(region)
if nv == 8:
x1 = min(region[0::2])
x2 = max(region[0::2])
y1 = min(region[1::2])
y2 = max(region[1::2])
A1 = np.linalg.norm(region[0:2] - region[2:4]) * \
np.linalg.norm(region[2:4] - region[4:6])
A2 = (x2 - x1) * (y2 - y1)
s = np.sqrt(A1 / A2)
w = s * (x2 - x1) + 1
h = s * (y2 - y1) + 1
x = x1
y = y1
else:
x = region[0]
y = region[1]
w = region[2]
h = region[3]
return x, y, w, h
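# Example (illustrative only): an 8-value VOT polygon region is reduced to an axis-aligned
# (x, y, w, h) box whose area is rescaled to match the polygon area:
#
#   region = [100, 100, 200, 100, 200, 180, 100, 180]  # axis-aligned rectangle given as a polygon
#   x, y, w, h = get_axis_aligned_bbox(region)
#   # x == 100, y == 100, w == 101, h == 81 (the +1 comes from the inclusive pixel convention)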
def softmax(y):
"""softmax of numpy"""
x = y.copy()
if len(x.shape) > 1:
tmp = np.max(x, axis=1)
x -= tmp.reshape((x.shape[0], 1))
x = np.exp(x)
tmp = np.sum(x, axis=1)
x /= tmp.reshape((x.shape[0], 1))
else:
tmp = np.max(x)
x -= tmp
x = np.exp(x)
tmp = np.sum(x)
x /= tmp
return x
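# Quick sanity check (illustrative): each row of a 2-D input sums to 1 after softmax.
#
#   probs = softmax(np.array([[1.0, 2.0, 3.0], [0.0, 0.0, 0.0]]))
#   # probs.sum(axis=1) -> array([1., 1.])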
def judge_failures(pred_bbox, gt_bbox, threshold=0):
"""" judge whether to fail or not """
if len(gt_bbox) == 4:
if iou(np.array(pred_bbox).reshape(-1, 4), np.array(gt_bbox).reshape(-1, 4)) > threshold:
return False
else:
poly_pred = Polygon(np.array([[pred_bbox[0], pred_bbox[1]],
[pred_bbox[2], pred_bbox[1]],
[pred_bbox[2], pred_bbox[3]],
[pred_bbox[0], pred_bbox[3]]
])).convex_hull
poly_gt = Polygon(np.array(gt_bbox).reshape(4, 2)).convex_hull
inter_area = poly_gt.intersection(poly_pred).area
overlap = inter_area / (poly_gt.area + poly_pred.area - inter_area)
if overlap > threshold:
return False
return True
def calculate_accuracy_failures(pred_trajectory, gt_trajectory,
bound=None):
    '''
    args:
        pred_trajectory: list of predicted bboxes
        gt_trajectory: list of ground-truth bboxes, same length as pred_trajectory
        bound: (w, h) of the image, used to clip polygons to the frame
    return:
        acc: mean iou over the trajectory
        overlaps: list of iou values along pred_trajectory
        failures: indices of failure points in pred_trajectory
        num_failures: number of failures
    '''
overlaps = []
failures = []
for i, pred_traj in enumerate(pred_trajectory):
if len(pred_traj) == 1:
if pred_trajectory[i][0] == 2:
failures.append(i)
overlaps.append(float("nan"))
else:
if bound is not None:
poly_img = Polygon(np.array([[0, 0],
[0, bound[1]],
[bound[0], bound[1]],
[bound[0], 0]])).convex_hull
if len(gt_trajectory[i]) == 8:
poly_pred = Polygon(np.array([[pred_trajectory[i][0], pred_trajectory[i][1]],
[pred_trajectory[i][2], pred_trajectory[i][1]],
[pred_trajectory[i][2], pred_trajectory[i][3]],
[pred_trajectory[i][0], pred_trajectory[i][3]]
])).convex_hull
poly_gt = Polygon(
np.array(gt_trajectory[i]).reshape(4, 2)).convex_hull
if bound is not None:
gt_inter_img = poly_gt.intersection(poly_img)
pred_inter_img = poly_pred.intersection(poly_img)
inter_area = gt_inter_img.intersection(pred_inter_img).area
overlap = inter_area / \
(gt_inter_img.area + pred_inter_img.area - inter_area)
else:
inter_area = poly_gt.intersection(poly_pred).area
overlap = inter_area / \
(poly_gt.area + poly_pred.area - inter_area)
elif len(gt_trajectory[i]) == 4:
overlap = iou(np.array(pred_trajectory[i]).reshape(-1, 4), np.array(gt_trajectory[i]).reshape(-1, 4))
overlaps.append(overlap)
acc = 0
num_failures = len(failures)
if overlaps:
acc = np.nanmean(overlaps)
return acc, overlaps, failures, num_failures
def calculate_expected_overlap(fragments, fweights):
""" compute expected iou """
max_len = fragments.shape[1]
expected_overlaps = np.zeros((max_len), np.float32)
expected_overlaps[0] = 1
# TODO Speed Up
for i in range(1, max_len):
mask = np.logical_not(np.isnan(fragments[:, i]))
if np.any(mask):
fragment = fragments[mask, 1:i+1]
seq_mean = np.sum(fragment, 1) / fragment.shape[1]
expected_overlaps[i] = np.sum(seq_mean *
fweights[mask]) / np.sum(fweights[mask])
return expected_overlaps
def iou(box1, box2):
""" compute iou """
box1, box2 = box1.copy(), box2.copy()
N = box1.shape[0]
K = box2.shape[0]
box1 = np.array(box1.reshape((N, 1, 4))) + \
np.zeros((1, K, 4)) # box1=[N,K,4]
box2 = np.array(box2.reshape((1, K, 4))) + \
        np.zeros((N, 1, 4))  # box2=[N,K,4]
x_max = np.max(np.stack((box1[:, :, 0], box2[:, :, 0]), axis=-1), axis=2)
x_min = np.min(np.stack((box1[:, :, 2], box2[:, :, 2]), axis=-1), axis=2)
y_max = np.max(np.stack((box1[:, :, 1], box2[:, :, 1]), axis=-1), axis=2)
y_min = np.min(np.stack((box1[:, :, 3], box2[:, :, 3]), axis=-1), axis=2)
tb = x_min-x_max
lr = y_min-y_max
tb[np.where(tb < 0)] = 0
lr[np.where(lr < 0)] = 0
over_square = tb*lr
all_square = (box1[:, :, 2] - box1[:, :, 0]) * (box1[:, :, 3] - box1[:, :, 1]) + (box2[:, :, 2] - \
box2[:, :, 0]) * (box2[:, :, 3] - box2[:, :, 1]) - over_square
return over_square / all_square
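# Shape convention (illustrative example, not in the original source): iou() broadcasts an
# (N, 4) and a (K, 4) array of (x1, y1, x2, y2) boxes into an (N, K) overlap matrix.
#
#   a = np.array([[0, 0, 10, 10]], dtype=float)
#   b = np.array([[0, 0, 10, 10], [5, 5, 15, 15]], dtype=float)
#   # iou(a, b) -> array([[1.0, 25.0 / 175.0]])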
def calculate_eao(dataset_name, all_failures, all_overlaps, gt_traj_length, skipping=5):
    '''
    input:
        dataset_name: benchmark name ("VOT2015" or "VOT2016")
        all_failures: list of failure-index lists, one per sequence
        all_overlaps: list of per-sequence overlap lists, same length as all_failures
        gt_traj_length: list of ground-truth trajectory lengths, same length as all_failures
        skipping: number of frames skipped after each failure
    '''
if dataset_name == "VOT2016":
low = 108
high = 371
elif dataset_name == "VOT2015":
low = 108
high = 371
fragment_num = sum([len(x)+1 for x in all_failures])
max_len = max([len(x) for x in all_overlaps])
tags = [1] * max_len
    seq_weight = 1 / (1 + 1e-10)  # the 1e-10 guards against division by zero
eao = {}
# prepare segments
fweights = np.ones(fragment_num, dtype=np.float32) * np.nan
fragments = np.ones((fragment_num, max_len), dtype=np.float32) * np.nan
seg_counter = 0
for traj_len, failures, overlaps in zip(gt_traj_length, all_failures, all_overlaps):
if failures:
points = [x+skipping for x in failures if
x+skipping <= len(overlaps)]
points.insert(0, 0)
for i, _ in enumerate(points):
if i != len(points) - 1:
fragment = np.array(
overlaps[points[i]:points[i+1]+1], dtype=np.float32)
fragments[seg_counter, :] = 0
else:
fragment = np.array(overlaps[points[i]:], dtype=np.float32)
fragment[np.isnan(fragment)] = 0
fragments[seg_counter, :len(fragment)] = fragment
if i != len(points) - 1:
tag_value = tags[points[i]:points[i+1]+1]
w = sum(tag_value) / (points[i+1] - points[i]+1)
fweights[seg_counter] = seq_weight * w
else:
tag_value = tags[points[i]:len(overlaps)]
w = sum(tag_value) / (traj_len - points[i]+1e-16)
fweights[seg_counter] = seq_weight * w
seg_counter += 1
else:
# no failure
max_idx = min(len(overlaps), max_len)
fragments[seg_counter, :max_idx] = overlaps[:max_idx]
tag_value = tags[0: max_idx]
w = sum(tag_value) / max_idx
fweights[seg_counter] = seq_weight * w
seg_counter += 1
expected_overlaps = calculate_expected_overlap(fragments, fweights)
print(len(expected_overlaps))
# calculate eao
weight = np.zeros((len(expected_overlaps)))
weight[low-1:high-1+1] = 1
expected_overlaps = np.array(expected_overlaps, dtype=np.float32)
is_valid = np.logical_not(np.isnan(expected_overlaps))
eao_ = np.sum(expected_overlaps[is_valid] *
weight[is_valid]) / np.sum(weight[is_valid])
eao = eao_
return eao
class SiamRPNTracker:
""" Tracker for SiamRPN"""
def __init__(self):
valid_scope = 2 * config.valid_scope + 1
self.anchors = generate_anchors(config.total_stride, config.anchor_base_size, config.anchor_scales,
config.anchor_ratios,
valid_scope)
self.window = np.tile(np.outer(np.hanning(config.score_size), np.hanning(config.score_size))[None, :],
[config.anchor_num, 1, 1]).flatten()
def _cosine_window(self, size):
"""
get the cosine window
"""
cos_window = np.hanning(int(size[0]))[:, np.newaxis].dot(
np.hanning(int(size[1]))[np.newaxis, :])
cos_window = cos_window.astype(np.float32)
cos_window /= np.sum(cos_window)
return cos_window
def init(self, frame, bbox):
""" initialize siamfc tracker
Args:
frame: an RGB image
bbox: one-based bounding box [x, y, width, height]
"""
self.shape = frame.shape
self.pos = np.array(
[bbox[0] + bbox[2] / 2 - 1 / 2, bbox[1] + bbox[3] / 2 - 1 / 2]) # center x, center y, zero based
self.target_sz = np.array([bbox[2], bbox[3]]) # width, height
self.bbox = np.array([bbox[0] + bbox[2] / 2 - 1 / 2,
bbox[1] + bbox[3] / 2 - 1 / 2, bbox[2], bbox[3]])
self.origin_target_sz = np.array([bbox[2], bbox[3]])
# get exemplar img
self.img_mean = np.mean(frame, axis=(0, 1))
exemplar_img, _, _ = get_exemplar_image(frame, self.bbox,
config.exemplar_size, config.context_amount, self.img_mean)
exemplar_img = exemplar_img.transpose((2, 0, 1)).astype(np.float32)
exemplar_img = np.expand_dims(exemplar_img, axis=0)
return exemplar_img
def update(self, frame):
"""track object based on the previous frame
Args:
frame: an RGB image
Returns:
bbox: tuple of 1-based bounding box(xmin, ymin, xmax, ymax)
"""
self.img_mean = np.mean(frame, axis=(0, 1))
instance_img_np, _, _, scale_x = get_instance_image(frame, self.bbox, config.exemplar_size,
config.instance_size,
config.context_amount, self.img_mean)
self.scale_x = scale_x
instance_img_np = instance_img_np.transpose(
(2, 0, 1)).astype(np.float32)
instance_img_np = np.expand_dims(instance_img_np, axis=0)
return instance_img_np
def postprocess(self, pred_score, pred_regression):
"""postprocess of prediction"""
pred_score = np.frombuffer(pred_score, dtype=np.float32)
pred_regression = np.frombuffer(pred_regression, dtype=np.float32)
pred_conf = pred_score.reshape(
(config.anchor_num * config.score_size * config.score_size, 2))
pred_offset = pred_regression.reshape(
(config.anchor_num * config.score_size * config.score_size, 4))
delta = pred_offset
box_pred = box_transform_inv(self.anchors, delta)
score_pred = softmax(pred_conf)[:, 1]
def change(r):
return np.maximum(r, 1. / r)
def sz(w, h):
pad = (w + h) * 0.5
sz2 = (w + pad) * (h + pad)
return np.sqrt(sz2)
def sz_wh(wh):
pad = (wh[0] + wh[1]) * 0.5
sz2 = (wh[0] + pad) * (wh[1] + pad)
return np.sqrt(sz2)
s_c = change(sz(box_pred[:, 2], box_pred[:, 3]) /
(sz_wh(self.target_sz * self.scale_x))) # scale penalty
r_c = change((self.target_sz[0] / self.target_sz[1]) /
(box_pred[:, 2] / box_pred[:, 3])) # ratio penalty
penalty = np.exp(-(r_c * s_c - 1.) * config.penalty_k)
pscore = penalty * score_pred
pscore = pscore * (1 - config.window_influence) + \
self.window * config.window_influence
best_pscore_id = np.argmax(pscore)
target = box_pred[best_pscore_id, :] / self.scale_x
lr = penalty[best_pscore_id] * \
score_pred[best_pscore_id] * config.lr_box
res_x = np.clip(target[0] + self.pos[0], 0, self.shape[1])
res_y = np.clip(target[1] + self.pos[1], 0, self.shape[0])
res_w = np.clip(self.target_sz[0] * (1 - lr) + target[2] * lr, config.min_scale * self.origin_target_sz[0],
config.max_scale * self.origin_target_sz[0])
res_h = np.clip(self.target_sz[1] * (1 - lr) + target[3] * lr, config.min_scale * self.origin_target_sz[1],
config.max_scale * self.origin_target_sz[1])
self.pos = np.array([res_x, res_y])
self.target_sz = np.array([res_w, res_h])
bbox = np.array([res_x, res_y, res_w, res_h])
self.bbox = (
np.clip(bbox[0], 0, self.shape[1]).astype(np.float64),
            np.clip(bbox[1], 0, self.shape[0]).astype(np.float64),
            # the two lines below restore the usual SiamRPN width/height clipping (reconstructed)
            np.clip(bbox[2], 10, self.shape[1]).astype(np.float64),
            np.clip(bbox[3], 10, self.shape[0]).astype(np.float64))
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Analytical nuclear hessian for 1-electron spin-free x2c method
Ref.
JCP 135, 244104 (2011); DOI:10.1063/1.3667202
JCTC 8, 2617 (2012); DOI:10.1021/ct300127e
'''
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import gto
from pyscf.x2c import x2c
from pyscf.x2c import sfx2c1e_grad
def hcore_hess_generator(x2cobj, mol=None):
    '''nuclear hessian of the 1-component X2c hcore Hamiltonian (spin-free part only)
'''
if mol is None: mol = x2cobj.mol
xmol, contr_coeff = x2cobj.get_xmol(mol)
if x2cobj.basis is not None:
s22 = xmol.intor_symmetric('int1e_ovlp')
s21 = gto.intor_cross('int1e_ovlp', xmol, mol)
contr_coeff = lib.cho_solve(s22, s21)
get_h1_xmol = gen_sf_hfw(xmol, x2cobj.approx)
def hcore_deriv(ia, ja):
h1 = get_h1_xmol(ia, ja)
if contr_coeff is not None:
h1 = lib.einsum('pi,xypq,qj->xyij', contr_coeff, h1, contr_coeff)
        return numpy.asarray(h1)
import numpy as np
import pandas as pd
import os
import time
from ldsc_polyfun import jackknife, regressions, sumstats, ldscore, parse
import logging
from copy import deepcopy
from tqdm import tqdm
from polyfun_utils import Logger, check_package_versions, set_snpid_index, configure_logger, get_file_name
from polyfun_utils import SNP_COLUMNS
from pyarrow import ArrowIOError
from pyarrow.lib import ArrowInvalid
from compute_ldscores_from_ld import compute_ldscores_chr
import tempfile
MAX_CHI2=80
def __filter__(fname, noun, verb, merge_obj):
merged_list = None
if fname:
f = lambda x,n: x.format(noun=noun, verb=verb, fname=fname, num=n)
x = parse.FilterFile(fname)
c = 'Read list of {num} {noun} to {verb} from {fname}'
logging.info(f(c, len(x.IDList)))
merged_list = merge_obj.loj(x.IDList)
len_merged_list = len(merged_list)
if len_merged_list > 0:
c = 'After merging, {num} {noun} remain'
logging.info(f(c, len_merged_list))
else:
error_msg = 'No {noun} retained for analysis'
raise ValueError(f(error_msg, 0))
return merged_list
def splash_screen():
print('*********************************************************************')
print('* PolyFun (POLYgenic FUNctionally-informed fine-mapping)')
print('* Version 1.0.0')
print('* (C) 2019-2021 <NAME>')
print('*********************************************************************')
print()
def check_args(args):
#verify that the requested computations are valid
mode_params = np.array([args.compute_h2_L2, args.compute_ldscores, args.compute_h2_bins])
if np.sum(mode_params)==0:
raise ValueError('must specify at least one of --compute-h2-L2, --compute-ldscores, --compute-h2-bins')
if args.compute_h2_L2 and args.compute_h2_bins and not args.compute_ldscores:
raise ValueError('cannot use both --compute-h2_L2 and --compute_h2_bins without also specifying --compute-ldscores')
if args.chr is not None:
if args.compute_h2_L2 or args.compute_h2_bins:
raise ValueError('--chr can only be specified when using only --compute-ldscores')
if args.bfile_chr is not None:
if not args.compute_ldscores:
raise ValueError('--bfile-chr can only be specified when using --compute-ldscores')
if args.ld_ukb:
if not args.compute_ldscores:
raise ValueError('--ld-ukb can only be specified when using --compute-ldscores')
if args.no_partitions:
if not args.compute_h2_L2:
raise ValueError('cannot specify --no-partitions without specifying --compute-h2-L2')
if args.compute_ldscores:
raise ValueError('cannot specify both --no-partitions and --compute-ldscores')
if args.compute_h2_bins:
raise ValueError('cannot specify both --no-partitions and --compute-h2-bins')
if args.compute_ldscores and args.compute_h2_bins and not args.compute_h2_L2:
raise ValueError('cannot use both --compute-ldscores and --compute_h2_bins without also specifying --compute-h2-L2')
#verify partitioning parameters
if args.skip_Ckmedian and (args.num_bins is None or args.num_bins<=0):
raise ValueError('You must specify --num-bins when using --skip-Ckmedian')
#verify LD-score related parameters
if args.ld_dir is not None and not args.ld_ukb:
raise ValueError('You cannot specify --ld-dir without also specifying --ld-ukb')
if args.bfile_chr is not None and args.ld_ukb:
raise ValueError('You can specify only one of --bfile-chr and --ld-ukb')
if args.compute_ldscores:
if args.bfile_chr is None and not args.ld_ukb:
raise ValueError('You must specify either --bfile-chr or --ld-ukb when you specify --compute-ldscores')
if not args.ld_ukb and (args.ld_wind_cm is None and args.ld_wind_kb is None and args.ld_wind_snps is None):
args.ld_wind_cm = 1.0
logging.warning('no ld-wind argument specified. PolyFun will use --ld-cm 1.0')
if not args.compute_ldscores:
if not (args.ld_wind_cm is None and args.ld_wind_kb is None and args.ld_wind_snps is None):
raise ValueError('--ld-wind parameters can only be specified together with --compute-ldscores')
if args.keep is not None:
raise ValueError('--keep can only be specified together with --compute-ldscores')
if args.chr is not None:
raise ValueError('--chr can only be specified together with --compute-ldscores')
if args.compute_h2_L2:
if args.sumstats is None:
raise ValueError('--sumstats must be specified when using --compute-h2-L2')
if args.ref_ld_chr is None:
raise ValueError('--ref-ld-chr must be specified when using --compute-h2-L2')
if args.w_ld_chr is None:
raise ValueError('--w-ld-chr must be specified when using --compute-h2-L2')
if args.compute_h2_bins:
if args.sumstats is None:
raise ValueError('--sumstats must be specified when using --compute-h2-bins')
if args.w_ld_chr is None:
raise ValueError('--w-ld-chr must be specified when using --compute-h2-bins')
if args.ref_ld_chr is not None and not args.compute_ldscores:
raise ValueError('--ref-ld-chr should not be specified when using --compute-h2-bins, unless you also use --compute-ldscores')
return args
def check_files(args):
#check that required input files exist
if args.compute_h2_L2:
if not os.path.exists(args.sumstats):
raise IOError('Cannot find sumstats file %s'%(args.sumstats))
for chr_num in range(1,23):
get_file_name(args, 'ref-ld', chr_num, verify_exists=True, allow_multiple=True)
get_file_name(args, 'w-ld', chr_num, verify_exists=True)
get_file_name(args, 'annot', chr_num, verify_exists=True, allow_multiple=True)
if args.compute_ldscores:
if args.chr is None: chr_range = range(1,23)
else: chr_range = range(args.chr, args.chr+1)
for chr_num in chr_range:
if args.bfile_chr is not None:
get_file_name(args, 'bim', chr_num, verify_exists=True)
get_file_name(args, 'fam', chr_num, verify_exists=True)
get_file_name(args, 'bed', chr_num, verify_exists=True)
if not args.compute_h2_L2:
get_file_name(args, 'snpvar_ridge', chr_num, verify_exists=True)
get_file_name(args, 'bins', chr_num, verify_exists=True)
if args.compute_h2_bins and not args.compute_ldscores:
for chr_num in range(1,23):
get_file_name(args, 'w-ld', chr_num, verify_exists=True)
if not args.compute_h2_L2:
get_file_name(args, 'bins', chr_num, verify_exists=True)
class PolyFun:
def __init__(self):
pass
def run_ldsc(self, args, use_ridge, nn, keep_large, evenodd_split, n_blocks=2):
#prepare LDSC objects
log = Logger()
args.h2 = args.sumstats
args.ref_ld = None
args.w_ld = None
args.n_blocks = n_blocks
args.M = None
args.not_M_5_50 = True
    # if not ridge, then we'll use the LD-scores of our bins
if not use_ridge:
args = deepcopy(args)
args.ref_ld_chr = args.output_prefix+'.'
#read input data
if use_ridge or not args.compute_ldscores or True:
M_annot, w_ld_cname, ref_ld_cnames, df_sumstats, _ = sumstats._read_ld_sumstats(args, log, args.h2)
else:
#TODO: Don't reload files if we don't have to...
M_annot = self.M
w_ld_cname = 'w_ld'
ref_ld_cnames = self.df_bins.columns
try:
df_sumstats = pd.read_parquet(args.sumstats)
except (ArrowIOError, ArrowInvalid):
df_sumstats = pd.read_table(args.sumstats, sep='\s+')
###merge everything together...
#prepare LD-scores for S-LDSC run
ref_ld = np.array(df_sumstats[ref_ld_cnames], dtype=np.float32)
sumstats._check_ld_condnum(args, log, ref_ld_cnames)
if df_sumstats.shape[0] < 200000:
logging.warning('number of SNPs is smaller than 200k; this is almost always bad.')
n_snp = len(df_sumstats)
n_blocks = np.minimum(n_snp, args.n_blocks)
n_annot = len(ref_ld_cnames)
if n_annot<=1:
raise ValueError('Only one annotation found')
chisq_max = max(0.001*df_sumstats['N'].max(), MAX_CHI2)
#prepare chi2 statistics
    s = lambda x: np.array(x)
from __future__ import print_function
import math, types
import numpy as N
import matplotlib.pyplot as P
def plotres(psr,deleted=False,group=None,**kwargs):
"""Plot residuals, compute unweighted rms residual."""
res, t, errs = psr.residuals(), psr.toas(), psr.toaerrs
if (not deleted) and N.any(psr.deleted != 0):
res, t, errs = res[psr.deleted == 0], t[psr.deleted == 0], errs[psr.deleted == 0]
print("Plotting {0}/{1} nondeleted points.".format(len(res),psr.nobs))
meanres = math.sqrt(N.mean(res**2)) / 1e-6
if group is None:
i = N.argsort(t)
P.errorbar(t[i],res[i]/1e-6,yerr=errs[i],fmt='x',**kwargs)
else:
if (not deleted) and N.any(psr.deleted):
flagmask = psr.flagvals(group)[~psr.deleted]
else:
flagmask = psr.flagvals(group)
unique = list(set(flagmask))
for flagval in unique:
f = (flagmask == flagval)
flagres, flagt, flagerrs = res[f], t[f], errs[f]
i = N.argsort(flagt)
P.errorbar(flagt[i],flagres[i]/1e-6,yerr=flagerrs[i],fmt='x',**kwargs)
P.legend(unique,numpoints=1,bbox_to_anchor=(1.1,1.1))
P.xlabel('MJD'); P.ylabel('res [us]')
P.title("{0} - rms res = {1:.2f} us".format(psr.name,meanres))
# select parameters by name or number, omit non-existing
def _select(p,pars,select):
sel = []
for s in select:
if isinstance(s,str) and s in pars:
sel.append(pars.index(s))
elif isinstance(s,int) and s < p:
sel.append(s)
return len(sel), sel
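# Illustrative example (not part of the original module): parameters can be selected either
# by name or by index, and unknown entries are silently dropped.
#
#   pars = ['RAJ', 'DECJ', 'F0']
#   _select(3, pars, ['F0', 0, 'DM'])   # -> (2, [2, 0]); 'DM' is not in pars, so it is omitted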
def plothist(data,pars=[],offsets=[],norms=[],select=[],weights={},ranges={},labels={},skip=[],append=False,
bins=50,color='k',linestyle=None,linewidth=1,title=None):
if hasattr(data,'data') and not isinstance(data,N.ndarray):
# parse a multinestdata structure
if not pars and hasattr(data,'parnames'):
pars = data.parnames
data = data.data
p = data.shape[-1]
if not pars:
pars = map('p{0}'.format,range(p))
if offsets:
data = data.copy()
if isinstance(offsets,dict):
for i,par in enumerate(pars):
if par in offsets:
data[:,i] = data[:,i] - offsets[par]
else:
if len(offsets) < p:
offsets = offsets + [0.0] * (p - len(offsets))
data = data - N.array(offsets)
if norms:
if len(norms) < p:
norms = norms + [1.0] * (p - len(norms))
data = data / norms
if select:
p, sel = _select(p,pars,select)
data, pars = data[:,sel], [pars[s] for s in sel]
if weights:
weight = 1
for i,par in enumerate(pars):
if par in weights:
if isinstance(weights[par],types.FunctionType):
weight = weight * N.vectorize(weights[par])(data[:,i])
else:
weight = weight * weights[par]
else:
weight = None
# only need lines for multiple plots
# lines = ['dotted','dashdot','dashed','solid']
if not append:
P.figure(figsize=(16*(min(p,4)/4.0),3*(int((p-1)/4)+1)))
for i in range(p):
# figure out how big the multiplot needs to be
if type(append) == int: # need this since isinstance(False,int) == True
q = append
elif isinstance(append,(list,tuple)):
q = len(append)
else:
q = p
# increment subplot index if we're skipping
sp = i + 1
for s in skip:
if i >= s:
sp = sp + 1
# if we're given the actual parnames of an existing plot, figure out where we fall
if isinstance(append,(list,tuple)):
try:
sp = append.index(pars[i]) + 1
except ValueError:
continue
P.subplot(int((q-1)/4)+1,min(q,4),sp)
if append:
P.hold(True)
if pars[i] in ranges:
dx = ranges[pars[i]]
P.hist(data[:,i],bins=int(bins * (N.max(data[:,i]) - N.min(data[:,i])) / (dx[1] - dx[0])),
weights=weight,normed=True,histtype='step',color=color,linestyle=linestyle,linewidth=linewidth)
P.xlim(dx)
else:
P.hist(data[:,i],bins=bins,
weights=weight,normed=True,histtype='step',color=color,linestyle=linestyle,linewidth=linewidth)
P.xlabel(labels[pars[i]] if pars[i] in labels else pars[i])
# P.ticklabel_format(style='sci',axis='both',scilimits=(-3,4),useoffset='True')
P.locator_params(axis='both',nbins=6)
P.minorticks_on()
fx = P.ScalarFormatter(useOffset=True,useMathText=True)
fx.set_powerlimits((-3,4)); fx.set_scientific(True)
fy = P.ScalarFormatter(useOffset=True,useMathText=True)
fy.set_powerlimits((-3,4)); fy.set_scientific(True)
P.gca().xaxis.set_major_formatter(fx)
P.gca().yaxis.set_major_formatter(fy)
P.hold(False)
if title and not append:
P.suptitle(title)
P.tight_layout()
# to do: should fix this histogram so that the contours are correct
# even for restricted ranges...
def _plotonehist2(x,y,parx,pary,smooth=False,colormap=True,ranges={},labels={},bins=50,levels=3,weights=None,
color='k',linewidth=1):
hold = P.ishold()
hrange = [ranges[parx] if parx in ranges else [N.min(x),N.max(x)],
ranges[pary] if pary in ranges else [N.min(y),N.max(y)]]
[h,xs,ys] = N.histogram2d(x,y,bins=bins,normed=True,range=hrange,weights=weights)
if colormap:
P.contourf(0.5*(xs[1:]+xs[:-1]),0.5*(ys[1:]+ys[:-1]),h.T,cmap=P.get_cmap('YlOrBr')); P.hold(True)
H,tmp1,tmp2 = N.histogram2d(x,y,bins=bins,range=hrange,weights=weights)
if smooth:
# only need scipy if we're smoothing
import scipy.ndimage.filters as SNF
H = SNF.gaussian_filter(H,sigma=1.5 if smooth is True else smooth)
if weights is None:
H = H / len(x)
else:
H = H / N.sum(H) # I think this is right...
Hflat = -N.sort(-H.flatten()) # sort highest to lowest
cumprob = N.cumsum(Hflat) # sum cumulative probability
levels = [N.interp(level,cumprob,Hflat) for level in [0.6826,0.9547,0.9973][:levels]]
    xs = N.linspace(hrange[0][0],hrange[0][1],bins)
# FUNCTION feat(f):
# INPUT: signal f is 2D array-> 1st dim: samples, 2nd dim: different signal profiles
# INPUT EXAMPLE: input force of 200 sample readings of fx,fy,fz will be an input array of (200,3)
# OUTPUT: time and frequency features of f, summing to numfeat features
# OUTPUT EXAMPLE: for the (200,3) input array we get a (numfeat,3) output feature array
#
# Code source: <NAME>
#
# License: BSD 3 clause
#
import time
import numpy as np
import numpy.matlib as npm
from numpy import linalg as la
import math
import scipy.io as sio
from scipy.optimize import curve_fit
from nitime import algorithms as alg
import shutil
import os
from pylab import *
import random
import matplotlib.pyplot as plt
import pywt
threshold = 0.0001
nbins = 3
p = 3
binlims = (-10, 10)
# TIME DOMAIN FEATURES =============================================================================================
# Integrated Signal (IS): summation over 1st dimension
def intsgnl(f):
return np.array([sum(abs(f), 0)]), time.time()
# Mean Absolute Value (MAV): 1/N * IS
def meanabs(f):
return 1./len(f)*intsgnl(f)[0], time.time()
# MAV SLoPe (MAVSLP): MAV(i+1)-MAV(i)
def meanabsslp(f):
return meanabs(f[1:, :])[0] - meanabs(f[:-1, :])[0], time.time()
# Simple Square Integral (SSI): summation of squares over 1st dimension
def ssi(f):
return np.array([sum(np.power(f, 2), 0)]), time.time()
# VARiance (VAR): 1/(N-1) * SSI
def var(f):
return 1./(len(f)-1) * ssi(f)[0], time.time()
# Root Mean Square (RMS): sqrt(1/N * SSI)
def rms(f):
return np.power(1./len(f) * ssi(f)[0], 0.5), time.time()
# RaNGe (RNG): max(f) - min(f)
def rng(f):
return np.array([np.amax(f, 0) - np.amin(f, 0)]), time.time()
# Waveform Length (WL): summation over (x(n+1)-x(n))
def wavl(f):
return np.array([sum(abs(f[1:, :]-f[:-1, :]), 0)]), time.time()
# Zero Crossing (ZC): summation over {(-x(n+1)*x(n)>=thres)*(|x(n)-x(n+1)|>=thres)}
def zerox(f):
tmpdiff = abs(f[:-1, :] - f[1:, :]) >= threshold
tmpmult = -np.multiply(f[1:, :], f[:-1, :]) >= threshold
return np.array([sum(np.multiply(tmpmult, tmpdiff), 0)]), time.time()
# Slope Sign Change (SSC): summation over {((x(n)-x(n-1))*(x(n)-x(n+1)))>=thres}
def ssc(f):
tmpd1 = f[1:-1, :] - f[:-2, :]
tmpd2 = f[1:-1, :] - f[2:, :]
return np.array([sum(np.multiply(tmpd1, tmpd2) >= threshold, 0)]), time.time()
# Willison AMPlitude (WAMP): summation over {(x(n)-x(n-1))>=thres}
def wamp(f):
tmpd = f[1:, :] - f[:-1, :]
return np.array([sum(tmpd >= threshold, 0)]), time.time()
# Histogram of Signal (HS)
def shist(f):
shist = np.zeros((nbins, f.shape[-1]))
for i in range(f.shape[-1]):
tmphist, _ = np.histogram(f[:, i], nbins)
shist[:, i] = tmphist
return shist, time.time()
# EXTRA TIME DOMAIN FEATURES LIKE GOLZ DID IN ICRA2015 =============================================================
# Integrated Signal Real (ISR): summation of real values over 1st dimension
def intsgnlr(f):
return np.array([np.sum(f, 0)]), time.time()
# Mean Value (MV): 1/N * ISR
def meanv(f):
return np.array([np.mean(f, 0)]), time.time()
# Integrated Weighted Signal Real (IWSR): summation of real values minus their mean, over 1st dimension
def intwsgnlr(f):
return np.array([sum(f - meanv(f)[0], 0)]), time.time()
# Standard Deviation (SD): 1/N * summation over (f-MV)^2
def stdr(f):
return np.array([np.std(f, 0)]), time.time()
# MaXimum (MX): max(f)
def mx(f):
return np.array([np.max(f, 0)]), time.time()
# RaNGe X (RNGX): number of samples, aka 1st dimension
def rngx(f):
    # the range bound below is reconstructed: one entry per signal column
    return np.array([[np.array(f).shape[0] for i in range(np.array(f).shape[-1])]]), time.time()
# ============================================================================
# Chapter 7  Hot Water Supply Systems
# Section 1  Hot Water Supply Systems
# Ver.18 (Energy consumption performance calculation program (residential version) Ver.02.05-)
# ============================================================================
import numpy as np
from functools import lru_cache
import pyhees.section7_1_b as default
import pyhees.section7_1_c as gas
import pyhees.section7_1_d as oil
import pyhees.section7_1_e as eheatpump
import pyhees.section7_1_f as eheater
import pyhees.section7_1_g as hybrid_gas
import pyhees.section7_1_g_3 as hybrid_gas_3
import pyhees.section7_1_h as gas_hybrid
import pyhees.section7_1_i as whybrid
import pyhees.section7_1_j as watersaving
import pyhees.section7_1_m as schedule
import pyhees.section9_2 as lss
import pyhees.section9_3 as ass
from pyhees.section11_1 import load_outdoor, get_Theta_ex
from pyhees.section11_2 import load_solrad
from pyhees.section11_3 import load_schedule, get_schedule_hw
# ============================================================================
# 5. Energy consumption of the hot water supply system
# ============================================================================
# ============================================================================
# 5.1 Electricity consumption
# ============================================================================
@lru_cache()
def calc_hotwater_load(n_p, region, sol_region, has_bath, bath_function, pipe_diameter, kitchen_watersaving_A,
kitchen_watersaving_C, shower_watersaving_A, shower_watersaving_B, washbowl_watersaving_C,
bath_insulation,
type=None, ls_type=None, A_sp=None, P_alpha_sp=None, P_beta_sp=None, W_tnk_ss=None,
hotwater_use=None, heating_flag_d=None, A_col=None, P_alpha=None, P_beta=None, V_fan_P0=None,
d0=None, d1=None, m_fan_test=None, W_tnk_ass=None
):
"""給湯負荷の計算
Args:
n_p(float): 仮想居住人数 (人)
region(int): 省エネルギー地域区分
sol_region(int): 年間の日射地域区分(1-5)
has_bath(bool): 浴室等の有無
bath_function(str): ふろ機能の種類
pipe_diameter(str): ヘッダー分岐後の径
kitchen_watersaving_A(bool): 台所水栓の手元止水機能の有無
kitchen_watersaving_C(bool): 台所水栓の水優先吐水機能の有無
shower_watersaving_A(bool): 浴室シャワー水栓の手元止水機能の有無
shower_watersaving_B(bool): 浴室シャワー水栓の小流量吐水機能の有無
washbowl_watersaving_C(bool): 洗面水栓の水優先吐水機能の有無
bath_insulation(bool): 浴槽の断熱の有無
type(str, optional): 太陽熱利用設備の種類 (液体集熱式,空気集熱式,None) (Default value = None)
ls_type(str, optional): 液体集熱式太陽熱利用設備の種類 (太陽熱温水器,ソーラーシステム) (Default value = None)
A_sp(float, optional): 太陽熱集熱部の有効集熱面積 (m2) (Default value = None)
P_alpha_sp(float, optional): 太陽熱集熱部の方位角 (°) (Default value = None)
P_beta_sp(float, optional): 太陽熱集熱部の傾斜角 (°) (Default value = None)
W_tnk_ss(float, optional): ソーラーシステムのタンク容量 (L) (Default value = None)
hotwater_use(bool, optional): 空気集熱式太陽熱利用設備が給湯部を有する場合はTrue (Default value = None)
heating_flag_d(ndarray, optional): 暖房日 (Default value = None)
A_col(tuple, optional): 集熱器群の面積 (m2) (Default value = None)
P_alpha(float, optional): 方位角 (°) (Default value = None)
P_beta(float, optional): 傾斜角 (°) (Default value = None)
V_fan_P0(float, optional): 空気搬送ファンの送風機特性曲線において機外静圧をゼロとしたときの空気搬送ファンの風量 (m3/h) (Default value = None)
d0(tuple, optional): 集熱器群を構成する集熱器の集熱効率特性線図一次近似式の切片 (-) (Default value = None)
d1(tuple, optional): 集熱器群を構成する集熱器の集熱効率特性線図一次近似式の傾き (W/(m2K)) (Default value = None)
m_fan_test(tuple, optional): 集熱器群を構成する集熱器の集熱性能試験時における単位面積当たりの空気の質量流量 (kg/(s・m2)) (Default value = None)
W_tnk_ass(float, optional): タンク容量 (L) (Default value = None)
Returns:
dict: 1日当たりの給湯設備付加
"""
    # occupant schedule
    schedule = load_schedule()
    schedule_hw = get_schedule_hw(schedule)
    # outdoor conditions
    outdoor = load_outdoor()
    Theta_ex_d_t = get_Theta_ex(region, outdoor)
    # ----- 14. Nighttime average outdoor temperature -----
    # nighttime average outdoor temperature (℃) (15)
    Theta_ex_Nave_d = get_Theta_ex_Nave_d(Theta_ex_d_t)
    # ----- 13. Daily average outdoor temperature -----
    # daily average outdoor temperature (℃) (14)
    theta_ex_d_Ave_d = get_theta_ex_d_Ave_d(Theta_ex_d_t)
    # ----- 12. Daily average water supply temperature -----
    # period average outdoor temperature (℃) (13)
    Theta_ex_prd_Ave_d = get_Theta_ex_prd_Ave_d(theta_ex_d_Ave_d)
    # daily average water supply temperature (℃) (12)
    Theta_wtr_d = get_Theta_wtr_d(region, Theta_ex_prd_Ave_d)
    # ----- 11. Hot water heat load due to reheating the bathtub -----
    # hot water heat load due to reheating the bathtub (MJ/h) (10)
    L_ba_d_t = calc_L_ba_d_t(bath_insulation, schedule_hw, has_bath, theta_ex_d_Ave_d, n_p)
    # ----- 10. Standard hot water supply volume -----
    # standard hot water supply volume (L/h) (7)
    W_k_d_t = calc_W_k_d_t(n_p, schedule_hw)
    W_s_d_t = calc_W_s_d_t(n_p, schedule_hw, has_bath)
    W_w_d_t = calc_W_w_d_t(n_p, schedule_hw)
    W_b1_d_t = calc_W_b1_d_t(n_p, schedule_hw, has_bath, bath_function)
    W_b2_d_t = calc_W_b2_d_t(n_p, schedule_hw, has_bath, bath_function)
    # standard hot water volume when adding hot water from the bathtub faucet (L/h) (9)
    W_ba1_d_t = calc_W_ba1_d_t(bath_function, L_ba_d_t, Theta_wtr_d)
    # ----- 9. Water-saving-corrected hot water supply volume -----
    # water-saving-corrected hot water supply volume (L/h) (6)
W_dash_k_d_t = calc_W_dash_k_d_t(W_k_d_t, kitchen_watersaving_A, kitchen_watersaving_C, pipe_diameter, Theta_wtr_d)
W_dash_s_d_t = calc_W_dash_s_d_t(W_s_d_t, shower_watersaving_A, shower_watersaving_B, pipe_diameter)
W_dash_w_d_t = calc_W_dash_w_d_t(W_w_d_t, washbowl_watersaving_C, pipe_diameter, Theta_wtr_d)
W_dash_b1_d_t = calc_W_dash_b1_d_t(W_b1_d_t, pipe_diameter)
W_dash_b2_d_t = calc_W_dash_b2_d_t(W_b2_d_t)
W_dash_ba1_d_t = calc_W_dash_ba1_d_t(W_ba1_d_t, pipe_diameter)
    # ----- 8. Water-saving-corrected hot water heat load -----
    # standard hot water supply temperature (℃)
    Theta_sw_k = get_Theta_sw_k()
    Theta_sw_s = get_Theta_sw_s()
    Theta_sw_w = get_Theta_sw_w()
    # water-saving-corrected hot water heat load (MJ/h) (5)
L_dash_k_d_t = get_L_dash_k_d_t(W_dash_k_d_t, Theta_sw_k, Theta_wtr_d)
L_dash_s_d_t = get_L_dash_s_d_t(W_dash_s_d_t, Theta_sw_s, Theta_wtr_d)
L_dash_w_d_t = get_L_dash_w_d_t(W_dash_w_d_t, Theta_sw_w, Theta_wtr_d)
L_dash_b1_d_t, L_dash_b2_d_t = get_L_dash_bx_d_t(W_dash_b1_d_t, W_dash_b2_d_t, Theta_wtr_d, has_bath, bath_function)
L_dash_ba1_d_t, L_dash_ba2_d_t = get_L_dash_bax_d_t(W_dash_ba1_d_t, Theta_wtr_d, L_ba_d_t, has_bath, bath_function)
    # ----- 7. Solar-corrected hot water heat load -----
    # corrected heat collection by the solar water heating system
L_sun_d_t = calc_L_sun_d_t(
region=region,
sol_region=sol_region,
solar_device=type,
ls_type=ls_type,
A_sp=A_sp,
P_alpha_sp=P_alpha_sp,
P_beta_sp=P_beta_sp,
W_tnk_ss=W_tnk_ss,
hotwater_use=hotwater_use,
heating_flag_d=heating_flag_d,
A_col=A_col,
P_alpha=P_alpha,
P_beta=P_beta,
V_fan_P0=V_fan_P0,
d0=d0,
d1=d1,
m_fan_test=m_fan_test,
W_tnk_ass=W_tnk_ass,
Theta_wtr_d=Theta_wtr_d,
L_dash_k_d_t=L_dash_k_d_t,
L_dash_s_d_t=L_dash_s_d_t,
L_dash_w_d_t=L_dash_w_d_t,
L_dash_b1_d_t=L_dash_b1_d_t,
L_dash_b2_d_t=L_dash_b2_d_t,
L_dash_ba1_d_t=L_dash_ba1_d_t
)
    # solar-corrected hot water heat load
L_dashdash_k_d_t = calc_L_dashdash_k_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t,
L_sun_d_t)
L_dashdash_s_d_t = calc_L_dashdash_s_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t,
L_sun_d_t)
L_dashdash_w_d_t = calc_L_dashdash_w_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t,
L_sun_d_t)
L_dashdash_b1_d_t = calc_L_dashdash_b1_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t, L_sun_d_t)
L_dashdash_b2_d_t = calc_L_dashdash_b2_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t, L_sun_d_t)
L_dashdash_ba1_d_t = calc_L_dashdash_ba1_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t, L_sun_d_t)
L_dashdash_ba2_d_t = get_L_dashdash_ba2_d_t(L_dash_ba2_d_t)
print('L_ba = {}'.format(np.sum(L_ba_d_t)))
print('W_k = {}'.format(np.sum(W_k_d_t)))
print('W_s = {}'.format(np.sum(W_s_d_t)))
print('W_w = {}'.format(np.sum(W_w_d_t)))
print('W_b1 = {}'.format(np.sum(W_b1_d_t)))
print('W_b2 = {}'.format(np.sum(W_b2_d_t)))
print('W_ba1 = {}'.format(np.sum(W_ba1_d_t)))
print('W_dash_k = {}'.format(np.sum(W_dash_k_d_t)))
print('W_dash_s = {}'.format(np.sum(W_dash_s_d_t)))
print('W_dash_w = {}'.format(np.sum(W_dash_w_d_t)))
print('W_dash_b1 = {}'.format(np.sum(W_dash_b1_d_t)))
print('W_dash_b2 = {}'.format(np.sum(W_dash_b2_d_t)))
print('W_dash_ba1 = {}'.format(np.sum(W_dash_ba1_d_t)))
print('L_dash_k = {}'.format(np.sum(L_dash_k_d_t)))
print('L_dash_s = {}'.format(np.sum(L_dash_s_d_t)))
print('L_dash_w = {}'.format(np.sum(L_dash_w_d_t)))
print('L_dash_b1 = {}'.format(np.sum(L_dash_b1_d_t)))
print('L_dash_b2 = {}'.format(np.sum(L_dash_b2_d_t)))
print('L_dash_ba1 = {}'.format(np.sum(L_dash_ba1_d_t)))
print('L_dash_ba2 = {}'.format(np.sum(L_dash_ba2_d_t)))
print('L_dashdash_k = {}'.format(np.sum(L_dashdash_k_d_t)))
print('L_dashdash_s = {}'.format(np.sum(L_dashdash_s_d_t)))
print('L_dashdash_w = {}'.format(np.sum(L_dashdash_w_d_t)))
print('L_dashdash_b1 = {}'.format(np.sum(L_dashdash_b1_d_t)))
print('L_dashdash_b2 = {}'.format(np.sum(L_dashdash_b2_d_t)))
print('L_dashdash_ba1 = {}'.format(np.sum(L_dashdash_ba1_d_t)))
print('L_dashdash_ba2 = {}'.format(np.sum(L_dashdash_ba2_d_t)))
return {
'L_dash_k_d_t': L_dash_k_d_t,
'L_dash_s_d_t': L_dash_s_d_t,
'L_dash_w_d_t': L_dash_w_d_t,
'L_dash_b1_d_t': L_dash_b1_d_t,
'L_dash_b2_d_t': L_dash_b2_d_t,
'L_dash_ba1_d_t': L_dash_ba1_d_t,
'L_dash_ba2_d_t': L_dash_ba2_d_t,
'L_dashdash_k_d_t': L_dashdash_k_d_t,
'L_dashdash_s_d_t': L_dashdash_s_d_t,
'L_dashdash_w_d_t': L_dashdash_w_d_t,
'L_dashdash_b1_d_t': L_dashdash_b1_d_t,
'L_dashdash_b2_d_t': L_dashdash_b2_d_t,
'L_dashdash_ba1_d_t': L_dashdash_ba1_d_t,
'L_dashdash_ba2_d_t': L_dashdash_ba2_d_t,
'W_dash_k_d_t': W_dash_k_d_t,
'W_dash_s_d_t': W_dash_s_d_t,
'W_dash_w_d_t': W_dash_w_d_t,
'W_dash_b1_d_t': W_dash_b1_d_t,
'W_dash_b2_d_t': W_dash_b2_d_t,
'W_dash_ba1_d_t': W_dash_ba1_d_t,
'theta_ex_d_Ave_d': theta_ex_d_Ave_d,
'Theta_ex_Nave_d': Theta_ex_Nave_d
}
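# Illustrative call (not part of the original module; the argument values, including the
# Japanese option strings, are placeholders): the loads are computed hourly for a full
# year, so every array in the returned dict has 24 * 365 = 8760 entries.
#
#   loads = calc_hotwater_load(n_p=4, region=6, sol_region=3, has_bath=True,
#                              bath_function='ふろ給湯機(追焚あり)', pipe_diameter='上記以外',
#                              kitchen_watersaving_A=False, kitchen_watersaving_C=False,
#                              shower_watersaving_A=False, shower_watersaving_B=False,
#                              washbowl_watersaving_C=False, bath_insulation=False)
#   # loads['L_dash_k_d_t'].shape == (8760,)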
def calc_E_E_W_d_t(n_p, L_HWH, heating_flag_d, region, sol_region, HW, SHC):
"""1時間当たりの給湯設備の消費電力量 (1)
Args:
n_p(float): 仮想居住人数 (人)
L_HWH(ndarray): 温水暖房用熱源機の熱負荷
heating_flag_d(ndarray): 暖房日
region(int): 省エネルギー地域区分
sol_region(int): 年間の日射地域区分(1-5)
HW(dict): 給湯機の仕様
SHC(dict): 集熱式太陽熱利用設備の仕様
Returns:
ndarray: 1日当たりの給湯設備の消費電力量 (kWh/d)
"""
if HW is None or HW['hw_type'] is None:
        # return 0 when there is no kitchen, washroom, or bathroom
        return np.zeros(24 * 365)
    if HW['hw_type'] == 'コージェネレーションを使用する':
        return np.zeros(24 * 365)
    # normalize the bath function type
    bath_function = get_normalized_bath_function(HW['hw_type'], HW.get('bath_function'))
    # generate the hot water load
args = {
'n_p': n_p,
'region': region,
'sol_region': sol_region,
'has_bath': HW['has_bath'],
'bath_function': bath_function,
'pipe_diameter': HW['pipe_diameter'],
'kitchen_watersaving_A': HW['kitchen_watersaving_A'],
'kitchen_watersaving_C': HW['kitchen_watersaving_C'],
'shower_watersaving_A': HW['shower_watersaving_A'],
'shower_watersaving_B': HW['shower_watersaving_B'],
'washbowl_watersaving_C': HW['washbowl_watersaving_C'],
'bath_insulation': HW['bath_insulation']
}
if SHC is not None:
if SHC['type'] == '液体集熱式':
args.update({
'type': SHC['type'],
'ls_type': SHC['ls_type'],
'A_sp': SHC['A_sp'],
'P_alpha_sp': SHC['P_alpha_sp'],
'P_beta_sp': SHC['P_beta_sp'],
'W_tnk_ss': SHC['W_tnk_ss']
})
elif SHC['type'] == '空気集熱式':
args.update({
'type': SHC['type'],
'hotwater_use': SHC['hotwater_use'],
'heating_flag_d': tuple(heating_flag_d),
'A_col': SHC['A_col'],
'P_alpha': SHC['P_alpha'],
'P_beta': SHC['P_beta'],
'V_fan_P0': SHC['V_fan_P0'],
'm_fan_test': SHC['m_fan_test'],
'd0': SHC['d0'],
'd1': SHC['d1'],
'W_tnk_ass': SHC['W_tnk_ass']
})
else:
raise ValueError(SHC['type'])
hotwater_load = calc_hotwater_load(**args)
    # hourly electricity consumption of the water heater (kWh/h)
E_E_hs_d_t = calc_E_E_hs_d_t(
hw_type=HW['hw_type'],
bath_function=bath_function,
hybrid_category=HW['hybrid_category'],
package_id=HW.get('package_id'),
hybrid_param=HW.get('hybrid_param'),
e_rtd=HW['e_rtd'],
e_dash_rtd=HW['e_dash_rtd'],
L_dashdash_k_d_t=hotwater_load['L_dashdash_k_d_t'],
L_dashdash_s_d_t=hotwater_load['L_dashdash_s_d_t'],
L_dashdash_w_d_t=hotwater_load['L_dashdash_w_d_t'],
L_dashdash_b1_d_t=hotwater_load['L_dashdash_b1_d_t'],
L_dashdash_b2_d_t=hotwater_load['L_dashdash_b2_d_t'],
L_dashdash_ba1_d_t=hotwater_load['L_dashdash_ba1_d_t'],
L_dashdash_ba2_d_t=hotwater_load['L_dashdash_ba2_d_t'],
W_dash_k_d_t=hotwater_load['W_dash_k_d_t'],
W_dash_s_d_t=hotwater_load['W_dash_s_d_t'],
W_dash_w_d_t=hotwater_load['W_dash_w_d_t'],
W_dash_b1_d_t=hotwater_load['W_dash_b1_d_t'],
W_dash_b2_d_t=hotwater_load['W_dash_b2_d_t'],
W_dash_ba1_d_t=hotwater_load['W_dash_ba1_d_t'],
theta_ex_d_Ave_d=hotwater_load['theta_ex_d_Ave_d'],
Theta_ex_Nave_d=hotwater_load['Theta_ex_Nave_d'],
L_HWH=L_HWH,
CO2HP=HW['CO2HP'] if 'CO2HP' in HW else None
)
    # electricity consumption of auxiliary equipment of the solar water heating system
E_E_aux_ss_d_t = calc_E_E_aux_ss_d_t(
SHC=SHC,
region=region,
sol_region=sol_region,
heating_flag_d=heating_flag_d
)
    # hourly electricity consumption of the hot water supply system (1)
E_E_W_d_t = E_E_hs_d_t + E_E_aux_ss_d_t
return E_E_W_d_t
def calc_E_E_aux_ss_d_t(SHC, region=None, sol_region=None, heating_flag_d=None):
"""1時間当たりの補機の消費電力量 (kWh/h)
Args:
SHC(dict): 太陽熱利用設備の仕様
region(int, optional): 省エネルギー地域区分 (Default value = None)
sol_region(int, optional): 年間の日射地域区分 (Default value = None)
heating_flag_d(ndarray, optional): 暖房日 (Default value = None)
Returns:
ndarray: 1時間当たりの補機の消費電力量 (kWh/h)
"""
if SHC is None:
return np.zeros(24 * 365)
elif SHC['type'] == '液体集熱式':
        # determined by the calculation method of Chapter 9 "Natural Energy Utilization
        # Equipment", Section 2 "Liquid-collector solar water heating systems"
        # hourly electricity consumption of auxiliary equipment (kWh/h)
return lss.calc_E_E_lss_aux_d_t(
ls_type=SHC['ls_type'],
pmp_type='上記以外の機種',
P_alpha_sp=SHC['P_alpha_sp'],
P_beta_sp=SHC['P_beta_sp'],
region=region,
sol_region=sol_region
)
elif SHC['type'] == '空気集熱式':
        # determined by the calculation method of Chapter 9 "Natural Energy Utilization
        # Equipment", Section 3 "Air-collector solar water heating systems"
        # portion of the hourly auxiliary electricity consumption allocated to the hot water supply system (kWh/h)
return ass.calc_E_E_W_aux_ass_d_t(
hotwater_use=SHC['hotwater_use'],
heating_flag_d=heating_flag_d,
region=region,
sol_region=sol_region,
P_alpha=SHC['P_alpha'],
P_beta=SHC['P_beta'],
A_col=SHC['A_col'],
V_fan_P0=SHC['V_fan_P0'],
m_fan_test=SHC['m_fan_test'],
d0=SHC['d0'],
d1=SHC['d1'],
fan_sso=SHC['fan_sso'],
fan_type=SHC['fan_type'],
pump_sso=SHC['pump_sso']
)
else:
raise ValueError(SHC['type'])
# ============================================================================
# 5.2 Gas consumption
# ============================================================================
def calc_E_G_W_d_t(n_p, L_HWH, heating_flag_d, A_A, region, sol_region, HW, SHC):
"""1時間当たりの給湯設備のガス消費量 (MJ/h) (2)
Args:
n_p(float): 仮想居住人数
L_HWH(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
A_A(float): 床面積の合計[m^2]
region(int): 地域区分
sol_region(int): 年間の日射地域区分
HW(dict): 給湯機の仕様
SHC(dict): 集熱式太陽熱利用設備の仕様
heating_flag_d: returns: 1時間当たりの給湯設備のガス消費量 (MJ/h)
Returns:
ndarray: 1時間当たりの給湯設備のガス消費量 (MJ/h)
"""
if HW is None or HW['hw_type'] is None:
        # return 0 when there is no kitchen, washroom, or bathroom
        return np.zeros(24 * 365)
    # normalize the bath function type
    bath_function = get_normalized_bath_function(HW['hw_type'], HW.get('bath_function'))
    # generate the hot water load
args = {
'n_p': n_p,
'region': region,
'sol_region': sol_region,
'has_bath': HW['has_bath'],
'bath_function': bath_function,
'pipe_diameter': HW['pipe_diameter'],
'kitchen_watersaving_A': HW['kitchen_watersaving_A'],
'kitchen_watersaving_C': HW['kitchen_watersaving_C'],
'shower_watersaving_A': HW['shower_watersaving_A'],
'shower_watersaving_B': HW['shower_watersaving_B'],
'washbowl_watersaving_C': HW['washbowl_watersaving_C'],
'bath_insulation': HW['bath_insulation']
}
if SHC is not None:
if SHC['type'] == '液体集熱式':
args.update({
'type': SHC['type'],
'ls_type': SHC['ls_type'],
'A_sp': SHC['A_sp'],
'P_alpha_sp': SHC['P_alpha_sp'],
'P_beta_sp': SHC['P_beta_sp'],
'W_tnk_ss': SHC['W_tnk_ss']
})
elif SHC['type'] == '空気集熱式':
args.update({
'type': SHC['type'],
'hotwater_use': SHC['hotwater_use'],
'heating_flag_d': tuple(heating_flag_d),
'A_col': SHC['A_col'],
'P_alpha': SHC['P_alpha'],
'P_beta': SHC['P_beta'],
'V_fan_P0': SHC['V_fan_P0'],
'm_fan_test': SHC['m_fan_test'],
'd0': SHC['d0'],
'd1': SHC['d1'],
'W_tnk_ass': SHC['W_tnk_ass']
})
else:
raise ValueError(SHC['type'])
hotwater_load = calc_hotwater_load(**args)
    # daily gas consumption of the water heater
E_G_hs_d = calc_E_G_hs_d(
hw_type=HW['hw_type'],
hybrid_category=HW['hybrid_category'],
e_rtd=HW['e_rtd'],
e_dash_rtd=HW['e_dash_rtd'],
bath_function=bath_function,
package_id=HW.get('package_id'),
L_dashdash_k_d_t=hotwater_load['L_dashdash_k_d_t'],
L_dashdash_s_d_t=hotwater_load['L_dashdash_s_d_t'],
L_dashdash_w_d_t=hotwater_load['L_dashdash_w_d_t'],
L_dashdash_b1_d_t=hotwater_load['L_dashdash_b1_d_t'],
L_dashdash_b2_d_t=hotwater_load['L_dashdash_b2_d_t'],
L_dashdash_ba1_d_t=hotwater_load['L_dashdash_ba1_d_t'],
L_dashdash_ba2_d_t=hotwater_load['L_dashdash_ba2_d_t'],
W_dash_k_d_t=hotwater_load['W_dash_k_d_t'],
W_dash_s_d_t=hotwater_load['W_dash_s_d_t'],
W_dash_w_d_t=hotwater_load['W_dash_w_d_t'],
W_dash_b1_d_t=hotwater_load['W_dash_b1_d_t'],
W_dash_b2_d_t=hotwater_load['W_dash_b2_d_t'],
W_dash_ba1_d_t=hotwater_load['W_dash_ba1_d_t'],
Theta_ex_Ave=hotwater_load['theta_ex_d_Ave_d'],
Theta_ex_Nave=hotwater_load['Theta_ex_Nave_d'],
L_HWH=L_HWH,
hybrid_param=HW.get('hybrid_param')
)
return E_G_hs_d
# ============================================================================
# 5.3 Kerosene consumption
# ============================================================================
def calc_E_K_W_d_t(n_p, L_HWH, heating_flag_d, A_A, region, sol_region, HW, SHC):
"""1時間当たりの給湯設備の灯油消費量 (MJ/h) (3)
Args:
n_p(float): 仮想居住人数
L_HWH(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
A_A(float): 床面積の合計[m^2]
region(int): 省エネルギー地域区分
sol_region(int): 年間の日射地域区分
HW(dict): 給湯機の仕様
SHC(dict): 集熱式太陽熱利用設備の仕様
heating_flag_d: returns: 1時間当たりの給湯設備の灯油消費量 (MJ/h) (3)
Returns:
ndarray: 1時間当たりの給湯設備の灯油消費量 (MJ/h) (3)
"""
if HW is None or HW['hw_type'] is None:
        # return 0 when there is no kitchen, washroom, or bathroom
        return np.zeros(24 * 365)
#
# Author: <NAME>
# Copyright 2017
#
import logging
import isceobj
from contrib.splitSpectrum import SplitRangeSpectrum as splitSpectrum
import numpy as np
import os
from isceobj.Constants import SPEED_OF_LIGHT
import time
logger = logging.getLogger('isce.insar.runSplitSpectrum')
def split(fullBandSlc, lowBandSlc, highBandSlc, fs, bL, bH, fL, fH):
ss = splitSpectrum()
ss.blocksize = 100
ss.memsize = 512
ss.inputDS = fullBandSlc + ".vrt"
ss.lbDS = lowBandSlc
ss.hbDS = highBandSlc
ss.rangeSamplingRate = fs
ss.lowBandWidth = bL
ss.highBandWidth = bH
ss.lowCenterFrequency = fL
ss.highCenterFrequency = fH
ss.split()
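# Hedged usage sketch (file names and numbers below are placeholders, not from this run):
# carve a low-band and a high-band SLC out of a full-band SLC.
#
#   split('full.slc', 'lowband.slc', 'highband.slc',
#         fs=64.0e6, bL=16.0e6, bH=16.0e6, fL=-21.0e6, fH=21.0e6)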
def createSlcImage(slcName, width):
slc = isceobj.createSlcImage()
slc.setWidth(width)
slc.filename = slcName
slc.setAccessMode('write')
slc.renderHdr()
def adjustCenterFrequency(B, N, dc):
# because of quantization, there may not be an index representing dc. We
# therefore adjust dc to make sure that there is an index to represent it.
# We find the index that is closest to nominal dc and then adjust dc to the
# frequency of that index.
# B = full band-width
# N = length of signal
# dc = center frequency of the sub-band
df = B/N
if (dc < 0):
ind = N + np.round(dc/df)
else:
ind = np.round(dc/df);
dc = frequency (B, N, ind)
return dc
def frequency (B, N, n):
# calculates frequency at a given index.
# Assumption: for indices 0 to (N-1)/2, frequency is positive
# and for indices larger than (N-1)/2 frequency is negative
#frequency interval given B as the total bandwidth
df = B/N
middleIndex = int((N-1)/2)
if (n > middleIndex):
f = (n-N)*df
else:
f = n*df
return f
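# Worked example (illustrative): for a 32 MHz band sampled over N = 100 frequency bins,
# indices up to (N-1)/2 map to positive frequencies and the rest wrap to negative ones.
#
#   frequency(32.0e6, 100, 10)   # ->  3.2e6  Hz
#   frequency(32.0e6, 100, 60)   # -> -12.8e6 Hz, i.e. (60 - 100) * 0.32e6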
def runSplitSpectrum(self):
'''
Generate split spectrum SLCs.
'''
if not self.doSplitSpectrum:
print('Split spectrum processing not requested. Skipping ....')
return
masterFrame = self._insar.loadProduct( self._insar.masterSlcCropProduct)
slaveFrame = self._insar.loadProduct( self._insar.slaveSlcCropProduct)
masterSlc = masterFrame.getImage().filename
slaveSlc = slaveFrame.getImage().filename
width1 = masterFrame.getImage().getWidth()
width2 = slaveFrame.getImage().getWidth()
fs_master = masterFrame.rangeSamplingRate
pulseLength_master = masterFrame.instrument.pulseLength
chirpSlope_master = masterFrame.instrument.chirpSlope
#Bandwidth
B_master = np.abs(chirpSlope_master)*pulseLength_master
fs_slave = slaveFrame.rangeSamplingRate
pulseLength_slave = slaveFrame.instrument.pulseLength
chirpSlope_slave = slaveFrame.instrument.chirpSlope
#Bandwidth
B_slave = np.abs(chirpSlope_slave)*pulseLength_slave
print("master image range sampling rate: {0} MHz".format(fs_master/(1.0e6)))
print("slave image range sampling rate: {0} MHz".format(fs_slave/(1.0e6)))
print("master image total range bandwidth: {0} MHz".format(B_master/(1.0e6)))
print("slave image total range bandwidth: {0} MHz".format(B_slave/(1.0e6)))
# If the bandwidth of master and slave are different, choose the smaller bandwidth
# for range split spectrum
    B = np.min([B_slave, B_master])
import cv2
import numpy as np
from keras.models import load_model
from statistics import mode
from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input
import tensorflow as tf
# parameters for loading data and images
emotion_model_path = './models/model.hdf5'
emotion_labels = get_labels('fer2013')
# hyper-parameters for bounding boxes shape
frame_window = 10
emotion_offsets = (20, 40)
# loading models
face_cascade = cv2.CascadeClassifier('./models/face_box.xml')
emotion_classifier = load_model(emotion_model_path)
graph = tf.get_default_graph()
# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]
# starting lists for calculating modes
emotion_window = []
# Select video or webcam feed
def final_ml_predict(bgr_image):
gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5,
minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
for face_coordinates in faces:
x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
gray_face = gray_image[y1:y2, x1:x2]
try:
gray_face = cv2.resize(gray_face, (emotion_target_size))
except:
continue
gray_face = preprocess_input(gray_face, True)
gray_face = np.expand_dims(gray_face, 0)
gray_face = np.expand_dims(gray_face, -1)
with graph.as_default():
emotion_prediction = emotion_classifier.predict(gray_face)
emotion_probability = np.max(emotion_prediction)
emotion_label_arg = np.argmax(emotion_prediction)
emotion_text = emotion_labels[emotion_label_arg]
emotion_window.append(emotion_text)
if len(emotion_window) > frame_window:
emotion_window.pop(0)
try:
emotion_mode = mode(emotion_window)
except:
continue
if emotion_text == 'angry':
color = emotion_probability * np.asarray((255, 0, 0))
elif emotion_text == 'sad':
color = emotion_probability * np.asarray((0, 0, 255))
elif emotion_text == 'happy':
color = emotion_probability * np.asarray((255, 255, 0))
elif emotion_text == 'surprise':
color = emotion_probability * np.asarray((0, 255, 255))
else:
            color = emotion_probability * np.asarray((0, 255, 0))
import job_helper
import click
@job_helper.job('toy2d_train', enumerate_job_names=False)
def train_toy2d(submit_config: job_helper.SubmitConfig, dataset, region_erode_radius, img_noise_std,
n_sup, balance_classes, seed,
sup_path, model, n_hidden, hidden_size, hidden_act, norm_layer,
perturb_noise_std, dist_contour_range,
conf_thresh, conf_avg,
cons_weight, cons_loss_fn, cons_no_dropout,
learning_rate, teacher_alpha,
num_epochs, batch_size, render_cons_grad, render_pred, device,
save_output):
settings = locals().copy()
del settings['submit_config']
import sys
print('Command line:')
print(' '.join(sys.argv))
print('Settings:')
print(', '.join(['{}={}'.format(k, settings[k]) for k in sorted(settings.keys())]))
import os
import numpy as np
import time
import cv2
from scipy.ndimage.morphology import distance_transform_edt
import optim_weight_ema
from toy2d import generate_data
from datapipe.seg_data import RepeatSampler
import torch, torch.nn as nn, torch.nn.functional as F
import torch.utils.data
    rng = np.random.RandomState(seed)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
''' Description
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2019, HANDBOOK"
__credits__ = ["CONG-MINH NGUYEN"]
__license__ = "GPL"
__version__ = "1.0.1"
__date__ = "5/10/2019"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development" # ["Prototype", "Development", or "Production"]
# Project Style: https://dev.to/codemouse92/dead-simple-python-project-structure-and-imports-38c6
# Code Style: http://web.archive.org/web/20111010053227/http://jaynes.colorado.edu/PythonGuidelines.html#module_formatting
#==============================================================================
# Imported Modules
#==============================================================================
import argparse
from pathlib import Path
import os.path
import sys
import time
import copy
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "" # The GPU id to use, usually either "0" or "1"
import json
import numpy as np
import cv2
import requests
from Camera.OrbbecAstraS.camera import Camera, rgbd_to_pointcloud
from GeneralUtils import List, Tuple, Dict, Union, Generic, TypeVar
from GeneralUtils import sample_arrays, stack_list_horizontal
from PointCloudUtils import visualize_pc, points_to_pc, coords_labels_to_pc, load_ply_as_pc, load_ply_as_points
from PointCloudUtils import adjust_pc_coords, global_icp
from PointCloudUtils import radian2degree, degree2radian, m2mm, mm2m, create_rotx_matrix, create_roty_matrix, create_rotz_matrix, create_tranl_matrix
from Segmentation.PointNet.learner import PointNetLearner
#==============================================================================
# Constant Definitions
#==============================================================================
#==============================================================================
# Function Definitions
#==============================================================================
def mpose2mmpose(pose: np.ndarray):
tarr = np.ones(len(pose))
tarr[:3] *= 1000
return pose * tarr
def mmpose2mpose(pose: np.ndarray):
tarr = np.ones(len(pose))
tarr[:3] *= 0.001
return pose * tarr
def load_object_models(model_path='./obj_models/modelMay10/'):
"""
Description:
:param model_path: str, path to the reference models of known objects
:return: pc_models, List[2L ndarrays], list of points of target surface
:return: centroid_models, List[Vector(3 floats)], the list of centroids of model
:return: pose_models, List[List[Vector(6 floats)]], the list of pose list of each model(each model has a list of poses)
"""
pc_models = []
centroid_models = []
pose_models = []
files = os.listdir(path=os.path.join(model_path, 'pc/'))
for _, file in enumerate(files):
filename, _ = os.path.splitext(file)
pc_model = load_ply_as_points(file_path=os.path.join(model_path, 'pc/', file))
centroid, grasping_pose = np.load(os.path.join(model_path, 'info/', filename + '.npy'), allow_pickle=True)
grasping_pose = np.array(grasping_pose).astype(float)
grasping_pose[:, :3] = mm2m(grasping_pose[:, :3])
pc_models.append(pc_model)
centroid_models.append(centroid)
pose_models.append(grasping_pose)
return pc_models, centroid_models, pose_models
def measure_xtran_params(neutral_point, transformation):
"""
Description: Assume that the transformation from robot coord to camera coord is: RotX -> RotY -> RotZ -> Tranl
In this case: RotX = 180, RotY = 0; RotZ = -90; Tranl: unknown
But we know coords of a determined neutral point in 2 coord systems,
hence we can measure Transl from robot centroid to camera centroid.(Step 2)
:param neutral_point : Dict, list of 2 coords of neutral_point in 2 coord systems
:param transformation : Dict, list of 3 rotating transformations
:return: r2c_xtran : Matrix 4x4 floats, transformation from robot coord to camera coord
:return: c2r_xtran : Matrix 4x4 floats, transformation from camera coord to robot coord
# :return: tranl : Matrix 4x4 floats, translation from robot coord to camera coord
"""
# 1: Load coords of the neutral point
neutral_robot = mm2m(coords=np.array(neutral_point['robot_coord'])) # neutral point coord in robot coord system
neutral_camera = mm2m(coords=np.array(neutral_point['camera_coord'])) # neutral point coord in camera coord system
rotx = create_rotx_matrix(theta=-transformation['rotx']) # load transformation matrix of rotation around x
roty = create_roty_matrix(theta=-transformation['roty']) # load transformation matrix of rotation around y
rotz = create_rotz_matrix(theta=-transformation['rotz']) # load transformation matrix of rotation around z
# 2: Find transformation between robot coord centroid and camera coord centroid
rotxyz = np.dot(np.dot(rotz, roty), rotx) # determine transformation matrix after rotate sequently around x, y, z
neutral_robot3 = np.dot(rotxyz, np.append(neutral_robot, 1))[:3] # find coord of neutral point after RotXYZ
Oc_in_3 = neutral_robot3 - neutral_camera # find coord of robot centroid in camera coord system
tranl = create_tranl_matrix(vector=-Oc_in_3)
# 3: Find transformation matrix from robot to camera
# r2c_xtran = np.dot(np.dot(np.dot(tranl, rotz), roty), rotx)
# c2r_xtran = np.linalg.inv(r2c_xtran)
return rotx, roty, rotz, tranl
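# --- Illustrative sketch (not part of the original pipeline) -----------------
# The four matrices returned by measure_xtran_params compose into the full
# robot-to-camera transformation exactly as in the commented-out lines above;
# the inverse gives the camera-to-robot transformation.
def _example_compose_xtran(rotx, roty, rotz, tranl):
    r2c_xtran = np.dot(np.dot(np.dot(tranl, rotz), roty), rotx)  # robot -> camera
    c2r_xtran = np.linalg.inv(r2c_xtran)                         # camera -> robot
    return r2c_xtran, c2r_xtran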
def input_cli():
user_input = input("Enter CLI commands such as (--NAME VALUE ...): ")
custom_parser = argparse.ArgumentParser()
custom_parser.add_argument('-vb', '--verbose', type=bool, help='show detail results')
custom_parser.add_argument('-vs', '--voxel_size', type=float, help='adjust voxel size')
    custom_parser.add_argument('-ft', '--fitness_threshold', type=float, help='adjust fitness threshold')
custom_parser.add_argument('-pi', '--selected_pose_id', type=int, help='select pose id that will execute grasp')
custom_args = custom_parser.parse_args(user_input.split())
return custom_args
def normalize_pc(points: np.ndarray):
new_points = copy.deepcopy(points)
new_points[:, 2] -= 0.677
new_points[:, 3:6] /= 255.
return new_points
def segment_obj_in_scene(scene_points, n_points: int=16384, n_channels: int=6, url='http://127.0.0.1:5000/api/'):
"""
Description: segment the point clouds of wrench and pipe out of scene
    :param scene_points : 2L ndarray(shape=(n_points, n_channels)), list of points
    :param n_points     : int > 0, number of input points of PointNet Learner
    :param n_channels   : int > 0, number of channels of input points of PointNet Learner
    :param url          : str, endpoint of the remote PointNet segmentation service that predicts point-wise labels
:return: wrench_points : 2L ndarray, points of wrench
:return: pipe_points : 2L ndarray, points of pipe
"""
    # Shuffle points to distribute them equally across the arrays (useful for the next step, where the scene is cut into parts for segmentation)
n_scene_points = len(scene_points)
scene_points = sample_arrays(arrs=scene_points, n_samples=n_scene_points)
# Do segment(cut scene into 2 parts, segment each part then unify results of 2 parts to get overall picture)
wrench_points = []
pipe_points = []
for i in range(2):
# sample the points to fit the network
cur_scene_points = scene_points[i * n_scene_points // 2:(i + 1) * n_scene_points // 2]
cur_scene_points = sample_arrays(arrs=cur_scene_points, n_samples=n_points)
# predict segment labels(send data to remote server through RESTful API)
# pred_labels = learner.predict(x=normalize_pc(points=cur_scene_points[:, :n_channels]))
data = {'points': normalize_pc(points=cur_scene_points[:, :n_channels]).tolist()}
j_data = json.dumps(data)
headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
res = requests.post(url=url, data=j_data, headers=headers)
pred_labels = np.asarray(json.loads(res.text))
# extract the points in the scene of each object by labels
wrench_points.append(cur_scene_points[pred_labels == 2])
pipe_points.append(cur_scene_points[pred_labels == 3])
wrench_points = np.vstack(wrench_points) # get entire points of wrench
pipe_points = np.vstack(pipe_points) # get entire points of pipe
# visualize_pc(coords_labels_to_pc(coords=cur_scene_points[:, :3], labels=pred_labels))
return wrench_points, pipe_points
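# --- Illustrative sketch (not part of the original pipeline) -----------------
# Request/response contract assumed by segment_obj_in_scene: the server receives
# {"points": [[x, y, z, r, g, b], ...]} and answers with a JSON list holding one
# integer label per point (2 = wrench, 3 = pipe). The endpoint URL and the tiny
# dummy payload below are placeholders.
def _example_segmentation_request(url='http://127.0.0.1:5000/api/'):
    dummy_points = np.zeros((4, 6))                       # 4 points, 6 channels (xyz + rgb)
    payload = json.dumps({'points': normalize_pc(points=dummy_points).tolist()})
    headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
    response = requests.post(url=url, data=payload, headers=headers)
    return np.asarray(json.loads(response.text))          # shape (4,), point-wise labels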
def match_object_surface(surface: np.ndarray, model: np.ndarray, model_centroid: Tuple[float, float, float],
voxel_size: float, n_channel: int=6, verbose: bool=False):
"""
    Description: align the reference model with the segmented object surface and return the fitting transformation
    :param surface        : 2L ndarray(shape=(n_points, n_channels)), list of points of the target surface
    :param model          : 2L ndarray(shape=(n_points, n_channels)), list of points of the reference model
    :param model_centroid : Vector(3 floats), the centroid of `model`
    :param voxel_size     : float, downsampling size of point cloud in `global_icp` algorithm
    :param n_channel      : int > 0, number of channels used from the input points
    :param verbose        : bool, show detail results and notification or not
    :return: xtran        : Matrix(4x4 floats), the transformation matrix that maps `model` onto `surface`
"""
point_cloud_model = adjust_pc_coords(point_cloud=points_to_pc(model[:, :n_channel]), coord=model_centroid)
point_cloud_target = adjust_pc_coords(point_cloud=points_to_pc(surface[:, :n_channel]), coord=model_centroid)
xtran = global_icp(source=points_to_pc(point_cloud_model), target=points_to_pc(point_cloud_target),
voxel_size=voxel_size, verbose=verbose)
print(xtran)
return xtran
def interpolate_pose(ref_pose, surf_xtran, rotx, roty, rotz, tranl, pc_centroid):
"""
Description: match reference_pose of (x, y, z) (rx, ry, rz) and (mode, aperture) from reference source to target point cloud
:param ref_pose : Vector(8 floats), the pose of the reference model
:param surf_xtran : Matrix(4x4 floats), the transformation matrix from source model to target point cloud
:param rotx : Matrix(4x4 floats), the transformation matrix of rotation around x axis of robot coord
:param roty : Matrix(4x4 floats), the transformation matrix of rotation around y axis of robot coord
:param rotz : Matrix(4x4 floats), the transformation matrix of rotation around z axis of robot coord
:param tranl : Matrix(4x4 floats), the transformation matrix of translation from robot origin to the camera origin
    :param pc_centroid : Vector(3 floats), the centroid of the considered point cloud
:return: Vector(6 floats), the pose in robot system
"""
# transformation matrix of robot origin to point cloud center, xyz elements
tranl2 = create_tranl_matrix(vector=-np.array(pc_centroid))
r2pc_xyz_xtran = np.dot(np.dot(np.dot(np.dot(tranl2, tranl), rotz), roty), rotx)
    pc2r_xyz_xtran = np.linalg.inv(r2pc_xyz_xtran)
# Automatically adapted for numpy.oldnumeric Aug 01, 2007 by
#!/usr/bin/env python
import cdms2
import numpy
import os
import sys
import basetest
class TestGenericGrids(basetest.CDMSBaseTest):
def testGenGrids2(self):
latb = [62.47686472, 69.70600048]
lonb = [102.87075526, 105.51598035]
fn = self.getDataFile('sampleCurveGrid4.nc')
s = fn("sample")
g = s.getGrid()
lat = g.getLatitude()
lon = g.getLongitude()
g2 = cdms2.createGenericGrid(lat, lon)
datalat = g2.getLatitude().getBounds()[22, 25]
datalon = g2.getLongitude().getBounds()[22, 25]
self.assertTrue(numpy.ma.allclose(datalat, latb))
self.assertTrue(numpy.ma.allclose(datalon, lonb))
def testGenGrids(self):
datb = numpy.array([693., 694., ])
latb = numpy.array([-26.67690036, -30.99890917, ])
lonb = numpy.array([92.41822415, 94.4512163, ])
f = self.getDataFile('sampleGenGrid3.nc')
# Slice a file variable on a curvilinear grid: by coordinates ...
samp = f['sample']
x = samp(lat=(-32, -25), lon=(90, 95))
self.assertFalse(not numpy.ma.allequal(x.data, datb))
grid = x.getGrid()
self.assertFalse(grid.shape != (2,))
lat = grid.getLatitude()
self.assertFalse(not numpy.ma.allclose(lat.data, latb, atol=1.e-5))
lon = grid.getLongitude()
self.assertFalse(not numpy.ma.allclose(lon.data, lonb, atol=1.e-5))
# ... and by index
y = samp[693:695]
self.assertFalse(not numpy.ma.allequal(y, datb))
grid = y.getGrid()
self.assertFalse(not (grid.shape == (2,)))
lat = grid.getLatitude()
self.assertFalse(not numpy.ma.allclose(lat.data, latb, atol=1.e-5))
lon = grid.getLongitude()
self.assertFalse(not numpy.ma.allclose(lon.data, lonb, atol=1.e-5))
#-------------------------------------------------------------
# Slice a TRANSIENT variable on a curvilinear grid: by coordinates ...
samp = f['sample']
x = samp(lat=(-32, -25), lon=(90, 95))
self.assertFalse(not numpy.ma.allequal(x.data, datb))
grid = x.getGrid()
self.assertFalse(grid.shape != (2,))
lat = grid.getLatitude()
self.assertFalse(not numpy.ma.allclose(lat.data, latb, atol=1.e-5))
lon = grid.getLongitude()
self.assertFalse(not numpy.ma.allclose(lon.data, lonb, atol=1.e-5))
# ... and by index
y = samp[693:695]
self.assertFalse(not numpy.ma.allequal(y, datb))
grid = y.getGrid()
self.assertFalse(not (grid.shape == (2,)))
lat = grid.getLatitude()
self.assertFalse(not numpy.ma.allclose(lat.data, latb, atol=1.e-5))
lon = grid.getLongitude()
        self.assertFalse(not numpy.ma.allclose(lon.data, lonb, atol=1.e-5))
"""Power operator."""
import numpy
from ..baseclass import Dist, StochasticallyDependentError
from .. import evaluation
class Pow(Dist):
"""Power operator."""
def __init__(self, left, right):
"""
Constructor.
Args:
left (Dist, numpy.ndarray) : Left hand side.
right (Dist, numpy.ndarray) : Right hand side.
"""
Dist.__init__(self, left=left, right=right)
def _bnd(self, xloc, left, right, cache):
"""
Distribution bounds.
Example:
>>> print(chaospy.Uniform().range([-2, 0, 2, 4]))
[[0. 0. 0. 0.]
[1. 1. 1. 1.]]
>>> print(chaospy.Pow(chaospy.Uniform(), 2).range([-2, 0, 2, 4]))
[[0. 0. 0. 0.]
[1. 1. 1. 1.]]
>>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).range([-2, 0, 2, 4]))
[[0.5 0.5 0.5 0.5]
[1. 1. 1. 1. ]]
>>> print(chaospy.Pow(2, chaospy.Uniform()).range([-2, 0, 2, 4]))
[[1. 1. 1. 1.]
[2. 2. 2. 2.]]
>>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).range([-2, 0, 2, 4]))
[[0.5 0.5 0.5 0.5]
[1. 1. 1. 1. ]]
>>> print(chaospy.Pow(2, 3).range([-2, 0, 2, 4]))
[[8. 8. 8. 8.]
[8. 8. 8. 8.]]
"""
left = evaluation.get_forward_cache(left, cache)
right = evaluation.get_forward_cache(right, cache)
if isinstance(left, Dist):
if isinstance(right, Dist):
raise StochasticallyDependentError(
"under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
return left**right, left**right
else:
output = numpy.ones(xloc.shape)
left = left * output
assert numpy.all(left >= 0), "root of negative number"
indices = xloc > 0
output[indices] = numpy.log(xloc[indices])
output[~indices] = -numpy.inf
indices = left != 1
output[indices] /= numpy.log(left[indices])
output = evaluation.evaluate_bound(right, output, cache=cache)
output = left**output
output[:] = (
numpy.where(output[0] < output[1], output[0], output[1]),
numpy.where(output[0] < output[1], output[1], output[0]),
)
return output
output = numpy.zeros(xloc.shape)
right = right + output
indices = right > 0
output[indices] = numpy.abs(xloc[indices])**(1/right[indices])
output[indices] *= numpy.sign(xloc[indices])
output[right == 0] = 1
output[(xloc == 0) & (right < 0)] = numpy.inf
output = evaluation.evaluate_bound(left, output, cache=cache)
pair = right % 2 == 0
bnd_ = numpy.empty(output.shape)
bnd_[0] = numpy.where(pair*(output[0]*output[1] < 0), 0, output[0])
bnd_[0] = numpy.where(pair*(output[0]*output[1] > 0), \
numpy.min(numpy.abs(output), 0), bnd_[0])**right
bnd_[1] = numpy.where(pair, numpy.max(numpy.abs(output), 0),
output[1])**right
bnd_[0], bnd_[1] = (
numpy.where(bnd_[0] < bnd_[1], bnd_[0], bnd_[1]),
numpy.where(bnd_[0] < bnd_[1], bnd_[1], bnd_[0]),
)
return bnd_
def _cdf(self, xloc, left, right, cache):
"""
Cumulative distribution function.
Example:
>>> print(chaospy.Uniform().fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0.5 1. 1. ]
>>> print(chaospy.Pow(chaospy.Uniform(), 2).fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0.70710678 1. 1. ]
>>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).fwd([0.4, 0.6, 0.8, 1.2]))
[0. 0.33333333 0.75 1. ]
>>> print(chaospy.Pow(2, chaospy.Uniform()).fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0. 0.5849625 1. ]
>>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).fwd([0.4, 0.6, 0.8, 1.2]))
[0. 0.26303441 0.67807191 1. ]
>>> print(chaospy.Pow(2, 3).fwd([7, 8, 9]))
[0. 1. 1.]
"""
left = evaluation.get_forward_cache(left, cache)
right = evaluation.get_forward_cache(right, cache)
if isinstance(left, Dist):
if isinstance(right, Dist):
raise StochasticallyDependentError(
"under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
return numpy.inf
else:
assert numpy.all(left > 0), "imaginary result"
y = (numpy.log(numpy.abs(xloc) + 1.*(xloc <= 0)) /
numpy.log(numpy.abs(left)+1.*(left == 1)))
out = evaluation.evaluate_forward(right, y)
out = numpy.where(xloc <= 0, 0., out)
return out
y = numpy.sign(xloc)*numpy.abs(xloc)**(1./right)
pairs = numpy.sign(xloc**right) != -1
out1, out2 = (
evaluation.evaluate_forward(left, y, cache=cache),
evaluation.evaluate_forward(left, -y, cache=cache),
)
out = numpy.where(right < 0, 1-out1, out1-pairs*out2)
return out
def _ppf(self, q, left, right, cache):
"""
Point percentile function.
Example:
>>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9]))
[0.1 0.2 0.9]
>>> print(chaospy.Pow(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9]))
[0.01 0.04 0.81]
>>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).inv([0.1, 0.2, 0.9]))
[0.52631579 0.55555556 0.90909091]
>>> print(chaospy.Pow(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9]))
[1.07177346 1.14869835 1.86606598]
>>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).inv([0.1, 0.2, 0.9]))
[0.53588673 0.57434918 0.93303299]
>>> print(chaospy.Pow(2, 3).inv([0.1, 0.2, 0.9]))
[8. 8. 8.]
"""
left = evaluation.get_inverse_cache(left, cache)
right = evaluation.get_inverse_cache(right, cache)
if isinstance(left, Dist):
if isinstance(right, Dist):
raise StochasticallyDependentError(
"under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
return left**right
else:
out = evaluation.evaluate_inverse(right, q, cache=cache)
out = numpy.where(left < 0, 1-out, out)
out = left**out
return out
right = right + numpy.zeros(q.shape)
        q = numpy.where(right < 0, 1-q, q)
# -*-coding:utf8;-*-
import os
from json import JSONEncoder, dump, load
from math import ceil
import multiprocessing
import numpy as np
import pandas as pd
from constants import (
BATCH_SIZE,
FILES_PATH,
QUERY,
SONG,
SONGS,
EXPANDED_SONGS,
QUERIES
)
from loader import (
get_songs_count,
get_expanded_songs_count,
get_queries_count,
load_all_songs_pitch_contour_segmentations,
load_all_expanded_songs_pitch_contour_segmentations,
load_all_queries_pitch_contour_segmentations
)
from messages import (
log_invalid_audio_type_error,
log_no_serialized_pitch_contour_segmentations_error
)
class NumpyArrayEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return JSONEncoder.default(self, obj)
def dump_structure(
structure, structure_name, cls=NumpyArrayEncoder,
as_numpy=True, as_pandas=False, extension="json", file_mode='w'
):
'''
Dumps Numpy ndarray , Pandas or Python objects. Defaults to numpy objects.
'''
filename = f'{FILES_PATH}/{structure_name}.{extension}'
filepath = "/".join(
filename.split("/")[:-1]
)
if not os.path.exists(filepath) and filepath != FILES_PATH:
os.mkdir(filepath)
if as_numpy:
with open(filename, 'w') as json_file:
dump(structure, json_file, cls=cls)
elif as_pandas:
pd.to_pickle(structure, filename)
else:
with open(filename, file_mode) as file:
file.write(str(structure))
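# --- Illustrative usage sketch (not part of the original module) -------------
# dump_structure writes under FILES_PATH: NumPy objects go to JSON by default,
# and pandas objects are pickled when as_pandas=True. The structure names below
# are placeholders.
def _example_dump_structures():
    dump_structure(np.arange(5), 'example/np_array')       # -> FILES_PATH/example/np_array.json
    dump_structure(pd.DataFrame({'a': [1, 2]}), 'example/frame',
                   as_numpy=False, as_pandas=True, extension='pkl')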
def load_structure(
structure_name, as_numpy=True, as_pandas=False, extension="json"
):
'''
Loads Numpy ndarray, Pandas or simple read objects.
'''
filename = f'{FILES_PATH}/{structure_name}.{extension}'
if not as_pandas:
with open(filename, 'r') as json_file:
loaded = load(json_file)
if as_numpy:
                loaded = np.asarray(loaded)
"""
Monitoring algorithms for Quicklook pipeline
"""
import numpy as np
import scipy.ndimage
import yaml
from lvmspec.quicklook.qas import MonitoringAlg, QASeverity
from lvmspec.quicklook import qlexceptions
from lvmspec.quicklook import qllogger
import os,sys
import datetime
from astropy.time import Time
from lvmspec.qa import qalib
from lvmspec.io import qa
qlog=qllogger.QLLogger("QuickLook",0)
log=qlog.getlog()
def qlf_post(qadict):
"""
A general function to HTTP post the QA output dictionary, intended for QLF
requires environmental variables: QLF_API_URL, QLF_USER, QLF_PASSWD
Args:
qadict: returned dictionary from a QA
"""
#- Check for environment variables and set them here
if "QLF_API_URL" in os.environ:
qlf_url=os.environ.get("QLF_API_URL")
if "QLF_USER" not in os.environ or "QLF_PASSWD" not in os.environ:
log.warning("Environment variables are not set for QLF. Set QLF_USER and QLF_PASSWD.")
else:
qlf_user=os.environ.get("QLF_USER")
qlf_passwd=os.environ.get("QLF_PASSWD")
log.debug("Environment variables are set for QLF. Now trying HTTP post.")
#- All set. Now try to HTTP post
try:
import requests
response=requests.get(qlf_url)
#- Check if the api has json
api=response.json()
#- proceed with post
job={"name":"QL","status":0,"dictionary":qadict} #- QLF should disintegrate dictionary
response=requests.post(api['job'],json=job,auth=(qlf_user,qlf_passwd))
except:
log.error("Skipping HTTP post... Exception",exc_info=true)
else:
log.warning("Skipping QLF. QLF_API_URL must be set as environment variable")
class Get_RMS(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="RMS"
from lvmspec.image import Image as im
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "NOISE_AMP"
status=kwargs['statKey'] if 'statKey' in kwargs else "NOISE_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "NOISE_WARN_RANGE" in parms and "NOISE_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["NOISE_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["NOISE_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,im,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible parameter type. Was expecting lvmspec.image.Image got {}".format(type(args[0])))
input_image=args[0]
if "paname" not in kwargs:
paname=None
else:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs: qafig=kwargs["qafig"]
else: qafig = None
return self.run_qa(input_image,paname=paname,amps=amps,qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,image,paname=None,amps=False,qafile=None, qafig=None,param=None,qlf=False, refmetrics=None):
retval={}
retval["EXPID"] = '{0:08d}'.format(image.meta["EXPID"])
retval["PANAME"] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["CAMERA"] = image.meta["CAMERA"]
retval["PROGRAM"] = image.meta["PROGRAM"]
retval["FLAVOR"] = image.meta["FLAVOR"]
retval["NIGHT"] = image.meta["NIGHT"]
kwargs=self.config['kwargs']
# return rms values in rms/sqrt(exptime)
rmsccd=qalib.getrms(image.pix/np.sqrt(image.meta["EXPTIME"])) #- should we add dark current and/or readnoise to this as well?
if param is None:
log.debug("Param is None. Using default param instead")
param = {
"NOISE_NORMAL_RANGE":[-1.0, 1.0],
"NOISE_WARN_RANGE":[-2.0, 2.0]
}
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['NOISE_AMP_REF']=kwargs["REFERENCE"]
expnum=[]
rms_row=[]
rms_amps=[]
rms_over_amps=[]
overscan_values=[]
        #- get amp/overscan boundary in pixels
from lvmspec.preproc import _parse_sec_keyword
for kk in ['1','2','3','4']:
thisampboundary=_parse_sec_keyword(image.meta["CCDSEC"+kk])
thisoverscanboundary=_parse_sec_keyword(image.meta["BIASSEC"+kk])
for i in range(image.pix[thisoverscanboundary].shape[0]):
rmsrow = qalib.getrms(image.pix[thisoverscanboundary][i]/np.sqrt(image.meta["EXPTIME"]))
rms_row.append(rmsrow)
rms_thisover_thisamp=qalib.getrms(image.pix[thisoverscanboundary]/np.sqrt(image.meta["EXPTIME"]))
rms_thisamp=qalib.getrms(image.pix[thisampboundary]/np.sqrt(image.meta["EXPTIME"]))
rms_amps.append(rms_thisamp)
rms_over_amps.append(rms_thisover_thisamp)
rmsover=np.max(rms_over_amps)
rmsdiff_err='NORMAL'
if amps:
rms_amps=[]
rms_over_amps=[]
overscan_values=[]
            #- get amp/overscan boundary in pixels
from lvmspec.preproc import _parse_sec_keyword
for kk in ['1','2','3','4']:
thisampboundary=_parse_sec_keyword(image.meta["CCDSEC"+kk])
thisoverscanboundary=_parse_sec_keyword(image.meta["BIASSEC"+kk])
rms_thisover_thisamp=qalib.getrms(image.pix[thisoverscanboundary]/np.sqrt(image.meta["EXPTIME"]))
thisoverscan_values=np.ravel(image.pix[thisoverscanboundary]/np.sqrt(image.meta["EXPTIME"]))
rms_thisamp=qalib.getrms(image.pix[thisampboundary]/np.sqrt(image.meta["EXPTIME"]))
rms_amps.append(rms_thisamp)
rms_over_amps.append(rms_thisover_thisamp)
overscan_values+=thisoverscan_values.tolist()
rmsover=np.std(overscan_values)
# retval["METRICS"]={"RMS":rmsccd,"NOISE":rmsover,"RMS_AMP":np.array(rms_amps),"NOISE_AMP":np.array(rms_over_amps),"RMS_ROW":rms_row,"EXPNUM_WARN":expnum}
retval["METRICS"]={"RMS":rmsccd,"NOISE":rmsover,"RMS_AMP":np.array(rms_amps),"NOISE_AMP":np.array(rms_over_amps),"RMS_ROW":rms_row,"NOISE_STAT":rmsdiff_err,"EXPNUM_WARN":expnum}
else:
# retval["METRICS"]={"RMS":rmsccd,"NOISE":rmsover,"RMS_ROW":rms_row,"EXPNUM_WARN":expnum}
retval["METRICS"]={"RMS":rmsccd,"NOISE":rmsover,"RMS_ROW":rms_row,"NOISE_STAT":rmsdiff_err,"EXPNUM_WARN":expnum}
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_RMS
plot_RMS(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
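# --- Illustrative sketch (not part of the original module) -------------------
# Minimal config structure Get_RMS expects: 'kwargs' must carry a 'param' dict
# with the NOISE ranges; the threshold values below are examples only.
_example_getrms_config = {
    'kwargs': {
        'param': {
            "NOISE_NORMAL_RANGE": [-1.0, 1.0],
            "NOISE_WARN_RANGE": [-2.0, 2.0],
        },
        'refKey': "NOISE_AMP",
        'statKey': "NOISE_STAT",
    }
}
# qa_rms = Get_RMS("RMS", _example_getrms_config)  # running it also needs an lvmspec Image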
class Count_Pixels(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="COUNTPIX"
from lvmspec.image import Image as im
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "NPIX_AMP"
status=kwargs['statKey'] if 'statKey' in kwargs else "NPIX_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "NPIX_WARN_RANGE" in parms and "NPIX_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["NPIX_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["NPIX_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,im,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible input. Was expecting {} got {}".format(type(self.__inpType__),type(args[0])))
input_image=args[0]
if "paname" not in kwargs:
paname=None
else:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs: qafig=kwargs["qafig"]
else: qafig = None
return self.run_qa(input_image,paname=paname,amps=amps,qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,image,paname=None,amps=False,qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
retval={}
retval["PANAME"] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["EXPID"] = '{0:08d}'.format(image.meta["EXPID"])
retval["CAMERA"] = image.meta["CAMERA"]
retval["PROGRAM"] = image.meta["PROGRAM"]
retval["FLAVOR"] = image.meta["FLAVOR"]
retval["NIGHT"] = image.meta["NIGHT"]
kwargs=self.config['kwargs']
if param is None:
log.debug("Param is None. Using default param instead")
param = {
"CUTLO":3, # low threshold for number of counts in sigmas
"CUTHI":10,
"NPIX_NORMAL_RANGE":[200.0, 500.0],
"NPIX_WARN_RANGE":[50.0, 650.0]
}
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['NPIX_AMP_REF']=kwargs["REFERENCE"]
#- get the counts over entire CCD in counts per second
npixlo=qalib.countpix(image.pix,nsig=param['CUTLO']) #- above 3 sigma in counts
npixhi=qalib.countpix(image.pix,nsig=param['CUTHI']) #- above 10 sigma in counts
npix_err='NORMAL'
#- get the counts for each amp
if amps:
npixlo_amps=[]
npixhi_amps=[]
#- get amp boundary in pixels
from lvmspec.preproc import _parse_sec_keyword
for kk in ['1','2','3','4']:
ampboundary=_parse_sec_keyword(image.meta["CCDSEC"+kk])
npixlo_thisamp=qalib.countpix(image.pix[ampboundary]/image.meta["EXPTIME"],nsig=param['CUTLO'])
npixlo_amps.append(npixlo_thisamp)
npixhi_thisamp=qalib.countpix(image.pix[ampboundary]/image.meta["EXPTIME"],nsig=param['CUTHI'])
npixhi_amps.append(npixhi_thisamp)
# retval["METRICS"]={"NPIX_LOW":npixlo,"NPIX_HIGH":npixhi,"NPIX_AMP": npixlo_amps,"NPIX_HIGH_AMP": npixhi_amps}
retval["METRICS"]={"NPIX_LOW":npixlo,"NPIX_HIGH":npixhi,"NPIX_AMP": npixlo_amps,"NPIX_HIGH_AMP": npixhi_amps,"NPIX_STAT":npix_err}
else:
# retval["METRICS"]={"NPIX_LOW":npixlo,"NPIX_HIGH":npixhi}
retval["METRICS"]={"NPIX_LOW":npixlo,"NPIX_HIGH":npixhi,"NPIX_STAT":npix_err}
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_countpix
plot_countpix(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
class Integrate_Spec(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="INTEG"
from lvmspec.frame import Frame as fr
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "INTEG_AVG"
status=kwargs['statKey'] if 'statKey' in kwargs else "MAGDIFF_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "MAGDIFF_WARN_RANGE" in parms and "MAGDIFF_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["MAGDIFF_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["MAGDIFF_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible input. Was expecting {}, got {}".format(type(self.__inpType__),type(args[0])))
fibermap=kwargs['FiberMap']
input_frame=args[0]
if "paname" not in kwargs:
paname=None
else:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
dict_countbins=None
if "dict_countbins" in kwargs:
dict_countbins=kwargs["dict_countbins"]
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs: qafig=kwargs["qafig"]
else: qafig = None
return self.run_qa(fibermap,input_frame,paname=paname,amps=amps, dict_countbins=dict_countbins, qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,fibermap,frame,paname=None,amps=False,dict_countbins=None, qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
retval={}
retval["PANAME" ] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["EXPID"] = '{0:08d}'.format(frame.meta["EXPID"])
retval["CAMERA"] = frame.meta["CAMERA"]
retval["PROGRAM"] = frame.meta["PROGRAM"]
retval["FLAVOR"] = frame.meta["FLAVOR"]
retval["NIGHT"] = frame.meta["NIGHT"]
kwargs=self.config['kwargs']
ra = fibermap["RA_TARGET"]
dec = fibermap["DEC_TARGET"]
#- get the integrals for all fibers
flux=frame.flux
wave=frame.wave
integrals=np.zeros(flux.shape[0])
for ii in range(len(integrals)):
integrals[ii]=qalib.integrate_spec(wave,flux[ii])
#- average integrals over fibers of each object type and get imaging magnitudes
integ_avg_tgt=[]
mag_avg_tgt=[]
for T in ["ELG","QSO","LRG","STD"]:
fibers=np.where(frame.fibermap['OBJTYPE']==T)[0]
if len(fibers) < 1:
log.warning("no {} fibers found.".format(T))
magnitudes=frame.fibermap['MAG'][fibers]
mag_avg=np.mean(magnitudes)
mag_avg_tgt.append(mag_avg)
integ=integrals[fibers]
integ_avg=np.mean(integ)
integ_avg_tgt.append(integ_avg)
if T == "STD":
starfibers=fibers
int_stars=integ
int_average=integ_avg
# simple, temporary magdiff calculation (to be corrected...)
magdiff_avg=[]
for i in range(len(mag_avg_tgt)):
mag_fib=-2.5*np.log(integ_avg_tgt[i]/frame.meta["EXPTIME"])+30.
            if not np.isnan(mag_avg_tgt[i]):
                magdiff=mag_fib-mag_avg_tgt[i]
            else:
                magdiff=np.nan
            magdiff_avg.append(magdiff)
if param is None:
log.debug("Param is None. Using default param instead")
param = {
"MAGDIFF_NORMAL_RANGE":[-0.5, 0.5],
"MAGDIFF_WARN_RANGE":[-1.0, 1.0]
}
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['MAGDIFF_TGT_REF']=kwargs["REFERENCE"]
magdiff_avg_amp = [0.0]
magdiff_err='NORMAL'
#- get the counts for each amp
if amps:
#- get the fiducial boundary
leftmax = dict_countbins["LEFT_MAX_FIBER"]
rightmin = dict_countbins["RIGHT_MIN_FIBER"]
bottommax = dict_countbins["BOTTOM_MAX_WAVE_INDEX"]
topmin = dict_countbins["TOP_MIN_WAVE_INDEX"]
fidboundary = qalib.slice_fidboundary(frame,leftmax,rightmin,bottommax,topmin)
int_avg_amps=np.zeros(4)
for amp in range(4):
wave=frame.wave[fidboundary[amp][1]]
select_thisamp=starfibers[(starfibers >= fidboundary[amp][0].start) & (starfibers < fidboundary[amp][0].stop)]
stdflux_thisamp=frame.flux[select_thisamp,fidboundary[amp][1]]
if len(stdflux_thisamp)==0:
continue
else:
integ_thisamp=np.zeros(stdflux_thisamp.shape[0])
for ii in range(stdflux_thisamp.shape[0]):
integ_thisamp[ii]=qalib.integrate_spec(wave,stdflux_thisamp[ii])
int_avg_amps[amp]=np.mean(integ_thisamp)
# retval["METRICS"]={"RA":ra,"DEC":dec, "INTEG":int_stars, "INTEG_AVG":int_average,"INTEG_AVG_AMP":int_avg_amps, "STD_FIBERID": starfibers.tolist(),"MAGDIFF_TGT":magdiff_avg,"MAGDIFF_AVG_AMP":magdiff_avg_amp}
retval["METRICS"]={"RA":ra,"DEC":dec, "INTEG":int_stars, "INTEG_AVG":int_average,"INTEG_AVG_AMP":int_avg_amps, "STD_FIBERID": starfibers.tolist(),"MAGDIFF_TGT":magdiff_avg,"MAGDIFF_AVG_AMP":magdiff_avg_amp,"MAGDIFF_STAT":magdiff_err}
else:
# retval["METRICS"]={"RA":ra,"DEC":dec, "INTEG":int_stars,"INTEG_AVG":int_average,"STD_FIBERID":starfibers.tolist(),"MAGDIFF_TGT":magdiff_avg}
retval["METRICS"]={"RA":ra,"DEC":dec, "INTEG":int_stars,"INTEG_AVG":int_average,"STD_FIBERID":starfibers.tolist(),"MAGDIFF_TGT":magdiff_avg,"MAGDIFF_STAT":magdiff_err}
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_integral
plot_integral(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
class Sky_Continuum(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="SKYCONT"
from lvmspec.frame import Frame as fr
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "SKYCONT"
status=kwargs['statKey'] if 'statKey' in kwargs else "SKYCONT_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "SKYCONT_WARN_RANGE" in parms and "SKYCONT_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["SKYCONT_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["SKYCONT_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible input. Was expecting {}, got {}".format(type(self.__inpType__),type(args[0])))
fibermap=kwargs['FiberMap']
input_frame=args[0]
camera=input_frame.meta["CAMERA"]
wrange1=None
wrange2=None
if "wrange1" in kwargs:
wrange1=kwargs["wrange1"]
if "wrange2" in kwargs:
wrange2=kwargs["wrange2"]
if wrange1==None:
if camera[0]=="b": wrange1= "4000,4500"
if camera[0]=="r": wrange1= "5950,6200"
if camera[0]=="z": wrange1= "8120,8270"
if wrange2==None:
if camera[0]=="b": wrange2= "5250,5550"
if camera[0]=="r": wrange2= "6990,7230"
if camera[0]=="z": wrange2= "9110,9280"
paname=None
if "paname" in kwargs:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
dict_countbins=None
if "dict_countbins" in kwargs:
dict_countbins=kwargs["dict_countbins"]
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs: qafig=kwargs["qafig"]
else: qafig=None
return self.run_qa(fibermap,input_frame,wrange1=wrange1,wrange2=wrange2,paname=paname,amps=amps, dict_countbins=dict_countbins,qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,fibermap,frame,wrange1=None,wrange2=None,
paname=None,amps=False,dict_countbins=None,
qafile=None,qafig=None, param=None, qlf=False,
refmetrics=None):
#- qa dictionary
retval={}
retval["PANAME" ]= paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["EXPID"] = '{0:08d}'.format(frame.meta["EXPID"])
retval["CAMERA"] = frame.meta["CAMERA"]
retval["PROGRAM"] = frame.meta["PROGRAM"]
retval["FLAVOR"] = frame.meta["FLAVOR"]
retval["NIGHT"] = frame.meta["NIGHT"]
kwargs=self.config['kwargs']
ra = fibermap["RA_TARGET"]
dec = fibermap["DEC_TARGET"]
if param is None:
log.debug("Param is None. Using default param instead")
from lvmspec.io import read_params
desi_params = read_params()
param = {}
for key in ['B_CONT','R_CONT', 'Z_CONT', 'SKYCONT_WARN_RANGE', 'SKYCONT_ALARM_RANGE']:
param[key] = desi_params['qa']['skysub']['PARAMS'][key]
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['SKYCONT_REF']=kwargs["REFERENCE"]
skyfiber, contfiberlow, contfiberhigh, meancontfiber, skycont = qalib.sky_continuum(
frame, wrange1, wrange2)
skycont_err = 'NORMAL'
if amps:
leftmax = dict_countbins["LEFT_MAX_FIBER"]
rightmin = dict_countbins["RIGHT_MIN_FIBER"]
bottommax = dict_countbins["BOTTOM_MAX_WAVE_INDEX"]
topmin = dict_countbins["TOP_MIN_WAVE_INDEX"]
fidboundary = qalib.slice_fidboundary(frame,leftmax,rightmin,bottommax,topmin)
k1=np.where(skyfiber < fidboundary[0][0].stop)[0]
maxsky_index=max(k1)
contamp1=np.mean(contfiberlow[:maxsky_index])
contamp3=np.mean(contfiberhigh[:maxsky_index])
if fidboundary[1][0].start >=fidboundary[0][0].stop:
k2=np.where(skyfiber > fidboundary[1][0].start)[0]
minsky_index=min(k2)
contamp2=np.mean(contfiberlow[minsky_index:])
contamp4=np.mean(contfiberhigh[minsky_index:])
else:
contamp2=0
contamp4=0
skycont_amps=np.array((contamp1,contamp2,contamp3,contamp4)) #- in four amps regions
# retval["METRICS"]={"RA":ra,"DEC":dec, "SKYFIBERID": skyfiber.tolist(), "SKYCONT":skycont, "SKYCONT_FIBER":meancontfiber, "SKYCONT_AMP":skycont_amps}
retval["METRICS"]={"RA":ra,"DEC":dec, "SKYFIBERID": skyfiber.tolist(), "SKYCONT":skycont, "SKYCONT_FIBER":meancontfiber, "SKYCONT_AMP":skycont_amps, "SKYCONT_STAT":skycont_err}
else:
# retval["METRICS"]={"RA":ra,"DEC":dec, "SKYFIBERID": skyfiber.tolist(), "SKYCONT":skycont, "SKYCONT_FIBER":meancontfiber}
retval["METRICS"]={"RA":ra,"DEC":dec, "SKYFIBERID": skyfiber.tolist(), "SKYCONT":skycont, "SKYCONT_FIBER":meancontfiber, "SKYCONT_STAT":skycont_err}
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_sky_continuum
plot_sky_continuum(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
class Sky_Peaks(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="SKYPEAK"
from lvmspec.frame import Frame as fr
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "PEAKCOUNT_MED_SKY"
status=kwargs['statKey'] if 'statKey' in kwargs else "PEAKCOUNT_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "PEAKCOUNT_WARN_RANGE" in parms and "PEAKCOUNT_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["PEAKCOUNT_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["PEAKCOUNT_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible parameter type. Was expecting lvmspec.image.Image, got {}".format(type(args[0])))
fibermap=kwargs['FiberMap']
input_frame=args[0]
if "paname" not in kwargs:
paname=None
else:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
psf = None
if "PSFFile" in kwargs:
psf=kwargs["PSFFile"]
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs:
qafig=kwargs["qafig"]
else: qafig = None
return self.run_qa(fibermap,input_frame,paname=paname,amps=amps,psf=psf, qafile=qafile, qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,fibermap,frame,paname=None,amps=False,psf=None, qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
from lvmspec.qa.qalib import sky_peaks
retval={}
retval["PANAME"] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["EXPID"] = '{0:08d}'.format(frame.meta["EXPID"])
retval["CAMERA"] = camera = frame.meta["CAMERA"]
retval["PROGRAM"] = frame.meta["PROGRAM"]
retval["FLAVOR"] = frame.meta["FLAVOR"]
retval["NIGHT"] = frame.meta["NIGHT"]
kwargs=self.config['kwargs']
ra = fibermap["RA_TARGET"]
dec = fibermap["DEC_TARGET"]
# Parameters
if param is None:
log.info("Param is None. Using default param instead")
from lvmspec.io import read_params
desi_params = read_params()
param = desi_params['qa']['skypeaks']['PARAMS']
# Run
nspec_counts, sky_counts = sky_peaks(param, frame, amps=amps)
rms_nspec = qalib.getrms(nspec_counts)
rms_skyspec = qalib.getrms(sky_counts)
sumcount_med_sky=[]
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['PEAKCOUNT_REF']=kwargs["REFERENCE"]
# retval["METRICS"]={"RA":ra,"DEC":dec, "PEAKCOUNT":nspec_counts,"PEAKCOUNT_RMS":rms_nspec,"PEAKCOUNT_MED_SKY":sumcount_med_sky,"PEAKCOUNT_RMS_SKY":rms_skyspec}
sumcount_err='NORMAL'
retval["METRICS"]={"RA":ra,"DEC":dec, "PEAKCOUNT":nspec_counts,"PEAKCOUNT_RMS":rms_nspec,"PEAKCOUNT_MED_SKY":sumcount_med_sky,"PEAKCOUNT_RMS_SKY":rms_skyspec,"PEAKCOUNT_STAT":sumcount_err}
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_sky_peaks
plot_sky_peaks(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
class Calc_XWSigma(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="XWSIGMA"
from lvmspec.image import Image as im
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "WSIGMA_MED_SKY"
status=kwargs['statKey'] if 'statKey' in kwargs else "XWSIGMA_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "XWSIGMA_WARN_RANGE" in parms and "XWSIGMA_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["XWSIGMA_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["XWSIGMA_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,im,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible parameter type. Was expecting lvmspec.image.Image got {}".format(type(args[0])))
fibermap=kwargs['FiberMap']
input_image=args[0]
if "paname" not in kwargs:
paname=None
else:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
psf = None
if "PSFFile" in kwargs:
psf=kwargs["PSFFile"]
fibermap = None
if "FiberMap" in kwargs:
fibermap=kwargs["FiberMap"]
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs: qafig=kwargs["qafig"]
else: qafig = None
return self.run_qa(fibermap,input_image,paname=paname,amps=amps,psf=psf, qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,fibermap,image,paname=None,amps=False,psf=None, qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
from scipy.optimize import curve_fit
retval={}
retval["PANAME"] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["EXPID"] = '{0:08d}'.format(image.meta["EXPID"])
retval["CAMERA"] = camera = image.meta["CAMERA"]
retval["PROGRAM"] = image.meta["PROGRAM"]
retval["FLAVOR"] = image.meta["FLAVOR"]
retval["NIGHT"] = image.meta["NIGHT"]
kwargs=self.config['kwargs']
ra = fibermap["RA_TARGET"]
dec = fibermap["DEC_TARGET"]
if param is None:
log.debug("Param is None. Using default param instead")
if image.meta["FLAVOR"] == 'arc':
param = {
"B_PEAKS":[4047.7, 4359.6, 5087.2],
"R_PEAKS":[6144.8, 6508.3, 6600.8, 6718.9, 6931.4, 7034.4,],
"Z_PEAKS":[8379.9, 8497.7, 8656.8, 8783.0],
"XWSIGMA_NORMAL_RANGE":[-2.0, 2.0],
"XWSIGMA_WARN_RANGE":[-4.0, 4.0]
}
else:
param = {
"B_PEAKS":[3914.4, 5199.3, 5578.9],
"R_PEAKS":[6301.9, 6365.4, 7318.2, 7342.8, 7371.3],
"Z_PEAKS":[8401.5, 8432.4, 8467.5, 9479.4, 9505.6, 9521.8],
"XWSIGMA_NORMAL_RANGE":[-2.0, 2.0],
"XWSIGMA_WARN_RANGE":[-4.0, 4.0]
}
dw=2.
dp=3
b_peaks=param['B_PEAKS']
r_peaks=param['R_PEAKS']
z_peaks=param['Z_PEAKS']
if fibermap["OBJTYPE"][0] == 'ARC':
import lvmspec.psf
psf=lvmspec.psf.PSF(psf)
xsigma=[]
wsigma=[]
xsigma_sky=[]
wsigma_sky=[]
xsigma_amp1=[]
wsigma_amp1=[]
xsigma_amp2=[]
wsigma_amp2=[]
xsigma_amp3=[]
wsigma_amp3=[]
xsigma_amp4=[]
wsigma_amp4=[]
if fibermap['FIBER'].shape[0] >= 500:
fibers = 500
else:
fibers = fibermap['FIBER'].shape[0]
for i in range(fibers):
if camera[0]=="b":
peak_wave=np.array([b_peaks[0]-dw,b_peaks[0]+dw,b_peaks[1]-dw,b_peaks[1]+dw,b_peaks[2]-dw,b_peaks[2]+dw])
xpix=psf.x(ispec=i,wavelength=peak_wave)
ypix=psf.y(ispec=i,wavelength=peak_wave)
xpix_peak1=np.arange(int(round(xpix[0]))-dp,int(round(xpix[1]))+dp+1,1)
ypix_peak1=np.arange(int(round(ypix[0])),int(round(ypix[1])),1)
xpix_peak2=np.arange(int(round(xpix[2]))-dp,int(round(xpix[3]))+dp+1,1)
ypix_peak2=np.arange(int(round(ypix[2])),int(round(ypix[3])),1)
xpix_peak3=np.arange(int(round(xpix[4]))-dp,int(round(xpix[5]))+dp+1,1)
ypix_peak3=np.arange(int(round(ypix[4])),int(round(ypix[5])),1)
xpopt1,xpcov1=curve_fit(qalib.gauss,np.arange(len(xpix_peak1)),image.pix[int(np.mean(ypix_peak1)),xpix_peak1])
wpopt1,wpcov1=curve_fit(qalib.gauss,np.arange(len(ypix_peak1)),image.pix[ypix_peak1,int(np.mean(xpix_peak1))])
xpopt2,xpcov2=curve_fit(qalib.gauss,np.arange(len(xpix_peak2)),image.pix[int(np.mean(ypix_peak2)),xpix_peak2])
wpopt2,wpcov2=curve_fit(qalib.gauss,np.arange(len(ypix_peak2)),image.pix[ypix_peak2,int(np.mean(xpix_peak2))])
xpopt3,xpcov3=curve_fit(qalib.gauss,np.arange(len(xpix_peak3)),image.pix[int(np.mean(ypix_peak3)),xpix_peak3])
wpopt3,wpcov3=curve_fit(qalib.gauss,np.arange(len(ypix_peak3)),image.pix[ypix_peak3,int(np.mean(xpix_peak3))])
xsigma1=np.abs(xpopt1[2])
wsigma1=np.abs(wpopt1[2])
xsigma2=np.abs(xpopt2[2])
wsigma2=np.abs(wpopt2[2])
xsigma3=np.abs(xpopt3[2])
wsigma3=np.abs(wpopt3[2])
xsig=np.array([xsigma1,xsigma2,xsigma3])
wsig=np.array([wsigma1,wsigma2,wsigma3])
xsigma_avg=np.mean(xsig)
wsigma_avg=np.mean(wsig)
xsigma.append(xsigma_avg)
wsigma.append(wsigma_avg)
if camera[0]=="r":
peak_wave=np.array([r_peaks[0]-dw,r_peaks[0]+dw,r_peaks[1]-dw,r_peaks[1]+dw,r_peaks[2]-dw,r_peaks[2]+dw,r_peaks[3]-dw,r_peaks[3]+dw,r_peaks[4]-dw,r_peaks[4]+dw])
xpix=psf.x(ispec=i,wavelength=peak_wave)
ypix=psf.y(ispec=i,wavelength=peak_wave)
xpix_peak1=np.arange(int(round(xpix[0]))-dp,int(round(xpix[1]))+dp+1,1)
ypix_peak1=np.arange(int(round(ypix[0])),int(round(ypix[1])),1)
xpix_peak2=np.arange(int(round(xpix[2]))-dp,int(round(xpix[3]))+dp+1,1)
ypix_peak2=np.arange(int(round(ypix[2])),int(round(ypix[3])),1)
xpix_peak3=np.arange(int(round(xpix[4]))-dp,int(round(xpix[5]))+dp+1,1)
ypix_peak3=np.arange(int(round(ypix[4])),int(round(ypix[5])),1)
xpix_peak4=np.arange(int(round(xpix[6]))-dp,int(round(xpix[7]))+dp+1,1)
ypix_peak4=np.arange(int(round(ypix[6])),int(round(ypix[7])),1)
xpix_peak5=np.arange(int(round(xpix[8]))-dp,int(round(xpix[9]))+dp+1,1)
ypix_peak5=np.arange(int(round(ypix[8])),int(round(ypix[9])),1)
xpopt1,xpcov1=curve_fit(qalib.gauss,np.arange(len(xpix_peak1)),image.pix[int(np.mean(ypix_peak1)),xpix_peak1])
wpopt1,wpcov1=curve_fit(qalib.gauss,np.arange(len(ypix_peak1)),image.pix[ypix_peak1,int(np.mean(xpix_peak1))])
xpopt2,xpcov2=curve_fit(qalib.gauss,np.arange(len(xpix_peak2)),image.pix[int(np.mean(ypix_peak2)),xpix_peak2])
wpopt2,wpcov2=curve_fit(qalib.gauss,np.arange(len(ypix_peak2)),image.pix[ypix_peak2,int(np.mean(xpix_peak2))])
xpopt3,xpcov3=curve_fit(qalib.gauss,np.arange(len(xpix_peak3)),image.pix[int(np.mean(ypix_peak3)),xpix_peak3])
wpopt3,wpcov3=curve_fit(qalib.gauss,np.arange(len(ypix_peak3)),image.pix[ypix_peak3,int(np.mean(xpix_peak3))])
xpopt4,xpcov4=curve_fit(qalib.gauss,np.arange(len(xpix_peak4)),image.pix[int(np.mean(ypix_peak4)),xpix_peak4])
wpopt4,wpcov4=curve_fit(qalib.gauss,np.arange(len(ypix_peak4)),image.pix[ypix_peak4,int(np.mean(xpix_peak4))])
xpopt5,xpcov5=curve_fit(qalib.gauss,np.arange(len(xpix_peak5)),image.pix[int(np.mean(ypix_peak5)),xpix_peak5])
wpopt5,wpcov5=curve_fit(qalib.gauss,np.arange(len(ypix_peak5)),image.pix[ypix_peak5,int(np.mean(xpix_peak5))])
xsigma1=np.abs(xpopt1[2])
wsigma1=np.abs(wpopt1[2])
xsigma2=np.abs(xpopt2[2])
wsigma2=np.abs(wpopt2[2])
xsigma3=np.abs(xpopt3[2])
wsigma3=np.abs(wpopt3[2])
xsigma4=np.abs(xpopt4[2])
wsigma4=np.abs(wpopt4[2])
xsigma5=np.abs(xpopt5[2])
wsigma5=np.abs(wpopt5[2])
xsig=np.array([xsigma1,xsigma2,xsigma3,xsigma4,xsigma5])
wsig=np.array([wsigma1,wsigma2,wsigma3,wsigma4,wsigma5])
xsigma_avg=np.mean(xsig)
wsigma_avg=np.mean(wsig)
xsigma.append(xsigma_avg)
wsigma.append(wsigma_avg)
if camera[0]=="z":
peak_wave=np.array([z_peaks[0]-dw,z_peaks[0]+dw,z_peaks[1]-dw,z_peaks[1]+dw,z_peaks[2]-dw,z_peaks[2]+dw,z_peaks[3]-dw,z_peaks[3]+dw])
xpix=psf.x(ispec=i,wavelength=peak_wave)
ypix=psf.y(ispec=i,wavelength=peak_wave)
xpix_peak1=np.arange(int(round(xpix[0]))-dp,int(round(xpix[1]))+dp+1,1)
ypix_peak1=np.arange(int(round(ypix[0])),int(round(ypix[1])),1)
xpix_peak2=np.arange(int(round(xpix[2]))-dp,int(round(xpix[3]))+dp+1,1)
ypix_peak2=np.arange(int(round(ypix[2])),int(round(ypix[3])),1)
xpix_peak3=np.arange(int(round(xpix[4]))-dp,int(round(xpix[5]))+dp+1,1)
ypix_peak3=np.arange(int(round(ypix[4])),int(round(ypix[5])),1)
xpix_peak4=np.arange(int(round(xpix[6]))-dp,int(round(xpix[7]))+dp+1,1)
ypix_peak4=np.arange(int(round(ypix[6])),int(round(ypix[7])),1)
xpopt1,xpcov1=curve_fit(qalib.gauss,np.arange(len(xpix_peak1)),image.pix[int(np.mean(ypix_peak1)),xpix_peak1])
wpopt1,wpcov1=curve_fit(qalib.gauss,np.arange(len(ypix_peak1)),image.pix[ypix_peak1,int(np.mean(xpix_peak1))])
xpopt2,xpcov2=curve_fit(qalib.gauss,np.arange(len(xpix_peak2)),image.pix[int(np.mean(ypix_peak2)),xpix_peak2])
wpopt2,wpcov2=curve_fit(qalib.gauss,np.arange(len(ypix_peak2)),image.pix[ypix_peak2,int(np.mean(xpix_peak2))])
xpopt3,xpcov3=curve_fit(qalib.gauss,np.arange(len(xpix_peak3)),image.pix[int(np.mean(ypix_peak3)),xpix_peak3])
wpopt3,wpcov3=curve_fit(qalib.gauss,np.arange(len(ypix_peak3)),image.pix[ypix_peak3,int(np.mean(xpix_peak3))])
xpopt4,xpcov4=curve_fit(qalib.gauss,np.arange(len(xpix_peak4)),image.pix[int(np.mean(ypix_peak4)),xpix_peak4])
wpopt4,wpcov4=curve_fit(qalib.gauss,np.arange(len(ypix_peak4)),image.pix[ypix_peak4,int(np.mean(xpix_peak4))])
xsigma1=np.abs(xpopt1[2])
wsigma1=np.abs(wpopt1[2])
xsigma2=np.abs(xpopt2[2])
wsigma2=np.abs(wpopt2[2])
xsigma3=np.abs(xpopt3[2])
wsigma3=np.abs(wpopt3[2])
xsigma4=np.abs(xpopt4[2])
wsigma4=np.abs(wpopt4[2])
xsig=np.array([xsigma1,xsigma2,xsigma3,xsigma4])
wsig=np.array([wsigma1,wsigma2,wsigma3,wsigma4])
xsigma_avg=np.mean(xsig)
wsigma_avg=np.mean(wsig)
xsigma.append(xsigma_avg)
wsigma.append(wsigma_avg)
if fibermap['OBJTYPE'][i]=='SKY':
xsigma_sky=xsigma
wsigma_sky=wsigma
if amps:
if fibermap['FIBER'][i]<240:
if camera[0]=="b":
xsig_amp1=np.array([xsigma1])
xsig_amp3=np.array([xsigma2,xsigma3])
wsig_amp1=np.array([wsigma1])
wsig_amp3=np.array([wsigma2,wsigma3])
if camera[0]=="r":
xsig_amp1=np.array([xsigma1,xsigma2])
xsig_amp3=np.array([xsigma3,xsigma4,xsigma5])
wsig_amp1=np.array([wsigma1,wsigma2])
wsig_amp3=np.array([wsigma3,wsigma4,wsigma5])
if camera[0]=="z":
xsig_amp1=np.array([xsigma1,xsigma2,xsigma3])
xsig_amp3=np.array([xsigma4])
wsig_amp1=np.array([wsigma1,wsigma2,wsigma3])
wsig_amp3=np.array([wsigma4])
xsigma_amp1.append(xsig_amp1)
wsigma_amp1.append(wsig_amp1)
xsigma_amp3.append(xsig_amp3)
wsigma_amp3.append(wsig_amp3)
if fibermap['FIBER'][i]>260:
if camera[0]=="b":
xsig_amp2=np.array([xsigma1])
xsig_amp4=np.array([xsigma2,xsigma3])
wsig_amp2=np.array([wsigma1])
wsig_amp4=np.array([wsigma2,wsigma3])
if camera[0]=="r":
                        xsig_amp2=np.array([xsigma1,xsigma2])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
<NAME>
29-05-2021
"""
# pylint: disable=invalid-name, missing-function-docstring
import time as Time
import numpy as np
from dvg_ringbuffer import RingBuffer
from dvg_ringbuffer_fir_filter import (
RingBuffer_FIR_Filter,
RingBuffer_FIR_Filter_Config,
)
from dvg_fftw_welchpowerspectrum import FFTW_WelchPowerSpectrum
from pyinstrument import Profiler
RUN_PYINSTRUMENT = False
TEST_POWERSPECTRA = True
# Main parameters to test for
BLOCK_SIZE = 2000
N_BLOCKS = 21
Fs = 20000 # [Hz]
FFTW_THREADS_CONVOLVE = 5 # sweet spot seems to be 5
FFTW_THREADS_SPECTRUM = 5 # sweet spot seems to be 5
# Simulation vars
T_total = 120 # [s]
ref_freq_Hz = 250 # [Hz]
ref_V_offset = 1.5 # [V]
sig_I_phase = 10 # [deg]
sig_I_noise_ampl = 0.04
class State:
def __init__(self, block_size, N_blocks):
"""Reflects the actual readings, parsed into separate variables, of
the lock-in amplifier. There should only be one instance of the
State class.
"""
# fmt: off
self.block_size = block_size
self.N_blocks = N_blocks
self.rb_capacity = block_size * N_blocks
self.blocks_received = 0
# Arrays to hold the block data coming from the lock-in amplifier
# Keep `time` as `dtype=np.float64`, because it can contain `np.nan`
self.time = np.full(block_size, np.nan, dtype=np.float64) # [ms]
self.ref_X = np.full(block_size, np.nan, dtype=np.float64)
self.ref_Y = np.full(block_size, np.nan, dtype=np.float64)
self.sig_I = np.full(block_size, np.nan, dtype=np.float64)
self.time_1 = np.full(block_size, np.nan, dtype=np.float64) # [ms]
self.filt_I = np.full(block_size, np.nan, dtype=np.float64)
self.mix_X = np.full(block_size, np.nan, dtype=np.float64)
self.mix_Y = np.full(block_size, np.nan, dtype=np.float64)
self.time_2 = np.full(block_size, np.nan, dtype=np.float64) # [ms]
self.X = np.full(block_size, np.nan, dtype=np.float64)
self.Y = np.full(block_size, np.nan, dtype=np.float64)
        self.R = np.full(block_size, np.nan, dtype=np.float64)
import os, sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import unittest
import scikit_posthocs._posthocs as sp
import seaborn as sb
import numpy as np
class TestPosthocs(unittest.TestCase):
df = sb.load_dataset("exercise")
df_bn = np.array([[4,3,4,4,5,6,3],
[1,2,3,5,6,7,7],
[1,2,6,4,1,5,1]])
def test_posthoc_conover(self):
r_results = np.array([[-1, 1.131263e-02, 9.354690e-11],
[1.131263e-02, -1, 5.496288e-06],
[9.354690e-11, 5.496288e-06, -1]])
results = sp.posthoc_conover(self.df, val_col = 'pulse', group_col = 'kind', p_adjust = 'holm')
        self.assertTrue(np.allclose(results, r_results))
"""
Implements the ArraysInterface object and supporting functionality.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
from pygsti.tools import sharedmemtools as _smt
class ArraysInterface(object):
"""
An interface between pyGSTi's optimization methods and data storage arrays.
This class provides an abstract interface to algorithms (particularly the Levenberg-Marquardt
nonlinear least-squares algorithm) for creating an manipulating potentially distributed data
arrays with types such as "jtj" (Jacobian^T * Jacobian), "jtf" (Jacobian^T * objectivefn_vector),
and "x" (model parameter vector). The class encapsulates all the operations on these arrays so
that the algorithm doesn't need to worry about how the arrays are actually stored in memory,
e.g. whether shared memory is used or not.
"""
pass # just a base class - maybe make an abc abtract class in FUTURE?
class UndistributedArraysInterface(ArraysInterface):
"""
An arrays interface for the case when the arrays are not actually distributed.
Parameters
----------
num_global_elements : int
The total number of objective function "elements", i.e. the size of the
objective function array `f`.
num_global_params : int
The total number of (model) parameters, i.e. the size of the `x` array.
"""
def __init__(self, num_global_elements, num_global_params):
self.num_global_elements = num_global_elements
self.num_global_params = num_global_params
def allocate_jtf(self):
"""
Allocate an array for holding a `'jtf'`-type value.
Returns
-------
numpy.ndarray or LocalNumpyArray
"""
return _np.empty(self.num_global_params, 'd')
def allocate_jtj(self):
"""
Allocate an array for holding an approximated Hessian (type `'jtj'`).
Returns
-------
numpy.ndarray or LocalNumpyArray
"""
return _np.empty((self.num_global_params, self.num_global_params), 'd')
def allocate_jac(self):
"""
Allocate an array for holding a Jacobian matrix (type `'ep'`).
Returns
-------
numpy.ndarray or LocalNumpyArray
"""
return _np.empty((self.num_global_elements, self.num_global_params), 'd')
def deallocate_jtf(self, jtf):
"""
Free an array for holding an objective function value (type `'jtf'`).
Returns
-------
None
"""
pass
def deallocate_jtj(self, jtj):
"""
Free an array for holding an approximated Hessian (type `'jtj'`).
Returns
-------
None
"""
pass
def deallocate_jac(self, jac):
"""
Free an array for holding a Jacobian matrix (type `'ep'`).
Returns
-------
None
"""
pass
def global_num_elements(self):
"""
The total number of objective function "elements".
This is the size/length of the objective function `f` vector.
Returns
-------
int
"""
return self.num_global_elements
def jac_param_slice(self, only_if_leader=False):
"""
The slice into a Jacobian's columns that belong to this processor.
Parameters
----------
only_if_leader : bool, optional
If `True`, the current processor's parameter slice is only returned if
the processor is the "leader" (i.e. the first) of the processors that
calculate the same parameter slice. All non-leader processors return
the zero-slice `slice(0,0)`.
Returns
-------
slice
"""
return slice(0, self.num_global_params)
def jtf_param_slice(self):
"""
The slice into a `'jtf'` vector giving the rows owned by this processor.
Returns
-------
slice
"""
return slice(0, self.num_global_params)
def param_fine_info(self):
"""
Returns information regarding how model parameters are distributed among hosts and processors.
This information relates to the "fine" distribution used in distributed layouts,
and is needed by some algorithms which utilize shared-memory communication between
processors on the same host.
Returns
-------
param_fine_slices_by_host : list
A list with one entry per host. Each entry is itself a list of
`(rank, (global_param_slice, host_param_slice))` elements where `rank` is the top-level
overall rank of a processor, `global_param_slice` is the parameter slice that processor owns
and `host_param_slice` is the same slice relative to the parameters owned by the host.
owner_host_and_rank_of_global_fine_param_index : dict
A mapping between parameter indices (keys) and the owning processor rank and host index.
Values are `(host_index, processor_rank)` tuples.
"""
all_params = slice(0, self.num_global_params)
ranks_and_pslices_for_host0 = [(0, (all_params, all_params))]
param_fine_slices_by_host = [ranks_and_pslices_for_host0]
owner_host_and_rank_of_global_fine_param_index = {i: (0, 0) for i in range(self.num_global_params)}
return param_fine_slices_by_host, \
owner_host_and_rank_of_global_fine_param_index
def allgather_x(self, x, global_x):
"""
Gather a parameter (`x`) vector onto all the processors.
Parameters
----------
x : numpy.array or LocalNumpyArray
The input vector.
global_x : numpy.array or LocalNumpyArray
The output (gathered) vector.
Returns
-------
None
"""
global_x[:] = x
def allscatter_x(self, global_x, x):
"""
Pare down an already-scattered global parameter (`x`) vector to be just a local `x` vector.
Parameters
----------
global_x : numpy.array or LocalNumpyArray
The input vector. This global vector is already present on all the processors,
so there's no need to do any MPI communication.
x : numpy.array or LocalNumpyArray
The output vector, typically a slice of `global_x`.
Returns
-------
None
"""
x[:] = global_x
def scatter_x(self, global_x, x):
"""
Scatter a global parameter (`x`) vector onto all the processors.
Parameters
----------
global_x : numpy.array or LocalNumpyArray
The input vector.
x : numpy.array or LocalNumpyArray
The output (scattered) vector.
Returns
-------
None
"""
x[:] = global_x
def allgather_f(self, f, global_f):
"""
Gather an objective function (`f`) vector onto all the processors.
Parameters
----------
f : numpy.array or LocalNumpyArray
The input vector.
global_f : numpy.array or LocalNumpyArray
The output (gathered) vector.
Returns
-------
None
"""
global_f[:] = f
def gather_jtj(self, jtj, return_shared=False):
"""
Gather a Hessian (`jtj`) matrix onto the root processor.
Parameters
----------
jtj : numpy.array or LocalNumpyArray
The (local) input matrix to gather.
return_shared : bool, optional
Whether the returned array is allowed to be a shared-memory array, which results
in a small performance gain because the array used internally to gather the results
can be returned directly. When `True` a shared memory handle is also returned, and
the caller assumes responsibility for freeing the memory via
:func:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`.
Returns
-------
gathered_array : numpy.ndarray or None
The full (global) output array on the root (rank=0) processor and
`None` on all other processors.
shared_memory_handle : multiprocessing.shared_memory.SharedMemory or None
Returned only when `return_shared == True`. The shared memory handle
associated with `gathered_array`, which is needed to free the memory.
"""
return (jtj, None) if return_shared else jtj # gathers just onto the root proc
def scatter_jtj(self, global_jtj, jtj):
"""
Scatter a Hessian (`jtj`) matrix onto all the processors.
Parameters
----------
global_jtj : numpy.ndarray
The global Hessian matrix to scatter.
jtj : numpy.ndarray or LocalNumpyArray
The local destination array.
Returns
-------
None
"""
jtj[:, :] = global_jtj
def gather_jtf(self, jtf, return_shared=False):
"""
Gather a `jtf` vector onto the root processor.
Parameters
----------
jtf : numpy.array or LocalNumpyArray
The local input vector to gather.
return_shared : bool, optional
Whether the returned array is allowed to be a shared-memory array, which results
in a small performance gain because the array used internally to gather the results
can be returned directly. When `True` a shared memory handle is also returned, and
the caller assumes responsibility for freeing the memory via
:func:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`.
Returns
-------
gathered_array : numpy.ndarray or None
The full (global) output array on the root (rank=0) processor and
`None` on all other processors.
shared_memory_handle : multiprocessing.shared_memory.SharedMemory or None
Returned only when `return_shared == True`. The shared memory handle
associated with `gathered_array`, which is needed to free the memory.
"""
return (jtf, None) if return_shared else jtf
def scatter_jtf(self, global_jtf, jtf):
"""
Scatter a `jtf` vector onto all the processors.
Parameters
----------
global_jtf : numpy.ndarray
The global vector to scatter.
jtf : numpy.ndarray or LocalNumpyArray
The local destination array.
Returns
-------
None
"""
jtf[:] = global_jtf
def global_svd_dot(self, jac_v, minus_jtf):
"""
Gathers the dot product between a `jtj`-type matrix and a `jtf`-type vector into a global result array.
This is typically used within SVD-defined basis calculations, where `jac_v` is the "V"
matrix of the SVD of a jacobian, and `minus_jtf` is the negative dot product between the Jacobian
matrix and objective function vector.
Parameters
----------
jac_v : numpy.ndarray or LocalNumpyArray
An array of `jtj`-type.
minus_jtf : numpy.ndarray or LocalNumpyArray
An array of `jtf`-type.
Returns
-------
numpy.ndarray
The global (gathered) parameter vector `dot(jac_v.T, minus_jtf)`.
"""
return _np.dot(jac_v.T, minus_jtf)
#!/usr/bin/python3
from typing import Any, Union
from scipy import signal
import matplotlib.pyplot as plt
plt.figure(dpi=300)
import scipy.fftpack as fourier
import numpy as np
import struct
import argparse
import sys, os
import ctypes
from oscilloscopes.utils import unpackData
samplingRate = 1.25e9
def autocorr(x):
result = np.correlate(x, x, mode='full')
return result[result.size // 2:]
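# autocorr keeps only the non-negative-lag half of np.correlate's 'full' output, so the
# first entry is the zero-lag term sum(x*x). Quick sanity check (made-up input):
#   autocorr(np.array([1., 2., 3.]))  # -> array([14., 8., 3.])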
def slidingMean(x, a):
i = 0
j = a
dataLength = len(x)
res = []
while(j<dataLength):
res.append(np.mean(x[i:j]))
i += a
j += a
return res
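# Note on slidingMean: because the loop requires j < len(x), the window ending exactly at
# len(x) is never averaged. Illustrative values (not from a real capture):
#   slidingMean([0, 1, 2, 3, 4, 5], 2)  # -> [0.5, 2.5]; the final window x[4:6] is skipped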
def plot_SlidingMean(data, windowsLength):
print("Means")
mean = slidingMean(data, windowsLength)
plt.ylabel('Power (V)')
plt.xlabel('Samples')
plt.plot(mean)
def plot_autocorr(data):
print("Autocorrelation")
plt.plot(autocorr(data))
def plot_fourier(data, samplingRate):
print("Fourier")
frequencies = fourier.fftfreq(len(data), 1/samplingRate)
plt.plot(frequencies, np.abs(fourier.fft(data)))
def plot_spectrogram(data, samplingRate):
print("Spectrogram")
f, t, Sxx = signal.spectrogram(np.array(data), samplingRate, nperseg = 128)
plt.pcolormesh(t*1e6, f, Sxx)
plt.colorbar()
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [usec]')
def burst_index(traces, threshold=None, threshold_nr_burst=50):
if threshold is None:
max_value = np.amax(traces)
threshold = max_value*0.9
ind = []
for i in range(traces.shape[0]):
n = len((np.abs(traces[i, :]) > threshold).nonzero()[0])  # assumed completion: count the samples whose magnitude exceeds the threshold
import torch
from rrc_example_package.her.rl_modules.models import actor, critic
from rrc_example_package.her.arguments import get_args
import gym
import numpy as np
from rrc_example_package import cube_trajectory_env
from rrc_example_package.benchmark_rrc.python.residual_learning.residual_wrappers import RandomizedEnvWrapper
import time
# process the inputs
def process_inputs(o, g, o_mean, o_std, g_mean, g_std, args):
o_clip = np.clip(o, -args.clip_obs, args.clip_obs)
g_clip = np.clip(g, -args.clip_obs, args.clip_obs)
o_norm = np.clip((o_clip - o_mean) / (o_std), -args.clip_range, args.clip_range)
g_norm = np.clip((g_clip - g_mean) / (g_std), -args.clip_range, args.clip_range)
inputs = np.concatenate([o_norm, g_norm])
inputs = torch.tensor(inputs, dtype=torch.float32)
return inputs
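# Sketch of the preprocessing pipeline above (shapes and values are illustrative only):
# the observation and goal are clipped to [-clip_obs, clip_obs], z-normalised with the
# stored means/stds, clipped again to [-clip_range, clip_range], and concatenated.
#   o, g = np.zeros(25), np.zeros(3)
#   inputs = process_inputs(o, g, o_mean, o_std, g_mean, g_std, args)  # torch tensor of length 28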
# this function will choose action for the agent and do the exploration
def select_actions(pi, args, env_params):
action = pi.cpu().numpy().squeeze()
# add the gaussian
action += args.noise_eps * env_params['action_max'] * np.random.randn(*action.shape)
action = np.clip(action, -env_params['action_max'], env_params['action_max'])
# random actions...
random_actions = np.random.uniform(low=-env_params['action_max'], high=env_params['action_max'], \
size=env_params['action'])
# choose if use the random actions
action += np.random.binomial(1, args.random_eps, 1)[0] * (random_actions - action)  # with probability random_eps, swap in the random action (assumed completion)
return action
import cmath
import math
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from checker import main
"""Simple functions area start"""
x = 12 # assigning value to a variable
y = pow(x - 4, 2) + 5 # assigning a calculation to a variable
print(float(pow(x, y)) == np.power(12., 69.)) # comparison
print(y == np.sum([np.power(float(np.sum([x, -4])), 2.), 5]))
"""Simple functions area end"""
"""Math functions area start"""
print(3 + 4 - 5 == np.sum([3, 4, -5])) # Sum
print(-1 * -4 == np.multiply(-1, -4), -1 * -4 == np.negative(-4)) # Multiply
print(7 / 2 == np.divide(7, 2)) # Division
print(3 + 3 / 5 == np.sum([3, np.divide(3, 5)]))
print(math.factorial(7) == np.math.factorial(7))
import os
import numpy as np
from PIL import Image
class Utils():
@staticmethod
def rgb2gray(rgb):
if type(rgb).__module__ == np.__name__: # numpy type
return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])
elif type(rgb).__module__.startswith("PIL"): #PIL.Image type
return rgb.convert('L')
else:
print("No adjusted type using Utils.rgb2gray()")
return None
@staticmethod
def rankdata(a):
"""
returns the rank of each element (rank 1 for the smallest value); tied values share their average rank
"""
n = len(a)
ivec=sorted(range(len(a)), key=a.__getitem__)
svec=[a[rank] for rank in ivec]
sumranks = 0
dupcount = 0
newarray = [0]*n
for i in range(n):
sumranks += i
dupcount += 1
if i==n-1 or svec[i] != svec[i+1]:
averank = sumranks / float(dupcount) + 1
for j in range(i-dupcount+1,i+1):
newarray[ivec[j]] = averank
sumranks = 0
dupcount = 0
return newarray
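# Worked example of rankdata (arbitrary values): ties share the average of the positions
# they occupy, and rank 1 goes to the smallest value.
#   Utils.rankdata([10, 20, 20, 30])  # -> [1.0, 2.5, 2.5, 4.0]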
@staticmethod
def calculate_accuracy(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp+fn == 0) else float(tp) / float(tp+fn)
fpr = 0 if (fp+tn == 0) else float(fp) / float(fp+tn)
acc = float(tp+tn)/dist.size
return tpr, fpr, acc
@staticmethod
def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10, subtract_mean=False): # erase the distance metric
assert(embeddings1.shape[0] == embeddings2.shape[0])
assert(embeddings1.shape[1] == embeddings2.shape[1])
print("Calculate ROC")
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
#TODOTODOTODO
k_fold = [], []
for i in range(nrof_folds):
train_index_list = [i for i in range(i*int(nrof_pairs/nrof_folds), (i+1)*int(nrof_pairs/nrof_folds))]
test_index_list = [i for i in range(nrof_pairs) if i not in train_index_list]
#for test_elem in test_index_list:
# if test_elem in train_index_list:
k_fold[0].append(train_index_list)
k_fold[1].append(test_index_list)
# k_fold = [i for i in range(0, int(nrof_pairs/nrof_folds))], [i for i in range(int(nrof_pairs/nrof_folds), nrof_pairs)] #equivalent to sklearn.KFold with shuffle=False
tprs = np.zeros((nrof_folds,nrof_thresholds))
fprs = np.zeros((nrof_folds,nrof_thresholds))
accuracy = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(zip(k_fold[0], k_fold[1])):
if subtract_mean:
mean = np.mean(np.concatenate([embeddings1[train_set], embeddings2[train_set]]), axis=0)
else:
mean = 0.0
dist = Utils.L2distance(embeddings1-mean, embeddings2-mean)
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = Utils.calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])
best_threshold_index = np.argmax(acc_train)
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx,threshold_idx], fprs[fold_idx,threshold_idx], _ = Utils.calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])
_, _, accuracy[fold_idx] = Utils.calculate_accuracy(thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])
tpr = np.mean(tprs,0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import numpy as np
pix_width = 0.05
map_x_min = 0.0
map_x_max = 20.0
map_y_min = -20.0
map_y_max = 20.0
lim_z = -1.95
width = int((map_x_max-map_x_min)/pix_width)
height = int((map_y_max-map_y_min)/pix_width)
u_bias = int(np.abs(map_y_max)/(map_y_max-map_y_min)*height)
v_bias = int(np.abs(map_x_max)/(map_x_max-map_x_min)*width)
def read_pcd(file_path):
x = []
y = []
z = []
intensity = []
with open(file_path, 'r') as file:
lines = file.readlines()
[lines.pop(0) for _ in range(11)]
for line in lines:
sp_line = line.split()
if float(sp_line[0]) < 0:
continue
x.append(float(sp_line[0]))
y.append(float(sp_line[1]))
z.append(float(sp_line[2]))
intensity.append(float(sp_line[3]))
return np.array([x, y, z]), intensity
def project(x, y):
u = -x/pix_width + u_bias
v = -y/pix_width + v_bias
result = np.array([u, v])
mask = np.where((result[0] < width) & (result[1] < height))
result = result[:, mask[0]]
return result.astype(np.int16)
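# Quick check of project() with the grid constants defined above (pix_width=0.05,
# u_bias=v_bias=400): a point 1 m ahead of the sensor on the centre line maps to
#   project(np.array([1.0]), np.array([0.0]))  # -> array([[380], [400]], dtype=int16)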
def get_cost_map(trans_pc, point_cloud, show=False):
img = np.zeros((width,height,1), np.uint8)
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 11:05:21 2018
@author: 028375
"""
from __future__ import unicode_literals, division
import pandas as pd
import os.path
import numpy as np
def Check2(lastmonth,thismonth,collateral):
ContractID=(thismonth['ContractID'].append(lastmonth['ContractID'])).append(collateral['ContractID']).drop_duplicates()
Outputs=pd.DataFrame(ContractID).reset_index(drop=True)
cost0=lastmonth[['ContractID','期权标的','标的类型','Upfront结算货币']]
Outputs=pd.merge(Outputs,cost0,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Upfront结算货币':'期初表Upfront','期权标的':'期初表期权标的','标的类型':'期初表标的类型'})
cost1=thismonth[['ContractID','期权标的','标的类型','Upfront结算货币']]
Outputs=pd.merge(Outputs,cost1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Upfront结算货币':'期末表Upfront','期权标的':'期末表期权标的','标的类型':'期末表标的类型'})
tmp1=collateral.groupby(['ContractID'])[['期权标的','标的类型']].first().reset_index()
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'期权标的':'资金表期权标的','标的类型':'资金表标的类型'})
collateral1=collateral.groupby(['ContractID','现金流类型'])['确认金额(结算货币)'].sum().reset_index()
collateral1=collateral1.rename(columns={'现金流类型':'CashType','确认金额(结算货币)':'Amount'})
tmp1=collateral1[collateral1['CashType']=='前端支付'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'前端支付'})
tmp1=collateral1[collateral1['CashType']=='前端期权费'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'前端期权费'})
tmp1=collateral1[collateral1['CashType']=='展期期权费'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'展期期权费'})
tmp1=collateral1[collateral1['CashType']=='到期结算'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'到期结算'})
tmp1=collateral1[collateral1['CashType']=='部分赎回'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'部分赎回'})
tmp1=collateral1[collateral1['CashType']=='全部赎回'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'全部赎回'})
tmp1=collateral1[collateral1['CashType']=='期间结算'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'期间结算'})
tmp1=collateral1[collateral1['CashType']=='红利支付'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'红利支付'})
tmp1=collateral1[collateral1['CashType']=='其他'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'其他'})
tmp1=collateral1[collateral1['CashType']=='定结期间结算'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'定结期间结算'})
Outputs['status1']=''
flag1=np.isnan(Outputs['期初表Upfront'])
flag2=np.isnan(Outputs['期末表Upfront'])
Outputs.loc[flag1&flag2,['status1']]='新起到期'
Outputs.loc[(~flag1)&flag2,['status1']]='存续到期'
Outputs.loc[flag1&(~flag2),['status1']]='新起存续'
Outputs.loc[(~flag1)&(~flag2),['status1']]='两期存续'
Outputs['status2']=''
flag1=(Outputs['status1']=='新起到期')
flag2=(Outputs['status1']=='存续到期')
flag3=(Outputs['status1']=='新起存续')
flag4=(Outputs['status1']=='两期存续')
colflag1=np.isnan(Outputs['前端支付'])
colflag2=np.isnan(Outputs['前端期权费'])
colflag3=np.isnan(Outputs['展期期权费'])
colflag4=np.isnan(Outputs['到期结算'])
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License") you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import dnnc as dc
import numpy as np
import unittest
def temp_gemm(np_a, np_b, np_c, alpha, beta, transA, transB):
np_a = np_a.T if (transA==1) else np_a
np_b = np_b.T if (transB==1) else np_b
y = (alpha * np.dot(np_a, np_b)) + (beta * np_c)
return y
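# temp_gemm is the reference for the Gemm contract y = alpha * op(A) @ op(B) + beta * C,
# where op() transposes its argument when the corresponding trans flag is 1. Tiny sanity
# check with made-up shapes:
#   temp_gemm(np.ones((2, 3)), np.ones((3, 4)), np.zeros((2, 4)), 1.0, 0.0, 0, 0)
#   # -> a (2, 4) array filled with 3.0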
class GemmTest(unittest.TestCase):
def setUp(self):
self.len_a_b = 48
self.len_c = 64
self.alpha = 0.5
self.beta = 0.5
self.np_float_a = np.random.randn(self.len_a_b).astype(np.float32)
self.np_float_b = np.random.randn(self.len_a_b).astype(np.float32)
self.np_float_c = np.random.randn(self.len_c).astype(np.float32)
self.dc_float_a = dc.array(list(self.np_float_a))
self.dc_float_b = dc.array(list(self.np_float_b))
self.dc_float_c = dc.array(list(self.np_float_c))
self.np_double_a = np.random.randn(self.len_a_b).astype(np.double)
self.np_double_b = np.random.randn(self.len_a_b).astype(np.double)
import datetime
import email.utils
import re
from functools import partial
import Levenshtein
import networkx as nx
import numpy as np
import pandas as pd
from bigbang.parse import get_date
def consolidate_senders_activity(activity_df, to_consolidate):
"""
takes a DataFrame in the format returned by activity
takes a list of tuples of format ('from 1', 'from 2') to consolidate
returns the consolidated DataFrame (a copy, not in place)
"""
df = activity_df.copy(deep=True)
for consolidate in to_consolidate:
column_a, column_b = consolidate
if column_a in df.columns and column_b in df.columns:
df[column_a] = df[column_a] + df[column_b]
df.drop(column_b, inplace=True, axis=1) # delete the second column
return df
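# Hypothetical usage (column names invented for illustration): fold two 'From' aliases of
# the same person into a single column.
#   consolidated = consolidate_senders_activity(
#       act, [('Jane <jane@a.org>', 'Jane Doe <jane@b.org>')])
#   # the second column's counts are added to the first and the second column is dropped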
def matricize(series, func):
"""
create a matrix by applying func to pairwise combos of elements in a Series
returns a square matrix as a DataFrame
should return a symmetric matrix if func(a,b) == func(b,a)
should return the identity matrix if func == '=='
"""
matrix = pd.DataFrame(columns=series, index=series)
for index, element in enumerate(series):
for second_index, second_element in enumerate(series):
matrix.iloc[index, second_index] = func(element, second_element)
return matrix
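# Small worked example of matricize (inputs invented): with an equality function the
# result is the identity pattern promised in the docstring.
#   matricize(pd.Series(['a', 'b']), lambda x, y: x == y)
#   #        a      b
#   # a   True  False
#   # b  False   True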
def minimum_but_not_self(column, dataframe):
minimum = 100
for index, value in dataframe[column].items():
if index == column:
continue
if value < minimum:
minimum = value
return minimum
def sorted_matrix(from_dataframe, limit=None, sort_key=None):
"""
Takes a dataframe with 'from' fields for column headers.
Returns a sorted distance matrix for the column headers,
using from_header_distance (see method).
"""
if limit is None:
limit = len(from_dataframe.columns)
distancedf = matricize(
from_dataframe.columns[: limit - 1], from_header_distance
)
# specify that the values in the matrix are integers
df = distancedf.astype(int)
# unless otherwise specified, sort to minimize the integer values with rows other than yourself
sort_key = (
sort_key
if sort_key is not None
else partial(minimum_but_not_self, dataframe=df)
)
new_columns = sorted(df.columns[: limit - 1], key=sort_key)
new_df = df.reindex(index=new_columns, columns=new_columns)
return new_df
def resolve_sender_entities(act, lexical_distance=0):
"""
Given an Archive's activity matrix, return a dict of lists, each containing
message senders ('From' fields) that have been grouped because they
probably refer to the same entity.
"""
# senders ordered by descending total activity
senders = act.sum(0).sort_values(ascending=False)
return resolve_entities(
senders, from_header_distance, threshold=lexical_distance
)
def resolve_entities(significance, distance_function, threshold=0):
"""
Takes a Series mapping entities (index) to significance (values, numerical).
Resolves the entities based on a lexical distance function.
Returns a dictionary of labeled (keys) entity lists (values).
Key is the most significant member of the entity list.
"""
entities = significance.index
# entities in lexical order
entities_lex = entities.sort_values()
entities_lex_dict = dict([(p[1], p[0]) for p in enumerate(entities_lex)])
n = len(entities)
# binary matrix of similarity between entries
sim = np.zeros((n, n))
name = "attitudes"
''' Attitude Control Module
Ok, so the principle behind this module is the base class "att", which represents an attitude
description, by default of type "DCM". Can also be created from Euler Angles, PRV, Quaternions,
CRPs, and MRPs. Can also be transformed into these others by a method as well.
For simplicity's sake, I'm going to treat these classes as kind of a "dual number" where the DCM
representation is stored, but for all other types, the representation of that type is also stored.
This should allow for direct quaternion addition and so forth.
This should also allow me to simplify the addition/subtraction functions into a single function,
that read the types of the inputs and acts accordingly.
There will probably also be an angular acceleration vector class, but I'll get there when I get
there.
Author: <NAME>
'''
#standard imports
import numpy as np
from numpy import linalg as LA
# ------------------------------------------------------------------------------------------------
# CONSTANTS
# ------------------------------------------------------------------------------------------------
# Nothing here yet
# -----------------------------------------------------------------------------------------------
# BASE CLASS "ATT"
# -----------------------------------------------------------------------------------------------
class att():
''' Attitude Description Class
Defines an attitude, by default from a DCM description. Also contains a whole bunch of class
methods for defining by other means (CRP, quaternions, etc).
Arguments:
DCM: (ndarray [3x3]) General 3x3 DCM of the attitude description
'''
def __init__(self, DCM, type='DCM', angle_vec=np.array([]),units='rad',
euler_type=None,phi=None,path=None):
''' Standard Definition from a DCM '''
if np.max(np.abs((DCM.T @ DCM) - np.eye(3))) > 1e-3:
raise ValueError('DCM doesn\'t appear to be orthonormal')
self.DCM = DCM
self.type = type
self.units = units
if euler_type:
self.order = euler_type
if np.size(angle_vec):  # store the angle vector only when one was provided (avoids elementwise comparison against an empty array)
self.vec = angle_vec
if phi:
self.phi = phi
if path:
self.path = path
def __repr__(self):
if self.type == 'DCM':
return 'DCM Attitude description is \n {}'.format(self.DCM)
elif self.type == 'PRV':
statement = ''' \n
{} Attitude description is: \n e = {} \n Phi = {} {} \n
\n DCM description is: \n {} \n
'''
return statement.format(self.type,list(self.vec),self.phi,self.units,self.DCM)
elif self.type == 'Euler Angle':
statement = '\n {} {} Attitude description is: \n {} {} \n \n DCM description is: \n {} \n'
return statement.format(self.order,self.type,list(self.vec),self.units,self.DCM)
else:
statement = '\n {} Attitude description is: \n {} \n \n DCM description is: \n {} \n'
return statement.format(self.type,np.array(self.vec).flatten(),self.DCM)
@classmethod
def _from_eul_ang(cls,type,ang1,ang2,ang3,units='deg'):
''' Definition from Euler Angles
Takes a type, 3 angles, and units to determine a DCM, then records both sets
Arguments:
type: (int) int of order of rotation axes
ang1: (float) angle of rotation about first axis
ang2: (float) angle of rotation about second axis
ang3: (float) angle of rotation about third axis
units: (string) either 'rad' or 'deg'
'''
if units=='deg':
ang1, ang2, ang3 = np.radians(ang1),np.radians(ang2),np.radians(ang3)
if type not in (123,132,213,231,312,321,131,121,212,232,313,323):
raise ValueError('Euler angle type definition is incorrect')
angle_vec = np.array([ang1,ang2,ang3])
type = str(type)
DCM = eul_to_DCM(int(type[0]),ang1,int(type[1]),ang2,int(type[2]),ang3,'rad')
if units=='deg':
angle_vec = np.degrees(angle_vec)
return cls(DCM,'Euler Angle',angle_vec=angle_vec,units=units,euler_type=type)
@classmethod
def _from_PRV(cls,vec,phi=None,units='rad'):
''' Definition from Principle Rotation Vector
Takes either a vector with norm != 1 or a normalized vector and a phi rotation magnitude
Internally, the normalized vector and the phi rotation are used
Arguments:
vec: (list) principle rotation vector
phi: (float) optional, rotation magnitude
units: (string) either 'rad' or 'deg' to specify units for phi
'''
if not phi:
phi = LA.norm(vec)
vec = vec/LA.norm(vec)
if units=='deg':
phi = np.radians(phi)
e1,e2,e3 = vec
sigma = 1 - np.cos(phi)
cphi = np.cos(phi)
sphi = np.sin(phi)
C = np.array([[e1*e1*sigma+cphi,e1*e2*sigma+e3*sphi,e1*e3*sigma - e2*sphi],
[e2*e1*sigma - e3*sphi,e2**2*sigma+cphi,e2*e3*sigma+e1*sphi],
[e3*e1*sigma+e2*sphi,e3*e2*sigma-e1*sphi,e3**2*sigma+cphi]])
if units=='deg':
phi = np.degrees(phi)
return cls(C,'PRV', units=units, angle_vec=np.array(vec), phi=phi)
@classmethod
def _from_quat(cls,vec):
'''Definition from Quaternions
Takes in a quaternion and spits out an attitude object (DCM). Checks first for a valid
quaternion
Arguments:
vec: (list) of quaternion values
'''
if np.abs(LA.norm(vec)-1) > 1e-13:
raise ValueError('Quaternions must have norm of 1')
b0,b1,b2,b3 = vec
C = np.array([[b0**2+b1**2-b2**2-b3**2, 2*(b1*b2+b0*b3), 2*(b1*b3-b0*b2)],
[2*(b1*b2-b0*b3), b0**2-b1**2+b2**2-b3**2, 2*(b2*b3+b0*b1)],
[2*(b1*b3+b0*b2), 2*(b2*b3-b0*b1), b0**2-b1**2-b2**2+b3**2]])
return cls(C,'Quaternion', angle_vec=vec)
@classmethod
def _from_CRP(cls,vec):
'''Definition from Classical Rodriguez Parameters
Uses the vector definition of the DCM to convert CRPs into a valid attitude object (element
option also available in comments)
Arguments:
vec: (list) of CRP values
'''
q = np.atleast_2d(vec).reshape(3,1)
C = (1/(1+q.T@q))*((1-q.T@q)*np.eye(3) + 2 * q @ q.T - 2 * tilde(q))
# q1,q2,q3 = q.reshape(np.size(vec))
# C = np.array([[1+q1**2-q2**2-q3**2, 2*(q1*q2+q3), 2*(q1*q3-q2)],
# [2*(q1*q2-q3), 1-q1**2+q2**2-q3**2, 2*(q2*q3+q1)],
# [2*(q1*q3+q2), 2*(q2*q3-q1), 1-q1**2-q2**2+q3**2]])
# C = (1/(1 + q.T @ q)) * C
return cls(C,'CRP',angle_vec=np.array(vec))
@classmethod
def _from_MRP(cls,vec):
'''Definition from Modified Rodriguez Parameters
Uses the vector definition of the DCM to convert MRPs into a valid attitude object. Returns
the path whether it's long (norm > 1) or short (norm < 1) with norm==1 taken to be short
Arguments:
vec: (list) of MRP values
'''
s = np.atleast_2d(vec).T
C = np.eye(3) + (8*tilde(s)@tilde(s) - 4*(1-s.T@s)*tilde(s))/(1+s.T@s)**2
if LA.norm(vec) > 1:
path = 'long'
else:
path = 'short'
return cls(C,'MRP',angle_vec=np.array(vec),path=path)
def _to_eul_ang(self,type,units='deg'):
'''Conversion to Euler Angles. There's no easy way to do this, so it's always just done
from the DCM. Which is fine, it's still quick.
Arguments:
type: (int) currently must be 321 or 313 since those are common. Will expand
units: (str) optional, units to output the angles
'''
C = self.DCM
if type == 321:
ang1 = np.arctan2(C[0,1],C[0,0])
ang2 = -np.arcsin(C[0,2])
ang3 = np.arctan2(C[1,2],C[2,2])
elif type == 313:
ang1 = np.arctan2(C[2,0],-C[2,1])
ang2 = np.arccos(C[2,2])
ang3 = np.arctan2(C[0,2],C[1,2])
if units == 'deg':
ang1,ang2,ang3 = np.degrees([ang1,ang2,ang3])
return self._from_eul_ang(type,ang1,ang2,ang3,units=units)
def _to_PRV(self, units='rad'):
'''Conversion to Principle Rotation Vector. Always done from the DCM. Doesn't need to
take any arguments
Outputs the short version of the PRV (using arccos function) and the positive output
for e_hat
'''
C = self.DCM
phi = np.arccos(0.5*(C[0,0]+C[1,1]+C[2,2]-1))
e = (1/(2*np.sin(phi)))*np.array([C[1,2]-C[2,1],C[2,0]-C[0,2],C[0,1]-C[1,0]])
if units=='deg':
phi = np.degrees(phi)
return self._from_PRV(e,phi=phi,units=units)
def _to_quat(self, path='short'):
'''If the object is a classical or modified Rodriguez parameter object, directly converts
to quaternions via known relations. Otherwise, uses sheppard's method to determine the
quaternions from the DCM.
Arguments:
path: (str) optional, tells the function whether you'd like the short way or the
long way
'''
if self.type == 'CRP':
q = self.vec
b0 = 1/np.sqrt(1+LA.norm(q)**2)
b1 = q[0]*b0
b2 = q[1]*b0
b3 = q[2]*b0
elif self.type == 'MRP':
s = self.vec
b0 = (1-LA.norm(s)**2)/(1+LA.norm(s)**2)
b1 = 2*s[0]/(1+LA.norm(s)**2)
b2 = 2*s[1]/(1+LA.norm(s)**2)
b3 = 2*s[2]/(1+LA.norm(s)**2)
else:
#the annoying way...
C = self.DCM
[[C11,C12,C13],
[C21,C22,C23],
[C31,C32,C33]] = C
trC = C[0,0]+C[1,1]+C[2,2]
b02 = 0.25*(1+trC)
b12 = 0.25*(1+2*C[0,0]-trC)
b22 = 0.25*(1+2*C[1,1]-trC)
b32 = 0.25*(1+2*C[2,2]-trC)
b0b1 = (C23 - C32)/4
b0b2 = (C31 - C13)/4
b0b3 = (C12 - C21)/4
b1b2 = (C12 + C21)/4
b3b1 = (C31 + C13)/4
b2b3 = (C23 + C32)/4
squares = [b02,b12,b22,b32]
if b02 == np.max(squares):
b0 = np.sqrt(b02)
b1 = b0b1/b0
b2 = b0b2/b0
b3 = b0b3/b0
elif b12 == np.max(squares):
b1 = np.sqrt(b12)
b0 = b0b1/b1
b2 = b1b2/b1
b3 = b3b1/b1
elif b22 == np.max(squares):
b2 = np.sqrt(b22)
b0 = b0b2/b2
b1 = b1b2/b2
b3 = b2b3/b2
else:
b3 = np.sqrt(b32)
b0 = b0b3/b3
b1 = b3b1/b3
b2 = b2b3/b3
quats = np.array([b0,b1,b2,b3])
if b0 > 0 and path == 'long':
quats = -quats
elif b0 < 0 and path == 'short':
quats = -quats
return self._from_quat(quats)
def _to_CRP(self):
'''Conversion to Classical Rodriguex Parameters. If the initial attitude is in quaternions,
then it converts directly, because that's very easy. Otherwise, it converts from the DCM,
which is actually still pretty easy. No arguments because the shadow set doesn't really
exist.
'''
if self.type == 'Quaternion':
b0,b1,b2,b3 = self.vec
q = np.array([b1/b0,b2/b0,b3/b0])
else:
C = self.DCM
[[C11,C12,C13],
[C21,C22,C23],
[C31,C32,C33]] = C
zeta = np.sqrt(C11+C22+C33+1)
q = (1/zeta**2)*np.array([C23-C32,C31-C13,C12-C21])
return self._from_CRP(q)
def _to_MRP(self,path='short'):
'''Conversion to Modified Rodriguez Parameters
Similar to CRPs, if the input attitude is a quaternion, it'll just do the output directly,
otherwise, it'll compute the CRP from the DCM. This function does have an input for the
short rotation or the long rotation, though.
'''
if self.type == 'Quaternion':
b0,b1,b2,b3 = self.vec
s = np.array([b1/(1+b0),b2/(1+b0),b3/(1+b0)])
else:
C = self.DCM
[[C11,C12,C13],
[C21,C22,C23],
[C31,C32,C33]] = C
zeta = np.sqrt(C11+C22+C33+1)
s = (1/(zeta*(zeta+2)))*np.array([C23-C32,C31-C13,C12-C21])
if LA.norm(s) > 1 and path=='short':
s = -s/LA.norm(s)
elif LA.norm(s) < 1 and path=='long':
s = -s/LA.norm(s)
return self._from_MRP(s)
# ------------------------------------------------------------------------------------------------
# INTERNAL FUNCTIONS (designed to be used by module, not user)
# ------------------------------------------------------------------------------------------------
def rot(angle,axis,radordeg):
''' Defines a single axis rotation'''
mat = np.array([])
if radordeg == 'rad':
angle = angle
elif radordeg == 'deg':
angle = np.radians(angle)
else:
print('Error')
if axis==1:
mat = np.array( [[ 1, 0, 0 ],
[ 0, np.cos(angle), np.sin(angle) ],
[ 0, -np.sin(angle), np.cos(angle) ]])
elif axis==2:
mat = np.array( [[ np.cos(angle), 0, -np.sin(angle) ],
[ 0, 1, 0 ],
[ np.sin(angle), 0, np.cos(angle) ]])
elif axis==3:
mat = np.array([[ np.cos(angle), np.sin(angle), 0 ],
[ -np.sin(angle), np.cos(angle), 0 ],
[ 0, 0, 1 ]])
else:
print('Error')
return mat
def eul_to_DCM(rot1axis,rot1ang,rot2axis,rot2ang,rot3axis,rot3ang,radordeg):
'''Combines 3 axis rotations to complete a DCM from euler angles'''
mat1 = rot(rot1ang,rot1axis,radordeg)
mat2 = rot(rot2ang,rot2axis,radordeg)
mat3 = rot(rot3ang,rot3axis,radordeg)
DCM = mat3@mat2@mat1
return DCM.T
# ------------------------------------------------------------------------------------------------
# MAIN FUNCTIONS (to be used by module or user)
# ------------------------------------------------------------------------------------------------
def tilde(x):
'''
Returns a tilde matrix for a given vector. Should be robust enough to handle vectors of
any reasonable (vector-like) shape
'''
x = np.array(x).reshape(np.size(x))
return np.array([[0, -x[2], x[1] ],
[x[2], 0, -x[0] ],
[-x[1], x[0], 0 ]])
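# The tilde (skew-symmetric) matrix turns a cross product into a matrix product, which is
# what the CRP/MRP formulas above rely on. Minimal check with arbitrary vectors:
#   a, b = np.array([1., 2., 3.]), np.array([4., 5., 6.])
#   np.allclose(tilde(a) @ b, np.cross(a, b))  # -> True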
def add(att1,att2):
''' Addition function between attitude descriptions.
This function will first check to see whether the addition can be done directly (generally
when the two parameters are of the same type) and will do it that way if so. However, I am
skipping symmetric Euler Angle addition for now. If direct addition cannot be done, then the
DCM is used to add the two parameters and the output type can be chosen by the user.
ONLY RETURNS DCM OUTPUT UNLESS DIRECT ADDITION IS ALLOWED (WILL BE CHANGED IN FUTURE)
Arguments:
att1: (att object) representing the first attitude to sum
att2: (att object) representing the second attitude to sum
'''
if att1.type=='PRV' and att2.type=='PRV':
phi1 = att1.phi
phi2 = att2.phi
e1 = att1.vec
e2 = att2.vec
phi = 2*np.arccos(np.cos(phi1/2)*np.cos(phi2/2)-np.sin(phi1/2)*np.sin(phi2/2)*np.dot(e1,e2))
e = (np.cos(phi2/2)*np.sin(phi1/2)*e1+np.cos(phi1/2)*np.sin(phi2/2)*e2+\
np.sin(phi1/2)*np.sin(phi2/2)*np.cross(e1,e2))/np.sin(phi/2)
return att._from_PRV(e,phi)
elif att1.type=='Quaternion' and att2.type=='Quaternion':
b0_1,b1_1,b2_1,b3_1 = att1.vec
b0_2,b1_2,b2_2,b3_2 = att2.vec
b_1 = np.array(att1.vec).reshape(4,1)
mat = np.array([[b0_2, -b1_2, -b2_2, -b3_2 ],
[b1_2, b0_2, b3_2, -b2_2 ],
[b2_2, -b3_2, b0_2, b1_2 ],
[b3_2, b2_2, -b1_2, b0_2 ]])
return att._from_quat((mat @ b_1).reshape(4))
elif att1.type=='CRP' and att2.type=='CRP':
q1 = att1.vec
q2 = att2.vec
q = (q2 + q1 - np.cross(q2,q1))/(1-np.dot(q2,q1))
return att._from_CRP(q)
elif att1.type=='MRP' and att2.type=='MRP':
#outputs the "short" MRP
s2 = att1.vec
s1 = att2.vec
s = ((1-LA.norm(s1)**2)*s2 + (1-LA.norm(s2)**2)*s1 - 2*np.cross(s2,s1))/(1 + \
LA.norm(s1)**2*LA.norm(s2)**2 - 2*np.dot(s1,s2))
return att._from_MRP(s)
else:
C1 = att1.DCM
C2 = att2.DCM
C = C2 @ C1
return att(C,'DCM')
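# Example of the generic DCM branch of add(), composing two single-axis attitudes built
# with rot() (the 90-degree values are arbitrary):
#   a1 = att(rot(90, 3, 'deg'))
#   a2 = att(rot(90, 3, 'deg'))
#   add(a1, a2).DCM  # -> the 180-degree third-axis DCM, diag(-1, -1, 1)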
def subtract(att1,att2):
''' Subtraction function between attitude descriptions.
This function will first check to see whether the subtraction can be done directly (generally
when the two parameters are of the same type) and will do it that way if so. However, I am
skipping symmetric Euler Angle subtraction for now. If direct subtraction cannot be done, then the
DCM is used to subtract the two parameters and the output type can be chosen by the user.
This function subtracts att1 from att2
ONLY RETURNS DCM OUTPUT UNLESS DIRECT SUBTRACTION IS ALLOWED (WILL BE CHANGED IN FUTURE)
Arguments:
att1: (att object) representing the first attitude to difference
att2: (att object) representing the second attitude to difference
'''
if att1.type=='PRV' and att2.type=='PRV':
phi1 = att1.phi
phi2 = att2.phi
e1 = att1.vec
e2 = att2.vec
phi = 2*np.arccos(np.cos(phi1/2)*np.cos(phi2/2)+np.sin(phi1/2)*np.sin(phi2/2)*np.dot(e2,e1))
e = (np.cos(phi1/2)*np.sin(phi2/2)*e2-np.cos(phi2/2)*np.sin(phi1/2)*e1+\
np.sin(phi1/2)*np.sin(phi2/2)*np.cross(e2,e1))/np.sin(phi/2)
return att._from_PRV(e,phi)
elif att1.type=='Quaternion' and att2.type=='Quaternion':
b0_1,b1_1,b2_1,b3_1 = att1.vec
b0_2,b1_2,b2_2,b3_2 = att2.vec
b_2 = np.array(att2.vec)
"""
The script with demonstration of the spectrum estimation by the data
from acceleration sensors.
"""
# noinspection PyUnresolvedReferences
import matplotlib.pyplot as plt
import numpy as np
from demo_util import get_demo_plot_manager
from spectrum_processing_1d.processing import estimate_spectrum
from spectrum_processing_1d.spectrum_functions import build_wave_spectrum_fun
from spectrum_processing_1d.trajectory_emulator import KFunRelation, generate_trajectory
def main():
# spectrum function
s_fun = build_wave_spectrum_fun(
omega_m=np.array([0.15, 0.45, -0.2]) * np.pi * 2,
var=np.array([1.0, 0.5, 0.6]),
omega_lim=1.0 * 2 * np.pi
)
# relation between k and omega
k_omega_relation = KFunRelation(lambda omega: omega ** 2 / 9.8)
dt = 0.25
fn = 1.0
fs = 1 / dt
# demo to show source spectrum and wave
x_0 = np.linspace(-10, 10, 1000)
# create wave sample
trajectory_data_wave_demo = generate_trajectory(
s_fun=s_fun,
k_omega_relation=k_omega_relation,
x_0=x_0,
trajectory_len=3,
fn=fn,
fs=fs
)
# emulate sensor data
trajectory_data_wave_param = generate_trajectory(
s_fun=s_fun,
k_omega_relation=k_omega_relation,
x_0=[0],
trajectory_len=100000,
fn=fn,
fs=fs
)
# estimate spectrum form
omega_est, s_est, (x_est, y_est, angle_est) = estimate_spectrum(
ax=trajectory_data_wave_param.sensor_ax[:, 0],
ay=trajectory_data_wave_param.sensor_ay[:, 0],
alpha=trajectory_data_wave_param.sensor_alpha[:, 0],
return_trajectory=True,
fs=fs,
nperseg=512,
nfft=1024,
corr_dist=20.0
)
# cut spectrum
omega_lim = np.searchsorted(omega_est, -fn * 2 * np.pi, side='right')
#!/usr/bin/env python
"""
Functions for index predictions.
Author: <NAME>, <NAME>, IWF Graz, Austria
twitter @chrisoutofspace, https://github.com/cmoestl
started April 2018, last update May 2019
Python 3.7
Packages not included in anaconda installation: sunpy, cdflib (https://github.com/MAVENSDC/cdflib)
Issues:
- ...
To-dos:
- ...
Future steps:
- ...
"""
import copy
from datetime import datetime
from dateutil import tz
import numpy as np
from matplotlib.dates import num2date, date2num
from numba import njit, jit
import astropy.time
import scipy
# Machine learning specific:
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import mean_squared_error, make_scorer
class DstFeatureExtraction(BaseEstimator, TransformerMixin):
"""
Takes an array in columns defined by input_keys variable and returns
Based off of example from:
https://scikit-learn.org/dev/developers/contributing.html#rolling-your-own-estimator
Init Parameters
===============
--> SatData(input_dict, source=None, header=None)
input_dict : dict(key: dataarray)
Dict containing the input data in the form of key: data (in array or list)
Example: {'time': timearray, 'bx': bxarray}. The available keys for data input
can be accessed in SatData.default_keys.
header : dict(headerkey: value)
Dict containing metadata on the data array provided. Useful data headers are
provided in SatData.empty_header but this can be expanded as needed.
source : str
Provide quick-access name of satellite/data type for source.
Attributes
==========
.look_back
Number of hours in past to provide in output features.
.input_keys
List of keys of array provided to transform() method.
.feature_keys (only after calling transform())
List of keys of output features.
.reduced_features
Default=True, use reduced features.
.v_power/.den_power
Exponents in pressure term.
.bz_power
Power of Bz in output feature.
.m1/.m2/.e1/.e2
Parameters for OBrien's ring-current term (see calc_ring_current_term())
Methods
=======
.fit()
Filler function. Returns self.
.transform(X)
Transforms input matrix X (SatData.data.T) into ML features.
Examples
"""
def __init__(self, input_keys=[], v_power=1, den_power=1, bz_power=1, m1=-4.4, m2=2.4, e1=9.74, e2=4.69, look_back=5, reduced_features=True):
self.look_back = look_back
self.input_keys = input_keys
self.reduced_features = reduced_features
self.v_power = v_power
self.den_power = den_power
self.bz_power = bz_power
self.m1 = m1
self.m2 = m2
self.e1 = e1
self.e2 = e2
def fit(self, X, y = None):
return self
def transform(self, X):
# bx and by give no improvement, neither does np.gradient(da['density']) nor V**2
# TIME AND SOLAR WIND VARIABLES
# -----------------------------
time = X[:,0]
bx, by, bz = X[:,self.input_keys.index('bx')], X[:,self.input_keys.index('by')], X[:,self.input_keys.index('bz')]
btot = X[:,self.input_keys.index('btot')]
speed, density, temp = X[:,self.input_keys.index('speed')], X[:,self.input_keys.index('density')], X[:,self.input_keys.index('temp')]
sin_DOY, cos_DOY, sin_LT, cos_LT = extract_local_time_variables(time)
# Combine all:
if self.reduced_features:
X = np.vstack((sin_DOY, cos_DOY, time, bx, by, bz)).T
else:
X = np.vstack((sin_DOY, cos_DOY, time, speed, density, btot, bx, by, bz)).T
# OTHER VARIABLES
# ---------------
# Pressure term
pressure = X[:,3]**self.den_power * X[:,2]**self.v_power
# Gradient in Bz
dbz = np.gradient(bz)
# Newell coupling
ec = calc_newell_coupling(by, bz, speed)
# Bz to a certain power
bz_exp = bz**self.bz_power
# OBrien's ring current term
deltat = np.asarray([(time[i+1] - time[i])*24. for i in range(len(time)-1)] + [0.])
deltat[-1] = deltat[-2]
rc = calc_ring_current_term(deltat, bz, speed, m1=self.m1, m2=self.m2, e1=self.e1, e2=self.e2)
# Sinphi from Temerin-Li model:
# Including this term improves overall accuracy but reduces accuracy of large negative values:
tt, ttt = 2.*np.pi*(time-2449718.5)/365.24, 2.*np.pi*(time-2449718.5)
cosphi = np.sin(tt+0.078) * np.sin(ttt-tt-1.22) * (9.58589e-2) + np.cos(tt+0.078) * (0.39+0.104528*np.cos(ttt-tt-1.22))
sinphi = (1. - cosphi*cosphi)**0.5
# PAST TERMS
# ----------
def create_past_dataset(data, look_back=1):
shifted = []
# Fill up empty values with mean:
for i in range(look_back):
shifted.append(np.full((look_back), np.nanmean(data)))
# Fill rest of array with past values:
for i in range(len(data)-look_back):
a = data[i:(i+look_back)]
shifted.append(a)
return np.array(shifted)
#theta = -(np.arccos(-X[:,5]/X[:,4]) - np.pi) / 2. # infinite?
#exx = X[:,2] * X[:,4] * np.sin(theta)**7
# Calculate past terms:
past_pressure = create_past_dataset(pressure, look_back=self.look_back)
past_rc = create_past_dataset(rc, look_back=self.look_back)
past_ec = create_past_dataset(ec, look_back=self.look_back)
past_speed = create_past_dataset(speed, look_back=self.look_back)
past_bz = create_past_dataset(bz, look_back=self.look_back)
past_dbz = create_past_dataset(dbz, look_back=self.look_back)
past_sinphi = create_past_dataset(sinphi, look_back=self.look_back)
if self.reduced_features:
past_bz_24h = create_past_dataset(bz, look_back=24)
past_rc_24h = create_past_dataset(rc, look_back=24)
features = np.concatenate((X, rc.reshape(-1,1), ec.reshape(-1,1), pressure.reshape(-1,1),
sinphi.reshape(-1,1), dbz.reshape(-1,1),
past_bz_24h, past_rc_24h), axis=1)
self.feature_keys = ['sin_DOY', 'cos_DOY', 'time', 'bx', 'by', 'bz'] + \
['rc', 'ec', 'pressure', 'sinphi', 'dbz'] + \
['bz(t-{})'.format(i) for i in range(1,24+1)] + \
['rc(t-{})'.format(i) for i in range(1,24+1)]
else:
features = np.concatenate((X, rc.reshape(-1,1), ec.reshape(-1,1), pressure.reshape(-1,1),
bz_exp.reshape(-1,1), sinphi.reshape(-1,1), dbz.reshape(-1,1),
past_pressure, past_rc, past_speed, past_bz, past_dbz, past_sinphi, past_ec), axis=1)
self.feature_keys = ['sin_DOY', 'cos_DOY', 'time', 'speed', 'density', 'btot', 'bx', 'by', 'bz'] + \
['rc', 'ec', 'pressure', 'bzpower', 'sinphi', 'dbz'] + \
['pressure(t-{})'.format(i) for i in range(1,self.look_back+1)] + \
['rc(t-{})'.format(i) for i in range(1,self.look_back+1)] + \
['speed(t-{})'.format(i) for i in range(1,self.look_back+1)] + \
['bz(t-{})'.format(i) for i in range(1,self.look_back+1)] + \
['dbz(t-{})'.format(i) for i in range(1,self.look_back+1)] + \
['sinphi(t-{})'.format(i) for i in range(1,self.look_back+1)] + \
['ec(t-{})'.format(i) for i in range(1,self.look_back+1)]
return features
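# Hypothetical usage of DstFeatureExtraction (the column order below is an assumption; it
# must match the input_keys list handed in, with time in column 0 as transform() expects):
#   fe = DstFeatureExtraction(input_keys=['time', 'bx', 'by', 'bz', 'btot', 'speed', 'density', 'temp'])
#   features = fe.fit(data).transform(data)
#   print(fe.feature_keys)  # names of the generated feature columns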
def dst_loss_function(y_true, y_pred):
rsme_all = math.sqrt(mean_squared_error(y_true, y_pred))
inds = np.where(y_true < 0.3) # Lowest 30% (0 to 1 MinMaxScaler)
rmse_cut = math.sqrt(mean_squared_error(y_true[inds], y_pred[inds]))
return (rsme_all + rmse_cut)
def calc_dst_burton(time, bz, speed, density):
"""Calculates Dst from solar wind input according to Burton et al. 1975 method.
Parameters
==========
time : np.array
Array containing time variables.
bz : np.array
Array containing Bz in coordinate system ?.
speed : np.array
Array containing solar wind speed.
density : np.array
Array containing Bz in coordinate system ?.
Returns
=======
dst_burton : np.array
Array with calculated values over timesteps time.
"""
protonmass=1.6726219*1e-27 #kg
bzneg = copy.deepcopy(bz)
bzneg[bz > 0] = 0
pdyn = density*1e6*protonmass*(speed*1e3)**2*1e9 #in nanoPascal
Ey = speed*abs(bzneg)*1e-3 #now Ey is in mV/m
dst_burton = np.zeros(len(bz))
Ec=0.5
a=3.6*1e-5
b=0.2*100 #*100 due to different dynamic pressure unit in Burton
c=20
d=-1.5/1000
lrc=0
for i in range(len(bz)-1):
if Ey[i] > Ec:
F = d*(Ey[i]-Ec)
else: F=0
#Burton 1975 p4208: Dst=Dst0+bP^1/2-c
# Ring current Dst
deltat_sec = (time[i+1]-time[i])*86400 #deltat must be in seconds
rc = lrc + (F-a*lrc)*deltat_sec
# Dst of ring current and magnetopause currents
dst_burton[i+1] = rc + b*np.sqrt(pdyn[i+1]) - c
lrc = rc
return dst_burton
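# In compact form, the Burton et al. (1975) recursion implemented above is
#   dDst0/dt = F(Ey) - a * Dst0,      Dst = Dst0 + b * sqrt(Pdyn) - c,
# with the injection F(Ey) = d * (Ey - Ec) when Ey exceeds the coupling threshold Ec and
# F = 0 otherwise; the loop integrates it with a simple forward-Euler step of deltat_sec.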
def calc_dst_obrien(time, bz, speed, density):
"""Calculates Dst from solar wind input according to OBrien and McPherron 2000 method.
Parameters
==========
time : np.array
Array containing time variables.
bz : np.array
Array containing Bz in coordinate system ?.
speed : np.array
Array containing solar wind speed.
density : np.array
Array containing Bz in coordinate system ?.
Returns
=======
dst_burton : np.array
Array with calculated values over timesteps time.
"""
protonmass=1.6726219*1e-27 #kg
bzneg = copy.deepcopy(bz)
bzneg[bz > 0] = 0
pdyn = density*1e6*protonmass*(speed*1e3)**2*1e9 #in nanoPascal
Ey = speed*abs(bzneg)*1e-3; #now Ey is in mV/m
Ec=0.49
b=7.26
c=11 #nT
lrc=0
dst_obrien = np.zeros(len(bz))
for i in range(len(bz)-1):
if Ey[i] > Ec: #Ey in mV m
Q = -4.4 * (Ey[i]-Ec)
else: Q=0
tau = 2.4 * np.exp(9.74/(4.69 + Ey[i])) #tau in hours
# Ring current Dst
deltat_hours=(time[i+1]-time[i])*24 # time should be in hours
rc = ((Q - lrc/tau))*deltat_hours + lrc
# Dst of ring current and magnetopause currents
dst_obrien[i+1] = rc + b*np.sqrt(pdyn[i+1])-c;
lrc = rc
return dst_obrien
def calc_dst_temerin_li(time, btot, bx, by, bz, speed, speedx, density, version='2002n', linear_t_correction=False):
"""Calculates Dst from solar wind input according to Temerin and Li 2002 method.
Credits to <NAME> (LASP Colorado) and <NAME>.
Calls _jit_calc_dst_temerin_li. All constants are defined in there.
Note: vx has to be used with a positive sign throughout the calculation.
Parameters
==========
time : np.array
Array containing time variables.
btot : np.array
Array containing Btot.
bx : np.array
Array containing Bx in coordinate system ?.
by : np.array
Array containing By in coordinate system ?.
bz : np.array
Array containing Bz in coordinate system ?.
speed : np.array
Array containing solar wind speed.
speedx : np.array
Array containing solar wind speed in x-direction.
density : np.array
Array containing solar wind density.
version : str (default='2002n')
String determining which model version should be used.
Returns
=======
dst_burton : np.array
Array with calculated Dst values over timesteps time.
"""
# Arrays
dst1=np.zeros(len(bz))
dst2=np.zeros(len(bz))
dst3=np.zeros(len(bz))
dst_tl=np.zeros(len(bz))
# Define initial values (needed for convergence, see Temerin and Li 2002 note)
dst1[0:10]=-15
dst2[0:10]=-13
dst3[0:10]=-2
if version == '2002':
newparams = False
else:
newparams = True
if version in ['2002', '2002n']:
# julian_days = [sunpy.time.julian_day(num2date(x)) for x in time]
julian_days = [astropy.time.Time(num2date(x), format='datetime', scale='utc').jd for x in time]
return _jit_calc_dst_temerin_li_2002(time, btot, bx, by, bz, speed, speedx, density, dst1, dst2, dst3, dst_tl, julian_days, newparams=newparams)
elif version == '2006':
dst1[0:10], dst2[0:10], dst3[0:10] = -10, -5, -10
ds1995 = time - date2num(datetime(1995,1,1))
ds2000 = time - date2num(datetime(2000,1,1))
# YEARLY DRIFT CORRECTION TERM (NOT IN PAPER)
if linear_t_correction:
drift_corr = -0.014435865642103548 * ds2000 + 9.57670996872173
else:
drift_corr = 0.
return _jit_calc_dst_temerin_li_2006(ds1995, ds2000, btot, bx, by, bz, speed, speedx, density, dst1, dst2, dst3) + drift_corr
@njit
def _jit_calc_dst_temerin_li_2002(time, btot, bx, by, bz, speed, speedx, density, dst1, dst2, dst3, dst_tl, julian_days, newparams=True):
"""Fast(er) calculation of Dst using jit on Temerin-Li method."""
#define all constants
p1, p2, p3 = 0.9, 2.18e-4, 14.7
# these need to be found with a fit for 1-2 years before calculation
# taken from the TL code: offset_term_s1 = 6.70 ;formerly named dsto
# offset_term_s2 = 0.158 ;formerly hard-coded 2.27 for 1995-1999
# offset_term_s3 = -0.94 ;formerly named phasea -1.11 for 1995-1999
# offset_term_s4 = -0.00954 ;formerly hard-coded
# offset_term_s5 = 8.159e-6 ;formerly hard-coded
#found by own offset optimization for 2015
#s4 and s5 as in the TL 2002 paper are not used due to problems with the time
if not newparams:
s1, s2, s3, s4, s5 = -2.788, 1.44, -0.92, -1.054, 8.6e-6
initdate = 2449718.5
else:
s1, s2, s3, s4, s5 = 4.29, 5.94, -3.97, 0., 0.
initdate = 2457023.5
a1, a2, a3 = 6.51e-2, 1.37, 8.4e-3
a4, a5, a6 = 6.053e-3, 1.21e-3, 1.55e-3 # a5 = 1.12e-3 before. Error?
tau1, tau2, tau3 = 0.14, 0.18, 9e-2 #days
b1, b2, b3 = 0.792, 1.326, 1.29e-2
c1, c2 = -24.3, 5.2e-2
yearli=365.24
alpha=0.078
beta=1.22
for i in np.arange(1,len(bz)-1):
#t time in days since beginning of 1995 #1 Jan 1995 in Julian days
#t1=sunpy.time.julian_day(mdates.num2date(time_in[i]))-sunpy.time.julian_day('1995-1-1 00:00')
# sunpy.time.julian_day('2015-1-1 00:00') = 2457023.5
t1 = julian_days[i] - initdate
tt = 2*np.pi*t1/yearli
ttt = 2*np.pi*t1
cosphi = np.sin(tt+alpha) * np.sin(ttt-tt-beta) * (9.58589e-2) + np.cos(tt+alpha) * (0.39+0.104528*np.cos(ttt-tt-beta))
#equation 1 use phi from equation 2
sinphi = (1. - cosphi**2.)**0.5
pressureterm = (p1*(btot[i]**2) + density[i] * (p2*(speed[i])**2/(sinphi**2.52) + p3) )**0.5
#2 direct IMF bz term
directterm = 0.478 * bz[i]*(sinphi**11.0)
#3 offset term - the last two terms were cut because don't make sense as t1 rises extremely for later years
offset = s1 + s2 * np.sin(2*np.pi*t1/yearli + s3) + s4*t1 + s5*t1*t1
#or just set it constant
#offset[i]=-5
bt = (by[i]**2 + bz[i]**2)**0.5
if bt == 0.: bt = 1e-12 # Escape dividing by zero error in theta_li
#mistake in 2002 paper - bt is similarly defined as bp (with by bz); but in Temerin and Li's code (dst.pro) bp depends on by and bx
bp = (by[i]**2 + bx[i]**2)**0.5
#contains t1, but in cos and sin
dh = bp*np.cos(np.arctan2(bx[i],by[i])+6.10) * (3.59e-2 * np.cos(2*np.pi*t1/yearli + 0.04) - 2.18e-2*np.sin(2*np.pi*t1-1.60))
theta_li = -(np.arccos(-bz[i]/bt)-np.pi)/2
exx = 1e-3 * abs(speedx[i]) * bt * np.sin(theta_li)**6.1
#t1 and dt are in days
dttl = julian_days[i+1]-julian_days[i]
#4 dst1
#find value of dst1(t-tau1)
#time is in matplotlib format in days:
#im time den index suchen wo time-tau1 am nächsten ist
#und dann bei dst1 den wert mit dem index nehmen der am nächsten ist, das ist dann dst(t-tau1)
#wenn index nicht existiert (am anfang) einfach index 0 nehmen
#check for index where timesi is greater than t minus tau
indtau1 = np.where(time > (time[i]-tau1))
dst1tau1 = dst1[indtau1[0][0]]
dst2tau1 = dst2[indtau1[0][0]]
th1 = 0.725*(sinphi**-1.46)
th2 = 1.83*(sinphi**-1.46)
fe1 = (-4.96e-3) * (1+0.28*dh) * (2*exx+abs(exx-th1) + abs(exx-th2)-th1-th2) * (abs(speedx[i])**1.11) * ((density[i])**0.49) * (sinphi**6.0)
dst1[i+1] = dst1[i] + (a1*(-dst1[i])**a2 + fe1*(1. + (a3*dst1tau1 + a4*dst2tau1)/(1. - a5*dst1tau1 - a6*dst2tau1))) * dttl
#5 dst2
indtau2 = np.where(time > (time[i]-tau2))
dst1tau2 = dst1[indtau2[0][0]]
df2 = (-3.85e-8) * (abs(speedx[i])**1.97) * (bt**1.16) * np.sin(theta_li)**5.7 * (density[i])**0.41 * (1+dh)
fe2 = (2.02e3) * (sinphi**3.13)*df2/(1-df2)
dst2[i+1] = dst2[i] + (b1*(-dst2[i])**b2 + fe2*(1. + (b3*dst1tau2)/(1. - b3*dst1tau2))) * dttl
#6 dst3
indtau3 = np.where(time > (time[i]-tau3))
"""
File: statistics_recorder.py
Author: <NAME>
Email: <EMAIL>
Github: https://github.com/ComeBertrand
Description: Statistics computation tools that will be the result of the
benchmark computation.
"""
import numpy as np
class StatisticsRecorder(object):
"""Compilation of statistics on a benchmark of a metaheuristic run.
Args:
nb_run (int): Number of runs that will be made of a metaheuristic on
the same problem. Strictly positive.
problem (Problem): The problem on which the statistics will be
computed.
metaheuristic (Metaheuristic): The metaheuristic on which the
statistics will be computed.
base_size (int): Base size for the arrays that will hold the data from
the iterations of the metaheuristic. Default is 256. Strictly
positive.
Attributes:
nb_run (int): number of runs on which statistics are compiled.
problem (Problem): The problem on which the statistics will be
computed.
metaheuristic (Metaheuristic): The metaheuristic on which the
statistics will be computed.
nb_iter_per_run (np.array): Array of size 'nb_run' that holds the
number of iteration made by the metaheuristic for each run.
nb_iter_total (int): Total number of iterations made in all the runs.
best_values (nb.array): Array of size 'nb_run' that hold the best
fitness of each run.
best_value (float): Best fitness in all the runs.
worst_value (float): Worst fitness of the best fitnesses computed
at each run.
mean_value (float): Mean best fitness recorded for each run.
std_value (float): Standard deviation on the best fitness of each
run.
best_time_iter (float): Best time (lower is better) of iteration
computation in all the runs. (in s).
worst_time_iter (float): Worst time (lower is better) of iteration
computation in all the runs. (in s).
mean_time_iter (float): Mean time taken by the iteration computation.
(in s.)
std_time_iter (float): Standard deviation of the time taken by the
iterations computation.
best_time_tot (float): Best time (lower is better) of computation of
a full run. (in s).
worst_time_tot (float): Worst time (lower is better) of computation of
a full run. (in s).
mean_time_tot (float): Mean time taken by the full run computation.
(in s).
std_time_tot (float): Standard deviation of the time taken by the
full run computation.
"""
def __init__(self, nb_run, problem, metaheuristic, base_size=256):
if nb_run <= 0:
raise ValueError("The number of runs must be strictly positive")
if base_size <= 0:
raise ValueError("The base size must be strictly positive")
self.problem = problem
self.metaheuristic = metaheuristic
        self._nb_iter = np.zeros(nb_run, int)
self._nb_iter_tot = 0
self._nb_run = nb_run
self._current_size_value = base_size
self._current_size_time = base_size
# Values records are indexed by runs.
        self._values = np.zeros((nb_run, base_size), float)
        # Iter time records are all in the same array.
        self._time = np.zeros(base_size, float)
        self._time_tot = np.zeros(nb_run, float)
def record_iter_stat(self, num_run, best_solution, time_iteration):
"""Record a statistic concerning an iteration.
Args:
num_run (int): Index of the run in which the iteration took place.
best_solution (Solution): Best solution computed at the end of the
iteration. It has to be evaluated.
time_iteration (float): Time in second taken to compute the
iteration.
"""
if best_solution.fitness is None:
raise ValueError("Statistics cannot be recorded on solutions that "
"have not been evaluated.")
if self._nb_iter[num_run] >= self._current_size_value:
self._current_size_value *= 2
self._values.resize((self._nb_run, self._current_size_value))
if self._nb_iter_tot >= self._current_size_time:
self._current_size_time *= 2
self._time.resize((self._current_size_time,))
self._values[num_run][self._nb_iter[num_run]] = best_solution.fitness
self._time[self._nb_iter_tot] = time_iteration
self._nb_iter[num_run] += 1
self._nb_iter_tot += 1
def record_time_computation(self, num_run, time_computation):
"""Record the time taken by a full metaheuristic run.
Args:
num_run (int): Index of the run in which the iteration took place.
time_computation (float): Time in second taken to compute the
full run.
"""
self._time_tot[num_run] = time_computation
@property
def nb_run(self):
return self._nb_run
@property
def nb_iter_per_run(self):
return self._nb_iter
@property
def nb_iter_total(self):
return self._nb_iter_tot
def get_run_nb_iterations(self, run_index):
return self._nb_iter[run_index]
def get_run_values(self, run_index):
return self._values[run_index]
@property
def best_values(self):
return np.array([self._values[i][max_iter - 1] for i, max_iter
in enumerate(self._nb_iter) if max_iter > 0],
                        float)
@property
def best_value(self):
if len(self.best_values):
return np.amin(self.best_values)
return None
@property
def worst_value(self):
if len(self.best_values):
return np.amax(self.best_values)
return None
@property
def mean_value(self):
if len(self.best_values):
return np.mean(self.best_values)
return None
@property
def std_value(self):
if len(self.best_values):
return np.std(self.best_values)
return None
@property
def times_iter(self):
if self._nb_iter_tot:
return self._time[:self._nb_iter_tot]
return None
@property
def best_time_iter(self):
if self._nb_iter_tot:
return np.amin(self._time[:self._nb_iter_tot])
return None
@property
def worst_time_iter(self):
if self._nb_iter_tot:
return np.amax(self._time[:self._nb_iter_tot])
return None
@property
def mean_time_iter(self):
if self._nb_iter_tot:
return np.mean(self._time[:self._nb_iter_tot])
return None
@property
def std_time_iter(self):
if self._nb_iter_tot:
return np.std(self._time[:self._nb_iter_tot])
return None
@property
def time_tots(self):
if np.any(self._time_tot):
return self._time_tot
return None
@property
def best_time_tot(self):
if np.any(self._time_tot):
return np.amin(self._time_tot)
return None
@property
def worst_time_tot(self):
if np.any(self._time_tot):
return np.amax(self._time_tot)
return None
@property
def mean_time_tot(self):
if np.any(self._time_tot):
return np.mean(self._time_tot)
return None
@property
def std_time_tot(self):
        if np.any(self._time_tot):
            return np.std(self._time_tot)
        return None
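# Example usage (a sketch, not part of the recorder itself): `problem`,
# `metaheuristic` and the `run_metaheuristic` generator below are hypothetical
# placeholders; any object whose `fitness` attribute has been evaluated can be
# passed as `best_solution`.
#
#   stats = StatisticsRecorder(nb_run=2, problem=problem, metaheuristic=metaheuristic)
#   for run in range(2):
#       total_elapsed = 0.0
#       for solution, elapsed in run_metaheuristic():
#           stats.record_iter_stat(run, solution, elapsed)
#           total_elapsed += elapsed
#       stats.record_time_computation(run, total_elapsed)
#   print(stats.mean_value, stats.mean_time_iter, stats.mean_time_tot)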
# Author: bbrighttaer
# Project: jova
# Date: 5/20/19
# Time: 1:17 PM
# File: data.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import time
from collections import defaultdict
from math import sqrt
import networkx as nx
import numpy as np
import pandas as pd
import torch
from Bio import Align
from rdkit.Chem import DataStructs
from torch.utils.data import dataset as ds
from tqdm import tqdm
from jova import cuda as _cuda
from jova.data import load_csv_dataset
from jova.feat.mol_graphs import ConvMol
from jova.utils.math import block_diag_irregular
from jova.utils.thread import UnboundedProgressbar
from jova.utils.train_helpers import ViewsReg
def load_prot_dict(prot_desc_dict, prot_seq_dict, prot_desc_path,
sequence_field, phospho_field):
if re.search('davis', prot_desc_path, re.I):
source = 'davis'
elif re.search('metz', prot_desc_path, re.I):
source = 'metz'
elif re.search('kiba', prot_desc_path, re.I):
source = 'kiba'
elif re.search('toxcast', prot_desc_path, re.I):
source = 'toxcast'
elif re.search('human', prot_desc_path, re.I):
source = 'human'
elif re.search('celegans', prot_desc_path, re.I):
source = 'celegans'
elif re.search('egfr_case_study', prot_desc_path, re.I) or re.search('egfr_unfiltered', prot_desc_path, re.I) \
or re.search('egfr_1M17', prot_desc_path, re.I):
source = 'egfr_cs'
df = pd.read_csv(prot_desc_path, index_col=0)
# protList = list(df.index)
for row in df.itertuples():
descriptor = row[2:]
descriptor = np.array(descriptor)
descriptor = np.reshape(descriptor, (1, len(descriptor)))
pair = (source, row[0])
assert pair not in prot_desc_dict
prot_desc_dict[pair] = descriptor
sequence = row[sequence_field]
phosphorylated = row[phospho_field]
assert pair not in prot_seq_dict
prot_seq_dict[pair] = (phosphorylated, sequence)
def load_dti_data(featurizer, dataset_name, dataset_file, prot_seq_dict, input_protein=True, cross_validation=False,
test=False, fold_num=5, split='random', reload=True, predict_cold=False, cold_drug=False,
cold_target=False, cold_drug_cluster=False, split_warm=False, filter_threshold=0,
mode='regression', seed=0, mf_simboost_data_dict=None):
if cross_validation:
test = False
tasks, all_dataset, transformers, \
fp, kernel_dicts, simboost_feats, MF_entities_dict = load_csv_dataset(dataset_name, dataset_file,
featurizer=featurizer,
cross_validation=cross_validation,
test=test, split=split,
reload=reload,
K=fold_num, mode=mode,
predict_cold=predict_cold,
cold_drug=cold_drug,
cold_target=cold_target,
cold_drug_cluster=cold_drug_cluster,
split_warm=split_warm,
prot_seq_dict=prot_seq_dict,
filter_threshold=filter_threshold,
input_protein=input_protein,
seed=seed,
mf_simboost_data_dict=mf_simboost_data_dict)
return tasks, all_dataset, transformers, fp, kernel_dicts, simboost_feats, MF_entities_dict
def load_proteins(prot_desc_path):
"""
Retrieves all proteins in the tuple of paths given.
:param prot_desc_path: A tuple of file paths containing the protein (PSC) descriptors.
    :return: A tuple of dicts: (descriptor information, sequence information)
"""
prot_desc_dict = {}
prot_seq_dict = {}
for path in prot_desc_path:
load_prot_dict(prot_desc_dict, prot_seq_dict, path, 1, 2)
return prot_desc_dict, prot_seq_dict
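# Example (sketch; the file name and protein key below are hypothetical):
#
#   prot_desc_dict, prot_seq_dict = load_proteins(('data/davis_prot_desc.csv',))
#   descriptor = prot_desc_dict[('davis', 'ABL1')]            # 1 x n descriptor row
#   phosphorylated, sequence = prot_seq_dict[('davis', 'ABL1')]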
class DtiDataset(ds.Dataset):
def __init__(self, x_s, y_s, w_s):
"""
Creates a Drug-Target Indication dataset object.
:param x_s: a tuple of X data of each view.
:param y_s: a tuple of y data of each view.
:param w_s: a tuple of label weights of each view.
"""
        assert len(x_s) == len(y_s) == len(w_s), "x_s, y_s and w_s must contain the same number of views."
self.x_s = x_s
self.y_s = y_s
self.w_s = w_s
def __len__(self):
return len(self.x_s[0])
def __getitem__(self, index):
x_s = []
y_s = []
w_s = []
for view_x, view_y, view_w in zip(self.x_s, self.y_s, self.w_s):
x_s.append(view_x[index])
y_s.append(view_y[index])
w_s.append(view_w[index])
return x_s, y_s, w_s
class Dataset(ds.Dataset):
"""Wrapper for the dataset to pytorch models"""
def __init__(self, views_data):
"""
Creates a dataset wrapper.
:param views_data: Data of all views. Structure: ((X1, Y1), (X2, Y2), ...)
"""
self.X_list = []
self.y_list = []
self.num_views = len(views_data)
for data in views_data:
self.X_list.append(data[0]) # 0 -> X data
self.y_list.append(data[1]) # 1 -> y data
super(Dataset, self).__init__()
def __len__(self):
return len(self.X_list[0])
def __getitem__(self, index):
ret_ds = []
for i in range(self.num_views):
x = self.X_list[i][index]
y = self.y_list[i][index]
ret_ds.append((torch.tensor(x, dtype=torch.float), torch.tensor(y, dtype=torch.long)))
return ret_ds
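# Example (sketch): the Dataset wrapper above can be fed directly to a standard
# PyTorch DataLoader; the two views below are hypothetical random arrays.
#
#   import numpy as np
#   from torch.utils.data import DataLoader
#   X1, y1 = np.random.rand(100, 8), np.random.randint(0, 2, 100)
#   X2, y2 = np.random.rand(100, 16), np.random.randint(0, 2, 100)
#   loader = DataLoader(Dataset(((X1, y1), (X2, y2))), batch_size=32, shuffle=True)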
def batch_collator(batch, prot_desc_dict, spec, cuda_prot=True):
batch = np.array(batch) # batch.shape structure: (batch_size, x-0/y-1/w-2 data, view index)
data = {}
# num_active_views = reduce(lambda x1, x2: x1 + x2, flags.values())
funcs = {
"ecfp4": process_ecfp_view_data,
"ecfp8": process_ecfp_view_data,
"weave": process_weave_view_data,
"gconv": process_gconv_view_data,
"gnn": process_gnn_view_data
}
active_views = []
if isinstance(spec, dict):
for k in spec:
if spec[k]:
active_views.append(k)
else:
active_views.append(spec)
for i, v_name in enumerate(active_views):
func = funcs[v_name]
data[v_name] = (func(batch, prot_desc_dict, i, cuda_prot), batch[:, 1, i], batch[:, 2, i])
return len(batch), data
def process_ecfp_view_data(X, prot_desc_dict, idx, cuda_prot):
"""
Converts ECFP-Protein pair dataset to a pytorch tensor.
:param X:
:param prot_desc_dict:
:return:
"""
mols_tensor = prots_tensor = None
prot_names = None
x_data = None
if X is not None:
x_data = X[:, 0, idx]
mols = [pair[0] for pair in x_data]
mols_tensor = torch.from_numpy(np.array([mol.get_array() for mol in mols]))
prots = [pair[1] for pair in x_data]
prot_names = [prot.get_name() for prot in prots]
prot_desc = [prot_desc_dict[prot_name] for prot_name in prot_names]
prot_desc = np.array(prot_desc)
prot_desc = prot_desc.reshape(prot_desc.shape[0], prot_desc.shape[2])
prots_tensor = torch.from_numpy(prot_desc)
return cuda(mols_tensor.float()), cuda(
prots_tensor.float()) if cuda_prot else prots_tensor.float(), prot_names, x_data
def process_weave_view_data(X, prot_desc_dict, idx, cuda_prot):
"""
Converts Weave-Protein pair dataset to a pytorch tensor.
:param X:
:param prot_desc_dict:
:return:
"""
atom_feat = []
pair_feat = []
atom_split = []
atom_to_pair = []
pair_split = []
prot_descriptor = []
n_atoms_list = []
start = 0
x_data = X[:, 0, idx]
prot_names = []
for im, pair in enumerate(x_data):
mol, prot = pair
n_atoms = mol.get_num_atoms()
n_atoms_list.append(n_atoms)
prot_names.append(prot.get_name())
# number of atoms in each molecule
atom_split.extend([im] * n_atoms)
# index of pair features
C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))
atom_to_pair.append(
np.transpose(
np.array([C1.flatten() + start,
C0.flatten() + start])))
# number of pairs for each atom
pair_split.extend(C1.flatten() + start)
start = start + n_atoms
# atom features
atom_feat.append(mol.get_atom_features())
# pair features
n_pair_feat = mol.pairs.shape[2]
pair_feat.append(
np.reshape(mol.get_pair_features(),
(n_atoms * n_atoms, n_pair_feat)))
prot_descriptor.append(prot_desc_dict[prot.get_name()])
prots_tensor = torch.from_numpy(np.concatenate(prot_descriptor, axis=0))
mol_data = [
cuda(torch.tensor(np.concatenate(atom_feat, axis=0), dtype=torch.float)),
cuda(torch.tensor(np.concatenate(pair_feat, axis=0), dtype=torch.float)),
cuda(torch.tensor(np.array(pair_split), dtype=torch.int)),
cuda(torch.tensor(np.concatenate(atom_to_pair, axis=0), dtype=torch.long)),
cuda(torch.tensor(np.array(atom_split), dtype=torch.int)),
n_atoms_list
]
return mol_data, cuda(prots_tensor.float()) if cuda_prot else prots_tensor.float(), prot_names, x_data
def process_gconv_view_data(X, prot_desc_dict, idx, cuda_prot):
"""
Converts Graph convolution-Protein pair dataset to a pytorch tensor.
:param X:
:param prot_desc_dict:
:return:
"""
mol_data = []
n_atoms_list = []
x_data = X[:, 0, idx]
mols = []
for pair in x_data:
mol, prot = pair
n_atoms = mol.get_num_atoms()
n_atoms_list.append(n_atoms)
mols.append(mol)
multiConvMol = ConvMol.agglomerate_mols(mols)
mol_data.append(cuda(torch.from_numpy(multiConvMol.get_atom_features())))
mol_data.append(cuda(torch.from_numpy(multiConvMol.deg_slice)))
mol_data.append(cuda(torch.tensor(multiConvMol.membership)))
mol_data.append(n_atoms_list)
for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
mol_data.append(cuda(torch.from_numpy(multiConvMol.get_deg_adjacency_lists()[i])))
# protein
prots = [pair[1] for pair in x_data]
prot_names = [prot.get_name() for prot in prots]
prot_desc = [prot_desc_dict[prot_name] for prot_name in prot_names]
prot_desc = np.array(prot_desc)
prot_desc = prot_desc.reshape(prot_desc.shape[0], prot_desc.shape[2])
prots_tensor = cuda(torch.from_numpy(prot_desc)) if cuda_prot else torch.from_numpy(prot_desc)
return mol_data, prots_tensor.float(), prot_names, x_data
def process_gnn_view_data(X, prot_desc_dict, idx, cuda_prot):
prot_names = []
x_data = X[:, 0, idx]
adjacency_matrices = []
fp_profiles = []
prot_desc = []
for pair in x_data:
mol, prot = pair
adjacency_matrices.append(torch.from_numpy(mol.adjacency).float())
fp_profiles.append(cuda(torch.tensor(mol.fingerprints, dtype=torch.long)))
prot_names.append(prot.get_name())
prot_desc.append(prot_desc_dict[prot.get_name()])
# compound
adjacency_matrices = block_diag_irregular(adjacency_matrices)
axis = [len(f) for f in fp_profiles]
M = np.concatenate([np.repeat(len(f), len(f)) for f in fp_profiles])
M = torch.unsqueeze(torch.FloatTensor(M), 1)
fingerprints = torch.cat(fp_profiles)
mol_data = (fingerprints, cuda(adjacency_matrices), cuda(M), axis)
# protein - PSC
prot_desc = np.array(prot_desc)
prot_desc = prot_desc.reshape(prot_desc.shape[0], prot_desc.shape[2])
prots_tensor = cuda(torch.from_numpy(prot_desc)) if cuda_prot else torch.from_numpy(prot_desc)
return mol_data, prots_tensor.float(), prot_names, x_data
# def pad(matrices, pad_value=0):
# """Pad adjacency matrices for batch processing."""
# sizes = [m.shape[0] for m in matrices]
# M = sum(sizes)
# pad_matrices = pad_value + np.zeros((M, M))
# i = 0
# for j, m in enumerate(matrices):
# j = sizes[j]
# pad_matrices[i:i + j, i:i + j] = m
# i += j
# return cuda(torch.FloatTensor(pad_matrices))
def cuda(tensor):
if _cuda:
return tensor.cuda()
else:
return tensor
def get_data(featurizer, flags, prot_sequences, seed, mf_simboost_data_dict=None):
# logger = get_logger(name="Data loader")
print("--------------About to load {}-{} data-------------".format(featurizer, flags['dataset_name']))
data = load_dti_data(featurizer=featurizer,
dataset_name=flags['dataset_name'],
dataset_file=flags['dataset_file'],
prot_seq_dict=prot_sequences,
input_protein=True,
cross_validation=flags['cv'],
test=flags['test'],
fold_num=flags['fold_num'],
split=flags['splitting_alg'],
reload=flags['reload'],
predict_cold=flags['predict_cold'],
cold_drug=flags['cold_drug'],
cold_target=flags['cold_target'],
cold_drug_cluster=flags['cold_drug_cluster'],
split_warm=flags['split_warm'],
mode='regression' if not hasattr(flags, 'mode') else flags['mode'],
seed=seed,
filter_threshold=flags["filter_threshold"],
mf_simboost_data_dict=mf_simboost_data_dict)
print("--------------{}-{} data loaded-------------".format(featurizer, flags['dataset_name']))
return data
def compute_similarity_kernel_matrices(dataset):
"""
Computes the drug-drug and protein-protein kernel matrices for kernel-based methods (e.g. Kron-RLS)
:param dataset:
:return: tuple
"""
start = time.time()
print("Computing kernel matrices (KD_dict, KT_dict)")
all_comps = set()
all_prots = set()
for idx, pair in enumerate(dataset.X):
mol, prot = pair
all_comps.add(mol)
all_prots.add(prot)
# compounds / drugs
comps_mat = {}
for c1 in all_comps:
fp1 = c1.fingerprint
for c2 in all_comps:
fp2 = c2.fingerprint
# Tanimoto coefficient
score = DataStructs.TanimotoSimilarity(fp1, fp2)
comps_mat[Pair(c1, c2)] = score
# proteins / targets
aligner = Align.PairwiseAligner()
aligner.mode = 'local' # SW algorithm
prots_mat = {}
for p1 in all_prots:
seq1 = p1.sequence[1]
p1_score = aligner.score(seq1, seq1)
for p2 in all_prots:
seq2 = p2.sequence[1]
p2_score = aligner.score(seq2, seq2)
score = aligner.score(seq1, seq2)
# Normalized SW score
prots_mat[Pair(p1, p2)] = score / (sqrt(p1_score) * sqrt(p2_score))
print("Kernel entities: Drugs={}, Prots={}".format(len(all_comps), len(all_prots)))
duration = time.time() - start
print("Kernel matrices computation finished in: {:.0f}m {:.0f}s".format(duration // 60, duration % 60))
return comps_mat, prots_mat
def compute_simboost_drug_target_features(dataset, mf_simboost_data_dict, nbins=10, sim_threshold=0.5):
"""
Constructs the type 1,2, and 3 features (with the matrix factorization part) of SimBoost as described in:
https://jcheminf.biomedcentral.com/articles/10.1186/s13321-017-0209-z
The Matrix Factorization part is deferred to the mf.py script.
:param sim_threshold:
:param nbins:
:param dataset:
:return:
"""
assert isinstance(mf_simboost_data_dict, dict), "Drug-Target features dictionary must be provided."
print('SimBoost Drug-Target feature vector computation started')
print('Processing M matrix')
all_comps = set()
all_prots = set()
pair_to_value_y = {}
Mgraph = nx.Graph(name='drug_target_network')
Mrows = defaultdict(list)
Mcols = defaultdict(list)
for x, y, w, id in tqdm(dataset.itersamples()):
mol, prot = x
all_comps.add(mol)
all_prots.add(prot)
pair_to_value_y[Pair(mol, prot)] = y
Mrows[mol].append(y)
Mcols[prot].append(y)
Mgraph.add_edge(mol, prot, weight=y)
print('Number of compounds = %d' % len(all_comps))
print('Number of targets = %d' % len(all_prots))
# compounds / drugs
print('Processing drug similarity matrix')
D = {}
Dgraph = nx.Graph(name='drug_drug_network')
for c1 in tqdm(all_comps):
fp1 = c1.fingerprint
for c2 in all_comps:
fp2 = c2.fingerprint
# Tanimoto coefficient
score = DataStructs.TanimotoSimilarity(fp1, fp2)
D[Pair(c1, c2)] = score
Dgraph.add_nodes_from([c1, c2])
if score >= sim_threshold and c1 != c2:
Dgraph.add_edge(c1, c2)
comp_feats = compute_type2_features(compute_type1_features(Mrows, all_comps, D, nbins), D, Dgraph)
# proteins / targets
print('Processing target similarity matrix')
aligner = Align.PairwiseAligner()
aligner.mode = 'local' # SW algorithm
T = {}
Tgraph = nx.Graph(name='target_target_network')
for p1 in tqdm(all_prots):
seq1 = p1.sequence[1]
p1_score = aligner.score(seq1, seq1)
for p2 in all_prots:
seq2 = p2.sequence[1]
p2_score = aligner.score(seq2, seq2)
score = aligner.score(seq1, seq2)
# Normalized SW score
normalized_score = score / (sqrt(p1_score) * sqrt(p2_score))
T[Pair(p1, p2)] = normalized_score
Tgraph.add_nodes_from([p1, p2])
if normalized_score >= sim_threshold and p1 != p2:
Tgraph.add_edge(p1, p2)
prot_feats = compute_type2_features(compute_type1_features(Mcols, all_prots, T, nbins), T, Tgraph)
pbar = UnboundedProgressbar()
pbar.start()
print('Processing type 3 features')
# Type 3 features
btw_cent = nx.betweenness_centrality(Mgraph)
cls_cent = nx.closeness_centrality(Mgraph)
# eig_cent = nx.eigenvector_centrality(Mgraph, tol=1e-3, max_iter=500)
# pagerank = nx.pagerank(Mgraph, tol=1e-3, max_iter=1000)
drug_target_feats_dict = defaultdict(list)
vec_lengths = []
# Retrieve data from the Matrix Factorization stage
comp_mat = mf_simboost_data_dict['comp_mat']
prot_mat = mf_simboost_data_dict['prot_mat']
comp_index = mf_simboost_data_dict['comp_index']
prot_index = mf_simboost_data_dict['prot_index']
for pair in tqdm(pair_to_value_y):
comp, prot = pair.p1, pair.p2
feat = drug_target_feats_dict[Pair(comp, prot)]
# mf
cidx = comp_index[comp]
pidx = prot_index[prot]
c_vec = comp_mat[cidx].tolist()
p_vec = prot_mat[pidx].tolist()
mf = c_vec + p_vec
feat.extend(mf)
# d.t.ave
d_av_lst = []
for n in Mgraph.neighbors(prot):
if Pair(comp, n) in pair_to_value_y:
d_av_lst.append(pair_to_value_y[Pair(comp, n)])
if len(d_av_lst) > 0:
feat.append(np.mean(d_av_lst))
# t.d.ave
t_av_lst = []
for n in Mgraph.neighbors(comp):
if Pair(n, prot) in pair_to_value_y:
t_av_lst.append(pair_to_value_y[Pair(n, prot)])
if len(t_av_lst) > 0:
feat.append(np.mean(t_av_lst))
# d.t.bt, d.t.cl, d.t.ev
feat.append(btw_cent[comp])
feat.append(btw_cent[prot])
feat.append(cls_cent[comp])
feat.append(cls_cent[prot])
# feat.append(eig_cent[comp])
# feat.append(eig_cent[prot])
# d.t.pr
# feat.append(pagerank[comp])
# feat.append(pagerank[prot])
# add type 1 features
feat.extend(comp_feats[comp])
feat.extend(prot_feats[prot])
vec_lengths.append(len(feat))
# zero-pad all vectors to be of the same dimension
dim = max(vec_lengths)
for k in drug_target_feats_dict:
feat = drug_target_feats_dict[k]
pvec = [0] * (dim - len(feat))
feat.extend(pvec)
pbar.stop()
pbar.join()
print('SimBoost Drug-Target feature vector computation finished. Vector dimension={}'.format(dim))
return drug_target_feats_dict
def compute_type1_features(M, all_E, Edict, nbins):
"""
Computes type 1 features of a set of entities (E)
:param M:
:param Edict:
:param nbins:
:return:
A dict of entity-feature elements
"""
feats_dict = defaultdict(list)
for entity in all_E:
feat = feats_dict[entity]
# n.obs
feat.append(len(M[entity]))
# ave.sim
sim_scores = [Edict[Pair(entity, entity2)] for entity2 in all_E]
feat.append(np.mean(sim_scores))
# hist.sim
hist = np.histogram(sim_scores, bins=nbins)[0]
feat.extend(hist.tolist())
# ave.val in M
feat.append(np.mean(M[entity]))
return feats_dict
def compute_type2_features(type1_feats_dict, Edict, Egraph):
"""
Computes type 2 features of a set of entities whose type 1 features have already been computed.
:param type1_feats_dict:
:param Edict:
:param Egraph:
:return:
A dict of entity-feature elements
"""
feats_dict = defaultdict(list)
btw_cent = nx.betweenness_centrality(Egraph)
cls_cent = nx.closeness_centrality(Egraph)
eig_cent = nx.eigenvector_centrality(Egraph, tol=1e-5, max_iter=200)
pagerank = nx.pagerank(Egraph)
for node in Egraph.nodes():
feat = feats_dict[node]
# num.nb
neighbors = list(Egraph.neighbors(node))
feat.append(len(neighbors))
# k.sim
neighbors_sim_score = [Edict[Pair(node, neighbor)] for neighbor in neighbors]
feat.extend(neighbors_sim_score)
if len(neighbors) > 0:
# k.ave.feat
            neighs_t1_feat = np.array([type1_feats_dict[neighbor] for neighbor in neighbors])
"""
Module implements the perturbation class for atomic and lattice relaxation.
"""
import os
from typing import Union, List
import numpy as np
from monty.serialization import loadfn
from pymatgen.core.operations import SymmOp
from pymatgen.core.sites import Site, PeriodicSite
from pymatgen.core.structure import Structure, Lattice
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.groups import SpaceGroup, in_array_list
module_dir = os.path.dirname(__file__)
wyckoff_nums = loadfn(os.path.join(module_dir, "symmetry_rules", "wyckoff_nums.json"))
wyckoff_nums = {int(k): v for k, v in wyckoff_nums.items()}
wyckoff_dims = loadfn(os.path.join(module_dir, "symmetry_rules", "wyckoff_dims.json"))
wyckoff_dims = {int(k): v for k, v in wyckoff_dims.items()}
standard_modes = loadfn(os.path.join(module_dir, "symmetry_rules", "standard_modes.json"))
standard_modes = {int(k): v for k, v in standard_modes.items()}
perturbation_modes = loadfn(os.path.join(module_dir, "symmetry_rules", "perturbation_modes.json"))
perturbation_modes = {int(k): v for k, v in perturbation_modes.items()}
small_addup = np.array([1e-4] * 3)
perturbation_mapping = lambda x, fixed_indices: np.array(
[
0 if i in fixed_indices else x[np.argwhere(np.arange(3)[~np.isin(range(3), fixed_indices)] == i)[0][0]]
for i in range(3)
]
)
class WyckoffPerturbation:
"""
Perturbation class for determining the standard wyckoff position
and generating corresponding equivalent fractional coordinates.
"""
def __init__(
self, int_symbol: int, wyckoff_symbol: str, symmetry_ops: List[SymmOp] = None, use_symmetry: bool = True
):
"""
Args:
int_symbol (int): International number of space group.
wyckoff_symbol (str): Wyckoff symbol.
symmetry_ops (list): Full set of symmetry operations as matrices.
Use specific symmetry operations if initialized.
use_symmetry (bool): Whether to use constraint of symmetry to reduce
parameters space.
"""
self._site = None
self._fit_site = False
self.int_symbol = int_symbol
self.wyckoff_symbol = wyckoff_symbol
self.use_symmetry = use_symmetry
if self.use_symmetry:
self.standard_mode = eval(standard_modes[int_symbol][wyckoff_symbol])
self.dim = wyckoff_dims[int_symbol][wyckoff_symbol]
self.multiplicity = dict(zip(*wyckoff_nums[int_symbol]))[wyckoff_symbol]
self.perturbation_mode = eval(perturbation_modes[int_symbol][wyckoff_symbol])
self.symmetry_ops = (
SpaceGroup.from_int_number(int_symbol).symmetry_ops if not symmetry_ops else symmetry_ops
)
else:
self.standard_mode = eval("lambda p: True")
self.dim = 3
self.multiplicity = 1
self.perturbation_mode = eval("lambda x: x")
self.symmetry_ops = SpaceGroup.from_int_number(1).symmetry_ops
def get_orbit(self, p: Union[List, np.ndarray], tol: float = 1e-3) -> List[np.ndarray]:
"""
Returns the orbit for a point.
Args:
p (list/numpy.array): Fractional coordinated point.
tol (float): Tolerance for determining if sites are the same.
"""
orbit: List[np.ndarray] = []
for symm_op in self.symmetry_ops:
pp = symm_op.operate(p)
pp[(pp + np.ones(3) * tol) % 1.0 < tol] = 0.0
pp = np.mod(np.round(pp, decimals=10), 1)
if not in_array_list(orbit, pp, tol=tol):
orbit.append(pp)
return orbit
def sanity_check(self, site: Union[Site, PeriodicSite], wyc_tol: float = 0.3 * 1e-3) -> None:
"""
Check whether the perturbation mode exists.
Args:
site (PeriodicSite): PeriodicSite in Structure.
wyc_tol (float): Tolerance for wyckoff symbol determined coordinates.
"""
p = site.frac_coords
orbits = self.get_orbit(p, wyc_tol)
if len(orbits) != self.multiplicity:
return
for pp in orbits:
if self.standard_mode(pp):
self._site = site # type: ignore
self._fit_site = True
break
def standardize(self, p: Union[List, np.ndarray], tol: float = 1e-3) -> List[float]:
"""
Get the standardized position of p.
Args:
p (list/numpy.array): Fractional coordinated point.
tol (float): Tolerance for determining if sites are the same.
"""
pp: List[float] = []
orbits = self.get_orbit(p, tol)
for pp in orbits: # type: ignore
if self.standard_mode(pp):
break
return pp
@property
def site(self):
"""
Returns the site.
"""
return self._site
@property
def fit_site(self):
"""
Returns whether the site fits any standard wyckoff position.
"""
return self._fit_site
def __repr__(self):
if self._site is not None:
return "{}(spg_int_number={}, wyckoff_symbol={}) {} [{:.4f}, {:.4f}, {:.4f}]".format(
self.__class__.__name__,
self.int_symbol,
self.wyckoff_symbol,
self._site.species_string,
*self._site.frac_coords,
)
return "{}(spg_int_number={}, wyckoff_symbol={})".format(
self.__class__.__name__, self.int_symbol, self.wyckoff_symbol
)
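# Example (a sketch; space group 225 and Wyckoff letter "a" are illustrative and
# the call relies on the bundled symmetry_rules JSON files):
#
#   wp = WyckoffPerturbation(int_symbol=225, wyckoff_symbol="a")
#   orbit = wp.get_orbit([0.0, 0.0, 0.0])   # symmetry-equivalent fractional coordinates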
def crystal_system(int_number: int) -> str:
"""
Method for crystal system determination.
Args:
int_number (int): International number of space group.
"""
if int_number <= 2:
return "triclinic"
if int_number <= 15:
return "monoclinic"
if int_number <= 74:
return "orthorhombic"
if int_number <= 142:
return "tetragonal"
if int_number <= 167 and int_number not in [
143,
144,
145,
147,
149,
150,
151,
152,
153,
154,
156,
157,
158,
159,
162,
163,
164,
165,
]:
return "rhombohedral"
if int_number <= 194:
return "hexagonal"
return "cubic"
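# A few reference values of the mapping implemented above:
#   crystal_system(2)   -> "triclinic"
#   crystal_system(62)  -> "orthorhombic"
#   crystal_system(166) -> "rhombohedral"
#   crystal_system(225) -> "cubic"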
class LatticePerturbation:
"""
Perturbation class for determining the standard lattice.
"""
def __init__(self, spg_int_symbol: int, use_symmetry: bool = True):
"""
Args:
spg_int_symbol (int): International number of space group.
use_symmetry (bool): Whether to use constraint of symmetry to reduce
parameters space.
"""
self._lattice = None
self._fit_lattice = False
self.spg_int_symbol = spg_int_symbol
self.use_symmetry = use_symmetry
self.crys_system = crystal_system(spg_int_symbol)
def sanity_check(self, lattice: Lattice, abc_tol: float = 1e-3, angle_tol: float = 3e-1) -> None:
"""
Check whether the perturbation mode exists.
Args:
lattice (Lattice): Lattice in Structure.
abc_tol (float): Tolerance for lattice lengths determined by crystal system.
angle_tol (float): Tolerance for lattice angles determined by crystal system.
"""
abc = list(lattice.abc)
angles = list(lattice.angles)
def check(param, ref, tolerance):
return all(abs(i - j) < tolerance for i, j in zip(param, ref) if j is not None)
if not self.use_symmetry:
self.dims = (3, 3)
self.perturbation_mode = lambda x: x
self._lattice = lattice # type: ignore
self._abc = abc
self._fit_lattice = True
return
if self.crys_system == "cubic":
a = abc[0]
if not (check(abc, [a, a, a], abc_tol) and check(angles, [90, 90, 90], angle_tol)):
self._fit_lattice = False
return
self.dims = (1, 0)
self.perturbation_mode = lambda x: np.concatenate((np.repeat(x, repeats=3), np.zeros(3)))
self._lattice = lattice # type: ignore
self._abc = [abc[0]] # type: ignore
self._fit_lattice = True
return
if self.crys_system == "hexagonal":
if not (
np.any([(sum(abs(np.array(abc) - abc[i]) < abc_tol) == 2) for i in np.arange(3)])
and check(np.sort(angles), [90, 90, 120], angle_tol)
):
self._fit_lattice = False
return
self.dims = (2, 0)
indices = [int((sum(abs(np.array(abc) - abc[i]) < abc_tol) == 2)) for i in np.arange(3)]
self.perturbation_mode = lambda x: np.concatenate((x[indices], np.zeros(3)))
self._lattice = lattice # type: ignore
self._abc = [
abc[indices.index(0)], # type: ignore
abc[indices.index(1)],
] # type: ignore
self._fit_lattice = True
return
if self.crys_system == "rhombohedral":
a = abc[0]
alpha = angles[0]
if check(abc, [a, a, a], abc_tol) and check(angles, [alpha, alpha, alpha], angle_tol):
self.dims = (1, 1)
self.perturbation_mode = lambda x: np.concatenate(
(np.repeat(x[0], repeats=3), np.repeat(x[1], repeats=3))
)
self._lattice = lattice # type: ignore
self._abc = [a] # type: ignore
self._fit_lattice = True
return
if np.any([(sum(abs(np.array(abc) - abc[i]) < abc_tol) == 2) for i in np.arange(3)]) and check(
np.sort(angles), [90, 90, 120], angle_tol
):
self.dims = (2, 0)
indices = [int((sum(abs(np.array(abc) - abc[i]) < abc_tol) == 2)) for i in np.arange(3)]
self.perturbation_mode = lambda x: np.concatenate((x[indices], np.zeros(3)))
self._lattice = lattice # type: ignore
self._abc = [
abc[indices.index(0)], # type: ignore
abc[indices.index(1)],
] # type: ignore
self._fit_lattice = True
return
self._fit_lattice = False
return
if self.crys_system == "tetragonal":
if not check(angles, [90, 90, 90], angle_tol):
self._fit_lattice = False
return
if np.any([(sum(abs(np.array(abc) - abc[i]) < abc_tol) == 2) for i in np.arange(3)]):
self.dims = (2, 0)
                indices = [int((sum(abs(np.array(abc) - abc[i]) < abc_tol) == 2)) for i in np.arange(3)]
import numpy as np
import scipy.ndimage as ndi
def remove_small_region(input, threshold):
labels, nb_labels = ndi.label(input)
label_areas = np.bincount(labels.ravel())
too_small_labels = label_areas < threshold
too_small_mask = too_small_labels[labels]
input[too_small_mask] = 0
return input
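# Example (sketch): drop connected components smaller than `threshold` pixels.
#
#   mask = np.zeros((8, 8), dtype=np.uint8)
#   mask[0, 0] = 1        # isolated pixel, area 1  -> removed
#   mask[2:6, 2:6] = 1    # 4x4 block, area 16      -> kept
#   cleaned = remove_small_region(mask, threshold=10)
#   assert cleaned[0, 0] == 0 and cleaned[3, 3] == 1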
class RemoveSmallRegion(object):
def __init__(self, threshold):
self.threshold = threshold
def __call__(self, case):
case['label'] = remove_small_region(case['label'], self.threshold)
return case
def split_dim(input, axis=-1):
    sub_arr = np.split(input, input.shape[axis], axis=axis)
from typing import Dict, Set, Union
import numpy as np
from pydrake.all import ModelInstanceIndex, MultibodyPlant
from qsim.simulator import QuasistaticSimulator
from quasistatic_simulator_py import (QuasistaticSimulatorCpp)
from .dynamical_system import DynamicalSystem
class QuasistaticDynamics(DynamicalSystem):
def __init__(self, h: float, q_sim_py: QuasistaticSimulator,
q_sim: QuasistaticSimulatorCpp):
super().__init__()
self.h = h
self.q_sim_py = q_sim_py
self.q_sim = q_sim
self.plant = q_sim.get_plant()
self.dim_x = self.plant.num_positions()
self.dim_u = q_sim.num_actuated_dofs()
self.models_all = self.q_sim.get_all_models()
self.models_actuated = self.q_sim.get_actuated_models()
self.models_unactuated = self.q_sim.get_unactuated_models()
# TODO: distinguish between position indices and velocity indices for
# 3D systems.
self.position_indices = self.q_sim.get_velocity_indices()
self.velocity_indices = self.position_indices
# make sure that q_sim_py and q_sim have the same underlying plant.
self.check_plants(
plant_a=q_sim.get_plant(),
plant_b=q_sim_py.get_plant(),
models_all_a=q_sim.get_all_models(),
models_all_b=q_sim_py.get_all_models(),
velocity_indices_a=q_sim.get_velocity_indices(),
            velocity_indices_b=q_sim_py.get_velocity_indices())
@staticmethod
def check_plants(plant_a: MultibodyPlant, plant_b: MultibodyPlant,
models_all_a: Set[ModelInstanceIndex],
models_all_b: Set[ModelInstanceIndex],
velocity_indices_a: Dict[ModelInstanceIndex, np.ndarray],
velocity_indices_b: Dict[ModelInstanceIndex, np.ndarray]):
"""
Make sure that plant_a and plant_b are identical.
"""
assert models_all_a == models_all_b
for model in models_all_a:
name_a = plant_a.GetModelInstanceName(model)
name_b = plant_b.GetModelInstanceName(model)
assert name_a == name_b
idx_a = velocity_indices_a[model]
idx_b = velocity_indices_b[model]
            assert np.array_equal(idx_a, idx_b)
def get_u_indices_into_x(self):
u_indices = np.zeros(self.dim_u, dtype=int)
i_start = 0
for model in self.models_actuated:
indices = self.velocity_indices[model]
n_a_i = len(indices)
u_indices[i_start: i_start + n_a_i] = indices
i_start += n_a_i
return u_indices
def get_q_a_cmd_dict_from_u(self, u: np.ndarray):
q_a_cmd_dict = dict()
i_start = 0
for model in self.models_actuated:
n_v_i = self.plant.num_velocities(model)
q_a_cmd_dict[model] = u[i_start: i_start + n_v_i]
i_start += n_v_i
return q_a_cmd_dict
def get_q_dict_from_x(self, x: np.ndarray):
q_dict = {
model: x[n_q_indices]
for model, n_q_indices in self.position_indices.items()}
return q_dict
def get_x_from_q_dict(self, q_dict: Dict[ModelInstanceIndex, np.ndarray]):
x = np.zeros(self.dim_x)
for model, n_q_indices in self.position_indices.items():
x[n_q_indices] = q_dict[model]
return x
def get_u_from_q_cmd_dict(self,
q_cmd_dict: Dict[ModelInstanceIndex, np.ndarray]):
u = np.zeros(self.dim_u)
i_start = 0
for model in self.models_actuated:
n_v_i = self.plant.num_velocities(model)
u[i_start: i_start + n_v_i] = q_cmd_dict[model]
i_start += n_v_i
return u
def get_Q_from_Q_dict(self,
Q_dict: Dict[ModelInstanceIndex, np.ndarray]):
Q = np.eye(self.dim_x)
for model, idx in self.velocity_indices.items():
Q[idx, idx] = Q_dict[model]
return Q
def get_R_from_R_dict(self,
R_dict: Dict[ModelInstanceIndex, np.ndarray]):
R = np.eye(self.dim_u)
i_start = 0
for model in self.models_actuated:
n_v_i = self.plant.num_velocities(model)
R[i_start: i_start + n_v_i, i_start: i_start + n_v_i] = \
np.diag(R_dict[model])
i_start += n_v_i
return R
def publish_trajectory(self, x_traj):
q_dict_traj = [self.get_q_dict_from_x(x) for x in x_traj]
self.q_sim_py.animate_system_trajectory(h=self.h,
q_dict_traj=q_dict_traj)
def dynamics_py(self, x: np.ndarray, u: np.ndarray, mode: str = 'qp_mp',
requires_grad: bool = False,
grad_from_active_constraints: bool = False):
"""
:param x: the position vector of self.q_sim.plant.
:param u: commanded positions of models in
self.q_sim.models_actuated, concatenated into one vector.
"""
q_dict = self.get_q_dict_from_x(x)
q_a_cmd_dict = self.get_q_a_cmd_dict_from_u(u)
tau_ext_dict = self.q_sim_py.calc_tau_ext([])
self.q_sim_py.update_mbp_positions(q_dict)
q_next_dict = self.q_sim_py.step(
q_a_cmd_dict, tau_ext_dict, self.h,
mode=mode, requires_grad=requires_grad,
grad_from_active_constraints=grad_from_active_constraints)
return self.get_x_from_q_dict(q_next_dict)
def dynamics(self, x: np.ndarray, u: np.ndarray, requires_grad: bool = False,
grad_from_active_constraints: bool = True):
"""
:param x: the position vector of self.q_sim.plant.
:param u: commanded positions of models in
self.q_sim.models_actuated, concatenated into one vector.
"""
q_dict = self.get_q_dict_from_x(x)
q_a_cmd_dict = self.get_q_a_cmd_dict_from_u(u)
tau_ext_dict = self.q_sim.calc_tau_ext([])
self.q_sim.update_mbp_positions(q_dict)
self.q_sim.step(
q_a_cmd_dict, tau_ext_dict, self.h,
self.q_sim_py.sim_params.contact_detection_tolerance,
requires_grad=requires_grad,
grad_from_active_constraints=grad_from_active_constraints)
q_next_dict = self.q_sim.get_mbp_positions()
return self.get_x_from_q_dict(q_next_dict)
def dynamics_batch(self, x, u):
"""
Batch dynamics. Uses pytorch for
-args:
x (np.array, dim: B x n): batched state
u (np.array, dim: B x m): batched input
-returns:
x_next (np.array, dim: B x n): batched next state
"""
n_batch = x.shape[0]
x_next = np.zeros((n_batch, self.dim_x))
for i in range(n_batch):
x_next[i] = self.dynamics(x[i], u[i])
return x_next
def jacobian_xu(self, x, u):
AB = np.zeros((self.dim_x, self.dim_x + self.dim_u))
self.dynamics(x, u, requires_grad=True)
AB[:, :self.dim_x] = self.q_sim.get_Dq_nextDq()
AB[:, self.dim_x:] = self.q_sim.get_Dq_nextDqa_cmd()
return AB
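    # Sketch of how the stacked Jacobian is used for linearisation (notation only,
    # no extra functionality): q_next ~= f(q_bar, u_bar) + A (q - q_bar) + B (u - u_bar),
    # where A = Dq_next/Dq occupies the first dim_x columns of AB and
    # B = Dq_next/Dq_a_cmd the last dim_u columns, as returned above.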
def calc_AB_exact(self, x_nominal: np.ndarray, u_nominal: np.ndarray):
return self.jacobian_xu(x_nominal, u_nominal)
def calc_AB_first_order(self, x_nominal: np.ndarray, u_nominal: np.ndarray,
n_samples: int, std_u: Union[np.ndarray, float]):
"""
x_nominal: (n_x,) array, 1 state.
u_nominal: (n_u,) array, 1 input.
"""
# np.random.seed(2021)
        du = np.random.normal(0, std_u, size=[n_samples, self.dim_u])
#!/usr/bin/env python3
# Using am_sensors/simulatedSensors
# [TODO]
# - Differentiate between std in static or moving behaviour
import math
from math import sin, cos, pi
import rospy
import tf
from std_msgs.msg import Header
from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3, PoseWithCovariance, PoseWithCovarianceStamped, TwistWithCovariance
from sensor_msgs.msg import NavSatFix, Imu
from am_driver.msg import WheelEncoder
from am_driver.msg import SensorStatus, CurrentStatus
from nav_msgs.msg import Odometry
import threading
import matplotlib.pyplot as plt
from matplotlib import colors
import matplotlib as mpl
import seaborn as sns
from scipy.spatial.transform import Rotation as Rot
import numpy as np
import pymap3d as pm
# import the random module
import random
class AEKF_Sim():
def __init__(self):
# Define name of the Node
rospy.init_node("AEKF5_Sim", anonymous=True)
# Define the run type of the Filter
self.test = True
self.print = False
self.ros = True
# Define the self.lock to allow multi-threading
self.lock = threading.Lock()
# Check if the filter is ready to start
self.filter = False
# Get the current time
now = rospy.get_time()
# Kalman states
self.x_t = 0.0
self.y_t = 0.0
self.yaw_t = 0.0
self.x_dot_t = 0.0
self.yaw_dot_t = 0.0
# Frequency of the Kalman filter
self.rate = 250
# Steps to slowly account the control input
self.steps = 250
# State-Vector
self.X_t = np.array([self.x_t, self.y_t, self.yaw_t,
self.x_dot_t, self.yaw_dot_t])
self.X_Pred = self.X_t
self.X_control = np.array([self.x_t, self.y_t, self.yaw_t,
self.x_dot_t, self.yaw_dot_t])
self.X_wheel_odom = np.array([self.x_t, self.y_t, self.yaw_t,
self.x_dot_t, self.yaw_dot_t])
self.X_visual_odom = np.array([self.x_t, self.y_t, self.yaw_t,
self.x_dot_t, self.yaw_dot_t])
# Filter Covariance Matrix
self.P_t = np.eye(5)*1e-5
self.P_Pred = self.P_t
# Filter Innovation Matrix
self.K = np.diag(np.zeros(5))
# Initialise Measurements Vector
self.Z = np.array([])
# Initialise Measurements Covariance Matrix
self.R = np.array([])
# Initialise Measurements Matrix
self.H = np.zeros((5,0))
# Initialise Measurements Jacobian Matrix
self.J_H = np.zeros((5,0))
print("Initialised AEKF_Sim")
# Define set of topics to subscribe to
rospy.Subscriber('Odom_Ground', Odometry, self.GroundTruth)
self.ground_state = np.array([0.0, 0.0, 0.0, 0.0, 0.0])
rospy.Subscriber('cmd_vel', Twist, self.Control)
self.control_fusion = True
self.control_measure = False
self.control_t = -1
self.control_state = np.array([0.0, 0.0])
rospy.Subscriber('current_status', CurrentStatus, self.CurrentStatus)
self.current_status = 1
rospy.Subscriber('/wheel_odometry/odom', Vector3, self.WheelOdometer)
self.wheel_fusion = True
self.wheel_odometer_measure = False
self.wheel_odometer_state = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.wheel_odometer_bias = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.wheel_odometer_var = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
rospy.Subscriber('/visual_odometry/odom', Vector3, self.VisualOdometer)
self.visual_fusion = True
self.visual_odometer_measure = False
self.visual_odometer_state = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.visual_odometer_bias = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.visual_odometer_var = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.gps_th = 0
rospy.Subscriber('automower_gps/GPSfix', Vector3, self.GPS)
self.gps_fusion = True
self.gps_measure = False
self.gps_state = np.array([0.0, 0.0, 0.0])
self.gps_bias = np.array([0.0, 0.0, 0.0])
self.gps_var = np.array([0.0, 0.0, 0.0])
rospy.Subscriber('gps_left/NMEA_fix', Vector3, self.gps_left)
self.gps_left_fusion = True
self.gps_left_measure = False
self.gps_left_hz = 5
self.gps_left_state = np.array([0.0, 0.0, 0.0])
self.gps_left_bias = np.array([0.0, 0.0, 0.0])
self.gps_left_var = np.array([0.0, 0.0, 0.0])
rospy.Subscriber('gps_right/NMEA_fix', Vector3, self.gps_right)
self.gps_right_fusion = True
self.gps_right_measure = False
self.gps_right_hz = 5
self.gps_right_state = np.array([0.0, 0.0, 0.0])
self.gps_right_bias = np.array([0.0, 0.0, 0.0])
self.gps_right_var = np.array([0.0, 0.0, 0.0])
rospy.Subscriber('imu_left/data_raw', Vector3, self.ImuLeft)
self.imu_left_fusion = True
self.imu_left_measure = False
self.imu_left_t = now
self.imu_left_state = np.array([0.0, 0.0, 0.0])
self.imu_left_bias = np.array([0.0, 0.0, 0.0])
self.imu_left_var = np.array([np.deg2rad(10), 0.05, 2.5])
rospy.Subscriber('imu_right/data_raw', Vector3, self.ImuRight)
self.imu_right_fusion = True
self.imu_right_measure = False
self.imu_right_t = now
self.imu_right_state = np.array([0.0, 0.0, 0.0])
self.imu_right_bias = np.array([0.0, 0.0, 0.0])
self.imu_right_var = np.array([np.deg2rad(10), 0.05, 2.5])
# Define set of topics to publish
if(self.test and self.ros):
self.odom_control_pub = rospy.Publisher('Odom_Control', Odometry, queue_size=20)
self.odom_wheel_pub = rospy.Publisher('Odom_Wheel_Sim', Odometry, queue_size=20)
self.odom_visual_pub = rospy.Publisher('Odom_Visual_Sim', Odometry, queue_size=20)
self.odom_aekf_sim_pub = rospy.Publisher('Odom_AEKF_Sim', Odometry, queue_size=20)
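    # Kinematic model discretised below (state X = [x, y, yaw, x_dot, yaw_dot]):
    #   x_{k+1}   = x_k   + x_dot_k  * cos(yaw_k) * dt
    #   y_{k+1}   = y_k   + x_dot_k  * sin(yaw_k) * dt
    #   yaw_{k+1} = yaw_k + yaw_dot_k * dt
    # with x_dot and yaw_dot held constant between steps; this is exactly what the
    # state-transition matrix A_t in Predict() encodes.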
# Prediction step with only the kinematic model
def Predict(self, dt):
        dt = 0.004 # fixed step to avoid numerical issues and keep a constant-rate discrete-time system
# State-Transition Matrix
A_t = np.array([[1.0, 0.0, 0.0, cos(self.X_t[2])*dt, 0.0],
[0.0, 1.0, 0.0, sin(self.X_t[2])*dt, 0.0],
[0.0, 0.0, 1.0, 0.0, dt],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0]])
A_control = np.array([[1.0, 0.0, 0.0, cos(self.X_control[2])*dt, 0.0],
[0.0, 1.0, 0.0, sin(self.X_control[2])*dt, 0.0],
[0.0, 0.0, 1.0, 0.0, dt],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0]])
A_wheel_odom = np.array([[1.0, 0.0, 0.0, cos(self.X_wheel_odom[2])*dt, 0.0],
[0.0, 1.0, 0.0, sin(self.X_wheel_odom[2])*dt, 0.0],
[0.0, 0.0, 1.0, 0.0, dt],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0]])
A_visual_odom = np.array( [[1.0, 0.0, 0.0, cos(self.X_visual_odom[2])*dt, 0.0],
[0.0, 1.0, 0.0, sin(self.X_visual_odom[2])*dt, 0.0],
[0.0, 0.0, 1.0, 0.0, dt],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0]])
# Jacobian of Transition Matrix
J_A = np.array([[1.0, 0.0, 0.0, -sin(self.X_t[2])*dt, 0.0],
[0.0, 1.0, 0.0, cos(self.X_t[2])*dt, 0.0],
[0.0, 0.0, 1.0, 0.0, dt ],
[0.0, 0.0, 0.0, 1.0, 0.0 ],
[0.0, 0.0, 0.0, 0.0, 1.0 ]])
# Prediction Covariance
sigma = ( ( abs(self.imu_left_state[2]) + abs(self.imu_right_state[2]) ) / 2 ) ** 2
# Q2 Complete
Q = np.array([ [(sin(self.X_t[2])**2)*(dt**2)/2, -(sin(self.X_t[2])*cos(self.X_t[2]))*(dt**2)/2, 0.0, -(sin(self.X_t[2]))*(dt**2), 0.0 ],
[-(sin(self.X_t[2])*cos(self.X_t[2]))*(dt**2)/2, (cos(self.X_t[2])**2)*(dt**2)/2, 0.0, (cos(self.X_t[2]))*(dt**2), 0.0 ],
[0.0, 0.0, dt**2/2, 0.0, dt**2],
[-(sin(self.X_t[2]))*(dt**2), (cos(self.X_t[2]))*(dt**2), 0.0, dt, 0.0 ],
[0.0, 0.0, dt**2, 0.0, dt ]]) * sigma
# Check control difference
u_t = np.array([0.0,
0.0,
0.0,
(self.control_state[0] - self.X_t[3]),
(self.control_state[1] - self.X_t[4])])
u_control = np.array([0.0,
0.0,
0.0,
(self.control_state[0] - self.X_control[3]),
(self.control_state[1] - self.X_control[4])])
u_wheel_odom = np.array([0.0,
0.0,
0.0,
(self.wheel_odometer_state[0] - self.X_wheel_odom[3]),
(self.wheel_odometer_state[1] - self.X_wheel_odom[4])])
u_visual_odom = np.array([0.0,
0.0,
0.0,
(self.visual_odometer_state[0] - self.X_visual_odom[3]),
(self.visual_odometer_state[1] - self.X_visual_odom[4])])
B = np.diag(np.array([0,0,0, dt, dt]))
# Make sure the execution is safe
self.lock.acquire()
try:
# Control data
self.X_control = A_control @ self.X_control + u_control
# Wheel Odom data
self.X_wheel_odom = A_wheel_odom @ self.X_wheel_odom + u_wheel_odom
            # Visual odom data
self.X_visual_odom = A_visual_odom @ self.X_visual_odom + u_visual_odom
# Prediction State
if(self.control_fusion):
self.X_Pred = A_t @ self.X_t + B @ u_t
# Prediction Covariance Matrix
self.P_Pred = J_A @ self.P_t @ J_A.T + Q
else:
self.X_Pred = A_t @ self.X_t
# Prediction Covariance Matrix
self.P_Pred = J_A @ self.P_t @ J_A.T + Q
finally:
self.lock.release() # release self.lock, no matter what
if(self.test and self.ros):
# Send the Update of the Control to ROS
header = Header()
header.stamp = rospy.Time.now() # Note you need to call rospy.init_node() before this will work
header.frame_id = "odom"
# since all odometry is 6DOF we'll need a quaternion created from yaw
odom_quat = tf.transformations.quaternion_from_euler(0, 0, self.X_control[2])
# next, we'll publish the pose message over ROS
pose = Pose(Point(self.X_control[0], self.X_control[1], 0.), Quaternion(*odom_quat))
pose_covariance = [0] * 36
pose_control = PoseWithCovariance(pose, pose_covariance)
# next, we'll publish the pose message over ROS
twist = Twist(Vector3(self.X_control[3], 0, 0),Vector3(0.0, 0.0, self.X_control[4]))
twist_covariance = [0] * 36
twist_control = TwistWithCovariance(twist, twist_covariance)
odom_control = Odometry(header, "base_link", pose_control, twist_control)
# publish the message
self.odom_control_pub.publish(odom_control)
if(self.test and self.ros):
# Send the Update of the Odometry to Ros
header = Header()
header.stamp = rospy.Time.now() # Note you need to call rospy.init_node() before this will work
header.frame_id = "odom"
# since all odometry is 6DOF we'll need a quaternion created from yaw
odom_quat = tf.transformations.quaternion_from_euler(0, 0, self.X_wheel_odom[2])
# next, we'll publish the pose message over ROS
pose = Pose(Point(self.X_wheel_odom[0], self.X_wheel_odom[1], 0.), Quaternion(*odom_quat))
pose_covariance = [0] * 36
pose_odom = PoseWithCovariance(pose, pose_covariance)
# next, we'll publish the pose message over ROS
twist = Twist(Vector3(self.X_wheel_odom[3], 0, 0),Vector3(0.0, 0.0, self.X_wheel_odom[4]))
twist_covariance = [0] * 36
twist_odom = TwistWithCovariance(twist, twist_covariance)
odom_odom = Odometry(header, "base_link", pose_odom, twist_odom)
# publish the message
self.odom_wheel_pub.publish(odom_odom)
if(self.test and self.ros):
# Send the Update of the Odometry to Ros
header = Header()
header.stamp = rospy.Time.now() # Note you need to call rospy.init_node() before this will work
header.frame_id = "odom"
# since all odometry is 6DOF we'll need a quaternion created from yaw
odom_quat = tf.transformations.quaternion_from_euler(0, 0, self.X_visual_odom[2])
# next, we'll publish the pose message over ROS
pose = Pose(Point(self.X_visual_odom[0], self.X_visual_odom[1], 0.), Quaternion(*odom_quat))
pose_covariance = [0] * 36
pose_odom = PoseWithCovariance(pose, pose_covariance)
# next, we'll publish the pose message over ROS
twist = Twist(Vector3(self.X_visual_odom[3], 0, 0),Vector3(0.0, 0.0, self.X_visual_odom[4]))
twist_covariance = [0] * 36
twist_odom = TwistWithCovariance(twist, twist_covariance)
odom_odom = Odometry(header, "base_link", pose_odom, twist_odom)
# publish the message
self.odom_visual_pub.publish(odom_odom)
if(self.test or self.ros):
# Send the Prediction to Ros
header = Header()
header.stamp = rospy.Time.now() # Note you need to call rospy.init_node() before this will work
header.frame_id = "odom"
# since all odometry is 6DOF we'll need a quaternion created from yaw
odom_quat = tf.transformations.quaternion_from_euler(0, 0, self.X_Pred[2])
# next, we'll publish the pose message over ROS
pose = Pose(Point(self.X_Pred[0], self.X_Pred[1], 0.), Quaternion(*odom_quat))
pose_covariance = [0] * 36
pose_covariance[0] = self.P_Pred[0][0]
pose_covariance[1] = self.P_Pred[0][1]
pose_covariance[5] = self.P_Pred[0][2]
pose_covariance[6] = self.P_Pred[1][0]
pose_covariance[7] = self.P_Pred[1][1]
pose_covariance[11] = self.P_Pred[1][2]
pose_covariance[30] = self.P_Pred[2][0]
pose_covariance[31] = self.P_Pred[2][1]
pose_covariance[35] = self.P_Pred[2][2]
pose_ekf = PoseWithCovariance(pose, pose_covariance)
# next, we'll publish the pose message over ROS
twist = Twist(Vector3(self.X_Pred[3], 0, 0),Vector3(0.0, 0.0, self.X_Pred[4]))
twist_covariance = [0] * 36
twist_covariance[0] = self.P_Pred[3][3]
twist_covariance[5] = self.P_Pred[3][4]
twist_covariance[30] = self.P_Pred[4][3]
twist_covariance[35] = self.P_Pred[4][4]
twist_ekf = TwistWithCovariance(twist, twist_covariance)
odom_ekf = Odometry(header, "base_link", pose_ekf, twist_ekf)
# publish the message
self.odom_aekf_sim_pub.publish(odom_ekf)
# Prediction step without measurement updates
def UpdateNoMeasures(self):
# Make sure the execution is safe
self.lock.acquire()
try:
self.X_t = self.X_Pred
self.P_t = self.P_Pred
finally:
self.lock.release() # release self.lock, no matter what
if(self.test and self.print):
print("UpdateNoMeasures \t" + str(self.X_t[0:3]))
# Update step with the measurements
def Update(self):
# Check if there are more updates
if(self.wheel_odometer_measure or self.visual_odometer_measure or self.imu_left_measure or self.imu_right_measure or self.gps_measure or self.gps_left_measure >= self.gps_left_hz or self.gps_right_measure >= self.gps_right_hz ):
# Debug test
text = "Update "
# Make sure the execution is safe
self.lock.acquire()
try:
if(self.wheel_odometer_measure):
self.Z = np.append(self.Z, np.array([self.wheel_odometer_state[0], self.wheel_odometer_state[1]]))
self.R = np.append(self.R, np.array([self.wheel_odometer_state[2], self.wheel_odometer_state[3]]))
self.H = np.column_stack([self.H, np.array([0,0,0,1,0]), np.array([0,0,0,0,1])])
self.J_H = np.column_stack([self.J_H, np.array([0,0,0,1,0]), np.array([0,0,0,0,1])])
text += " WOdom "
else:
text += " \t "
if(self.visual_odometer_measure):
self.Z = np.append(self.Z, np.array([self.visual_odometer_state[0], self.visual_odometer_state[1]]))
self.R = np.append(self.R, np.array([self.visual_odometer_state[2], self.visual_odometer_state[3]]))
self.H = np.column_stack([self.H, np.array([0,0,0,1,0]), np.array([0,0,0,0,1])])
self.J_H = np.column_stack([self.J_H, np.array([0,0,0,1,0]), np.array([0,0,0,0,1])])
text += " VOdom "
else:
text += " \t "
if(self.gps_measure):
self.Z = np.append(self.Z, np.array([self.gps_state[0], self.gps_state[1], self.gps_state[2]]))
self.R = np.append(self.R, np.array([self.gps_state[3], self.gps_state[4], self.gps_state[5]]))
self.H = np.column_stack([self.H, np.array([1,0,0,0,0]), np.array([0,1,0,0,0]), np.array([0,0,1,0,0])])
self.J_H = np.column_stack([self.J_H, np.array([1,0,0,0,0]), np.array([0,1,0,0,0]), np.array([0,0,1,0,0])])
text += " GPS "
else:
text += " \t "
if(self.gps_left_measure):
self.Z = np.append(self.Z, np.array([self.gps_left_state[0], self.gps_left_state[1]]))
self.R = np.append(self.R, np.array([self.gps_left_state[3], self.gps_left_state[4]]))
self.H = np.column_stack([self.H, np.array([1,0,0,0,0]), np.array([0,1,0,0,0])])
self.J_H = np.column_stack([self.J_H, np.array([1,0,0,0,0]), np.array([0,1,0,0,0])])
self.gps_left_measure = False
text += " gps_l "
else:
text += " \t "
if(self.gps_right_measure):
self.Z = np.append(self.Z, np.array([self.gps_right_state[0], self.gps_right_state[1]]))
self.R = np.append(self.R, np.array([self.gps_right_state[3], self.gps_right_state[4]]))
self.H = np.column_stack([self.H, np.array([1,0,0,0,0]), np.array([0,1,0,0,0])])
self.J_H = np.column_stack([self.J_H, np.array([1,0,0,0,0]), np.array([0,1,0,0,0])])
self.gps_right_measure = False
text += " gps_r "
else:
text += " \t "
if(self.imu_left_measure):
self.Z = np.append(self.Z, np.array([self.imu_left_state[1], self.imu_left_state[2]]))
self.R = np.append(self.R, np.array([self.imu_left_state[4], self.imu_left_state[5]]))
self.H = np.column_stack([self.H, np.array([0,0,0,0,1]), np.array([0,0,0,0,0])])
self.J_H = np.column_stack([self.J_H, np.array([0,0,0,0,1]), np.array([0,0,0,0,0])])
text += " imu_l "
else:
text += " \t "
if(self.imu_right_measure):
self.Z = np.append(self.Z, np.array([self.imu_right_state[1], self.imu_right_state[2]]))
self.R = np.append(self.R, np.array([self.imu_right_state[4], self.imu_right_state[5]]))
self.H = np.column_stack([self.H, np.array([0,0,0,0,1]), np.array([0,0,0,0,0])])
self.J_H = np.column_stack([self.J_H, np.array([0,0,0,0,1]), np.array([0,0,0,0,0])])
text += " imu_r "
else:
text += " \t "
# Reset Measurements check
self.wheel_odometer_measure = self.visual_odometer_measure = self.gps_measure = self.imu_left_measure = self.imu_right_measure = False
# Transpose matrices after their creation
Update_H = self.H.T
Update_J_H = self.J_H.T
Update_R = np.diag(self.R)
# Store measurements vector for update
Update_Z = self.Z
# Initialise Measurements Vector
                self.Z = np.array([])
import numpy as np
from scipy.linalg import block_diag
import sys
sys.path.insert(1,'../')
from stderr_calibration import MinDist
"""Unit tests of minimum distance inference functions
Intended for use with the "pytest" testing framework
"""
# Define small-scale problem used for some tests
G = np.array([[1,0],[1,1],[0,2]])
h = lambda x: x @ G.T
theta = np.array([1,1])
mu = h(theta)
sigma = np.array([1,2,0.5])
V_fullinfo = sigma.reshape(-1,1) * np.array([[1,0.5,0.5],[0.5,1,0.5],[0.5,0.5,1]]) * sigma
V_blockdiag = V_fullinfo.copy()
V_blockdiag[0,1:] = np.nan
V_blockdiag[1:,0] = np.nan
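# For reference, with theta = (1, 1) the implied moments are mu = G @ theta = (1, 2, 2).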
# Define test cases
def test_closedform():
"""Test closed-form formulas
"""
obj = MinDist(h,mu,moment_se=sigma)
# Estimation with default weight matrix
res = obj.fit(opt_init=np.array(np.zeros(theta.shape)), eff=False)
W = np.diag(1/sigma**2)
aux = np.linalg.solve(G.T @ W @ G, G.T @ W).T
np.testing.assert_allclose(res['moment_loadings'], aux)
np.testing.assert_allclose(res['estim_se'], sigma @ np.abs(aux))
# Efficient estimation (see formulas in paper appendix)
res_eff = obj.fit(opt_init=np.array(np.zeros(theta.shape)))
if sigma[0]*np.abs(G[1,0]*G[2,1]) <= sigma[1]*np.abs(G[0,0]*G[2,1])+sigma[2]*np.abs(G[0,0]*G[1,1]):
        x = np.array([1/G[0,0], 0, 0])
#! /usr/bin/env python
# calculate average gradient for a certain delay
# if third argument is 'npz', then assume we have compressed numpy files
import sys, glob, os, re
import numpy as np
grad_dir = sys.argv[1]
delay = int(sys.argv[2])
if len(sys.argv) > 3:
    if sys.argv[3] != 'npz':
        raise ValueError("Third argument should be 'npz' if the gradient matrices have been compressed.")
    extension = 'npz'
else:
extension = 'npy'
list_grads = glob.glob('{0}/timestep_*/delay_{1}/*.{2}'.format(grad_dir, delay, extension))
tmp_sum = 0.0
tmp_denom = 0.0
for grad_matrix in list_grads:
if extension == 'npy':
        tmp_sum += np.load(grad_matrix)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2017 bily Huazhong University of Science and Technology
#
# Distributed under terms of the MIT license.
"""Class for tracking using a track model."""
# old cls branch + new reg branch
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os.path as osp
import numpy as np
import cv2
from cv2 import imwrite
from copy import deepcopy
from utils.infer_utils import convert_bbox_format, Rectangle
from utils.misc_utils import get_center, get
from scripts.anchor_related import Anchor
# choose model:
# model 1: normal SSD with signal expanded
# model 2: normal SSD without signal expanded
# model 3: siamrpn's encode method
MODEL = 2
class TargetState(object):
"""Represent the target state."""
def __init__(self, bbox, search_pos, scale_idx):
self.bbox = bbox # (cx, cy, w, h) in the original image
self.search_pos = search_pos # target center position in the search image
self.scale_idx = scale_idx # scale index in the searched scales
class Tracker(object):
"""Tracker based on the siamese model."""
def __init__(self, siamese_model, model_config, track_config):
self.siamese_model = siamese_model
self.model_config = model_config
self.track_config = track_config
self.num_scales = track_config['num_scales']
logging.info('track num scales -- {}'.format(self.num_scales))
scales = np.arange(self.num_scales) - get_center(self.num_scales)
self.search_factors = [self.track_config['scale_step'] ** x for x in scales]
self.x_image_size = track_config['x_image_size'] # Search image size
self.window = None # Cosine window
self.log_level = track_config['log_level']
self.anchor_op = Anchor(17, 17)
self.anchors = self.anchor_op.anchors
self.anchors = self.anchor_op.corner_to_center(self.anchors)
def track(self, sess, first_bbox, frames, logdir='/tmp'):
"""Runs tracking on a single image sequence."""
# Get initial target bounding box and convert to center based
bbox = convert_bbox_format(first_bbox, 'center-based')
# Feed in the first frame image to set initial state.
bbox_feed = [bbox.y, bbox.x, bbox.height, bbox.width]
input_feed = [frames[0], bbox_feed]
frame2crop_scale = self.siamese_model.initialize(sess, input_feed)
# Storing target state
original_target_height = bbox.height
original_target_width = bbox.width
search_center = np.array([get_center(self.x_image_size),
get_center(self.x_image_size)])
current_target_state = TargetState(bbox=bbox,
search_pos=search_center,
scale_idx=int(get_center(self.num_scales)))
include_first = get(self.track_config, 'include_first', False)
logging.info('Tracking include first -- {}'.format(include_first))
# Run tracking loop
reported_bboxs = []
# Read the ground truth and draw it on the image crop to verify the regression boxes
# f = open('./__Data/tracking-one-Curation/Data/VID/train/a/202008280001/track.txt')
# gt_box = f.readlines()
for i, filename in enumerate(frames):
if i > 0 or include_first: # We don't really want to process the first image unless intended to do so.
# current_target_state: bbox information from the previous frame
bbox_feed = [current_target_state.bbox.y, current_target_state.bbox.x,
current_target_state.bbox.height, current_target_state.bbox.width]
input_feed = [filename, bbox_feed]
# Feed the current frame and the previous frame's bbox into the model to get the response map, then run sliding-window detection on the current frame
outputs, metadata = self.siamese_model.inference_step(sess, input_feed)
search_scale_list = outputs['scale_xs'] # scale factors
response = outputs['response_up']
response_size = response.shape[1]
reg_pred = outputs['reg_pred']
# Choose the scale whose response map has the highest peak
if self.num_scales > 1:
response_max = np.max(response, axis=(1, 2))
import numpy as np
import cv2
import os
import pickle
import torch as t
import h5py
import pandas as pd
from NNsegmentation.models import Segment
from NNsegmentation.data import predict_whole_map
from SingleCellPatch.extract_patches import within_range
from pipeline.segmentation import instance_clustering
from SingleCellPatch.generate_trajectories import frame_matching
import matplotlib
from matplotlib import cm
matplotlib.use('AGG')
import matplotlib.pyplot as plt
from matplotlib.ticker import NullLocator
import seaborn as sns
import imageio
from HiddenStateExtractor.vq_vae import VQ_VAE, CHANNEL_MAX, CHANNEL_VAR, prepare_dataset
from HiddenStateExtractor.naive_imagenet import read_file_path, DATA_ROOT
from HiddenStateExtractor.morphology_clustering import select_clean_trajecteories, Kmean_on_short_trajs
from HiddenStateExtractor.movement_clustering import save_traj
import statsmodels.api as sm
import scipy
RAW_DATA_PATH = '/mnt/comp_micro/Projects/CellVAE/Combined'
sites = ['D%d-Site_%d' % (i, j) for j in range(9) for i in range(3, 6)]
def enhance_contrast(mat, a=1.5, b=-10000):
mat2 = cv2.addWeighted(mat, a, mat, 0, b)
return mat2
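# Minimal usage sketch (added; the file path and dtype are illustrative assumptions):
# img = cv2.imread('example_cell.tif', cv2.IMREAD_UNCHANGED)  # 16-bit microscopy frame
# img_enhanced = enhance_contrast(img, a=1.5, b=-10000)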
def plot_patch(sample_path, out_path, boundary=False, channel=0):
with h5py.File(sample_path, 'r') as f:
mat = np.array(f['masked_mat'][:, :, channel].astype('uint16'))
mask = np.array(f['masked_mat'][:, :, 2].astype('uint16'))
mat2 = enhance_contrast(mat, 1.5, -10000)
cv2.imwrite(out_path, mat2)
feat = 'save_0005_before'
fs = sorted(pickle.load(open('./HiddenStateExtractor/file_paths_bkp.pkl', 'rb')))
trajs = pickle.load(open('./HiddenStateExtractor/trajectory_in_inds.pkl', 'rb'))
dats_ = pickle.load(open('./HiddenStateExtractor/%s_PCA.pkl' % feat, 'rb'))
sizes = pickle.load(open(DATA_ROOT + '/Data/EncodedSizes.pkl', 'rb'))
ss = [sizes[f][0] for f in fs]
PC1_vals = dats_[:, 0]
PC1_range = (np.quantile(PC1_vals, 0.4), np.quantile(PC1_vals, 0.6))
PC2_vals = dats_[:, 1]
PC2_range = (np.quantile(PC2_vals, 0.4), np.quantile(PC2_vals, 0.6))
# PC1
vals = dats_[:, 0]
path = '/data/michaelwu/CellVAE/PC_samples/PC1'
val_std = np.std(vals)
thr0 = np.quantile(vals, 0.1)
thr1 = np.quantile(vals, 0.9)
samples0 = [f for i, f in enumerate(fs) if vals[i] < thr0]
samples1 = [f for i, f in enumerate(fs) if vals[i] > thr1]
sample_ts = []
for t in trajs:
traj_PCs = np.array([vals[ind] for ind in trajs[t]])
start = np.mean(traj_PCs[:3])
end = np.mean(traj_PCs[-3:])
traj_PC_diff = traj_PCs[1:] - traj_PCs[:-1]
if np.abs(end - start) > 1.2 * val_std and np.median(traj_PC_diff) < 0.5 * val_std:
sample_ts.append(t)
np.random.seed(123)
for i, f in enumerate(np.random.choice(samples0, (10,), replace=False)):
plot_patch(f, path + '/sample_low_%d.png' % i)
for i, f in enumerate(np.random.choice(samples1, (10,), replace=False)):
plot_patch(f, path + '/sample_high_%d.png' % i)
for t in np.random.choice(sample_ts, (10,), replace=False):
save_traj(t, path + '/sample_traj_%s.gif' % t.replace('/', '_'))
# PC2, controlling for PC1
vals = dats_[:, 1]
path = '/data/michaelwu/CellVAE/PC_samples/PC2'
vals_filtered = [v for i, v in enumerate(vals) if PC1_range[0] < PC1_vals[i] < PC1_range[1]]
val_std = np.std(vals_filtered)
thr0 = np.quantile(vals_filtered, 0.1)
thr1 = np.quantile(vals_filtered, 0.9)
samples0 = [f for i, f in enumerate(fs) if vals[i] < thr0 and PC1_range[0] < PC1_vals[i] < PC1_range[1]]
samples1 = [f for i, f in enumerate(fs) if vals[i] > thr1 and PC1_range[0] < PC1_vals[i] < PC1_range[1]]
sample_ts = []
for t in trajs:
traj_PCs = np.array([vals[ind] for ind in trajs[t]])
start = np.mean(traj_PCs[:3])
end = np.mean(traj_PCs[-3:])
traj_PC_diff = traj_PCs[1:] - traj_PCs[:-1]
if np.abs(end - start) > 1.2 * val_std and np.median(traj_PC_diff) < 0.5 * val_std:
sample_ts.append(t)
np.random.seed(123)
for i, f in enumerate(np.random.choice(samples0, (10,), replace=False)):
plot_patch(f, path + '/sample_low_%d.png' % i)
for i, f in enumerate(np.random.choice(samples1, (10,), replace=False)):
plot_patch(f, path + '/sample_high_%d.png' % i)
for t in np.random.choice(sample_ts, (10,), replace=False):
save_traj(t, path + '/sample_traj_%s.gif' % t.replace('/', '_'))
# PC3, controlling for PC1, PC2
vals = dats_[:, 2]
path = '/data/michaelwu/CellVAE/PC_samples/PC3'
vals_filtered = [v for i, v in enumerate(vals) \
if PC1_range[0] < PC1_vals[i] < PC1_range[1] and PC2_range[0] < PC2_vals[i] < PC2_range[1]]
val_std = np.std(vals_filtered)
thr0 = np.quantile(vals_filtered, 0.1)
thr1 = np.quantile(vals_filtered, 0.9)
samples0 = [f for i, f in enumerate(fs) if vals[i] < thr0 and \
PC1_range[0] < PC1_vals[i] < PC1_range[1] and PC2_range[0] < PC2_vals[i] < PC2_range[1]]
samples1 = [f for i, f in enumerate(fs) if vals[i] > thr1 and \
PC1_range[0] < PC1_vals[i] < PC1_range[1] and PC2_range[0] < PC2_vals[i] < PC2_range[1]]
sample_ts = []
for t in trajs:
traj_PCs = np.array([vals[ind] for ind in trajs[t]])
start = np.mean(traj_PCs[:3])
end = np.mean(traj_PCs[-3:])
traj_PC_diff = traj_PCs[1:] - traj_PCs[:-1]
if np.abs(end - start) > 1.2 * val_std and np.median(traj_PC_diff) < 0.5 * val_std:
sample_ts.append(t)
np.random.seed(123)
for i, f in enumerate(np.random.choice(samples0, (10,), replace=False)):
# Copyright (c) lobsterpy development team
# Distributed under the terms of a BSD 3-Clause "New" or "Revised" License
"""
This module defines classes to analyze the COHPs automatically
"""
from collections import Counter
from typing import Optional
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.core import Spin
from pymatgen.io.lobster.lobsterenv import LobsterNeighbors
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
class Analysis:
"""
Analysis class of COHP data from Lobster
Attributes:
condensed_bonding_analysis: dict including a summary of the most important bonding properties
final_dict_bonds: dict including information on ICOHPs per bond type
final_dict_ions: dict including information on environments of cations
chemenv: pymatgen.io.lobster.lobsterenv.LobsterNeighbors object
lse: LightStructureEnvironment from pymatgen
cutoff_icohp: Cutoff in percentage for evaluating neighbors based on ICOHP values.
cutoff_icohp*max_icohp limits the number of considered environments
anion_types: Set of Element objects from pymatgen
list_equivalent_sites: list of site indices of sites that indicate which sites are equivalent
e.g., [0 1 2 2 2] where site 0, 1, 2 indicate sites that are independent from each other
path_to_charge: str that describes the path to CHARGE.lobster
path_to_cohpcar: str that describes the path to COHPCAR.lobster
path_to_icohplist: str that describes the path to ICOHPLIST.lobster
path_to_poscar: str that describes path to POSCAR
path_to_madelung: str that describes the path to MadelungEnergies.lobster
set_cohps: list of cohps
set_coordination_ions: list of coordination environment strings for each cation
set_equivalent_sites: set of inequivalent sites
set_inequivalent_ions: set of inequivalent cations/sites in the structure
set_infos_bonds: information on cation anion bonds
spg: space group information
structure: Structure object
type_charge: which charges are considered here
whichbonds: which bonds will be considered in analysis
"""
def __init__(
self,
path_to_poscar: str,
path_to_icohplist: str,
path_to_cohpcar: str,
path_to_charge: Optional[str] = None,
path_to_madelung: Optional[str] = None,
whichbonds: str = "cation-anion",
cutoff_icohp: float = 0.1,
summed_spins=True,
type_charge=None,
):
"""
This is a class to analyse bonding information automatically
Args:
path_to_poscar: path to POSCAR (e.g., "POSCAR")
path_to_icohplist: path to ICOHPLIST.lobster (e.g., "ICOHPLIST.lobster")
path_to_cohpcar: path to COHPCAR.lobster (e.g., "COHPCAR.lobster")
path_to_charge: path to CHARGE.lobster (e.g., "CHARGE.lobster")
path_to_madelung: path to MadelungEnergies.lobster (e.g., "MadelungEnergies.lobster")
whichbonds: selects which kind of bonds are analyzed. "cation-anion" is the default
cutoff_icohp: only bonds that are stronger than cutoff_icohp*strongest ICOHP will be considered
summed_spins: if true, spins will be summed
type_charge: If no path_to_charge is given, Valences will be used. Otherwise, Mulliken charges.
Löwdin charges cannot be selected at the moment.
"""
self.path_to_poscar = path_to_poscar
self.path_to_icohplist = path_to_icohplist
self.path_to_cohpcar = path_to_cohpcar
self.whichbonds = whichbonds
self.cutoff_icohp = cutoff_icohp
self.path_to_charge = path_to_charge
self.path_to_madelung = path_to_madelung
self.setup_env()
self.get_information_all_bonds(summed_spins=summed_spins)
# This determines how cations and anions are identified (i.e., which type of charge is used)
if path_to_charge is None:
self.type_charge = "Valences"
else:
if type_charge is None:
self.type_charge = "Mulliken"
elif type_charge == "Mulliken":
self.type_charge = "Mulliken"
elif type_charge == "Löwdin":
raise ValueError(
"Only Mulliken charges can be used here at the moment. Implementation will follow."
)
else:
self.type_charge = "Valences"
print(
"type_charge cannot be read! Please use Mulliken/Löwdin. Now, we will use valences"
)
self.set_condensed_bonding_analysis()
self.set_summary_dicts()
self.path_to_madelung = path_to_madelung
def setup_env(self):
"""
This method helps set up the light structure environments based on COHPs
Returns:
None
"""
self.structure = Structure.from_file(self.path_to_poscar)
sga = SpacegroupAnalyzer(structure=self.structure)
symmetry_dataset = sga.get_symmetry_dataset()
equivalent_sites = symmetry_dataset["equivalent_atoms"]
self.list_equivalent_sites = equivalent_sites
self.set_equivalent_sites = list(set(equivalent_sites))
self.spg = symmetry_dataset["international"]
if self.whichbonds == "cation-anion":
try:
self.chemenv = LobsterNeighbors(
filename_ICOHP=self.path_to_icohplist,
structure=Structure.from_file(self.path_to_poscar),
additional_condition=1,
perc_strength_ICOHP=self.cutoff_icohp,
filename_CHARGE=self.path_to_charge,
valences_from_charges=True,
adapt_extremum_to_add_cond=True,
)
except ValueError as err:
if (
str(err) == "min() arg is an empty sequence"
or str(err)
== "All valences are equal to 0, additional_conditions 1 and 3 and 5 and 6 will not work"
):
raise ValueError(
"Consider switching to an analysis of all bonds and not only cation-anion bonds."
" It looks like no cations are detected."
)
raise err
elif self.whichbonds == "all":
# raise ValueError("only cation anion bonds implemented so far")
self.chemenv = LobsterNeighbors(
filename_ICOHP=self.path_to_icohplist,
structure=Structure.from_file(self.path_to_poscar),
additional_condition=0,
perc_strength_ICOHP=self.cutoff_icohp,
filename_CHARGE=self.path_to_charge,
valences_from_charges=True,
adapt_extremum_to_add_cond=True,
)
else:
raise ValueError("only cation anion bonds implemented so far")
# determine cations and anions
try:
if self.whichbonds == "cation-anion":
self.lse = self.chemenv.get_light_structure_environment(
only_cation_environments=True
)
elif self.whichbonds == "all":
self.lse = self.chemenv.get_light_structure_environment(
only_cation_environments=False
)
except ValueError:
class Lse:
"""Test class when error was raised"""
def __init__(self, chemenv):
"""
Test class when error was raised
Args:
chemenv (LobsterNeighbors): LobsterNeighbors object
"""
self.coordination_environments = [
[{"ce_symbol": str(len(coord))}] for coord in chemenv
]
self.lse = Lse(self.chemenv.list_coords)
def get_information_all_bonds(self, summed_spins=True):
"""
This method will gather all information on the bonds within the compound
Returns:
None
"""
if self.whichbonds == "cation-anion":
# this will only analyze cation anion bonds which simplifies the analysis
self.set_inequivalent_ions = []
self.set_coordination_ions = []
self.set_infos_bonds = []
self.set_labels_cohps = []
self.set_cohps = []
# only_bonds_to
self.anion_types = self.chemenv.get_anion_types()
for ice, ce in enumerate(self.lse.coordination_environments):
# only look at inequivalent sites (use of symmetry to speed everything up!)!
# only look at those cations that have cation-anion bonds
if ice in self.set_equivalent_sites and ce[0]["ce_symbol"] is not None:
self.set_inequivalent_ions.append(ice)
ce = ce[0]["ce_symbol"]
self.set_coordination_ions.append(ce)
cation_anion_infos = self.chemenv.get_info_icohps_to_neighbors(
[ice]
)
self.set_infos_bonds.append(cation_anion_infos)
aniontype_labels = []
aniontype_cohps = []
# go through all anions in the structure!
for anion in self.anion_types:
# get labels and summed cohp objects
labels, summedcohps = self.chemenv.get_info_cohps_to_neighbors(
self.path_to_cohpcar,
[ice],
summed_spin_channels=summed_spins,
per_bond=False,
only_bonds_to=[str(anion)],
)
aniontype_labels.append(labels)
aniontype_cohps.append(summedcohps)
self.set_labels_cohps.append(aniontype_labels)
self.set_cohps.append(aniontype_cohps)
elif self.whichbonds == "all":
# this will only analyze all bonds
self.set_inequivalent_ions = []
self.set_coordination_ions = []
self.set_infos_bonds = []
self.set_labels_cohps = []
self.set_cohps = []
# only_bonds_to
self.elements = self.structure.composition.elements
# self.anion_types = self.chemenv.get_anion_types()
for ice, ce in enumerate(self.lse.coordination_environments):
# only look at inequivalent sites (use of symmetry to speed everything up!)!
# only look at those cations that have cation-anion bonds
if ice in self.set_equivalent_sites and ce[0]["ce_symbol"] is not None:
self.set_inequivalent_ions.append(ice)
ce = ce[0]["ce_symbol"]
self.set_coordination_ions.append(ce)
bonds_infos = self.chemenv.get_info_icohps_to_neighbors([ice])
self.set_infos_bonds.append(bonds_infos)
type_labels = []
type_cohps = []
for element in self.elements:
# get labels and summed cohp objects
labels, summedcohps = self.chemenv.get_info_cohps_to_neighbors(
self.path_to_cohpcar,
[ice],
onlycation_isites=False,
summed_spin_channels=summed_spins,
per_bond=False,
only_bonds_to=[str(element)],
)
type_labels.append(labels)
type_cohps.append(summedcohps)
self.set_labels_cohps.append(type_labels)
self.set_cohps.append(type_cohps)
@staticmethod
def _get_strenghts_for_each_bond(pairs, strengths, nameion=None):
"""
Args:
pairs: list of list including labels for the atoms, e.g., [['O3', 'Cu1'], ['O3', 'Cu1']]
strengths (list of float): list that gives the icohp strengths as a float, [-1.86287, -1.86288]
nameion: string including the name of the cation in the list, e.g Cu1
Returns:
dict including information on icohps for each bond type, e.g.
{'Yb-Sb': [-1.59769, -2.14723, -1.7925, -1.60773, -1.80149, -2.14335]}
"""
dict_strenghts = {}
for pair, strength in zip(pairs, strengths):
if nameion is not None:
new = [
LobsterNeighbors._split_string(pair[0])[0],
LobsterNeighbors._split_string(pair[1])[0],
]
new = Analysis._sort_name(new, nameion)
string_here = new[0] + "-" + new[1]
else:
new = sorted(
[
LobsterNeighbors._split_string(pair[0])[0],
LobsterNeighbors._split_string(pair[1])[0],
]
)
string_here = new[0] + "-" + new[1]
if string_here not in dict_strenghts:
dict_strenghts[string_here] = []
dict_strenghts[string_here].append(strength)
return dict_strenghts
@staticmethod
def _sort_name(pair, nameion=None):
"""
will place the cation first in a list of name strings
Args:
pair: ["O","Cu"]
nameion: "Cu"
Returns:
will return list of str, e.g. ["Cu", "O"]
"""
if nameion is not None:
new = []
if pair[0] == nameion:
new.append(pair[0])
new.append(pair[1])
elif pair[1] == nameion:
new.append(pair[1])
new.append(pair[0])
return new
def _get_antibdg_states(self, cohps, labels, nameion=None, limit=0.01):
"""
will return a dictionary including information on antibonding states
e.g., similar to: {'Cu-O': True, 'Cu-F': True}
Args:
cohps: list of pymatgen.electronic_structure.cohp.Cohp objects
labels: ['2 x Cu-O', '4 x Cu-F']
nameion: string of the cation name, e.g. "Cu"
limit: limit to detect antibonding states
Returns:
dict including information on whether antibonding interactions exist,
e.g., {'Cu-O': True, 'Cu-F': True}
"""
dict_antibd = {}
for label, cohp in zip(labels, cohps):
# print(labels)
if label is not None:
if nameion is not None:
new = label.split(" ")[2].split("-")
sorted_new = self._sort_name(new, nameion)
new_label = sorted_new[0] + "-" + sorted_new[1]
else:
new = label.split(" ")[2].split("-")
sorted_new = sorted(new.copy())
new_label = sorted_new[0] + "-" + sorted_new[1]
antbd = cohp.has_antibnd_states_below_efermi(limit=limit)
if Spin.down in antbd:
dict_antibd[new_label] = antbd[Spin.up] or antbd[Spin.down]
else:
dict_antibd[new_label] = antbd[Spin.up]
return dict_antibd
def _integrate_antbdstates_below_efermi_for_set_cohps(self, labels, cohps, nameion):
"""
.. warning:: NEEDS MORE TESTS
This method will return a dictionary including information on antibonding states
Note, however, that only the energy range that has actually been computed can be considered
(i.e., this might not capture all antibonding interactions)
e.g., similar to: {'Cu-O': {'integral': 4.24374775705, 'perc': 5.7437713186999995},
'Cu-F': {'integral': 3.07098300965, 'perc': 4.25800841445}}
Args:
cohps: list of pymatgen.electronic_structure.cohp.Cohp objects
labels: ['2 x Cu-O', '4 x Cu-F']
nameion: string of the cation name, e.g. "Cu"
Returns:
dict including information on whether antibonding interactions exist,
e.g., {'Cu-O': {'integral': 4.24374775705, 'perc': 5.7437713186999995},
'Cu-F': {'integral': 3.07098300965, 'perc': 4.25800841445}}}
"""
dict_antibd = {}
for label, cohp in zip(labels, cohps):
if label is not None:
new = label.split(" ")[2].split("-")
sorted_new = self._sort_name(new, nameion)
new_label = sorted_new[0] + "-" + sorted_new[1]
integral, perc = self._integrate_antbdstates_below_efermi(cohp, -2)
dict_antibd[new_label] = {"integral": integral, "perc": perc}
return dict_antibd
@staticmethod
def _integrate_antbdstates_below_efermi(cohp, start=-30):
"""
.. warning:: NEEDS MORE TESTS
This method integrates the whole COHP curve that has been computed. The energy range is therefore very important
Args:
cohp: cohp object
start: where does the integration start
Returns:
absolute value of antibonding interactions, percentage value of antibonding interaction
"""
# This integrates the whole COHP curve that has been computed. Just be aware that you might be
# neglecting some low-lying interactions due to the energy range
def abstrapz_positive(y, x=None, dx=0.001):
"""
This method will integrate only one side of the COHP
Args:
y: COHP values (the integrand)
x: energy values
dx: spacing of the integration steps, used when x is not given
Returns:
integrated value
"""
y = np.asanyarray(y)
if x is None:
d = dx
else:
x = np.asanyarray(x)
d = np.diff(x)
ret = d * (y[1:] + y[:-1]) / 2.0
return ret[ret > 0.0].sum() # keep only the positive (antibonding) contributions
def abstrapz_negative(y, x=None, dx=0.001):
"""
This method will integrate only one side of the COHP
Args:
y: COHP values (the integrand)
x: energy values
dx: spacing of the integration steps, used when x is not given
Returns:
integrated value
"""
y = np.asanyarray(y)
if x is None:
d = dx
else:
x = np.asanyarray(x)
d = np.diff(x)
import numpy as np
from itertools import combinations
import dask.array as dsa
from ..core import (
histogram,
_ensure_correctly_formatted_bins,
_ensure_correctly_formatted_range,
)
from .fixtures import empty_dask_array
import pytest
bins_int = 10
bins_str = "auto"
bins_arr = np.linspace(-4, 4, 10)
range_ = (0, 1)
@pytest.mark.parametrize("density", [False, True])
@pytest.mark.parametrize("block_size", [None, 1, 2])
@pytest.mark.parametrize("axis", [1, None])
@pytest.mark.parametrize("bins", [10, np.linspace(-4, 4, 10), "auto"])
@pytest.mark.parametrize("range_", [None, (-4, 4)])
def test_histogram_results_1d(block_size, density, axis, bins, range_):
nrows, ncols = 5, 20
# Setting the random seed here prevents np.testing.assert_allclose
# from failing below. We should investigate this further.
np.random.seed(2)
data = np.random.randn(nrows, ncols)
h, bin_edges = histogram(
data, bins=bins, range=range_, axis=axis, block_size=block_size, density=density
)
expected_shape = (
(nrows, len(bin_edges[0]) - 1) if axis == 1 else (len(bin_edges[0]) - 1,)
)
assert h.shape == expected_shape
# make sure we get the same thing as numpy.histogram
if axis:
bins_np = np.histogram_bin_edges(
data, bins=bins, range=range_
) # Use same bins for all slices below
expected = np.stack(
[
np.histogram(data[i], bins=bins_np, range=range_, density=density)[0]
for i in range(nrows)
]
)
else:
expected = np.histogram(data, bins=bins, range=range_, density=density)[0]
norm = nrows if (density and axis) else 1
np.testing.assert_allclose(h, expected / norm)
# Written by Dr <NAME>, Marda Science LLC
# for the USGS Coastal Change Hazards Program
#
# MIT License
#
# Copyright (c) 2020, Marda Science LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, time
USE_GPU = True #False #True
# DO_CRF_REFINE = True
if USE_GPU == True:
##use the first available GPU
os.environ['CUDA_VISIBLE_DEVICES'] = '0' #'1'
else:
## to use the CPU (not recommended):
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
#suppress tensorflow warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#utils
#keras functions for early stopping and model weights saving
# from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import numpy as np
import tensorflow as tf #numerical operations on gpu
from joblib import Parallel, delayed
from numpy.lib.stride_tricks import as_strided as ast
from skimage.morphology import remove_small_holes, remove_small_objects
from skimage.restoration import inpaint
from scipy.ndimage import maximum_filter
from skimage.transform import resize
from tqdm import tqdm
from skimage.filters import threshold_otsu
from skimage.morphology import rectangle, erosion # noqa
import matplotlib.pyplot as plt
SEED=42
np.random.seed(SEED)
AUTO = tf.data.experimental.AUTOTUNE # used in tf.data.Dataset API
tf.random.set_seed(SEED)
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print('GPU name: ', tf.config.experimental.list_physical_devices('GPU'))
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
import tensorflow.keras.backend as K
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import create_pairwise_bilateral, unary_from_labels
from skimage.filters.rank import median
from skimage.morphology import disk
from tkinter import filedialog
from tkinter import *
import json
from skimage.io import imsave
from numpy.lib.stride_tricks import as_strided as ast
#-----------------------------------
def mean_iou(y_true, y_pred):
"""
mean_iou(y_true, y_pred)
This function computes the mean IoU between `y_true` and `y_pred`: this version is tensorflow (not numpy) and is used by tensorflow training and evaluation functions
INPUTS:
* y_true: true masks, one-hot encoded.
* Inputs are B*W*H*N tensors, with
B = batch size,
W = width,
H = height,
N = number of classes
* y_pred: predicted masks, either softmax outputs, or one-hot encoded.
* Inputs are B*W*H*N tensors, with
B = batch size,
W = width,
H = height,
N = number of classes
OPTIONAL INPUTS: None
GLOBAL INPUTS: None
OUTPUTS:
* IoU score [tensor]
"""
yt0 = y_true[:,:,:,0]
yp0 = tf.keras.backend.cast(y_pred[:,:,:,0] > 0.5, 'float32')
inter = tf.math.count_nonzero(tf.logical_and(tf.equal(yt0, 1), tf.equal(yp0, 1)))
union = tf.math.count_nonzero(tf.add(yt0, yp0))
iou = tf.where(tf.equal(union, 0), 1., tf.cast(inter/union, 'float32'))
return iou
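# Added illustration (not part of the original code): equivalent NumPy computation for a single
# pair of hard 0/1 masks, handy for sanity-checking the tensorflow metric above.
# yt0 = y_true[..., 0]; yp0 = (y_pred[..., 0] > 0.5).astype('float32')
# inter = np.count_nonzero((yt0 == 1) & (yp0 == 1)); union = np.count_nonzero(yt0 + yp0)
# iou_np = 1.0 if union == 0 else inter / union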
#-----------------------------------
def dice_coef(y_true, y_pred):
"""
dice_coef(y_true, y_pred)
This function computes the mean Dice coefficient between `y_true` and `y_pred`: this version is tensorflow (not numpy) and is used by tensorflow training and evaluation functions
INPUTS:
* y_true: true masks, one-hot encoded.
* Inputs are B*W*H*N tensors, with
B = batch size,
W = width,
H = height,
N = number of classes
* y_pred: predicted masks, either softmax outputs, or one-hot encoded.
* Inputs are B*W*H*N tensors, with
B = batch size,
W = width,
H = height,
N = number of classes
OPTIONAL INPUTS: None
GLOBAL INPUTS: None
OUTPUTS:
* Dice score [tensor]
"""
smooth = 1.
y_true_f = tf.reshape(tf.dtypes.cast(y_true, tf.float32), [-1])
y_pred_f = tf.reshape(tf.dtypes.cast(y_pred, tf.float32), [-1])
intersection = tf.reduce_sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
##========================================================
def rescale(dat,
mn,
mx):
'''
rescales an input dat between mn and mx
'''
m = min(dat.flatten())
M = max(dat.flatten())
return (mx-mn)*(dat-m)/(M-m)+mn
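# Added example (illustrative only): rescale(np.array([2., 4., 6.]), 0, 1) -> array([0. , 0.5, 1. ])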
# ##====================================
# def standardize(img):
# #standardization using adjusted standard deviation
# N = np.shape(img)[0] * np.shape(img)[1]
# s = np.maximum(np.std(img), 1.0/np.sqrt(N))
# m = np.mean(img)
# img = (img - m) / s
# img = rescale(img, 0, 1)
# del m, s, N
#
# if np.ndim(img)!=3:
# img = np.dstack((img,img,img))
#
# return img
##====================================
def standardize(img):
#standardization using adjusted standard deviation
N = np.shape(img)[0] * np.shape(img)[1]
s = np.maximum(np.std(img), 1.0/np.sqrt(N))
m = np.mean(img)
img = (img - m) / s
# img = rescale(img, 0, 1)
del m, s, N
#
# if np.ndim(img)!=3:
# img = np.dstack((img,img,img))
return img
###############################################################
### MODEL FUNCTIONS
###############################################################
#-----------------------------------
def batchnorm_act(x):
"""
batchnorm_act(x)
This function applies batch normalization to a keras model layer, `x`, then a relu activation function
INPUTS:
* `z` : keras model layer (should be the output of a convolution or an input layer)
OPTIONAL INPUTS: None
GLOBAL INPUTS: None
OUTPUTS:
* batch normalized and relu-activated `x`
"""
x = tf.keras.layers.BatchNormalization()(x)
return tf.keras.layers.Activation("relu")(x)
#-----------------------------------
def conv_block(x, filters, kernel_size = (7,7), padding="same", strides=1):
"""
conv_block(x, filters, kernel_size = (7,7), padding="same", strides=1)
This function applies batch normalization to an input layer, then convolves with a 2D convol layer
The two actions combined is called a convolutional block
INPUTS:
* `filters`: number of filters in the convolutional block
* `x`:input keras layer to be convolved by the block
OPTIONAL INPUTS:
* `kernel_size`=(3, 3): tuple of kernel size (x, y) - this is the size in pixels of the kernel to be convolved with the image
* `padding`="same": see tf.keras.layers.Conv2D
* `strides`=1: see tf.keras.layers.Conv2D
GLOBAL INPUTS: None
OUTPUTS:
* keras layer, output of the batch normalized convolution
"""
conv = batchnorm_act(x)
return tf.keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides)(conv)
#-----------------------------------
def bottleneck_block(x, filters, kernel_size = (7,7), padding="same", strides=1):
"""
bottleneck_block(x, filters, kernel_size = (7,7), padding="same", strides=1)
This function creates a bottleneck block layer, which is the addition of a convolution block and a batch normalized/activated block
INPUTS:
* `filters`: number of filters in the convolutional block
* `x`: input keras layer
OPTIONAL INPUTS:
* `kernel_size`=(3, 3): tuple of kernel size (x, y) - this is the size in pixels of the kernel to be convolved with the image
* `padding`="same": see tf.keras.layers.Conv2D
* `strides`=1: see tf.keras.layers.Conv2D
GLOBAL INPUTS: None
OUTPUTS:
* keras layer, output of the addition between convolutional and bottleneck layers
"""
conv = tf.keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides)(x)
conv = conv_block(conv, filters, kernel_size=kernel_size, padding=padding, strides=strides)
bottleneck = tf.keras.layers.Conv2D(filters, kernel_size=(1, 1), padding=padding, strides=strides)(x)
bottleneck = batchnorm_act(bottleneck)
return tf.keras.layers.Add()([conv, bottleneck])
#-----------------------------------
def res_block(x, filters, kernel_size = (7,7), padding="same", strides=1):
"""
res_block(x, filters, kernel_size = (7,7), padding="same", strides=1)
This function creates a residual block layer, which is the addition of a residual convolution block and a batch normalized/activated block
INPUTS:
* `filters`: number of filters in the convolutional block
* `x`: input keras layer
OPTIONAL INPUTS:
* `kernel_size`=(3, 3): tuple of kernel size (x, y) - this is the size in pixels of the kernel to be convolved with the image
* `padding`="same": see tf.keras.layers.Conv2D
* `strides`=1: see tf.keras.layers.Conv2D
GLOBAL INPUTS: None
OUTPUTS:
* keras layer, output of the addition between residual convolutional and bottleneck layers
"""
res = conv_block(x, filters, kernel_size=kernel_size, padding=padding, strides=strides)
res = conv_block(res, filters, kernel_size=kernel_size, padding=padding, strides=1)
bottleneck = tf.keras.layers.Conv2D(filters, kernel_size=(1, 1), padding=padding, strides=strides)(x)
bottleneck = batchnorm_act(bottleneck)
return tf.keras.layers.Add()([bottleneck, res])
#-----------------------------------
def upsamp_concat_block(x, xskip):
"""
upsamp_concat_block(x, xskip)
This function takes an input layer and creates a concatenation of an upsampled version and a residual or 'skip' connection
INPUTS:
* `xskip`: input keras layer (skip connection)
* `x`: input keras layer
OPTIONAL INPUTS: None
GLOBAL INPUTS: None
OUTPUTS:
* keras layer, output of the addition between residual convolutional and bottleneck layers
"""
u = tf.keras.layers.UpSampling2D((2, 2))(x)
return tf.keras.layers.Concatenate()([u, xskip])
#-----------------------------------
def iou(obs, est, nclasses):
IOU=0
for n in range(1,nclasses+1):
component1 = obs==n
component2 = est==n
overlap = component1*component2 # Logical AND
union = component1 + component2 # Logical OR
calc = overlap.sum()/float(union.sum())
if not np.isnan(calc):
IOU += calc
if IOU>1:
IOU=IOU/n
return IOU
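# Added usage sketch (hypothetical 2-class label maps with values 1..nclasses):
# obs = np.array([[1, 1], [2, 2]]); est = np.array([[1, 2], [2, 2]])
# iou(obs, est, nclasses=2)  # sums per-class overlap/union, then averages if the sum exceeds 1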
#-----------------------------------
def res_unet(sz, f, nclasses=1):
"""
res_unet(sz, f, nclasses=1)
This function creates a custom residual U-Net model for image segmentation
INPUTS:
* `sz`: [tuple] size of input image
* `f`: [int] number of filters in the convolutional block
* nclasses [int]: number of classes; if 1, the output layer uses a sigmoid (binary 2D masks), otherwise a softmax (multiclass 3D masks)
OPTIONAL INPUTS:
* `kernel_size`=(3, 3): tuple of kernel size (x, y) - this is the size in pixels of the kernel to be convolved with the image
* `padding`="same": see tf.keras.layers.Conv2D
* `strides`=1: see tf.keras.layers.Conv2D
GLOBAL INPUTS: None
OUTPUTS:
* keras model
"""
inputs = tf.keras.layers.Input(sz)
## downsample
e1 = bottleneck_block(inputs, f); f = int(f*2)
e2 = res_block(e1, f, strides=2); f = int(f*2)
e3 = res_block(e2, f, strides=2); f = int(f*2)
e4 = res_block(e3, f, strides=2); f = int(f*2)
_ = res_block(e4, f, strides=2)
## bottleneck
b0 = conv_block(_, f, strides=1)
_ = conv_block(b0, f, strides=1)
## upsample
_ = upsamp_concat_block(_, e4)
_ = res_block(_, f); f = int(f/2)
_ = upsamp_concat_block(_, e3)
_ = res_block(_, f); f = int(f/2)
_ = upsamp_concat_block(_, e2)
_ = res_block(_, f); f = int(f/2)
_ = upsamp_concat_block(_, e1)
_ = res_block(_, f)
## classify
if nclasses==1:
outputs = tf.keras.layers.Conv2D(nclasses, (1, 1), padding="same", activation="sigmoid")(_)
else:
outputs = tf.keras.layers.Conv2D(nclasses, (1, 1), padding="same", activation="softmax")(_)
#model creation
model = tf.keras.models.Model(inputs=[inputs], outputs=[outputs])
return model
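# Construction sketch (added; the input size and filter count are illustrative assumptions):
# model = res_unet((768, 768, 3), f=16, nclasses=4)
# model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=[mean_iou, dice_coef])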
#-----------------------------------
def seg_file2tensor_3band(f, resize):
"""
"seg_file2tensor(f)"
This function reads a jpeg image from file into a cropped and resized tensor,
for use in prediction with a trained segmentation model
INPUTS:
* f [string] file name of jpeg
OPTIONAL INPUTS: None
OUTPUTS:
* image [tensor array]: unstandardized image
GLOBAL INPUTS: TARGET_SIZE
"""
bits = tf.io.read_file(f)
if 'jpg' in f:
bigimage = tf.image.decode_jpeg(bits)
elif 'png' in f:
bigimage = tf.image.decode_png(bits)
if USE_LOCATION:
gx,gy = np.meshgrid(np.arange(bigimage.shape[1]), np.arange(bigimage.shape[0]))
loc = np.sqrt(gx**2 + gy**2)
loc /= loc.max()
loc = (255*loc).astype('uint8')
bigimage = np.dstack((bigimage, loc))
w = tf.shape(bigimage)[0]
h = tf.shape(bigimage)[1]
if resize:
tw = TARGET_SIZE[0]
th = TARGET_SIZE[1]
resize_crit = (w * th) / (h * tw)
image = tf.cond(resize_crit < 1,
lambda: tf.image.resize(bigimage, [w*tw/w, h*tw/w]), # if true
lambda: tf.image.resize(bigimage, [w*th/h, h*th/h]) # if false
)
nw = tf.shape(image)[0]
nh = tf.shape(image)[1]
image = tf.image.crop_to_bounding_box(image, (nw - tw) // 2, (nh - th) // 2, tw, th)
# image = tf.cast(image, tf.uint8) #/ 255.0
return image, w, h, bigimage
else:
return None, w, h, bigimage
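# Usage sketch (added; 'example.jpg' is a placeholder, and the globals TARGET_SIZE and USE_LOCATION
# are assumed to be defined by the surrounding configuration):
# image, w, h, bigimage = seg_file2tensor_3band('example.jpg', resize=True)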
#-----------------------------------
def seg_file2tensor_4band(f, fir, resize):
"""
"seg_file2tensor(f)"
This function reads a jpeg image from file into a cropped and resized tensor,
for use in prediction with a trained segmentation model
INPUTS:
* f [string] file name of jpeg
OPTIONAL INPUTS: None
OUTPUTS:
* image [tensor array]: unstandardized image
GLOBAL INPUTS: TARGET_SIZE
"""
bits = tf.io.read_file(f)
if 'jpg' in f:
bigimage = tf.image.decode_jpeg(bits)
elif 'png' in f:
bigimage = tf.image.decode_png(bits)
bits = tf.io.read_file(fir)
if 'jpg' in fir:
nir = tf.image.decode_jpeg(bits)
elif 'png' in f:
nir = tf.image.decode_png(bits)
if USE_LOCATION:
gx,gy = np.meshgrid(np.arange(bigimage.shape[1]), np.arange(bigimage.shape[0]))
loc = np.sqrt(gx**2 + gy**2)
loc /= loc.max()
loc = (255*loc).astype('uint8')
bigimage = np.dstack((bigimage, loc))
if USE_LOCATION:
bigimage = tf.concat([bigimage, nir],-1)[:,:,:N_DATA_BANDS+1]
else:
bigimage = tf.concat([bigimage, nir],-1)[:,:,:N_DATA_BANDS]
w = tf.shape(bigimage)[0]
h = tf.shape(bigimage)[1]
if resize:
tw = TARGET_SIZE[0]
th = TARGET_SIZE[1]
resize_crit = (w * th) / (h * tw)
image = tf.cond(resize_crit < 1,
lambda: tf.image.resize(bigimage, [w*tw/w, h*tw/w]), # if true
lambda: tf.image.resize(bigimage, [w*th/h, h*th/h]) # if false
)
nw = tf.shape(image)[0]
nh = tf.shape(image)[1]
image = tf.image.crop_to_bounding_box(image, (nw - tw) // 2, (nh - th) // 2, tw, th)
# image = tf.cast(image, tf.uint8) #/ 255.0
return image, w, h, bigimage
else:
return None, w, h, bigimage
##========================================================
def fromhex(n):
""" hexadecimal to integer """
return int(n, base=16)
##========================================================
def label_to_colors(
img,
mask,
alpha,#=128,
colormap,#=class_label_colormap, #px.colors.qualitative.G10,
color_class_offset,#=0,
do_alpha,#=True
):
"""
Take MxN matrix containing integers representing labels and return an MxNx4
matrix where each label has been replaced by a color looked up in colormap.
colormap entries must be strings like plotly.express style colormaps.
alpha is the value of the 4th channel
color_class_offset allows adding a value to the color class index to force
use of a particular range of colors in the colormap. This is useful for
example if 0 means 'no class' but we want the color of class 1 to be
colormap[0].
"""
colormap = [
tuple([fromhex(h[s : s + 2]) for s in range(0, len(h), 2)])
for h in [c.replace("#", "") for c in colormap]
]
cimg = np.zeros(img.shape[:2] + (3,), dtype="uint8")
# Version 3.1; <NAME>; Polar Geospatial Center, University of Minnesota; 2019
# Translated from MATLAB code written by <NAME>, Ohio State University, 2018
from __future__ import division
import os
import sys
import traceback
import warnings
import ogr
import numpy as np
import scipy.stats
from scipy import interpolate
if sys.version_info[0] < 3:
import raster_array_tools as rat
from filter_scene import getDataDensityMap, readSceneMeta, rescaleDN
else:
from lib import raster_array_tools as rat
from lib.filter_scene import getDataDensityMap, readSceneMeta, rescaleDN
# The spatial reference of the strip, set at the beginning of scenes2strips()
# to the spatial reference of the first scene DEM in order and used for
# comparison to the spatial references of all other source raster files.
__STRIP_SPAT_REF__ = None
# The Catalog ID of "Image 1" as parsed from the output scene metadata files for
# an intrack stereo SETSM DEM strip. It is expected that all ortho scenes in the
# intrack strip correspond to the same Catalog ID.
__INTRACK_ORTHO_CATID__ = None
HOLD_GUESS_OFF = 0
HOLD_GUESS_ALL = 1
HOLD_GUESS_UPDATE_RMSE = 2
class InvalidArgumentError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
class SpatialRefError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
class RasterDimensionError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
class MetadataError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
def scenes2strips(demFiles,
maskSuffix=None, filter_options=(), max_coreg_rmse=1,
trans_guess=None, trans_err_guess=None, rmse_guess=None,
hold_guess=HOLD_GUESS_OFF, check_guess=True,
use_second_ortho=False):
"""
From MATLAB version in Github repo 'setsm_postprocessing', 3.0 branch:
function [X,Y,Z,M,O,trans,rmse,f]=scenes2strips(varargin)
%SCENES2STRIPS merge scenes into strips
%
% [x,y,z,m,o,trans,rmse,f]=scenes2strips(demdir,f) merges the
% scene geotiffs listed in cellstr f within directory demdir after
% ordering them by position. If a break in coverage is detected between
% scene n and n+1 only the first 1:n scenes will be merged. The data are
% coregistered at overlaps using iterative least squares, starting with
% scene n=1.
% Outputs are the strip grid coordinates x,y and strip elevation, z,
% matchtag, m and orthoimage, o. The 3D translations are given in 3xn
% vector trans, along with root-mean-squared of residuals, rmse. The
% output f gives the list of filenames in the mosaic. If a break is
% detected, the list of output files will be less than the input.
%
% [...]=scenes2strips(...,'maskFileSuffix',value) will apply the mask
% identified as the dem filename with the _dem.tif replaced by
% _maskSuffix
% [...]=scenes2strips(...,'max_coreg_rmse',value) will set a new maximum
% coregistration error limit in meters (default=1). Errors above this
% limit will result in a segment break.
%
% Version 3.1, <NAME>, Ohio State University, 2015.
If maskFileSuffix='edgemask', edge and data masks identified as the DEM
filename with the _dem.tif replaced by _edgemask.tif and _datamask.tif,
respectively, will be applied.
"""
from batch_scenes2strips import getDemSuffix, selectBestMatchtag, selectBestOrtho, selectBestOrtho2
demSuffix = getDemSuffix(demFiles[0])
cluster_min_px = 1000 # Minimum data cluster area for 2m.
add_min_px = 50000 # Minimum number of unmasked pixels scene must add to existing segment to not be skipped.
# Order scenes in north-south or east-west direction by aspect ratio.
num_scenes = len(demFiles)
if trans_guess is None and trans_err_guess is None and rmse_guess is None:
print("Ordering {} scenes".format(num_scenes))
demFiles_ordered = orderPairs(demFiles)
elif trans_err_guess is not None and trans_guess is None:
raise InvalidArgumentError("`trans_err_guess` argument can only be used in conjunction "
"with `trans_guess` argument")
elif trans_guess is not None and trans_guess.shape[1] != num_scenes:
raise InvalidArgumentError("`trans_guess` array must be of shape (3, N) where N is the number "
"of scenes in `demFiles`, but was {}".format(trans_guess.shape))
elif rmse_guess is not None and rmse_guess.shape[1] != num_scenes:
raise InvalidArgumentError("`rmse_guess` array must be of shape (1, N) where N is the number "
"of scenes in `demFiles`, but was {}".format(rmse_guess.shape))
else:
# Files should already be properly ordered if a guess is provided.
# Running `orderPairs` on them could detrimentally change their order.
demFiles_ordered = list(demFiles)
num_scenes = len(demFiles_ordered)
# Initialize output stats.
trans = np.zeros((3, num_scenes))
trans_err = trans.copy()
rmse = np.zeros((1, num_scenes))
if check_guess:
trans_check = np.copy(trans)
trans_err_check = np.copy(trans_err)
rmse_check = np.copy(rmse)
# Get projection reference of the first scene to be used in equality checks
# with the projection reference of all scenes that follow.
global __STRIP_SPAT_REF__
__STRIP_SPAT_REF__ = rat.extractRasterData(demFiles_ordered[0], 'spat_ref')
if __STRIP_SPAT_REF__.ExportToProj4() == '':
raise SpatialRefError("DEM '{}' spatial reference ({}) has no PROJ4 representation "
"and is likely erroneous".format(demFiles_ordered[0], __STRIP_SPAT_REF__.ExportToWkt()))
# File loop.
skipped_scene = False
segment_break = False
for i in range(num_scenes+1):
if skipped_scene:
skipped_scene = False
trans[:, i-1] = np.nan
trans_err[:, i-1] = np.nan
rmse[0, i-1] = np.nan
if i >= num_scenes:
break
if ( (trans_guess is not None and np.any(np.isnan(trans_guess[:, i])))
or (trans_err_guess is not None and np.any(np.isnan(trans_err_guess[:, i])))
or (rmse_guess is not None and np.isnan(rmse_guess[0, i]))):
# State of scene is somewhere between naturally redundant
# or redundant by masking, as classified by prior s2s run.
skipped_scene = True
continue
# Construct filenames.
demFile = demFiles_ordered[i]
matchFile = selectBestMatchtag(demFile)
orthoFile = selectBestOrtho(demFile)
ortho2File = selectBestOrtho2(demFile) if use_second_ortho else None
metaFile = demFile.replace(demSuffix, 'meta.txt')
if maskSuffix is None:
print("No mask applied")
maskFile = None
else:
maskFile = demFile.replace(demSuffix, maskSuffix)
if use_second_ortho and ortho2File is None:
raise InvalidArgumentError("`use_second_ortho=True`, but second ortho could not be found")
print("Scene {} of {}: {}".format(i+1, len(demFiles_ordered), demFile))
# try:
x, y, z, m, o, o2, md = loadData(demFile, matchFile, orthoFile, ortho2File, maskFile, metaFile)
# except:
# print("Data read error:")
# traceback.print_exc()
# print("...skipping")
# continue
# Apply masks.
x, y, z, m, o, o2, md = applyMasks(x, y, z, m, o, o2, md, filter_options, maskSuffix)
# Check for redundant scene.
if np.count_nonzero(~np.isnan(z)) <= add_min_px:
print("Not enough (unmasked) data, skipping")
skipped_scene = True
continue
dx = x[1] - x[0]
dy = y[1] - y[0]
# Fix grid so that x, y coordinates of
# pixels in overlapping scenes will match up.
if ((x[1] / dx) % 1 != 0) or ((y[1] / dy) % 1 != 0):
x, y, z, m, o, o2, md = regrid(x, y, z, m, o, o2, md)
# If this is the first scene in strip,
# set as strip and continue to next scene.
if 'X' not in vars():
X, Y, Z, M, O, O2, MD = x, y, z, m, o, o2, md
del x, y, z, m, o, o2, md
continue
# Pad new arrays to stabilize interpolation.
buff = int(10*dx + 1)
z = np.pad(z, buff, 'constant', constant_values=np.nan)
m = np.pad(m, buff, 'constant', constant_values=0)
o = np.pad(o, buff, 'constant', constant_values=0)
o2 = np.pad(o2, buff, 'constant', constant_values=0) if o2 is not None else None
md = np.pad(md, buff, 'constant', constant_values=1)
x = np.concatenate((x[0] - dx*np.arange(buff, 0, -1), x,
x[-1] + dx*np.arange(1, buff+1)))
y = np.concatenate((y[0] + dx*np.arange(buff, 0, -1), y,
y[-1] - dx*np.arange(1, buff+1)))
# Expand strip coverage to encompass new scene.
if x[0] < X[0]:
X1 = np.arange(x[0], X[0], dx)
X = np.concatenate((X1, X))
Z, M, O, O2, MD = expandCoverage(Z, M, O, O2, MD, X1, direction='left')
del X1
if x[-1] > X[-1]:
X1 = np.arange(X[-1]+dx, x[-1]+dx, dx)
X = np.concatenate((X, X1))
Z, M, O, O2, MD = expandCoverage(Z, M, O, O2, MD, X1, direction='right')
del X1
if y[0] > Y[0]:
Y1 = np.arange(y[0], Y[0], -dx)
Y = np.concatenate((Y1, Y))
Z, M, O, O2, MD = expandCoverage(Z, M, O, O2, MD, Y1, direction='up')
del Y1
if y[-1] < Y[-1]:
Y1 = np.arange(Y[-1]-dx, y[-1]-dx, -dx)
Y = np.concatenate((Y, Y1))
Z, M, O, O2, MD = expandCoverage(Z, M, O, O2, MD, Y1, direction='down')
del Y1
# Map new DEM pixels to swath. These must return integers. If not,
# interpolation will be required, which is currently not supported.
c0 = np.where(x[0] == X)[0][0]
c1 = np.where(x[-1] == X)[0][0] + 1
r0 = np.where(y[0] == Y)[0][0]
r1 = np.where(y[-1] == Y)[0][0] + 1
# Crop to overlap.
Xsub = np.copy( X[c0:c1])
Ysub = np.copy( Y[r0:r1])
Zsub = np.copy( Z[r0:r1, c0:c1])
Msub = np.copy( M[r0:r1, c0:c1])
Osub = np.copy( O[r0:r1, c0:c1])
O2sub = np.copy(O2[r0:r1, c0:c1]) if O2 is not None else None
MDsub = np.copy(MD[r0:r1, c0:c1])
# NEW MOSAICKING CODE
# Crop to just region of overlap.
A = (~np.isnan(Zsub) & ~np.isnan(z))
# Check for segment break.
if np.count_nonzero(A) <= cluster_min_px:
print("Not enough overlap, segment break")
segment_break = True
break
r, c = cropBorder(A, 0, buff)
# Make overlap mask removing isolated pixels.
strip_nodata = np.isnan(Zsub[r[0]:r[1], c[0]:c[1]])
scene_data = ~np.isnan( z[r[0]:r[1], c[0]:c[1]])
strip_mask_water_and_cloud = (MDsub[r[0]:r[1], c[0]:c[1]] > 1)
# Nodata in strip and data in scene is a one.
A = rat.bwareaopen(strip_nodata & scene_data, cluster_min_px, in_place=True).astype(np.float32)
# Check for redundant scene.
num_px_to_add = np.count_nonzero((A == 1) & ~strip_mask_water_and_cloud)
print("Number of unmasked pixels to add to strip: {}".format(num_px_to_add))
if num_px_to_add <= add_min_px:
print("Redundant scene, skipping")
skipped_scene = True
continue
# Data in strip and nodata in scene is a two.
A[rat.bwareaopen(~strip_nodata & ~scene_data, cluster_min_px, in_place=True)] = 2
del strip_nodata, scene_data
Ar = rat.imresize(A, 0.1, 'nearest')
# Check for redundant scene.
if not np.any(Ar):
print("Redundant scene, skipping")
skipped_scene = True
continue
# Locate pixels on outside of boundary of overlap region.
Ar_nonzero = (Ar != 0)
B = np.where(rat.bwboundaries_array(Ar_nonzero, noholes=True))
cz_rows, cz_cols = [], []
for cc in [
[0, 0 ],
[0, Ar.shape[1]-1],
[Ar.shape[0]-1, Ar.shape[1]-1],
[Ar.shape[0]-1, 0 ]]:
if Ar[tuple(cc)] == 0:
cz_rows.append(cc[0])
cz_cols.append(cc[1])
if len(cz_rows) > 0:
# Pixels outside of the convex hull of input points for interpolate.griddata
# currently can't be extrapolated linearly (by default they are filled with NaN).
# Let this region be filled with NaN, but we get a better edge on the convex hull
# by changing corner pixels that are zero to NaN.
corner_zeros = (np.array(cz_rows), np.array(cz_cols))
Ar[corner_zeros] = np.nan
# Add the corner coordinates to the list of boundary coordinates,
# which will be used for interpolation.
By = np.concatenate((B[0], corner_zeros[0]))
Bx = np.concatenate((B[1], corner_zeros[1]))
B = (By, Bx)
del corner_zeros, By, Bx
del cz_rows, cz_cols
# Use the coordinates and values of boundary pixels
# to interpolate values for pixels with zero value.
Ar_zero_coords = np.where(~Ar_nonzero)
Ar_interp = interpolate.griddata(B, Ar[B], Ar_zero_coords, 'linear')
Ar[Ar_zero_coords] = Ar_interp
del Ar_nonzero, Ar_zero_coords, Ar_interp
# Fill in the regions outside the convex hull of the boundary points
# using a nearest extrapolation of all points on the boundary of the
# overlap region (including the gaps that were just interpolated).
Ar_outer = np.isnan(Ar)
Ar_outer_coords = np.where(Ar_outer)
B = np.where(rat.bwboundaries_array(~Ar_outer))
Ar_extrap = interpolate.griddata(B, Ar[B], Ar_outer_coords, 'nearest')
Ar[Ar_outer_coords] = Ar_extrap
# Nearest extrapolation is granular, so it is smoothed.
Ar_smooth = rat.moving_average(Ar, 5, zero_border=False)
Ar[Ar_outer] = Ar_smooth[Ar_outer]
del Ar_outer, Ar_outer_coords, Ar_extrap
Ar = rat.imresize(Ar, A.shape, 'bilinear')
Ar[(A == 1) & (Ar != 1)] = 1
Ar[(A == 2) & (Ar != 2)] = 2
A = np.clip(Ar - 1, 0, 1)
del Ar
W = (~np.isnan(Zsub)).astype(np.float32)
W[r[0]:r[1], c[0]:c[1]] = A
del A
W[np.isnan(Zsub) & np.isnan(z)] = np.nan
# Shift weights so that more of the reference layer is kept.
f0 = 0.25 # overlap fraction where ref z weight goes to zero
f1 = 0.55 # overlap fraction where ref z weight goes to one
W = np.clip((1/(f1-f0))*W - f0/(f1-f0), 0, 1)
# Remove <25% edge of coverage from each in pair.
strip_nodata = (W == 0)
Zsub[strip_nodata] = np.nan
Msub[strip_nodata] = 0
Osub[strip_nodata] = 0
if O2sub is not None:
O2sub[strip_nodata] = 0
MDsub[strip_nodata] = 0
scene_nodata = (W == 1)
z[scene_nodata] = np.nan
m[scene_nodata] = 0
o[scene_nodata] = 0
if o2 is not None:
o2[scene_nodata] = 0
md[scene_nodata] = 0
del strip_nodata, scene_nodata
# Coregistration
P0 = getDataDensityMap(Msub[r[0]:r[1], c[0]:c[1]]) > 0.9
# Check for segment break.
if not np.any(P0):
print("Not enough data overlap, segment break")
segment_break = True
break
P1 = getDataDensityMap(m[r[0]:r[1], c[0]:c[1]]) > 0.9
# Check for redundant scene.
if not np.any(P1):
print("Redundant scene, skipping")
skipped_scene = True
continue
# Coregister this scene to the strip mosaic.
if ( hold_guess == HOLD_GUESS_ALL and not check_guess
and (trans_guess is not None and trans_err_guess is not None and rmse_guess is not None)):
trans[:, i] = trans_guess[:, i]
trans_err[:, i] = trans_err_guess[:, i]
rmse[0, i] = rmse_guess[0, i]
else:
trans[:, i], trans_err[:, i], rmse[0, i] = coregisterdems(
Xsub[c[0]:c[1]], Ysub[r[0]:r[1]], Zsub[r[0]:r[1], c[0]:c[1]],
x[c[0]:c[1]], y[r[0]:r[1]], z[r[0]:r[1], c[0]:c[1]],
P0, P1,
(trans_guess[:, i] if trans_guess is not None else trans_guess),
hold_guess != HOLD_GUESS_OFF
)[[1, 2, 3]]
if check_guess:
error_tol = 10**-2
if trans_guess is not None:
trans_check[:, i] = trans[:, i]
if not np.allclose(trans_check[:, i], trans_guess[:, i], rtol=0, atol=error_tol, equal_nan=True):
print("`trans_check` vector out of `coregisterdems` does not match `trans_guess` within error tol ({})".format(error_tol))
print("`trans_guess`:")
print(np.array2string(trans_guess, precision=4, max_line_width=np.inf))
print("`trans_check`:")
print(np.array2string(trans_check, precision=4, max_line_width=np.inf))
if rmse_guess is not None:
rmse_check[0, i] = rmse[0, i]
if not np.allclose(rmse_check[0, i], rmse_guess[0, i], rtol=0, atol=error_tol, equal_nan=True):
print("`rmse_check` out of `coregisterdems` does not match `rmse_guess` within error tol ({})".format(error_tol))
print("`rmse_guess`:")
print(np.array2string(rmse_guess, precision=4, max_line_width=np.inf))
print("`rmse_check`:")
print(np.array2string(rmse_check, precision=4, max_line_width=np.inf))
if hold_guess != HOLD_GUESS_OFF:
if trans_guess is not None:
trans[:, i] = trans_guess[:, i]
if trans_err_guess is not None:
trans_err[:, i] = trans_err_guess[:, i]
if rmse_guess is not None and hold_guess == HOLD_GUESS_ALL:
rmse[0, i] = rmse_guess[0, i]
# Check for segment break.
if np.isnan(rmse[0, i]):
print("Unable to coregister, segment break")
segment_break = True
elif rmse[0, i] > max_coreg_rmse:
print("Final RMSE is greater than cutoff value ({} > {}), segment break".format(
rmse[0, i], max_coreg_rmse))
segment_break = True
else:
pass
if segment_break:
break
# Interpolation grid
xi = x - trans[1, i]
yi = y - trans[2, i]
# Check that uniform spacing is maintained (sometimes rounding errors).
if len(np.unique(np.diff(xi))) > 1:
xi = np.round(xi, 4)
if len(np.unique(np.diff(yi))) > 1:
yi = np.round(yi, 4)
# Interpolate floating data to the reference grid.
zi = rat.interp2_gdal(xi, yi, z-trans[0, i], Xsub, Ysub, 'linear')
del z
# Interpolate matchtag to the same grid.
mi = rat.interp2_gdal(xi, yi, m.astype(np.float32), Xsub, Ysub, 'nearest')
mi[np.isnan(mi)] = 0 # convert back to uint8
mi = mi.astype(np.bool)
del m
# Interpolate ortho to same grid.
oi = o.astype(np.float32)
oi[oi == 0] = np.nan # Set border to NaN so it won't be interpolated.
oi = rat.interp2_gdal(xi, yi, oi, Xsub, Ysub, 'cubic')
del o
if o2 is not None:
# Interpolate ortho2 to same grid.
o2i = o2.astype(np.float32)
o2i[o2i == 0] = np.nan # Set border to NaN so it won't be interpolated.
o2i = rat.interp2_gdal(xi, yi, o2i, Xsub, Ysub, 'cubic')
del o2
else:
o2i = None
# Interpolate mask to the same grid.
mdi = rat.interp2_gdal(xi, yi, md.astype(np.float32), Xsub, Ysub, 'nearest')
mdi[np.isnan(mdi)] = 0 # convert back to uint8
mdi = mdi.astype(np.uint8)
del md
del Xsub, Ysub
# Remove border 0's introduced by NaN interpolation.
M3 = ~np.isnan(zi)
M3 = rat.imerode(M3, 6) # border cutline
zi[~M3] = np.nan
mi[~M3] = 0 # also apply to matchtag
del M3
# Remove border on ortho separately.
M4 = ~np.isnan(oi)
M4 = rat.imerode(M4, 6)
oi[~M4] = np.nan
del M4
if o2i is not None:
# Remove border on ortho2 separately.
M5 = ~np.isnan(o2i)
M5 = rat.imerode(M5, 6)
o2i[~M5] = np.nan
del M5
# Make weighted elevation grid.
A = Zsub*W + zi*(1-W)
Zsub_only = ~np.isnan(Zsub)
import argparse
import scipy.io
import torch
import numpy as np
import os
from torchvision import datasets
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from shutil import copyfile
from PIL import Image
#######################################################################
# Evaluate
parser = argparse.ArgumentParser(description='Demo')
parser.add_argument('--query_index', default=300, type=int, help='test_image_index')
parser.add_argument('--test_dir',default='./Food-cropped/pytorch',type=str, help='./test_data')
parser.add_argument('--adv',action='store_true', help='./test_data')
opts = parser.parse_args()
data_dir = opts.test_dir
image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ) for x in ['gallery','query']}
#####################################################################
#Show result
def imshow(path, title=None):
"""Imshow for Tensor."""
im = plt.imread(path)
plt.imshow(im)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
######################################################################
result = scipy.io.loadmat('pytorch_result.mat')
if opts.adv:
result = scipy.io.loadmat('attack_query/ft_ResNet50_all-5/16/query.mat')
query_feature = torch.FloatTensor(result['query_f'])
query_label = result['query_label'][0]
result = scipy.io.loadmat('pytorch_result.mat')
gallery_feature = torch.FloatTensor(result['gallery_f'])
gallery_label = result['gallery_label'][0]
multi = os.path.isfile('multi_query.mat')
if multi:
m_result = scipy.io.loadmat('multi_query.mat')
mquery_feature = torch.FloatTensor(m_result['mquery_f'])
mquery_cam = m_result['mquery_cam'][0]
mquery_label = m_result['mquery_label'][0]
mquery_feature = mquery_feature.cuda()
query_feature = query_feature.cuda()
gallery_feature = gallery_feature.cuda()
#######################################################################
# sort the images
def sort_img(qf, ql, gf, gl):
query = qf.view(-1,1)
# print(query.shape)
score = torch.mm(gf,query)
score = score.squeeze(1).cpu()
score = score.numpy()
# predict index
index = np.argsort(score) #from small to large
index = index[::-1]
#good_index = np.setdiff1d(query_index, camera_index, assume_unique=True)
junk_index = np.argwhere(gl==-1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
License:
Copyright (c) 2019 <NAME>, <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Usage:
null_list = find_nulls(nx,ny,nz, xv,yv,zv, B_x1,B_y1,B_z1, tolerance=None)
This function checks the existence of null points and localize them in the grid
Parameters: nx, ny, nz, xv, yv, zv, B_x1, B_y1, B_z1, tolerence(=None)
nx, ny, nz represent the resolution of the grid
xv, yv, zv are the coordinate in the x, y and z directions
B_x1, B_y1, B_z1 are the components of the B field
tolerance sets the exit condition for Newton's method (root finding)
Returns: null_list
null_list is a concatenated list of coordinates for all detected nulls
"""
import numpy as np
import math
from numpy.linalg import inv
def find_nulls(nx,ny,nz,xv,yv,zv,B_x1,B_y1,B_z1, tolerance=None):
null_list = []
num_nulls = 0
# calculating the sign change of the field components at the corners of the cells of the grid
bx_sc = field_sign_change(B_x1)
by_sc = field_sign_change(B_y1)
bz_sc = field_sign_change(B_z1)
# REDUCTION STAGE: keeping the indices of those cells for which the field components change sign at one of the vertices of the cells
ind_list = np.array(np.where(bx_sc & by_sc & bz_sc)).T
if not tolerance: tolerance = 10**-5
# looping over the cells that pass the reduction stage
for ind in ind_list:
# retrieving the indices that satisfy the reduction stage
i = ind[0]
j = ind[1]
k = ind[2]
# trilinear interpolation
tri_x = trilinear_coeffs(xv,yv,zv,i,j,k,B_x1)
tri_y = trilinear_coeffs(xv,yv,zv,i,j,k,B_y1)
tri_z = trilinear_coeffs(xv,yv,zv,i,j,k,B_z1)
trilinear = np.array([tri_x,tri_y,tri_z])
# BILINEAR STAGE
# creating three lists that store the sign of each field component on the faces of the cube
# the sign is appended only if the location given by the bilinear interpolation is on the face that is being considered
bxx = []
byy = []
bzz = []
# FACE 1
# f is the parameter that tells the code if we're on an x/y/z face
f = 0
face1 = xv[i]
# bx = 0 and by = 0
# get bilinear coefficients
bxby = x_face(face1,trilinear[0],trilinear[1])
z_sign1 = bilinear(bxby,yv,zv,j,k,face1,tri_z,f)
# append the sublist to the main list only if it is not empty
if z_sign1:
bzz.append(z_sign1)
# by = 0 and bz = 0
bybz = x_face(face1,trilinear[1],trilinear[2])
x_sign1 = bilinear(bybz,yv,zv,j,k,face1,tri_x,f)
if x_sign1:
bxx.append(x_sign1)
# bx = 0 and bz = 0
bxbz = x_face(face1,trilinear[0],trilinear[2])
y_sign1 = bilinear(bxbz,yv,zv,j,k,face1,tri_y,f)
if y_sign1:
byy.append(y_sign1)
# FACE 2
f = 0
face2 = xv[i+1]
# bx = 0 and by = 0
# get bilinear coefficients
bxby = x_face(face2,trilinear[0],trilinear[1])
z_sign2 = bilinear(bxby,yv,zv,j,k,face2,tri_z,f)
if z_sign2:
bzz.append(z_sign2)
# by = 0 and bz = 0
bybz = x_face(face2,trilinear[1],trilinear[2])
x_sign2 = bilinear(bybz,yv,zv,j,k,face2,tri_x,f)
if x_sign2:
bxx.append(x_sign2)
# bx = 0 and bz = 0
bxbz = x_face(face2,trilinear[0],trilinear[2])
y_sign2 = bilinear(bxbz,yv,zv,j,k,face2,tri_y,f)
if y_sign2:
byy.append(y_sign2)
# FACE 3
f = 1
face3 = yv[j]
# bx = 0 and by = 0
# get bilinear coefficients
bxby = y_face(face3,trilinear[0],trilinear[1])
z_sign3 = bilinear(bxby,xv,zv,i,k,face3,tri_z,f)
if z_sign3:
bzz.append(z_sign3)
# by = 0 and bz = 0
bybz = y_face(face3,trilinear[1],trilinear[2])
x_sign3 = bilinear(bybz,xv,zv,i,k,face3,tri_x,f)
if x_sign3:
bxx.append(x_sign3)
# bx = 0 and bz = 0
bxbz = y_face(face3,trilinear[0],trilinear[2])
y_sign3 = bilinear(bxbz,xv,zv,i,k,face3,tri_y,f)
if y_sign3:
byy.append(y_sign3)
# FACE 4
f = 1
face4 = yv[j+1]
# bx = 0 and by = 0
# get bilinear coefficients
bxby = y_face(face4,trilinear[0],trilinear[1])
z_sign4 = bilinear(bxby,xv,zv,i,k,face4,tri_z,f)
if z_sign4:
bzz.append(z_sign4)
# by = 0 and bz = 0
bybz = y_face(face4,trilinear[1],trilinear[2])
x_sign4 = bilinear(bybz,xv,zv,i,k,face4,tri_x,f)
if x_sign4:
bxx.append(x_sign4)
# bx = 0 and bz = 0
bxbz = y_face(face4,trilinear[0],trilinear[2])
y_sign4 = bilinear(bxbz,xv,zv,i,k,face4,tri_y,f)
if y_sign4:
byy.append(y_sign4)
# FACE 5
f = 2
face5 = zv[k]
# bx = 0 and by = 0
# get bilinear coefficients
bxby = z_face(face5,trilinear[0],trilinear[1])
z_sign5 = bilinear(bxby,xv,yv,i,j,face5,tri_z,f)
if z_sign5:
bzz.append(z_sign5)
# by = 0 and bz = 0
bybz = z_face(face5,trilinear[1],trilinear[2])
x_sign5 = bilinear(bybz,xv,yv,i,j,face5,tri_x,f)
if x_sign5:
bxx.append(x_sign5)
# bx = 0 and bz = 0
bxbz = z_face(face5,trilinear[0],trilinear[2])
y_sign5 = bilinear(bxbz,xv,yv,i,j,face5,tri_y,f)
if y_sign5:
byy.append(y_sign5)
# FACE 6
f = 2
face6 = zv[k+1]
# bx = 0 and by = 0
# get bilinear coefficients
bxby = z_face(face6,trilinear[0],trilinear[1])
z_sign6 = bilinear(bxby,xv,yv,i,j,face6,tri_z,f)
if z_sign6:
bzz.append(z_sign6)
# by = 0 and bz = 0
bybz = z_face(face6,trilinear[1],trilinear[2])
x_sign6 = bilinear(bybz,xv,yv,i,j,face6,tri_x,f)
if x_sign6:
bxx.append(x_sign6)
# bx = 0 and bz = 0
bxbz = z_face(face6,trilinear[0],trilinear[2])
y_sign6 = bilinear(bxbz,xv,yv,i,j,face6,tri_y,f)
if y_sign6:
byy.append(y_sign6)
# making flat lists
bxx = [item for sublist in bxx for item in sublist]
byy = [item for sublist in byy for item in sublist]
bzz = [item for sublist in bzz for item in sublist]
# if the function check_sign detects a change in sign in at least one of the three field components, then a single null point must exist in the cell
# hence, apply Newton-Raphson method to find its location
if (not check_sign(bxx)) or (not check_sign(byy)) or (not check_sign(bzz)):
# if not (check_sign(bxx) and check_sign(byy) and check_sign(bzz)):
# NEWTON RAPHSON METHOD
# first guess: centre of the cube
xg = 0.5
yg = 0.5
zg = 0.5
xs = xv[i]+(xv[i+1]-xv[i])*xg
ys = yv[j]+(yv[j+1]-yv[j])*yg
zs = zv[k]+(zv[k+1]-zv[k])*zg
# grid size
delta_x = xv[i+1]-xv[i]
delta_y = yv[j+1]-yv[j]
delta_z = zv[k+1]-zv[k]
# values of solution
x = [0]
y = [0]
z = [0]
# step size
step_x = []
step_y = []
step_z = []
# error relative to the local grid size
err_rel_grid = []
# error relative to the solution
err_rel_sol = []
converged = False
# set a counter to limit the number of iterations
n_steps = 0
while (not converged) and (n_steps < 11):
n_steps += 1
# calculating B field magnitude and components at the guessed location
B = B_field(xs,ys,zs,trilinear)
jac = jacobian(xs,ys,zs,trilinear)
if np.linalg.det(jac)==0:
print('The matrix is singular')
break
else:
jac_inv = inv(jacobian(xs,ys,zs,trilinear))
xs_prev = xs
ys_prev = ys
zs_prev = zs
xs = xs_prev-(jac_inv[0,0]*B[1]+jac_inv[0,1]*B[2]+jac_inv[0,2]*B[3])
ys = ys_prev-(jac_inv[1,0]*B[1]+jac_inv[1,1]*B[2]+jac_inv[1,2]*B[3])
zs = zs_prev-(jac_inv[2,0]*B[1]+jac_inv[2,1]*B[2]+jac_inv[2,2]*B[3])
new_B = B_field(xs,ys,zs,trilinear)
step_x.append(xs-xs_prev)
step_y.append(ys-ys_prev)
step_z.append(zs-zs_prev)
x.append(xs_prev+step_x[-1])
y.append(ys_prev+step_y[-1])
z.append(zs_prev+step_z[-1])
err_rel_grid.append(math.sqrt((step_x[-1]/delta_x)**2+(step_y[-1]/delta_y)**2+(step_z[-1]/delta_z)**2))
err_rel_sol.append(math.sqrt((step_x[-1]/x[-1])**2+(step_y[-1]/y[-1])**2+(step_z[-1]/z[-1])**2))
if np.max([err_rel_grid[-1], err_rel_sol[-1]]) < tolerance:
converged = True
B1 = math.sqrt(B_x1[i,j,k]**2 + B_y1[i,j,k]**2 + B_z1[i,j,k]**2)
B2 = math.sqrt(B_x1[i+1,j,k]**2 + B_y1[i+1,j,k]**2 + B_z1[i+1,j,k]**2)
B3 = math.sqrt(B_x1[i,j+1,k]**2 + B_y1[i,j+1,k]**2 + B_z1[i,j+1,k]**2)
B4 = math.sqrt(B_x1[i+1,j+1,k]**2 + B_y1[i+1,j+1,k]**2 + B_z1[i+1,j+1,k]**2)
B5 = math.sqrt(B_x1[i,j,k+1]**2 + B_y1[i,j,k+1]**2 + B_z1[i,j,k+1]**2)
B6 = math.sqrt(B_x1[i+1,j,k+1]**2 + B_y1[i+1,j,k+1]**2 + B_z1[i+1,j,k+1]**2)
B7 = math.sqrt(B_x1[i,j+1,k+1]**2 + B_y1[i,j+1,k+1]**2 + B_z1[i,j+1,k+1]**2)
B8 = math.sqrt(B_x1[i+1,j+1,k+1]**2 + B_y1[i+1,j+1,k+1]**2 + B_z1[i+1,j+1,k+1]**2)
if not converged and n_steps > 10:  # the while-loop above caps at 11 iterations
print('Maximum number of steps exceeded -- exiting')
if converged:
if ((xv[i] <= xs <= xv[i+1]) and (yv[j] <= ys <= yv[j+1]) and (zv[k] <= zs <= zv[k+1])):
if new_B[0] < tolerance*np.mean([B1,B2,B3,B4,B5,B6,B7,B8]):
num_nulls+=1
# here if we want, we can also get the eigenvectors/eigenvalues
# use your previous function to get jacobian of magnetic field
# use numpy.linalg.eig to find eigen-stuff of jacobian
if zs <= zv[-2]: # this excludes the null points located on the null line that goes around the two outermost shells
this_null = {'i':i, 'j':j, 'k':k, 'n': num_nulls, 'x': xs, 'y': ys, 'z': zs, 'B': new_B[0], 'Error' : np.array([err_rel_grid[-1], err_rel_sol[-1]]).max(), 'iter' : n_steps }
null_list.append(this_null)
return(null_list)
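# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch of how the arrays described in the module docstring are
# expected to look before calling find_nulls. The grid size and the analytic
# field below are made-up assumptions chosen so that a single null sits near
# the origin; they are not taken from the original code.
def _example_find_nulls():
    nx = ny = nz = 8
    xv = np.linspace(-1.0, 1.0, nx)
    yv = np.linspace(-1.0, 1.0, ny)
    zv = np.linspace(-1.0, 1.0, nz)
    X, Y, Z = np.meshgrid(xv, yv, zv, indexing='ij')
    # simple divergence-free linear field B = (x, y, -2z) with a null at (0, 0, 0)
    B_x1, B_y1, B_z1 = X, Y, -2.0*Z
    nulls = find_nulls(nx, ny, nz, xv, yv, zv, B_x1, B_y1, B_z1, tolerance=1e-5)
    for null in nulls:
        print(null['n'], null['x'], null['y'], null['z'], null['B'])
    return nulls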
# function that checks if Bx/By/Bz changes sign:
# it compares the length of the list with the occurrence of each sign
# if '1' (positive) appears 8 times, then B has the same sign at all 8 corners
# similarly for -1 (negative) and 0 (field component = 0)
def check_sign(vertices):
if len(vertices) < 1:
return True
return len(vertices) == vertices.count(vertices[0])
def field_sign_change (f):
# returns a mask of dim (nx-1, ny-1, nz-1).
# true implies that the component changes signs at one of the vertices of the rhs cell.
p000 = (np.roll(f, (-0,-0,-0), axis=(0,1,2)) > 0)
p100 = (np.roll(f, (-1,-0,-0), axis=(0,1,2)) > 0)
p010 = (np.roll(f, (-0,-1,-0), axis=(0,1,2)) > 0)
p110 = (np.roll(f, (-1,-1,-0), axis=(0,1,2)) > 0)
p001 = (np.roll(f, (-0,-0,-1), axis=(0,1,2)) > 0)
p101 = (np.roll(f, (-1,-0,-1), axis=(0,1,2)) > 0)
p011 = (np.roll(f, (-0,-1,-1), axis=(0,1,2)) > 0)
p111 = (np.roll(f, (-1,-1,-1), axis=(0,1,2)) > 0)
all_pos = ( p000 & p100 & p010 & p110 & p001 & p101 & p011 & p111 )[:-1,:-1,:-1]
all_neg = ( ~p000 & ~p100 & ~p010 & ~p110 & ~p001 & ~p101 & ~p011 & ~p111 )[:-1,:-1,:-1]
fsc = ( ~all_pos & ~all_neg )
return(fsc)
# this function returns the trilinear coefficients for a particular field component 'B'
# u, v, w are the x, y, z coordinates with respective indices i,j,k
def trilinear_coeffs(u, v, w ,i, j, k, B):
a = (1/((u[i]-u[i+1])*(v[j]-v[j+1])*(w[k]-w[k+1])))*(B[i,j,k+1]*u[i+1]*v[j+1]*w[k]+B[i,j+1,k]*u[i+1]*v[j]*w[k+1]+B[i+1,j,k]*u[i]*v[j+1]*w[k+1]+B[i+1,j+1,k+1]*u[i]*v[j]*w[k]-B[i,j,k]*u[i+1]*v[j+1]*w[k+1]-B[i,j+1,k+1]*u[i+1]*v[j]*w[k]-B[i+1,j,k+1]*u[i]*v[j+1]*w[k]-B[i+1,j+1,k]*u[i]*v[j]*w[k+1])
b = (1/((u[i]-u[i+1])*(v[j]-v[j+1])*(w[k]-w[k+1])))*(B[i,j,k]*v[j+1]*w[k+1]-B[i,j,k+1]*v[j+1]*w[k]-B[i,j+1,k]*v[j]*w[k+1]+B[i,j+1,k+1]*v[j]*w[k]-B[i+1,j,k]*v[j+1]*w[k+1]+B[i+1,j,k+1]*v[j+1]*w[k]+B[i+1,j+1,k]*v[j]*w[k+1]-B[i+1,j+1,k+1]*v[j]*w[k])
c = (1/((u[i]-u[i+1])*(v[j]-v[j+1])*(w[k]-w[k+1])))*(B[i,j,k]*u[i+1]*w[k+1]-B[i,j,k+1]*u[i+1]*w[k]-B[i,j+1,k]*u[i+1]*w[k+1]+B[i,j+1,k+1]*u[i+1]*w[k]-B[i+1,j,k]*u[i]*w[k+1]+B[i+1,j,k+1]*u[i]*w[k]+B[i+1,j+1,k]*u[i]*w[k+1]-B[i+1,j+1,k+1]*u[i]*w[k])
d = (1/((u[i]-u[i+1])*(v[j]-v[j+1])*(w[k]-w[k+1])))*(-B[i,j,k]*w[k+1]+B[i,j,k+1]*w[k]+B[i,j+1,k]*w[k+1]-B[i,j+1,k+1]*w[k]+B[i+1,j,k]*w[k+1]-B[i+1,j,k+1]*w[k]-B[i+1,j+1,k]*w[k+1]+B[i+1,j+1,k+1]*w[k])
e = (1/((u[i]-u[i+1])*(v[j]-v[j+1])*(w[k]-w[k+1])))*(B[i,j,k]*u[i+1]*v[j+1]-B[i,j,k+1]*u[i+1]*v[j+1]-B[i,j+1,k]*u[i+1]*v[j]+B[i,j+1,k+1]*u[i+1]*v[j]-B[i+1,j,k]*u[i]*v[j+1]+B[i+1,j,k+1]*u[i]*v[j+1]+B[i+1,j+1,k]*u[i]*v[j]-B[i+1,j+1,k+1]*u[i]*v[j])
f = (1/((u[i]-u[i+1])*(v[j]-v[j+1])*(w[k]-w[k+1])))*(-B[i,j,k]*v[j+1]+B[i,j,k+1]*v[j+1]+B[i,j+1,k]*v[j]-B[i,j+1,k+1]*v[j]+B[i+1,j,k]*v[j+1]-B[i+1,j,k+1]*v[j+1]-B[i+1,j+1,k]*v[j]+B[i+1,j+1,k+1]*v[j])
g = (1/((u[i]-u[i+1])*(v[j]-v[j+1])*(w[k]-w[k+1])))*(-B[i,j,k]*u[i+1]+B[i,j,k+1]*u[i+1]+B[i,j+1,k]*u[i+1]-B[i,j+1,k+1]*u[i+1]+B[i+1,j,k]*u[i]-B[i+1,j,k+1]*u[i]-B[i+1,j+1,k]*u[i]+B[i+1,j+1,k+1]*u[i])
h = (1/((u[i]-u[i+1])*(v[j]-v[j+1])*(w[k]-w[k+1])))*(B[i,j,k]-B[i,j,k+1]-B[i,j+1,k]+B[i,j+1,k+1]-B[i+1,j,k]+B[i+1,j,k+1]+B[i+1,j+1,k]-B[i+1,j+1,k+1])
tri_c = [a,b,c,d,e,f,g,h]
return(tri_c)
# this function returns the magnetic field at a point location and its components
def B_field(x,y,z,k): # k is the array of trilinear coefficients
#trilinear extrapolation
bx = k[0,0] + k[0,1]*x +k[0,2]*y + k[0,3]*x*y + k[0,4]*z + k[0,5]*x*z + k[0,6]*y*z + k[0,7]*x*y*z
by = k[1,0] + k[1,1]*x +k[1,2]*y + k[1,3]*x*y + k[1,4]*z + k[1,5]*x*z + k[1,6]*y*z + k[1,7]*x*y*z
bz = k[2,0] + k[2,1]*x +k[2,2]*y + k[2,3]*x*y + k[2,4]*z + k[2,5]*x*z + k[2,6]*y*z + k[2,7]*x*y*z
#magnitude of B field at the location
magnitude = math.sqrt(bx*bx+by*by+bz*bz)
b = [magnitude,bx,by,bz]
return(b)
# this function returns the jacobian matrix calculated at a point location
def jacobian(x,y,z,k):
dbxdx = k[0,1] + k[0,3]*y + k[0,5]*z + k[0,7]*y*z
dbxdy = k[0,2] + k[0,3]*x + k[0,6]*z + k[0,7]*x*z
dbxdz = k[0,4] + k[0,5]*x + k[0,6]*y + k[0,7]*x*y
dbydx = k[1,1] + k[1,3]*y + k[1,5]*z + k[1,7]*y*z
dbydy = k[1,2] + k[1,3]*x + k[1,6]*z + k[1,7]*x*z
dbydz = k[1,4] + k[1,5]*x + k[1,6]*y + k[1,7]*x*y
dbzdx = k[2,1] + k[2,3]*y + k[2,5]*z + k[2,7]*y*z
dbzdy = k[2,2] + k[2,3]*x + k[2,6]*z + k[2,7]*x*z
dbzdz = k[2,4] + k[2,5]*x + k[2,6]*y + k[2,7]*x*y
jac = np.array([[dbxdx,dbxdy,dbxdz],[dbydx,dbydy,dbydz],[dbzdx,dbzdy,dbzdz]])
return(jac)
# the following 3 functions determine the bilinear coefficients according to the face that is being analysed
# coord = face that is being analysed (i.e face with coordinate x = ...)
# j and k are the trilinear coefficients for the two field components used for the intersection
def x_face(coord,j,k):
a1 = j[0] + j[1]*coord
a2 = k[0] + k[1]*coord
b1 = j[2] + j[3]*coord
b2 = k[2] + k[3]*coord
c1 = j[4] + j[5]*coord
c2 = k[4] + k[5]*coord
d1 = j[6] + j[7]*coord
d2 = k[6] + k[7]*coord
coeff = np.array([[a1,b1,c1,d1],[a2,b2,c2,d2]])
return(coeff)
def y_face(coord,j,k):
a1 = j[0] + j[2]*coord
a2 = k[0] + k[2]*coord
b1 = j[1] + j[3]*coord
b2 = k[1] + k[3]*coord
c1 = j[4] + j[6]*coord
c2 = k[4] + k[6]*coord
d1 = j[5] + j[7]*coord
d2 = k[5] + k[7]*coord
coeff = np.array([[a1,b1,c1,d1],[a2,b2,c2,d2]])
return(coeff)
def z_face(coord,j,k):
a1 = j[0] + j[4]*coord
a2 = k[0] + k[4]*coord
b1 = j[1] + j[5]*coord
b2 = k[1] + k[5]*coord
c1 = j[2] + j[6]*coord
c2 = k[2] + k[6]*coord
d1 = j[3] + j[7]*coord
d2 = k[3] + k[7]*coord
coeff = np.array([[a1,b1,c1,d1],[a2,b2,c2,d2]])
return(coeff)
# this function returns the roots of a quadratic equation
# k is the array of bilinear coefficients
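# Concretely: on a face each component restricts to the bilinear form
# a + b*u + c*v + d*u*v = 0; eliminating v between the two components leaves the
# quadratic (b1*d2 - b2*d1)*u^2 + (a1*d2 - a2*d1 + b1*c2 - b2*c1)*u + (a1*c2 - a2*c1) = 0,
# whose coefficients are exactly the 2x2 cross terms computed below.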
def quad_roots (k):
a = k[0,1]*k[1,3]-k[1,1]*k[0,3]
b = k[0,0]*k[1,3]-k[1,0]*k[0,3]+k[0,1]*k[1,2]-k[1,1]*k[0,2]
c = k[0,0]*k[1,2]-k[1,0]*k[0,2]
if (b*b-4*a*c)>0 and a !=0:
root1 = (-b+math.sqrt(b*b-4*a*c))/(2*a)
root2 = (-b-math.sqrt(b*b-4*a*c))/(2*a)
sol = np.array([root1,root2])
return(sol)
# given the intersection of two field components, this function returns the sign of the third component at the two roots
# bi_coeff = bilinear coefficients for the intersecting field lines
# u and v are the directions along which we consider the intersection (i.e. bx = by = 0 --> u = x and v = y)
# u_i and v_i are the indices associated with u and v (i,j,k)
# face is the face of the cell that we consider for the analysis
# tri_ is an array of the trilinear coefficients
# k is a parameter that tells on which face we are ( k=0 for face = x, k = 1 for face = y, k = 2 for face = z)
def bilinear(bi_coeff, u, v, u_i, v_i, face, tri_,k):
b_sign = []
a = bi_coeff[0,1]*bi_coeff[1,3]-bi_coeff[1,1]*bi_coeff[0,3]
b = bi_coeff[0,0]*bi_coeff[1,3]-bi_coeff[1,0]*bi_coeff[0,3]+bi_coeff[0,1]*bi_coeff[1,2]-bi_coeff[1,1]*bi_coeff[0,2]
c = bi_coeff[0,0]*bi_coeff[1,2]-bi_coeff[1,0]*bi_coeff[0,2]
#bilinear test applies only if determinant is greater than zero and a is non zero
if (b*b - 4*a*c) > 0 and a != 0:
u_roots = quad_roots(bi_coeff)
# check that each root lies within the range given by the two corners of the cell
if u[u_i] <= u_roots[0] <= u[u_i+1]:
foo = bi_coeff[0,0] + bi_coeff[0,1]*u_roots[0]
bar = bi_coeff[0,2] + bi_coeff[0,3]*u_roots[0]
if bar != 0:
v1 = -foo/bar
if v[v_i] <= v1 <= v[v_i+1]:
if k == 0:
#calculate third components magnitude at the first root by using trilinear expression
b1 = tri_[0] + tri_[1]*face + tri_[2]*u_roots[0] + tri_[3]*face*u_roots[0] + tri_[4]*v1 + tri_[5]*v1*face + tri_[6]*u_roots[0]*v1 + tri_[7]*u_roots[0]*face*v1
b_sign.append(np.sign(b1))
import io
import multiprocessing as mp
import os
import pickle
from glob import glob
import numpy as np
from datetime import datetime
import struct
import time
import pandas as pd
import sys
import itertools as it
n_cores = 16
res_abs = mp.Manager().list()
res_ent = mp.Manager().list()
with open('/home/mephistopheies/storage2/data/camera-model-id/tmp/p_test_985_m1_u0.pkl', 'rb') as f:
p_test = pickle.load(f)
classes = sorted([k for (k, _) in list(p_test.values())[0]])
Q = np.zeros((len(p_test), len(classes)), dtype=np.float32)
files = []
for fname, d in p_test.items():
d = dict(d)
for ix, c in enumerate(classes):
Q[len(files), ix] = d[c]
files.append(fname)
grid = np.arange(0.6, 1.5, 0.2)
grid = list(it.product(*([grid]*10)))
print(len(grid))
def process(q, iolock):
n = 0
min_abs = 999
w_min_abs = None
max_ent = -1
w_max_ent = None
while True:
w = q.get()
if w is None:
res_abs.append((min_abs, w_min_abs))
res_ent.append((max_ent, w_max_ent))
break
w = np.array(w)
P = Q*w
P = P/P.sum(axis=1)[:, np.newaxis]
dist = P.sum(axis=0)/264
z = np.abs(1 - dist)
"""
Contains some common helper functions for output modules
"""
import datetime
import numpy as np
from scipy import special
from src.common.config.global_config import cfg
def map_x_to_image(y):
"""
Map x-axis (griding_num) estimations to image coordinates
Args:
y: one result sample (can be directly from net or post-processed -> all number types should be accepted)
Returns: x coordinates for each lane
"""
lanes = []
offset = 0.5 # different values used in ufld project. demo: 0.0, test: 0.5
for i in range(y.shape[1]):
out_i = y[:, i]
lane = [
int((loc + offset) * float(cfg.img_width) / (cfg.griding_num - 1))
# int(round((loc + 0.5) * float(cfg.img_width) / (cfg.griding_num - 1)))
if loc != -2
else -2
for loc in out_i
]
lanes.append(lane)
return lanes
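# Worked example (illustrative numbers only): assuming cfg.griding_num = 200 and
# cfg.img_width = 1280 -- values made up here, the real ones come from the global
# config -- a grid location loc = 100 maps to int((100 + 0.5) * 1280 / 199) = 646
# pixels from the left image border.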
def evaluate_predictions(y):
"""
Evaluate predictions
Tries to improve the estimation by including all probabilities instead of only using the most probable class
Args:
y: one result sample
Returns:
2D array containing x values (float) per h_sample and lane
"""
out = y.data.cpu().numpy() # load data to cpu and convert to numpy
out_loc = np.argmax(out, axis=0)  # get the most probable x-class per lane and h_sample
# refine the x estimate: take the expectation over the softmax of the location
# classes instead of relying on the hard argmax alone
prob = special.softmax(out[:-1, :, :], axis=0) # relative probability with sum() == 1.0
idx = np.arange(cfg.griding_num)
from flask import Flask, request, Response
import cv2
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
import numpy as np
import cv2
import keras
from keras.models import load_model
model = load_model("my_model_vgg.h5")
model._make_predict_function()
def preprocess_image(image):
sobely = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=5)
return sobely
def sort_key(elem):
return elem[0]
def predict_classes(path):
img = cv2.imread(path)
img = cv2.resize(img, (64,64))
img = preprocess_image(img)
img = np.array(img).astype(np.float64)
img -= np.mean(img)
# -*- coding: utf-8 -*-
# Author : <NAME>
# e-mail : <EMAIL>
# Powered by Seculayer © 2021 Service Model Team, R&D Center.
import tensorflow as tf
from mlps.common.Common import Common
from mlps.common.exceptions.ParameterError import ParameterError
from mlps.core.apeflow.api.algorithms.tf.keras.TFKerasAlgAbstract import TFKerasAlgAbstract
from mlps.core.apeflow.interface.utils.tf.TFUtils import TFUtils
class KRNN(TFKerasAlgAbstract):
# MODEL INFORMATION
ALG_CODE = "KRNN"
ALG_TYPE = ["Classifier", "Regressor"]
DATA_TYPE = ["Single"]
VERSION = "1.0.0"
def __init__(self, param_dict, ext_data=None):
super(KRNN, self).__init__(param_dict, ext_data)
def _check_parameter(self, param_dict):
_param_dict = super(KRNN, self)._check_parameter(param_dict)
# Parameter Setting
try:
_param_dict["hidden_units"] = list(map(int, str(param_dict["hidden_units"]).split(",")))
_param_dict["cell_units"] = int(param_dict["cell_units"])
_param_dict["act_fn"] = str(param_dict["act_fn"])
_param_dict["model_nm"] = str(param_dict["model_nm"])
_param_dict["alg_sn"] = str(param_dict["alg_sn"])
_param_dict["algorithm_type"] = str(param_dict["algorithm_type"])
_param_dict["rnn_cell"] = str(param_dict["rnn_cell"])
_param_dict["dropout_prob"] = float(param_dict["dropout_prob"])
_param_dict["optimizer_fn"] = str(param_dict["optimizer_fn"])
_param_dict["learning_rate"] = float(param_dict["learning_rate"])
_param_dict["seq_length"] = int(param_dict["seq_length"])
except:
raise ParameterError
return _param_dict
def _build(self):
# Parameter Setting
input_units = self.param_dict["input_units"]
output_units = self.param_dict["output_units"]
hidden_units = self.param_dict["hidden_units"]
act_fn = self.param_dict["act_fn"]
model_nm = self.param_dict["model_nm"]
alg_sn = self.param_dict["alg_sn"]
cell_units = self.param_dict["cell_units"]
rnn_cell = self.param_dict["rnn_cell"]
dropout_prob = self.param_dict["dropout_prob"]
optimizer_fn = self.param_dict["optimizer_fn"]
learning_rate = self.param_dict["learning_rate"]
seq_length = self.param_dict["seq_length"]
activation = eval(Common.ACTIVATE_FN_CODE_DICT[act_fn])
# Generate to Keras Model
self.model = tf.keras.Sequential()
self.inputs = tf.keras.Input(shape=input_units, name="{}_{}_X".format(model_nm, alg_sn))
self.model.add(self.inputs)
cell = None
if rnn_cell == "RNN":
cell = tf.keras.layers.SimpleRNN(
units=cell_units,
activation=activation,
dropout=dropout_prob,
# input_shape=(seq_length, input_units),
name="{}_{}_cell".format(model_nm, alg_sn),
)
elif rnn_cell == "GRU":
cell = tf.keras.layers.GRU(
units=cell_units,
activation=activation,
dropout=dropout_prob,
# input_shape=(seq_length, input_units),
name="{}_{}_cell".format(model_nm, alg_sn),
)
elif rnn_cell == "LSTM":
cell = tf.keras.layers.LSTM(
units=cell_units,
activation=activation,
dropout=dropout_prob,
# input_shape=(seq_length, input_units),
name="{}_{}_cell".format(model_nm, alg_sn),
)
#
# layer = tf.keras.layers.RNN(
# cell=cell,
# dtype=tf.float32,
# return_state=True
# )
self.model.add(cell)
units = TFUtils.get_units(cell_units, hidden_units, output_units)
# TFNNFactory.feedforward_network_keras(self.model, units, activation, self.param_dict, input_layer=False)
model_nm = "{}_{}".format(self.param_dict["model_nm"], self.param_dict["alg_sn"])
TFUtils.tf_keras_mlp_block_v2(
self.model, units, activation,
dropout_prob=self.param_dict["dropout_prob"], name=model_nm, alg_type=self.param_dict["algorithm_type"]
)
self.predicts = self.model.get_layer(index=-1)
if self.param_dict["algorithm_type"] == "Classifier":
self.model.compile(
loss='categorical_crossentropy',
optimizer=eval(Common.OPTIMIZER_FN_CODE_DICT[optimizer_fn])(learning_rate),
metrics=['accuracy']
)
elif self.param_dict["algorithm_type"] == "Regressor":
self.model.compile(
loss="mse",
optimizer=eval(Common.OPTIMIZER_FN_CODE_DICT[optimizer_fn])(learning_rate),
)
self.model.summary(print_fn=self.LOGGER.info)
if __name__ == '__main__':
physical_devices = tf.config.list_physical_devices('GPU')
print("physical devices: ", physical_devices)
tf.config.experimental.set_memory_growth(physical_devices[0], True)
__param_dict = {
"algorithm_code": "KRNN",
"algorithm_type": "Classifier",
"data_type": "Single",
"method_type": "Basic",
"input_units": (2,),
"output_units": "2",
"hidden_units": "64,32,4",
"global_step": "100",
"dropout_prob": "0.2",
"optimizer_fn": "Adadelta",
"model_nm": "KRNN-0",
"alg_sn": "0",
"job_type": "learn",
"depth": "0",
"global_sn": "0",
"learning_rate": "0.1",
"rnn_cell": "GRU",
"cell_units": "4",
"seq_length": "1",
"act_fn": "Tanh",
"early_type": "0",
"minsteps": "100",
"early_key": "loss",
"early_value": "0.0002",
"num_workers": "1"
}
# classifier
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# rnn = KRNN(param_dict=param_dict)
#
# x = [
# [
# [1,2,3,4,5],
# [1,2,3,4,5],
# [1,2,3,4,5],
# [1,2,3,4,5]
# ],
# [
# [1, 2, 3, 4, 5],
# [1, 2, 3, 4, 5],
# [1, 2, 3, 4, 5],
# [1, 2, 3, 4, 5]
# ],
# ]
# import numpy as np
#
# x = np.array(x)
# y = np.array([[1, 0], [1, 0]])
# rnn.model.fit(x, y=y, epochs=100)
# regressor
import numpy as np
dataset = {
"x": np.array([[[-1., -1.]], [[-2., -1.]], [[1., 1.]], [[2., 1.]]]),
# "y": np.array([[0.5, 0.5], [0.8, 0.2], [0.3, 0.7], [0.1, 0.9]]),
"y": | np.array([[1, 0], [1, 0], [1, 0], [1, 0]]) | numpy.array |
from numpy import isnan, take, any, all, logical_or, logical_and, logical_not, atleast_1d, \
asarray, argmin, argsort, abs, isfinite, dot#where
import numpy as np
# for PyPy
from openopt.kernel.nonOptMisc import where
from bisect import bisect_right
from FuncDesigner.Interval import splitDomainForDiscreteVariable
try:
from bottleneck import nanmin
except ImportError:
from numpy import nanmin
def getTruncatedArrays(ind, y, e, indT, _s):
# TODO: rework it when numpy will have appropriate inplace function
s = ind.size
y = take(y, ind, axis=0, out=y[:s])
e = take(e, ind, axis=0, out=e[:s])
_s = _s[ind]
if indT is not None:
indT = indT[ind]
return y, e, indT, _s#, nlh, nlh_0
def adjustDiscreteVarBounds(y, e, p):
# TODO: rework it
#n = p.n
# TODO: remove the cycle, use vectorization
for i in p._discreteVarsNumList:
v = p._freeVarsList[i]
y[:, i], e[:, i] = splitDomainForDiscreteVariable(y[:, i], e[:, i], v)
# ind = y>e
# assert not any(ind)
# y[ind], e[ind] = e[ind], y[ind]
# Ind = any(y>e, 1)
# trunc_ind = where(logical_not(Ind))[0]
# # TODO: is it triggered? // updated: can be from MOP or cons
# if any(Ind):
# ind = where(logical_not(Ind))[0]
# s = ind.size
# y = take(y, ind, axis=0, out=y[:s])
# e = take(e, ind, axis=0, out=e[:s])
# _s = _s[ind]
# if indT is not None:
# indT = indT[ind]
return y, e#, trunc_ind#_s, indT
def func7(y, e, o, a, _s, indT, nlhc, residual):
r10 = logical_and(all(isnan(o), 1), all(isnan(a), 1))
if any(r10):
j = where(logical_not(r10))[0]
lj = j.size
y = take(y, j, axis=0, out=y[:lj])
e = take(e, j, axis=0, out=e[:lj])
o = take(o, j, axis=0, out=o[:lj])
a = take(a, j, axis=0, out=a[:lj])
_s = _s[j]
if indT is not None:
indT = indT[j]
if nlhc is not None:
nlhc = take(nlhc, j, axis=0, out=nlhc[:lj])
if residual is not None:
residual = take(residual, j, axis=0, out=residual[:lj])
return y, e, o, a, _s, indT, nlhc, residual
def func9(an, fo, g, p):
#ind = searchsorted(ar, fo, side='right')
if p.probType in ('NLSP', 'SNLE') and p.maxSolutions != 1:
mino = atleast_1d([node.key for node in an])
ind = mino > 0
if not any(ind):
return an, g
else:
g = nanmin((g, nanmin(mino[ind])))
ind2 = where(logical_not(ind))[0]
#an = take(an, ind2, axis=0, out=an[:ind2.size])
#an = asarray(an[ind2])
an = [an[i] for i in ind2]
return an, g
elif p.solver.dataHandling == 'sorted':
#OLD
mino = [node.key for node in an]
ind = bisect_right(mino, fo)
if ind == len(mino):
return an, g
else:
g = nanmin((g, nanmin(atleast_1d(mino[ind]))))
return an[:ind], g
elif p.solver.dataHandling == 'raw':
#NEW
mino = atleast_1d([node.key for node in an])
r10 = mino > fo
if not any(r10):
return an, g
else:
ind = where(r10)[0]
g = nanmin((g, nanmin(atleast_1d(mino)[ind])))
#an = asarray(an)
ind2 = where(logical_not(r10))[0]
#an = take(an, ind2, axis=0, out=an[:ind2.size])
an = [an[i] for i in ind2]
return an, g
# NEW 2
# curr_tnlh = [node.tnlh_curr for node in an]
# import warnings
# warnings.warn('! fix g')
return an, g
else:
assert 0, 'incorrect nodes remove approach'
def func5(an, nn, g, p):
m = len(an)
if m <= nn: return an, g
mino = np.array([node.key for node in an])
if nn == 1: # box-bound probs with exact interval analysis
ind = argmin(mino)
assert ind in (0, 1), 'error in interalg engine'
g = nanmin((mino[1-ind], g))
an = [an[ind]]
elif m > nn:
if p.solver.dataHandling == 'raw':
ind = argsort(mino)
import pandas as pd
from pathlib import Path
import pylab as pl
import my_figure as myfig
from scipy.stats import ttest_ind, ttest_1samp
import numpy as np
from tqdm import tqdm
import cv2
from deepposekit.io import DataGenerator
from matplotlib.colors import to_rgb
import imageio
from PIL import Image
#from my_general_helpers import butter_lowpass_filter
def curvature(x1, y1, x2, y2, x3, y3):#, x4, y4, x5, y5):
dx1 = x1 - x2
dy1 = y1 - y2
dx2 = x2 - x3
dy2 = y2 - y3
# dx3 = x2 - x3
# dy3 = y2 - y3
# dx4 = x3 - x4
# dy4 = y3 - y4
#
# dx5 = x3 - x4
# dy5 = y3 - y4
# dx6 = x4 - x5
# dy6 = y4 - y5
dotProduct1 = dx1 * dx2 + dy1 * dy2
modOfVectors1 = np.sqrt(dx1**2 + dy1**2) * np.sqrt(dx2**2 + dy2**2)
#
# dotProduct2 = dx3 * dx4 + dy3 * dy4
# modOfVectors2 = np.sqrt(dx3**2 + dy3**2) * np.sqrt(dx4**2 + dy4**2)
#
# dotProduct3 = dx5 * dx6 + dy5 * dy6
# modOfVectors3 = np.sqrt(dx5**2 + dy5**2) * np.sqrt(dx6**2 + dy6**2)
return np.degrees(np.arccos(dotProduct1/modOfVectors1))# + \
#np.degrees(np.arccos(dotProduct2/modOfVectors2)) + \
#np.degrees(np.arccos(dotProduct3/modOfVectors3))
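# The value returned above is the turning angle (in degrees) at the middle point:
# with d1 = p1 - p2 and d2 = p2 - p3 it evaluates arccos(d1 . d2 / (|d1||d2|)), so
# three collinear points give 0 deg and a right-angle bend gives 90 deg. The
# commented-out blocks extend the same idea to five consecutive points.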
def angle_between_points(x1, y1, x2, y2, x3, y3):
ang1 = np.degrees(np.arctan2(y1 - y2, x1 - x2))
ang2 = np.degrees(np.arctan2(y3 - y2, x3 - x2))
if np.ndim(x1) == 0:
if ang1 < 0:
ang1 = 360 + ang1
if ang2 < 0:
ang2 = 360 + ang2
if ang2 > ang1:
ang2 -= 360
else:
ind = np.where(ang1 < 0)
ang1[ind] = 360 + ang1[ind]
ind = np.where(ang2 < 0)
ang2[ind] = 360 + ang2[ind]
ind = np.where(ang2 > ang1)
ang2[ind] -= 360
return (ang1 - ang2) - 180
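# angle_between_points returns the signed deviation from a straight line at the
# middle point (x2, y2): ang1 and ang2 are the headings of the two segments seen
# from that point, unwrapped so that ang1 >= ang2, and (ang1 - ang2) - 180 is 0
# for collinear points and +/-90 for a right-angle bend, with the sign giving the
# turn direction.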
def luminance_equation(x, y):
r = np.sqrt(x**2 + y**2)
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import numpy as np
import popart
import torch
import pytest
from op_tester import op_tester
def test_erf_0(op_tester):
x = np.array([0., -1., 10.]).astype(np.float32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
o = builder.aiOnnx.erf([i0])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
tx = torch.tensor(x)
out = torch.erf(tx)
return [out]
op_tester.run(init_builder, reference, 'infer')
@pytest.mark.parametrize("dtype", [np.float32, np.float16])
def test_erf_0b(op_tester, dtype):
x = np.array([0., -1., 10.]).astype(dtype)
expected = np.array([0., -0.84270079, 1.]).astype(dtype)
def init_builder(builder):
i0 = builder.addInputTensor(x)
o = builder.aiOnnx.erf([i0])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
return [expected]
# Lower precision for float16
if dtype == np.float16:
op_tester.atol = 1e-03
op_tester.run(init_builder, reference, 'infer')
def test_erf_1(op_tester):
x = np.random.randn(1, 3, 32, 32)
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
RMP2
'''
import copy
import numpy
from pyscf import gto
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.ao2mo import _ao2mo
from pyscf import __config__
WITH_T2 = getattr(__config__, 'mp_mp2_with_t2', True)
def kernel(mp, mo_energy=None, mo_coeff=None, eris=None, with_t2=WITH_T2, verbose=None):
if mo_energy is not None or mo_coeff is not None:
# For backward compatibility. In pyscf-1.4 or earlier, mp.frozen is
# not supported when mo_energy or mo_coeff is given.
assert(mp.frozen == 0 or mp.frozen is None)
if eris is None:
eris = mp.ao2mo(mo_coeff)
if mo_energy is None:
mo_energy = eris.mo_energy
nocc = mp.nocc
nvir = mp.nmo - nocc
eia = mo_energy[:nocc,None] - mo_energy[None,nocc:]
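    # eia[i, a] = e_i - e_a holds the occupied-virtual orbital-energy differences;
    # the pair denominators e_i + e_j - e_a - e_b used below are assembled from it
    # via lib.direct_sum. For a closed-shell reference the correlation energy being
    # accumulated is E(2) = sum_{ijab} <ia|jb> (2<ia|jb> - <ib|ja>) / (e_i + e_j - e_a - e_b).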
if with_t2:
t2 = numpy.empty((nocc,nocc,nvir,nvir), dtype=eris.ovov.dtype)
else:
t2 = None
emp2 = 0
for i in range(nocc):
if isinstance(eris.ovov, numpy.ndarray) and eris.ovov.ndim == 4:
# When mf._eri is a custom integral array with the shape (n,n,n,n), the
# ovov integrals might be in a 4-index tensor.
gi = eris.ovov[i]
else:
gi = numpy.asarray(eris.ovov[i*nvir:(i+1)*nvir])
gi = gi.reshape(nvir,nocc,nvir).transpose(1,0,2)
t2i = gi.conj()/lib.direct_sum('jb+a->jba', eia, eia[i])
emp2 += numpy.einsum('jab,jab', t2i, gi)
import numpy as np
from econml.utilities import cross_product
from statsmodels.tools.tools import add_constant
class AbstracDynamicPanelDGP:
def __init__(self, n_periods, n_treatments, n_x):
self.n_periods = n_periods
self.n_treatments = n_treatments
self.n_x = n_x
return
def create_instance(self, *args, **kwargs):
pass
def _gen_data_with_policy(self, n_units, policy_gen, random_seed=123):
pass
def static_policy_data(self, n_units, tau, random_seed=123):
def policy_gen(Tpre, X, period):
return tau[period]
return self._gen_data_with_policy(n_units, policy_gen, random_seed=random_seed)
def adaptive_policy_data(self, n_units, policy_gen, random_seed=123):
return self._gen_data_with_policy(n_units, policy_gen, random_seed=random_seed)
def static_policy_effect(self, tau, mc_samples=1000):
Y_tau, _, _, _ = self.static_policy_data(mc_samples, tau)
Y_zero, _, _, _ = self.static_policy_data(
mc_samples, np.zeros((self.n_periods, self.n_treatments)))
return np.mean(Y_tau[np.arange(Y_tau.shape[0]) % self.n_periods == self.n_periods - 1]) - \
np.mean(Y_zero[np.arange(Y_zero.shape[0]) %
self.n_periods == self.n_periods - 1])
def adaptive_policy_effect(self, policy_gen, mc_samples=1000):
Y_tau, _, _, _ = self.adaptive_policy_data(mc_samples, policy_gen)
Y_zero, _, _, _ = self.static_policy_data(
mc_samples, np.zeros((self.n_periods, self.n_treatments)))
return np.mean(Y_tau[np.arange(Y_tau.shape[0]) % self.n_periods == self.n_periods - 1]) - \
np.mean(Y_zero[np.arange(Y_zero.shape[0]) %
self.n_periods == self.n_periods - 1])
class DynamicPanelDGP(AbstracDynamicPanelDGP):
def __init__(self, n_periods, n_treatments, n_x):
super().__init__(n_periods, n_treatments, n_x)
def create_instance(self, s_x, sigma_x, sigma_y, conf_str, hetero_strength=0, hetero_inds=None,
autoreg=.5, state_effect=.5, random_seed=123):
np.random.seed(random_seed)
self.s_x = s_x
self.conf_str = conf_str
self.sigma_x = sigma_x
self.sigma_y = sigma_y
self.hetero_inds = hetero_inds.astype(
int) if hetero_inds is not None else hetero_inds
self.endo_inds = np.setdiff1d(
np.arange(self.n_x), hetero_inds).astype(int)
# The first s_x state variables are confounders. The final s_x variables are exogenous and can create
# heterogeneity
self.Alpha = np.random.uniform(-1, 1,
size=(self.n_x, self.n_treatments))
self.Alpha /= np.linalg.norm(self.Alpha, axis=1, ord=1, keepdims=True)
self.Alpha *= state_effect
if self.hetero_inds is not None:
self.Alpha[self.hetero_inds] = 0
self.Beta = np.zeros((self.n_x, self.n_x))
for t in range(self.n_x):
self.Beta[t, :] = autoreg * np.roll(np.random.uniform(low=4.0**(-np.arange(
0, self.n_x)), high=4.0**(-np.arange(1, self.n_x + 1))), t)
if self.hetero_inds is not None:
self.Beta[np.ix_(self.endo_inds, self.hetero_inds)] = 0
self.Beta[np.ix_(self.hetero_inds, self.endo_inds)] = 0
self.epsilon = np.random.uniform(-1, 1, size=self.n_treatments)
self.zeta = np.zeros(self.n_x)
self.zeta[:self.s_x] = self.conf_str / self.s_x
self.y_hetero_effect = np.zeros(self.n_x)
self.x_hetero_effect = np.zeros(self.n_x)
if self.hetero_inds is not None:
self.y_hetero_effect[self.hetero_inds] = np.random.uniform(.5 * hetero_strength, 1.5 * hetero_strength) / \
len(self.hetero_inds)
self.x_hetero_effect[self.hetero_inds] = np.random.uniform(.5 * hetero_strength, 1.5 * hetero_strength) / \
len(self.hetero_inds)
self.true_effect = np.zeros((self.n_periods, self.n_treatments))
self.true_effect[0] = self.epsilon
for t in np.arange(1, self.n_periods):
self.true_effect[t, :] = (self.zeta.reshape(
1, -1) @ np.linalg.matrix_power(self.Beta, t - 1) @ self.Alpha)
self.true_hetero_effect = np.zeros(
(self.n_periods, (self.n_x + 1) * self.n_treatments))
self.true_hetero_effect[0, :] = cross_product(add_constant(self.y_hetero_effect.reshape(1, -1), has_constant='add'),
self.epsilon.reshape(1, -1))
for t in np.arange(1, self.n_periods):
self.true_hetero_effect[t, :] = cross_product(add_constant(self.x_hetero_effect.reshape(1, -1), has_constant='add'),
self.zeta.reshape(1, -1) @ np.linalg.matrix_power(self.Beta, t - 1) @ self.Alpha)
return self
def hetero_effect_fn(self, t, x):
if t == 0:
return (np.dot(self.y_hetero_effect, x.flatten()) + 1) * self.epsilon
else:
return (np.dot(self.x_hetero_effect, x.flatten()) + 1) *\
(self.zeta.reshape(1, -1) @ np.linalg.matrix_power(self.Beta, t - 1)
@ self.Alpha).flatten()
def _gen_data_with_policy(self, n_units, policy_gen, random_seed=123):
np.random.seed(random_seed)
Y = np.zeros(n_units * self.n_periods)
T = np.zeros((n_units * self.n_periods, self.n_treatments))
X = np.zeros((n_units * self.n_periods, self.n_x))
groups = np.zeros(n_units * self.n_periods)
for t in range(n_units * self.n_periods):
period = t % self.n_periods
if period == 0:
X[t] = np.random.normal(0, self.sigma_x, size=self.n_x)
T[t] = policy_gen(np.zeros(self.n_treatments), X[t], period)
else:
X[t] = (np.dot(self.x_hetero_effect, X[t - 1]) + 1) * np.dot(self.Alpha, T[t - 1]) + \
np.dot(self.Beta, X[t - 1]) + \
np.random.normal(0, self.sigma_x, size=self.n_x)
T[t] = policy_gen(T[t - 1], X[t], period)
Y[t] = (np.dot(self.y_hetero_effect, X[t]) + 1) * np.dot(self.epsilon, T[t]) + \
np.dot(X[t], self.zeta) + \
np.random.normal(0, self.sigma_y)
groups[t] = t // self.n_periods
return Y, T, X, groups
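    # The panel generated above follows the recursion implemented in the loop:
    #   X_t = (x_hetero_effect . X_{t-1} + 1) * Alpha T_{t-1} + Beta X_{t-1} + N(0, sigma_x)
    #   Y_t = (y_hetero_effect . X_t + 1) * epsilon . T_t + zeta . X_t + N(0, sigma_y)
    # with T_t drawn from the supplied policy_gen and the state reset at the start
    # of each unit's n_periods-long block.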
def observational_data(self, n_units, gamma, s_t, sigma_t, random_seed=123):
""" Generated observational data with some observational treatment policy parameters
Parameters
----------
n_units : how many units to observe
gamma : what is the degree of auto-correlation of the treatments across periods
s_t : sparsity of treatment policy; how many states does it depend on
sigma_t : what is the std of the exploration/randomness in the treatment
"""
Delta = np.zeros((self.n_treatments, self.n_x))
Delta[:, :s_t] = self.conf_str / s_t
def policy_gen(Tpre, X, period):
return gamma * Tpre + (1 - gamma) * np.dot(Delta, X) + \
np.random.normal(0, sigma_t, size=self.n_treatments)
return self._gen_data_with_policy(n_units, policy_gen, random_seed=random_seed)
class LongRangeDynamicPanelDGP(DynamicPanelDGP):
def __init__(self, n_periods, n_treatments, n_x):
super().__init__(n_periods, n_treatments, n_x)
def create_instance(self, s_x, sigma_x, sigma_y, conf_str, hetero_strength=0, hetero_inds=None,
autoreg=.5, state_effect=.5, random_seed=123):
np.random.seed(random_seed)
self.s_x = s_x
self.conf_str = conf_str
self.sigma_x = sigma_x
self.sigma_y = sigma_y
self.hetero_inds = hetero_inds.astype(
int) if hetero_inds is not None else hetero_inds
self.endo_inds = np.setdiff1d(
np.arange(self.n_x), hetero_inds).astype(int)
# The first s_x state variables are confounders. The final s_x variables are exogenous and can create
# heterogeneity
self.Alpha = state_effect * np.ones((self.n_x, self.n_treatments))
if self.hetero_inds is not None:
self.Alpha[self.hetero_inds] = 0
self.Beta = autoreg * np.eye(self.n_x)
self.epsilon = np.random.uniform(-1, 1, size=self.n_treatments)
self.zeta = np.zeros(self.n_x)
self.zeta[:self.s_x] = self.conf_str / self.s_x
self.y_hetero_effect = np.zeros(self.n_x)
self.x_hetero_effect = np.zeros(self.n_x)
if self.hetero_inds is not None:
self.y_hetero_effect[self.hetero_inds] = np.random.uniform(.5 * hetero_strength, 1.5 * hetero_strength) / \
len(self.hetero_inds)
self.x_hetero_effect[self.hetero_inds] = np.random.uniform(.5 * hetero_strength, 1.5 * hetero_strength) / \
len(self.hetero_inds)
self.true_effect = np.zeros((self.n_periods, self.n_treatments))
self.true_effect[0] = self.epsilon
for t in np.arange(1, self.n_periods):
self.true_effect[t, :] = (self.zeta.reshape(
1, -1) @ np.linalg.matrix_power(self.Beta, t - 1) @ self.Alpha)
self.true_hetero_effect = np.zeros(
(self.n_periods, (self.n_x + 1) * self.n_treatments))
self.true_hetero_effect[0, :] = cross_product(add_constant(self.y_hetero_effect.reshape(1, -1), has_constant='add'),
self.epsilon.reshape(1, -1))
for t in np.arange(1, self.n_periods):
self.true_hetero_effect[t, :] = cross_product(add_constant(self.x_hetero_effect.reshape(1, -1), has_constant='add'),
self.zeta.reshape(1, -1) @ np.linalg.matrix_power(self.Beta, t - 1) @ self.Alpha)
return self
class EndogenousDynamicPanelDGP(DynamicPanelDGP):
def __init__(self, n_periods, n_treatments, n_x):
super().__init__(n_periods, n_treatments, n_x)
def create_instance(self, s_x, sigma_x, sigma_y, conf_str, hetero_strength=0, hetero_inds=None,
autoreg=.5, state_effect=.5, random_seed=123):
np.random.seed(random_seed)
self.s_x = s_x
self.conf_str = conf_str
self.sigma_x = sigma_x
self.sigma_y = sigma_y
self.hetero_inds = hetero_inds.astype(
int) if hetero_inds is not None else hetero_inds
# The first s_x state variables are confounders. The final s_x variables are exogenous and can create
# heterogeneity
self.Alpha = state_effect * \
np.ones((self.n_x, self.n_treatments))/self.n_treatments
self.Beta = autoreg * np.eye(self.n_x)
self.epsilon = np.random.uniform(-1, 1, size=self.n_treatments)
self.zeta = np.zeros(self.n_x)
self.zeta[:self.s_x] = self.conf_str / self.s_x
self.y_hetero_effect = np.zeros(self.n_x)
self.x_hetero_effect = np.zeros(self.n_x)
if self.hetero_inds is not None:
self.y_hetero_effect[self.hetero_inds] = np.random.uniform(.5 * hetero_strength, 1.5 * hetero_strength) / \
len(self.hetero_inds)
self.x_hetero_effect[self.hetero_inds] = np.random.uniform(.5 * hetero_strength, 1.5 * hetero_strength) / \
len(self.hetero_inds)
self.true_effect = np.zeros((self.n_periods, self.n_treatments))
import numpy as np
from simfempy import fems
from simfempy.applications.application import Application
from simfempy.tools.analyticalfunction import AnalyticalFunction
# ================================================================= #
class Heat(Application):
"""
Class for the (stationary) heat equation
$$
rhoCp (T_t + beta\cdot\nabla T) - \div(kheat \nabla T) = f    in the domain
kheat \nabla T\cdot n + alpha T = g                           on the boundary
$$
After initialization, the function setMesh(mesh) has to be called
Then, solve() solves the stationary problem
Parameters in the constructor:
fem: only p1 or cr1
problemdata
method
masslumpedbdry, masslumpedvol
Paramaters used from problemdata:
rhocp
kheat
reaction
alpha
they can either be given as global constant, cell-wise constants, or global function
- global constant is taken from problemdata.paramglobal
- cell-wise constants are taken from problemdata.paramcells
- problemdata.paramglobal is taken from problemdata.datafct and are called with arguments (color, xc, yc, zc)
Possible parameters for computaion of postprocess:
errors
bdry_mean: computes mean temperature over boundary parts according to given color
bdry_nflux: computes mean normal flux over boundary parts according to given color
"""
def __format__(self, spec):
if spec=='-':
repr = super(Heat, self).__format__(spec)
repr += f"\nfem={self.fem}"
return repr
return self.__repr__()
def __repr__(self):
repr = super(Heat, self).__repr__()
repr += f"\nfem={self.fem}"
return repr
def __init__(self, **kwargs):
fem = kwargs.pop('fem','p1')
if fem == 'p1': self.fem = fems.p1.P1(kwargs)
elif fem == 'cr1': self.fem = fems.cr1.CR1(kwargs)
else: raise ValueError("unknown fem '{}'".format(fem))
self.convection = 'convection' in kwargs['problemdata'].params.fct_glob.keys()
super().__init__(**kwargs)
def setMesh(self, mesh):
super().setMesh(mesh)
# if mesh is not None: self.mesh = mesh
self._checkProblemData()
self.fem.setMesh(self.mesh)
# colorsdirichlet = self.problemdata.bdrycond.colorsOfType("Dirichlet")
# colorsflux = self.problemdata.postproc.colorsOfType("bdry_nflux")
# self.bdrydata = self.fem.prepareBoundary(colorsdirichlet, colorsflux)
self.bdrydata = self.fem.prepareBoundary(self.problemdata.bdrycond, self.problemdata.postproc)
self.kheatcell = self.compute_cell_vector_from_params('kheat', self.problemdata.params)
self.problemdata.params.scal_glob.setdefault('rhocp',1)
# TODO: non-constant rhocp
rhocp = self.problemdata.params.scal_glob.setdefault('rhocp', 1)
if self.convection:
convectionfct = self.problemdata.params.fct_glob['convection']
self.convdata = self.fem.prepareAdvection(convectionfct, rhocp)
colorsinflow = self.findInflowColors()
colorsdir = self.problemdata.bdrycond.colorsOfType("Dirichlet")
if not set(colorsinflow).issubset(set(colorsdir)):
raise ValueError(f"Inflow boundaries need to be subset of Dirichlet boundaries {colorsinflow=} {colorsdir=}")
def findInflowColors(self):
colors=[]
for color in self.mesh.bdrylabels.keys():
faces = self.mesh.bdrylabels[color]
if np.any(self.convdata.betart[faces]<-1e-10): colors.append(color)
return colors
def _checkProblemData(self):
if self.verbose: print(f"checking problem data {self.problemdata=}")
if self.convection:
convection_given = self.problemdata.params.fct_glob['convection']
if not isinstance(convection_given, list):
p = "problemdata.params.fct_glob['convection']"
raise ValueError(f"need '{p}' as a list of length dim of str or AnalyticalSolution")
elif isinstance(convection_given[0],str):
self.problemdata.params.fct_glob['convection'] = [AnalyticalFunction(expr=e) for e in convection_given]
else:
if not isinstance(convection_given[0], AnalyticalFunction):
raise ValueError(f"convection should be given as 'str' and not '{type(convection_given[0])}'")
if len(self.problemdata.params.fct_glob['convection']) != self.mesh.dimension:
raise ValueError(f"{self.mesh.dimension=} {self.problemdata.params.fct_glob['convection']=}")
bdrycond = self.problemdata.bdrycond
for color in self.mesh.bdrylabels:
if not color in bdrycond.type: raise ValueError(f"color={color} not in bdrycond={bdrycond}")
if bdrycond.type[color] in ["Robin"]:
if not color in bdrycond.param:
raise ValueError(f"Robin condition needs paral 'alpha' color={color} bdrycond={bdrycond}")
def defineRhsAnalyticalSolution(self, solexact):
def _fctu(x, y, z):
kheat = self.problemdata.params.scal_glob['kheat']
beta = self.problemdata.params.fct_glob['convection']
rhs = np.zeros(x.shape)
for i in range(self.mesh.dimension):
rhs += beta[i](x,y,z) * solexact.d(i, x, y, z)
rhs -= kheat * solexact.dd(i, i, x, y, z)
return rhs
def _fctu2(x, y, z):
kheat = self.problemdata.params.scal_glob['kheat']
rhs = np.zeros(x.shape)
for i in range(self.mesh.dimension):
rhs -= kheat * solexact.dd(i, i, x, y, z)
return rhs
if self.convection: return _fctu
return _fctu2
def defineNeumannAnalyticalSolution(self, problemdata, color):
solexact = problemdata.solexact
def _fctneumann(x, y, z, nx, ny, nz):
kheat = self.problemdata.params.scal_glob['kheat']
rhs = np.zeros(x.shape)
normals = nx, ny, nz
for i in range(self.mesh.dimension):
rhs += kheat * solexact.d(i, x, y, z) * normals[i]
return rhs
return _fctneumann
def defineRobinAnalyticalSolution(self, problemdata, color):
solexact = problemdata.solexact
alpha = problemdata.bdrycond.param[color]
# alpha = 1
def _fctrobin(x, y, z, nx, ny, nz):
kheat = self.problemdata.params.scal_glob['kheat']
rhs = np.zeros(x.shape)
normals = nx, ny, nz
# print(f"{alpha=}")
# rhs += alpha*solexact(x, y, z)
rhs += solexact(x, y, z)
for i in range(self.mesh.dimension):
# rhs += kheat * solexact.d(i, x, y, z) * normals[i]
rhs += kheat * solexact.d(i, x, y, z) * normals[i]/alpha
return rhs
return _fctrobin
def setParameter(self, paramname, param):
    if paramname == "dirichlet_strong": self.fem.dirichlet_strong = param
    else:
        if not hasattr(self, paramname):
            raise NotImplementedError("{} has no parameter '{}'".format(self, paramname))
        setattr(self, paramname, param)
def computeForm(self, u, coeffmass=None):
du2 = self.A@u
du = np.zeros_like(u)
bdrycond = self.problemdata.bdrycond
colorsrobin = bdrycond.colorsOfType("Robin")
colorsdir = bdrycond.colorsOfType("Dirichlet")
self.fem.computeFormDiffusion(du, u, self.kheatcell)
self.fem.formBoundary(du, u, self.bdrydata, self.kheatcell, colorsdir)
if self.convection:
self.fem.computeFormConvection(du, u, self.convdata)
if coeffmass is not None:
self.fem.massDot(du, u, coeff=coeffmass)
self.fem.vectorBoundaryStrongEqual(du, u, self.bdrydata)
if not np.allclose(du, du2):
import numpy as np
from joblib import Parallel, delayed
np.seterr(all='raise', divide='raise', over='raise', under='raise', invalid='raise')
def euclidean_distance(point1_xy, point2_xy, x_coord=0, y_coord=1, c_coord=2):
diff_x = point2_xy[x_coord] - point1_xy[x_coord]
diff_y = point2_xy[y_coord] - point1_xy[y_coord]
square_add = np.power(diff_x, 2) + np.power(diff_y, 2)
distance = np.sqrt(square_add)
return distance
def area_polygon(xy_coordinates, x_coord=0, y_coord=1, c_coord=2):
"""
Enclosed polygon area calculation using Shoelace formula
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Parameters
----------
xy_coordinates : array of shape (num_points_closed_figure, 2)
Returns
-------
area : Scalar value of area of the closed shape
"""
x_coordinates = xy_coordinates[:, x_coord]
y_coordinates = xy_coordinates[:, y_coord]
dot_product = np.dot(x_coordinates, np.roll(y_coordinates, 1)) - np.dot(y_coordinates, np.roll(x_coordinates, 1))
area = 0.5*np.abs(dot_product)
return area
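# Sanity-check sketch (added for illustration, not part of the original module):
# the Shoelace formula applied to the unit square traversed counter-clockwise
# should give an area of exactly 1.0.
def _example_unit_square_area():
    square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    return area_polygon(square)  # expected: 1.0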
def trajectory_area(onesample, keypoint_idx, x_coord=0, y_coord=1, c_coord=2):
x_coordinates = onesample[:, keypoint_idx, x_coord]
y_coordinates = onesample[:, keypoint_idx, y_coord]
dot_product = np.dot(x_coordinates, np.roll(y_coordinates, 1)) - np.dot(y_coordinates, np.roll(x_coordinates, 1))
area = 0.5*np.abs(dot_product)
return area
def shape_orientation_angel(point1_xy, point2_xy):
numer = point2_xy[1] - point1_xy[1]
denom = point2_xy[0] - point1_xy[0]
alpha = np.arctan2(numer, denom)
return alpha
def shape_perimeter(shape_xy_points):
perimeter = 0.0
num_points = shape_xy_points.shape[0]
for onepoint in range(num_points):
if onepoint < (num_points - 1):  # connect consecutive vertices; the elif below closes the polygon
point1_xy = shape_xy_points[onepoint, :]
point2_xy = shape_xy_points[onepoint+1, :]
dist = euclidean_distance(point1_xy, point2_xy)
elif onepoint == (num_points - 1):
point1_xy = shape_xy_points[0, :]
point2_xy = shape_xy_points[onepoint, :]
dist = euclidean_distance(point1_xy, point2_xy)
perimeter = dist + perimeter
return perimeter
def trajectory_perimeter(onesample, keypoint_idx, x_coord=0, y_coord=1, c_coord=2):
perimeter = 0.0
dist = 0.0
num_frames = onesample.shape[0]
for oneframe_idx in range(num_frames):
if oneframe_idx < (num_frames - 1):  # connect consecutive frames; the elif below closes the trajectory
point1_xy = onesample[oneframe_idx, keypoint_idx, :]
point2_xy = onesample[oneframe_idx+1, keypoint_idx, :]
dist = euclidean_distance(point1_xy, point2_xy)
elif oneframe_idx == (num_frames - 1):
point1_xy = onesample[0, keypoint_idx, :]
point2_xy = onesample[oneframe_idx, keypoint_idx, :]
dist = euclidean_distance(point1_xy, point2_xy)
perimeter = dist + perimeter
return perimeter
def shape_compactness(shape_area, shape_perimeter):
numer = 4*(np.pi)*shape_area
denom = np.power(shape_perimeter, 2)
try:
compactness = numer/denom
except FloatingPointError:
print("Exception shape_compactness")
compactness = 0.0
return compactness
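# The ratio 4*pi*A/P^2 computed above equals 1 for a perfect circle
# (A = pi*r^2, P = 2*pi*r) and decreases towards 0 as the shape becomes
# more elongated or irregular.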
def law_of_cosines(vertexA, vertexB, vertexC):
"""
angle will be inscribed at vertexC
"""
sideA = euclidean_distance(vertexB, vertexC)
sideB = euclidean_distance(vertexA, vertexC)
sideC = euclidean_distance(vertexA, vertexB)
# length_scaling_factor = 1e6
numer = (np.power(sideA, 2) + np.power(sideB, 2) - np.power(sideC, 2))
denom = (2*sideA*sideB)
try:
angle_C = np.arccos(numer/denom)
except FloatingPointError:
print("Exception law_of_cosines")
angle_C = 0.0
return angle_C
###############################################################################
###############################################################################
def facial_features_oneframe(oneframe):
"""
Features calculated using the paper https://ieeexplore.ieee.org/abstract/document/4813472
"""
mouth_area = area_polygon(oneframe[48:60, :])
mouth_height = euclidean_distance(oneframe[51, :], oneframe[57, :])
mouth_width = euclidean_distance(oneframe[48, :], oneframe[54, :])
# alpha = shape_orientation_angel(oneframe[57, :], oneframe[51, :])
# mouth_o1_orientation = np.sin(2*alpha)
# mouth_o2_orientation = np.cos(alpha)
# perimeter = shape_perimeter(oneframe[48:60, :])
# compactness = shape_compactness(mouth_area, perimeter)
# try:
# eccentricity = mouth_height/mouth_width
# except FloatingPointError:
# print("Exception facial_features_oneframe")
# eccentricity = 0.0
# return mouth_area, mouth_height, mouth_width, mouth_o1_orientation, mouth_o2_orientation, compactness, eccentricity
return mouth_area, mouth_height, mouth_width
### Main facial features from all frames for one sample
def facial_features_onesample(onesample):
face_data = onesample[:, 25:95, :]
num_frames = face_data.shape[0]
features = np.asarray(Parallel(n_jobs=-1)(delayed(facial_features_oneframe)(face_data[oneframe_idx, :, :]) for oneframe_idx in range(num_frames)))
rows, cols = features.shape[0], features.shape[1]
facial_features_onesample = np.zeros((rows, cols+6))
import os
import re
import logging
import wave
import time
import threading
import signal
import atexit
import functools
import collections
import typer
import numpy as np
from mltk import cli
@cli.root_cli.command('classify_audio')
def classify_audio_command(
model: str = typer.Argument(...,
help='''\b
One of the following:
- MLTK model name
- Path to .tflite file
- Path to model archive file (.mltk.zip)
NOTE: The model must have been previously trained for keyword spotting''',
metavar='<model>'
),
accelerator: str = typer.Option(None, '--accelerator', '-a',
help='''\b
Name of accelerator to use while executing the audio classification ML model.
If omitted, then use the reference kernels
NOTE: It is recommended to NOT use an accelerator if running on the PC since the HW simulator can be slow.''',
metavar='<name>'
),
use_device:bool = typer.Option(False, '-d', '--device',
help='''\b
If provided, then run the keyword spotting model on an embedded device, otherwise use the PC's local microphone.
If this option is provided, then the device must be locally connected'''
),
port:str = typer.Option(None,
help='''\b
Serial COM port of a locally connected embedded device.
This is only used with the --device option.
If omitted, then attempt to automatically determine the serial COM port''',
metavar='<port>'
),
verbose: bool = typer.Option(False, '--verbose', '-v',
help='Enable verbose console logs'
),
average_window_duration_ms: int = typer.Option(None, '--window_duration', '-w',
help='''\b
Controls the smoothing. Drop all inference results that are older than <now> minus window_duration.
Longer durations (in milliseconds) will give a higher confidence that the results are correct, but may miss some commands''',
metavar='<duration ms>'
),
minimum_count: int = typer.Option(None, '--count', '-c',
help='The *minimum* number of inference results to average when calculating the detection value. Set to 0 to disable averaging',
metavar='<count>'
),
detection_threshold: int = typer.Option(None, '--threshold', '-t',
help='Minimum averaged model output threshold for a class to be considered detected, 0-255. Higher values increase precision at the cost of recall',
metavar='<threshold>'
),
suppression_ms: int = typer.Option(None, '--suppression', '-s',
help='Amount of milliseconds to wait after a keyword is detected before detecting new keywords',
metavar='<suppression ms>'
),
latency_ms: int = typer.Option(None, '--latency', '-l',
help='This is the amount of time in milliseconds between processing loops',
metavar='<latency ms>'
),
microphone: str = typer.Option(None, '--microphone', '-m',
help='For non-embedded, this specifies the name of the PC microphone to use',
metavar='<name>'
),
volume_gain: int = typer.Option(None, '--volume', '-u',
help='Set the volume gain scaler (i.e. amplitude) to apply to the microphone data. If 0 or omitted, no scaler is applied',
metavar='<volume gain>'
),
dump_audio: bool = typer.Option(False, '--dump-audio', '-x',
help='Dump the raw microphone audio and generate a corresponding .wav file',
),
dump_raw_spectrograms: bool = typer.Option(False, '--dump-raw-spectrograms', '-w',
help='Dump the raw (i.e. unquantized) generated spectrograms to .jpg images and .mp4 video',
),
dump_quantized_spectrograms: bool = typer.Option(False, '--dump-spectrograms', '-z',
help='Dump the quantized generated spectrograms to .jpg images and .mp4 video',
),
sensitivity: float = typer.Option(None, '--sensitivity', '-i',
help='Sensitivity of the activity indicator LED. Values much less than 1.0 give higher sensitivity',
),
app_path: str = typer.Option(None, '--app',
help='''\b
By default, the audio_classifier app is automatically downloaded.
This option allows for overriding with a custom built app.
Alternatively, if using the --device option, set this option to "none" to NOT program the audio_classifier app to the device.
In this case, ONLY the .tflite will be programmed and the existing audio_classifier app will be re-used.
''',
metavar='<path>'
),
is_unit_test: bool = typer.Option(False, '--test',
help='Run as a unit test',
),
):
"""Classify keywords/events detected in a microphone's streaming audio
NOTE: This command is experimental. Use at your own risk!
\b
This command runs an audio classification application on either the local PC OR
on an embedded target. The audio classification application loads the given
audio classification ML model (e.g. Keyword Spotting) and streams real-time audio
from the local PC's/embedded target's microphone into the ML model.
\b
System Dataflow:
Microphone -> AudioFeatureGenerator -> ML Model -> Command Recognizer -> Local Terminal
\b
Refer to the mltk.models.tflite_micro.tflite_micro_speech model for a reference on how to train
an ML model that works with the audio classification application.
\b
----------
Examples
----------
\b
# Classify audio on local PC using tflite_micro_speech model
# Simulate the audio loop latency to be 200ms
# i.e. If the app was running on an embedded target, it would take 200ms per audio loop
# Also enable verbose logs
mltk classify_audio tflite_micro_speech --latency 200 --verbose
\b
# Classify audio on an embedded target using model: ~/workspace/my_model.tflite
# and the following classifier settings:
# - Set the averaging window to 1200ms (i.e. drop samples older than <now> minus window)
# - Set the minimum sample count to 3 (i.e. must have at least 3 samples before classifying)
# - Set the threshold to 175 (i.e. the average of the inference results within the averaging window must be at least 175 of 255)
# - Set the suppression to 750ms (i.e. Once a keyword is detected, wait 750ms before detecting more keywords)
mltk classify_audio /home/john/my_model.tflite --device --window 1200ms --count 3 --threshold 175 --suppression 750
\b
# Classify audio and also dump the captured raw audio and spectrograms
mltk classify_audio tflite_micro_speech --dump-audio --dump-spectrograms
"""
# Import all required packages here instead of at top
# to help improve the CLI's responsiveness
from mltk.core import (
TfliteModel,
TfliteModelParameters,
load_mltk_model,
load_tflite_or_keras_model
)
from mltk.utils import firmware_apps
from mltk.utils import commander
from mltk.utils.system import (get_current_os, make_path_executable, send_signal)
from mltk.utils.shell_cmd import run_shell_cmd
from mltk.utils.serial_reader import SerialReader
from mltk.utils.path import (create_tempdir, fullpath, create_user_dir, clean_directory)
from mltk.utils.jlink_stream import (JlinkStream, JLinkDataStream, JlinkStreamOptions)
from mltk.utils.python import install_pip_package
from mltk.utils.logger import get_logger
logger = cli.get_logger()
have_cv2 = False
try:
install_pip_package('opencv-python', 'cv2', logger=logger)
from cv2 import cv2
have_cv2 = True
except Exception as e:
try:
import cv2
have_cv2 = True
except:
pass
accelerator = cli.parse_accelerator_option(accelerator)
# If the filepath to a .tflite model file was provided
if model.endswith('.tflite'):
model_path = fullpath(model)
tflite_model = TfliteModel.load_flatbuffer_file(model_path)
# Otherwise, find the MLTK Model file
else:
try:
mltk_model = load_mltk_model(
model,
print_not_found_err=True
)
tflite_model = load_tflite_or_keras_model(
mltk_model,
model_type='tflite'
)
except Exception as e:
cli.handle_exception('Failed to load model', e)
input_dtype = tflite_model.inputs[0].dtype
platform = get_current_os() if not use_device else commander.query_platform()
###############################################################
def _run_audio_classifier_on_device(
tflite_model_params:TfliteModelParameters,
dump_audio_dir:str,
dump_raw_spectrograms_dir:str,
dump_quantized_spectrograms_dir:str,
):
"""Run the audio_classifier app on an embedded device"""
nonlocal port
# Program the audio_classifier app and .tflite model
# to the device's flash
firmware_apps.program_image_with_model(
name='mltk_audio_classifier',
platform=platform,
accelerator=accelerator,
tflite_model=tflite_model,
logger=logger,
halt=True,
firmware_image_path=app_path
)
# If no serial COM port is provided,
# then attempt to resolve it based on the common Silicon Labs board COM port description
port = port or 'regex:JLink CDC UART Port'
# Start the serial COM port reader
logger.info('Running audio classifier on device ...')
logger.info('Press CTRL+C to exit\n')
with SerialReader(
port=port,
baud=115200,
outfile=logger,
start_regex=re.compile(r'.*Audio Classifier.*', re.IGNORECASE),
fail_regex=[
re.compile(r'.*hardfault.*', re.IGNORECASE),
re.compile(r'.*assert.*', re.IGNORECASE),
re.compile(r'.*error.*', re.IGNORECASE)
]
) as reader:
commander.reset_device()
if is_unit_test:
_start_ctrl_c_timer()
stop_event = None
if dump_audio_dir or dump_raw_spectrograms_dir or dump_quantized_spectrograms_dir:
# Wait for the device to be ready
while True:
reader.read(timeout=0.10)
# Check if any errors occurred
if reader.error_message:
raise RuntimeError(f'Device error: {reader.error_message}')
if reader.started:
break
stop_event = _start_jlink_processor(
dump_audio_dir=dump_audio_dir,
dump_raw_spectrograms_dir=dump_raw_spectrograms_dir,
dump_quantized_spectrograms_dir=dump_quantized_spectrograms_dir,
tflite_model_params=tflite_model_params
)
try:
while not reader.read(timeout=.010):
time.sleep(0.005)
if reader.error_message:
if stop_event is not None:
stop_event.set()
raise RuntimeError(f'Device error: {reader.error_message}')
except KeyboardInterrupt:
if stop_event is not None:
stop_event.set()
###############################################################
def _run_audio_classifier_on_pc(
dump_audio_dir:str,
dump_raw_spectrograms_dir:str,
dump_quantized_spectrograms_dir:str,
):
"""Run audio_classifier app on local PC"""
nonlocal app_path
if app_path is None:
app_path = firmware_apps.get_image(
name='mltk_audio_classifier',
accelerator=None,
platform=platform,
logger=logger
)
make_path_executable(app_path)
tflite_name = tflite_model.filename or 'mltk_audio_classifier.tflite'
tmp_tflite_path = create_tempdir('tmp_models') + f'/{os.path.splitext(tflite_name)[0]}.tflite'
tflite_model.save(tmp_tflite_path)
cmd = [app_path, '--model', tmp_tflite_path]
if latency_ms is not None:
cmd.extend(['--latency', str(latency_ms)])
if dump_audio_dir:
cmd.extend(['--dump_audio', dump_audio_dir])
if dump_raw_spectrograms_dir:
cmd.extend(['--dump_raw_spectrograms', dump_raw_spectrograms_dir])
if dump_quantized_spectrograms_dir:
cmd.extend(['--dump_spectrograms', dump_quantized_spectrograms_dir])
env = os.environ
if microphone:
env['MLTK_MICROPHONE'] = microphone
if is_unit_test:
_start_ctrl_c_timer()
cmd_str = ' '.join(cmd)
logger.debug(cmd_str)
retcode, retmsg = run_shell_cmd(cmd, outfile=logger, env=env)
if retcode != 0:
raise RuntimeError(f'Command failed (err code: {retcode}): {cmd_str}\n{retmsg}')
###############################################################
def _generate_wav_from_dumped_audio(
dump_dir:str,
sample_rate:int
):
"""Generate a .wav file from the dumped audio chunks generated by the audio_classifier app"""
audio_data = bytearray()
src_dir = f'{dump_dir}/bin'
count = 0
while True:
p = f'{src_dir}/{count}.int16.bin'
if not os.path.exists(p):
break
count += 1
with open(p, 'rb') as f:
audio_data.extend(f.read())
if len(audio_data) == 0:
return
wav_path = f'{dump_dir}/dumped_audio.wav'
with wave.open(wav_path,'w') as wav:
# pylint: disable=no-member
wav.setnchannels(1) # mono
wav.setsampwidth(2) # 16-bit
wav.setframerate(sample_rate)
wav.writeframesraw(bytes(audio_data))
logger.info(f'Generated: {wav_path}')
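# A minimal sketch (illustrative assumption) of reading the generated file back to
# sanity-check it with the standard-library wave module:
#   with wave.open(wav_path, 'r') as wav:
#       assert wav.getnchannels() == 1 and wav.getsampwidth() == 2
#       samples = np.frombuffer(wav.readframes(wav.getnframes()), dtype=np.int16)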
###############################################################
def _dtype_to_str(dtype:np.dtype) -> str:
if dtype == np.int8:
return 'int8'
if dtype == np.uint16:
return 'uint16'
if dtype == np.float32:
return 'float32'
raise RuntimeError(f'Unsupported dtype {dtype}')
###############################################################
def _generate_video_from_dumped_spectrograms(
dump_dir:str,
dtype:np.dtype
):
"""Combine the genated .jpg spectrograms into an .mp4 video file"""
spec_1_path = f'{dump_dir}/jpg/1.jpg'
video_path = f'{dump_dir}/dump_spectrograms.mp4'
dtype_str = _dtype_to_str(dtype)
fps_name = f'{dtype_str}_spectrogram_fps'
if fps_name not in globals():
return
spectrogram_fps = globals()[fps_name]
if not os.path.exists(spec_1_path):
return
spectrogram = cv2.imread(spec_1_path)
height, width = spectrogram.shape[:2]
logger.info(f'Spectrogram rate: {spectrogram_fps:.1f} frame/s')
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video = cv2.VideoWriter(video_path, fourcc, spectrogram_fps, (width, height))
for i in range(1, int(1e9)):
jpg_path = f'{dump_dir}/jpg/{i}.jpg'
if not os.path.exists(jpg_path):
break
video.write(cv2.imread(jpg_path))
video.release()
logger.info(f'Generated: {video_path}')
###############################################################
def _start_spectrogram_jpg_generator(
dump_dir:str,
dtype:np.dtype
):
"""Start a thread to periodically sample the spectrogram dump directory and generate a .jpg when one if found
This converts from a numpy .txt array to a .jpg of the spectrogram
"""
stop_event = threading.Event()
dtype_str = _dtype_to_str(dtype)
src_dir = f'{dump_dir}/bin'
dst_dir = f'{dump_dir}/jpg'
os.makedirs(dst_dir, exist_ok=True)
resize_dim_range = (240, 480) # Ensure the jpg dims are within this range
def _convert_npy_to_jpg_loop():
fps_list = collections.deque(maxlen=15)
prev_time = None
counter = 1
while not stop_event.is_set():
src_path = f'{src_dir}/{counter}.{dtype_str}.npy.txt'
next_path = f'{src_dir}/{counter+1}.{dtype_str}.npy.txt'
dst_path = f'{dst_dir}/{counter}.jpg'
# We wait until the NEXT spectrogram file is found
# this way, we avoid race-conditions and ensure the current
# spectrogram is fully written
if not os.path.exists(next_path):
time.sleep(0.050)
continue
if prev_time is None:
prev_time = time.time()
else:
now = time.time()
elapsed = (now - prev_time) or .1
prev_time = now
fps_list.append(elapsed)
globals()[f'{dtype_str}_spectrogram_fps'] = len(fps_list) / sum(fps_list)
counter += 1
try:
spectrogram = np.loadtxt(src_path, delimiter=',')
except Exception as e:
logger.debug(f'Failed to read {src_path}, err: {e}')
continue
# Transpose to put the time on the x-axis
spectrogram = np.transpose(spectrogram)
# Convert from int8 to uint8
if dtype_str == 'int8':
spectrogram = np.clip(spectrogram +128, 0, 255)
spectrogram = spectrogram.astype(np.uint8)
elif dtype_str == 'uint16':
spectrogram = np.clip(spectrogram / 4, 0, 255)
from __future__ import print_function, division
import os, sys, warnings, platform
from time import time
import numpy as np
if "PyPy" not in platform.python_implementation():
from scipy.io import loadmat, savemat
from Florence.Tensor import makezero, itemfreq, unique2d, in2d
from Florence.Utils import insensitive
from .vtk_writer import write_vtu
try:
import meshpy.triangle as triangle
has_meshpy = True
except ImportError:
has_meshpy = False
from .HigherOrderMeshing import *
from .NodeArrangement import *
from .GeometricPath import *
from warnings import warn
from copy import deepcopy
"""
Mesh class providing most of the pre-processing functionalities of the Core module
<NAME> - 13/06/2015
"""
class Mesh(object):
"""Mesh class provides the following functionalities:
1. Generating higher order meshes based on a linear mesh, for tris, tets, quads and hexes
2. Generating linear tri and tet meshes based on meshpy back-end
3. Generating linear tri meshes based on distmesh back-end
4. Finding boundary edges and faces for tris and tets, in case they are not provided by the mesh generator
5. Reading Salome meshes in binary (.dat/.txt/etc) format
6. Reading gmsh files .msh
7. Checking for node numbering order of elements and fixing it if desired
8. Writing meshes to unstructured vtk file format (.vtu) in xml and binary formats,
including high order elements
"""
def __init__(self, element_type=None):
super(Mesh, self).__init__()
# self.faces and self.edges ARE BOUNDARY FACES
# AND BOUNDARY EDGES, RESPECTIVELY
self.degree = None
self.ndim = None
self.edim = None
self.nelem = None
self.nnode = None
self.elements = None
self.points = None
self.corners = None
self.edges = None
self.faces = None
self.element_type = element_type
self.face_to_element = None
self.edge_to_element = None
self.boundary_edge_to_element = None
self.boundary_face_to_element = None
self.all_faces = None
self.all_edges = None
self.interior_faces = None
self.interior_edges = None
# TYPE OF BOUNDARY FACES/EDGES
self.boundary_element_type = None
# FOR GEOMETRICAL CURVES/SURFACES
self.edge_to_curve = None
self.face_to_surface = None
self.spatial_dimension = None
self.reader_type = None
self.reader_type_format = None
self.reader_type_version = None
self.writer_type = None
self.filename = None
# self.has_meshpy = has_meshpy
def SetElements(self,arr):
self.elements = arr
def SetPoints(self,arr):
self.points = arr
def SetEdges(self,arr):
self.edges = arr
def SetFaces(self,arr):
self.faces = arr
def GetElements(self):
return self.elements
def GetPoints(self):
return self.points
def GetEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
self.GetEdgesTri()
elif self.element_type == "quad":
self.GetEdgesQuad()
elif self.element_type == "pent":
self.GetEdgesPent()
elif self.element_type == "tet":
self.GetEdgesTet()
elif self.element_type == "hex":
self.GetEdgesHex()
else:
raise ValueError('Type of element not understood')
return self.all_edges
def GetBoundaryEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
self.GetBoundaryEdgesTri()
elif self.element_type == "quad":
self.GetBoundaryEdgesQuad()
elif self.element_type == "pent":
self.GetBoundaryEdgesPent()
elif self.element_type == "tet":
self.GetBoundaryEdgesTet()
elif self.element_type == "hex":
self.GetBoundaryEdgesHex()
else:
raise ValueError('Type of element not understood')
return self.edges
def GetInteriorEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
self.GetInteriorEdgesTri()
elif self.element_type == "quad":
self.GetInteriorEdgesQuad()
elif self.element_type == "pent":
self.GetInteriorEdgesPent()
elif self.element_type == "tet":
self.GetInteriorEdgesTet()
elif self.element_type == "hex":
self.GetInteriorEdgesHex()
else:
raise ValueError('Type of element not understood')
return self.interior_edges
def GetFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
self.GetFacesTet()
elif self.element_type == "hex":
self.GetFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.all_faces
def GetBoundaryFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
self.GetBoundaryFacesTet()
elif self.element_type == "hex":
self.GetBoundaryFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.faces
def GetInteriorFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
self.GetInteriorFacesTet()
elif self.element_type == "hex":
self.GetInteriorFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.interior_faces
def GetElementsEdgeNumbering(self):
assert self.element_type is not None
if self.element_type == "tri":
return self.GetElementsEdgeNumberingTri()
elif self.element_type == "quad":
return self.GetElementsEdgeNumberingQuad()
else:
raise ValueError('Type of element not understood')
return self.edge_to_element
def GetElementsWithBoundaryEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
return self.GetElementsWithBoundaryEdgesTri()
elif self.element_type == "quad":
return self.GetElementsWithBoundaryEdgesQuad()
else:
raise ValueError('Type of element not understood')
return self.boundary_edge_to_element
def GetElementsFaceNumbering(self):
assert self.element_type is not None
if self.element_type == "tet":
return self.GetElementsFaceNumberingTet()
elif self.element_type == "hex":
return self.GetElementsFaceNumberingHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.face_to_element
def GetElementsWithBoundaryFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
return self.GetElementsWithBoundaryFacesTet()
elif self.element_type == "hex":
return self.GetElementsWithBoundaryFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.boundary_face_to_element
@property
def Bounds(self):
"""Returns bounds of a mesh i.e. the minimum and maximum coordinate values
in every direction
"""
assert self.points is not None
if self.points.shape[1] == 3:
bounds = np.array([[np.min(self.points[:,0]),
np.min(self.points[:,1]),
np.min(self.points[:,2])],
[np.max(self.points[:,0]),
np.max(self.points[:,1]),
np.max(self.points[:,2])]])
makezero(bounds)
return bounds
elif self.points.shape[1] == 2:
bounds = np.array([[np.min(self.points[:,0]),
np.min(self.points[:,1])],
[np.max(self.points[:,0]),
np.max(self.points[:,1])]])
makezero(bounds)
return bounds
elif self.points.shape[1] == 1:
bounds = np.array([[np.min(self.points[:,0])],
[np.max(self.points[:,0])]])
makezero(bounds)
return bounds
else:
raise ValueError("Invalid dimension for mesh coordinates")
def GetEdgesTri(self):
"""Find all edges of a triangular mesh.
Sets all_edges property and returns it
returns:
arr: numpy ndarray of all edges"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1]==2 and p > 1:
pass
else:
return self.all_edges
node_arranger = NodeArrangementTri(p-1)[0]
# CHECK IF EDGES ARE ALREADY AVAILABLE
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1 and self.all_edges.shape[1] == p+1:
warn("Mesh edges seem to be already computed. I am going to recompute them")
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
edges = np.zeros((3*self.elements.shape[0],p+1),dtype=np.uint64)
edges[:self.elements.shape[0],:] = self.elements[:,node_arranger[0,:]]
edges[self.elements.shape[0]:2*self.elements.shape[0],:] = self.elements[:,node_arranger[1,:]]
edges[2*self.elements.shape[0]:,:] = self.elements[:,node_arranger[2,:]]
# REMOVE DUPLICATES
edges, idx = unique2d(edges,consider_sort=True,order=False,return_index=True)
edge_to_element = np.zeros((edges.shape[0],2),np.int64)
edge_to_element[:,0] = idx % self.elements.shape[0]
edge_to_element[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_to_element
# DO NOT SET all_edges IF THE CALLER FUNCTION IS GetBoundaryEdgesTet
import inspect
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)[1][3]
if calframe != "GetBoundaryEdgesTet":
self.all_edges = edges
return edges
def GetBoundaryEdgesTri(self):
"""Find boundary edges (lines) of triangular mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
node_arranger = NodeArrangementTri(p-1)[0]
# CONCATENATE ALL THE EDGES MADE FROM ELEMENTS
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]]),axis=0)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]
self.edges = self.edges.astype(np.uint64)
self.boundary_edge_to_element = boundary_edge_to_element
return self.edges
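# Illustrative example of the uniqueness argument above: for two triangles
# [0,1,2] and [0,2,3] forming a square, the diagonal edge (0,2) is generated by
# both elements and is therefore interior, while the four outer edges each occur
# exactly once and are returned as boundary edges.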
def GetInteriorEdgesTri(self):
"""Computes interior edges of a triangular mesh
returns:
interior_edges ndarray of interior edges
edge_flags ndarray of edge flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_edges,np.ndarray):
self.GetEdgesTri()
if not isinstance(self.edges,np.ndarray):
self.GetBoundaryEdgesTri()
sorted_all_edges = np.sort(self.all_edges,axis=1)
sorted_boundary_edges = np.sort(self.edges,axis=1)
x = []
for i in range(self.edges.shape[0]):
current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],
self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])
interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)
pos_interior_edges = np.where(interior_edges==0)[0]
if pos_interior_edges.shape[0] != 0:
x.append(pos_interior_edges)
edge_aranger = np.arange(self.all_edges.shape[0])
edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])
interior_edges = self.all_edges[edge_aranger,:]
# GET FLAGS FOR BOUNDARY AND INTERIOR
edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)
edge_flags[edge_aranger] = 0
self.interior_edges = interior_edges
return interior_edges, edge_flags
def GetFacesTet(self):
"""Find all faces (surfaces) in the tetrahedral mesh (boundary & interior).
Sets all_faces property and returns it
returns:
arr: numpy ndarray of all faces
"""
# DETERMINE DEGREE
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_faces,np.ndarray):
if self.all_faces.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_faces.shape[1] == 3 and p > 1:
pass
else:
return self.all_faces
node_arranger = NodeArrangementTet(p-1)[0]
fsize = int((p+1.)*(p+2.)/2.)
# GET ALL FACES FROM THE ELEMENT CONNECTIVITY
faces = np.zeros((4*self.elements.shape[0],fsize),dtype=np.uint64)
faces[:self.elements.shape[0],:] = self.elements[:,node_arranger[0,:]]
faces[self.elements.shape[0]:2*self.elements.shape[0],:] = self.elements[:,node_arranger[1,:]]
faces[2*self.elements.shape[0]:3*self.elements.shape[0],:] = self.elements[:,node_arranger[2,:]]
faces[3*self.elements.shape[0]:,:] = self.elements[:,node_arranger[3,:]]
# REMOVE DUPLICATES
self.all_faces, idx = unique2d(faces,consider_sort=True,order=False,return_index=True)
face_to_element = np.zeros((self.all_faces.shape[0],2),np.int64)
face_to_element[:,0] = idx % self.elements.shape[0]
face_to_element[:,1] = idx // self.elements.shape[0]
self.face_to_element = face_to_element
return self.all_faces
def GetEdgesTet(self):
"""Find all edges (lines) of tetrahedral mesh (boundary & interior)"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1] == 2 and p > 1:
pass
else:
return self.all_edges
# FIRST GET BOUNDARY FACES
if isinstance(self.all_faces,np.ndarray):
if self.all_faces.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_faces.shape[1] == 3 and p > 1:
self.GetFacesTet()
else:
self.GetFacesTet()
# BUILD A 2D MESH
tmesh = Mesh()
# tmesh = deepcopy(self)
tmesh.element_type = "tri"
tmesh.elements = self.all_faces
tmesh.nelem = tmesh.elements.shape[0]
del tmesh.faces
del tmesh.points
# COMPUTE ALL EDGES
self.all_edges = tmesh.GetEdgesTri()
return self.all_edges
def GetBoundaryFacesTet(self):
"""Find boundary faces (surfaces) of a tetrahedral mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.faces,np.ndarray):
if self.faces.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.faces.shape[1] == 3 and p > 1:
pass
else:
return
node_arranger = NodeArrangementTet(p-1)[0]
# CONCATENATE ALL THE FACES MADE FROM ELEMENTS
all_faces = np.concatenate((self.elements[:,:3],self.elements[:,[0,1,3]],
self.elements[:,[0,2,3]],self.elements[:,[1,2,3]]),axis=0)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_faces,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY FACES
freqs_inv = itemfreq(inv)
faces_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.faces = uniques[faces_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_face_to_element = np.zeros((faces_ext_flags.shape[0],2),dtype=np.int64)
# THE FOLLOWING WILL COMPUTE FACES BASED ON SORTING AND NOT TAKING INTO ACCOUNT
# THE ELEMENT CONNECTIVITY
# boundary_face_to_element[:,0] = np.remainder(idx[faces_ext_flags],self.elements.shape[0])
# boundary_face_to_element[:,1] = np.floor_divide(idx[faces_ext_flags],self.elements.shape[0])
# OR EQUIVALENTLY
# boundary_face_to_element[:,0] = idx[faces_ext_flags] % self.elements.shape[0]
# boundary_face_to_element[:,1] = idx[faces_ext_flags] // self.elements.shape[0]
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF FACES
all_faces_in_faces = in2d(all_faces,self.faces,consider_sort=True)
all_faces_in_faces = np.where(all_faces_in_faces==True)[0]
# boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)
boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]
boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.faces = self.elements[boundary_face_to_element[:,0][:,None],node_arranger[boundary_face_to_element[:,1],:]]
self.faces = self.faces.astype(np.uint64)
self.boundary_face_to_element = boundary_face_to_element
def GetBoundaryEdgesTet(self):
"""Find boundary edges (lines) of tetrahedral mesh.
Note that for tetrahedrals this function is more robust than Salome's default edge generator
"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
# FIRST GET BOUNDARY FACES
if not isinstance(self.faces,np.ndarray):
self.GetBoundaryFacesTet()
# BUILD A 2D MESH
tmesh = Mesh()
tmesh.element_type = "tri"
tmesh.elements = self.faces
tmesh.nelem = tmesh.elements.shape[0]
del tmesh.faces
del tmesh.points
# ALL THE EDGES CORRESPONDING TO THESE BOUNDARY FACES ARE BOUNDARY EDGES
self.edges = tmesh.GetEdgesTri()
def GetInteriorFacesTet(self):
"""Computes interior faces of a tetrahedral mesh
returns:
interior_faces ndarray of interior faces
face_flags 1D array of face flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_faces,np.ndarray):
self.GetFacesTet()
if not isinstance(self.faces,np.ndarray):
self.GetBoundaryFacesTet()
face_flags = in2d(self.all_faces.astype(self.faces.dtype),self.faces,consider_sort=True)
face_flags[face_flags==True] = 1
face_flags[face_flags==False] = 0
interior_faces = self.all_faces[face_flags==False,:]
return interior_faces, face_flags
def GetInteriorEdgesTet(self):
"""Computes interior faces of a tetrahedral mesh
returns:
interior_edges ndarray of interior edges
edge_flags 1D array of edge flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_edges,np.ndarray):
self.GetEdgesTet()
if not isinstance(self.edges,np.ndarray):
self.GetBoundaryEdgesTet()
edge_flags = in2d(self.all_edges.astype(self.edges.dtype),self.edges,consider_sort=True)
edge_flags[edge_flags==True] = 1
edge_flags[edge_flags==False] = 0
interior_edges = self.all_edges[edge_flags==False,:]
self.interior_edges = interior_edges
return interior_edges, edge_flags
def GetEdgesQuad(self):
"""Find the all edges of a quadrilateral mesh.
Sets all_edges property and returns it
returns:
arr: numpy ndarray of all edges"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1]==2 and p > 1:
pass
else:
return self.all_edges
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.uint64)
# REMOVE DUPLICATES
edges, idx = unique2d(edges,consider_sort=True,order=False,return_index=True)
edge_to_element = np.zeros((edges.shape[0],2),np.int64)
edge_to_element[:,0] = idx % self.elements.shape[0]
edge_to_element[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_to_element
# DO NOT SET all_edges IF THE CALLER FUNCTION IS GetBoundaryEdgesHex
import inspect
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)[1][3]
if calframe != "GetBoundaryEdgesHex":
self.all_edges = edges
return edges
def GetBoundaryEdgesQuad(self):
"""Find boundary edges (lines) of a quadrilateral mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.uint64)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]
self.edges = self.edges.astype(np.uint64)
self.boundary_edge_to_element = boundary_edge_to_element
return self.edges
def GetInteriorEdgesQuad(self):
"""Computes interior edges of a quadrilateral mesh
returns:
interior_edges ndarray of interior edges
edge_flags ndarray of edge flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_edges,np.ndarray):
self.GetEdgesQuad()
if not isinstance(self.edges,np.ndarray):
self.GetBoundaryEdgesQuad()
sorted_all_edges = np.sort(self.all_edges,axis=1)
sorted_boundary_edges = np.sort(self.edges,axis=1)
x = []
for i in range(self.edges.shape[0]):
current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],
self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])
interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)
pos_interior_edges = np.where(interior_edges==0)[0]
if pos_interior_edges.shape[0] != 0:
x.append(pos_interior_edges)
edge_aranger = np.arange(self.all_edges.shape[0])
edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])
interior_edges = self.all_edges[edge_aranger,:]
# GET FLAGS FOR BOUNDARY AND INTERIOR
edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)
edge_flags[edge_aranger] = 0
self.interior_edges = interior_edges
return interior_edges, edge_flags
def GetFacesHex(self):
"""Find all faces (surfaces) in the hexahedral mesh (boundary & interior).
Sets all_faces property and returns it
returns:
arr: numpy ndarray of all faces
"""
# DETERMINE DEGREE
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_faces,np.ndarray):
if self.all_faces.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_faces.shape[1] == 4 and p > 1:
pass
else:
return self.all_faces
node_arranger = NodeArrangementHex(p-1)[0]
fsize = int((p+1)**2)
# GET ALL FACES FROM THE ELEMENT CONNECTIVITY
faces = np.concatenate((np.concatenate((
np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],
self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),
self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),
self.elements[:,node_arranger[5,:]]),axis=0).astype(np.int64)
# REMOVE DUPLICATES
self.all_faces, idx = unique2d(faces,consider_sort=True,order=False,return_index=True)
face_to_element = np.zeros((self.all_faces.shape[0],2),np.int64)
face_to_element[:,0] = idx % self.elements.shape[0]
face_to_element[:,1] = idx // self.elements.shape[0]
self.face_to_element = face_to_element
return self.all_faces
def GetEdgesHex(self):
"""Find all edges (lines) of tetrahedral mesh (boundary & interior)"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1] == 2 and p > 1:
pass
else:
return self.all_edges
# FIRST GET BOUNDARY FACES
if not isinstance(self.all_faces,np.ndarray):
self.GetFacesHex()
# BUILD A 2D MESH
tmesh = Mesh()
# tmesh = deepcopy(self)
tmesh.element_type = "quad"
tmesh.elements = self.all_faces
tmesh.nelem = tmesh.elements.shape[0]
del tmesh.faces
del tmesh.points
# COMPUTE ALL EDGES
self.all_edges = tmesh.GetEdgesQuad()
return self.all_edges
def GetBoundaryFacesHex(self):
"""Find boundary faces (surfaces) of a hexahedral mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.faces,np.ndarray):
if self.faces.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.faces.shape[1] == 4 and p > 1:
pass
else:
return
node_arranger = NodeArrangementHex(p-1)[0]
# CONCATENATE ALL THE FACES MADE FROM ELEMENTS
all_faces = np.concatenate((np.concatenate((
np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],
self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),
self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),
self.elements[:,node_arranger[5,:]]),axis=0).astype(np.int64)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_faces,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY FACES
freqs_inv = itemfreq(inv)
faces_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.faces = uniques[faces_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_face_to_element = np.zeros((faces_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF FACES
all_faces_in_faces = in2d(all_faces,self.faces,consider_sort=True)
all_faces_in_faces = np.where(all_faces_in_faces==True)[0]
# boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)
boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]
boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.faces = self.elements[boundary_face_to_element[:,0][:,None],node_arranger[boundary_face_to_element[:,1],:]]
self.faces = self.faces.astype(np.uint64)
self.boundary_face_to_element = boundary_face_to_element
def GetBoundaryEdgesHex(self):
"""Find boundary edges (lines) of hexahedral mesh.
"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
# FIRST GET BOUNDARY FACES
if not isinstance(self.faces,np.ndarray):
self.GetBoundaryFacesHex()
# BUILD A 2D MESH
tmesh = Mesh()
tmesh.element_type = "quad"
tmesh.elements = self.faces
tmesh.nelem = tmesh.elements.shape[0]
del tmesh.faces
del tmesh.points
# ALL THE EDGES CORRESPONDING TO THESE BOUNDARY FACES ARE BOUNDARY EDGES
self.edges = tmesh.GetEdgesQuad()
def GetInteriorFacesHex(self):
"""Computes interior faces of a hexahedral mesh
returns:
interior_faces ndarray of interior faces
face_flags 1D array of face flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_faces,np.ndarray):
self.GetFacesHex()
if not isinstance(self.faces,np.ndarray):
self.GetBoundaryFacesHex()
face_flags = in2d(self.all_faces.astype(self.faces.dtype),self.faces,consider_sort=True)
face_flags[face_flags==True] = 1
face_flags[face_flags==False] = 0
interior_faces = self.all_faces[face_flags==False,:]
return interior_faces, face_flags
def GetInteriorEdgesHex(self):
"""Computes interior faces of a hexahedral mesh
returns:
interior_edges ndarray of interior edges
edge_flags 1D array of edge flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_edges,np.ndarray):
self.GetEdgesHex()
if not isinstance(self.edges,np.ndarray):
self.GetBoundaryEdgesHex()
edge_flags = in2d(self.all_edges.astype(self.edges.dtype),self.edges,consider_sort=True)
edge_flags[edge_flags==True] = 1
edge_flags[edge_flags==False] = 0
interior_edges = self.all_edges[edge_flags==False,:]
self.interior_edges = interior_edges
return interior_edges, edge_flags
def GetEdgesPent(self):
"""Find the all edges of a pentagonal mesh.
Sets all_edges property and returns it
returns:
arr: numpy ndarray of all edges"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1]==2 and p > 1:
pass
else:
return self.all_edges
node_arranger = np.array([
[0,1],
[1,2],
[2,3],
[3,4],
[4,0],
])
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]],
self.elements[:,node_arranger[4,:]]),axis=0).astype(np.uint64)
# REMOVE DUPLICATES
edges, idx = unique2d(edges,consider_sort=True,order=False,return_index=True)
edge_to_element = np.zeros((edges.shape[0],2),np.int64)
edge_to_element[:,0] = idx % self.elements.shape[0]
edge_to_element[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_to_element
self.all_edges = edges
return edges
def GetBoundaryEdgesPent(self):
"""Find boundary edges (lines) of a pentagonal mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
node_arranger = np.array([
[0,1],
[1,2],
[2,3],
[3,4],
[4,0],
])
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]],
self.elements[:,node_arranger[4,:]]),axis=0).astype(np.uint64)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]
self.edges = self.edges.astype(np.uint64)
self.boundary_edge_to_element = boundary_edge_to_element
return self.edges
def GetInteriorEdgesPent(self):
"""Computes interior edges of a pentagonal mesh
returns:
interior_edges ndarray of interior edges
edge_flags ndarray of edge flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_edges,np.ndarray):
self.GetEdgesPent()
if not isinstance(self.edges,np.ndarray):
self.GetBoundaryEdgesPent()
sorted_all_edges = np.sort(self.all_edges,axis=1)
sorted_boundary_edges = np.sort(self.edges,axis=1)
x = []
for i in range(self.edges.shape[0]):
current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],
self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])
interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)
pos_interior_edges = np.where(interior_edges==0)[0]
if pos_interior_edges.shape[0] != 0:
x.append(pos_interior_edges)
edge_aranger = np.arange(self.all_edges.shape[0])
edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])
interior_edges = self.all_edges[edge_aranger,:]
# GET FLAGS FOR BOUNDARY AND INTERIOR
edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)
edge_flags[edge_aranger] = 0
self.interior_edges = interior_edges
return interior_edges, edge_flags
def GetHighOrderMesh(self,p=1, silent=True, **kwargs):
"""Given a linear tri, tet, quad or hex mesh compute high order mesh based on it.
This is a static method linked to the HigherOrderMeshing module"""
if not isinstance(p,int):
raise ValueError("p must be an integer")
else:
if p < 1:
raise ValueError("Value of p={} is not acceptable. Provide p>=1.".format(p))
if self.degree is None:
self.InferPolynomialDegree()
C = p-1
if 'C' in kwargs.keys():
if kwargs['C'] != p - 1:
raise ValueError("Did not understand the specified interpolation degree of the mesh")
del kwargs['C']
# DO NOT COMPUTE IF ALREADY COMPUTED FOR THE SAME ORDER
if self.degree == None:
self.degree = self.InferPolynomialDegree()
if self.degree == p:
return
# SITUATIONS WHEN ANOTHER HIGH ORDER MESH IS REQUIRED, WITH ONE HIGH
# ORDER MESH ALREADY AVAILABLE
if self.degree != 1 and self.degree - 1 != C:
dum = self.GetLinearMesh(remap=True)
self.__dict__.update(dum.__dict__)
if not silent:
print('Generating p = '+str(C+1)+' mesh based on the linear mesh...')
t_mesh = time()
# BUILD A NEW MESH BASED ON THE LINEAR MESH
if self.element_type == 'line':
nmesh = HighOrderMeshLine(C,self,**kwargs)
if self.element_type == 'tri':
if self.edges is None:
self.GetBoundaryEdgesTri()
# nmesh = HighOrderMeshTri(C,self,**kwargs)
nmesh = HighOrderMeshTri_SEMISTABLE(C,self,**kwargs)
elif self.element_type == 'tet':
# nmesh = HighOrderMeshTet(C,self,**kwargs)
nmesh = HighOrderMeshTet_SEMISTABLE(C,self,**kwargs)
elif self.element_type == 'quad':
if self.edges is None:
self.GetBoundaryEdgesQuad()
nmesh = HighOrderMeshQuad(C,self,**kwargs)
elif self.element_type == 'hex':
nmesh = HighOrderMeshHex(C,self,**kwargs)
self.points = nmesh.points
self.elements = nmesh.elements.astype(np.uint64)
if isinstance(self.corners,np.ndarray):
# NOT NECESSARY BUT GENERIC
self.corners = nmesh.corners.astype(np.uint64)
if isinstance(self.edges,np.ndarray):
self.edges = nmesh.edges.astype(np.uint64)
if isinstance(self.faces,np.ndarray):
if isinstance(nmesh.faces,np.ndarray):
self.faces = nmesh.faces.astype(np.uint64)
self.nelem = nmesh.nelem
self.nnode = self.points.shape[0]
self.element_type = nmesh.info
self.degree = C+1
self.ChangeType()
if not silent:
print('Finished generating the high order mesh. Time taken', time()-t_mesh,'sec')
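# A minimal usage sketch (illustrative assumption): starting from a linear mesh,
#   mesh.GetHighOrderMesh(p=2)
# regenerates the connectivity and coordinates in place for a quadratic (p=2)
# tri/tet/quad/hex mesh; calling it again with the same p is a no-op.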
def EdgeLengths(self,which_edges='boundary'):
"""Computes length of edges, for 2D and 3D meshes
which_edges: [str] 'boundary' for boundary edges only
and 'all' for all edges
"""
assert self.points is not None
assert self.element_type is not None
lengths = None
if which_edges == 'boundary':
if self.edges is None:
self.GetBoundaryEdges()
edge_coords = self.points[self.edges[:,:2],:]
lengths = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
elif which_edges == 'all':
if self.all_edges is None:
self.GetEdges()
edge_coords = self.points[self.all_edges[:,:2],:]
lengths = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
return lengths
def Lengths(self,):
"""Computes length of all types of elements
"""
self.__do_essential_memebers_exist__()
if self.element_type == "line":
coords = self.points[self.elements[:,:2],:]
lengths = np.linalg.norm(coords[:,1,:] - coords[:,0,:],axis=1)
else:
self.GetEdges()
coords = self.points[self.all_edges[:,:2],:]
lengths = np.linalg.norm(coords[:,1,:] - coords[:,0,:],axis=1)
return lengths
def Areas(self, with_sign=False, gpoints=None):
"""Find areas of all 2D elements [tris, quads].
For 3D elements returns surface areas of all faces
input:
with_sign: [bool] compute with/without sign
gpoints: [ndarray] given coordinates to use instead of
self.points
returns: 1D array of nelem x 1 containing areas
"""
assert self.elements is not None
assert self.element_type is not None
if gpoints is None:
assert self.points is not None
gpoints = self.points
if self.element_type == "tri":
points = np.ones((gpoints.shape[0],3),dtype=np.float64)
points[:,:2] = gpoints
# FIND AREAS OF ALL THE ELEMENTS
area = 0.5*np.linalg.det(points[self.elements[:,:3],:])
elif self.element_type == "quad":
# NODE ORDERING IS IRRELEVANT, AS IT IS THESE AREAS
# WHICH DETERMINE NODE ORDERING
# AREA OF QUAD ABCD = AREA OF ABC + AREA OF ACD
points = np.ones((gpoints.shape[0],3),dtype=np.float64)
points[:,:2] = gpoints
# FIND AREAS ABC
area0 = np.linalg.det(points[self.elements[:,:3],:])
# FIND AREAS ACD
area1 = np.linalg.det(points[self.elements[:,[0,2,3]],:])
# FIND AREAS OF ALL THE ELEMENTS
area = 0.5*(area0+area1)
elif self.element_type == "tet":
# GET ALL THE FACES
faces = self.GetFacesTet()
points = np.ones((gpoints.shape[0],3),dtype=np.float64)
points[:,:2]=gpoints[:,:2]
area0 = np.linalg.det(points[faces[:,:3],:])
points[:,:2]=gpoints[:,[2,0]]
area1 = np.linalg.det(points[faces[:,:3],:])
points[:,:2]=gpoints[:,[1,2]]
area2 = np.linalg.det(points[faces[:,:3],:])
area = 0.5*np.linalg.norm(area0+area1+area2)
elif self.element_type == "hex":
from Florence.Tensor import unique2d
C = self.InferPolynomialDegree() - 1
area = 0
node_arranger = NodeArrangementHex(C)[0]
for i in range(node_arranger.shape[0]):
# print node_arranger[i,:]
# AREA OF FACES
points = np.ones((gpoints.shape[0],3),dtype=np.float64)
if i==0 or i==1:
points[:,:2] = gpoints[:,:2]
elif i==2 or i==3:
points[:,:2] = gpoints[:,[0,2]]
elif i==4 or i==5:
points[:,:2] = gpoints[:,1:]
# FIND AREAS ABC
area0 = np.linalg.det(points[self.elements[:,node_arranger[i,:3]],:])
# FIND AREAS ACD
area1 = np.linalg.det(points[self.elements[:,node_arranger[i,1:]],:])
# FIND AREAS OF ALL THE ELEMENTS
area += 0.5*np.linalg.norm(area0+area1)
# print area
raise ValueError('Hex areas implementation requires further checks')
else:
raise NotImplementedError("Computing areas for", self.element_type, "elements not implemented yet")
if with_sign is False:
if self.element_type == "tri" or self.element_type == "quad":
area = np.abs(area)
elif self.element_type == "tet":
raise NotImplementedError('Numbering order of tetrahedral faces could not be determined')
return area
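# A worked example for the triangular branch above (illustrative only): the single
# element [0, 1, 2] with points (0,0), (1,0), (0,1) gives the homogeneous matrix
# [[0,0,1],[1,0,1],[0,1,1]] whose determinant is 1, so the returned area is 0.5.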
def Volumes(self, with_sign=False, gpoints=None):
"""Find Volumes of all 3D elements [tets, hexes]
input:
with_sign: [bool] compute with/without sign
gpoints: [ndarray] given coordinates to use instead of
self.points
returns: 1D array of nelem x 1 containing volumes
"""
assert self.elements is not None
assert self.element_type is not None
if self.points.shape[1] == 2:
raise ValueError("2D mesh does not have volume")
if gpoints is None:
assert self.points is not None
gpoints = self.points
if self.element_type == "tet":
a = gpoints[self.elements[:,0],:]
b = gpoints[self.elements[:,1],:]
c = gpoints[self.elements[:,2],:]
d = gpoints[self.elements[:,3],:]
det_array = np.dstack((a-d,b-d,c-d))
# FIND VOLUME OF ALL THE ELEMENTS
volume = 1./6.*np.linalg.det(det_array)
elif self.element_type == "hex":
# Refer: https://en.wikipedia.org/wiki/Parallelepiped
a = gpoints[self.elements[:,0],:]
b = gpoints[self.elements[:,1],:]
c = gpoints[self.elements[:,3],:]
d = gpoints[self.elements[:,4],:]
det_array = np.dstack((b-a,c-a,d-a))
# FIND VOLUME OF ALL THE ELEMENTS
volume = np.linalg.det(det_array)
else:
raise NotImplementedError("Computing volumes for", self.element_type, "elements not implemented yet")
if with_sign is False:
volume = np.abs(volume)
return volume
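# A worked example for the tetrahedral branch above (illustrative only): the element
# [0, 1, 2, 3] with points a=(0,0,0), b=(1,0,0), c=(0,1,0), d=(0,0,1) stacks the
# columns (a-d, b-d, c-d), whose determinant is -1, so the signed volume is -1/6 and
# the returned unsigned volume is 1/6, the volume of the unit tetrahedron.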
def Sizes(self, with_sign=False):
"""Computes the size of elements for all element types.
This is a generic method that for 1D=lengths, for 2D=areas and for 3D=volumes.
It works for planar and curved elements
"""
self.__do_essential_memebers_exist__()
try:
from Florence import DisplacementFormulation
except ImportError:
raise ValueError("This functionality requires Florence's support")
if self.element_type != "line":
# FOR LINE ELEMENTS THIS APPROACH DOES NOT WORK AS JACOBIAN IS NOT WELL DEFINED
formulation = DisplacementFormulation(self)
sizes = np.zeros(self.nelem)
if not with_sign:
for elem in range(self.nelem):
LagrangeElemCoords = self.points[self.elements[elem,:],:]
sizes[elem] = formulation.GetVolume(formulation.function_spaces[0],
LagrangeElemCoords, LagrangeElemCoords, False, elem=elem)
else:
for elem in range(self.nelem):
LagrangeElemCoords = self.points[self.elements[elem,:],:]
sizes[elem] = formulation.GetSignedVolume(formulation.function_spaces[0],
LagrangeElemCoords, LagrangeElemCoords, False, elem=elem)
return sizes
else:
warn("Sizes of line elements could be incorrect if the mesh is curvilinear")
return self.Lengths()
def AspectRatios(self,algorithm='edge_based'):
"""Compute aspect ratio of the mesh element-by-element.
For 2D meshes the aspect ratio is defined as
the ratio of maximum edge length to minimum edge length.
For 3D meshes aspect ratio can be either length or area based.
input:
algorithm: [str] 'edge_based' or 'face_based'
returns:
aspect_ratio: [1D array] of size (self.nelem) containing aspect ratio of elements
"""
assert self.points is not None
assert self.element_type is not None
aspect_ratio = None
if algorithm == 'edge_based':
if self.element_type == "tri":
edge_coords = self.points[self.elements[:,:3],:]
AB = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
AC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,0,:],axis=1)
BC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,1,:],axis=1)
minimum = np.minimum(np.minimum(AB,AC),BC)
maximum = np.maximum(np.maximum(AB,AC),BC)
aspect_ratio = 1.0*maximum/minimum
elif self.element_type == "quad":
edge_coords = self.points[self.elements[:,:4],:]
AB = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
BC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,1,:],axis=1)
CD = np.linalg.norm(edge_coords[:,3,:] - edge_coords[:,2,:],axis=1)
DA = np.linalg.norm(edge_coords[:,0,:] - edge_coords[:,3,:],axis=1)
minimum = np.minimum(np.minimum(np.minimum(AB,BC),CD),DA)
maximum = np.maximum(np.maximum(np.maximum(AB,BC),CD),DA)
aspect_ratio = 1.0*maximum/minimum
elif self.element_type == "tet":
edge_coords = self.points[self.elements[:,:4],:]
AB = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
AC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,0,:],axis=1)
AD = np.linalg.norm(edge_coords[:,3,:] - edge_coords[:,0,:],axis=1)
BC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,1,:],axis=1)
BD = np.linalg.norm(edge_coords[:,3,:] - edge_coords[:,1,:],axis=1)
CD = np.linalg.norm(edge_coords[:,3,:] - edge_coords[:,2,:],axis=1)
minimum = np.minimum(np.minimum(np.minimum(np.minimum(np.minimum(AB,AC),AD),BC),BD),CD)
maximum = np.maximum(np.maximum(np.maximum(np.maximum(np.maximum(AB,AC),AD),BC),BD),CD)
aspect_ratio = 1.0*maximum/minimum
elif self.element_type == "hex":
edge_coords = self.points[self.elements[:,:8],:]
AB = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
BC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,1,:],axis=1)
CD = np.linalg.norm(edge_coords[:,3,:] - edge_coords[:,2,:],axis=1)
DA = np.linalg.norm(edge_coords[:,0,:] - edge_coords[:,3,:],axis=1)
minimum0 = np.minimum(np.minimum(np.minimum(AB,BC),CD),DA)
maximum0 = np.maximum(np.maximum(np.maximum(AB,BC),CD),DA)
AB = np.linalg.norm(edge_coords[:,5,:] - edge_coords[:,4,:],axis=1)
BC = np.linalg.norm(edge_coords[:,6,:] - edge_coords[:,5,:],axis=1)
CD = np.linalg.norm(edge_coords[:,7,:] - edge_coords[:,6,:],axis=1)
DA = np.linalg.norm(edge_coords[:,4,:] - edge_coords[:,7,:],axis=1)
minimum1 = np.minimum(np.minimum(np.minimum(AB,BC),CD),DA)
maximum1 = np.maximum(np.maximum(np.maximum(AB,BC),CD),DA)
AB = np.linalg.norm(edge_coords[:,4,:] - edge_coords[:,0,:],axis=1)
BC = np.linalg.norm(edge_coords[:,5,:] - edge_coords[:,1,:],axis=1)
CD = np.linalg.norm(edge_coords[:,6,:] - edge_coords[:,2,:],axis=1)
DA = np.linalg.norm(edge_coords[:,7,:] - edge_coords[:,3,:],axis=1)
minimum2 = np.minimum(np.minimum(np.minimum(AB,BC),CD),DA)
maximum2 = np.maximum(np.maximum(np.maximum(AB,BC),CD),DA)
minimum = np.minimum(minimum0,np.minimum(minimum1,minimum2))
maximum = np.maximum(maximum0,np.maximum(maximum1,maximum2))
aspect_ratio = 1.0*maximum/minimum
elif self.element_type == "line":
raise ValueError("Line elments do no have aspect ratio")
elif algorithm == 'face_based':
raise NotImplementedError("Face/area based aspect ratio is not implemented yet")
return aspect_ratio
def FaceNormals(self):
"""Computes outward unit normals on faces.
This is a generic method for all element types apart from lines. If the mesh lies in the 2D plane
then the unit outward normals will point in the Z direction. If the mesh is of quad or tri type but
embedded in a 3D plane, this will still compute the correct unit outward normals. Outwardness can only
be guaranteed for volume meshes.
This method is different from self.Normals() in that the latter can also compute in-plane normals
for 1D/2D elements
"""
self.__do_memebers_exist__()
points = np.copy(self.points)
if points.shape[1] < 3:
dum = np.zeros((points.shape[0],3))
dum[:,:points.shape[1]] = points
points = dum
if self.element_type == "tet" or self.element_type == "hex":
faces = self.faces
elif self.element_type == "tri" or self.element_type == "quad":
faces = self.elements
else:
raise ValueError("Cannot compute face normals on {}".format(self.element_type))
face_coords = self.points[faces[:,:3],:]
p1p0 = face_coords[:,1,:] - face_coords[:,0,:]
p2p0 = face_coords[:,2,:] - face_coords[:,0,:]
normals = np.cross(p1p0,p2p0)
norm_normals = np.linalg.norm(normals,axis=1)
normals[:,0] /= norm_normals
normals[:,1] /= norm_normals
normals[:,2] /= norm_normals
# CHECK IF THE NORMAL IS OUTWARD - FOR LINES DIRECTIONALITY DOES NOT MATTER
if self.element_type == "tet" or self.element_type == "hex":
self.GetElementsWithBoundaryFaces()
meds = self.Medians()
face_element_meds = meds[self.boundary_face_to_element[:,0],:]
p1pm = face_coords[:,1,:] - face_element_meds
# IF THE DOT PRODUCT OF THE NORMAL AND THE VECTOR FROM THE ELEMENT MEDIAN TO A FACE NODE IS NEGATIVE THEN FLIP
_check = np.einsum("ij,ij->i",normals,p1pm)
normals[np.less(_check,0.)] = -normals[np.less(_check,0.)]
return normals
def Normals(self, show_plot=False):
"""Computes unit outward normals to the boundary for all element types.
Unity and outwardness are guaranteed
"""
self.__do_memebers_exist__()
ndim = self.InferSpatialDimension()
if self.element_type == "tet" or self.element_type == "hex":
normals = self.FaceNormals()
elif self.element_type == "tri" or self.element_type == "quad" or self.element_type == "line":
if self.points.shape[1] == 3:
normals = self.FaceNormals()
else:
if self.element_type == "tri" or self.element_type == "quad":
edges = self.edges
elif self.element_type == "line":
edges = self.elements
edge_coords = self.points[edges[:,:2],:]
p1p0 = edge_coords[:,1,:] - edge_coords[:,0,:]
normals = np.zeros_like(p1p0)
normals[:,0] = -p1p0[:,1]
normals[:,1] = p1p0[:,0]
norm_normals = np.linalg.norm(normals,axis=1)
normals[:,0] /= norm_normals
normals[:,1] /= norm_normals
# CHECK IF THE NORMAL IS OUTWARD - FOR LINES DIRECTIONALITY DOES NOT MATTER
if self.element_type == "tri" or self.element_type == "quad":
self.GetElementsWithBoundaryEdges()
meds = self.Medians()
edge_element_meds = meds[self.boundary_edge_to_element[:,0],:]
p1pm = edge_coords[:,1,:] - edge_element_meds
# IF THE DOT PRODUCT OF THE NORMAL AND THE VECTOR FROM THE ELEMENT MEDIAN TO AN EDGE NODE IS NEGATIVE THEN FLIP
_check = np.einsum("ij,ij->i",normals,p1pm)
normals[np.less(_check,0.)] = -normals[np.less(_check,0.)]
if show_plot:
if ndim == 2:
mid_edge_coords = 0.5*(edge_coords[:,1,:] + edge_coords[:,0,:])
import matplotlib.pyplot as plt
figure = plt.figure()
self.SimplePlot(figure=figure, show_plot=False)
q = plt.quiver(mid_edge_coords[:,0], mid_edge_coords[:,1],
normals[:,0], normals[:,1],
color='Teal', headlength=5, width=0.004)
plt.axis('equal')
plt.axis('off')
plt.tight_layout()
plt.show()
elif ndim == 3:
mid_face_coords = np.sum(self.points[self.faces,:3],axis=1)/self.faces.shape[1]
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
from mayavi import mlab
figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))
self.SimplePlot(figure=figure, show_plot=False)
mlab.quiver3d(mid_face_coords[:,0], mid_face_coords[:,1], mid_face_coords[:,2],
normals[:,0], normals[:,1], normals[:,2],
color=(0.,128./255,128./255),line_width=2)
mlab.show()
return normals
def Angles(self, degrees=True):
"""Compute angles of 2D meshes. Strictly 2D meshes and linear elements.
If the mesh is curved the angles would be inaccurate
input:
degrees [bool] if True returns angles in degrees
otherwise in radians
returns:
angles [2D array] of angles per element. Angles are
computed per element so every element will
have as many angles as its nodes
"""
self.__do_essential_memebers_exist__()
if self.InferElementalDimension() != 2:
raise ValueError("Angles can be computed only for 2D elements")
if self.InferSpatialDimension() != 2:
raise ValueError("Angles can be computed only in 2-dimensional plane")
nodeperelem = self.InferNumberOfNodesPerLinearElement()
angles = np.zeros((self.nelem, nodeperelem))
norm = lambda x: np.linalg.norm(x,axis=1)
edge_coords = self.points[self.elements[:,:],:]
if self.element_type == "tri":
AB = edge_coords[:,1,:] - edge_coords[:,0,:]
AC = edge_coords[:,2,:] - edge_coords[:,0,:]
BC = edge_coords[:,2,:] - edge_coords[:,1,:]
angles[:,0] = np.einsum("ij,ij->i",AB,AC) / (norm(AB)*norm(AC))
angles[:,1] = np.einsum("ij,ij->i",AC,BC) / (norm(AC)*norm(BC))
angles[:,2] = np.einsum("ij,ij->i",BC,-AB)/ (norm(BC)*norm(AB))
angles = np.arccos(angles)
elif self.element_type == "quad":
AB = edge_coords[:,1,:] - edge_coords[:,0,:]
BC = edge_coords[:,2,:] - edge_coords[:,1,:]
CD = edge_coords[:,3,:] - edge_coords[:,2,:]
DA = edge_coords[:,0,:] - edge_coords[:,3,:]
angles[:,0] = np.einsum("ij,ij->i",AB,BC) / (norm(AB)*norm(BC))
angles[:,1] = np.einsum("ij,ij->i",BC,CD) / (norm(BC)*norm(CD))
angles[:,2] = np.einsum("ij,ij->i",CD,DA) / (norm(CD)*norm(DA))
angles[:,3] = np.einsum("ij,ij->i",DA,-AB)/ (norm(DA)*norm(AB))
angles = np.arccos(angles)
if degrees:
angles *= 180/np.pi
return angles
def BoundingBoxes(self, show_plot=False, figure=None):
"""Computes a bounding box for every element.
This method complements the Bounds method/property in that it computes
the bounds for every individual element
returns:
bboxes [3D array] of nelem x ndim x ndim of bounding
boxes for every element
"""
self.__do_essential_memebers_exist__()
ndim = self.InferSpatialDimension()
all_elem_coords = self.points[self.elements]
mins = all_elem_coords.min(axis=1)
maxs = all_elem_coords.max(axis=1)
bboxes = np.zeros((2*self.nelem,self.points.shape[1]))
bboxes[::2] = mins
bboxes[1::2] = maxs
bboxes = bboxes.reshape(self.nelem,2,self.points.shape[1])
if show_plot:
if ndim == 3:
point_generator = lambda bbox: np.array([
[ bbox[0,0], bbox[0,1], bbox[0,2] ],
[ bbox[1,0], bbox[0,1], bbox[0,2] ],
[ bbox[1,0], bbox[1,1], bbox[0,2] ],
[ bbox[0,0], bbox[1,1], bbox[0,2] ],
[ bbox[0,0], bbox[0,1], bbox[1,2] ],
[ bbox[1,0], bbox[0,1], bbox[1,2] ],
[ bbox[1,0], bbox[1,1], bbox[1,2] ],
[ bbox[0,0], bbox[1,1], bbox[1,2] ]
])
elif ndim == 2:
point_generator = lambda bbox: np.array([
[ bbox[0,0], bbox[0,1] ],
[ bbox[1,0], bbox[0,1] ],
[ bbox[1,0], bbox[1,1] ],
[ bbox[0,0], bbox[1,1] ]
])
nsize = 4 if ndim ==2 else 8
ranger = np.arange(nsize)
bmesh = Mesh()
bmesh.element_type = "quad" if ndim ==2 else "hex"
bmesh.elements = np.arange(self.nelem*nsize).reshape(self.nelem,nsize)
bmesh.points = np.zeros((self.nelem*nsize,ndim))
bmesh.nelem = self.nelem
bmesh.nnode = bmesh.points.shape[0]
for i in range(0,self.nelem):
bmesh.points[i*nsize:(i+1)*nsize,:] = point_generator(bboxes[i])
if ndim == 2:
import matplotlib.pyplot as plt
if figure is None:
figure = plt.figure()
self.SimplePlot(figure=figure, show_plot=False)
bmesh.SimplePlot(figure=figure, show_plot=False, edge_color='r')
plt.show()
else:
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
from mayavi import mlab
if figure is None:
figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))
self.SimplePlot(figure=figure, show_plot=False)
bmesh.SimplePlot(figure=figure, show_plot=False, plot_faces=False, edge_color='r')
mlab.show()
return bboxes
def Medians(self, geometric=True):
"""Computes median of the elements tri, tet, quad, hex based on the interpolation function
input:
geometric [Bool] geometrically computes median without relying on FEM bases
returns:
median: [ndarray] of median of elements
bases_at_median: [1D array] of (p=1) bases at median
"""
self.__do_essential_memebers_exist__()
median = None
if geometric == True:
median = np.sum(self.points[self.elements,:],axis=1)/self.elements.shape[1]
return median
else:
try:
from Florence.FunctionSpace import Tri, Tet
from Florence.QuadratureRules import FeketePointsTri, FeketePointsTet
except ImportError:
raise ImportError("This functionality requires florence's support")
if self.element_type == "tri":
eps = FeketePointsTri(2)
middle_point_isoparametric = eps[6,:]
if not np.isclose(sum(middle_point_isoparametric),-0.6666666):
raise ValueError("Median of triangle does not match [-0.3333,-0.3333]. "
"Did you change your nodal spacing or interpolation functions?")
hpBases = Tri.hpNodal.hpBases
bases_for_middle_point = hpBases(0,middle_point_isoparametric[0],
middle_point_isoparametric[1])[0]
median = np.einsum('ijk,j',self.points[self.elements[:,:3],:],bases_for_middle_point)
elif self.element_type == "tet":
middle_point_isoparametric = FeketePointsTet(3)[21]
if not np.isclose(sum(middle_point_isoparametric),-1.5):
raise ValueError("Median of tetrahedral does not match [-0.5,-0.5,-0.5]. "
"Did you change your nodal spacing or interpolation functions?")
# C = self.InferPolynomialDegree() - 1
hpBases = Tet.hpNodal.hpBases
bases_for_middle_point = hpBases(0,middle_point_isoparametric[0],
middle_point_isoparametric[1],middle_point_isoparametric[2])[0]
median = np.einsum('ijk,j',self.points[self.elements[:,:4],:],bases_for_middle_point)
else:
raise NotImplementedError('Median for {} elements not implemented yet'.format(self.element_type))
return median, bases_for_middle_point
def FindElementContainingPoint(self, point, algorithm="fem", find_parametric_coordinate=True,
scaling_factor=5., tolerance=1.0e-7, maxiter=20, use_simple_bases=False, return_on_geometric_finds=False,
initial_guess=None, initial_guesses=None, restart=False):
"""Find which element does a point lie in using specificed algorithm.
The FEM isoparametric coordinate of the point is returned as well.
If the isoparametric coordinate of the point is not required, issue find_parametric_coordinate=False
input:
point: [tuple] XYZ of enquiry point
algorithm: [str] either 'fem' or 'geometric'. The 'fem' algorithm uses k-d tree
search to get the right bounding box around as few elements as possible.
The size of the box can be specified by the user through the keyword scaling_factor.
The geometric algorithm is a lot more stable and converges much quicker.
The geometric algorithm first identifies the right element using a volume check,
then tries all possible combinations of initial guesses to get the FEM
isoparametric point. Trying all possible combinations with FEM can be potentially
more costly since the bounding box size can be large.
return_on_geometric_finds:
[bool] if the geometric algorithm is chosen and this option is on, then the
indices of the elements are returned as soon as the volume check passes and no
further checks are done. This is useful for situations when searching for points
that are meant to be in the interior of the elements rather than at the boundaries
or nodes, otherwise the number of elements returned by the geometric algorithm
is going to be more than one
return:
element_index [int/1D array of ints] element(s) containing the point.
If the point is shared between many elements a 1D array is returned
iso_parametric_point [1D array] the parametric coordinate of the point within the element.
return only if find_parametric_coordinate=True
"""
if restart:
if initial_guesses is None:
if self.element_type == "pent":
initial_guesses = np.array([
[0.,0.],
[1.,0.],
[1.,0.5],
[0.5,1.],
[0.,1.],
])
else:
raise ValueError("restart option for this element type is only supported if initial_guesses are available")
for i in range(initial_guesses.shape[0]):
ret_val = self.FindElementContainingPoint(point, algorithm=algorithm,
find_parametric_coordinate=find_parametric_coordinate,
scaling_factor=scaling_factor, tolerance=tolerance, maxiter=maxiter,
use_simple_bases=use_simple_bases, return_on_geometric_finds=return_on_geometric_finds,
initial_guess=initial_guesses[i,:], restart=False)
if ret_val[1] is not None:
break
return ret_val
self.__do_essential_memebers_exist__()
C = self.InferPolynomialDegree() - 1
if C > 0:
warn("Note that finding a point within higher order curved mesh is not supported yet")
if C > 0 and algorithm == "geometric":
warn("High order meshes are not supported using geometric algorithim. I am going to operate on linear mesh")
if use_simple_bases:
raise ValueError("Simple bases for high order elements are not available")
return
ndim = self.InferSpatialDimension()
assert len(point) == ndim
from Florence.FunctionSpace import PointInversionIsoparametricFEM
candidate_element, candidate_piso = None, None
if self.element_type == "tet" and algorithm == "fem":
algorithm = "geometric"
if algorithm == "fem":
scaling_factor = float(scaling_factor)
max_h = self.EdgeLengths().max()
# max_h=1.
# FOR CURVED ELEMENTS
# max_h = self.LargestSegment().max()
# GET A BOUNDING BOX AROUND THE POINT, n TIMES LARGER THAN MAXIMUM h, WHERE n is the SCALING FACTOR
if ndim==3:
bounding_box = (point[0]-scaling_factor*max_h,
point[1]-scaling_factor*max_h,
point[2]-scaling_factor*max_h,
point[0]+scaling_factor*max_h,
point[1]+scaling_factor*max_h,
point[2]+scaling_factor*max_h)
elif ndim==2:
bounding_box = (point[0]-scaling_factor*max_h,
point[1]-scaling_factor*max_h,
point[0]+scaling_factor*max_h,
point[1]+scaling_factor*max_h)
# SELECT ELEMENTS ONLY WITHIN THE BOUNDING BOX
mesh = deepcopy(self)
idx_kept_element = self.RemoveElements(bounding_box)[1]
if ndim==3:
for i in range(self.nelem):
coord = self.points[self.elements[i,:],:]
p_iso, converged = PointInversionIsoparametricFEM(self.element_type, C, coord, point,
tolerance=tolerance, maxiter=maxiter, verbose=True, use_simple_bases=use_simple_bases,
initial_guess=initial_guess)
if converged:
# if p_iso[0] >= -1. and p_iso[0] <=1. and \
# p_iso[1] >= -1. and p_iso[1] <=1. and \
# p_iso[2] >= -1. and p_iso[2] <=1. :
if (p_iso[0] > -1. or np.isclose(p_iso[0],-1.,rtol=tolerance)) and \
(p_iso[0] < 1. or np.isclose(p_iso[0], 1.,rtol=tolerance)) and \
(p_iso[1] > -1. or np.isclose(p_iso[1],-1.,rtol=tolerance)) and \
(p_iso[1] < 1. or np.isclose(p_iso[1], 1.,rtol=tolerance)) and \
(p_iso[2] > -1. or np.isclose(p_iso[2],-1.,rtol=tolerance)) and \
(p_iso[2] < 1. or np.isclose(p_iso[2], 1.,rtol=tolerance)) :
candidate_element, candidate_piso = i, p_iso
break
elif ndim==2:
for i in range(self.nelem):
coord = self.points[self.elements[i,:],:]
p_iso, converged = PointInversionIsoparametricFEM(self.element_type, C, coord, point,
tolerance=tolerance, maxiter=maxiter, verbose=True, use_simple_bases=use_simple_bases,
initial_guess=initial_guess)
# if p_iso[0] >= -1. and p_iso[0] <=1. and \
# p_iso[1] >= -1. and p_iso[1] <=1.:
# candidate_element, candidate_piso = i, p_iso
# break
if (p_iso[0] > -1. or np.isclose(p_iso[0],-1.,rtol=tolerance)) and \
(p_iso[0] < 1. or np.isclose(p_iso[0], 1.,rtol=tolerance)) and \
(p_iso[1] > -1. or np.isclose(p_iso[1],-1.,rtol=tolerance)) and \
(p_iso[1] < 1. or np.isclose(p_iso[1], 1.,rtol=tolerance)) :
candidate_element, candidate_piso = i, p_iso
break
self.__update__(mesh)
# print(candidate_element)
if candidate_element is not None:
candidate_element = idx_kept_element[candidate_element]
if find_parametric_coordinate:
return candidate_element, candidate_piso
else:
return candidate_element
else:
if self.element_type == "tet":
from Florence.QuadratureRules.FeketePointsTet import FeketePointsTet
initial_guesses = FeketePointsTet(C)
def GetVolTet(a0,b0,c0,d0):
det_array = np.dstack((a0-d0,b0-d0,c0-d0))
# FIND VOLUME OF ALL THE ELEMENTS
volume = 1./6.*np.abs(np.linalg.det(det_array))
return volume
a = self.points[self.elements[:,0],:]
b = self.points[self.elements[:,1],:]
c = self.points[self.elements[:,2],:]
d = self.points[self.elements[:,3],:]
o = np.tile(point,self.nelem).reshape(self.nelem,a.shape[1])
# TOTAL VOLUME
vol = self.Volumes()
# PARTS' VOLUMES
vol0 = GetVolTet(a,b,c,o)
vol1 = GetVolTet(a,b,o,d)
vol2 = GetVolTet(a,o,c,d)
vol3 = GetVolTet(o,b,c,d)
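# A POINT LIES INSIDE (OR ON) A TETRAHEDRON IF AND ONLY IF THE FOUR SUB-TETRAHEDRA
# FORMED WITH THE POINT ADD UP TO THE TOTAL VOLUME (WITHIN TOLERANCE)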
criterion_check = vol0+vol1+vol2+vol3-vol
elems = np.isclose(criterion_check,0.,rtol=tolerance)
elems_idx = np.where(elems==True)[0]
elif self.element_type == "quad":
from Florence.QuadratureRules.GaussLobattoPoints import GaussLobattoPointsQuad
initial_guesses = GaussLobattoPointsQuad(C)
def GetAreaQuad(a0,b0,c0,d0):
# AREA OF QUAD ABCD = AREA OF ABC + AREA OF ACD
a00 = np.ones((a0.shape[0],3),dtype=np.float64); a00[:,:2] = a0
b00 = np.ones((b0.shape[0],3),dtype=np.float64); b00[:,:2] = b0
c00 = np.ones((c0.shape[0],3),dtype=np.float64); c00[:,:2] = c0
d00 = np.ones((d0.shape[0],3),dtype=np.float64); d00[:,:2] = d0
# FIND AREAS ABC
area0 = np.abs(np.linalg.det(np.dstack((a00,b00,c00))))
# FIND AREAS ACD
area1 = np.abs(np.linalg.det(np.dstack((a00,c00,d00))))
# FIND AREAS OF ALL THE ELEMENTS
area = 0.5*(area0+area1)
return area
a = self.points[self.elements[:,0],:]
b = self.points[self.elements[:,1],:]
c = self.points[self.elements[:,2],:]
d = self.points[self.elements[:,3],:]
o = np.tile(point,self.nelem).reshape(self.nelem,a.shape[1])
# TOTAL AREA
vol = self.Areas()
# PARTS' AREAS - DON'T CHANGE THE ORDERING, ESPECIALLY OF vol1
vol0 = GetAreaQuad(o,c,b,a)
vol1 = GetAreaQuad(o,a,d,c)
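# A POINT LIES INSIDE (OR ON) A QUAD IF AND ONLY IF THE TWO SUB-QUADS FORMED WITH
# THE POINT ADD UP TO THE TOTAL AREA (WITHIN TOLERANCE)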
criterion_check = vol0+vol1-vol
elems = np.isclose(criterion_check,0.,rtol=tolerance)
elems_idx = np.where(elems==True)[0]
else:
raise NotImplementedError("Geometric algorithm for {} elements not implemented yet".format(self.element_type))
if return_on_geometric_finds:
return elems_idx
for i in range(len(elems_idx)):
coord = self.points[self.elements[elems_idx[i],:],:]
# TRY ALL POSSIBLE INITIAL GUESSES - THIS IS CHEAP AS THE SEARCH SPACE CONTAINS ONLY A
# FEW ELEMENTS
for guess in initial_guesses:
p_iso, converged = PointInversionIsoparametricFEM(self.element_type, C, coord, point,
tolerance=tolerance, maxiter=maxiter, verbose=True,
use_simple_bases=use_simple_bases, initial_guess=guess)
if converged:
break
if converged:
candidate_element, candidate_piso = elems_idx[i], p_iso
break
if find_parametric_coordinate:
return candidate_element, candidate_piso
else:
return candidate_element
def AverageJacobian(self):
"""Computes average Jacobian of elements for all element types over a mesh
This is a generic method that for 1D=lengths, for 2D=areas and for 3D=volumes.
It works for planar and curved elements
"""
self.__do_essential_memebers_exist__()
try:
from Florence import DisplacementFormulation
except ImportError:
raise ValueError("This functionality requires Florence's support")
if self.element_type != "line":
# FOR LINE ELEMENTS THIS APPROACH DOES NOT WORK AS JACOBIAN IS NOT WELL DEFINED
formulation = DisplacementFormulation(self)
sizes = np.zeros(self.nelem)
for elem in range(self.nelem):
LagrangeElemCoords = self.points[self.elements[elem,:],:]
sizes[elem] = formulation.GetAverageJacobian(formulation.function_spaces[0],
LagrangeElemCoords, LagrangeElemCoords, False, elem=elem)
return sizes.mean()
else:
raise ValueError("Not implemented for 1D elements")
def LargestSegment(self, smallest_element=True, nsamples=30,
plot_segment=False, plot_element=False, figure=None, save=False, filename=None):
"""Finds the largest segment that can fit in an element. For curvilinear elements
this measure can be used as (h) for h-refinement studies
input:
smallest_element [bool] if the largest segment size is to be computed in the
smallest element (i.e. element with the smallest area in 2D or
smallest volume in 3D). Default is True. If False, then the largest
segment in the largest element will be computed.
nsamples: [int] number of sample points along the curved
edges of the elements. The maximum distance between
all combinations of these points is the largest
segment
plot_segment: [bool] plots the segment on top of the [curved/straight] mesh
plot_element: [bool] plots the straight/curved element to which the segment
belongs
figure: [an instance of matplotlib/mayavi.mlab figure for 2D/3D]
save: [bool] whether to save the figure or not
filename: [str] file name for the figure to be saved
returns:
largest_segment_length [float] maximum segment length that can fit within either the
smallest or the largest element, depending on smallest_element
"""
self.__do_memebers_exist__()
if self.element_type == "hex" or self.element_type == "tet":
quantity = self.Volumes()
elif self.element_type == "quad" or self.element_type == "tri":
quantity = self.Areas()
if smallest_element:
omesh = self.GetLocalisedMesh(quantity.argmin())
else:
omesh = self.GetLocalisedMesh(quantity.argmax())
try:
from Florence.PostProcessing import PostProcess
except ImportError:
raise ImportError('This function requires florence PostProcessing module')
if save:
if filename is None:
raise ValueError("No file name provided. I am going to write one the current directory")
filename = PWD(__file__) + "/output.png"
if self.element_type == "tri":
tmesh = PostProcess.TessellateTris(omesh,np.zeros_like(omesh.points),
plot_edges=True, interpolation_degree=nsamples)
elif self.element_type == "quad":
tmesh = PostProcess.TessellateQuads(omesh,np.zeros_like(omesh.points),
plot_edges=True, interpolation_degree=nsamples)
elif self.element_type == "tet":
tmesh = PostProcess.TessellateTets(omesh,np.zeros_like(omesh.points),
plot_edges=True, interpolation_degree=nsamples)
elif self.element_type == "hex":
tmesh = PostProcess.TessellateHexes(omesh,np.zeros_like(omesh.points),
plot_edges=True, interpolation_degree=nsamples)
ndim = omesh.InferSpatialDimension()
nnode = tmesh.points.shape[0]
largest_segment_lengths = []
nodes = np.array((1,ndim))
for i in range(nnode):
tiled_points = np.tile(tmesh.points[i,:][:,None],nnode).T
segment_lengths = np.linalg.norm(tmesh.points - tiled_points, axis=1)
largest_segment_lengths.append(segment_lengths.max())
nodes = np.vstack((nodes, np.array([i,segment_lengths.argmax()])[None,:]))
largest_segment_lengths = np.array(largest_segment_lengths)
nodes = nodes[1:,:]
largest_segment_length = largest_segment_lengths.max()
corresponding_nodes = nodes[largest_segment_lengths.argmax(),:]
if plot_segment:
segment_coords = tmesh.points[corresponding_nodes,:]
if ndim==2:
import matplotlib.pyplot as plt
if figure is None:
figure = plt.figure()
if plot_element:
if omesh.element_type == "tri":
PostProcess.CurvilinearPlotTri(omesh,
np.zeros_like(omesh.points),plot_points=True,
figure=figure, interpolation_degree=nsamples, show_plot=False)
elif omesh.element_type == "quad":
PostProcess.CurvilinearPlotQuad(omesh,
np.zeros_like(omesh.points),plot_points=True,
figure=figure, interpolation_degree=nsamples, show_plot=False)
tmesh.SimplePlot(figure=figure,show_plot=False)
if save:
plt.savefig(filename,bbox_inches="tight",dpi=300)
plt.show()
elif ndim==3:
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
from mayavi import mlab
if figure is None:
figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))
if plot_element:
if omesh.element_type == "tet":
PostProcess.CurvilinearPlotTet(omesh,
np.zeros_like(omesh.points),plot_points=True, point_radius=0.13,
figure=figure, interpolation_degree=nsamples, show_plot=False)
elif omesh.element_type == "hex":
PostProcess.CurvilinearPlotHex(omesh,
np.zeros_like(omesh.points),plot_points=True,
figure=figure, interpolation_degree=nsamples, show_plot=False)
tmesh.GetEdges()
edge_coords = tmesh.points[np.unique(tmesh.all_edges),:]
mlab.triangular_mesh(tmesh.points[:,0],tmesh.points[:,1],tmesh.points[:,2],
tmesh.elements, representation='wireframe', color=(0,0,0))
# # mlab.points3d(edge_coords[:,0],edge_coords[:,1],edge_coords[:,2],color=(1., 99/255., 71./255), scale_factor=0.03)
# # mlab.plot3d(segment_coords[:,0],segment_coords[:,1],segment_coords[:,2], color=(227./255, 66./255, 52./255))
mlab.points3d(edge_coords[:,0],edge_coords[:,1],edge_coords[:,2],color=(1., 99/255., 71./255), scale_factor=0.17)
mlab.plot3d(segment_coords[:,0],segment_coords[:,1],segment_coords[:,2],
color=(227./255, 66./255, 52./255), line_width=10., representation="wireframe")
if save:
mlab.savefig(filename,dpi=300)
mlab.show()
return largest_segment_length
def CheckNodeNumbering(self,change_order_to='retain', verbose=True):
"""Checks for node numbering order of the imported mesh. Mesh can be tri or tet
input:
change_order_to: [str] {'clockwise','anti-clockwise','retain'} changes the order to clockwise,
anti-clockwise or retains the numbering order - default is 'retain'
output:
original_order: [str] {'clockwise','anti-clockwise','mixed'} returns the original numbering order"""
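# A minimal usage sketch (illustrative only):
#   order = mesh.CheckNodeNumbering()                                  # report only
#   order = mesh.CheckNodeNumbering(change_order_to='anti-clockwise')  # also re-orders elements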
self.__do_essential_memebers_exist__()
# CHECK IF IT IS LINEAR MESH
nodeperelem = self.InferNumberOfNodesPerLinearElement()
assert self.elements.shape[1] == nodeperelem
quantity = np.array([])
if self.element_type == "tri":
quantity = self.Areas(with_sign=True)
elif self.element_type == "quad":
quantity = self.Areas(with_sign=True)
elif self.element_type == "tet":
quantity = self.Volumes(with_sign=True)
elif self.element_type == "hex":
quantity = self.Volumes(with_sign=True)
original_order = ''
# CHECK NUMBERING
if (quantity > 0).all():
original_order = 'anti-clockwise'
if change_order_to == 'clockwise':
self.elements = np.fliplr(self.elements)
elif (quantity < 0).all():
original_order = 'clockwise'
if change_order_to == 'anti-clockwise':
self.elements = np.fliplr(self.elements)
else:
original_order = 'mixed'
if change_order_to == 'clockwise':
self.elements[quantity>0,:] = np.fliplr(self.elements[quantity>0,:])
elif change_order_to == 'anti-clockwise':
self.elements[quantity<0,:] = np.fliplr(self.elements[quantity<0,:])
if original_order == 'anti-clockwise':
print(u'\u2713'.encode('utf8')+b' : ','Imported mesh has',original_order,'node ordering')
else:
print(u'\u2717'.encode('utf8')+b' : ','Imported mesh has',original_order,'node ordering')
return original_order
def GetElementsEdgeNumberingTri(self):
"""Finds edges of elements and their flags saying which edge they are [0,1,2].
At most a triangle can have all its three edges on the boundary.
output:
edge_elements: [2D array] array mapping every unique edge of the mesh to the
element it belongs to [column 0] and its local edge number [column 1]
Note that this method sets the self.edge_to_element to edge_elements,
so the return value is not strictly necessary
"""
if isinstance(self.edge_to_element,np.ndarray):
if self.edge_to_element.shape[0] > 1:
return self.edge_to_element
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
if self.all_edges is None:
self.GetEdgesTri()
all_edges = np.concatenate((self.elements[:,:2],self.elements[:,[1,2]],
self.elements[:,[2,0]]),axis=0).astype(np.int64)
all_edges, idx = unique2d(all_edges,consider_sort=True,order=False, return_index=True)
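# THE EDGES WERE STACKED IN THREE BLOCKS OF nelem ROWS (ONE BLOCK PER LOCAL EDGE),
# SO idx MODULO nelem GIVES THE ELEMENT AND idx INTEGER-DIVIDED BY nelem GIVES
# WHICH LOCAL EDGE [0,1,2] IT IS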
edge_elements = np.zeros((all_edges.shape[0],2),dtype=np.int64)
edge_elements[:,0] = idx % self.elements.shape[0]
edge_elements[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_elements
return self.edge_to_element
def GetElementsWithBoundaryEdgesTri(self):
"""Finds elements which have edges on the boundary.
At most an element can have all its three edges on the boundary.
output:
edge_elements: [2D array] array containing elements which have edges
on the boundary [column 0] and a flag stating which edges they are [column 1]
"""
if isinstance(self.boundary_edge_to_element,np.ndarray):
if self.boundary_edge_to_element.shape[1] > 1 and self.boundary_edge_to_element.shape[0] > 1:
return self.boundary_edge_to_element
# DO NOT COMPUTE EDGES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES
assert self.elements is not None
assert self.edges is not None
edge_elements = np.zeros((self.edges.shape[0],2),dtype=np.int64)
# FIND WHICH EDGE NODES ARE IN WHICH ELEMENT
for i in range(self.edges.shape[0]):
x = []
for j in range(2):
x.append(np.where(self.elements[:,:3]==self.edges[i,j])[0])
# FIND WHICH ELEMENTS CONTAIN ALL EDGE NODES - FOR INTERIOR EDGES
# THERE CAN BE MORE THAN ONE ELEMENT CONTAINING ALL EDGE NODES
z = x[0]
for k in range(1,len(x)):
z = np.intersect1d(x[k],z)
# CHOOSE ONLY ONE OF THESE ELEMENTS
edge_elements[i,0] = z[0]
# IN WHICH COLUMNS OF THAT ELEMENT ARE THE EDGE NODES LOCATED
cols = np.array([np.where(self.elements[z[0],:]==self.edges[i,0])[0],
np.where(self.elements[z[0],:]==self.edges[i,1])[0]
])
cols = np.sort(cols.flatten())
if cols[0] == 0 and cols[1] == 1:
edge_elements[i,1] = 0
elif cols[0] == 1 and cols[1] == 2:
edge_elements[i,1] = 1
elif cols[0] == 0 and cols[1] == 2:
edge_elements[i,1] = 2
self.boundary_edge_to_element = edge_elements
return edge_elements
def GetElementsWithBoundaryFacesTet(self):
"""Finds elements which have faces on the boundary.
At most a tetrahedral element can have all its four faces on the boundary.
output:
boundary_face_to_element: [2D array] array containing elements which have face
on the boundary [column 0] and a flag stating which faces they are [column 1]
"""
if self.boundary_face_to_element is not None:
return self.boundary_face_to_element
# DO NOT COMPUTE FACES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES
assert self.elements is not None
assert self.faces is not None
# THIS METHOD ALWAYS RETURNS THE FACE TO ELEMENT ARRAY, AND DOES NOT CHECK
# IF THIS HAS BEEN COMPUTED BEFORE, THE REASON BEING THAT THE FACES CAN COME
# EXTERNALLY WHOSE ARRANGEMENT WOULD NOT CORRESPOND TO THE ONE USED INTERNALLY
# HENCE THIS MAPPING BECOMES NECESSARY
all_faces = np.concatenate((self.elements[:,:3],self.elements[:,[0,1,3]],
self.elements[:,[0,2,3]],self.elements[:,[1,2,3]]),axis=0).astype(self.faces.dtype)
all_faces_in_faces = in2d(all_faces,self.faces[:,:3],consider_sort=True)
all_faces_in_faces = np.where(all_faces_in_faces==True)[0]
boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)
boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]
boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]
# SO FAR WE HAVE COMPUTED THE ELEMENTS THAT CONTAIN FACES, HOWEVER
# NOTE THAT WE STILL HAVE NOT COMPUTED A MAPPING BETWEEN ELEMENTS AND
# FACES. WE ONLY KNOW WHICH ELEMENTS CONTAIN FACES FROM in2d.
# WE NEED TO FIND THIS MAPPING NOW
C = self.InferPolynomialDegree() - 1
node_arranger = NodeArrangementTet(C)[0]
# WE NEED TO DO THIS DUMMY RECONSTRUCTION OF FACES BASED ON ELEMENTS
faces = self.elements[boundary_face_to_element[:,0][:,None],
node_arranger[boundary_face_to_element[:,1],:]].astype(self.faces.dtype)
# CHECK FOR THIS CONDITION AS ARRANGEMENT IS NO LONGER MAINTAINED
assert np.sum(faces[:,:3].astype(np.int64) - self.faces[:,:3].astype(np.int64)) == 0
# NOW GET THE ROW MAPPING BETWEEN OLD FACES AND NEW FACES
from Florence.Tensor import shuffle_along_axis
row_mapper = shuffle_along_axis(faces[:,:3],self.faces[:,:3],consider_sort=True)
# UPDATE THE MAP
boundary_face_to_element[:,:] = boundary_face_to_element[row_mapper,:]
self.boundary_face_to_element = boundary_face_to_element
return self.boundary_face_to_element
def GetElementsFaceNumberingTet(self):
"""Finds which faces belong to which elements and which faces of the elements
they are e.g. 0, 1, 2 or 3.
output:
face_elements: [2D array] nfaces x 2 array mapping every face to the element
it belongs to [column 0] and its local face number [column 1]
Note that this method also sets the self.face_to_element to face_elements,
so the return value is not strictly necessary
"""
if isinstance(self.face_to_element,np.ndarray):
if self.face_to_element.shape[0] > 1:
return self.face_to_element
assert self.elements is not None
# GET ALL FACES FROM ELEMENT CONNECTIVITY
if self.all_faces is None:
self.GetFacesTet()
all_faces = np.concatenate((self.elements[:,:3],self.elements[:,[0,1,3]],
self.elements[:,[0,2,3]],self.elements[:,[1,2,3]]),axis=0).astype(np.int64)
_,idx = unique2d(all_faces,consider_sort=True,order=False, return_index=True)
face_elements = np.zeros((self.all_faces.shape[0],2),dtype=np.int64)
face_elements[:,0] = idx % self.elements.shape[0]
face_elements[:,1] = idx // self.elements.shape[0]
self.face_to_element = face_elements
return self.face_to_element
def ArrangeFacesTet(self):
"""Arranges all the faces of tetrahedral elements
with triangular type node ordering """
if self.all_faces is None:
self.all_faces = self.GetFacesTet()
if self.face_to_element is None:
self.GetElementsFaceNumberingTet()
# DETERMINE DEGREE
p = self.InferPolynomialDegree()
node_arranger = NodeArrangementTet(p-1)[0]
# for i in range(self.face_to_element.shape[0]):
# self.all_faces = self.elements[self.face_to_element[i,0],node_arranger[self.face_to_element[i,1],:]]
self.all_faces = self.elements[self.face_to_element[:,0][:,None],node_arranger[self.face_to_element[:,1],:]]
def GetElementsEdgeNumberingQuad(self):
"""Finds edges of elements and their flags saying which edge they are [0,1,2,3].
At most a quad can have all its four edges on the boundary.
output:
edge_elements: [2D array] array mapping every unique edge of the mesh to the
element it belongs to [column 0] and its local edge number [column 1]
Note that this method sets the self.edge_to_element to edge_elements,
so the return value is not strictly necessary
"""
if isinstance(self.edge_to_element,np.ndarray):
if self.edge_to_element.shape[0] > 1:
return self.edge_to_element
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
if self.all_edges is None:
self.GetEdgesQuad()
p = self.InferPolynomialDegree()
# FIND WHICH EDGE NODES ARE IN WHICH ELEMENT
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.int64)
all_edges, idx = unique2d(all_edges,consider_sort=True,order=False, return_index=True)
edge_elements = np.zeros((all_edges.shape[0],2),dtype=np.int64)
# edge_elements = np.zeros((self.edges.shape[0],2),dtype=np.int64)
edge_elements[:,0] = idx % self.elements.shape[0]
edge_elements[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_elements
return self.edge_to_element
def GetElementsWithBoundaryEdgesQuad(self):
"""Finds elements which have edges on the boundary.
At most a quad can have all its four edges on the boundary.
output:
boundary_edge_to_element: [2D array] array containing elements which have edges
on the boundary [column 0] and a flag stating which edges they are [column 1]
"""
if isinstance(self.boundary_edge_to_element,np.ndarray):
if self.boundary_edge_to_element.shape[1] > 1 and self.boundary_edge_to_element.shape[0] > 1:
return self.boundary_edge_to_element
# DO NOT COMPUTE EDGES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES
assert self.elements is not None
assert self.edges is not None
p = self.InferPolynomialDegree()
# FIND WHICH EDGE NODES ARE IN WHICH ELEMENT
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(self.edges.dtype)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT'S JUST FOR RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.boundary_edge_to_element = boundary_edge_to_element
return self.boundary_edge_to_element
def GetElementsWithBoundaryFacesHex(self):
"""Finds elements which have faces on the boundary.
At most a hexahedron can have all its six faces on the boundary.
output:
boundary_face_to_element: [2D array] array containing elements which have face
on the boundary [column 0] and a flag stating which faces they are [column 1]
"""
# DO NOT COMPUTE FACES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES
assert self.elements is not None
assert self.faces is not None
if self.boundary_face_to_element is not None:
return self.boundary_face_to_element
# THIS METHOD ALWAYS RETURNS THE FACE TO ELEMENT ARRAY, AND DOES NOT CHECK
# IF THIS HAS BEEN COMPUTED BEFORE, THE REASON BEING THAT THE FACES CAN COME
# EXTERNALLY WHOSE ARRANGEMENT WOULD NOT CORRESPOND TO THE ONE USED INTERNALLY
# HENCE THIS MAPPING BECOMES NECESSARY
C = self.InferPolynomialDegree() - 1
node_arranger = NodeArrangementHex(C)[0]
all_faces = np.concatenate((np.concatenate((
np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],
self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),
self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),
self.elements[:,node_arranger[5,:]]),axis=0).astype(self.faces.dtype)
all_faces_in_faces = in2d(all_faces,self.faces[:,:4],consider_sort=True)
all_faces_in_faces = np.where(all_faces_in_faces==True)[0]
boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)
boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]
boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]
# SO FAR WE HAVE COMPUTED THE ELEMENTS THAT CONTAIN FACES, HOWEVER
# NOTE THAT WE STILL HAVE NOT COMPUTED A MAPPING BETWEEN ELEMENTS AND
# FACES. WE ONLY KNOW WHICH ELEMENTS CONTAIN FACES FROM in2d.
# WE NEED TO FIND THIS MAPPING NOW
# WE NEED TO DO THIS DUMMY RECONSTRUCTION OF FACES BASED ON ELEMENTS
faces = self.elements[boundary_face_to_element[:,0][:,None],
node_arranger[boundary_face_to_element[:,1],:]].astype(self.faces.dtype)
# CHECK FOR THIS CONDITION AS ARRANGEMENT IS NO LONGER MAINTAINED
assert np.sum(faces[:,:4].astype(np.int64) - self.faces[:,:4].astype(np.int64)) == 0
# NOW GET THE ROW MAPPING BETWEEN OLD FACES AND NEW FACES
from Florence.Tensor import shuffle_along_axis
row_mapper = shuffle_along_axis(faces[:,:4],self.faces[:,:4],consider_sort=True)
# UPDATE THE MAP
boundary_face_to_element[:,:] = boundary_face_to_element[row_mapper,:]
self.boundary_face_to_element = boundary_face_to_element
return self.boundary_face_to_element
def GetElementsFaceNumberingHex(self):
"""Finds which faces belong to which elements and which faces of the elements
they are e.g. 0, 1, 2 or 3.
output:
face_elements: [2D array] nfaces x 2 array containing elements which have face
on the boundary with their flags
Note that this method also sets the self.face_to_element to face_elements,
so the return value is not strictly necessary
"""
if isinstance(self.face_to_element,np.ndarray):
if self.face_to_element.shape[0] > 1:
return self.face_to_element
assert self.elements is not None
# GET ALL FACES FROM ELEMENT CONNECTIVITY
if self.all_faces is None:
self.GetFacesHex()
C = self.InferPolynomialDegree() - 1
node_arranger = NodeArrangementHex(C)[0]
all_faces = np.concatenate((np.concatenate((
np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],
self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),
self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),
self.elements[:,node_arranger[5,:]]),axis=0).astype(self.all_faces.dtype)
_,idx = unique2d(all_faces,consider_sort=True,order=False, return_index=True)
face_elements = np.zeros((self.all_faces.shape[0],2),dtype=np.int64)
face_elements[:,0] = idx % self.elements.shape[0]
face_elements[:,1] = idx // self.elements.shape[0]
self.face_to_element = face_elements
return self.face_to_element
def ArrangeFacesHex(self):
"""Arranges all the faces of hexahedral elements
with quadrilateral type node ordering """
if self.all_faces is None:
self.all_faces = self.GetFacesHex()
if self.face_to_element is None:
self.GetElementsFaceNumberingHex()
# DETERMINE DEGREE
p = self.InferPolynomialDegree()
node_arranger = NodeArrangementHex(p-1)[0]
self.all_faces = self.elements[self.face_to_element[:,0][:,None],node_arranger[self.face_to_element[:,1],:]]
def GetNodeCommonality(self):
"""Finds the elements sharing a node.
The return values are linked lists [list of numpy of arrays].
Each numpy array within the list gives the elements that contain a given node.
As a result the size of the linked list is nnode
outputs:
els: [list of numpy arrays] element numbers containing nodes
pos: [list of numpy arrays] elemental positions of the nodes
flat_pos: [list of numpy arrays] positions of the nodes in the
flattened element connectivity.
"""
self.__do_essential_memebers_exist__()
elements = self.elements.ravel()
idx_sort = np.argsort(elements)
sorted_elements = elements[idx_sort]
vals, idx_start = np.unique(sorted_elements, return_index=True)
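# GROUP IDENTICAL NODE NUMBERS TOGETHER: FOR EVERY NODE, idx_sort HOLDS THE FLAT
# POSITIONS IN self.elements WHERE IT APPEARS; INTEGER DIVISION BY THE NUMBER OF
# COLUMNS RECOVERS THE ELEMENT NUMBER AND THE REMAINDER THE LOCAL POSITION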
# Sets of indices
flat_pos = np.split(idx_sort, idx_start[1:])
els = np.split(idx_sort // int(self.elements.shape[1]), idx_start[1:])
pos = np.split(idx_sort % int(self.elements.shape[1]), idx_start[1:])
# In case one wants to return only the duplicates i.e. filter keeping only items occurring more than once
# vals, idx_start, count = np.unique(sorted_elements, return_counts=True, return_index=True)
# vals = vals[count > 1]
# res = filter(lambda x: x.size > 1, res)
return els, pos, flat_pos
def Read(self, filename=None, element_type="tri", reader_type=None, reader_type_format=None,
reader_type_version=None, order=0, read_surface_info=False, **kwargs):
"""Convenience mesh reader method to dispatch call to subsequent apporpriate methods"""
if reader_type != 'read_separate':
if not isinstance(filename,str):
raise ValueError("filename must be a string")
return
if reader_type is None:
if filename.split('.')[-1] == "msh":
reader_type = "gmsh"
elif filename.split('.')[-1] == "obj":
reader_type = "obj"
elif filename.split('.')[-1] == "unv":
reader_type = "unv"
elif filename.split('.')[-1] == "fro":
reader_type = "fro"
elif filename.split('.')[-1] == "dat":
for key in kwargs.keys():
inkey = insensitive(key)
if "connectivity" in inkey and "delimiter" not in inkey:
reader_type = "read_separate"
break
if reader_type is None:
raise ValueError("Mesh file format was not undertood. Please specify it using reader_type keyword")
self.filename = filename
self.reader_type = reader_type
self.reader_type_format = reader_type_format
self.reader_type_version = reader_type_version
if self.reader_type == 'salome':
self.ReadSalome(filename, element_type=element_type, read_surface_info=read_surface_info)
elif reader_type == 'GID':
self.ReadGIDMesh(filename, element_type, order)
elif self.reader_type == 'gmsh':
self.ReadGmsh(filename, element_type=element_type, read_surface_info=read_surface_info)
elif self.reader_type == 'obj':
self.ReadOBJ(filename, element_type=element_type, read_surface_info=read_surface_info)
elif self.reader_type == 'fenics':
self.ReadFenics(filename, element_type)
elif self.reader_type == 'vtu':
self.ReadVTK(filename)
elif self.reader_type == 'unv':
self.ReadUNV(filename, element_type)
elif self.reader_type == 'fro':
self.ReadFRO(filename, element_type)
elif self.reader_type == 'read_separate':
# READ MESH FROM SEPARATE FILES FOR CONNECTIVITY AND COORDINATES
from Florence.Utils import insensitive
# return insensitive(kwargs.keys())
for key in kwargs.keys():
inkey = insensitive(key)
if "connectivity" in inkey and "delimiter" not in inkey:
connectivity_file = kwargs.get(key)
if "coordinate" in insensitive(key) and "delimiter" not in inkey:
coordinates_file = kwargs.get(key)
self.ReadSeparate(connectivity_file,coordinates_file,element_type,
delimiter_connectivity=',',delimiter_coordinates=',')
elif self.reader_type == 'ReadHDF5':
self.ReadHDF5(filename)
self.nnode = self.points.shape[0]
# MAKE SURE MESH DATA IS CONTIGUOUS
self.points = np.ascontiguousarray(self.points)
self.elements = np.ascontiguousarray(self.elements)
return
def ReadSalome(self, filename, element_type="tri", read_surface_info=False):
"""Salome .dat format mesh reader"""
if element_type == "line":
el = "102"
bel = ""
elif element_type == "tri":
el = "203"
bel = "102"
elif element_type == "quad":
el = "204"
bel = "102"
elif element_type == "tet":
el = "304"
bel = "203"
elif element_type == "hex":
el = "308"
bel = "204"
if read_surface_info is True and element_type == "line":
warn("No surface info for lines. I am going to ignore this")
read_surface_info = False
with open(filename,'r') as f:
lines = f.readlines()
info = lines[0].rstrip().split()
self.nnode = int(info[0])
all_nelem = int(info[1])
nodes = lines[1:self.nnode+1]
points = []
for line in nodes:
points.append([float(i) for i in line.rstrip().split()[1:4]])
self.points = np.array(points,copy=True)
self.nnode = self.points.shape[0]
edges, faces, elements = [], [], []
for counter in range(self.nnode+1,len(lines)):
line = lines[counter].rstrip().split()
if read_surface_info:
if bel == line[1]:
faces.append([int(i) for i in line[2:]])
if el == line[1]:
elements.append([int(i) for i in line[2:]])
self.element_type = element_type
self.elements = np.array(elements,dtype=np.int64,copy=True) - 1
self.nelem = self.elements.shape[0]
if self.nelem == 0:
raise ValueError("file does not contain {} elements".format(element_type))
ndim = self.InferSpatialDimension()
if self.element_type == "tri" or self.element_type == "quad":
self.GetEdges()
self.GetBoundaryEdges()
elif self.element_type == "tet" or self.element_type == "hex":
self.GetFaces()
self.GetBoundaryFaces()
self.GetBoundaryEdges()
def ReadSeparate(self,connectivity_file,coordinates_file,mesh_type, edges_file = None, faces_file = None,
delimiter_connectivity=' ',delimiter_coordinates=' ', delimiter_edges=' ', delimiter_faces=' ',
ignore_cols_connectivity=None,ignore_cols_coordinates=None,ignore_cols_edges=None,
ignore_cols_faces=None,index_style='c'):
"""Read meshes when the element connectivity and nodal coordinates are written in separate files
input:
connectivity_file: [str] filename containing element connectivity
coordinates_file: [str] filename containing nodal coordinates
mesh_type: [str] type of mesh tri/tet/quad/hex
edges_file: [str] filename containing edges of the mesh (if not given gets computed)
faces_file: [str] filename containing faces of the mesh (if not given gets computed)
delimiter_connectivity: [str] delimiter for connectivity_file - default is white space/tab
delimiter_coordinates: [str] delimiter for coordinates_file - default is white space/tab
delimiter_edges: [str] delimiter for edges_file - default is white space/tab
delimiter_faces: [str] delimiter for faces_file - default is white space/tab
ignore_cols_connectivity: [int] no of columns to be ignored (from the start) in the connectivity_file
ignore_cols_coordinates: [int] no of columns to be ignored (from the start) in the coordinates_file
ignore_cols_edges: [int] no of columns to be ignored (from the start) in the connectivity_file
ignore_cols_faces: [int] no of columns to be ignored (from the start) in the coordinates_file
index_style: [str] either 'c' C-based (zero based) indexing or 'f' fortran-based
(one based) indexing for elements connectivity - default is 'c'
"""
index = 0
if index_style == 'c':
index = 1
from time import time; t1=time()
self.elements = np.loadtxt(connectivity_file,dtype=np.int64,delimiter=delimiter_connectivity) - index
# self.elements = np.fromfile(connectivity_file,dtype=np.int64,count=-1) - index
self.points = np.loadtxt(coordinates_file,dtype=np.float64,delimiter=delimiter_coordinates)
if ignore_cols_connectivity != None:
self.elements = self.elements[ignore_cols_connectivity:,:]
if ignore_cols_coordinates != None:
self.points = self.points[ignore_cols_coordinates:,:]
if (mesh_type == 'tri' or mesh_type == 'quad') and self.points.shape[1]>2:
self.points = self.points[:,:2]
self.element_type = mesh_type
self.nelem = self.elements.shape[0]
# self.edges = None
if edges_file is None:
if mesh_type == "tri":
self.GetBoundaryEdgesTri()
elif mesh_type == "tet":
self.GetBoundaryEdgesTet()
else:
self.edges = np.loadtxt(edges_file,dtype=np.int64,delimiter=delimiter_edges) - index
if ignore_cols_edges !=None:
self.edges = self.edges[ignore_cols_edges:,:]
if faces_file is None:
if mesh_type == "tet":
self.GetBoundaryFacesTet()
else:
self.faces = np.loadtxt(faces_file,dtype=np.int64,delimiter=delimiter_edges) - index
if ignore_cols_faces !=None:
self.faces = self.faces[ignore_cols_faces:,:]
def ReadGIDMesh(self,filename,mesh_type,polynomial_order = 0):
"""Read GID meshes"""
if self.elements is not None and self.points is not None:
self.__reset__()
self.element_type = mesh_type
ndim, self.nelem, nnode, nboundary = np.fromfile(filename,dtype=np.int64,count=4,sep=' ')
if ndim==2 and mesh_type=="tri":
content = np.fromfile(filename,dtype=np.float64,count=4+3*nnode+4*self.nelem,sep=' ')
self.points = content[4:4+3*nnode].reshape(nnode,3)[:,1:]
self.elements = content[4+3*nnode:4+3*nnode+4*self.nelem].reshape(self.nelem,4)[:,1:].astype(np.int64)
self.elements -= 1
self.GetBoundaryEdgesTri()
if ndim==3 and mesh_type=="tet":
content = np.fromfile(filename,dtype=np.float64,count=4+4*nnode+5*self.nelem+9*nboundary,sep=' ')
self.points = content[4:4+4*nnode].reshape(nnode,4)[:,1:]
self.elements = content[4+4*nnode:4+4*nnode+5*self.nelem].reshape(self.nelem,5)[:,1:].astype(np.int64)
self.elements -= 1
face_flags = content[4*nnode+5*self.nelem+4:].reshape(nboundary,9)[:,1:].astype(np.int64)
self.faces = np.ascontiguousarray(face_flags[:,1:4] - 1)
self.face_to_surface = np.ascontiguousarray(face_flags[:,7] - 1)
# self.boundary_face_to_element = np.ascontiguousarray(face_flags[:,0])
# self.GetBoundaryFacesTet()
self.GetBoundaryEdgesTet()
def ReadVTK(self, filename, element_type=None):
"""Read mesh from a vtu file"""
try:
import vtkInterface as vtki
except ImportError:
raise IOError("vtkInterface is not installed. Please install it first using 'pip install vtkInterface'")
self.__reset__()
vmesh = vtki.UnstructuredGrid(filename)
flat_elements = np.copy(np.delete(vmesh.cells, vmesh.offset))
if not np.all(vmesh.celltypes == vmesh.celltypes[0]):
raise IOError("Cannot read VTK files with hybrid elements")
cellflag = vmesh.celltypes[0]
if cellflag == 5:
self.element_type = "tri"
divider = 3
elif cellflag == 9:
self.element_type = "quad"
divider = 4
elif cellflag == 10:
self.element_type = "tet"
divider = 4
elif cellflag == 12:
self.element_type = "hex"
divider = 8
elif cellflag == 3:
self.element_type = "line"
divider = 2
else:
raise IOError("VTK element type not understood")
if element_type is not None:
if self.element_type != element_type:
raise ValueError("VTK file does not contain {} elements".format(element_type))
self.elements = np.ascontiguousarray(flat_elements.reshape(int(flat_elements.shape[0]/divider),divider), dtype=np.uint64)
self.points = np.ascontiguousarray(vmesh.points, dtype=np.float64)
self.nelem = self.elements.shape[0]
self.nnode = self.points.shape[0]
if self.points.shape[1] == 3:
if np.allclose(self.points[:,2],0.):
self.points = np.ascontiguousarray(self.points[:,:2])
if self.element_type == "tri" or self.element_type == "quad":
self.GetEdges()
self.GetBoundaryEdges()
elif self.element_type == "tet" or self.element_type == "hex":
self.GetFaces()
self.GetBoundaryFaces()
self.GetBoundaryEdges()
return
def ReadGmsh(self, filename, element_type, p=1, read_surface_info=False):
"""Read gmsh (.msh) file"""
try:
fid = open(filename, "r")
except IOError:
print("File '%s' not found." % (filename))
sys.exit()
msh_version = None
# CHECK MSH FILE VERSION
if "MeshFormat" in fid.readline():
msh_version = int(np.floor(float(fid.readline().split(" ")[0])))
if 4 != msh_version and 2 != msh_version:
raise IOError("Only ASCII version 2 and 4 (>=4.1) .msh file formats are supported")
fid.close()
if self.elements is not None and self.points is not None:
self.__reset__()
self.filename = filename
bel = -1
if element_type == "line":
el = 1
elif element_type == "tri":
if p == 1:
el = 2
bel = 1
elif p == 2:
el = 9
bel = 8
elif element_type == "quad":
if p == 1:
el = 3
bel = 1
elif p == 2:
el = 10
bel = 8
elif element_type == "tet":
if p == 1:
el = 4
bel = 2
elif p == 2:
el = 11
bel = 9
elif element_type == "hex":
if p == 1:
el = 5
bel = 3
elif p == 2:
el = 12
bel = 10
else:
raise ValueError("Element type not understood")
# NEW FAST READER
var = 0 # for old gmsh versions - needs checks
node_blocks, elem_blocks, face_blocks = None, None, None
rem_nnode, rem_nelem, rem_faces = int(1e09), int(1e09), int(1e09)
face_counter = 0
for line_counter, line in enumerate(open(filename)):
item = line.rstrip()
plist = item.split()
if plist[0] == "Dimension":
self.ndim = plist[1]
elif plist[0] == "Vertices":
rem_nnode = line_counter+1
continue
elif plist[0] == "$Nodes":
rem_nnode = line_counter+1
continue
elif plist[0] == "Triangles":
rem_faces = line_counter+1
continue
elif plist[0] == "Tetrahedra":
rem_nelem = line_counter+1
continue
elif plist[0] == "$Elements":
rem_nelem = line_counter+1
var = 1
continue
if msh_version == 2:
if rem_nnode == line_counter:
self.nnode = int(plist[0])
if rem_faces == line_counter:
face_counter = int(plist[0])
if rem_nelem == line_counter:
self.nelem = int(plist[0])
break
else:
if rem_nnode == line_counter:
node_blocks, self.nnode = int(plist[0]), int(plist[1])
if rem_faces == line_counter:
face_blocks, face_counter = int(plist[0]), int(plist[1])
if rem_nelem == line_counter:
elem_blocks, self.nelem = int(plist[0]), int(plist[1])
break
points, elements, faces, face_to_surface = [],[], [], []
if msh_version == 2:
# RE-READ
ns = self.InferNumberOfNodesPerElement(p=p,element_type=element_type)
for line_counter, line in enumerate(open(filename)):
item = line.rstrip()
plist = item.split()
if var == 0:
if line_counter > rem_nnode and line_counter < self.nnode+rem_nnode+1:
points.append([float(i) for i in plist[:3]])
if line_counter > rem_nelem and line_counter < self.nelem+rem_nelem+1:
elements.append([int(i) for i in plist[:4]])
elif var == 1:
if line_counter > rem_nnode and line_counter < self.nnode+rem_nnode+1:
points.append([float(i) for i in plist[1:]])
if line_counter > rem_nelem and line_counter < self.nelem+rem_nelem+1:
if int(plist[1]) == el:
elements.append([int(i) for i in plist[-ns:]])
# READ SURFACE INFO - CERTAINLY ONLY IF SURFACE ELEMENT TYPE IS QUADS/TRIS
if read_surface_info:
if int(plist[1]) == bel:
faces.append([int(i) for i in plist[5:]])
face_to_surface.append(int(plist[4]))
elif msh_version == 4:
# RE-READ
fid = open(filename)
content = fid.readlines()
# READ NODES
nodes_content = content[rem_nnode+1:2*self.nnode+node_blocks+rem_nnode+1]
incrementer, line_number = 0, 0
# LOOP OVER BLOCKS
for i in range(node_blocks):
incrementer = int(nodes_content[line_number].rstrip().split()[3])
# LOOP OVER NODES OF EACH BLOCK
for j in range(line_number+1, line_number+2*incrementer+1):
plist = nodes_content[j].rstrip().split()
if len(plist) == 1:
continue
points.append([float(plist[k]) for k in range(0,len(plist))])
line_number += 2*incrementer + 1
# READ ELEMENTS
elems_content = content[rem_nelem+1:self.nelem+elem_blocks+rem_nelem+1]
incrementer, line_number = 0, 0
# LOOP OVER BLOCKS
for i in range(elem_blocks):
incrementer = int(elems_content[line_number].rstrip().split()[3])
if el == int(elems_content[line_number].rstrip().split()[2]):
# LOOP OVER ELEMENTS OF EACH BLOCK
for j in range(line_number+1, line_number+incrementer+1):
plist = elems_content[j].rstrip().split()
elements.append([int(plist[k]) for k in range(1,len(plist))])
line_number += incrementer + 1
if read_surface_info:
# READ FACES
incrementer, line_number = 0, 0
# LOOP OVER BLOCKS
for i in range(elem_blocks):
incrementer = int(elems_content[line_number].rstrip().split()[3])
surface_tag = int(elems_content[line_number].rstrip().split()[1])
if bel == int(elems_content[line_number].rstrip().split()[2]):
# LOOP OVER FACES OF EACH BLOCK
for j in range(line_number+1, line_number+incrementer+1):
plist = elems_content[j].rstrip().split()
faces.append([int(plist[k]) for k in range(1,len(plist))])
face_to_surface.append(surface_tag)
line_number += incrementer + 1
self.points = np.array(points,copy=True)
self.elements = np.array(elements,copy=True) - 1
# REORDER CONNECTIVITY
if p == 2:
# TRI6
if el == 9:
self.elements = self.elements[:,[0,1,2,3,5,4]]
# QUAD9
elif el == 10:
self.elements = self.elements[:,[0, 1, 2, 3, 4, 7, 8, 5, 6]]
# TET10
elif el == 11:
self.elements = self.elements[:,[0,1,2,3,4,6,5,7,9,8]]
# CORRECT
self.nelem = self.elements.shape[0]
self.nnode = self.points.shape[0]
if self.nelem == 0:
raise ValueError("msh file does not contain {} elements".format(element_type))
if read_surface_info:
self.faces = np.array(faces,copy=True) - 1
self.face_to_surface = np.array(face_to_surface, dtype=np.int64, copy=True).flatten()
self.face_to_surface -= 1
# CHECK IF FILLED
if isinstance(self.face_to_surface,list):
if not self.face_to_surface:
self.face_to_surface = None
elif isinstance(self.face_to_surface,np.ndarray):
if self.face_to_surface.shape[0]==0:
self.face_to_surface = None
if self.points.shape[1] == 3:
if np.allclose(self.points[:,2],0.):
self.points = np.ascontiguousarray(self.points[:,:2])
self.element_type = element_type
if self.element_type == "tri" or self.element_type == "quad":
self.GetEdges()
self.GetBoundaryEdges()
elif self.element_type == "tet" or self.element_type == "hex":
self.GetFaces()
self.GetBoundaryFaces()
self.GetBoundaryEdges()
return
def ReadOBJ(self, filename, element_type="tri"):
try:
fid = open(filename, "r")
except IOError:
print("File '%s' not found." % (filename))
sys.exit()
if self.elements is not None and self.points is not None:
self.__reset__()
self.filename = filename
bel = -1
if element_type == "line":
el = 2
elif element_type == "tri":
el = 3
bel = 2
elif element_type == "quad":
el = 4
bel = 2
elif element_type == "tet":
el = 4
bel = 3
elif element_type == "hex":
el = 8
bel = 4
else:
raise ValueError("Element type not understood")
# Read
points, elements, faces = [],[], []
# Normal and texture coordinates
normals, textures = [], []
# Connectivity for texture
telements = []
for line_counter, line in enumerate(open(filename,'r')):
item = line.rstrip()
plist = item.split()
if not plist:
continue
if plist[0] == 'v':
points.append([float(i) for i in plist[1:4]])
if plist[0] == 'f' and len(plist) > el:
cplist = deepcopy(plist)
for i in range(1,el+1):
if "/" in plist[i]:
plist[i] = plist[i].split("/")[0]
elements.append([int(i) for i in plist[1:el+1]])
plist = cplist
has_texture = False
for i in range(1,el+1):
if "/" in plist[i]:
has_texture = True
plist[i] = plist[i].split("/")[1]
if has_texture:
telements.append([int(i) for i in plist[1:el+1]])
if plist[0] == 'vn':
normals.append([float(i) for i in plist[1:4]])
if plist[0] == 'vt':
textures.append([float(i) for i in plist[1:4]])
self.points = np.array(points,copy=True)
self.elements = np.array(elements,copy=True) - 1
if normals:
self.normals = np.array(normals,copy=True)
if textures:
self.textures = np.array(textures,copy=True)
if telements:
self.telements = np.array(telements,copy=True) - 1
# CORRECT
self.nelem = self.elements.shape[0]
self.nnode = self.points.shape[0]
if self.nelem == 0:
raise ValueError("obj file does not contain {} elements".format(element_type))
if self.points.shape[1] == 3:
if np.allclose(self.points[:,2],0.):
self.points = np.ascontiguousarray(self.points[:,:2])
self.element_type = element_type
ndim = self.InferSpatialDimension()
if self.element_type == "tri" or self.element_type == "quad":
self.GetEdges()
self.GetBoundaryEdges()
elif self.element_type == "tet" or self.element_type == "hex":
self.GetFaces()
self.GetBoundaryFaces()
self.GetBoundaryEdges()
def ReadFenics(self, filename, element_type):
"""Read ASCII fenics meshes"""
if element_type == "tet":
etype = "tetrahedron"
elif element_type == "hex":
etype = "hexahedron"
elif element_type == "tri":
etype = "triangle"
elif element_type == "quad":
etype = "quadrilateral"
import xml.etree.cElementTree as ET
root = ET.parse(filename).getroot()
X = []
T = []
for child in root:
if child.attrib['celltype'] != etype:
raise ValueError("xml file does not contain {} elements".format(element_type))
for child in root:
for cchild in child:
if cchild.tag == "vertices":
if element_type == "tet" or element_type == "hex":
for child3 in cchild:
x = float(child3.attrib['x'])
y = float(child3.attrib['y'])
z = float(child3.attrib['z'])
X.append([x,y,z])
elif element_type == "tri" or element_type == "quad":
for child3 in cchild:
x = float(child3.attrib['x'])
y = float(child3.attrib['y'])
X.append([x,y])
elif cchild.tag == "cells":
if element_type == "tet":
for child3 in cchild:
v0 = int(child3.attrib['v0'])
v1 = int(child3.attrib['v1'])
v2 = int(child3.attrib['v2'])
v3 = int(child3.attrib['v3'])
T.append([v0,v1,v2,v3])
elif element_type == "tri":
for child3 in cchild:
v0 = int(child3.attrib['v0'])
v1 = int(child3.attrib['v1'])
v2 = int(child3.attrib['v2'])
T.append([v0,v1,v2])
X = np.array(X)
T = np.array(T,dtype=np.int64)
self.elements = T
self.points = X
self.element_type = element_type
self.nelem = self.elements.shape[0]
self.nnode = self.points.shape[0]
if self.points.shape[1] == 3:
if np.allclose(self.points[:,2],0.):
self.points = np.ascontiguousarray(self.points[:,:2])
ndim = self.InferSpatialDimension()
if self.element_type == "tri" or self.element_type == "quad":
self.GetEdges()
self.GetBoundaryEdges()
elif self.element_type == "tet" or self.element_type == "hex":
self.GetFaces()
self.GetBoundaryFaces()
self.GetBoundaryEdges()
def ReadUNV(self, filename, element_type="tri"):
"""Read I-DEAS universal files
"""
try:
fid = open(filename, "r")
fid.close()
except IOError:
print("File '%s' not found." % (filename))
sys.exit()
if self.elements is not None and self.points is not None:
self.__reset__()
self.filename = filename
with open(filename, 'r') as fid:
file_content = fid.readlines()
points, elements = [], []
is_point_line, is_element_line = False, False
is_point_record_line, is_element_record_line = True, True
celems, cpoints = [], []
first_point_line, first_element_line = True, True
read_element_type = element_type
point_ids, element_ids = [], []
for counter, line in enumerate(file_content):
sline = line.rstrip().split()
# Read points
if len(sline) == 1 and sline[0] == "2411":
is_point_line = True
is_element_line = False
if len(sline) == 1 and sline[0] == "2412":
is_point_line = False
is_element_line = True
if len(sline) == 1 and sline[0] == "2477":
is_point_line = False
is_element_line = False
if is_point_line:
if first_point_line or sline[0] == "-1":
first_point_line = False
continue
if is_point_record_line:
point_id = int(sline[0])
point_ids.append(point_id)
is_point_record_line = False
else:
cpoints = [float(i.replace('D', 'E')) for i in sline]
points.append(cpoints)
is_point_record_line = True
if is_element_line:
if first_element_line or sline[0] == "-1":
first_element_line = False
continue
if is_element_record_line:
# Get number of nodes for this element from the record line
nnode = int(sline[5])
# Read element type
read_element_type = sline[1]
# Set record line to False
is_element_record_line = False
else:
# If it is not a record line then read elements
for i in sline:
celems.append(int(i) - 1)
# If all elements are read set record line to True else False
if len(celems) == nnode:
is_element_record_line = True
else:
is_element_record_line = False
if is_element_record_line:
elements.append(celems)
celems = []
self.points = np.copy(points)
self.elements = np.copy(elements)
# MAP POINTS TO GROUND
point_ids = np.copy(point_ids)
sorter = np.argsort(point_ids)
self.points = self.points[sorter,:]
# MAP TO GROUND
unique_elements, inv_elements = np.unique(self.elements, return_inverse=True)
aranger = np.arange(self.points.shape[0])
self.elements = aranger[inv_elements].reshape(self.elements.shape[0],self.elements.shape[1])
self.element_type = element_type
if read_element_type == "92":
element_type = "tri"
self.elements = self.elements[:,[0,2,4,1,5,3]]
self.degree = 2
elif read_element_type == "118":
element_type = "tet"
self.elements = self.elements[:,[0,2,4,9,1,5,3,6,7,8]]
self.degree = 2
self.nelem = self.elements.shape[0]
self.nnode = self.points.shape[0]
while True:
if np.allclose(self.elements[-1,:],0.):
self.elements = self.elements[:-1, :]
else:
break
if self.points.shape[1] == 3:
if np.allclose(self.points[:,2],0.):
self.points = np.ascontiguousarray(self.points[:,:2])
if self.element_type == "tri" or self.element_type == "quad":
self.GetEdges()
self.GetBoundaryEdges()
elif self.element_type == "tet" or self.element_type == "hex":
self.GetFaces()
self.GetBoundaryFaces()
self.GetBoundaryEdges()
return
def ReadFRO(self, filename, element_type):
"""Read fro mesh"""
if self.elements is not None and self.points is not None:
self.__reset__()
if element_type == "tri":
el = 5
else:
raise NotImplementedError("Reading FRO files for {} elements not yet implemented".format(element_type))
content = np.fromfile(filename, dtype=np.float64, sep=" ")
nelem = int(content[0])
nnode = int(content[1])
nsurface = int(content[3])
points = content[8:8+4*nnode].reshape(nnode,4)[:,1:]
elements = content[8+4*nnode::].reshape(nelem,el)[:,1:-1].astype(np.int64) - 1
face_to_surface = content[8+4*nnode::].reshape(nelem,el)[:,-1].astype(np.int64) - 1
self.nelem = nelem
self.nnode = nnode
self.elements = np.ascontiguousarray(elements)
self.element_type = element_type
self.points = np.ascontiguousarray(points)
if self.element_type == "tri" or self.element_type == "quad":
self.GetEdges()
self.GetBoundaryEdges()
elif self.element_type == "tet" or self.element_type == "hex":
self.GetFaces()
self.GetBoundaryFaces()
self.GetBoundaryEdges()
self.face_to_surface = np.ascontiguousarray(face_to_surface)
return
def ReadHDF5(self,filename):
"""Read mesh from MATLAB HDF5 file format"""
if self.elements is not None and self.points is not None:
self.__reset__()
DictOutput = loadmat(filename)
# GENERIC READER - READS EVERYTHING FROM HDF5 AND ASSIGNS IT TO MESH OBJECT
for key, value in DictOutput.items():
if isinstance(DictOutput[key],np.ndarray):
if "elements" in key or "edge" in key or "face" in key:
setattr(self, key, np.ascontiguousarray(value).astype(np.uint64))
else:
setattr(self, key, np.ascontiguousarray(value))
else:
setattr(self, key, value)
if isinstance(self.element_type,np.ndarray):
self.element_type = str(self.element_type[0])
if isinstance(self.nelem,np.ndarray):
self.nelem = int(self.nelem[0])
for key in self.__dict__.keys():
if isinstance(self.__dict__[str(key)],np.ndarray):
if self.__dict__[str(key)].size == 1:
self.__dict__[str(key)] = np.asscalar(self.__dict__[str(key)])
def ReadDCM(self, filename, element_type="quad", ndim=2):
""" EZ4U mesh reader
"""
if element_type != "quad":
raise NotImplementedError("DCM/EZ4U reader for {} elements not yet implemented".format(element_type))
self.__reset__()
self.element_type = element_type
content = np.fromfile(filename, dtype=np.float64, sep=" ")
self.nnode = int(content[0])
self.nelem = int(content[1])
if ndim==2:
self.points = content[3:self.nnode*4+3].reshape(self.nnode,4)[:,[1,2]]
else:
self.points = content[3:self.nnode*4+3].reshape(self.nnode,4)[:,1:]
self.elements = content[self.nnode*4+3:].astype(np.int64).reshape(self.nelem,11)[:,7:] - 1
if self.points.shape[1] == 3:
if np.allclose(self.points[:,2],0.):
self.points = np.ascontiguousarray(self.points[:,:2])
self.GetEdgesQuad()
self.GetBoundaryEdgesQuad()
def SimplePlot(self, to_plot='faces', color=None, edge_color=None, point_color=None,
plot_points=False, plot_faces=None, plot_edges=True, point_radius=None,
save=False, filename=None, figure=None, show_plot=True, show_axis=False, grid="off"):
"""Simple mesh plot
to_plot: [str] only for 3D. 'faces' to plot only boundary faces
or 'all_faces' to plot all faces
grid: [str] None, "on" or "off"
"""
self.__do_essential_memebers_exist__()
# REDIRECT FOR 3D SURFACE MESHES
if self.element_type == "tri" or self.element_type == "quad":
if self.points.ndim == 2 and self.points.shape[1] == 3:
mesh = self.CreateDummy3DMeshfrom2DMesh()
mesh.SimplePlot(to_plot=to_plot, color=color, plot_points=plot_points,
plot_edges=plot_edges, point_radius=point_radius,
save=save, filename=filename, figure=figure, show_plot=show_plot,
show_axis=show_axis, grid=grid)
return
ndim = self.InferSpatialDimension()
edim = self.InferElementalDimension()
if color is None:
color=(197/255.,241/255.,197/255.)
if edge_color is None:
edge_color = (0,0,0)
if point_color is None:
point_color = (0,0,0)
if grid is None:
grid = "off"
if point_radius is None:
if ndim == 2:
point_radius = 0.75
else:
point_radius = 0.1
if save:
if filename is None:
warn('File name not given. I am going to write one in the current directory')
filename = PWD(__file__) + "/output.png"
else:
if filename.split(".")[-1] == filename:
filename += ".png"
import matplotlib as mpl
if self.element_type == "tri" or self.element_type == "quad" or self.element_type == "pent":
import matplotlib.pyplot as plt
if figure is None:
figure = plt.figure()
elif self.element_type == "tet" or self.element_type == "hex":
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
# os.environ['ETS_TOOLKIT'] = 'wx'
from mayavi import mlab
if to_plot == 'all_faces':
if self.all_faces is None:
self.GetFaces()
faces = self.all_faces
else:
if self.faces is None:
self.GetBoundaryFaces()
faces = self.faces
if figure is None:
figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))
if color is not None:
if isinstance(color,tuple):
if len(color) != 3:
raise ValueError("Color should be given in a rgb/RGB tuple format with 3 values i.e. (x,y,z)")
if color[0] > 1.0 or color[1] > 1.0 or color[2] > 1.0:
color = (color[0]/255.,color[1]/255.,color[2]/255.)
elif isinstance(color,str):
color = mpl.colors.hex2color(color)
if edge_color is not None:
if isinstance(edge_color,tuple):
if len(edge_color) != 3:
raise ValueError("Color should be given in a rgb/RGB tuple format with 3 values i.e. (x,y,z)")
if edge_color[0] > 1.0 or edge_color[1] > 1.0 or edge_color[2] > 1.0:
edge_color = (edge_color[0]/255.,edge_color[1]/255.,edge_color[2]/255.)
elif isinstance(edge_color,str):
edge_color = mpl.colors.hex2color(edge_color)
if plot_faces is None:
if edim == 3:
plot_faces = True
else:
plot_faces = False
if self.element_type == "tri":
plt.triplot(self.points[:,0],self.points[:,1], self.elements[:,:3],color=edge_color)
if plot_faces:
plt.tricontourf(self.points[:,0], self.points[:,1], self.elements[:,:3],
np.ones(self.points.shape[0]), 100, alpha=0.3)
if plot_points:
plt.plot(self.points[:,0],self.points[:,1], "o", color=point_color, markersize=point_radius)
plt.axis("equal")
if not show_axis:
plt.axis('off')
if grid == "on":
plt.grid("on")
if show_plot:
plt.show()
elif self.element_type == "tet":
if plot_faces:
mlab.triangular_mesh(self.points[:,0],self.points[:,1],
self.points[:,2],faces[:,:3],color=color)
radius = 1e-00
if plot_edges:
mlab.triangular_mesh(self.points[:,0],self.points[:,1],self.points[:,2], faces[:,:3],
line_width=radius,tube_radius=radius,color=edge_color,
representation='wireframe')
if plot_points:
mlab.points3d(self.points[:,0],self.points[:,1],self.points[:,2],
color=point_color,mode='sphere',scale_factor=point_radius)
# svpoints = self.points[np.unique(self.faces),:]
# mlab.points3d(svpoints[:,0],svpoints[:,1],svpoints[:,2],color=(0,0,0),mode='sphere',scale_factor=0.005)
# mlab.view(azimuth=135, elevation=45, distance=7, focalpoint=None,
# roll=0, reset_roll=True, figure=None)
if show_plot:
mlab.show()
elif self.element_type=="quad":
C = self.InferPolynomialDegree() - 1
pdim = self.points.shape[1]
edge_elements = self.GetElementsEdgeNumberingQuad()
reference_edges = NodeArrangementQuad(C)[0]
reference_edges = np.concatenate((reference_edges,reference_edges[:,1,None]),axis=1)
reference_edges = np.delete(reference_edges,1,1)
self.GetEdgesQuad()
x_edges = np.zeros((C+2,self.all_edges.shape[0]))
y_edges = np.zeros((C+2,self.all_edges.shape[0]))
z_edges = np.zeros((C+2,self.all_edges.shape[0]))
BasesOneD = np.eye(2,2)
for iedge in range(self.all_edges.shape[0]):
ielem = edge_elements[iedge,0]
edge = self.elements[ielem,reference_edges[edge_elements[iedge,1],:]]
if pdim == 2:
x_edges[:,iedge], y_edges[:,iedge] = self.points[edge,:].T
elif pdim == 3:
x_edges[:,iedge], y_edges[:,iedge], z_edges[:,iedge] = self.points[edge,:].T
plt.plot(x_edges,y_edges,'-', color=edge_color)
if plot_points:
plt.plot(self.points[:,0],self.points[:,1], "o", color=point_color, markersize=point_radius)
if plot_faces:
plt.tricontourf(self.points[:,0], self.points[:,1], self.elements[:,:3],
np.ones(self.points.shape[0]), 100, alpha=0.3)
plt.tricontourf(self.points[:,0], self.points[:,1], self.elements[:,[0,2,3]],
np.ones(self.points.shape[0]), 100, alpha=0.3)
plt.axis('equal')
if not show_axis:
plt.axis('off')
if grid == "on":
plt.grid("on")
if show_plot:
plt.show()
elif self.element_type == "hex":
if to_plot == "all_faces":
ProjectionFlags = np.ones(faces.shape[0],dtype=np.int64)
else:
ProjectionFlags = None
from Florence.PostProcessing import PostProcess
tmesh = PostProcess.TessellateHexes(self,np.zeros_like(self.points),plot_points=True,
interpolation_degree=0, ProjectionFlags=ProjectionFlags)
Xplot = tmesh.points
Tplot = tmesh.elements
# color=(197/255.,241/255.,197/255.)
point_line_width = .002
if plot_faces:
trimesh_h = mlab.triangular_mesh(Xplot[:,0], Xplot[:,1], Xplot[:,2], Tplot,
line_width=point_line_width,color=color)
if plot_edges:
src = mlab.pipeline.scalar_scatter(tmesh.x_edges.T.copy().flatten(),
tmesh.y_edges.T.copy().flatten(), tmesh.z_edges.T.copy().flatten())
src.mlab_source.dataset.lines = tmesh.connections
h_edges = mlab.pipeline.surface(src, color = edge_color, line_width=3)
# AVOID WARNINGS
# lines = mlab.pipeline.stripper(src)
# h_edges = mlab.pipeline.surface(lines, color = edge_color, line_width=3)
# mlab.view(azimuth=135, elevation=45, distance=7, focalpoint=None,
# roll=0, reset_roll=True, figure=None)
if plot_points:
mlab.points3d(self.points[:,0],self.points[:,1],self.points[:,2],
color=point_color,mode='sphere',scale_factor=point_radius)
if show_plot:
mlab.show()
elif self.element_type == "line":
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
from mayavi import mlab
if figure is None:
figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))
if self.points.ndim == 1:
self.points = self.points[:,None]
points = np.zeros((self.points.shape[0],3))
if self.points.shape[1] == 1:
points[:,0] = np.copy(self.points[:,0])
if self.points.shape[1] == 2:
points[:,:2] = np.copy(self.points)
elif self.points.shape[1] == 3:
points = np.copy(self.points)
if plot_edges:
src = mlab.pipeline.scalar_scatter(points[:,0],points[:,1],points[:,2])
src.mlab_source.dataset.lines = self.elements[:,:2]
lines = mlab.pipeline.stripper(src)
h_edges = mlab.pipeline.surface(lines, color = (0,0,0), line_width=2)
if plot_points:
h_points = mlab.points3d(points[:,0],points[:,1],points[:,2],color=(0,0,0),mode='sphere',scale_factor=point_radius)
if show_plot:
mlab.show()
else:
raise NotImplementedError("SimplePlot for {} not implemented yet".format(self.element_type))
if save:
ndim = self.InferSpatialDimension()
if ndim == 2:
plt.savefig(filename,format="png",dpi=300)
else:
mlab.savefig(filename,dpi=300)
def PlotMeshNumbering(self, figure=None, show_plot=True):
"""Plots element and node numbers on top of the triangular mesh"""
self.__do_essential_memebers_exist__()
import matplotlib.pyplot as plt
import matplotlib as mpl
if self.element_type == "tri":
if figure is None:
figure = plt.figure()
plt.triplot(self.points[:,0],self.points[:,1], self.elements[:,:3])
plt.tricontourf(self.points[:,0], self.points[:,1], self.elements[:,:3], np.ones(self.points.shape[0]), 100,alpha=0.3)
for i in range(0,self.elements.shape[0]):
coord = self.points[self.elements[i,:],:]
x_avg = np.sum(coord[:,0])/self.elements.shape[1]
y_avg = np.sum(coord[:,1])/self.elements.shape[1]
plt.text(x_avg,y_avg,str(i),backgroundcolor='#F88379',ha='center')
for i in range(0,self.points.shape[0]):
plt.text(self.points[i,0],self.points[i,1],str(i),backgroundcolor='#0087BD',ha='center')
plt.axis('equal')
if show_plot:
plt.show()
elif self.element_type == "quad":
if figure is None:
figure = plt.figure()
point_radius = 3.
C = self.InferPolynomialDegree() - 1
edge_elements = self.GetElementsEdgeNumberingQuad()
reference_edges = NodeArrangementQuad(C)[0]
reference_edges = np.concatenate((reference_edges,reference_edges[:,1,None]),axis=1)
reference_edges = np.delete(reference_edges,1,1)
self.GetEdgesQuad()
x_edges = np.zeros((C+2,self.all_edges.shape[0]))
y_edges = np.zeros((C+2,self.all_edges.shape[0]))
BasesOneD = np.eye(2,2)
for iedge in range(self.all_edges.shape[0]):
ielem = edge_elements[iedge,0]
edge = self.elements[ielem,reference_edges[edge_elements[iedge,1],:]]
x_edges[:,iedge], y_edges[:,iedge] = self.points[edge,:].T
plt.plot(x_edges,y_edges,'-k')
for i in range(self.elements.shape[0]):
coord = self.points[self.elements[i,:],:]
x_avg = np.sum(coord[:,0])/self.elements.shape[1]
y_avg = np.sum(coord[:,1])/self.elements.shape[1]
plt.text(x_avg,y_avg,str(i),backgroundcolor='#F88379',ha='center')
for i in range(0,self.points.shape[0]):
plt.text(self.points[i,0],self.points[i,1],str(i),backgroundcolor='#0087BD',ha='center')
plt.axis('equal')
if show_plot:
plt.show()
elif self.element_type == "tet" or self.element_type == "hex":
import matplotlib as mpl
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
from mayavi import mlab
if figure is None:
figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(800,600))
view = mlab.view()
figure.scene.disable_render = True
color = mpl.colors.hex2color('#F88379')
linewidth = 3.
# trimesh_h = mlab.triangular_mesh(self.points[:,0],
# self.points[:,1], self.points[:,2], self.faces[:,:3],
# line_width=linewidth,tube_radius=linewidth,color=(0,0.6,0.4),
# representation='wireframe') # representation='surface'
# # CHANGE LIGHTING OPTION
# trimesh_h.actor.property.interpolation = 'phong'
# trimesh_h.actor.property.specular = 0.1
# trimesh_h.actor.property.specular_power = 5
# PLOTTING EDGES
from Florence.PostProcessing import PostProcess
tmesh = PostProcess(3,3).Tessellate(self, np.zeros_like(self.points), interpolation_degree=0,
plot_points=True, plot_edges=True, plot_surfaces=False)
x_edges = tmesh.x_edges
y_edges = tmesh.y_edges
z_edges = tmesh.z_edges
connections = tmesh.connections
src = mlab.pipeline.scalar_scatter(x_edges.T.copy().flatten(), y_edges.T.copy().flatten(), z_edges.T.copy().flatten())
src.mlab_source.dataset.lines = connections
h_edges = mlab.pipeline.surface(src, color = (0,0.6,0.4), line_width=linewidth)
# AVOID WARNINGS
# lines = mlab.pipeline.stripper(src)
# h_edges = mlab.pipeline.surface(lines, color = (0,0.6,0.4), line_width=linewidth)
# ELEMENT NUMBERING
# for i in range(0,self.elements.shape[0]):
# coord = self.points[self.elements[i,:],:]
# x_avg = np.sum(coord[:,0])/self.elements.shape[1]
# y_avg = np.sum(coord[:,1])/self.elements.shape[1]
# z_avg = np.sum(coord[:,2])/self.elements.shape[1]
# # mlab.text3d(x_avg,y_avg,z_avg,str(i),color=color)
# mlab.text3d(x_avg,y_avg,z_avg,str(i),color=(0,0,0.),scale=2)
# POINT NUMBERING
for i in range(self.elements.shape[0]):
for j in range(self.elements.shape[1]):
text_obj = mlab.text3d(self.points[self.elements[i,j],0],
self.points[self.elements[i,j],1],self.points[self.elements[i,j],2],str(self.elements[i,j]),
color=(0,0,0.),scale=0.05)
figure.scene.disable_render = False
if show_plot:
# mlab.view(*view)
mlab.show()
def WriteVTK(self, filename=None, result=None, fmt="binary", interpolation_degree=10, ProjectionFlags=None):
"""Write mesh/results to vtu
inputs:
fmt: [str] VTK writer format either "binary" or "xml".
"xml" files do not support big vtk/vtu files
typically greater than 2GB whereas "binary" files can. Also "xml" writer is
in-built whereas "binary" writer depends on evtk/pyevtk module
interpolation_degree: [int] used only for writing high order curved meshes
"""
self.__do_essential_memebers_exist__()
if fmt == "xml":
pass
elif fmt == "binary":
try:
from pyevtk.hl import pointsToVTK, linesToVTK, gridToVTK, unstructuredGridToVTK
from pyevtk.vtk import VtkVertex, VtkLine, VtkTriangle, VtkQuad, VtkTetra, VtkPyramid, VtkHexahedron
except ImportError:
raise ImportError("Could not import evtk. Install it using 'pip install pyevtk'")
else:
raise ValueError("Writer format not understood")
elements = np.copy(self.elements)
cellflag = None
if self.element_type =='tri':
cellflag = 5
offset = 3
if self.elements.shape[1]==6:
cellflag = 22
offset = 6
elif self.element_type =='quad':
cellflag = 9
offset = 4
if self.elements.shape[1]==8:
cellflag = 23
offset = 8
if self.element_type =='tet':
cellflag = 10
offset = 4
if self.elements.shape[1]==10:
cellflag = 24
offset = 10
# CHANGE NUMBERING ORDER FOR PARAVIEW
para_arange = [0,4,1,6,2,5,7,8,9,3]
elements = elements[:,para_arange]
elif self.element_type == 'hex':
cellflag = 12
offset = 8
if self.elements.shape[1] == 20:
cellflag = 25
offset = 20
elif self.element_type == 'line':
cellflag = 3
offset = 2
if filename is None:
warn('File name not specified. I am going to write one in the current directory')
filename = os.path.join(PWD(__file__), "output.vtu")
if ".vtu" in filename and fmt == "binary":
filename = filename.split('.')[0]
if ".vtu" not in filename and fmt == "xml":
filename = filename + ".vtu"
if self.InferPolynomialDegree() > 1:
try:
from Florence.PostProcessing import PostProcess
from Florence.VariationalPrinciple import DisplacementFormulation
except ImportError:
raise RuntimeError("Writing high order elements to VTK is not supported yet")
if result is not None and result.ndim > 1:
raise NotImplementedError("Writing multliple or vector/tensor valued results to binary vtk not supported yet")
return
else:
if result is None:
result = np.zeros_like(self.points)[:,:,None]
if result.ndim == 1:
result = result.reshape(result.shape[0],1,1)
pp = PostProcess(3,3)
pp.SetMesh(self)
pp.SetSolution(result)
pp.SetFormulation(DisplacementFormulation(self,compute_post_quadrature=False))
pp.WriteVTK(filename,quantity=0,interpolation_degree=interpolation_degree, ProjectionFlags=ProjectionFlags)
return
if self.InferSpatialDimension() == 2:
points = np.zeros((self.points.shape[0],3))
points[:,:2] = self.points
else:
points = self.points
if result is None:
if fmt == "xml":
write_vtu(Verts=self.points, Cells={cellflag:elements},fname=filename)
elif fmt == "binary":
unstructuredGridToVTK(filename,
np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),
np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),
np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag)
else:
if isinstance(result, np.ndarray):
if result.ndim > 1:
if result.size == result.shape[0]:
result = result.flatten()
if fmt == "xml":
if result.ndim > 1:
if result.shape[0] == self.nelem:
write_vtu(Verts=self.points, Cells={cellflag:elements},
cvdata={cellflag:result.ravel()},fname=filename)
elif result.shape[0] == self.points.shape[0]:
write_vtu(Verts=self.points, Cells={cellflag:elements},
pvdata=result.ravel(),fname=filename)
else:
if result.shape[0] == self.nelem:
write_vtu(Verts=self.points, Cells={cellflag:elements},cdata=result,fname=filename)
elif result.shape[0] == self.points.shape[0]:
write_vtu(Verts=self.points, Cells={cellflag:elements},pdata=result,fname=filename)
elif fmt == "binary":
if result.ndim <= 1:
if result.shape[0] == self.nelem:
unstructuredGridToVTK(filename,
np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),
np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),
np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag,
cellData={'result':np.ascontiguousarray(result.ravel())})
elif result.shape[0] == self.points.shape[0]:
unstructuredGridToVTK(filename,
np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),
np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),
np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag,
pointData={'result':np.ascontiguousarray(result.ravel())})
else:
raise NotImplementedError("Writing multliple or vector/tensor valued results to binary vtk not supported yet")
def WriteHDF5(self, filename=None, external_fields=None):
"""Write to MATLAB's HDF5 format
external_fields: [dict or tuple] of fields to save together with the mesh,
for instance desired results. If a tuple is given, the keys in the
dictionary will be named results_0, results_1 and so on"""
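# A minimal usage sketch (file name and field are hypothetical):
#   mesh.WriteHDF5("out.mat", external_fields={"displacement": sol})
# The resulting .mat file can be read back with ReadHDF5 (or scipy.io.loadmat).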
# DO NOT WRITE IF POINTS DO NOT EXIST - THIS IS TO PREVENT ACCIDENTAL WRITING OF
# POTENTIALLY EMPTY MESH OBJECT
if self.points is None:
warn("Nothing to write")
return
Dict = deepcopy(self.__dict__)
if external_fields is not None:
if isinstance(external_fields,dict):
Dict.update(external_fields)
elif isinstance(external_fields,tuple):
for counter, fields in enumerate(external_fields):
Dict['results_'+str(counter)] = fields
else:
raise AssertionError("Fields should be either tuple or a dict")
if filename is None:
pwd = os.path.dirname(os.path.realpath(__file__))
filename = pwd+'/output.mat'
for key in list(Dict.keys()):
if Dict[str(key)] is None:
del Dict[str(key)]
savemat(filename, Dict, do_compression=True)
def WriteGmsh(self, filename, write_surface_info=False):
"""Write mesh to a .msh (gmsh format) file"""
self.__do_essential_memebers_exist__()
mesh = deepcopy(self)
p = self.InferPolynomialDegree()
# if p > 1:
# mesh = self.GetLinearMesh(remap=True)
element_type = mesh.element_type
edim = mesh.InferElementalDimension()
# THESE TAGS ARE DIFFERENT FROM THE GMSH READER TAGS
bel = -1
if element_type == "line":
el = 1
elif element_type == "tri":
if p == 1:
el = 2
bel = 1
else:
el = 9
bel = 8
elif element_type == "quad":
if p == 1:
el = 3
bel = 1
elif p == 2:
el = 10
bel = 8
elif element_type == "tet":
if p == 1:
el = 4
bel = 2
elif p == 2:
el = 11
bel = 9
elif element_type == "hex":
el = 5
bel = 3
else:
raise ValueError("Element type not understood")
elements = np.copy(mesh.elements).astype(np.int64)
points = mesh.points[np.unique(elements),:]
if el == 9:
elements = elements[:,[0,1,2,3,5,4]]
elif el == 10:
elements = elements[:,[0, 1, 2, 3, 4, 7, 8, 5, 6]]
elif el == 11:
elements = elements[:,[0,1,2,3,4,6,5,7,9,8]]
# Take care of a corner case where nnode != points.shape[0]
if mesh.nnode != points.shape[0]:
mesh.nnode = points.shape[0]
if points.shape[1] == 2:
points = np.hstack((points,np.zeros((points.shape[0],1))))
points_repr = np.zeros((points.shape[0],points.shape[1]+1), dtype=object)
points_repr[:,0] = np.arange(mesh.nnode) + 1
points_repr[:,1:] = points
elements_repr = np.zeros((elements.shape[0],elements.shape[1]+5), dtype=object)
elements_repr[:,0] = np.arange(mesh.nelem) + 1
elements_repr[:,1] = el
elements_repr[:,2] = 2
elements_repr[:,3] = 0
elements_repr[:,4] = 1
elements_repr[:,5:] = elements + 1
if write_surface_info:
if edim == 3:
boundary = np.copy(mesh.faces).astype(np.int64)
elif edim == 2:
boundary = np.copy(mesh.edges).astype(np.int64)
boundary_repr = np.zeros((boundary.shape[0],boundary.shape[1]+5), dtype=object)
boundary_repr[:,0] = np.arange(boundary.shape[0]) + 1
boundary_repr[:,1] = bel
boundary_repr[:,2] = 2
boundary_repr[:,3] = 0
boundary_repr[:,4] = 1
boundary_repr[:,5:] = boundary + 1
elements_repr[:,0] += boundary.shape[0]
gmsh_nelem = mesh.nelem + boundary.shape[0]
else:
gmsh_nelem = mesh.nelem
with open(filename, 'w') as f:
f.write("$MeshFormat\n")
f.write("2.2 0 8\n")
f.write("$EndMeshFormat\n")
f.write("$Nodes\n")
f.write(str(mesh.nnode) + "\n")
np.savetxt(f, points_repr, fmt="%s")
f.write("$EndNodes\n")
f.write("$Elements\n")
f.write(str(gmsh_nelem) + "\n")
if write_surface_info:
np.savetxt(f, boundary_repr, fmt="%s")
np.savetxt(f, elements_repr, fmt="%s")
f.write("$EndElements\n")
def WriteOBJ(self, filename, write_texture=False):
"""Write mesh to an obj file. For 3D elements writes the faces only
"""
self.__do_essential_memebers_exist__()
mesh = deepcopy(self)
p = self.InferPolynomialDegree()
if p > 1:
mesh = self.GetLinearMesh(remap=True)
edim = mesh.InferElementalDimension()
if edim == 2:
elements = np.copy(mesh.elements).astype(np.int64)
elif edim == 3:
elements = np.copy(mesh.faces).astype(np.int64)
else:
raise RuntimeError("Writing obj file for {} elements not supported".format(mesh.element_type))
points = mesh.points[np.unique(elements),:]
if points.shape[1] == 2:
points = np.hstack((points,np.zeros((points.shape[0],1))))
points_repr = np.zeros((points.shape[0],points.shape[1]+1), dtype=object)
points_repr[:,0] = "v"
points_repr[:,1:] = points
elements_repr = np.zeros((elements.shape[0],elements.shape[1]+1), dtype=object)
elements_repr[:,0] = "f"
elements_repr[:,1:] = elements + 1
if write_texture:
textures = mesh.textures[np.unique(elements),:]
textures_repr = np.zeros((textures.shape[0],textures.shape[1]+1), dtype=object)
textures_repr[:,0] = "vt"
textures_repr[:,1:] = textures
elements_repr = np.zeros((mesh.telements.shape[0],mesh.telements.shape[1]+1), dtype=object)
elements_repr[:,0] = "f"
# elements_repr[:,1:] = telements + 1
counter = 0
for i, j in zip(elements,mesh.telements):
curr_row = [str(ii+1)+"/"+str(jj+1) for ii,jj in zip(i,j)]
elements_repr[counter,1:] = curr_row
counter += 1
with open(filename, "w") as f:
# f.write("# "+ str(mesh.nnode))
# f.write('\n')
# f.write("# "+ str(mesh.nelem))
# f.write('\n')
np.savetxt(f, points_repr, fmt="%s")
if write_texture:
np.savetxt(f, textures_repr, fmt="%s")
f.write('\n')
np.savetxt(f, elements_repr, fmt="%s")
def WriteMFEM(self, filename):
"""Write mesh to a mfem file"""
self.__do_memebers_exist__()
nodeperelem = self.InferNumberOfNodesPerElement()
if self.element_type == "tet":
etype = 4
betype = 2
elif self.element_type == "hex":
etype = 5
betype = 3
elif self.element_type == "tri":
etype = 2
betype = 1
elif self.element_type == "quad":
etype = 3
betype = 1
elif self.element_type == "line":
etype = 1
betype = 0
if self.element_type == "tet" or self.element_type == "hex":
boundary = self.faces
elif self.element_type == "tri" or self.element_type == "quad":
boundary = self.edges
elif self.element_type == "line":
boundary = self.corners
with open(filename, 'w') as f:
f.write("MFEM mesh v1.0\n")
f.write("#\n\n")
f.write("dimension\n")
f.write('{}'.format(self.InferSpatialDimension()))
f.write("\n\n")
f.write("elements\n")
f.write('{}'.format(self.nelem))
f.write("\n")
for elem in range(self.nelem):
f.write('1 {} '.format(etype))
for node in range(nodeperelem):
f.write('{} '.format(self.elements[elem,node]))
f.write("\n")
f.write("\n\n")
f.write("boundary\n")
f.write('{}'.format(boundary.shape[0]))
f.write("\n")
for elem in range(boundary.shape[0]):
f.write('1 {} '.format(betype))
for node in range(boundary.shape[1]):
f.write('{} '.format(boundary[elem,node]))
f.write("\n")
f.write("\n\n")
f.write("vertices\n")
f.write('{}'.format(self.points.shape[0]))
f.write("\n")
f.write('{}'.format(self.points.shape[1]))
f.write("\n")
for elem in range(self.points.shape[0]):
for node in range(self.points.shape[1]):
f.write('{} '.format(self.points[elem,node]))
f.write("\n")
@staticmethod
def MeshPyTri(points,facets,*args,**kwargs):
"""MeshPy backend for generating linear triangular mesh"""
info = triangle.MeshInfo()
info.set_points(points)
info.set_facets(facets)
return triangle.build(info,*args,**kwargs)
def Line(self, left_point=0., right_point=1., n=10, p=1):
"""Creates a mesh of on a line for 1D rods/beams"""
self.__reset__()
assert p > 0
if not isinstance(left_point,float):
if not isinstance(left_point,int):
raise ValueError("left_point must be a number")
if not isinstance(right_point,float):
if not isinstance(right_point,int):
raise ValueError("right_point must be a number")
left_point = float(left_point)
right_point = float(right_point)
n = int(n)
if n <= 0:
raise ValueError("Number of discretisation cannot be zero or negative: n={}".format(n))
self.element_type = "line"
self.points = np.linspace(left_point,right_point,p*n+1)[:,None]
self.elements = np.zeros((n,p+1),dtype=np.int64)
for i in range(p+1):
self.elements[:,i] = p*np.arange(0,n)+i
self.nelem = self.elements.shape[0]
self.nnode = self.points.shape[0]
def Rectangle(self,lower_left_point=(0,0), upper_right_point=(2,1),
nx=5, ny=5, element_type="tri"):
"""Creates a quad/tri mesh of a rectangle"""
if element_type != "tri" and element_type != "quad":
raise ValueError("Element type should either be tri or quad")
if self.elements is not None and self.points is not None:
self.__reset__()
if (lower_left_point[0] > upper_right_point[0]) or \
(lower_left_point[1] > upper_right_point[1]):
raise ValueError("Incorrect coordinate for lower left and upper right vertices")
nx, ny = int(nx), int(ny)
if nx <= 0 or ny <= 0:
raise ValueError("Number of discretisation cannot be zero or negative: nx={} ny={}".format(nx,ny))
from scipy.spatial import Delaunay
x=np.linspace(lower_left_point[0],upper_right_point[0],nx+1)
y=np.linspace(lower_left_point[1],upper_right_point[1],ny+1)
X,Y = np.meshgrid(x,y)
coordinates = np.dstack((X.ravel(),Y.ravel()))[0,:,:]
if element_type == "tri":
tri_func = Delaunay(coordinates)
self.element_type = "tri"
self.elements = tri_func.simplices
self.nelem = self.elements.shape[0]
self.points = tri_func.points
self.nnode = self.points.shape[0]
self.GetBoundaryEdgesTri()
elif element_type == "quad":
self.nelem = int(nx*ny)
elements = np.zeros((self.nelem,4),dtype=np.int64)
dum_0 = np.arange((nx+1)*ny)
dum_1 = np.array([(nx+1)*i+nx for i in range(ny)])
col0 = np.delete(dum_0,dum_1)
elements[:,0] = col0
elements[:,1] = col0 + 1
elements[:,2] = col0 + nx + 2
elements[:,3] = col0 + nx + 1
self.nnode = int((nx+1)*(ny+1))
self.element_type = "quad"
self.elements = elements
self.points = coordinates
self.nnode = self.points.shape[0]
self.GetBoundaryEdgesQuad()
self.GetEdgesQuad()
def Square(self, lower_left_point=(0,0), side_length=1, nx=5, ny=5, n=None, element_type="tri"):
"""Creates a quad/tri mesh on a square
input:
lower_left_point [tuple] of lower left vertex of the square
side_length: [int] length of side
nx,ny: [int] number of discretisation in each direction
n: [int] number of discretisation in all directions
i.e. nx=ny=n. Overrides nx,ny
"""
if n != None:
nx,ny = n,n
upper_right_point = (side_length+lower_left_point[0],side_length+lower_left_point[1])
self.Rectangle(lower_left_point=lower_left_point,
upper_right_point=upper_right_point,nx=nx,ny=ny,element_type=element_type)
def Triangle(self, c1=(0.,0.), c2=(0.,1.), c3=(1.,0.), npoints=10, element_type="tri", equally_spaced=True):
"""Creates a tri/quad mesh on a triangular region, given coordinates of the three
nodes of the triangle
input:
npoints: [int] number of discretisation points
"""
if not isinstance(c1,tuple) or not isinstance(c2,tuple) or not isinstance(c3,tuple):
raise ValueError("The coordinates c1, c2 and c3 should be given in tuples of two elements each (x,y)")
npoints = int(npoints)
npoints = npoints - 1
if npoints < 0:
npoints = 0
c1 = np.array(c1); c2 = np.array(c2); c3 = np.array(c3)
opoints = np.vstack((c1,c2,c3))
oelements = np.array([[0,1,2]])
if element_type=="tri":
mesh = self.TriangularProjection(points=opoints, npoints=npoints, equally_spaced=equally_spaced)
self.__update__(mesh)
elif element_type == "quad":
# SPLIT THE TRIANGLE INTO 3 QUADS
omesh = Mesh()
omesh.element_type="tri"
omesh.elements = oelements
omesh.nelem = omesh.elements.shape[0]
omesh.points = opoints
omesh.GetBoundaryEdges()
sys.stdout = open(os.devnull, "w")
omesh.ConvertTrisToQuads()
sys.stdout = sys.__stdout__
npoints = int(npoints/2) + 1
mesh = self.QuadrilateralProjection(points=omesh.points[omesh.elements[0,:],:],
npoints=npoints, equally_spaced=equally_spaced)
for i in range(1,omesh.nelem):
mesh += self.QuadrilateralProjection(points=omesh.points[omesh.elements[i,:],:],
npoints=npoints, equally_spaced=equally_spaced)
self.__update__(mesh)
def Arc(self, center=(0.,0.), radius=1., nrad=16, ncirc=40,
start_angle=0., end_angle=np.pi/2., element_type="tri",
refinement=False, refinement_level=2, algorithm="standard"):
"""Creates a structured quad/tri mesh on an arc
input:
start_angle/end_angle: [float] starting and ending angles in radians. Angle
is measured anti-clockwise. Default start angle is
positive x-axis
refinement_level: [int] number of elements that each element has to be
split into
"""
# CHECK FOR ANGLE
PI = u"\u03C0"
EPS = np.finfo(np.float64).eps
if np.abs(start_angle) + EPS > 2.*np.pi:
raise ValueError("The starting angle should be either in range [-2{},0] or [0,2{}]".format(PI,PI))
if np.abs(end_angle) + EPS > 2.*np.pi:
raise ValueError("The end angle should be either in range [-2{},0] or [0,2{}]".format(PI,PI))
a1 = np.sign(start_angle) if np.sign(start_angle)!=0. else np.sign(end_angle)
a2 = np.sign(end_angle) if np.sign(end_angle)!=0. else np.sign(start_angle)
if a1 == a2:
total_angle = np.abs(end_angle - start_angle)
if np.isclose(total_angle,0.) or np.isclose(total_angle,2.*np.pi) or total_angle > 2.*np.pi:
self.Circle(center=center, radius=radius, nrad=nrad, ncirc=ncirc, element_type=element_type)
return
if not isinstance(center,tuple):
raise ValueError("The center of the arc should be given in a tuple with two elements (x,y)")
self.__reset__()
if algorithm == "midpoint_subdivision":
from Florence.MeshGeneration.CustomMesher import SubdivisionArc
mesh = SubdivisionArc(center=center, radius=radius, nrad=nrad, ncirc=ncirc,
start_angle=start_angle, end_angle=end_angle,
element_type=element_type, refinement=refinement, refinement_level=refinement_level)
self.__update__(mesh)
return
if refinement:
ndivider = refinement_level
else:
ndivider = 1
ncirc = int(ncirc/ndivider)
nrad = int(nrad/ndivider)
if ncirc % 2 != 0 or ncirc < 2:
ncirc = (ncirc // 2)*2 + 2
radii = radius
radius = np.linspace(0,radii,nrad+1)[1:]
t = np.linspace(start_angle,end_angle,ncirc+1)
x = radius[0]*np.cos(t)[::-1]
y = radius[0]*np.sin(t)[::-1]
points = np.zeros((ncirc+2,2),dtype=np.float64)
points[0,:] = [0.,0.]
points[1:,:] = np.array([x,y]).T
self.elements = np.zeros((ncirc // 2,4),dtype=np.int64)
aranger = np.arange(ncirc // 2)
self.elements[:,1] = 2*aranger + 1
self.elements[:,2] = 2*aranger + 2
self.elements[:,3] = 2*aranger + 3
for i in range(1,nrad):
t = np.linspace(start_angle,end_angle,ncirc+1)
x = radius[i]*np.cos(t)[::-1]
y = radius[i]*np.sin(t)[::-1]
points = np.vstack((points,np.array([x,y]).T))
points[:,0] += center[0]
points[:,1] += center[1]
elements = np.zeros((ncirc,4),dtype=np.int64)
for i in range(1,nrad):
aranger = np.arange(1+ncirc*(i-1),ncirc*i+1)
elements[:,0] = aranger + i - 1
elements[:,1] = aranger + i + ncirc
elements[:,2] = aranger + i + ncirc + 1
elements[:,3] = aranger + i
self.elements = np.concatenate((self.elements,elements),axis=0)
makezero(points)
self.points = points
self.elements[:ncirc // 2,:] = self.elements[:ncirc // 2, [1,2,3,0]]
self.element_type = "quad"
self.nelem = self.elements.shape[0]
self.nnode = self.points.shape[0]
self.GetBoundaryEdges()
if refinement:
mesh = self.QuadrilateralProjection(points=self.points[self.elements[0,:],:], npoints=ndivider)
for i in range(1,self.nelem):
mesh += self.QuadrilateralProjection(points=self.points[self.elements[i,:],:], npoints=ndivider)
self.__update__(mesh)
if element_type == "tri":
sys.stdout = open(os.devnull, "w")
self.ConvertQuadsToTris()
sys.stdout = sys.__stdout__
def Circle(self, center=(0.,0.), radius=1., nrad=16, ncirc=40,
element_type="tri", refinement=False, refinement_level=2, algorithm="standard"):
"""Creates a structured quad/tri mesh on circle
input:
algorithm: [str] either 'standard' or 'midpoint_subdivision'
the latter generates quad meshes with 4 singularities
"""
if not isinstance(center,tuple):
raise ValueError("The center of the circle should be given in a tuple with two elements (x,y)")
self.__reset__()
if algorithm == "midpoint_subdivision":
from Florence.MeshGeneration.CustomMesher import SubdivisionCircle
mesh = SubdivisionCircle(center=center, radius=radius, nrad=nrad, ncirc=ncirc,
element_type=element_type, refinement=refinement, refinement_level=refinement_level)
self.__update__(mesh)
return
if refinement:
ndivider = refinement_level
if nrad==1: nrad=2
else:
ndivider = 1
ncirc = int(ncirc/ndivider)
nrad = int(nrad/ndivider)
if ncirc % 8 != 0 or ncirc < 8:
ncirc = (ncirc // 8)*8 + 8
radii = radius
radius = np.linspace(0,radii,nrad+1)[1:]
t = np.linspace(0,2*np.pi,ncirc+1)
x = radius[0]*np.sin(t)[::-1][:-1]
y = radius[0]*np.cos(t)[::-1][:-1]
points = np.zeros((ncirc+1,2),dtype=np.float64)
points[0,:] = [0.,0.]
points[1:,:] = np.array([x,y]).T
self.elements = np.zeros((ncirc // 2,4),dtype=np.int64)
aranger = np.arange(ncirc // 2)
self.elements[:,1] = 2*aranger + 1
self.elements[:,2] = 2*aranger + 2
self.elements[:,3] = 2*aranger + 3
self.elements[-1,-1] = 1
for i in range(1,nrad):
t = np.linspace(0,2*np.pi,ncirc+1);
x = radius[i]*np.sin(t)[::-1][:-1];
y = radius[i]*np.cos(t)[::-1][:-1];
points = np.vstack((points,np.array([x,y]).T))
points[:,0] += center[0]
points[:,1] += center[1]
elements = np.zeros((ncirc,4),dtype=np.int64)
for i in range(1,nrad):
aranger = np.arange(1+ncirc*(i-1),ncirc*i+1)
elements[:,0] = aranger
elements[:,1] = aranger + ncirc
elements[:,2] = np.append((aranger + 1 + ncirc)[:-1],i*ncirc+1)
elements[:,3] = np.append((aranger + 1)[:-1],1+(i-1)*ncirc)
self.elements = np.concatenate((self.elements,elements),axis=0)
makezero(points)
self.points = points
self.elements[:ncirc // 2,:] = self.elements[:ncirc // 2, [1,2,3,0]]
self.element_type = "quad"
self.nelem = self.elements.shape[0]
self.nnode = self.points.shape[0]
self.GetBoundaryEdges()
if refinement:
mesh = self.QuadrilateralProjection(points=self.points[self.elements[0,:],:], npoints=ndivider)
for i in range(1,self.nelem):
mesh += self.QuadrilateralProjection(points=self.points[self.elements[i,:],:], npoints=ndivider)
self.__update__(mesh)
# SECOND LEVEL OF REFINEMENT IF NEEDED
# mesh = self.QuadrilateralProjection(points=self.points[self.elements[0,:],:], npoints=2)
# for i in range(1,self.nelem):
# mesh += self.QuadrilateralProjection(points=self.points[self.elements[i,:],:], npoints=2)
# self.__update__(mesh)
if element_type == "tri":
sys.stdout = open(os.devnull, "w")
self.ConvertQuadsToTris()
sys.stdout = sys.__stdout__
def HollowArc(self, center=(0.,0.), inner_radius=1., outer_radius=2., nrad=16, ncirc=40,
start_angle=0., end_angle=np.pi/2., element_type="tri", refinement=False, refinement_level=2):
"""Creates a structured quad/tri mesh on a hollow arc (i.e. two arc bounded by straight lines)
input:
start_angle/end_angle: [float] starting and ending angles in radians. Angle
is measured anti-clockwise. Default start angle is
positive x-axis
refinement_level: [int] number of elements that each element has to be
split into
"""
# CHECK FOR ANGLE
PI = u"\u03C0"
EPS = np.finfo(np.float64).eps
if np.abs(start_angle) + EPS > 2.*np.pi:
raise ValueError("The starting angle should be either in range [-2{},0] or [0,2{}]".format(PI,PI))
if np.abs(end_angle) + EPS > 2.*np.pi:
raise ValueError("The end angle should be either in range [-2{},0] or [0,2{}]".format(PI,PI))
if np.sign(start_angle)
from __future__ import print_function
import sys
import os
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.utils.data as data
from PIL import Image
import _init_paths
from lib.core import ssd_config as cfg
from utils.blob import BaseTransform
from datasets.coco_test import COCODetection
from modeling.SSD import build_ssd
import time
import numpy as np
parser = argparse.ArgumentParser(description='Single Shot MultiBox Detection')
parser.add_argument('--trained_model', default=cfg.PRETRAINED_WEIGHT,
type=str, help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
help='Dir to save results')
parser.add_argument('--visual_threshold', default=0.6, type=float,
help='Final confidence threshold')
parser.add_argument('--cuda', default=True, type=bool,
help='Use cuda to train model')
parser.add_argument('--coco_root', default=cfg.COCO_ROOT, help='Location of COCO/VOC root directory')
parser.add_argument('-f', default=None, type=str, help="Dummy arg so we can load in Jupyter Notebooks")
args = parser.parse_args()
if args.cuda and torch.cuda.is_available():
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
def test_net(save_folder, net, cuda, testset, transform, thresh):
# dump predictions and assoc. ground truth to text file for now
filename = save_folder+'test_result_new.txt'
num_images = len(testset)
print('~~~~~~~~~~~~~~~~~: ', num_images)
all_boxes = [[[] for _ in range(num_images)]
for _ in range(len(cfg.COCO_CLASSES)+1)]
for i in range(num_images):
im, gt, h, w = testset.pull_item(i)
print('Testing image {:d}/{:d}....'.format(i+1, num_images))
img = testset.pull_image(i)
img_id, annotation = testset.pull_anno(i)
x = torch.from_numpy(transform(img)[0]).permute(2, 0, 1)
x = Variable(x.unsqueeze(0))
# with open(filename, mode='a') as f:
# f.write('\nGROUND TRUTH FOR: '+str(img_id)+'\n')
# for box in annotation:
# f.write('label: '+' || '.join(str(b) for b in box)+'\n')
if cuda:
x = x.cuda()
t0 = time.time()
y = net(x) # forward pass
detections = y.data
# # scale each detection back up to the image
# scale = torch.Tensor([img.shape[1], img.shape[0],
# img.shape[1], img.shape[0]])
t1 = time.time()
print('timer: %.4f sec.' % (t1 - t0),flush=True)
pred_num = 0
for j in range(1, detections.size(1)):
# # if i!=0:
# j = 0
# while detections[0, i, j, 0] >= 0.1:
# if pred_num == 0:
# with open(filename, mode='a') as f:
# f.write(str(img_id)+'\n')
# score = detections[0, i, j, 0]
# label_name = labelmap[i-1]
# pt = (detections[0, i, j, 1:]*scale).cpu().numpy()
# coords = (pt[0], pt[1], pt[2], pt[3])
# pred_num += 1
# with open(filename, mode='a') as f:
# f.write(str(pred_num)+' label: '+str(i)+' score: ' +
# str(score) + ' '.join(str(c) for c in coords) + '\n')
# j += 1
k = 0
inds = np.where(detections[0, j, k, 0] > 0.01)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 20 19:00:38 2020
@author: Mradumay
"""
import math
import matplotlib.pyplot as plt
from scipy.integrate import quad
import numpy as np
import pandas as pd
from colorama import Fore, Style
import os
import glob
import sys
num=int(input("Enter number of planets to be plotted:"))
num1=round(math.sqrt(num))
col=num1
row=math.ceil(num/num1)
cwd=os.getcwd()
extension = 'csv'
cwd1=cwd+"/Exoplanet_Catalogs"
cwd2=cwd+"/Limb_darkening_data"
os.chdir(cwd1)
files = glob.glob('*.{}'.format(extension))
l=len(files)
namef="exoplanet.eu_catalog.csv"
exo=[]
fig=plt.figure(figsize=(row**2+2*row+7,col**2+3*col+3),constrained_layout=False)
newa=[]
for te in range(1,num+1):
itr=0
for entry in files:
if entry==namef and l==1:
catalog=0
print(Fore.WHITE +"Exoplanet.eu catalog found")
print(Style.RESET_ALL)
break
else:
catalog=float(input("Enter 0 for Exoplanet.eu, 1 for NASA arxiv, and 2 for entering manually:"))
break
if l==0:
print(Fore.RED +"No catalog found. Enter parameters manually")
print(Style.RESET_ALL)
catalog=2
while catalog!=0 and catalog!=1 and catalog!=2:
catalog=float(input(Fore.RED +"Wrong option entered. Please re-enter:"))
print(Style.RESET_ALL)
if catalog==0:
for entry in files:
itr=itr+1
if entry==namef:
break
if entry!=namef and itr==l:
sys.exit(Fore.RED +"Exoplanet.eu catalog not found")
print(Style.RESET_ALL)
data=pd.read_csv(os.path.join(cwd1,"exoplanet.eu_catalog.csv"))
df=pd.DataFrame(data)
starrad=df["star_radius"]
planrad=df["radius"]
temp=df["star_teff"]
semiax=df["semi_major_axis"]
name=data["# name"]
eccentricity=data["eccentricity"]
Mass=data["star_mass"]
metallicity=data["star_metallicity"]
exoplanet=input("Enter the name of exoplanet:")
exo.append(exoplanet)
opt=float(input("Enter 1 if you wish to change st_rad,2 for pl_rad, 3 for Teff, 4 for sm_axis else enter any #: "))
g=1
while g!=0:
for i in range(len(starrad)):
if name[i]==exoplanet:
g=0
break
elif name[i]!=exoplanet and i==len(starrad)-1:
exoplanet=input(Fore.RED +"Exoplanet not found. Please check the name and type again:")
print(Style.RESET_ALL)
for i in range(len(starrad)):
if name[i]==exoplanet:
rp1=planrad[i]
rs1=starrad[i]
fa1=temp[i]
al1=semiax[i]
ecc=eccentricity[i]
M1=Mass[i]
met=metallicity[i]
if opt==1 or opt==12 or opt==13 or opt==14:
rs1=float(input("Enter stellar radius:"))
if opt==2 or opt==12 or opt==23 or opt==24:
rp1=float(input("Enter planet radius:"))
if opt==3 or opt==13 or opt==23 or opt==34:
fa1=float(input("Enter effective temperature:"))
if opt==4 or opt==14 or opt==24 or opt==34:
al1=float(input("Enter semi-major axis:"))
if catalog==1:
filename=input("Enter name of NASA arxiv csv file:")
it=0
for entry in files:
it=it+1
if entry==filename:
g1=0
break
if it==len(files):
sys.exit(Fore.RED +"File name incorrect or file missing. Please check file or re-type")
print(Style.RESET_ALL)
data=pd.read_csv(os.path.join(cwd1,filename),error_bad_lines=False,skiprows=361,low_memory=False)
df=pd.DataFrame(data)
planrad=df["pl_radj"]
starrad=df["st_rad"]
temp=df["st_teff"]
semiax=df["pl_orbsmax"]
name=data["pl_name"]
eccentricity=data["pl_orbeccen"]
Mass=data["st_mass"]
metallicity=data["st_metfe"]
exoplanet=input("Enter the name of exoplanet:")
exo.append(exoplanet)
opt=float(input("Enter 1 if you wish to change st_rad,2 for pl_rad, 3 for Teff, 4 for sm_axis else enter any #: "))
g2=1
while g2!=0:
for i in range(len(starrad)):
if name[i]==exoplanet:
g2=0
break
elif name[i]!=exoplanet and i==len(starrad)-1:
exoplanet=input(Fore.RED +"Exoplanet not found. Please check the name and type again:")
print(Style.RESET_ALL)
for i in range(len(starrad)):
if name[i]==exoplanet:
rp1=planrad[i]
rs1=starrad[i]
fa1=temp[i]
al1=semiax[i]
ecc=eccentricity[i]
M1=Mass[i]
met=metallicity[i]
if opt==1 or opt==12 or opt==13 or opt==14:
rs1=float(input("Enter stellar radius:"))
if opt==2 or opt==12 or opt==23 or opt==24:
rp1=float(input("Enter planet radius:"))
if opt==3 or opt==13 or opt==23 or opt==34:
fa1=float(input("Enter effective temperature:"))
if opt==4 or opt==14 or opt==24 or opt==34:
al1=float(input("Enter semi-major axis:"))
para=1
while para!=4 and para!=1 and para!=2:
para=float(input(Fore.RED +'Wrong option entered. Please re-enter:'))
print(Style.RESET_ALL)
if catalog==2:
print(Style.RESET_ALL)
rp1=float(input("Enter radius of planet in Jupiter radii:"))
rs1=float(input("Enter radius of the host star in units of solar radius:"))
fa1=float(input("Enter effective Temperature of host star in K:"))
al1=float(input("Enter semi-major axis of the planet from the star in AU:"))
ecc=float(input("Enter eccentricity:"))
exoplanet=input("Enter name:")
if para==4:
M1=float(input("Enter stellar mass(solar units):"))
met=float(input("Enter metallicity[Fe/H]:"))
if para==1:
u=0.6
met=0
M1=1
if para==2:
u1=float(input("Enter bolometric quadratic coefficient(u1):"))
u2=float(input("Enter bolometric quadratic coefficient(u2):"))
met=0
M1=1
if np.isnan(rs1)==True or np.isnan(fa1)==True or np.isnan(al1)==True:
print(Fore.RED +"Crucial parameter missing")
print(Style.RESET_ALL)
else:
if np.isnan(rp1)==True:
rp1=input(Fore.RED +"Radius of planet is missing. Please enter value in Rj units:")
print(Style.RESET_ALL)
rp1=float(rp1)
if np.isnan(met)==True:
met=float(input(Fore.RED +"Metallicity[Fe/H] missing in dataset. Enter manually:"))
print(Style.RESET_ALL)
if np.isnan(M1)==True:
M1=float(input(Fore.RED +"Stellar mass missing in dataset. Enter manually:"))
print(Style.RESET_ALL)
number=1
obli=0
if np.isnan(ecc)==True:
ecc=0
elif ecc!=0 and ecc<0.3:
print(Fore.WHITE +"Eccentric orbit detected, calculating values at periastron \033[1;30;47m")
print(Style.RESET_ALL)
elif ecc>0.3:
number=4
print(Fore.WHITE +"Highly eccentric orbit(e>0.3). Calculating annual mean \033[1;30;47m")
print(Style.RESET_ALL)
true1=np.linspace(0,270,number)
if te==num:
print(Fore.WHITE +'Generating Plot, Please wait.. \033[1;30;47m')
print(Style.RESET_ALL)
average=[]
inverse=[]
for j in range(0,number):
true=true1[j]*np.pi/180
ob1=float(obli)
ob=ob1*np.pi/180
rp1=float(rp1)
rs1=float(rs1)
al1=float(al1)
fa1=float(fa1)
ecc=float(ecc)
M=M1*2*10**(30)
rs=rs1*6.955*10**8
rp=rp1*6.4*10**6*11.21
al2=al1*1.496*10**11
al=al2*(1-ecc**2)/(1+ecc*abs(math.cos(true)))
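        # `al` follows the polar equation of an ellipse, r = a*(1-e^2)/(1+e*cos(nu)),
        # i.e. the planet-star separation evaluated at the current true anomaly `true`.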
d=al-rs-rp
ch=math.acos(rp/(d+rp))
s3=math.floor(57.3*math.asin(abs(rs-rp)/al))
s1=np.pi/2+s3/57.3
s=np.pi/2
symp=math.acos((rs+rp)/al)*57.3
la1=np.linspace(-s1,s1,500)
la2=np.linspace((-s1+ob)*180/np.pi,(s1-ob)*180/np.pi,500)
surfgrav=100*6.67*10**(-11)*M/rs**(2)
logg=math.log10(surfgrav)
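        # Surface gravity G*M/R^2 is computed in SI units (m/s^2) and multiplied by 100
        # to convert to cgs (cm/s^2), so `logg` is the usual stellar log10(g) in cgs.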
oldfor=[]
final=[]
denom=[]
numer=[]
if para==1 and u==0.6:
fa=fa1*1.0573
if para==1 and u==0:
fa=fa1
P=5.67*10**(-8)*fa**(4)*4*np.pi*rs**2
zalist=[]
for k in range(len(la1)):
la=la1[k]
beta=al+rp*math.cos(np.pi-la)
y1=math.acos((rs**2-rp**2*(math.sin(la))**2)/(beta*rs - rp*math.sin(la)*(math.sqrt(rp**2*(math.sin(la))**2-rs**2+beta**2))))*180/np.pi
y4=math.acos((rs**2-rp**2*(math.sin(la))**2)/(beta*rs + rp*math.sin(la)*(math.sqrt(rp**2*(math.sin(la))**2-rs**2+beta**2))))*180/np.pi
y5=math.acos(rs/math.sqrt(al**2+rp**2-2*al*rp*math.cos(la)))*180/np.pi
y6=math.acos((rs+rp*math.sin(la))/al)
y=(y1)*np.pi/180
y2=(y4)*np.pi/180
y3=(y5)*np.pi/180
y7=math.acos((rs+rp*math.sin(la))/al)
ad1=180*math.atan(rs*math.sin(y)/(d+rs-(rs*math.cos(y))))/np.pi
ad=math.floor(ad1)*np.pi/180
vis=math.acos(rs/(math.sqrt(al**2+rp**2-2*al*rp*math.cos(la))))
P1=5.67*10**(-8)*fa1**(4)*4*np.pi*(rs)**2
def function(x,th,la):
a=-rs*math.sin(th)*math.cos(th)*math.sin(np.pi-la)+al*math.cos(np.pi-la)*math.cos(th)+rp*math.cos(th)
b=rs*math.cos(np.pi-la)*(math.cos(th))**2
c=rs**2+al**2+rp**2 - 2*rs*rp*math.sin(th)*math.sin(la)+ 2*al*rp*math.cos(np.pi-la)
e=-2*(al+rp*math.cos(np.pi-la))*rs*math.cos(th)
mu=abs(-rs+(al+rp*math.cos(np.pi-la))*math.cos(th)*math.cos(x)+rp*math.sin(th)*math.sin(la))/(c+e*math.cos(x))**(1/2)
if para==1:
lf=1-u*(1-mu)
if para==2:
lf=1-u1*(1-mu)-u2*(1-mu)**2
return abs(a-b*math.cos(x))*lf*mu/(c+e*math.cos(x))**(3/2)
def integration(th,la):
return quad(function,-y3,y3,args=(th,la))[0]
ll=-y
ul=y
if la>0:
ll=-y
ul=y2
if la<0:
ll=-y2
ul=y
if la>=y and la<np.pi/2 and la<=ch:
ll=-math.acos((al*math.cos(la)-rp)*math.cos(la)/rs+ math.sin(la)*math.sqrt(rs**2-(al*math.cos(la)-rp)**2)/rs)
if la>ch and rs>rp:
ll=math.acos((al*math.cos(la)-rp)*math.cos(la)/rs+ math.sin(la)*math.sqrt(rs**2-(al*math.cos(la)-rp)**2)/rs)
if la>=np.pi/2 and rs>rp:
ll=math.acos((al*math.cos(la)-rp)*math.cos(la)/rs+ math.sin(la)*math.sqrt(rs**2-(al*math.cos(la)-rp)**2)/rs)
if abs(la)>y2 and la<0 and la>-np.pi/2 and abs(la)<=ch:
ul=math.acos((al*math.cos(la)-rp)*math.cos(la)/rs- math.sin(la)*math.sqrt(rs**2-(al*math.cos(la)-rp)**2)/rs)
if abs(la)>ch and la<0 and rs>rp:
ul=-math.acos((al*math.cos(la)-rp)*math.cos(la)/rs- math.sin(la)*math.sqrt(rs**2-(al*math.cos(la)-rp)**2)/rs)
if la<=-np.pi/2 and rs>rp:
ul=-math.acos((al*math.cos(la)-rp)*math.cos(la)/rs- math.sin(la)*math.sqrt(rs**2-(al*math.cos(la)-rp)**2)/rs)
xarr=np.linspace(-y,y,100)
            tharr=np.linspace(ll,ul,100)
"""Create important distributions classes.
Especially provide the logic for a hypererlang distributions with data
fitting.
"""
import logging
import multiprocessing as mp
from itertools import combinations_with_replacement
from typing import Any, Dict, Generator, Iterable, List, Optional, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats
from pynverse import inversefunc
from hoqunm.utils.utils import LOGGING_DIR, Heap, get_logger
class HypererlangSpecs:
"""Specifiactions for hyper-erlang fit.
:param processors: Processors to use for multiprocessing.
:param N: The sum of the length of the erlang distributions for state limitation.
:param convergence_criteria: The convergence cirteria in each round.
:param maximal_distributions: The maximla distributions in each step.
"""
def __init__(self,
processors: int = mp.cpu_count() - 1,
N: int = 10,
convergence_criteria: Optional[List[float]] = None,
maximal_distributions: Optional[List[int]] = None):
self.processors = processors
self.N = N
self.convergence_criteria = convergence_criteria if convergence_criteria is not None else [
1e-4, 1e-6, 1e-8
]
self.maximal_distributions = maximal_distributions if maximal_distributions is not None \
else [50, 25, 1]
if not len(self.convergence_criteria) == len(
self.maximal_distributions):
raise AttributeError(
"Length of convergence criteria and maximal distributions do not match."
)
if self.N <= 0:
raise ValueError(f"N has to be larger then 10. N is {N}")
@staticmethod
def load_dict(arguments: Dict[str, Any]) -> "HypererlangSpecs":
"""Create class from Dict with arguments and values in it.
:param arguments: The dict containing the parameter-argument pairs.
:return: Class instance.
"""
return HypererlangSpecs(**arguments)
class HyperDistribution:
"""A class representing a hyper distributions of a current distributions
type. for compatibility, the methods and attributes are similar to those of
scipy.stats.rv_continuous.
:param distribution: The distributions type.
:param hyper: The hyper parameters.
:param kwargs: The arguments needed for the distributions.
Each will be in list style having the same shape as hyper.
"""
def __init__(self, distribution: scipy.stats.rv_continuous,
hyper: Union[np.ndarray, List[float]],
**kwargs: Union[np.ndarray, List[float]]):
self.dist = self # for compatibility with scipy.stats
self.distribution = distribution
self.name = "hyper" + self.distribution.name
self.hyper = np.asarray(hyper).reshape(-1)
self.hyper = self.hyper / self.hyper.sum()
kwargs = {
key: np.asarray(arg).reshape(-1)
for key, arg in kwargs.items()
}
self.kwargs = [{key: arg[i]
for key, arg in kwargs.items()}
for i in range(self.hyper.shape[0])]
self.paramno = self.hyper.shape[0] * (1 + len(kwargs))
def mean(self) -> float:
"""Return the mean of the distributions.
:return: Mean of the distributions.
"""
return float(
np.sum([
p * self.distribution.mean(**self.kwargs[i])
for i, p in enumerate(self.hyper)
]))
def var(self) -> np.float:
"""Return the variance of the distributions.
:return: Variance of the distributions.
"""
return float(
np.sum([
p * self.distribution.var(**self.kwargs[i])
for i, p in enumerate(self.hyper)
]))
def pdf(self, x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""The pdf (probability density function) evaluated at x.
:param x: x values, where the pdf should be evaluated.
:return: Corresponding value of pdf at x.
"""
return np.sum([
p * self.distribution.pdf(x=x, **self.kwargs[i])
for i, p in enumerate(self.hyper)
],
axis=0)
def cdf(self, x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""The cdf (culmulative density function) evaluated at x.
:param x: x values, where the pdf should be evaluated.
:return: Corresponding value of cdf at x.
"""
return np.sum([
p * self.distribution.cdf(x=x, **self.kwargs[i])
for i, p in enumerate(self.hyper)
],
axis=0)
def ppf(self, x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""The ppf (percent point function - the inverse of the cdf) evaluated at x.
Since this is not analytically available, compute it with an inversefunc module.
:param x: x values, where the ppf should be evaluated.
:return: Corresponding value of ppf at x.
"""
return inversefunc(self.cdf)(x)
def rvs(self, size: np.shape = None) -> Union[np.ndarray, float]:
"""A random value of the random variable.
:param size: The size of the np.array with random values.
:return: Random value(s).
"""
index = np.random.choice(a=self.hyper.shape[0],
p=self.hyper,
size=size)
out = np.zeros(size, dtype="float64")
if size:
for i, _ in enumerate(self.hyper):
out[index == i] = self.distribution.rvs(**self.kwargs[i],
size=size)[index == i]
else:
out = self.distribution.rvs(**self.kwargs[index], size=size)
return out
def log_likelihood(self, x: Union[float, np.ndarray]) -> float:
"""Compute the log likelihood of the hyper_distribution w.r.t to
observed data x.
:param x: The observed data.
:return: The log likelihood.
"""
return np.sum(np.log(self.pdf(x)))
def __str__(self) -> str:
"""A representation of the class very basic.
:return: String of all attributes with respective values.
"""
return str([(key, val) for key, val in self.__dict__.items()
if not callable(getattr(self, key))])
class Hypererlang(HyperDistribution):
"""A class representing a hyper erlang distributions this is in so far
special, that we know an algorithm to fit a hypererlang distributions to
data.
:param hyper: The hyper parameters.
:param kwargs: The arguments needed for the distributions.
Each will be in list style having the same shape as hyper.
"""
name = "hypererlang"
def __init__(self,
hyper: List[float],
paramno: Optional[int] = None,
logger: Optional[logging.Logger] = None,
**kwargs: Union[np.ndarray, List[float]]):
if kwargs.get("lambd"):
lambd = np.asarray(kwargs.pop("lambd"))
kwargs["scale"] = 1 / lambd
super().__init__(scipy.stats.erlang, hyper, **kwargs)
if paramno is not None:
self.paramno = paramno
self.lambd = 1 / np.asarray(kwargs["scale"]).reshape(-1)
self.a = np.asarray(kwargs["a"]).reshape(-1)
self.convergence_error = np.inf
self.log_likelihood_fit = -np.inf
self.logger = logger if logger is not None else get_logger(
"hypererlang_distribution",
LOGGING_DIR.joinpath("hypererlang_distribution.log"))
def save_dict(self) -> Dict[str, Any]:
"""Create dictionary with argument value mapping.
:return: Argument value mapping for class creation.
"""
arguments = {"hyper": self.hyper.tolist(), "paramno": self.paramno}
arguments.update({
key: [arg[key] for arg in self.kwargs]
for key in self.kwargs[-1]
})
arguments["a"] = self.a.tolist()
return arguments
@staticmethod
def load_dict(arguments: Dict[str, Any]) -> "Hypererlang":
"""Create class instance from given dict.
:param arguments: Arguments value mapping for class instance.
:return: Class instance.
"""
return Hypererlang(**arguments)
def ppf(self, x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""The ppf (percent point function - the inverse of the cdf) evaluated at x.
Since this is not analytically available, compute it with an inversefunc module.
It is known, that the domain are only positive floats, so provide domain!
:param x: x values, where the ppf should be evaluated.
:return: Corresponding value of ppf at x.
"""
return inversefunc(self.cdf, domain=0)(x)
def fit_lambd_hyper(self,
x: Union[List[float], np.ndarray],
convergence_criterion: float = 1e-6) -> "Hypererlang":
"""fit lambda and hyper parameters for given data until
convergence_criterion is met.
:param x: The data to fit the distributions to.
:param convergence_criterion: The criterion which has to be met in order to quit fitting.
:return: An instane of self.
"""
x = np.asarray(x)
log_a = np.array([
np.sum(np.log(np.arange(1, a_, dtype="float64")))
for i, a_ in enumerate(self.a)
]) # shape(m)
x_ = x.reshape(-1, 1) # shape(k, 1)
runs = 0
self.log_likelihood_fit = self.log_likelihood(x)
while convergence_criterion <= self.convergence_error:
p_ = self.lambd * np.exp((self.a - 1) * np.log(self.lambd * x_) -
log_a - self.lambd * x_) # shape(k, m)
q_ = self.hyper * p_ # shape(k, m)
q_ = q_ / q_.sum(axis=1).reshape(-1, 1) # shape(k, m)
self.hyper = (1 / x_.shape[0]) * q_.sum(axis=0) # shape(m)
self.lambd = self.a * q_.sum(axis=0) / np.sum(q_ * x_,
axis=0) # shape(m)
log_likelihood_fit = self.log_likelihood(x)
self.convergence_error = abs(
(log_likelihood_fit - self.log_likelihood_fit) /
self.log_likelihood_fit)
self.log_likelihood_fit = log_likelihood_fit
runs += 1
for i, kwarg_i in enumerate(self.kwargs):
kwarg_i["scale"] = 1 / self.lambd[i]
return self
def fit(self,
x: Union[List[float], np.ndarray],
specs: Optional[HypererlangSpecs] = None) -> None:
"""Compute a hypererlang distributions which fits the data with EM
algorithm according to "A novel approach for phasetype fitting", where
the length of all erlang distributions is equal to N. The fitting is
done in 3 respective rounds, each reducing the number of configurations
under consideration while increasing the convergence_criterium floc is
appears only for compatibility reasons with
scipy.stats.rv_continuous.fit.
Change the parameters on self!
:param x: The data to fit to.
:param specs: The specifications.
"""
if specs is None:
specs = HypererlangSpecs()
convergence_criteria = np.asarray(specs.convergence_criteria)
maximal_distributions = np.asarray(specs.maximal_distributions)
hypererlangs: Iterable[Hypererlang] = self.iterate_hyp_erl(specs.N)
heap = Heap()
for i, convergence_criterion in enumerate(convergence_criteria):
heap.change_length(maximal_distributions[i])
if specs.processors > 1:
pool = mp.Pool(processes=specs.processors)
for hypererlang_ in hypererlangs:
# this gives all allowed values for r_m
pool.apply_async(hypererlang_.fit_lambd_hyper,
args=(x, convergence_criterion),
callback=heap.push,
error_callback=self.error_callback)
pool.close()
pool.join()
else:
for hypererlang_ in hypererlangs:
# this gives all allowed values for r_m
heap.push(
hypererlang_.fit_lambd_hyper(x, convergence_criterion))
hypererlangs = heap.copy_to_list()
        # heap[0] has the parameters we want, so copy them
candidate = heap.nlargest(1)[0]
for key, val in candidate.__dict__.items():
if hasattr(candidate, key):
setattr(self, key, val)
@staticmethod
def iterate_hyp_erl(N: int = 10) -> Generator["Hypererlang", None, None]:
"""Generate all combinations of hypererlang a parameters and yield the
hypererlang such that the sum of the length of all erlang distributions
is equal N.
:param N: The sum of the length of the erlang distributions.
:yield: Hypererlang distrbutions.
"""
assert N > 0
for i in combinations_with_replacement(list(range(N + 1)), N):
if np.sum(i) == N:
a = np.array(i).astype("int")
a = a[a != 0]
yield Hypererlang(hyper=[1 / a.shape[0]] * a.shape[0],
a=a,
lambd=[1] * a.shape[0],
paramno=N * 3)
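    # For example, iterate_hyp_erl(3) yields one candidate per integer partition of 3,
    # i.e. the branch-length vectors a = [1, 1, 1], [1, 2] and [3].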
def __lt__(self, other: "Hypererlang") -> bool:
if hasattr(self, "log_likelihood_fit") and hasattr(
other, "log_likelihood_fit"):
return self.log_likelihood_fit < other.log_likelihood_fit
else:
raise ValueError
def error_callback(self, error: BaseException) -> None:
"""Log error during multiprocessing.
:param error: The error received.
"""
self.logger.warning(error)
def fit_hypererlang(x: Union[List[float], np.ndarray, pd.Series],
specs: Optional[HypererlangSpecs] = None) -> Hypererlang:
"""Compute a hypererlang distributions which fits the data, where the
length of all erlang distributions is equal to N. The fitting is done in k
respective rounds, each reducing the number of configurations under
consideration while increasing the convergence_criterium floc is appears
only for compatibility reasons with scipy.stats.rv_continuous.fit.
:param x: The data to fit to.
:param specs: The specifications.
:return: A hypererlang instance, fitted to x.
"""
if specs is None:
specs = HypererlangSpecs()
fitted_hypererlang = Hypererlang(hyper=[1],
a=1,
lambd=1,
paramno=specs.N * 3)
fitted_hypererlang.fit(x, specs=specs)
return fitted_hypererlang
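# Example usage (a minimal sketch; the sample data and spec values below are
# hypothetical and only illustrate the call signature):
# samples = np.random.exponential(scale=2.0, size=500)
# fitted = fit_hypererlang(samples, specs=HypererlangSpecs(processors=1, N=5))
# print(fitted.mean(), fitted.var(), fitted.log_likelihood_fit)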
def fit_expon(x: Union[List[float], np.ndarray, pd.Series]):
"""Fit exponential distributions to data.
:param x: The data to fit to.
:return: A scipy.stats.expon instance, fitted to x.
"""
fitted_expon = scipy.stats.expon(*scipy.stats.expon.fit(x, floc=0))
fitted_expon.paramno = len(fitted_expon.args) + len(fitted_expon.kwds) - 1
fitted_expon.name = "exponential"
return fitted_expon
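# Example usage (a minimal sketch with the hypothetical `samples` from above):
# expon_dist = fit_expon(samples)
# print(expon_dist.mean(), expon_dist.paramno)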
def plot_distribution_fit(data: pd.Series,
distributions: List[Union[
Hypererlang, scipy.stats.rv_continuous]],
title: str = "<>") -> None:
"""Plot the distributions w.r.t to the data and some measurements to
determine wether the distributions is a good or a bad fit.
:param data: The data to which the distributions was fitted.
:param distributions: The fitted distributions.
:param title: Should include the ward and class from which the data came.
"""
plot_num = len(distributions) + 1
fig = plt.figure(figsize=(12, 4 * plot_num))
dist_plot = fig.add_subplot(plot_num, 1, 1)
bins = 50
dist_plot.hist(data,
bins=bins,
density=True,
label=f"Histogram of the observed data with binsize={bins}")
x_axis = np.arange(0, data.max(), 0.01)
for i, distribution in enumerate(distributions):
dist_plot.plot(x_axis,
distribution.pdf(x_axis),
label=f"Distribution: {distribution.dist.name}")
prob_plot = fig.add_subplot(plot_num, 1, i + 2)
scipy.stats.probplot(x=data, dist=distribution, plot=prob_plot)
prob_plot.set_title(
f"Probability plot with least squares fit for {title}, "
f"distribution: {distribution.dist.name}")
prob_plot.grid(axis="y")
dist_plot.grid(axis="y")
dist_plot.legend()
dist_plot.set_title(title)
fig.tight_layout()
def entropy_sum(q: np.ndarray, p: np.ndarray) -> float:
"""Build the relative entropy and sum over the axis.
:param q: Distribution 1.
:param p: Distribution 2.
:return: The sum of the relative entropies.
"""
return float(np.sum(scipy.stats.entropy(q, p)))
def entropy_max(q: np.ndarray, p: np.ndarray) -> float:
"""Build the relative entropy and max over the axis.
:param q: Distribution 1.
:param p: Distribution 2.
:return: The maximum of the relative entropies.
"""
return float(np.max(scipy.stats.entropy(q, p)))
def relative_distance(q: np.ndarray, p: np.ndarray) -> float:
"""Compute the maximal relative distance of two distributions.
:param q: Distribution 1.
:param p: Distribution 2.
:return: The maximum of the distances.
"""
dist = np.absolute(q - p) / q
dist = np.nan_to_num(dist, 0)
dist[dist == 1] = 0
return np.max(dist)
def total_variation_distance(q: np.ndarray, p: np.ndarray) -> float:
"""Compute the total variation distance of two distributions q and p.
:param q: Distribution 1.
:param p: Distribution 2.
:return: The total variation distance.
"""
t_dist = np.sum(np.absolute(q - p)) / 2
return t_dist
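# Worked example (hypothetical values): for q = [0.5, 0.5] and p = [0.25, 0.75]
# the total variation distance is (|0.25| + |0.25|) / 2 = 0.25.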
def chi2(data: Union[np.ndarray, pd.Series],
distribution: Union[HyperDistribution, scipy.stats.rv_continuous],
numbins: int = 0):
"""Compute chi2 test for distributions fit. Note that this is favorable for
discrete random variables, but can be problematic for continuous variables.
:param data: The observed data.
:param distribution: The distributions which should be tested on goodness of fit.
:param numbins: The number of bins to use.
:return: The value of the chisquare distributions, the p value.
"""
if not isinstance(distribution, HyperDistribution) and not hasattr(
distribution, "paramno"):
distribution.paramno = len(distribution.args) + len(
distribution.kwds) - 1
if numbins == 0:
numbins = max(2 + distribution.paramno, len(data) // 50)
if numbins > 50:
numbins = 50
f_obs, bin_edges = np.histogram(data, numbins, density=True)
# there are minimum 2 bins
f_exp = np.array([distribution.cdf(bin_edge) for bin_edge in bin_edges])
f_exp[1:] -= f_exp[:-1]
ddof = numbins - 1 - distribution.paramno
chisq, p = scipy.stats.chisquare(f_obs, f_exp[1:], ddof)
return chisq, p
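# Example usage (a minimal sketch; `samples` and `expon_dist` are the hypothetical
# objects from the examples above):
# chisq, p = chi2(samples, expon_dist)
# A large p value indicates the fitted distribution is compatible with the observed data.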
def distribution_to_mean(distributions: np.ndarray) -> np.ndarray:
"""Compute the mean value to obtain the specific mean for exp-dist.
:param distributions: The array of distributions which shall be converted.
:return: An array of same shape, containing the repective means of the distributions.
"""
    out = np.zeros_like(distributions, dtype="float")
import time
from abc import ABC, abstractmethod
from collections import deque
import os
import io
import zipfile
import gym
import tensorflow as tf
import numpy as np
from stable_baselines.common import logger
from stable_baselines.common.policies import get_policy_from_name
from stable_baselines.common.utils import set_random_seed, get_schedule_fn
from stable_baselines.common.vec_env import DummyVecEnv, VecEnv, unwrap_vec_normalize, sync_envs_normalization
from stable_baselines.common.monitor import Monitor
from stable_baselines.common.evaluation import evaluate_policy
from stable_baselines.common.save_util import data_to_json, json_to_data
class BaseRLModel(ABC):
"""
The base RL model
:param policy: (BasePolicy) Policy object
:param env: (Gym environment) The environment to learn from
(if registered in Gym, can be str. Can be None for loading trained models)
:param policy_base: (BasePolicy) the base policy used by this method
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 debug
:param support_multi_env: (bool) Whether the algorithm supports training
with multiple environments (as in A2C)
:param create_eval_env: (bool) Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param monitor_wrapper: (bool) When creating an environment, whether to wrap it
or not in a Monitor wrapper.
:param seed: (int) Seed for the pseudo random generators
"""
def __init__(self, policy, env, policy_base, policy_kwargs=None,
verbose=0, device='auto', support_multi_env=False,
create_eval_env=False, monitor_wrapper=True, seed=None):
if isinstance(policy, str) and policy_base is not None:
self.policy_class = get_policy_from_name(policy_base, policy)
else:
self.policy_class = policy
self.env = env
# get VecNormalize object if needed
self._vec_normalize_env = unwrap_vec_normalize(env)
self.verbose = verbose
self.policy_kwargs = {} if policy_kwargs is None else policy_kwargs
self.observation_space = None
self.action_space = None
self.n_envs = None
self.num_timesteps = 0
self.eval_env = None
self.replay_buffer = None
self.seed = seed
self.action_noise = None
# Track the training progress (from 1 to 0)
# this is used to update the learning rate
self._current_progress = 1
# Create and wrap the env if needed
if env is not None:
if isinstance(env, str):
if create_eval_env:
eval_env = gym.make(env)
if monitor_wrapper:
eval_env = Monitor(eval_env, filename=None)
self.eval_env = DummyVecEnv([lambda: eval_env])
if self.verbose >= 1:
print("Creating environment from the given name, wrapped in a DummyVecEnv.")
env = gym.make(env)
if monitor_wrapper:
env = Monitor(env, filename=None)
env = DummyVecEnv([lambda: env])
self.observation_space = env.observation_space
self.action_space = env.action_space
if not isinstance(env, VecEnv):
if self.verbose >= 1:
print("Wrapping the env in a DummyVecEnv.")
env = DummyVecEnv([lambda: env])
self.n_envs = env.num_envs
self.env = env
if not support_multi_env and self.n_envs > 1:
raise ValueError("Error: the model does not support multiple envs requires a single vectorized"
" environment.")
def _get_eval_env(self, eval_env):
"""
Return the environment that will be used for evaluation.
:param eval_env: (gym.Env or VecEnv)
:return: (VecEnv)
"""
if eval_env is None:
eval_env = self.eval_env
if eval_env is not None:
if not isinstance(eval_env, VecEnv):
eval_env = DummyVecEnv([lambda: eval_env])
assert eval_env.num_envs == 1
return eval_env
def scale_action(self, action):
"""
Rescale the action from [low, high] to [-1, 1]
(no need for symmetric action space)
:param action: (np.ndarray)
:return: (np.ndarray)
"""
low, high = self.action_space.low, self.action_space.high
return 2.0 * ((action - low) / (high - low)) - 1.0
def unscale_action(self, scaled_action):
"""
Rescale the action from [-1, 1] to [low, high]
(no need for symmetric action space)
:param scaled_action: (np.ndarray)
:return: (np.ndarray)
"""
low, high = self.action_space.low, self.action_space.high
return low + (0.5 * (scaled_action + 1.0) * (high - low))
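    # Worked example (assuming a Box action space with low = -2 and high = 2):
    # scale_action(1.0) -> 2 * ((1 - (-2)) / (2 - (-2))) - 1 = 0.5, and
    # unscale_action(0.5) -> -2 + 0.5 * (0.5 + 1) * (2 - (-2)) = 1.0.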
def _setup_learning_rate(self):
"""Transform to callable if needed."""
self.learning_rate = get_schedule_fn(self.learning_rate)
def _update_current_progress(self, num_timesteps, total_timesteps):
"""
Compute current progress (from 1 to 0)
:param num_timesteps: (int) current number of timesteps
:param total_timesteps: (int)
"""
self._current_progress = 1.0 - float(num_timesteps) / float(total_timesteps)
def _update_learning_rate(self, optimizers):
"""
Update the optimizers learning rate using the current learning rate schedule
and the current progress (from 1 to 0).
:param optimizers: ([th.optim.Optimizer] or Optimizer) An optimizer
or a list of optimizer.
"""
# Log the current learning rate
logger.logkv("learning_rate", self.learning_rate(self._current_progress))
# if not isinstance(optimizers, list):
# optimizers = [optimizers]
# for optimizer in optimizers:
# update_learning_rate(optimizer, self.learning_rate(self._current_progress))
@staticmethod
def safe_mean(arr):
"""
Compute the mean of an array if there is at least one element.
For empty array, return nan. It is used for logging only.
:param arr: (np.ndarray)
:return: (float)
"""
        return np.nan if len(arr) == 0 else np.mean(arr)
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 21 11:51:54 2019
@author: Sneha
"""
import tkinter as tk
from tkinter import *
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
from getActions import getactions
from diffConstraints import diffconstraints
import time
from heapq import *
import random
root= tk.Tk()
show_animation = True
init=[]
final=[]
resolution=1
radius=0
clearance=0
start = time.time()
title='Click point in map to select Initial/Final point.'
class Node:
def __init__(self,node, x, y,theta,vel,cost,ul,ur, pind):
self.node = node
self.x = x
self.y = y
self.theta = theta
self.vel = vel
self.cost = cost
self.pind = pind
self.ul = ul
self.ur = ur
self.parent = None
def PrintTree(self,ax):
if self.parent:
self.parent.PrintTree(ax)
ax.scatter(self.x,self.y,s=10,c='b')
def PrintTreePlot(self,plt,path,pathv):
if self.parent:
self.parent.PrintTreePlot(plt,path,pathv)
plt.plot(self.x,self.y,color='#39ff14', marker='o')
pathv.append([self.ul,self.ur])
path.append([self.x,self.y,self.theta,self.ul,self.ur])
def __str__(self):
return str(self.x) + "," + str(self.y) + "," + str(self.cost) + "," + str(self.pind)
def onpick(event):
print(event.xdata,event.ydata)
global init,final,title
if(not(init)):
print('init')
init=[(event.xdata),(event.ydata)]
else:
print('final')
final=[(event.xdata),(event.ydata)]
title='Node Exploration'
return True
def animate(listPnts,rectangles):
global title,root,final,init,resolution,radius,clearance
fig = plt.Figure(figsize=(5,4), dpi=100)
ax = fig.add_subplot(111)
scatter = FigureCanvasTkAgg(fig, root)
scatter.get_tk_widget().pack(side=tk.LEFT, fill=tk.BOTH)
ax.fill([-6.55,6.55,6.55,-6.55],[-6.05,-6.05,6.05,6.05], color = (0,0,0))
count=-1
for i in (listPnts):
count+=1
ax.fill(i[0],i[1], color = i[2])
for i in (rectangles):
for k in i[0]:
ax.fill(k[0],k[1], color = i[1])
ax.legend()
ax.set_title(title);
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
fig.canvas.mpl_connect('button_press_event',onpick)
tk.Label(root, text="Enter Coordinates").pack()
tk.Label(root, text="Initial point(comma separated x,y-no spaces)").pack()
initial=Entry(root)
if(init):
init_str=str(init[0])+' '+str(init[1])
initial.insert(0,init_str)
initial.pack()
tk.Label(root, text="Final point(comma separated x,y-no spaces)").pack()
final1=Entry(root)
if(final):
final_str=str(final[0])+' '+str(final[1])
final1.insert(0,final_str)
final1.pack()
tk.Button(root, text="Quit", command= lambda:quit(initial,final1)).pack()
root.mainloop()
xdata=[]
ydata=[]
def animated(i,nodes,node,test):
global xdata,ydata
t, y = i.x,i.y
xdata.append(t)
ydata.append(y)
xmin, xmax = ax.get_xlim()
if t >= xmax:
ax.set_xlim(xmin, 2*xmax)
ax.figure.canvas.draw()
line.set_data(xdata, ydata)
if(((nodes[len(nodes)-1].x) == i.x) and (nodes[len(nodes)-1].y == i.y)):
node.PrintTree(ax)
return line,
def quit(initial,final1):
global root,init,final,radius,resolution,clearance
if(initial.get()):
if(len((initial.get()).split(','))==2):
x,y=(initial.get()).split(',')
if(x and y and (float(x)) and (float(y))):
init=[(float(x)/resolution),(float(y)/resolution)]
else:
root.quit()
root.destroy()
test=tk.Tk()
test.geometry('400x300')
label = Label(test, text= "Please enter valid Initial Point.")
label.pack()
test.mainloop()
else:
root.quit()
root.destroy()
test=tk.Tk()
test.geometry('400x300')
label = Label(test, text= "Please enter valid comma separated Initial Point.")
label.pack()
test.mainloop()
elif(init):
init=[(init[0]/resolution),(init[1]/resolution)]
else:
root.quit()
root.destroy()
test=tk.Tk()
test.geometry('400x300')
label = Label(test, text= "Please enter valid Initial Point.")
label.pack()
test.mainloop()
if(final1.get()):
if(len((final1.get()).split(','))==2):
x1,y1=(final1.get()).split(',')
if(x1 and y1 and (float(x1)) and (float(y1))):
final=[(float(x1)/resolution),(float(y1)/resolution)]
else:
root.quit()
root.destroy()
test=tk.Tk()
test.geometry('400x300')
label = Label(test, text= "Please enter valid Final Point.")
label.pack()
test.mainloop()
else:
root.quit()
root.destroy()
test=tk.Tk()
test.geometry('400x300')
label = Label(test, text= "Please enter valid comma separated Final Point.")
label.pack()
test.mainloop()
elif(final):
final=[(final[0]/resolution),(final[1]/resolution)]
else:
root.quit()
root.destroy()
test=tk.Tk()
test.geometry('400x300')
label = Label(test, text= "Please enter valid Final Point.")
label.pack()
test.mainloop()
root.quit()
root.destroy()
def a_star_rrt_planning(sx, sy, gx, gy, ox, oy, reso, rr,rect_corners,actions,plt):
"""
    sx: start x position [m]
gx: goal x position [m]
ox: x position list of Obstacles [m]
oy: y position list of Obstacles [m]
reso: grid resolution [m]
rr: robot radius[m]
"""
print("A* Algorithm.....")
nstart = Node(str(sx)+' '+
str(sy),(sx), (sy),0.0, np.array([0,0,0]), 0.0,0,0, -1)
ngoal = Node(str(gx)+' '+
str(gy),(gx), (gy),0.0, np.array([0,0,0]), 0.0,0,0, -1)
t=10
motion = get_motion_model(sx,sy,0,reso,t,actions)
openset, closedset = dict(), dict()
openset[calc_index(nstart)] = nstart
cost_queue=[(0,nstart.node)]
max_iter=2500;
check_obs_proximity=[nstart]
flag=0
count=0
new_prox_flag=0
ob_close_ctr=0
nodesList=[nstart]
if(verify_node(ngoal, reso, rect_corners) and verify_node(nstart, reso, rect_corners)):
while (openset):
count+=1
if(new_prox_flag==1):
check_obs_proximity=[current]
new_prox_flag=0
(cost1,c_id)=heappop(cost_queue)
current = openset[c_id]
# nodesList.append(current)
check= [v for v in (check_obs_proximity) if v.node == current.node]
if(len(check)==0):
for i in check_obs_proximity:
if(np.abs(current.x-i.x)<(0.2/resolution) and np.abs(current.y-i.y)<(0.2/resolution)):
flag=1
break;
else:
flag=0
if(flag==1):
check_obs_proximity.append(current)
else:
check_obs_proximity=[current]
if(len(check_obs_proximity)>max_iter):
print('Obstacle close -',ob_close_ctr)
new_prox_flag=1
ob_close_ctr+=1
if(ob_close_ctr>3):
print('Switching to RRT algorithm.....')
RRT(openset,closedset,cost_queue,nstart,ngoal, reso, rect_corners,t,actions, nodesList)
break
if (np.abs(current.x - ngoal.x)< (0.1/resolution) and np.abs(current.y - ngoal.y)< (0.1/resolution)):
ngoal.x=current.x
ngoal.y=current.y
ngoal.theta=current.theta
ngoal.pind = current.pind
ngoal.cost = current.cost
ngoal.vel = current.vel
ngoal.ul = current.ul
ngoal.ur = current.ur
ngoal.parent = current.parent
print('Goal found!')
break
# Remove the item from the open set
del openset[c_id]
# Add it to the closed set
closedset[c_id] = current
# expand search grid based on motion model
motion= get_motion_model(current.x,current.y,current.theta,reso,t,actions)
for i, _ in enumerate(motion):
node = Node(str(motion[i][0][0])+' '+
str(motion[i][0][1]),motion[i][0][0],
motion[i][0][1],
motion[i][0][2],
motion[i][1],
round(current.cost + motion[i][2],3),motion[i][3],motion[i][4], c_id)
n_id = calc_index(node)
if n_id in closedset:
continue
if not verify_node(node, reso, rect_corners):
continue
if n_id not in openset:
node.parent=current
openset[n_id] = node # Discover a new node
heappush(cost_queue, (node.cost+calc_heuristic(ngoal, node.x,node.y), node.node))
nodesList.append(node)
else:
#
if openset[n_id].cost >= node.cost:
node.parent=current
count= [i for ((c,v), i) in zip((cost_queue), range(len((cost_queue)))) if v == node.node][0]
cost_queue[count]=(node.cost+calc_heuristic(ngoal, node.x,node.y),node.node)
openset[n_id] = node
return nodesList,ngoal
else:
return 'Initial/Final Point in Obstacle!!',0
def calc_heuristic(n1, x,y):
d = (np.sqrt((x-n1.x)**2 + (y-n1.y)**2))
return d
def GetNearestListIndex(nodeList, rnd):
dlist = [((node.x - rnd[0]) ** 2 + (node.y - rnd[1])
** 2,c_id)for c_id,node in nodeList.items()]
minind,c_id = (min(dlist))
return c_id
def RRT(openQ,closedQ,costQ,start,end, res, rect_corners,t,actions,nodesList):
global resolution
expandDis=0.1/resolution
goalSampleRate=5
randArea=[-5.55, 5.55]
minrand = randArea[0]
maxrand = randArea[1]
while True:
# Random Sampling
if random.randint(0, 100) > goalSampleRate:
rnd = [random.uniform(minrand, maxrand), random.uniform(
minrand, maxrand)]
else:
rnd = [end.x, end.y]
# Find nearest node
nind = GetNearestListIndex(openQ, rnd)
# expand tree
nearestNode = openQ[nind]
motion= get_motion_model(nearestNode.x,nearestNode.y,nearestNode.theta,res,t,actions)
for i, _ in enumerate(motion):
newNode = Node(str(motion[i][0][0])+' '+
str(motion[i][0][1]),motion[i][0][0],
motion[i][0][1],
motion[i][0][2],
motion[i][1],
round(nearestNode.cost + motion[i][2],3),motion[i][3],motion[i][4], nind)
newNode.parent=nearestNode
if newNode.node in closedQ:
continue
if not verify_node(newNode, res, rect_corners):
continue
if newNode.node not in openQ:
openQ[newNode.node]=(newNode)
heappush(costQ, (newNode.cost+calc_heuristic(end, newNode.x,newNode.y), newNode.node))
nodesList.append(newNode)
else:
#
if openQ[newNode.node].cost >= newNode.cost:
count= [i for ((c,v), i) in zip((costQ), range(len((costQ)))) if v == newNode.node][0]
costQ[count]=(newNode.cost+calc_heuristic(end, newNode.x,newNode.y),newNode.node)
openQ[newNode.node] = newNode
# check goal
if (np.abs(newNode.x - end.x)< (expandDis) and np.abs(newNode.y - end.y)< (expandDis)):
end.x=newNode.x
end.y=newNode.y
end.theta=newNode.theta
end.pind = newNode.pind
end.cost = newNode.cost
end.vel = newNode.vel
end.ul = newNode.ul
end.ur = newNode.ur
end.parent = newNode.parent
print('Goal found!')
break
def verify_node(node, res, rect_corners):
global radius,clearance
x=node.x
y=node.y
d=(radius)+(clearance)
c1= ((x-(-1.65/res))*(x-(-1.65/res))+ (y-(4.6/res))*(y-(4.6/res)) - ((0.81/res)+d)*((0.81/res)+d))
c2= ((x-(-1.17/res))*(x-(-1.17/res))+ (y-(2.31/res))*(y-(2.31/res)) - ((0.81/res)+d)*((0.81/res)+d))
c3= ((x-(-1.17/res))*(x-(-1.17/res))+ (y-(-2.31/res))*(y-(-2.31/res)) - ((0.81/res)+d)*((0.81/res)+d))
c4= ((x-(-1.65/res))*(x-(-1.65/res))+ (y-(-4.6/res))*(y-(-4.6/res))- ((0.81/res)+d)*((0.81/res)+d))
#Capsule
u=-3.2516 #x-position of the center
v=3.2505 #y-position of the center
a=(3.1968-1.599)/2 #radius on the x-axis
b=1.599/2 #radius on the y-axis
r = [u-a, u+a,u+a, u-a]
s = [v-b, v-b, v+b,v+b]
u1=u-a
u2=u+a
e1= ((x-(u1/res))*(x-(u1/res))+ (y-(v/res))*(y-(v/res)) - ((b/res)+d)*((b/res)+d))
e2= ((x-(u2/res))*(x-(u2/res))+ (y-(v/res))*(y-(v/res)) - ((b/res)+d)*((b/res)+d))
exist=True
if (x>=(-5.55/res)+d and x<=(5.55/res)-d and y>=(-5.05/res)+d and y<=(5.05/res)-d):
for c in rect_corners:
if( x>=c[0][0]-d and x<=c[0][1]+d and y>=c[1][0]-d and y<=c[1][2]+d):
exist = False
if(exist is True):
if( x>=((r[0]/res)-d) and x<=((r[1]/res)+d) and y>=((s[0]/res)-d) and y<=((s[2]/res)+d)):
exist = False
elif (e1<=0):
exist=False
elif (e2<=0):
exist=False
elif (c1<=0):
exist=False
elif (c2<=0):
exist=False
elif (c3<=0):
exist=False
elif (c4<=0):
exist=False
else:
exist=True
else:
exist=False
return exist
def calc_obstacle_map(ox, oy, reso, vr):
minx = round(min(ox))
miny = round(min(oy))
maxx = round(max(ox))
maxy = round(max(oy))
xwidth = int(round(maxx - minx))
ywidth = int(round(maxy - miny))
return minx, miny, maxx, maxy, xwidth, ywidth
def calc_index(node):
return str(node.x)+' '+str(node.y)
def get_motion_model(x,y,theta,resolution,t,actions):
    # Each row of `actions` is a wheel-speed pair (Ul, Ur); c2c holds the pre-computed
    # cost-to-come increment of the corresponding action. The original action order is
    # kept: index 1:(50,50), 2:(100,50), 3:(50,0), 4:(100,0), 5:(0,100), 6:(0,50), 7:(50,100).
    c2c = [0.1833, 0.0916, 0.1374, 0.0458, 0.0916, 0.0916, 0.0458, 0.1374]
    motion = []  # each entry: [new pose, velocity, cost, Ul, Ur]
    for idx, c2cdiff in enumerate(c2c):
        ul = actions[idx, 0]
        ur = actions[idx, 1]
        [xy, v] = diffconstraints(ul, ur, x, y, theta, resolution, t)
        motion.append([xy, v, c2cdiff, ul, ur])
    return motion
def getxs_ys(xs,ys):
global resolution,radius,clearance,init,final
t = np.linspace(0, 2*np.pi, 100)
res=resolution
resolution=1
#Circles
#Circle 1
r1 = (0.81/2)/resolution
n1=-1.65/resolution #x-position of the center
m1=4.6/resolution #radius on the y-axis
p1=n1+r1*np.cos(t)
q1=m1+r1*np.sin(t)
for i in p1:
xs.append(i)
for i in q1:
ys.append(i)
#Circle 2
r2 =( 0.81/2)/resolution
n2=-1.17 /resolution #x-position of the center
m2=2.31/resolution #radius on the y-axis
p2=n2+r2*np.cos(t)
q2=m2+r2*np.sin(t)
for i in p2:
xs.append(i)
for i in q2:
ys.append(i)
#Circle 3
r3 = (0.81/2)/resolution
n3=-1.17/resolution #x-position of the center
m3=-2.31/resolution #radius on the y-axis
p3=n3+r3*np.cos(t)
q3=m3+r3*np.sin(t)
for i in p3:
xs.append(i)
for i in q3:
ys.append(i)
#Circle 4
r4 = (0.81/2)/resolution
n4=-1.65 /resolution #x-position of the center
m4=-4.6 /resolution #radius on the y-axis
p4=n4+r4*np.cos(t)
q4=m4+r4*np.sin(t)
for i in p4:
xs.append(i)
for i in q4:
ys.append(i)
#Capsule
u=-3.2516/resolution #x-position of the center
v=3.2505/resolution #y-position of the center
a=(((3.1968/resolution)-(1.599/resolution))/2) #radius on the x-axis
b=(1.599/2)/resolution #radius on the y-axis
r = [u-a, u+a,u+a, u-a]
s = [v-b, v-b, v+b,v+b]
for i in r:
xs.append(i)
for i in s:
ys.append(i)
u1=u-a
u2=u+a
r1=u1+b*np.cos(t)
s1=v+b*np.sin(t)
r2=u2+b*np.cos(t)
s2=v+b*np.sin(t)
for i in r1:
xs.append(i)
for i in s1:
ys.append(i)
for i in r2:
xs.append(i)
for i in s2:
ys.append(i)
# #Rectangles
rectangles =[[3.2,4.135,0.86,1.83],[4.495,4.595,0.43,0.91],[3.72,1.54,3.66,0.76],
[5.26,0.02,0.58,1.17],[5.095,-0.995,0.91,0.86],[5.26,-2.6825,0.58,1.17],
[4.635,-4.32,1.83,0.76],[2.825,-4.41,1.17,0.58],[0.56,-3.94,2.74,1.52],
[-0.715,-0.985,0.91,1.83],[0.660,-2.02,1.83,0.76],[3.06,-1.795,1.52,1.17]]
for i in range(len(rectangles)):
for j in range(4):
rectangles[i][j]=rectangles[i][j]/resolution
# fig, ax = plt.subplots()
##
##
# ax.fill(r,s,'r',edgecolor='b')
# ax.fill(r1,s1,'r')
# ax.fill(r2,s2,'r')
# ax.fill(p1,q1,'r',edgecolor='b')
# ax.fill(p2,q2,'r',edgecolor='b')
# ax.fill(p3,q3,'r',edgecolor='b')
# ax.fill(p4,q4,'r',edgecolor='b')
# ax.fill(uelpx, uelpy,'b')
rectangle_corner=[]
for i in (rectangles):
x = [i[0]-(i[2]/2), i[0]+(i[2]/2),i[0]+(i[2]/2), i[0]-(i[2]/2)]
y = [i[1]-(i[3]/2), i[1]-(i[3]/2), i[1]+(i[3]/2),i[1]+(i[3]/2)]
for j in x:
xs.append(j)
for j in y:
ys.append(j)
rectangle_corner.append([x,y])
# ax.fill(x, y,'r',edgecolor='b')
ucir1x=[]
ucir1y=[]
for i in range(len(p1)):
ucir1x.append(p1[i]+radius*np.cos(t))
ucir1y.append(q1[i]+radius*np.sin(t))
ucir2x=[]
ucir2y=[]
for i in range(len(p2)):
ucir2x.append(p2[i]+radius*np.cos(t))
ucir2y.append(q2[i]+radius*np.sin(t))
ucir3x=[]
ucir3y=[]
for i in range(len(p3)):
ucir3x.append(p3[i]+radius*np.cos(t))
ucir3y.append(q3[i]+radius*np.sin(t))
ucir4x=[]
ucir4y=[]
for i in range(len(p4)):
ucir4x.append(p4[i]+radius*np.cos(t))
ucir4y.append(q4[i]+radius*np.sin(t))
ucap1x=[]
ucap1y=[]
for i in range(len(r1)):
ucap1x.append(r1[i]+radius*np.cos(t))
ucap1y.append(s1[i]+radius*np.sin(t))
ucap2x=[]
ucap2y=[]
for i in range(len(r2)):
ucap2x.append(r2[i]+radius*np.cos(t))
ucap2y.append(s2[i]+radius*np.sin(t))
uboxx=[]
uboxy=[]
for i in range(4):
uboxx.append(r[i]+radius*np.cos(t))
uboxy.append(s[i]+radius*np.sin(t) )
urecBoxes=[]
for i in rectangle_corner:
uboxrx=[]
uboxry=[]
for j in range(4):
uboxrx.append(i[0][j]+radius*np.cos(t))
            uboxry.append(i[1][j]+radius*np.sin(t))
import numpy as np
import scipy.stats as stats
import scipy.linalg as la
import scipy.optimize as optimize
import scipy.integrate as integrate
import sklearn.linear_model
import kernels
import ep_fast
#import EP_cython
np.set_printoptions(precision=4, linewidth=200)
class GradientFields():
def __init__(self, K_nodiag, s0, t_i, prev):
normPDF = stats.norm(0,1)
try: t_i[0]
except: t_i = np.zeros(K_nodiag.shape[0]) + t_i
#general computations (always the same if the fixed effects are 0!!!!!)
self.Ki = normPDF.sf(t_i)
self.Ps = s0 + (1-s0)*self.Ki
self.Pi = self.Ki / self.Ps
self.stdY = np.sqrt(self.Pi * (1-self.Pi))
#compute Atag0 and B0
self.phi_ti = normPDF.pdf(t_i)
self.phitphit = np.outer(self.phi_ti, self.phi_ti)
self.stdY_mat = np.outer(self.stdY, self.stdY)
mat1_temp = self.phi_ti / self.stdY
self.mat1 = np.outer(mat1_temp, mat1_temp)
sumProbs_temp = np.tile(self.Pi, (K_nodiag.shape[0], 1))
sumProbs = sumProbs_temp + sumProbs_temp.T
Atag0_B0_inner_vec = self.Pi*(1-s0)
self.mat2 = np.outer(Atag0_B0_inner_vec, Atag0_B0_inner_vec) + 1-sumProbs*(1-s0)
self.Atag0 = self.mat1*self.mat2
self.B0 = np.outer(self.Ps, self.Ps)
#Compute the elements of the function value (the squared distance between the observed and expected pairwise phenotypic covariance)
self.K_nodiag_AB0 = K_nodiag * self.Atag0/self.B0
self.K_nodiag_sqr_AB0 = K_nodiag * self.K_nodiag_AB0
class PrevTest():
def __init__(self, n, m, prev, useFixed, h2Scale=1.0, prng=None, num_generate=None):
self.prng = prng
if (prng is None): self.prng = np.random.RandomState(args.seed)
self.n = n
self.useFixed = useFixed
self.h2Scale = h2Scale
if num_generate is None:
if prev == 0.5:
numGeno = n
else:
numGeno = np.maximum(int(float(self.n)/float(2*prev)), 25000)
else:
numGeno = num_generate
#generate SNPs
mafs = self.prng.rand(m) * 0.45 + 0.05
self.X = prng.binomial(2, mafs, size=(numGeno, m)).astype(np.float)
mafs_estimated = mafs.copy()
self.X_estimated = self.X.copy()
self.X -= 2*mafs
self.X_estimated -= 2*mafs_estimated
self.X /= np.sqrt(2*mafs*(1-mafs))
self.X_estimated /= np.sqrt(2*mafs_estimated*(1-mafs_estimated))
self.m = m
self.n = n
X_mean_diag = np.mean(np.einsum('ij,ij->i', self.X, self.X)) / self.X.shape[1]
X_estimated_mean_diag = np.mean(np.einsum('ij,ij->i', self.X_estimated, self.X_estimated)) / self.X.shape[1]
self.diag_ratio = X_estimated_mean_diag / X_mean_diag
self.prev = prev
#approx coeffs lam_i and c_i for logistic likelihood
self.logistic_c = np.array([1.146480988574439e+02, -1.508871030070582e+03, 2.676085036831241e+03, -1.356294962039222e+03, 7.543285642111850e+01])
self.logistic_lam = np.sqrt(2)*np.array([0.44 ,0.41, 0.40, 0.39, 0.36])
self.logistic_lam2 = self.logistic_lam**2
self.logistic_clam = self.logistic_c * self.logistic_lam
def genData(self, h2, eDist, numFixed, ascertain=True, scaleG=False, extraSNPs=0, fixedVar=0, frac_cases=0.5, kernel='linear', rbf_scale=1.0):
args.seed += 1
self.true_h2 = h2
self.ascertain = ascertain
self.eDist = eDist
if (numFixed==0): fixedVar=0
if (numFixed > 0): assert fixedVar>0
self.fixedVar = fixedVar
self.covars = self.prng.randn(self.X.shape[0], numFixed)
if (eDist == 'normal' and not scaleG): sig2g = h2/(1-h2)
elif (eDist == 'normal' and scaleG): sig2g = h2
elif (eDist == 'logistic' and not scaleG): sig2g = (np.pi**2)/3.0 * h2 / (1 - h2)
elif (eDist == 'logistic' and scaleG): sig2g = h2
        else: raise ValueError('unknown e_dist. Valid values are normal, logistic')
if kernel == 'linear':
self.beta = self.prng.randn(self.m) * np.sqrt(sig2g/self.m) #generate effect sizes
self.g = self.X.dot(self.beta) #generate genetic effects
self.g_estimated = self.X_estimated.dot(self.beta)
elif args.kernel == 'rbf':
assert scaleG
kernel_obj = kernels.ScaledKernel(kernels.RBFKernel(self.X))
K = kernel_obj.getTrainKernel(np.array([np.log(rbf_scale), np.log(sig2g) / 2.0]))
L = la.cholesky(K, lower=True, overwrite_a=True)
self.g = L.dot(np.random.randn(K.shape[0]))
if np.allclose(self.X, self.X_estimated):
self.g_estimated = self.g.copy()
else:
kernel_obj_estimated = kernels.ScaledKernel(kernels.RBFKernel(self.X_estimated))
K_estimated = kernel_obj_estimated.getTrainKernel(np.array([np.log(rbf_scale), np.log(sig2g) / 2.0]))
L_estimated = la.cholesky(K_estimated, lower=True, overwrite_a=True)
self.g_estimated = L_estimated.dot(np.random.randn(K_estimated.shape[0]))
else:
raise ValueError('unknown kernel')
#create identical twins if needed
if self.prev == 0.5:
numGeno = self.n
else:
numGeno = np.maximum(int(float(self.n)/float(2*self.prev)), 25000)
self.fixedEffects = np.ones(numFixed) * (0 if (numFixed==0) else np.sqrt(fixedVar / numFixed))
self.covars = self.prng.randn(self.g.shape[0], numFixed)
m = self.covars.dot(self.fixedEffects)
self.g += m
self.g_estimated += m
if (eDist == 'logistic' and numFixed>0): raise ValueError('logistic distribution with fixed effects not supported')
#generate environmental effect
if (eDist == 'normal' and not scaleG): e = self.prng.randn(self.g.shape[0])
elif (eDist == 'normal' and scaleG): e = self.prng.randn(self.g.shape[0]) * np.sqrt(1 - sig2g)# - (fixedVar if (numFixed>0) else 0))
elif (eDist == 'logistic' and not scaleG): e = stats.logistic(0,1).rvs(self.g.shape[0])
elif (eDist == 'logistic' and scaleG): e = stats.logistic(0,1).rvs(self.g.shape[0]) * np.sqrt(1-sig2g) / np.sqrt((np.pi**2)/3.0)
else: raise ValueError('unknown e distribution: ' + self.eDist)
self.yAll = self.g + e
self.yAll_estimated = self.g_estimated + e
        self.affCutoff = np.percentile(self.yAll, 100*(1-self.prev))
#! /usr/bin/env python
import numpy
initial_values = { "s": '0238x6011044',
"datablock": None , # ignoring for now because it's so fucking long
"x_Arai": numpy.array([ 0., 0.03917767, 0.06612789, 0.1218238 , 0.18767432, 0.21711849, 0.32091412, 0.48291503, 0.72423703, 1.03139876, 1.27786578, 1.62771574, 1.96549027, 2.50849607, 3.22408793, 3.99894094, 4.38250182]),
"y_Arai": numpy.array([ 1., 1.01807229, 1., 0.98795181, 0.95783133, 0.96987952, 0.98192771, 0.93373494, 0.90361446, 0.81325301, 0.81927711, 0.70481928, 0.70481928, 0.50542169, 0.42168675, 0.13012048, 0.08253012]) ,
"t_Arai": [273, 373.0, 423.0, 473.0, 498.0, 523.0, 548.0, 573.0, 598.0, 623.0, 648.0, 673.0, 698.0, 723.0, 748.0, 773.0, 798.0],
"x_Arai_segment": numpy.array([ 0.1218238 , 0.18767432, 0.21711849, 0.32091412, 0.48291503, 0.72423703, 1.03139876]),
"y_Arai_segment": numpy.array([ 0.98795181, 0.95783133, 0.96987952, 0.98192771, 0.93373494, 0.90361446, 0.81325301]) ,
"x_Arai_mean": 0.44086879270917517,
"y_Arai_mean": 0.93545611015490526,
"x_tail_check": numpy.array([ 0.1218238 , 0.21711849, 0.48291503, 1.03139876, 1.62771574, 2.50849607, 3.99894094]),
"y_tail_check": numpy.array([ 0.98795181, 0.98192771, 0.93373494, 0.84337349, 0.76506024, 0.58313253, 0.17349398]),
"tail_checks_temperatures": numpy.array([ 473., 523., 573., 623., 673., 723., 773.]) ,
"tail_checks_starting_temperatures": numpy.array([ 473., 523., 573., 623., 673., 723., 773.]) ,
"x_ptrm_check": numpy.array([ 0.03622917, 0.10279838, 0.2177615 , 0.49466588, 0.97717872, 1.56873716, 2.43269559]),
"y_ptrm_check": [1.0180722891566265, 0.9879518072289156, 0.9698795180722891, 0.9337349397590361, 0.8132530120481928, 0.7048192771084337, 0.505421686746988], # actually does NOT appear to be a numpy array. weird. maybe it should be, though......?
"ptrm_checks_temperatures": numpy.array([ 373., 473., 523., 573., 623., 673., 723.]),
"ptrm_checks_starting_temperatures": numpy.array([ 473., 523., 573., 623., 673., 723., 773.]),
"zijdblock": [[273.0, 277.5, 79.6, 1.66e-09, 1, 'g', ''], [373.0, 277.3, 81.5, 1.69e-09, 0, 'g', ''], [423.0, 254.2, 85.6, 1.66e-09, 0, 'g', ''], [473.0, 251.7, 86.7, 1.64e-09, 1, 'g', ''], [498.0, 243.3, 86.6, 1.59e-09, 0, 'g', ''], [523.0, 236.4, 87.8, 1.61e-09, 1, 'g', ''], [548.0, 208.9, 86.3, 1.63e-09, 0, 'g', ''], [573.0, 241.3, 87.4, 1.55e-09, 1, 'g', ''], [598.0, 221.4, 87.1, 1.5e-09, 0, 'g', ''], [623.0, 228.5, 86.9, 1.35e-09, 1, 'g', ''], [648.0, 219.1, 87.2, 1.36e-09, 0, 'g', ''], [673.0, 241.2, 86.5, 1.17e-09, 1, 'g', ''], [698.0, 241.1, 86.7, 1.17e-09, 0, 'g', ''], [723.0, 235.8, 86.2, 8.39e-10, 1, 'g', ''], [748.0, 254.9, 86.4, 7e-10, 0, 'g', ''], [773.0, 264.8, 79.7, 2.16e-10, 1, 'g', ''], [798.0, 287.1, 81.3, 1.37e-10, 0, 'g', ''], [823.0, 5.5, 67.0, 1.54e-11, 1, 'g', '']],
"z_temperatures": [273.0, 373.0, 423.0, 473.0, 498.0, 523.0, 548.0, 573.0, 598.0, 623.0, 648.0, 673.0, 698.0, 723.0, 748.0, 773.0, 798.0, 823.0], # also not a numpy array
"start": 3,
"end": 9,
"pars": {'specimen_int_n': 7, 'lab_dc_field': 4e-05},
"specimen_Data": None,
"tmin": 473.0,
"tmax": 623.0,
"tmin_K": 200.,
"tmax_K": 350.
}
#self.stuff = ["s", "datablock", "x_Arai", "y_Arai", "t_Arai", "x_Arai_segment", "y_Arai_segment", "x_tail_check", "y_tail_check", "tail_checks_temperatures", "tail_checks_starting_temperatures", "x_ptrm_check", "y_ptrm_check", "ptrm_checks_temperatures", "ptrm_checks_starting_temperatures", "zijdblock", "z_temperatures", "start", "end", "pars", "specimen_Data", "tmin", "tmax", "tmin_K", "tmax_K"]
York_Regression_values = {
'specimen_YT': 1.0168795878275072,
'delta_x_prime': 0.9277422683265637,
'B_anc': 7.3875474081300247,
'count_IZ': 9,
'count_ZI': 8,
'x_err': | numpy.array([-0.31904499, -0.25319447, -0.22375031, -0.11995467, 0.04204624, 0.28336824, 0.59052997]) | numpy.array |
from gym.spaces import Space
import numpy as np
import torch
import torch.nn as nn
from brains.i_brain import IBrain
from tools.configurations import LSTMCfg
class LSTM(IBrain):
def __init__(self, input_space: Space, output_space: Space, individual: np.ndarray, config: LSTMCfg):
super().__init__(input_space, output_space, individual, config)
self.config = config
self.input_space = input_space
self.input_size = self._size_from_space(input_space)
self.output_size = self._size_from_space(output_space)
self.lstm_num_layers = config.lstm_num_layers
def step(self, ob: np.ndarray):
pass
@classmethod
def get_individual_size(cls, config: LSTMCfg, input_space: Space, output_space: Space):
input_size = cls._size_from_space(input_space)
output_size = cls._size_from_space(output_space)
lstm_num_layers = config.lstm_num_layers
individual_size = 0
# Calculate the number of weights as depicted in https://pytorch.org/docs/stable/nn.html#torch.nn.LSTM
if lstm_num_layers > 0:
individual_size += 4 * output_size * (input_size + output_size)
if config.use_bias:
individual_size += 8 * output_size
for i in range(1, lstm_num_layers):
# Here it is assumed that the LSTM is not bidirectional
individual_size += 8 * output_size * output_size
if config.use_bias:
individual_size += 8 * output_size
return individual_size
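        # Worked example (hypothetical sizes): with input_size=3, output_size=2,
        # lstm_num_layers=1 and use_bias=True this gives 4*2*(3+2) = 40 weights
        # plus 8*2 = 16 biases, i.e. an individual of length 56.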
class LSTMPyTorch(nn.Module, LSTM):
def __init__(self, input_space: Space, output_space: Space, individual: np.ndarray, config: LSTMCfg):
nn.Module.__init__(self)
LSTM.__init__(self, input_space, output_space, individual, config)
assert len(individual) == self.get_individual_size(
config=config, input_space=input_space, output_space=output_space)
if self.lstm_num_layers <= 0:
raise RuntimeError("LSTMs need at least one layer.")
individual = np.array(individual, dtype=np.float32)
# Disable tracking of the gradients since backpropagation is not used
with torch.no_grad():
self.lstm = nn.LSTM(
self.input_size, self.output_size, num_layers=self.lstm_num_layers, bias=config.use_bias)
# Iterate through all layers and assign the weights from the individual
current_index = 0
for i in range(self.lstm_num_layers):
attr_weight_ih_li = "weight_ih_l{}".format(i)
attr_weight_hh_li = "weight_hh_l{}".format(i)
weight_ih_li = getattr(self.lstm, attr_weight_ih_li)
weight_hh_li = getattr(self.lstm, attr_weight_hh_li)
weight_ih_li_size = np.prod(weight_ih_li.size())
weight_hh_li_size = np.prod(weight_hh_li.size())
# Do not forget to reshape back again
weight_ih_li.data = torch.from_numpy(
individual[current_index: current_index + weight_ih_li_size]).view(weight_ih_li.size())
current_index += weight_ih_li_size
weight_hh_li.data = torch.from_numpy(
individual[current_index: current_index + weight_hh_li_size]).view(weight_hh_li.size())
current_index += weight_hh_li_size
if config.use_bias:
attr_bias_ih_li = "bias_ih_l{}".format(i)
attr_bias_hh_li = "bias_hh_l{}".format(i)
bias_ih_li = getattr(self.lstm, attr_bias_ih_li)
bias_hh_li = getattr(self.lstm, attr_bias_hh_li)
bias_ih_li_size = bias_ih_li.size()[0]
bias_hh_li_size = bias_hh_li.size()[0]
bias_ih_li.data = torch.from_numpy(individual[current_index: current_index + bias_ih_li_size])
current_index += bias_ih_li_size
bias_hh_li.data = torch.from_numpy(individual[current_index: current_index + bias_hh_li_size])
current_index += bias_hh_li_size
assert current_index == len(individual)
# TODO Maybe the hidden values can be initialized differently
self.hidden = (
torch.randn(self.lstm_num_layers, 1, self.output_size),
torch.randn(self.lstm_num_layers, 1, self.output_size)
)
def step(self, ob: np.ndarray):
if self.config.normalize_input:
ob = self._normalize_input(ob, self.input_space, self.config.normalize_input_target)
with torch.no_grad():
# Input requires the form (seq_len, batch, input_size)
out, self.hidden = self.lstm(torch.from_numpy(ob.astype(np.float32)).view(1, 1, -1), self.hidden)
return out.view(self.output_size).numpy()
class LSTMNumPy(LSTM):
def __init__(self, input_space: Space, output_space: Space, individual: np.ndarray, config: LSTMCfg):
super().__init__(input_space, output_space, individual, config)
if self.lstm_num_layers <= 0:
raise RuntimeError("LSTMs need at least one layer.")
# Initialize the first layer, shape will be used for following layers
# First dimension is 4 because it represents the weights for each of the four gates (input, forget, cell, output
# gate)
self.weight_ih_l0 = np.random.randn(4, self.output_size, self.input_size).astype(np.float32)
self.weight_hh_l0 = np.random.randn(4, self.output_size, self.output_size).astype(np.float32)
if config.use_bias:
self.bias_ih_l0 = np.random.randn(4, self.output_size).astype(np.float32)
self.bias_hh_l0 = np.random.randn(4, self.output_size).astype(np.float32)
else:
self.bias_ih_l0 = self.bias_hh_l0 = np.zeros((4, self.output_size)).astype(np.float32)
self.hidden = np.random.randn(self.lstm_num_layers, self.output_size).astype(np.float32)
self.cell_state = np.random.randn(self.lstm_num_layers, self.output_size).astype(np.float32)
individual = np.array(individual, dtype=np.float32)
current_index = 0
if self.lstm_num_layers > 0:
self.weight_ih_l0 = individual[current_index:current_index + self.weight_ih_l0.size].reshape(
self.weight_ih_l0.shape)
current_index += self.weight_ih_l0.size
self.weight_hh_l0 = individual[current_index:current_index + self.weight_hh_l0.size].reshape(
self.weight_hh_l0.shape)
current_index += self.weight_hh_l0.size
if config.use_bias:
self.bias_ih_l0 = individual[current_index:current_index + self.bias_ih_l0.size].reshape(
self.bias_ih_l0.shape)
current_index += self.bias_ih_l0.size
self.bias_hh_l0 = individual[current_index:current_index + self.bias_hh_l0.size].reshape(
self.bias_hh_l0.shape)
current_index += self.bias_hh_l0.size
weight_shape = (4, self.output_size, self.output_size)
bias_shape = (4, self.output_size)
weight_size = np.prod(weight_shape)
bias_size = np.prod(bias_shape)
# Weights for following layers have not been created
for i in range(1, self.lstm_num_layers):
setattr(
self,
"weight_ih_l{}".format(i),
individual[current_index:current_index + weight_size].reshape(weight_shape))
current_index += weight_size
setattr(
self,
"weight_hh_l{}".format(i),
individual[current_index:current_index + weight_size].reshape(weight_shape))
current_index += weight_size
attr_bias_ih_li = "bias_ih_l{}".format(i)
attr_bias_hh_li = "bias_hh_l{}".format(i)
if config.use_bias:
setattr(self, attr_bias_ih_li, individual[current_index:current_index + bias_size].reshape(bias_shape))
current_index += bias_size
setattr(self, attr_bias_hh_li, individual[current_index:current_index + bias_size].reshape(bias_shape))
current_index += bias_size
else:
# Initialize all unused biases with zeros, since they only get added during a step
# This removes the need to check for biases every time a prediction is made
setattr(self, attr_bias_ih_li, np.zeros(bias_shape).astype(np.float32))
setattr(self, attr_bias_hh_li, np.zeros(bias_shape).astype(np.float32))
assert current_index == len(individual)
@staticmethod
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def step(self, ob: np.ndarray):
if self.config.normalize_input:
ob = self._normalize_input(ob, self.input_space, self.config.normalize_input_target)
x = ob.astype(np.float32)
# The input for the i-th layer is the (i-1)-th hidden feature or if i==0 the input
# Calculated as in the PyTorch description of the LSTM:
# https://pytorch.org/docs/stable/nn.html#torch.nn.LSTM
for i in range(self.lstm_num_layers):
weight_ih_li = getattr(self, "weight_ih_l{}".format(i))
weight_hh_li = getattr(self, "weight_hh_l{}".format(i))
# Even if the bias is not used, it was initialized (to zeros in this case)
bias_ih_li = getattr(self, "bias_ih_l{}".format(i))
bias_hh_li = getattr(self, "bias_hh_l{}".format(i))
# Input Gate
i_t = self.sigmoid(np.dot(weight_ih_li[0], x) + bias_ih_li[0] + np.dot(weight_hh_li[0], self.hidden[i]) + bias_hh_li[0])
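# --- Hedged reconstruction: the original file is truncated after the input gate. ---
# The remaining gates follow the PyTorch LSTM equations referenced above (gate order
# input, forget, cell, output); the lines below are a plausible sketch of how the
# step continues, not verbatim source code.
# Forget Gate
f_t = self.sigmoid(np.dot(weight_ih_li[1], x) + bias_ih_li[1] + np.dot(weight_hh_li[1], self.hidden[i]) + bias_hh_li[1])
# Cell Gate
g_t = np.tanh(np.dot(weight_ih_li[2], x) + bias_ih_li[2] + np.dot(weight_hh_li[2], self.hidden[i]) + bias_hh_li[2])
# Output Gate
o_t = self.sigmoid(np.dot(weight_ih_li[3], x) + bias_ih_li[3] + np.dot(weight_hh_li[3], self.hidden[i]) + bias_hh_li[3])
# Update cell and hidden state of layer i; the hidden state is the input to layer i+1
self.cell_state[i] = f_t * self.cell_state[i] + i_t * g_t
self.hidden[i] = o_t * np.tanh(self.cell_state[i])
x = self.hidden[i]
# After iterating over all layers, the last hidden state is the network output
return self.hidden[-1]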
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : <EMAIL>
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : deep-learning-with-python-notebooks
@File : ch0601_raw_text_to_word_embedding.py
@Version : v0.1
@Time : 2019-11-23 14:26
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : "Deep Learning with Python, Francois Chollet", Sec060103, P155
@Desc : Deep learning for text and sequences, working with text data: from raw text to word embeddings
"""
import os
import sys
import matplotlib.pyplot as plt
import numpy as np # pip install "numpy<1.17"; versions below 1.17 do not raise the warning
import winsound
from keras.activations import relu, sigmoid
from keras.layers import Dense, Flatten
from keras.layers import Embedding
from keras.losses import binary_crossentropy
from keras.models import Sequential
from keras.optimizers import rmsprop
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from tools import plot_classes_results
# Suppress the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Set the printed data precision to 3 decimal places
np.set_printoptions(precision = 3, suppress = True, threshold = np.inf, linewidth = 200)
# to make this notebook's output stable across runs
seed = 42
np.random.seed(seed)
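# ----------------------------------------------------------------------------
# Hedged sketch (not part of the original, truncated script): a minimal model in
# the spirit of the imports above - an Embedding + Flatten + Dense classifier.
# The helper name and all sizes below are illustrative assumptions, not values
# taken from the original file.
def build_embedding_model(max_words=10000, maxlen=100, embedding_dim=8):
    model = Sequential()
    # Learn an embedding_dim-dimensional vector for each of the max_words tokens
    model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
    # Flatten the (maxlen, embedding_dim) activations into one feature vector
    model.add(Flatten())
    # Binary classification head
    model.add(Dense(1, activation=sigmoid))
    model.compile(optimizer='rmsprop', loss=binary_crossentropy, metrics=['acc'])
    return model
# Typical preprocessing with the imported Tokenizer/pad_sequences (placeholder texts):
# tokenizer = Tokenizer(num_words=10000)
# tokenizer.fit_on_texts(texts)
# data = pad_sequences(tokenizer.texts_to_sequences(texts), maxlen=100)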
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of the SimCenter Backend Applications
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the BSD 3-Clause License along with
# this file. If not, see <http://www.opensource.org/licenses/>.
#
# Contributors:
# <NAME>
# <NAME>
"""
These are unit and integration tests on the auto_HU_NJ module.
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
import json
import os
import inspect
# Importing auto_HU_NJ module
from auto_HU_NJ import *
# Current directory
cur_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# Test input directory
base_input_path = 'resources'
def test_parse_BIM():
"""
Testing the parse_BIM function.
"""
# Testing the ruleset for Hurricane-Prone Region (HPR)
res = []
ref = [1, 0, 1, 0]
for i in range(4):
BIM_dir = os.path.join(cur_dir, base_input_path, 'BIM_Data',
'parse_BIM_test_' + str(i+1) + '.json')
with open(BIM_dir) as f:
BIM_input = json.load(f)
BIM_output = parse_BIM(BIM_input['GI'])
res.append(int(BIM_output['HPR']))
# Check
assert_allclose(res, ref, atol=1e-5)
# Testing the ruleset for Wind Borne Debris (WBD)
res = []
ref = [0, 0, 0, 0, 1, 1, 1, 1]
for i in range(8):
BIM_dir = os.path.join(cur_dir, base_input_path, 'BIM_Data',
'parse_BIM_test_' + str(i+1) + '.json')
with open(BIM_dir) as f:
BIM_input = json.load(f)
BIM_output = parse_BIM(BIM_input['GI'])
res.append(int(BIM_output['WBD']))
# Check
assert_allclose(res, ref, atol=1e-5)
# Testing the ruleset for terrain
res = []
ref = [3, 15, 35, 70, 3, 15, 35, 70]
for i in range(8):
BIM_dir = os.path.join(cur_dir, base_input_path, 'BIM_Data',
'parse_BIM_test_' + str(i+1) + '.json')
with open(BIM_dir) as f:
BIM_input = json.load(f)
BIM_output = parse_BIM(BIM_input['GI'])
res.append(int(BIM_output['terrain']))
# Check
assert_allclose(res, ref, atol=1e-5)
def test_building_class():
"""
Testing the building class function.
"""
# Testing the ruleset for classifying Hazus building class
res = []
ref_class = ['WSF', 'WMUH', 'SERB', 'SECB', 'SPMB', 'CERB', 'CECB', 'MSF',
'MERB', 'MECB', 'MLRI', 'MMUH', 'MLRM']
ref = np.ones(13)
for i in range(13):
data_dir = os.path.join(cur_dir, base_input_path, 'BuildingClass_Data',
'building_class_test_' + str(i+1) + '.json')
with open(data_dir) as f:
data_input = json.load(f)
tmp = parse_BIM(data_input['GI'])
data_output = building_class(tmp)
print(data_output)
res.append(int(data_output == ref_class[i]))
# Check
assert_allclose(res, ref, atol=1e-5)
def test_WSF_config():
"""
Testing the WSF_config function.
"""
res = []
ref_class = ['WSF2_gab_0_8d_tnail_no',
'WSF2_gab_1_8d_tnail_no',
'WSF2_hip_1_8d_tnail_no',
'WSF2_hip_0_8d_tnail_no',
'8s_strap_no',
'8s_strap_no',
'8s_tnail_no',
'8s_strap_sup',
'8d_strap_std',
'8d_tnail_wkd',
'WSF1']
ref = np.ones(11)
for i in range(11):
data_dir = os.path.join(cur_dir, base_input_path, 'Config_Data',
'wsf_test_' + str(i+1) + '.json')
with open(data_dir) as f:
data_input = json.load(f)
tmp = parse_BIM(data_input['GI'])
data_output = WSF_config(tmp)
print(data_output)
res.append(int(ref_class[i] in data_output))
# Check
assert_allclose(res, ref, atol=1e-5)
def test_WMUH_config():
"""
Testing the WMUH_config function.
"""
res = []
ref_class = ['WMUH2_flt_spm_god_null',
'WMUH2_flt_spm_god_null',
'WMUH2_flt_spm_god_null',
'WMUH2_gab_null_null_1',
'WMUH2_hip_null_null_1',
'WMUH2_gab_null_null_0',
'WMUH2_hip_null_null_0',
'WMUH2_flt_spm_por_null',
'WMUH2_flt_bur_por_null',
'WMUH2_flt_spm_god_null_8s',
'WMUH2_flt_spm_god_null_8d',
'WMUH2_flt_spm_god_null_8s',
'WMUH2_flt_spm_god_null_8d',
'strap',
'tnail',
'tnail',
'tnail_1',
'WMUH3']
ref = np.ones(18)
for i in range(18):
data_dir = os.path.join(cur_dir, base_input_path, 'Config_Data',
'wmuh_test_' + str(i+1) + '.json')
with open(data_dir) as f:
data_input = json.load(f)
tmp = parse_BIM(data_input['GI'])
data_output = WMUH_config(tmp)
print(data_output)
res.append(int(ref_class[i] in data_output))
# Check
assert_allclose(res, ref, atol=1e-5)
def test_MSF_config():
"""
Testing the MSF_config function.
"""
res = []
ref_class = ['nav_1',
'nav_0',
'8s',
'8d',
'8s',
'8d',
'MSF2']
ref = np.ones(7)
for i in range(7):
data_dir = os.path.join(cur_dir, base_input_path, 'Config_Data',
'msf_test_' + str(i+1) + '.json')
with open(data_dir) as f:
data_input = json.load(f)
tmp = parse_BIM(data_input['GI'])
data_output = MSF_config(tmp)
print(data_output)
res.append(int(ref_class[i] in data_output))
# Check
assert_allclose(res, ref, atol=1e-5)
def test_MMUH_config():
"""
Testing the MMUH_config function.
"""
res = []
ref_class = ['flt_1_spm_god',
'flt_1_spm_por',
'flt_1_bur_por',
'8s_strap',
'8d_strap',
'8d_tnail',
'8s_strap',
'8d_tnail',
'MMUH3']
ref = np.ones(9)
for i in range(9):
data_dir = os.path.join(cur_dir, base_input_path, 'Config_Data',
'mmuh_test_' + str(i+1) + '.json')
with open(data_dir) as f:
data_input = json.load(f)
tmp = parse_BIM(data_input['GI'])
data_output = MMUH_config(tmp)
print(data_output)
res.append(int(ref_class[i] in data_output))
# Check
assert_allclose(res, ref, atol=1e-5)
def test_MLRM_config():
"""
Testing the MLRM_config function.
"""
res = []
ref_class = ['spm',
'bur',
'C',
'D',
'A',
'6d_god',
'6d_por',
'std',
'sup',
'A_1_sup',
'sgl',
'mlt']
ref = np.ones(12)
for i in range(12):
data_dir = os.path.join(cur_dir, base_input_path, 'Config_Data',
'mlrm_test_' + str(i+1) + '.json')
with open(data_dir) as f:
data_input = json.load(f)
tmp = parse_BIM(data_input['GI'])
data_output = MLRM_config(tmp)
print(data_output)
res.append(int(ref_class[i] in data_output))
# Check
assert_allclose(res, ref, atol=1e-5)
def test_MLRI_config():
"""
Testing the MLRI_config function.
"""
res = []
ref_class = ['sup',
'std',
'god',
'por',
'god',
'por']
ref = np.ones(6)
for i in range(6):
data_dir = os.path.join(cur_dir, base_input_path, 'Config_Data',
'mlri_test_' + str(i+1) + '.json')
with open(data_dir) as f:
data_input = json.load(f)
tmp = parse_BIM(data_input['GI'])
data_output = MLRI_config(tmp)
print(data_output)
res.append(int(ref_class[i] in data_output))
# Check
assert_allclose(res, ref, atol=1e-5)
def test_MERB_config():
"""
Testing the MERB_config function.
"""
res = []
ref_class = ['bur',
'spm',
'bur',
'C',
'D',
'A',
'std',
'sup',
'low',
'med',
'hig',
'MERBL',
'MERBM',
'MERBH']
ref = np.ones(14)
for i in range(14):
data_dir = os.path.join(cur_dir, base_input_path, 'Config_Data',
'merb_test_' + str(i+1) + '.json')
with open(data_dir) as f:
data_input = json.load(f)
tmp = parse_BIM(data_input['GI'])
data_output = MERB_config(tmp)
print(data_output)
res.append(int(ref_class[i] in data_output))
# Check
assert_allclose(res, ref, atol=1e-5)
def test_MECB_config():
"""
Testing the MECB_config function.
"""
res = []
ref_class = ['bur',
'spm',
'bur',
'C',
'D',
'A',
'std',
'sup',
'low',
'med',
'hig',
'MECBL',
'MECBM',
'MECBH']
ref = np.ones(14)
for i in range(14):
data_dir = os.path.join(cur_dir, base_input_path, 'Config_Data',
'mecb_test_' + str(i+1) + '.json')
with open(data_dir) as f:
data_input = json.load(f)
tmp = parse_BIM(data_input['GI'])
data_output = MECB_config(tmp)
print(data_output)
res.append(int(ref_class[i] in data_output))
# Check
assert_allclose(res, ref, atol=1e-5)
def test_CECB_config():
"""
Testing the CECB_config function.
"""
res = []
ref_class = ['bur',
'spm',
'bur',
'C',
'D',
'A',
'low',
'med',
'hig',
'CECBL',
'CECBM',
'CECBH']
ref = np.ones(12)
for i in range(12):
data_dir = os.path.join(cur_dir, base_input_path, 'Config_Data',
'cecb_test_' + str(i+1) + '.json')
with open(data_dir) as f:
data_input = json.load(f)
tmp = parse_BIM(data_input['GI'])
data_output = CECB_config(tmp)
print(data_output)
res.append(int(ref_class[i] in data_output))
# Check
assert_allclose(res, ref, atol=1e-5)
def test_CERB_config():
"""
Testing the CERB_config function.
"""
res = []
ref_class = ['bur',
'spm',
'bur',
'C',
'D',
'A',
'low',
'med',
'hig',
'CERBL',
'CERBM',
'CERBH']
ref = np.ones(12)
for i in range(12):
data_dir = os.path.join(cur_dir, base_input_path, 'Config_Data',
'cerb_test_' + str(i+1) + '.json')
with open(data_dir) as f:
data_input = json.load(f)
tmp = parse_BIM(data_input['GI'])
data_output = CERB_config(tmp)
print(data_output)
res.append(int(ref_class[i] in data_output))
# Check
assert_allclose(res, ref, atol=1e-5)
def test_SPMB_config():
"""
Testing the SPMB_config function.
"""
res = []
ref_class = ['god',
'por',
'std',
'sup',
'SPMBS',
'SPMBM',
'SPMBL']
ref = np.ones(7)
for i in range(7):
data_dir = os.path.join(cur_dir, base_input_path, 'Config_Data',
'spmb_test_' + str(i+1) + '.json')
with open(data_dir) as f:
data_input = json.load(f)
tmp = parse_BIM(data_input['GI'])
data_output = SPMB_config(tmp)
print(data_output)
res.append(int(ref_class[i] in data_output))
# Check
assert_allclose(res, ref, atol=1e-5)
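# Hedged addition (not in the original file): allow running this test module
# directly, outside of pytest's automatic discovery.
if __name__ == '__main__':
    import sys
    sys.exit(pytest.main([__file__, '-v']))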
import os
import re
import csv
import talib
import numpy as np
inputs = {
'open': np.random.random(100),
'high': np.random.random(100),
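# Hedged completion (the original file is truncated here): TA-Lib's function and
# abstract APIs expect the remaining OHLCV arrays, so the dictionary plausibly
# continues with the keys below; all values are placeholder random data, as above.
'low': np.random.random(100),
'close': np.random.random(100),
'volume': np.random.random(100),
}
# Illustrative usage (real TA-Lib function, placeholder data):
# sma = talib.SMA(inputs['close'], timeperiod=30)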
"""Contains plot functions."""
from platform import system
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.lines import Line2D
from matplotlib.patches import Polygon
from . import utils as ut
from . import salt_utils as salt_ut
from . import nb_fun as nbf
def plt_maximize():
"""Enable full screen.
Notes
-----
Come from
https://stackoverflow.com/questions/12439588/how-to-maximize-a-plt-show-window-using-python/22418354#22418354
"""
# See discussion on:
# https://stackoverflow.com/questions/12439588/how-to-maximize-a-plt-show-window-using-python
backend = plt.get_backend()
cfm = plt.get_current_fig_manager()
if backend == "wxAgg":
cfm.frame.Maximize(True)
elif backend == "TkAgg":
if system() == "win32":
cfm.window.state('zoomed') # This is windows only
else:
cfm.resize(*cfm.window.maxsize())
elif backend in ('QT4Agg', 'QT5Agg'):
cfm.window.showMaximized()
elif callable(getattr(cfm, "full_screen_toggle", None)):
if not getattr(cfm, "flag_is_max", None):
cfm.full_screen_toggle()
cfm.flag_is_max = True
else:
raise RuntimeError("plt_maximize() is not implemented for current backend:", backend)
def param_text_box(text_ax, model_name, sim_par=None, fit_par=None, pos=[0.01, 0.25]):
"""Add a text legend with model parameters to the plot.
Parameters
----------
text_ax : matplotlib.axes
Axes where place the text.
model_name : str
The name of the sn model that is used.
sim_par : list(float)
The parameters of the model.
fit_par : list(tuple(float,float))
The fitted parameters and errors.
"""
par_dic = {'salt': [('t0', '.2f'), ('x0', '.2e'), ('mb', '.2f'), ('x1', '.2f'), ('c', '.3f')],
'mw_': [('Rv', '.2f'), ('E(B-V)', '.3f')]}
par = par_dic[model_name]
str_list = [''] * (len(par) + 1)
if sim_par is not None:
str_list[0] += 'SIMULATED PARAMETERS :@'
if fit_par is not None:
str_list[0] += 'FITTED PARAMETERS :@'
for i, p in enumerate(par):
if sim_par is not None:
str_list[i + 1] += f"{p[0]} = {sim_par[i]:{p[1]}}@"
if fit_par is not None:
if isinstance(fit_par[i], (int, float)):
str_list[i + 1] += f"{p[0]} = {fit_par[i]:{p[1]}}"
else:
str_list[i + 1] += f"{p[0]} = {fit_par[i][0]:{p[1]}} $\pm$ {fit_par[i][1]:{p[1]}}@"
final_str = ""
if str_list[0].count('@') == 2:
len_str = []
for i, s in enumerate(str_list):
str_list[i] = s.split('@')
len_str.append(len(str_list[i][0]))
max_len = np.max(len_str)
for i in range(len(str_list)):
final_str += str_list[i][0] + " " * (max_len - len_str[i] + 2) + "| "
final_str += str_list[i][1] + "\n"
elif str_list[0].count('@') == 1:
for i, s in enumerate(str_list):
final_str += str_list[i][:-1] + '\n'
prop = dict(boxstyle='round,pad=1', facecolor='navajowhite', alpha=0.5)
text_ax.axis('off')
text_ax.text(pos[0], pos[1], final_str[:-1], transform=text_ax.transAxes, fontsize=9, bbox=prop)
def plot_lc(
flux_table,
meta,
zp=25.,
mag=False,
Jy=False,
snc_sim_model=None,
snc_fit_model=None,
fit_cov=None,
residuals=False,
full_screen=False,
figsize=(35 / 2.54, 20 / 2.54),
dpi=120):
"""Ploting a lightcurve flux table.
Parameters
----------
flux_table : astropy.Table
The lightcurve to plot.
zp : float, default = 25.
Zeropoint at which rescale the flux.
mag : bool
If True plot the magnitude.
snc_sim_model : sncosmo.Model
Model used to simulate the lightcurve.
snc_fit_model : sncosmo.Model
Model used to fit the lightcurve.
fit_cov : numpy.ndarray(float, size=(4, 4))
sncosmo t0, x0, x1, c covariance matrix from SALT fit.
residuals : bool
If True plot fit residuals.
full_screen : bool
Try to plot the figure in full screen.
Returns
-------
None
Just plot the lightcurve.
"""
plt.rcParams['font.family'] = 'monospace'
bands = np.unique(flux_table['band'])
flux_norm, fluxerr_norm = ut.norm_flux(flux_table, zp)
time = flux_table['time']
t0 = meta['sim_t0']
z = meta['zobs']
time_th = np.linspace(t0 - 19.8 * (1 + z), t0 + 49.8 * (1 + z), 200)
fig = plt.figure(figsize=figsize, dpi=dpi)
###################
# INIT THE FIGURE #
###################
if residuals:
gs = gridspec.GridSpec(3, 1, height_ratios=[0.5, 2, 1])
text_ax = fig.add_subplot(gs[0])
ax0 = fig.add_subplot(gs[1])
ax1 = fig.add_subplot(gs[2], sharex=ax0)
ax1_y_lim = []
elif snc_sim_model is None and (snc_fit_model is None or fit_cov is None):
gs = gridspec.GridSpec(1, 1, height_ratios=[1])
ax0 = fig.add_subplot(gs[0])
else:
gs = gridspec.GridSpec(2, 1, height_ratios=[0.5, 2])
text_ax = fig.add_subplot(gs[0])
ax0 = fig.add_subplot(gs[1])
fig.suptitle(f'SN at redshift z : {z:.5f} and peak at time t$_0$ : {t0:.2f} MJD',
fontsize='xx-large')
plt.xlabel('Time relative to peak', fontsize='x-large')
################
# PLOT SECTION #
################
for b in bands:
band_mask = flux_table['band'] == b
flux_b = flux_norm[band_mask]
fluxerr_b = fluxerr_norm[band_mask]
time_b = time[band_mask]
if mag:
ax0.invert_yaxis()
ax0.set_ylabel('Mag', fontsize='x-large')
# Delete < 0 pts
flux_mask = flux_b > 0
flux_b = flux_b[flux_mask]
fluxerr_b = fluxerr_b[flux_mask]
time_b = time_b[flux_mask]
plot = -2.5 * np.log10(flux_b) + zp
err = 2.5 / np.log(10) * 1 / flux_b * fluxerr_b
if snc_sim_model is not None:
plot_th = snc_sim_model.bandmag(b, 'ab', time_th)
if snc_fit_model is not None:
plot_fit = snc_fit_model.bandmag(b, 'ab', time_th)
if fit_cov is not None:
if snc_fit_model.source.name in ('salt2', 'salt3'):
err_th = salt_ut.compute_salt_fit_error(snc_fit_model,
fit_cov[1:, 1:],
b, time_th, zp)
err_th = 2.5 / \
(np.log(10) * 10**(-0.4 * (plot_fit - zp))) * err_th
if residuals:
fit_pts = snc_fit_model.bandmag(b, 'ab', time_b)
rsd = plot - fit_pts
else:
if Jy:
ax0.set_ylabel('Flux [$\mu$Jy]', fontsize='x-large')
norm = ut.flux_to_Jansky(zp, b)
else:
ax0.set_ylabel(f'Flux (ZP = {zp})', fontsize='x-large')
norm = 1.0
ax0.axhline(ls='dashdot', c='black', lw=1.5)
plot = flux_b * norm
err = fluxerr_b * norm
if snc_sim_model is not None:
plot_th = snc_sim_model.bandflux(b, time_th, zp=zp, zpsys='ab') * norm
if snc_fit_model is not None:
plot_fit = snc_fit_model.bandflux(
b, time_th, zp=zp, zpsys='ab') * norm
if fit_cov is not None:
if snc_fit_model.source.name in ('salt2', 'salt3'):
err_th = salt_ut.compute_salt_fit_error(snc_fit_model, fit_cov[1:, 1:],
b, time_th, zp) * norm
if residuals:
fit_pts = snc_fit_model.bandflux(b, time_b, zp=zp, zpsys='ab') * norm
rsd = plot - fit_pts
p = ax0.errorbar(time_b - t0, plot, yerr=err,
label=b, fmt='o', markersize=2.5)
handles, labels = ax0.get_legend_handles_labels()
if snc_sim_model is not None:
ax0.plot(time_th - t0, plot_th, color=p[0].get_color())
sim_line = Line2D([0], [0], color='k', linestyle='solid')
sim_label = 'Sim'
handles.append(sim_line)
labels.append(sim_label)
if snc_fit_model is not None:
fit_line = Line2D([0], [0], color='k', linestyle='--')
fit_label = 'Fit'
handles.append(fit_line)
labels.append(fit_label)
ax0.plot(time_th - t0, plot_fit, color=p[0].get_color(), ls='--')
if fit_cov is not None:
ax0.fill_between(
time_th - t0,
plot_fit - err_th,
plot_fit + err_th,
alpha=0.5)
if residuals:
ax1.set_ylabel('Data - Model', fontsize='x-large')
ax1.errorbar(time_b - t0, rsd, yerr=err, fmt='o')
ax1.axhline(0, ls='dashdot', c='black', lw=1.5)
ax1_y_lim.append(3 * np.std(rsd))
ax1.plot(time_th - t0, err_th, ls='--', color=p[0].get_color())
ax1.plot(time_th - t0, -err_th, ls='--', color=p[0].get_color())
ax0.legend(handles=handles, labels=labels, fontsize='x-large')
sim_par = None
sim_mwd_par = None
fit_par = None
fit_mwd_par = None
if snc_sim_model is not None:
plt.xlim(snc_sim_model.mintime() - t0, snc_sim_model.maxtime() - t0)
sim_par = [meta['sim_t0'],
meta['sim_x0'],
meta['sim_mb'],
meta['sim_x1'],
meta['sim_c']]
if 'mw_' in snc_sim_model.effect_names:
sim_mwd_par = []
if 'mw_r_v' in meta:
sim_mwd_par.append(meta['mw_r_v'])
else:
mod_index = np.where(np.array(snc_sim_model.effect_names) == 'mw_')[0][0]
sim_mwd_par.append(snc_sim_model.effects[mod_index]._r_v)
sim_mwd_par.append(meta['mw_ebv'])
elif snc_fit_model is not None:
plt.xlim(snc_fit_model.mintime() - t0, snc_fit_model.maxtime() - t0)
else:
plt.xlim(np.min(time) - 1 - t0, np.max(time) + 1 - t0)
if residuals:
ax1.set_ylim(-np.nanmax(ax1_y_lim), np.nanmax(ax1_y_lim))
if snc_fit_model is not None and fit_cov is not None:
mb_fit = snc_fit_model.source_peakmag('bessellb', 'ab')
mb_err = np.sqrt(salt_ut.cov_x0_to_mb(snc_fit_model.parameters[2], fit_cov[1:, 1:])[0, 0])
fit_par = [(snc_fit_model.parameters[1], np.sqrt(fit_cov[0, 0])),
(snc_fit_model.parameters[2], np.sqrt(fit_cov[1, 1])),
(mb_fit, mb_err),
(snc_fit_model.parameters[3], np.sqrt(fit_cov[2, 2])),
(snc_fit_model.parameters[4], np.sqrt(fit_cov[3, 3]))]
if 'mw_' in snc_fit_model.effect_names:
fit_mwd_par = []
if 'mw_r_v' not in snc_fit_model.param_names:
mod_index = np.where(np.array(snc_fit_model.effect_names) == 'mw_')[0][0]
fit_mwd_par.append(snc_fit_model.effects[mod_index]._r_v)
else:
par_idx = np.where(np.asarray(snc_fit_model.param_names) == 'mw_r_v')[0][0]
fit_mwd_par.append(snc_fit_model.parameters[par_idx])
par_idx = np.where(np.asarray(snc_fit_model.param_names) == 'mw_ebv')[0][0]
fit_mwd_par.append(snc_fit_model.parameters[par_idx])
if fit_par is not None or sim_par is not None:
param_text_box(text_ax, model_name='salt', sim_par=sim_par, fit_par=fit_par)
if fit_mwd_par is not None or sim_mwd_par is not None:
param_text_box(text_ax, model_name='mw_', sim_par=sim_mwd_par, fit_par=fit_mwd_par,
pos=[0.4, 0.25])
plt.subplots_adjust(hspace=.0)
if full_screen:
try:
plt_maximize()
except Exception:
pass
plt.show()
def plot_ra_dec(ra, dec, vpec=None, field_list=None, field_dic=None, field_size=None, **kwarg):
"""Plot a mollweide map of ra, dec.
Parameters
----------
ra : list(float)
Right Ascension.
dec : list(float)
Declination.
vpec : list(float)
Peculiar velocities.
Returns
-------
None
Just plot the map.
"""
plt.figure()
ax = plt.subplot(111, projection='mollweide')
ax.set_axisbelow(True)
plt.grid()
ra = ra - 2 * np.pi * (ra > np.pi)
if vpec is None:
plt.scatter(ra, dec, s=10, **kwarg)
else:
plot = plt.scatter(ra, dec, c=vpec, vmin=-1500, vmax=1500, s=10, **kwarg)
plt.colorbar(plot, label='$v_p$ [km/s]')
if field_list is not None and field_dic is not None and field_size is not None:
ra_edges = np.array([field_size[0] / 2,
field_size[0] / 2,
-field_size[0] / 2,
-field_size[0] / 2])
dec_edges = np.array([field_size[1] / 2,
-field_size[1] / 2,
-field_size[1] / 2,
field_size[1] / 2])
vec = np.array([np.cos(ra_edges) * np.cos(dec_edges),
np.sin(ra_edges) * np.cos(dec_edges),
np.sin(dec_edges)]).T
for ID in field_list:
# if ID < 880:
ra = field_dic[ID]['ra']
dec = field_dic[ID]['dec']
new_coord = [nbf.R_base(
ra, -dec, v, to_field_frame=False) for v in vec]
new_radec = [[np.arctan2(x[1], x[0]), np.arcsin(x[2])] for x in new_coord]
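# Hedged reconstruction: the original file is truncated here. A plausible
# continuation wraps the field-corner right ascensions back into [-pi, pi] and
# draws each field footprint on the mollweide axes (Polygon is already imported);
# the styling keywords below are illustrative assumptions.
new_radec = np.asarray(new_radec)
new_radec[:, 0] -= 2 * np.pi * (new_radec[:, 0] > np.pi)
ax.add_patch(Polygon(new_radec, fill=False, edgecolor='k', lw=0.5))
# After the loop over fields, display the figure
plt.show()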
import pandas as pd
import numpy as np
import json, pickle
from collections import OrderedDict
from utils import *
import yaml
from tqdm import tqdm
import sys
# get the config which we will need throughout the whole script
with open(sys.argv[1], "r") as ymlfile:
config = yaml.load(ymlfile, Loader=yaml.FullLoader)
#this is just for the nameing convention as defined in config
dataset = config["preprocessing"]["dataset"]
print('\ndataset:', dataset)
opts = ["test"]
#READ IN MAIN CSV
dataset_csv_file = config["preprocessing"]["dataset_csv"]
dataset_csv = pd.read_csv(dataset_csv_file, index_col=0)
test_df = dataset_csv
#test_df.to_csv('data/' + dataset + '_' + "test" + ".csv")
print("Number of data points: \nTest: {}".format(len(test_df)))
# create the features and the graph, i.e. the input to GNNs -> REPLACED BY LOADING IN THE PICKLED GRAPH FROM PREPROCESSING FOR PLIG (MARC)
if config["preprocessing"]["use_graph"] == False:
print("skipping graph gen and graph load, not a GNN run")
exit()
elif config["preprocessing"]["external_graph"]["use"]:
print("loading graph from pickle PLIG file")
with open(str(config["preprocessing"]["external_graph"]["path"]), 'rb') as handle:  # -> path was set in config
smile_graph_all = pickle.load(handle)
#only load the subset of the data that you need.
#the graph.pickle file should have all PDBBind_combined entries in it.
smile_graph = {}
for i in dataset_csv["Identifier"]:
smile_graph[i] = smile_graph_all[i]
#print(len(smile_graph.keys()))
assert len(smile_graph.keys()) == len(dataset_csv["Identifier"])
else:
print("creating new graphs")
# collect all smiles
compound_iso_smiles = []
Identifier_list = []
for opt in opts:
df = pd.read_csv('data/' + dataset + '_' + opt + '.csv')
compound_iso_smiles += list(df['compound_iso_smiles'])
Identifier_list += list(df["Identifier"])
# create the features and the graph, i.e. the input to GNNs
smile_graph = {}
for smile, Identifier in zip(compound_iso_smiles, Identifier_list):
g = smile_to_graph(smile, config)
smile_graph[Identifier] = g
#Put this into the "else" statement since standardization not really applicable like this for PLIG
# standardize the data: Therefore, first collect all the data into one array and fit a standard scaler.
ls_compound_iso_smile = list(compound_iso_smiles)
arr = np.array((smile_graph[Identifier_list[0]])[1])
for id in tqdm(Identifier_list[1:]):
atom_features = (smile_graph[id])[1]
arr = np.concatenate((arr, np.stack(atom_features, axis=0)), axis=0)
scaler = StandardScaler()
scaler.fit(arr)
# now apply the standard scaler to all data points
for id in tqdm(Identifier_list):
c = smile_graph[id][0]
for j in range(c):
if len(np.expand_dims(smile_graph[id][1][j], axis=0)) == 1:  # reconstructed guard; the original line is truncated here
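# Hedged sketch of how the truncated script plausibly continues: standardize each
# atom-feature vector with the fitted scaler, then persist the graphs so a
# downstream GNN data loader can pick them up. The output path is an illustrative
# assumption, not taken from the original config.
smile_graph[id][1][j] = scaler.transform(np.expand_dims(smile_graph[id][1][j], axis=0))[0]
# (after the loops) e.g.:
# with open('data/' + dataset + '_test_graphs.pickle', 'wb') as handle:
#     pickle.dump(smile_graph, handle)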