| prompt (stringlengths 19 to 879k) | completion (stringlengths 3 to 53.8k) | api (stringlengths 8 to 59) |
|---|---|---|
import random
from collections import defaultdict
import numpy as np
from amplification.tasks.core import idk, uniform, Task, sequences
class IterTask(Task):
interaction_length = 3
zero = 1
one = 2
fixed_vocab = 3
def repr_symbol(self, x):
if x == idk: return '?'
if x == self.one: return '1'
if x == self.zero: return '0'
if x in self.chars: return 'abcdefghijklmnopqrstuv'[x]
def __init__(self, nchars=8, length=2, log_iters=6):
self.nvocab = self.fixed_vocab
self.nchars = nchars
self.length = length
self.log_iters = log_iters
self.size = nchars ** length
self.chars = self.allocate(nchars)
self.min_char = self.chars[0]
self.max_char = self.chars[-1]
self.vars = list(sequences(self.chars, length))
self.question_length = length + log_iters
self.fact_length = 2 * length
self.answer_length = length
def make_dbs(self, difficulty=float('inf')):
size = min(difficulty+8, self.size)
used_vars = random.sample(self.vars, size)
vals_raw = np.random.permutation(size)
vals = np.array(used_vars)[vals_raw]
square_raw = vals_raw
squares_raw = [square_raw]
for i in range(self.log_iters):
square_raw = square_raw[square_raw]
squares_raw.append(square_raw)
squares = [{val:used_vars[squares_raw[i][index]] for index, val in enumerate(used_vars)}
for i in range(self.log_iters)]
fast_db = {"vals": {v:val for v, val in zip(used_vars, vals)},
"vars": used_vars,
"squares_raw": squares_raw,
"squares": squares}
facts = np.concatenate([np.array(used_vars), vals], axis=1)
return facts, fast_db
def are_chars(self, x):
return np.logical_and(np.all(x >= self.min_char), np.all(x <= self.max_char))
def recursive_answer(self, Q):
Q = tuple(Q)
x = Q[:self.length]
n = Q[self.length:]
if not self.are_chars(x) or not np.all(np.isin(n, [self.zero, self.one])):
yield self.pad(idk), None
return
if np.all(n[:-1] == self.zero):
yield (yield None, Q), None
return
leading_bit = np.argmax(n)
shifted = self.zero * np.ones(self.log_iters, dtype=np.int32)
shifted[1:] = n[:-1]
queries = [shifted, shifted]
if n[-1] == self.one:
parity = self.zero * np.ones(self.log_iters, dtype=np.int32)
parity[-1] = self.one
queries.append(parity)
def query(x, n): return np.concatenate([x, n])
for m in queries:
x = (yield None, query(x, m))
if not self.are_chars(x):
yield self.pad(idk), None
return
yield self.pad(x), None
def make_q(self, fast_db):
x = random.choice(fast_db["vars"])
n = np.ones(self.log_iters, dtype=np.int32) * self.zero
leading_bit = np.random.randint(0, self.log_iters-1)
n[leading_bit] = self.one
remainder = self.log_iters - leading_bit - 1
n[leading_bit+1:] = | np.random.choice([self.zero, self.one], remainder) | numpy.random.choice |
import numpy as N
from traits.api import Int
from traitsui.api import BasicEditorFactory
from chaco.api import DataRange1D
from traits.etsconfig.api import ETSConfig
if ETSConfig.toolkit == 'wx':
import wx
from traitsui.wx.editor import Editor
class _ColorMapControl_wx(wx.Window):
"""WX control for showing a color map sample"""
def __init__(self, parent, cmap, padding=10, width=128, height=10):
self.cmap = cmap
self.width = width
self.height = height
super(_ColorMapControl_wx, self).__init__(parent,
size=wx.Size(self.width + padding, self.height + padding))
wx.EVT_PAINT(self, self._on_paint)
def _on_paint(self, event=None):
if self.cmap is None:
# Just show a black bar
data = N.zeros((self.width, self.height, 3), dtype=N.uint8)
else:
mapper = self.cmap(DataRange1D(low=0, high=self.width - 1))
# run a range through the color mapper, discard the alpha channel,
# and multiply so that it's 0-255
clrarray = mapper.map_screen(N.arange(0, self.width))[:, :-1] * 255
# Replicate the array to the required height
data = N.require(N.tile(clrarray, (self.height, 1)),
dtype=N.uint8,
requirements=['C_CONTIGUOUS', 'ALIGNED'])
# Create a bitmap from the array and paint it
wdc = wx.PaintDC(self)
wdx, wdy = self.GetClientSizeTuple()
bitmap = wx.BitmapFromBuffer(self.width, self.height, data)
wdc.DrawBitmap(bitmap, (wdx - self.width) / 2, (wdy - self.height) / 2)
elif ETSConfig.toolkit == 'qt4':
from pyface.qt import QtGui
from traitsui.qt4.editor import Editor
class _ColorMapControl_qt4(QtGui.QLabel):
"""Qt widget for showing a color map sample"""
def __init__(self, parent, cmap, padding=10, width=128, height=10):
self.width = width
self.height = height
QtGui.QLabel.__init__(self, parent)
self.setFixedSize(self.width, self.height)
self.set_cmap(cmap)
def set_cmap(self, cmap):
self.cmap = cmap
if self.cmap is None:
self.data = N.zeros((self.width, self.height), dtype=N.uint8)
colortable = [QtGui.qRgb(0, 0, 0)] * 256
else:
self.data = N.require(
N.outer(N.ones(self.height),
| N.linspace(0, 255, self.width, endpoint=True) | numpy.linspace |
import numpy as np
from psyneulink.core.components.functions.transferfunctions import Logistic
from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism
from psyneulink.core.components.process import Process
from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection
from psyneulink.core.components.system import System
from psyneulink.core.globals.keywords import EXECUTION, LEARNING, PROCESSING, SOFT_CLAMP, VALUE
from psyneulink.core.globals.preferences.componentpreferenceset import REPORT_OUTPUT_PREF, VERBOSE_PREF
from psyneulink.library.components.mechanisms.processing.objective.comparatormechanism import MSE
def test_multilayer():
Input_Layer = TransferMechanism(
name='Input Layer',
function=Logistic,
default_variable=np.zeros((2,)),
)
Hidden_Layer_1 = TransferMechanism(
name='Hidden Layer_1',
function=Logistic(),
# default_variable=np.zeros((5,)),
size=5
)
Hidden_Layer_2 = TransferMechanism(
name='Hidden Layer_2',
function=Logistic(),
default_variable=[0, 0, 0, 0],
)
Output_Layer = TransferMechanism(
name='Output Layer',
function=Logistic,
default_variable=[0, 0, 0],
)
Input_Weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5)
Middle_Weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4)
Output_Weights_matrix = (np.arange(4 * 3).reshape((4, 3)) + 1) / (4 * 3)
# TEST PROCESS.LEARNING WITH:
# CREATION OF FREE STANDING PROJECTIONS THAT HAVE NO LEARNING (Input_Weights, Middle_Weights and Output_Weights)
# INLINE CREATION OF PROJECTIONS (Input_Weights, Middle_Weights and Output_Weights)
# NO EXPLICIT CREATION OF PROJECTIONS (Input_Weights, Middle_Weights and Output_Weights)
# This projection will be used by the process below by referencing it in the process' pathway;
# note: sender and receiver args don't need to be specified
Input_Weights = MappingProjection(
name='Input Weights',
matrix=Input_Weights_matrix,
)
# This projection will be used by the process below by assigning its sender and receiver args
# to mechanisms in the pathway
Middle_Weights = MappingProjection(
name='Middle Weights',
sender=Hidden_Layer_1,
receiver=Hidden_Layer_2,
matrix=Middle_Weights_matrix,
)
# Commented lines in this projection illustrate a variety of ways in which matrix and learning signals can be specified
Output_Weights = MappingProjection(
name='Output Weights',
sender=Hidden_Layer_2,
receiver=Output_Layer,
matrix=Output_Weights_matrix,
)
p = Process(
# default_variable=[0, 0],
size=2,
pathway=[
Input_Layer,
# The following reference to Input_Weights is needed to use it in the pathway
# since its sender and receiver args are not specified in its declaration above
Input_Weights,
Hidden_Layer_1,
# No projection specification is needed here since the sender arg for Middle_Weights
# is Hidden_Layer_1 and its receiver arg is Hidden_Layer_2
# Middle_Weights,
Hidden_Layer_2,
# Output_Weights does not need to be listed for the same reason as Middle_Weights
# If Middle_Weights and/or Output_Weights is not declared above, then the process
# will assign a default for the missing projection
# Output_Weights,
Output_Layer
],
clamp_input=SOFT_CLAMP,
learning=LEARNING,
learning_rate=1.0,
target=[0, 0, 1],
prefs={
VERBOSE_PREF: False,
REPORT_OUTPUT_PREF: False
},
)
stim_list = {Input_Layer: [[-1, 30]]}
target_list = {Output_Layer: [[0, 0, 1]]}
def show_target():
i = s.input
t = s.target_input_states[0].parameters.value.get(s)
print('\nOLD WEIGHTS: \n')
print('- Input Weights: \n', Input_Weights.get_mod_matrix(s))
print('- Middle Weights: \n', Middle_Weights.get_mod_matrix(s))
print('- Output Weights: \n', Output_Weights.get_mod_matrix(s))
print('\nSTIMULI:\n\n- Input: {}\n- Target: {}\n'.format(i, t))
print('ACTIVITY FROM OLD WEIGHTS: \n')
print('- Middle 1: \n', Hidden_Layer_1.parameters.value.get(s))
print('- Middle 2: \n', Hidden_Layer_2.parameters.value.get(s))
print('- Output:\n', Output_Layer.parameters.value.get(s))
s = System(
processes=[p],
targets=[0, 0, 1],
learning_rate=1.0,
)
# s.reportOutputPref = True
results = s.run(
num_trials=10,
inputs=stim_list,
targets=target_list,
call_after_trial=show_target,
)
objective_output_layer = s.mechanisms[4]
results_list = []
for elem in s.results:
for nested_elem in elem:
nested_elem = nested_elem.tolist()
try:
iter(nested_elem)
except TypeError:
nested_elem = [nested_elem]
results_list.extend(nested_elem)
expected_output = [
(Output_Layer.get_output_values(s), [np.array([0.22686074, 0.25270212, 0.91542149])]),
(objective_output_layer.output_states[MSE].parameters.value.get(s), np.array(0.04082589331852094)),
(Input_Weights.get_mod_matrix(s), np.array([
[ 0.09900247, 0.19839653, 0.29785764, 0.39739191, 0.49700232],
[ 0.59629092, 0.69403786, 0.79203411, 0.89030237, 0.98885379],
])),
(Middle_Weights.get_mod_matrix(s), np.array([
[ 0.09490249, 0.10488719, 0.12074013, 0.1428774 ],
[ 0.29677354, 0.30507726, 0.31949676, 0.3404652 ],
[ 0.49857336, 0.50526254, 0.51830509, 0.53815062],
[ 0.70029406, 0.70544225, 0.71717037, 0.73594383],
[ 0.90192903, 0.90561554, 0.91609668, 0.93385292],
])),
(Output_Weights.get_mod_matrix(s), np.array([
[-0.74447522, -0.71016859, 0.31575293],
[-0.50885177, -0.47444784, 0.56676582],
[-0.27333719, -0.23912033, 0.8178167 ],
[-0.03767547, -0.00389039, 1.06888608],
])),
(results, [
[np.array([0.8344837 , 0.87072018, 0.89997433])],
[np.array([0.77970193, 0.83263138, 0.90159627])],
[np.array([0.70218502, 0.7773823 , 0.90307765])],
[np.array([0.60279149, 0.69958079, 0.90453143])],
[np.array([0.4967927 , 0.60030321, 0.90610082])],
[np.array([0.4056202 , 0.49472391, 0.90786617])],
[np.array([0.33763025, 0.40397637, 0.90977675])],
[np.array([0.28892812, 0.33633532, 0.9117193 ])],
[np.array([0.25348771, 0.28791896, 0.9136125 ])],
[np.array([0.22686074, 0.25270212, 0.91542149])]
]),
]
# Test nparray output of log for Middle_Weights
for i in range(len(expected_output)):
val, expected = expected_output[i]
# setting absolute tolerance to be in accordance with reference_output precision
# if you do not specify, assert_allclose will use a relative tolerance of 1e-07,
# which WILL FAIL unless you gather higher precision values to use as reference
np.testing.assert_allclose(val, expected, atol=1e-08, err_msg='Failed on expected_output[{0}]'.format(i))
def test_multilayer_log():
Input_Layer = TransferMechanism(
name='Input Layer',
function=Logistic,
default_variable=np.zeros((2,)),
)
Hidden_Layer_1 = TransferMechanism(
name='Hidden Layer_1',
function=Logistic(),
# default_variable=np.zeros((5,)),
size=5
)
Hidden_Layer_2 = TransferMechanism(
name='Hidden Layer_2',
function=Logistic(),
default_variable=[0, 0, 0, 0],
)
Output_Layer = TransferMechanism(
name='Output Layer',
function=Logistic,
default_variable=[0, 0, 0],
)
Input_Weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5)
Middle_Weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4)
Output_Weights_matrix = (np.arange(4 * 3).reshape((4, 3)) + 1) / (4 * 3)
# TEST PROCESS.LEARNING WITH:
# CREATION OF FREE STANDING PROJECTIONS THAT HAVE NO LEARNING (Input_Weights, Middle_Weights and Output_Weights)
# INLINE CREATION OF PROJECTIONS (Input_Weights, Middle_Weights and Output_Weights)
# NO EXPLICIT CREATION OF PROJECTIONS (Input_Weights, Middle_Weights and Output_Weights)
# This projection will be used by the process below by referencing it in the process' pathway;
# note: sender and receiver args don't need to be specified
Input_Weights = MappingProjection(
name='Input Weights',
matrix=Input_Weights_matrix,
)
# This projection will be used by the process below by assigning its sender and receiver args
# to mechanisms in the pathway
Middle_Weights = MappingProjection(
name='Middle Weights',
sender=Hidden_Layer_1,
receiver=Hidden_Layer_2,
matrix=Middle_Weights_matrix,
)
# Commented lines in this projection illustrate a variety of ways in which matrix and learning signals can be specified
Output_Weights = MappingProjection(
name='Output Weights',
sender=Hidden_Layer_2,
receiver=Output_Layer,
matrix=Output_Weights_matrix,
)
p = Process(
# default_variable=[0, 0],
size=2,
pathway=[
Input_Layer,
# The following reference to Input_Weights is needed to use it in the pathway
# since its sender and receiver args are not specified in its declaration above
Input_Weights,
Hidden_Layer_1,
# No projection specification is needed here since the sender arg for Middle_Weights
# is Hidden_Layer_1 and its receiver arg is Hidden_Layer_2
# Middle_Weights,
Hidden_Layer_2,
# Output_Weights does not need to be listed for the same reason as Middle_Weights
# If Middle_Weights and/or Output_Weights is not declared above, then the process
# will assign a default for the missing projection
# Output_Weights,
Output_Layer
],
clamp_input=SOFT_CLAMP,
learning=LEARNING,
learning_rate=1.0,
target=[0, 0, 1],
prefs={
VERBOSE_PREF: False,
REPORT_OUTPUT_PREF: False
},
)
Middle_Weights.set_log_conditions(('mod_matrix', PROCESSING))
stim_list = {Input_Layer: [[-1, 30]]}
target_list = {Output_Layer: [[0, 0, 1]]}
def show_target():
i = s.input
t = s.target_input_states[0].parameters.value.get(s)
print('\nOLD WEIGHTS: \n')
print('- Input Weights: \n', Input_Weights.get_mod_matrix(s))
print('- Middle Weights: \n', Middle_Weights.get_mod_matrix(s))
print('- Output Weights: \n', Output_Weights.get_mod_matrix(s))
print('\nSTIMULI:\n\n- Input: {}\n- Target: {}\n'.format(i, t))
print('ACTIVITY FROM OLD WEIGHTS: \n')
print('- Middle 1: \n', Hidden_Layer_1.parameters.value.get(s))
print('- Middle 2: \n', Hidden_Layer_2.parameters.value.get(s))
print('- Output:\n', Output_Layer.parameters.value.get(s))
s = System(
processes=[p],
targets=[0, 0, 1],
learning_rate=1.0,
)
s.run(
num_trials=10,
inputs=stim_list,
targets=target_list,
call_after_trial=show_target,
)
expected_log_val = np.array(
[
['System-0'],
[[
[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]],
[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[2], [2], [2], [2], [2], [2], [2], [2], [2], [2]],
[ [[ 0.05, 0.1 , 0.15, 0.2 ],
[ 0.25, 0.3 , 0.35, 0.4 ],
[ 0.45, 0.5 , 0.55, 0.6 ],
[ 0.65, 0.7 , 0.75, 0.8 ],
[ 0.85, 0.9 , 0.95, 1. ]],
[[ 0.04789907, 0.09413833, 0.14134241, 0.18938924],
[ 0.24780811, 0.29388455, 0.34096758, 0.38892985],
[ 0.44772121, 0.49364209, 0.54060947, 0.58849095],
[ 0.64763875, 0.69341202, 0.74026967, 0.78807449],
[ 0.84756101, 0.89319513, 0.93994932, 0.98768187]],
[[ 0.04738148, 0.08891106, 0.13248753, 0.177898 ],
[ 0.24726841, 0.28843403, 0.33173452, 0.37694783],
[ 0.44716034, 0.48797777, 0.53101423, 0.57603893],
[ 0.64705774, 0.6875443 , 0.73032986, 0.77517531],
[ 0.84696096, 0.88713512, 0.92968378, 0.97435998]],
[[ 0.04937771, 0.08530344, 0.12439361, 0.16640433],
[ 0.24934878, 0.28467436, 0.32329947, 0.36496974],
[ 0.44932147, 0.48407216, 0.52225175, 0.56359587],
[ 0.64929589, 0.68349948, 0.72125508, 0.76228876],
[ 0.84927212, 0.88295836, 0.92031297, 0.96105307]],
[[ 0.05440291, 0.08430585, 0.1183739 , 0.15641064],
[ 0.25458348, 0.28363519, 0.3170288 , 0.35455942],
[ 0.45475764, 0.48299299, 0.51573974, 0.55278488],
[ 0.65492462, 0.68238209, 0.7145124 , 0.75109483],
[ 0.85508376, 0.88180465, 0.91335119, 0.94949538]],
[[ 0.06177218, 0.0860581 , 0.11525064, 0.14926369],
[ 0.26225812, 0.28546004, 0.31377611, 0.34711631],
[ 0.46272625, 0.48488774, 0.51236246, 0.54505667],
[ 0.66317453, 0.68434373, 0.7110159 , 0.74309381],
[ 0.86360121, 0.88382991, 0.9097413 , 0.94123489]],
[[ 0.06989398, 0.08959148, 0.11465594, 0.14513241],
[ 0.27071639, 0.2891398 , 0.31315677, 0.34281389],
[ 0.47150846, 0.48870843, 0.5117194 , 0.54058946],
[ 0.67226675, 0.68829929, 0.71035014, 0.73846891],
[ 0.87298831, 0.88791376, 0.90905395, 0.93646 ]],
[[ 0.07750784, 0.09371987, 0.11555569, 0.143181 ],
[ 0.27864693, 0.29343991, 0.31409396, 0.3407813 ],
[ 0.47974374, 0.49317377, 0.5126926 , 0.53847878],
[ 0.68079346, 0.69292265, 0.71135777, 0.73628353],
[ 0.88179203, 0.89268732, 0.91009431, 0.93420362]],
[[ 0.0841765 , 0.09776672, 0.11711835, 0.14249779],
[ 0.28559463, 0.29765609, 0.31572199, 0.34006951],
[ 0.48695967, 0.49755273, 0.51438349, 0.5377395 ],
[ 0.68826567, 0.69745713, 0.71310872, 0.735518 ],
[ 0.88950757, 0.89736946, 0.91190228, 0.93341316]],
[[ 0.08992499, 0.10150104, 0.11891032, 0.14250149],
[ 0.29158517, 0.30154765, 0.31758943, 0.34007336],
[ 0.49318268, 0.50159531, 0.51632339, 0.5377435 ],
[ 0.69471052, 0.70164382, 0.71511777, 0.73552215],
[ 0.8961628 , 0.90169281, 0.91397691, 0.93341744]]]
]]
],
dtype=object
)
log_val = Middle_Weights.log.nparray(entries='mod_matrix', header=False)
assert log_val[0] == expected_log_val[0]
for i in range(1, len(log_val)):
try:
np.testing.assert_allclose(log_val[i], expected_log_val[i])
except TypeError:
for j in range(len(log_val[i])):
np.testing.assert_allclose(
np.array(log_val[i][j][0]),
np.array(expected_log_val[i][j][0]),
atol=1e-08,
err_msg='Failed on test item {0} of logged values'.format(i)
)
Middle_Weights.log.print_entries()
# Test programmatic logging
Hidden_Layer_2.log.log_values(VALUE, s)
log_val = Hidden_Layer_2.log.nparray(header=False)
expected_log_val = np.array(
[
['System-0'],
[[
[[1]],
[[0]],
[[0]],
[[0]],
[[[0.8565238418942037, 0.8601053239957609, 0.8662098921116546, 0.8746933736954071]]]
]]
],
dtype=object
)
assert log_val[0] == expected_log_val[0]
for i in range(1, len(log_val)):
try:
np.testing.assert_allclose(log_val[i], expected_log_val[i])
except TypeError:
for j in range(len(log_val[i])):
np.testing.assert_allclose(
np.array(log_val[i][j][0]),
np.array(expected_log_val[i][j][0]),
atol=1e-08,
err_msg='Failed on test item {0} of logged values'.format(i)
)
Hidden_Layer_2.log.print_entries()
# Clear log and test with logging of weights set to LEARNING for another 5 trials of learning
Middle_Weights.log.clear_entries(entries=None, confirm=False)
Middle_Weights.set_log_conditions(('mod_matrix', LEARNING))
s.run(
num_trials=5,
inputs=stim_list,
targets=target_list,
)
log_val = Middle_Weights.log.nparray(entries='mod_matrix', header=False)
expected_log_val = np.array(
[
['System-0'],
[[
[[1], [1], [1], [1], [1]], # RUN
[[0], [1], [2], [3], [4]], # TRIAL
[[1], [1], [1], [1], [1]], # PASS
[[1], [1], [1], [1], [1]], # TIME_STEP
[ [[0.09925812411381937, 0.1079522130303428, 0.12252820028789306, 0.14345816973727732],
[0.30131473371328343, 0.30827285172236585, 0.3213609999139731, 0.3410707131678078],
[0.5032924245149345, 0.5085833053183328, 0.5202423523987703, 0.5387798509126243],
[0.70518251216691, 0.7088822116145151, 0.7191771716324874, 0.7365956448426355],
[0.9069777724600303, 0.9091682860319945, 0.9181692763668221, 0.93452610920817]],
[[0.103113468050986, 0.11073719161508278, 0.12424368674464399, 0.14415219181047598],
[0.3053351724284921, 0.3111770895557729, 0.3231499474835138, 0.341794454877438],
[0.5074709829757806, 0.5116017638574931, 0.5221016574478528, 0.5395320566440044],
[0.7095115080472698, 0.7120093413898914, 0.7211034158081356, 0.7373749316571768],
[0.9114489813353512, 0.9123981459792809, 0.9201588001021687, 0.935330996581107]],
[[0.10656261740658036, 0.11328192907953168, 0.12587702586370172, 0.14490737831188183],
[0.30893272045369513, 0.31383131362555394, 0.32485356055342113, 0.3425821330631872],
[0.5112105492674988, 0.5143607671543178, 0.5238725230390068, 0.5403508295336265],
[0.7133860755337162, 0.7148679468096026, 0.7229382109974996, 0.7382232628724675],
[0.9154510531345043, 0.9153508224199809, 0.9220539747533424, 0.936207244690072]],
[[0.10967776822419642, 0.11562091141141007, 0.12742795007904037, 0.14569308665620523],
[0.3121824816018084, 0.316271366885665, 0.3264715025259811, 0.34340179304134666],
[0.5145890402653069, 0.5168974760377518, 0.5255545550838675, 0.5412029579613059],
[0.7168868378231593, 0.7174964619674593, 0.7246811176253708, 0.7391062307617761],
[0.9190671994078436, 0.9180659725806082, 0.923854327015523, 0.9371193149131859]],
[[0.11251466428344682, 0.11778293740676549, 0.12890014813698167, 0.14649079441816393],
[0.31514245505635713, 0.3185271913574249, 0.328007571201157, 0.3442341089776976],
[0.5176666356203712, 0.5192429413004418, 0.5271516632648602, 0.5420683480396268],
[0.7200760707077265, 0.7199270072739019, 0.7263361597421493, 0.7400030122347587],
[0.922361699102421, 0.9205767427437028, 0.9255639970037588, 0.9380456963960624]]]
]]
],
dtype=object
)
assert log_val.shape == expected_log_val.shape
assert log_val[0] == expected_log_val[0]
assert len(log_val[1]) == len(expected_log_val[1]) == 1
for i in range(len(log_val[1][0])):
try:
np.testing.assert_allclose(log_val[1][0][i], expected_log_val[1][0][i])
except TypeError:
for j in range(len(log_val[1][0][i])):
np.testing.assert_allclose(
np.array(log_val[1][0][i][j]),
| np.array(expected_log_val[1][0][i][j]) | numpy.array |
"""
Vortex dynamics
Several initial states are provided: select one with 'vortex_config'
"""
import sys
try:
from param import Param
except:
print("[ERROR] unable to import the param module")
print("[INFO] you likely forgot to set $PYTHONPATH")
print("[INFO] depending on your shell")
print("> source ~/.fluid2d/activate.sh")
print("> source ~/.fluid2d/activate.csh")
print("> source ~/.fluid2d/activate.fish")
sys.exit()
from grid import Grid
from fluid2d import Fluid2d
import numpy as np
import ana_profiles as ap
# If the code immediately stops with
# Traceback (most recent call last):
# File "vortex.py", line 1, in <module>
# from param import Param
# ImportError: No module named param
# it means that you forgot to do
# source activate.sh in your terminal
param = Param('default.xml')
param.modelname = 'euler'
param.expname = 'vortex_00'
# domain and resolution
param.nx = 64*2
param.ny = param.nx
param.Ly = param.Lx
param.npx = 1
param.npy = 1
param.geometry = 'closed'
# time
param.tend = 10
param.cfl = 1.
param.adaptable_dt = True
param.dt = 0.01
param.dtmax = 100
# discretization
param.order = 3
param.timestepping = 'RK3_SSP'
param.exacthistime = True
# output
param.var_to_save = ['vorticity', 'psi', 'tracer']
param.list_diag = 'all'
param.freq_plot = 10
param.freq_his = .2
param.freq_diag = 0.02
# plot
param.freq_plot = 10
param.plot_interactive = True
param.plot_psi = True
param.plot_var = 'vorticity'
param.cax = np.array([-2, 2.])*5
param.colorscheme = 'imposed'
param.generate_mp4 = False
# physics
param.noslip = False
param.diffusion = False
param.additional_tracer = ['tracer']
grid = Grid(param)
param.Kdiff = 5e-2*grid.dx
xr, yr = grid.xr, grid.yr
# it's time to modify the mask and add obstacles if you wish, 0 is land
msk_config = 'none' # other possibilities are 'T-wall' and 'bay'
if msk_config == 'bay':
x0, y0, radius = 0.5, 0.35, 0.2
y1 = 0.5
msk2 = ap.vortex(xr, yr, param.Lx, param.Ly,
x0, y0, radius, 'step')
grid.msk[yr < y1] = 0
grid.msk += np.asarray(msk2, dtype=int)
grid.msk[grid.msk < 0] = 0
grid.msk[grid.msk > 1] = 1
grid.msk[0:1, :] = 0
grid.finalize_msk()
elif msk_config == 'T-wall':
i0, j0 = param.nx//2, param.ny//2
di = int(0.25*param.Lx/grid.dx)
grid.msk[:j0, i0] = 0
grid.msk[j0, i0-di:i0+di] = 0
grid.finalize_msk()
else:
# do nothing
pass
f2d = Fluid2d(param, grid)
model = f2d.model
vor = model.var.get('vorticity')
def vortex(param, grid, x0, y0, sigma,
vortex_type, ratio=1):
"""Setup a compact distribution of vorticity
at location x0, y0 vortex, width is sigma, vortex_type controls
the radial vorticity profile, ratio controls the x/y aspect ratio
(for ellipses)
"""
xr, yr = grid.xr, grid.yr
# ratio controls the ellipticity, ratio=1 is a disc
x = np.sqrt((xr-param.Lx*x0)**2+(yr-param.Ly*y0)**2*ratio**2)
y = x.copy()*0.
if vortex_type in ('gaussian', 'cosine', 'step'):
if vortex_type == 'gaussian':
y = np.exp(-x**2/(sigma**2))
elif vortex_type == 'cosine':
y = np.cos(x/sigma*np.pi/2)
y[x > sigma] = 0.
elif vortex_type == 'step':
y[x <= sigma] = 1.
else:
print('this kind of vortex (%s) is not defined' % vortex_type)
return y
# 2/ set an initial tracer field
vtype = 'gaussian'
# vortex width
sigma = 0.0*param.Lx
vortex_config = 'dipole2'
if vortex_config == 'single':
vtype = 'gaussian'
sigma = 0.03*param.Lx
vor[:] = vortex(param, grid, 0.4, 0.54, sigma,
vtype, ratio=1)
elif vortex_config == 'dipolebay':
vtype = 'gaussian'
sigma = 0.03*param.Lx
y2 = 0.53
vor[:] = vortex(param, grid, 0.15, y2, sigma,
vtype, ratio=1)
vor[:] -= vortex(param, grid, -0.15, y2, sigma,
vtype, ratio=1)
elif vortex_config == 'dipole2':
vtype = 'gaussian'
sigma = 0.05*param.Lx
x0 = 0.7
vor[:] = -vortex(param, grid, x0, 0.42, sigma,
vtype, ratio=1)
vor[:] += vortex(param, grid, x0, 0.58, sigma,
vtype, ratio=1)
elif vortex_config == 'rankine':
vtype = 'step'
ring_sigma = 0.2*param.Lx
ring_amp = 1.
vor[:] = ring_amp * vortex(param, grid, 0.5, 0.5, ring_sigma,
vtype, ratio=1)
# sigma ring, core = 0.2, 0.135 yields a tripole (with step distribution)
# sigma ring, core = 0.2, 0.12 yields a dipole (with step distribution)
core_sigma = 0.173*param.Lx
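# core_amp below is chosen so that the opposite-signed core cancels the ring's net
# circulation for these step profiles: ring_amp*(ring_sigma**2 - core_sigma**2) = core_amp*core_sigma**2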
core_amp = ring_amp*(ring_sigma**2-core_sigma**2.)/core_sigma**2.
vor[:] -= (core_amp+ring_amp)*vortex(param, grid, 0.5, 0.5, core_sigma,
vtype, ratio=1)
elif vortex_config == 'dipole':
vtype = 'gaussian'
sigma = 0.04*param.Lx
vor[:] = vortex(param, grid, 0.3, 0.52, sigma, vtype)
vor[:] -= vortex(param, grid, 0.3, 0.48, sigma, vtype)
elif vortex_config == 'chasing':
sigma = 0.03*param.Lx
vtype = 'step'
vor[:] = vortex(param, grid, 0.3, 0.6, sigma, vtype)
vor[:] -= vortex(param, grid, 0.3, 0.4, sigma, vtype)
vor[:] += vortex(param, grid, 0.1, 0.55, sigma, vtype)
vor[:] -= vortex(param, grid, 0.1, 0.45, sigma, vtype)
elif vortex_config == 'corotating':
sigma = 0.06*param.Lx
dist = 0.25*param.Lx
vtype = 'gaussian'
vor[:] = vortex(param, grid, 0.5, 0.5+dist/2, sigma, vtype)
vor[:] += vortex(param, grid, 0.5, 0.5-dist/2, sigma, vtype)
elif vortex_config == 'collection':
vtype = 'cosine'
x0 = [0.3, 0.4, 0.6, 0.8]
y0 = [0.5, 0.5, 0.5, 0.5]
amplitude = [1, -2, -1, 2]
width = np.array([1, 0.5, 1, 0.5])*0.04*param.Lx
for x, y, a, s in zip(x0, y0, amplitude, width):
vor[:] += a*vortex(param, grid, x, y, s, vtype)
elif vortex_config == 'unequal':
# Melander, Zabusky, McWilliams 1987
# Asymmetric vortex merger in two dimensions: Which vortex is 'victorious'?
s1 = 0.04*param.Lx
a1 = 1.
s2 = 0.1*param.Lx
a2 = 0.2
vtype = 'cosine'
vor[:] = a1*vortex(param, grid, 0.5, 0.6, s1, vtype)
vor[:] += a2*vortex(param, grid, 0.5, 0.4, s2, vtype)
vor[:] = vor*grid.msk
if False:
np.random.seed(1) # this guarantees the results reproducibility
noise = np.random.normal(size=np.shape(yr))*grid.msk
noise -= grid.domain_integration(noise)*grid.msk/grid.area
grid.fill_halo(noise)
noise_amplitude = 1e-3
vor += noise*noise_amplitude
model.set_psi_from_vorticity()
state = model.var.get('tracer')
state[:] = np.round(xr*6) % 2 + np.round(yr*6) % 2
state *= grid.msk
# normalization of the vorticity so that enstrophy == 1.
model.diagnostics(model.var, 0)
enstrophy = model.diags['enstrophy']
# print('enstrophy = %g' % enstrophy)
vor[:] = vor[:] / | np.sqrt(enstrophy) | numpy.sqrt |
#!/usr/bin/python
import numpy as np
import os
import pymaster as nmt
import pytest
import tjpcov.main as cv
from tjpcov.parser import parse
import yaml
import sacc
root = "./tests/benchmarks/32_DES_tjpcov_bm/"
input_yml = os.path.join(root, "tjpcov_conf_minimal.yaml")
input_yml_no_nmtc = os.path.join(root, "tjpcov_conf_minimal_no_nmtconf.yaml")
xcell_yml = os.path.join(root, "desy1_tjpcov_bm.yml")
def get_xcell_yml():
with open(xcell_yml) as f:
config = yaml.safe_load(f)
return config
def get_nmt_bin():
bpw_edges = [0, 6, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66, 72, 78, 84, 90, 96]
return nmt.NmtBin.from_edges(bpw_edges[:-1], bpw_edges[1:])
def get_pair_folder_name(tracer_comb):
bn = []
for tr in tracer_comb:
bn.append(tr.split('__')[0])
return '_'.join(bn)
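# For illustration (hypothetical tracer names): get_pair_folder_name(("DESgc__0", "DESwl__1"))
# strips the '__' suffixes and joins the bases, returning "DESgc_DESwl", which is the folder
# naming convention the helpers below rely on.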
def get_data_cl(tr1, tr2, remove_be=False):
bn = get_pair_folder_name((tr1, tr2))
fname = os.path.join(root, bn, f"cl_{tr1}_{tr2}.npz")
cl = np.load(fname)['cl']
# Remove redundant terms
if remove_be and (tr1 == tr2) and (cl.shape[0] == 4):
cl = np.delete(cl, 2, 0)
return cl
def get_fiducial_cl(s, tr1, tr2, binned=True, remove_be=False):
bn = get_pair_folder_name((tr1, tr2))
fname = os.path.join(root, 'fiducial', bn, f"cl_{tr1}_{tr2}.npz")
cl = np.load(fname)['cl']
if binned:
s = s.copy()
s.remove_selection(data_type='cl_0b')
s.remove_selection(data_type='cl_eb')
s.remove_selection(data_type='cl_be')
s.remove_selection(data_type='cl_bb')
ix = s.indices(tracers=(tr1, tr2))
bpw = s.get_bandpower_windows(ix)
cl0_bin = bpw.weight.T.dot(cl[0])
cl_bin = np.zeros((cl.shape[0], cl0_bin.size))
cl_bin[0] = cl0_bin
cl = cl_bin
else:
pass  # keep the unbinned cl
# Remove redundant terms
if remove_be and (tr1 == tr2) and (cl.shape[0] == 4):
cl = np.delete(cl, 2, 0)
return cl
def get_tracer_noise(tr, cp=True):
bn = get_pair_folder_name((tr, tr))
fname = os.path.join(root, bn, f"cl_{tr}_{tr}.npz")
clfile = np.load(fname)
if cp:
return clfile['nl_cp'][0, -1]
else:
return clfile['nl'][0, 0]
def get_benchmark_cov(tracer_comb1, tracer_comb2):
(tr1, tr2), (tr3, tr4) = tracer_comb1, tracer_comb2
fname = os.path.join(root, 'cov', f'cov_{tr1}_{tr2}_{tr3}_{tr4}.npz')
return np.load(fname)['cov']
def get_workspace(tr1, tr2):
config = get_xcell_yml()
w = nmt.NmtWorkspace()
bn = get_pair_folder_name((tr1, tr2))
m1 = config['tracers'][tr1]['mask_name']
m2 = config['tracers'][tr2]['mask_name']
fname = os.path.join(root, bn, f"w__{m1}__{m2}.fits")
w.read_from(fname)
return w
def get_covariance_workspace(tr1, tr2, tr3, tr4):
config = get_xcell_yml()
cw = nmt.NmtCovarianceWorkspace()
m1 = config['tracers'][tr1]['mask_name']
m2 = config['tracers'][tr2]['mask_name']
m3 = config['tracers'][tr3]['mask_name']
m4 = config['tracers'][tr4]['mask_name']
fname = os.path.join(root, 'cov', f"cw__{m1}__{m2}__{m3}__{m4}.fits")
cw.read_from(fname)
return cw
def assert_chi2(s, tracer_comb1, tracer_comb2, cov, cov_bm, threshold):
cl1 = get_data_cl(*tracer_comb1, remove_be=True)
cl2 = get_data_cl(*tracer_comb2, remove_be=True)
clf1 = get_fiducial_cl(s, *tracer_comb1, remove_be=True)
clf2 = get_fiducial_cl(s, *tracer_comb2, remove_be=True)
ndim, nbpw = cl1.shape
# This only runs if tracer_comb1 = tracer_comb2 (when the block covariance
# is invertible)
if (tracer_comb1[0] == tracer_comb1[1]) and (ndim == 3):
cov = cov.reshape((nbpw, 4, nbpw, 4))
cov = np.delete(np.delete(cov, 2, 1), 2, 3).reshape(3 * nbpw, -1)
cov_bm = cov_bm.reshape((nbpw, 4, nbpw, 4))
cov_bm = np.delete(np.delete(cov_bm, 2, 1), 2, 3).reshape(3 * nbpw, -1)
delta1 = (clf1 - cl1).flatten()
delta2 = (clf2 - cl2).flatten()
chi2 = delta1.dot(np.linalg.inv(cov)).dot(delta2)
chi2_bm = delta1.dot( | np.linalg.inv(cov_bm) | numpy.linalg.inv |
"""CmdStanPy-specific conversion code."""
import logging
import re
from collections import defaultdict
from copy import deepcopy
from pathlib import Path
import numpy as np
from ..rcparams import rcParams
from .base import dict_to_dataset, infer_stan_dtypes, make_attrs, requires
from .inference_data import InferenceData
_log = logging.getLogger(__name__)
class CmdStanPyConverter:
"""Encapsulate CmdStanPy specific logic."""
# pylint: disable=too-many-instance-attributes
def __init__(
self,
*,
posterior=None,
posterior_predictive=None,
predictions=None,
prior=None,
prior_predictive=None,
observed_data=None,
constant_data=None,
predictions_constant_data=None,
log_likelihood=None,
index_origin=None,
coords=None,
dims=None,
save_warmup=None,
dtypes=None,
):
self.posterior = posterior # CmdStanPy CmdStanMCMC object
self.posterior_predictive = posterior_predictive
self.predictions = predictions
self.prior = prior
self.prior_predictive = prior_predictive
self.observed_data = observed_data
self.constant_data = constant_data
self.predictions_constant_data = predictions_constant_data
self.log_likelihood = (
rcParams["data.log_likelihood"] if log_likelihood is None else log_likelihood
)
self.index_origin = index_origin
self.coords = coords
self.dims = dims
self.save_warmup = rcParams["data.save_warmup"] if save_warmup is None else save_warmup
import cmdstanpy # pylint: disable=import-error
if dtypes is None:
dtypes = {}
elif isinstance(dtypes, cmdstanpy.model.CmdStanModel):
model_code = dtypes.code()
dtypes = infer_stan_dtypes(model_code)
elif isinstance(dtypes, str):
dtypes_path = Path(dtypes)
if dtypes_path.exists():
with dtypes_path.open("r", encoding="UTF-8") as f_obj:
model_code = f_obj.read()
else:
model_code = dtypes
dtypes = infer_stan_dtypes(model_code)
self.dtypes = dtypes
if hasattr(self.posterior, "metadata"):
if self.log_likelihood is True and "log_lik" in self.posterior.metadata.stan_vars_cols:
self.log_likelihood = ["log_lik"]
elif hasattr(self.posterior, "stan_vars_cols"):
if self.log_likelihood is True and "log_lik" in self.posterior.stan_vars_cols:
self.log_likelihood = ["log_lik"]
else:
if (
self.log_likelihood is True
and self.posterior is not None
and any(name.split("[")[0] == "log_lik" for name in self.posterior.column_names)
):
self.log_likelihood = ["log_lik"]
if isinstance(self.log_likelihood, bool):
self.log_likelihood = None
self.cmdstanpy = cmdstanpy
@requires("posterior")
def posterior_to_xarray(self):
"""Extract posterior samples from output csv."""
if not (hasattr(self.posterior, "metadata") or hasattr(self.posterior, "stan_vars_cols")):
return self.posterior_to_xarray_pre_v_0_9_68()
if hasattr(self.posterior, "metadata"):
items = list(self.posterior.metadata.stan_vars_cols.keys())
else:
items = list(self.posterior.stan_vars_cols.keys())
if self.posterior_predictive is not None:
try:
items = _filter(items, self.posterior_predictive)
except ValueError:
pass
if self.predictions is not None:
try:
items = _filter(items, self.predictions)
except ValueError:
pass
if self.log_likelihood is not None:
try:
items = _filter(items, self.log_likelihood)
except ValueError:
pass
valid_cols = []
for item in items:
if hasattr(self.posterior, "metadata"):
valid_cols.extend(self.posterior.metadata.stan_vars_cols[item])
else:
valid_cols.extend(self.posterior.stan_vars_cols[item])
data, data_warmup = _unpack_fit(
self.posterior,
items,
self.save_warmup,
self.dtypes,
)
# copy dims and coords - Mitzi question: why???
dims = deepcopy(self.dims) if self.dims is not None else {}
coords = deepcopy(self.coords) if self.coords is not None else {}
return (
dict_to_dataset(
data,
library=self.cmdstanpy,
coords=coords,
dims=dims,
index_origin=self.index_origin,
),
dict_to_dataset(
data_warmup,
library=self.cmdstanpy,
coords=coords,
dims=dims,
index_origin=self.index_origin,
),
)
@requires("posterior")
def sample_stats_to_xarray(self):
"""Extract sample_stats from prosterior fit."""
return self.stats_to_xarray(self.posterior)
@requires("prior")
def sample_stats_prior_to_xarray(self):
"""Extract sample_stats from prior fit."""
return self.stats_to_xarray(self.prior)
def stats_to_xarray(self, fit):
"""Extract sample_stats from fit."""
if not (hasattr(fit, "metadata") or hasattr(fit, "sampler_vars_cols")):
return self.sample_stats_to_xarray_pre_v_0_9_68(fit)
dtypes = {
"divergent__": bool,
"n_leapfrog__": np.int64,
"treedepth__": np.int64,
**self.dtypes,
}
if hasattr(fit, "metadata"):
items = list(fit.metadata._method_vars_cols.keys()) # pylint: disable=protected-access
else:
items = list(fit.sampler_vars_cols.keys())
rename_dict = {
"divergent": "diverging",
"n_leapfrog": "n_steps",
"treedepth": "tree_depth",
"stepsize": "step_size",
"accept_stat": "acceptance_rate",
}
data, data_warmup = _unpack_fit(
fit,
items,
self.save_warmup,
self.dtypes,
)
for item in items:
name = re.sub("__$", "", item)
name = rename_dict.get(name, name)
data[name] = data.pop(item).astype(dtypes.get(item, float))
if data_warmup:
data_warmup[name] = data_warmup.pop(item).astype(dtypes.get(item, float))
return (
dict_to_dataset(
data,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
),
dict_to_dataset(
data_warmup,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
),
)
@requires("posterior")
@requires("posterior_predictive")
def posterior_predictive_to_xarray(self):
"""Convert posterior_predictive samples to xarray."""
return self.predictive_to_xarray(self.posterior_predictive, self.posterior)
@requires("prior")
@requires("prior_predictive")
def prior_predictive_to_xarray(self):
"""Convert prior_predictive samples to xarray."""
return self.predictive_to_xarray(self.prior_predictive, self.prior)
def predictive_to_xarray(self, names, fit):
"""Convert predictive samples to xarray."""
predictive = _as_set(names)
if hasattr(fit, "metadata") or hasattr(fit, "stan_vars_cols"):
data, data_warmup = _unpack_fit(
fit,
predictive,
self.save_warmup,
self.dtypes,
)
else: # pre_v_0_9_68
valid_cols = _filter_columns(fit.column_names, predictive)
data, data_warmup = _unpack_frame(
fit,
fit.column_names,
valid_cols,
self.save_warmup,
self.dtypes,
)
return (
dict_to_dataset(
data,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
),
dict_to_dataset(
data_warmup,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
),
)
@requires("posterior")
@requires("predictions")
def predictions_to_xarray(self):
"""Convert out of sample predictions samples to xarray."""
predictions = _as_set(self.predictions)
if hasattr(self.posterior, "metadata") or hasattr(self.posterior, "stan_vars_cols"):
data, data_warmup = _unpack_fit(
self.posterior,
predictions,
self.save_warmup,
self.dtypes,
)
else: # pre_v_0_9_68
columns = self.posterior.column_names
valid_cols = _filter_columns(columns, predictions)
data, data_warmup = _unpack_frame(
self.posterior,
columns,
valid_cols,
self.save_warmup,
self.dtypes,
)
return (
dict_to_dataset(
data,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
),
dict_to_dataset(
data_warmup,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
),
)
@requires("posterior")
@requires("log_likelihood")
def log_likelihood_to_xarray(self):
"""Convert elementwise log likelihood samples to xarray."""
log_likelihood = _as_set(self.log_likelihood)
if hasattr(self.posterior, "metadata") or hasattr(self.posterior, "stan_vars_cols"):
data, data_warmup = _unpack_fit(
self.posterior,
log_likelihood,
self.save_warmup,
self.dtypes,
)
else: # pre_v_0_9_68
columns = self.posterior.column_names
valid_cols = _filter_columns(columns, log_likelihood)
data, data_warmup = _unpack_frame(
self.posterior,
columns,
valid_cols,
self.save_warmup,
self.dtypes,
)
if isinstance(self.log_likelihood, dict):
data = {obs_name: data[lik_name] for obs_name, lik_name in self.log_likelihood.items()}
if data_warmup:
data_warmup = {
obs_name: data_warmup[lik_name]
for obs_name, lik_name in self.log_likelihood.items()
}
return (
dict_to_dataset(
data,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
skip_event_dims=True,
),
dict_to_dataset(
data_warmup,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
skip_event_dims=True,
),
)
@requires("prior")
def prior_to_xarray(self):
"""Convert prior samples to xarray."""
if hasattr(self.posterior, "metadata") or hasattr(self.prior, "stan_vars_cols"):
if hasattr(self.posterior, "metadata"):
items = list(self.prior.metadata.stan_vars_cols.keys())
else:
items = list(self.prior.stan_vars_cols.keys())
if self.prior_predictive is not None:
try:
items = _filter(items, self.prior_predictive)
except ValueError:
pass
data, data_warmup = _unpack_fit(
self.prior,
items,
self.save_warmup,
self.dtypes,
)
else: # pre_v_0_9_68
columns = self.prior.column_names
prior_predictive = _as_set(self.prior_predictive)
prior_predictive = _filter_columns(columns, prior_predictive)
invalid_cols = set(prior_predictive + [col for col in columns if col.endswith("__")])
valid_cols = [col for col in columns if col not in invalid_cols]
data, data_warmup = _unpack_frame(
self.prior,
columns,
valid_cols,
self.save_warmup,
self.dtypes,
)
return (
dict_to_dataset(
data,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
),
dict_to_dataset(
data_warmup,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
),
)
@requires("observed_data")
def observed_data_to_xarray(self):
"""Convert observed data to xarray."""
return dict_to_dataset(
self.observed_data,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
default_dims=[],
index_origin=self.index_origin,
)
@requires("constant_data")
def constant_data_to_xarray(self):
"""Convert constant data to xarray."""
return dict_to_dataset(
self.constant_data,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
default_dims=[],
index_origin=self.index_origin,
)
@requires("predictions_constant_data")
def predictions_constant_data_to_xarray(self):
"""Convert constant data to xarray."""
return dict_to_dataset(
self.predictions_constant_data,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
attrs=make_attrs(library=self.cmdstanpy),
default_dims=[],
index_origin=self.index_origin,
)
def to_inference_data(self):
"""Convert all available data to an InferenceData object.
Note that if groups can not be created (i.e., there is no `output`, so
the `posterior` and `sample_stats` can not be extracted), then the InferenceData
will not have those groups.
"""
return InferenceData(
save_warmup=self.save_warmup,
**{
"posterior": self.posterior_to_xarray(),
"sample_stats": self.sample_stats_to_xarray(),
"posterior_predictive": self.posterior_predictive_to_xarray(),
"predictions": self.predictions_to_xarray(),
"prior": self.prior_to_xarray(),
"sample_stats_prior": self.sample_stats_prior_to_xarray(),
"prior_predictive": self.prior_predictive_to_xarray(),
"observed_data": self.observed_data_to_xarray(),
"constant_data": self.constant_data_to_xarray(),
"predictions_constant_data": self.predictions_constant_data_to_xarray(),
"log_likelihood": self.log_likelihood_to_xarray(),
},
)
@requires("posterior")
def posterior_to_xarray_pre_v_0_9_68(self):
"""Extract posterior samples from output csv."""
columns = self.posterior.column_names
# filter posterior_predictive, predictions and log_likelihood
posterior_predictive = self.posterior_predictive
if posterior_predictive is None:
posterior_predictive = []
elif isinstance(posterior_predictive, str):
posterior_predictive = [
col for col in columns if posterior_predictive == col.split("[")[0].split(".")[0]
]
else:
posterior_predictive = [
col
for col in columns
if any(item == col.split("[")[0].split(".")[0] for item in posterior_predictive)
]
predictions = self.predictions
if predictions is None:
predictions = []
elif isinstance(predictions, str):
predictions = [col for col in columns if predictions == col.split("[")[0].split(".")[0]]
else:
predictions = [
col
for col in columns
if any(item == col.split("[")[0].split(".")[0] for item in predictions)
]
log_likelihood = self.log_likelihood
if log_likelihood is None:
log_likelihood = []
elif isinstance(log_likelihood, str):
log_likelihood = [
col for col in columns if log_likelihood == col.split("[")[0].split(".")[0]
]
else:
log_likelihood = [
col
for col in columns
if any(item == col.split("[")[0].split(".")[0] for item in log_likelihood)
]
invalid_cols = set(
posterior_predictive
+ predictions
+ log_likelihood
+ [col for col in columns if col.endswith("__")]
)
valid_cols = [col for col in columns if col not in invalid_cols]
data, data_warmup = _unpack_frame(
self.posterior,
columns,
valid_cols,
self.save_warmup,
self.dtypes,
)
return (
dict_to_dataset(
data,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
),
dict_to_dataset(
data_warmup,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
),
)
def sample_stats_to_xarray_pre_v_0_9_68(self, fit):
"""Extract sample_stats from fit."""
dtypes = {"divergent__": bool, "n_leapfrog__": np.int64, "treedepth__": np.int64}
columns = fit.column_names
valid_cols = [col for col in columns if col.endswith("__")]
data, data_warmup = _unpack_frame(
fit,
columns,
valid_cols,
self.save_warmup,
self.dtypes,
)
for s_param in list(data.keys()):
s_param_, *_ = s_param.split(".")
name = re.sub("__$", "", s_param_)
name = "diverging" if name == "divergent" else name
data[name] = data.pop(s_param).astype(dtypes.get(s_param, float))
if data_warmup:
data_warmup[name] = data_warmup.pop(s_param).astype(dtypes.get(s_param, float))
return (
dict_to_dataset(
data,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
),
dict_to_dataset(
data_warmup,
library=self.cmdstanpy,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
),
)
def _as_set(spec):
"""Uniform representation for args which be name or list of names."""
if spec is None:
return []
if isinstance(spec, str):
return [spec]
try:
return set(spec.values())
except AttributeError:
return set(spec)
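# Behavior sketch (illustrative names): _as_set(None) -> [], _as_set("log_lik") -> ["log_lik"],
# _as_set(["y_hat", "y_rep"]) -> {"y_hat", "y_rep"}, and for a dict the *values* are used:
# _as_set({"y": "log_lik_y"}) -> {"log_lik_y"}.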
def _filter(names, spec):
"""Remove names from list of names."""
if isinstance(spec, str):
names.remove(spec)
elif isinstance(spec, list):
for item in spec:
names.remove(item)
elif isinstance(spec, dict):
for item in spec.values():
names.remove(item)
return names
def _filter_columns(columns, spec):
"""Parse variable name from column label, removing element index, if any."""
return [col for col in columns if col.split("[")[0].split(".")[0] in spec]
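# E.g. (illustrative labels): _filter_columns(["y_hat[1]", "y_hat[2]", "mu.1", "lp__"], {"y_hat"})
# returns ["y_hat[1]", "y_hat[2]"], since each label is reduced to its base name before matching.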
def _unpack_fit(fit, items, save_warmup, dtypes):
"""Transform fit to dictionary containing ndarrays.
Parameters
----------
data: cmdstanpy.CmdStanMCMC
items: list
save_warmup: bool
dtypes: dict
Returns
-------
dict
key, values pairs. Values are formatted to shape = (chains, draws, *shape)
"""
num_warmup = 0
if save_warmup:
if not fit._save_warmup: # pylint: disable=protected-access
save_warmup = False
else:
num_warmup = fit.num_draws_warmup
nchains = fit.chains
draws = np.swapaxes(fit.draws(inc_warmup=save_warmup), 0, 1)
sample = {}
sample_warmup = {}
stan_vars_cols = fit.metadata.stan_vars_cols if hasattr(fit, "metadata") else fit.stan_vars_cols
sampler_vars_cols = (
fit.metadata._method_vars_cols # pylint: disable=protected-access
if hasattr(fit, "metadata")
else fit.sampler_vars_cols
)
for item in items:
if item in stan_vars_cols:
col_idxs = stan_vars_cols[item]
raw_draws = fit.stan_variable(item, inc_warmup=save_warmup)
raw_draws = np.swapaxes(
raw_draws.reshape((-1, nchains, *raw_draws.shape[1:]), order="F"), 0, 1
)
elif item in sampler_vars_cols:
col_idxs = sampler_vars_cols[item]
raw_draws = draws[..., col_idxs[0]]
else:
raise ValueError(f"fit data, unknown variable: {item}")
raw_draws = raw_draws.astype(dtypes.get(item))
if save_warmup:
sample_warmup[item] = raw_draws[:, :num_warmup, ...]
sample[item] = raw_draws[:, num_warmup:, ...]
else:
sample[item] = raw_draws
return sample, sample_warmup
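# Shape sketch for the dicts returned by _unpack_fit above (illustrative numbers): a Stan
# parameter declared as vector[3] theta, sampled with 4 chains and 1000 post-warmup draws,
# comes back as sample["theta"] with shape (4, 1000, 3).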
def _unpack_frame(fit, columns, valid_cols, save_warmup, dtypes):
"""Transform fit to dictionary containing ndarrays.
Called when fit object created by cmdstanpy version < 0.9.68
Parameters
----------
data: cmdstanpy.CmdStanMCMC
columns: list
valid_cols: list
save_warmup: bool
dtypes: dict
Returns
-------
dict
key, values pairs. Values are formatted to shape = (chains, draws, *shape)
"""
if save_warmup and not fit._save_warmup: # pylint: disable=protected-access
save_warmup = False
if hasattr(fit, "draws"):
data = fit.draws(inc_warmup=save_warmup)
if save_warmup:
num_warmup = fit._draws_warmup # pylint: disable=protected-access
data_warmup = data[:num_warmup]
data = data[num_warmup:]
else:
data = fit.sample
if save_warmup:
data_warmup = fit.warmup[: data.shape[0]]
draws, chains, *_ = data.shape
if save_warmup:
draws_warmup, *_ = data_warmup.shape
column_groups = defaultdict(list)
column_locs = defaultdict(list)
# iterate flat column names
for i, col in enumerate(columns):
if "." in col:
# parse parameter names e.g. X.1.2 --> X, (1,2)
col_base, *col_tail = col.split(".")
else:
# parse parameter names e.g. X[1,2] --> X, (1,2)
col_base, *col_tail = col.replace("]", "").replace("[", ",").split(",")
if len(col_tail):
# gather nD array locations
column_groups[col_base].append(tuple(map(int, col_tail)))
# gather raw data locations for each parameter
column_locs[col_base].append(i)
dims = {}
for colname, col_dims in column_groups.items():
# gather parameter dimensions (assumes dense arrays)
dims[colname] = tuple(np.array(col_dims).max(0))
sample = {}
sample_warmup = {}
valid_base_cols = []
# get list of parameters for extraction (basename) X.1.2 --> X
for col in valid_cols:
base_col = col.split("[")[0].split(".")[0]
if base_col not in valid_base_cols:
valid_base_cols.append(base_col)
# extract each wanted parameter to ndarray with correct shape
for key in valid_base_cols:
ndim = dims.get(key, None)
shape_location = column_groups.get(key, None)
if ndim is not None:
sample[key] = np.full((chains, draws, *ndim), np.nan)
if save_warmup:
sample_warmup[key] = np.full((chains, draws_warmup, *ndim), np.nan)
if shape_location is None:
# reorder draw, chain -> chain, draw
(i,) = column_locs[key]
sample[key] = np.swapaxes(data[..., i], 0, 1)
if save_warmup:
sample_warmup[key] = np.swapaxes(data_warmup[..., i], 0, 1)
else:
for i, shape_loc in zip(column_locs[key], shape_location):
# location to insert extracted array
shape_loc = tuple([Ellipsis] + [j - 1 for j in shape_loc])
# reorder draw, chain -> chain, draw and insert to ndarray
sample[key][shape_loc] = | np.swapaxes(data[..., i], 0, 1) | numpy.swapaxes |
import numpy as np
# version 03.10.2019
# solveODE(f, y0, Ttrans, Teval, dt, outSteps, method) solves ODEs for their solution sol
#
# arguments:
# f : right hand side of ode, function object, returns float or numpy array
# y0 : initial values,
# for one-dimensional ODEs: int or float
# for n-dimensional ODEs: list or numpy array
# Ttrans : transient time, float
# Teval : evaluation time, float
# dt : integration time step, float
# outSteps : store every outSteps-th step in sol, integer
# method : method for numerical integration, string
class ODE_Solver:
# constructor
def __init__ (self):
pass
# numerical method functions perform integration step for given right-hand side f of ode
# performs explicit Euler step
# convergence order 1
def explicitEuler (self, f, y, t, dt):
return (y + f(y, t) * dt)
# performs implicit Euler step with fixed point iteration
# convergence order 1
def implicitEulerFPI (self, f, y, t, dt, tol = 1e-10):
x = y
x_prev = x + 2.0 * tol
j = 0
while np.linalg.norm(x - x_prev) >= tol and j < 15: # TODO: raise an error if the fixed-point iteration does not converge
j += 1
x_prev = x
x = y + f(x, t) * dt
return (x)
# performs explicit midpoint step
# convergence order 2
def explicitMidpoint (self, f, y, t, dt):
k1 = f(y, t)
k2 = f(y + k1 * dt / 2.0, t + dt / 2.0)
return (y + k2 * dt)
# performs explicit Runge-Kutta step of stage 2
# convergence order 2
def explicitHeun (self, f, y, t, dt):
k1 = f(y, t)
k2 = f(y + k1 * dt, t + dt)
return (y + (k1 + k2) * dt / 2.0)
# performs explicit Runge-Kutta step of stage 4
# convergence order 4
def explicitRungeKutta4 (self, f, y, t, dt):
k1 = f(y, t)
k2 = f(y + k1 * dt / 2.0, t + dt / 2.0)
k3 = f(y + k2 * dt / 2.0, t + dt / 2.0)
k4 = f(y + k3 * dt, t + dt)
return (y + (k1 + 2.0 * (k2 + k3) + k4) * dt / 6.0)
def RungeKutta54_coefficients (self):
# stage
s = 7
c = np.array([0.0, 1.0 / 5.0, 3.0 / 10.0, 4.0 / 5.0, 8.0 / 9.0, 1.0, 1.0])
A = np.matrix([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0 / 5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[3.0 / 40.0, 9.0 / 40.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[44.0 / 45.0, -56.0 / 15.0, 32.0 / 9.0, 0.0, 0.0, 0.0, 0.0],
[19372.0 / 6561.0, -25360.0 / 2187.0, 64448.0 / 6561.0, -212.0 / 729.0, 0.0, 0.0, 0.0],
[9017.0 / 3168.0, -355.0 / 33.0, 46732.0 / 5247.0, 49.0 / 176.0, -5103.0 / 18656.0, 0.0, 0.0],
[35.0 / 384.0, 0.0, 500.0 / 1113.0, 125.0 / 192.0, -2187.0 / 6784.0, 11.0 / 84.0, 0.0]])
# lower-order solution weights
b1 = np.array([5179.0 / 57600.0, 0.0, 7571.0 / 16695.0, 393.0 / 640.0, -92097.0 / 339200.0, 187.0 / 2100.0, 1.0 / 40.0])
# higher-order solution weights
b2 = np.array([35.0 / 384.0, 0.0, 500.0 / 1113.0, 125.0 / 192.0, -2187.0 / 6784.0, 11.0 / 84.0, 0.0])
return (s, A, b1, b2, c)
# def RungeKutta4 (self, f, y, t, dt):
# returns time vector Tvec and solution matrix sol for solved ode
def solveODE (self, f, y0, Ttrans, Teval, dt = 1e-4, outSteps = 1,
method = "explicitEuler"):
if Ttrans + Teval <= dt:
raise ValueError("Time step dt should be greater than zero and smaller than integration time.")
numberOfSteps = int((Ttrans + Teval) / dt)
skipSteps = int(Ttrans / dt)
# initialize time vector Tvec
Tvec = np.arange(0.0, Ttrans + Teval, dt)
# time at beginning of computation
t = Tvec[0]
# initial condition is casted to numpy-array
y0 = np.array(y0)
# initialize solution matrix sol
sol = np.zeros((y0.size, numberOfSteps))
# write initial condition into solution matrix
sol[:, 0] = y0
y = y0
# numerical integration
if method == "RungeKutta54":
raise ValueError("Dormand-Prince methods are not implemented yet.")
# s, A, b1, b2, c = self.RungeKutta54_coefficients()
#
# k1 = np.zeros(s)
# k1[0] = f(y, t)
# k2 = k1
#
# for i in range(1, k1):
# k1[i] = f(y + dt * A[i, :i] @ k1[:i], t + c[i] * dt)
#
# y1 = y + dt * b1 @ k1
#
# for i in range(1, k2):
# k2[i] = f(y + dt * A[i, :i] @ k2[:i], t + c[i] * dt)
#
# y2 = y + dt * b2 @ k2
else:
for i in range(1, numberOfSteps):
if method == "explicitEuler":
y = self.explicitEuler(f, y, t, dt)
elif method == "implicitEulerFPI":
y = self.implicitEulerFPI(f, y, t, dt, 1e-10)
elif method == "explicitMidpoint":
y = self.explicitMidpoint(f, y, t, dt)
elif method == "explicitHeun":
y = self.explicitHeun(f, y, t, dt)
elif method == "explicitRungeKutta4":
y = self.explicitRungeKutta4(f, y, t, dt)
else:
raise ValueError("Choose numerical integration method from {explicitEuler, implicitEulerFPI, explicitMidpoint, explicitHeun, explicitRungeKutta4}")
sol[:, i] = y
t = Tvec[i]
return (Tvec[skipSteps::outSteps], sol[:, skipSteps::outSteps])
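# A minimal usage sketch for ODE_Solver (the damped oscillator below is an illustrative
# right-hand side, not part of the original module): f must accept (y, t) and return an
# array with the same shape as y, matching the solveODE signature above.
if __name__ == "__main__":
    solver = ODE_Solver()

    def damped_oscillator(y, t, gamma=0.1):
        # y[0] is position, y[1] is velocity
        return np.array([y[1], -y[0] - gamma * y[1]])

    Tvec, sol = solver.solveODE(damped_oscillator, y0=[1.0, 0.0], Ttrans=0.0,
                                Teval=10.0, dt=1e-2, outSteps=10,
                                method="explicitRungeKutta4")
    print(Tvec.shape, sol.shape)  # output covers Teval, thinned by outSteps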
# solveDDE(f, history, derivatives, Ttrans, Teval, tau, dt, outSteps, method) solves DDEs for their solution sol
#
# arguments:
# f : right hand side of dde, function object, returns numpy array
# history : history array on the interval [-tau, 0], numpy array
# derivatives : array with derivative information on the interval [-tau, 0], numpy array
# Ttrans : transient time, float
# Teval : evaluation time, float
# tau : delay time, float
# dt : integration time step, float
# outSteps : store every outSteps-th step in sol, integer
# method : method for numerical integration, string
class DDE_Solver ():
# constructor
def __init__ (self):
pass
# numerical method functions perform integration step for given right-hand side f of dde
# performs explicit Euler step
# convergence order 1
def explicitEuler (self, f, y, yd, t, dt):
return (y + dt * f(y, yd, t))
# performs explicit Runge-Kutta step of stage 4
# convergence order 4
def explicitRungeKutta4 (self, f, y, p0, p1, m0, m1, t, dt):
# p05 is evaluated cubic spline interpolation between p0 and p1
p05 = 0.5 * (p0 + p1) + 0.125 * (m0 - m1)
k1 = f(y, p0, t)
k2 = f(y + dt * k1 / 2.0, p05, t + dt / 2.0)
k3 = f(y + dt * k2 / 2.0, p05, t + dt / 2.0)
k4 = f(y + dt * k3, p1, t + dt)
return (y + dt * (k1 + 2.0 * (k2 + k3) + k4) / 6.0)
# returns time vector Tvec and solution matrix sol for solved dde
def solveDDE (self, f, history, derivatives, Ttrans, Teval, tau, dt = 1e-4,
outSteps = 1, method = "explicitEuler"):
if Ttrans + Teval <= dt:
raise ValueError("Time step dt should be greater than zero and smaller than integration time.")
if Ttrans + Teval <= tau:
raise ValueError("Integration time should be greater than delay time tau.")
numberOfSteps = int((Ttrans + Teval) / dt)
skipSteps = int(Ttrans / dt)
n_tau = int(tau / dt)
# get system dimension
n = history.shape[0]
# initialize time vector Tvec
Tvec = np.zeros(n_tau + numberOfSteps)
# fill Tvec while t is in [-tau, 0]
for i in range(n_tau):
Tvec[i] = -tau + i * dt
# time at beginning of computation
t = Tvec[-1]
# fill Tvec while t is in [0, Ttrans + Teval]
Tvec[n_tau:] = np.arange(0.0, Ttrans + Teval, dt)
# initialize solution matrix sol and derivatives deriv
sol = np.zeros((n, n_tau + numberOfSteps))
deriv = np.zeros((n, n_tau + numberOfSteps))
# fill sol with history and deriv with derivatives information
sol[:, :n_tau + 1] = history
deriv[:, :n_tau + 1] = derivatives
y = history[:, -1]
# numerical integration
for i in range(n_tau, numberOfSteps + n_tau):
t = Tvec[i]
p0 = sol[:, i - n_tau]
if method == "explicitEuler":
y = self.explicitEuler(f, y, p0, t, dt)
elif method == "explicitRungeKutta4":
p1 = sol[:, i - n_tau + 1]
m0 = deriv[:, i - n_tau]
m1 = deriv[:, i - n_tau + 1]
y = self.explicitRungeKutta4(f, y, p0, p1, m0, m1, t, dt)
deriv[:, i] = f(y, p1, t)
else:
raise ValueError("Choose numerical integration method from {explicitEuler, explicitRungeKutta4}")
sol[:, i] = y
return (Tvec[skipSteps::outSteps], sol[:, skipSteps::outSteps])
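# Minimal usage sketch for DDE_Solver (illustrative only): the delayed-logistic
# right-hand side, the constant history and the parameter values below are
# assumptions for demonstration and are not part of the solver itself.
# The right-hand side must have the signature f(y, y_delayed, t).
def demo_solveDDE():
    tau, dt = 1.0, 1e-3
    n_tau = int(tau / dt)
    history = 0.5 * np.ones((1, n_tau + 1))   # y(t) = 0.5 on [-tau, 0]
    derivatives = np.zeros((1, n_tau + 1))    # constant history, so zero slope
    f = lambda y, yd, t: y * (1.0 - yd)       # Hutchinson / delayed logistic equation
    solver = DDE_Solver()
    return solver.solveDDE(f, history, derivatives, Ttrans=0.0, Teval=10.0,
                           tau=tau, dt=dt, method="explicitRungeKutta4")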
# solveSDE(f, y0, Ttrans, Teval, dt, outSteps, method) solves SDEs for their solution sol
#
# arguments:
# f : right hand side of sde, function object, returns float or numpy array
# y0 : initial values, list or numpy array
# Ttrans : transient time, float
# Teval : evaluation time, float
# dt : integration time step, float
# outSteps : store every outSteps-th step in sol, integer
# method : choose method for numerical integration, string
class SDE_Solver ():
# constructor
def __init__ (self):
pass
# Wiener process, second argument of normal() is standard deviation
# n = 1 for scalar noise
def dW (self, sqrt_dt, n = 1):
        return (np.random.normal(loc=0.0, scale=sqrt_dt, size=n))
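# Illustrative sketch only (not part of the original class): how dW is typically
# consumed in a single Euler-Maruyama update y_{k+1} = y_k + a(y,t)*dt + b(y,t)*dW.
# The numpy-array state y and the drift/diffusion callables are assumptions
# supplied by the caller.
def demo_euler_maruyama_step(solver, y, t, dt, drift, diffusion):
    dW = solver.dW(np.sqrt(dt), n=y.size)
    return y + drift(y, t) * dt + diffusion(y, t) * dW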
# AUTOGENERATED! DO NOT EDIT! File to edit: dev/01_dataset_ucf101.ipynb (unless otherwise specified).
__all__ = ['UCF101', 'SingleFrameDataset', 'BatchShower', 'SequenceDataset', 'SequenceBatchShower']
# Cell
import numpy as np
import pathlib
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import utils
import torchvision.transforms as transforms
import torch
import subprocess
import wget
from .avi import AVI
# Cell
class UCF101:
def __init__(self, base_directory=''):
"""
Args:
base_directory: main data folder (e.g. ../data/UCF101)
"""
self.base_directory = pathlib.Path(base_directory)
def getFileList(self, data_type='train', remove_classname = False):
"""
This function uses a text file provided with the dataset
which lists all of the relative paths for the videos for the train/test split.
Args:
data_type: 'train' | 'test'
remove_classname: if True does not include the class name in the filenames.
Returns:
X is a tuple.
The first element is a numpy array with the absolute
filepaths for the videos for training.
The second element is a numpy array of class indices (0-100).
class_names is a list of the action categories.
"""
base_directory = self.base_directory
#print(f'[getFileList] Reading data from: {base_directory}')
# action class labels
class_file = open(base_directory/'annotations/ucfTrainTestlist/classInd.txt','r')
lines = class_file.readlines()
lines = [line.split(' ')[1].strip() for line in lines]
class_file.close()
class_names = np.asarray(lines)
if data_type == 'train':
# training data
train_file = open(base_directory/'annotations/ucfTrainTestlist/trainlist01.txt','r')
lines = train_file.readlines()
if remove_classname:
filenames = ['/UCF-101/' + line.split(' ')[0].split('/')[1] for line in lines]
else:
filenames = ['/UCF-101/' + line.split(' ')[0] for line in lines]
y_train = [int(line.split(' ')[1].strip())-1 for line in lines]
y_train = np.asarray(y_train)
filenames = [base_directory.as_posix() + filename for filename in filenames]
train_file.close()
train = (np.asarray(filenames),y_train)
X = train
print('Number of training files:', len(X[0]))
else:
# testing data
test_file = open(base_directory/'annotations/ucfTrainTestlist/testlist01.txt','r')
lines = test_file.readlines()
filenames = ['/UCF-101/' + line.split(' ')[0].strip() for line in lines]
classnames = [filename.split('/')[2] for filename in filenames]
if remove_classname:
# remove the class name from the filename if needed.
filenames = ['/UCF-101/' + line.split(' ')[0].split('/')[1].strip() for line in lines]
y_test = [np.where(classname == class_names)[0][0] for classname in classnames]
y_test = np.asarray(y_test)
filenames = [base_directory.as_posix() + filename for filename in filenames]
test_file.close()
test = (np.asarray(filenames),y_test)
X = test
print('Number of validation files:', len(X[0]))
#print('[getFileList] Done.')
return X, class_names
def downloadData(self):
"""
Downloads all zip files of the UCF101 dataset.
"""
target_dir = self.base_directory
print(f'[downloadData] 1/2 Beginning file download to {target_dir}')
        compressed_dir = target_dir / 'compressed'
        compressed_dir.mkdir(parents=True, exist_ok=True)
        annotations_dir = target_dir / 'annotations'
        annotations_dir.mkdir(parents=True, exist_ok=True)
        destination_dir = target_dir / 'UCF-101'
        destination_dir.mkdir(parents=True, exist_ok=True)
# download annotations for action recognition
if pathlib.Path(compressed_dir/'UCF101TrainTestSplits-RecognitionTask.zip').exists():
print ("[downloadData]File UCF101TrainTestSplits-RecognitionTask.zip exists.")
else:
annotation_url = 'https://www.crcv.ucf.edu/data/UCF101/UCF101TrainTestSplits-RecognitionTask.zip'
filename = wget.download(annotation_url, out=compressed_dir.as_posix(), bar=wget.bar_adaptive)
print(f'[downloadData]File downloaded to {filename}')
if pathlib.Path(compressed_dir/'UCF101TrainTestSplits-DetectionTask.zip').exists():
print ("[downloadData]File UCF101TrainTestSplits-DetectionTask.zip exists.")
else:
# download annotations for action detection
annotation_url = 'https://www.crcv.ucf.edu/data/UCF101/UCF101TrainTestSplits-DetectionTask.zip'
filename =wget.download(annotation_url, out=compressed_dir.as_posix(), bar=wget.bar_adaptive)
print(f'[downloadData]File downloaded to {filename}')
# download videos
if pathlib.Path(compressed_dir/'UCF101.rar').exists():
print ("[downloadData]File UCF101.rar exists.")
else:
video_url = 'https://www.crcv.ucf.edu/data/UCF101/UCF101.rar'
filename =wget.download(video_url, out=compressed_dir.as_posix(), bar=wget.bar_adaptive)
print(f'[downloadData]File downloaded to {filename}')
print('[downloadData] Done.\n')
def extractData(self):
"""
Extracts all zip files of the UCF101 dataset.
It does system calls and it needs unrar (apt-get install unrar-free)
"""
target_dir = self.base_directory
print('[extractData] Extracting data...')
target_dir = pathlib.Path(target_dir)
compressed_dir = pathlib.Path(target_dir/'compressed')
compressed_dir.mkdir(parents=True, exist_ok=True)
annotations_dir = pathlib.Path(target_dir/'annotations')
annotations_dir.mkdir(parents=True, exist_ok=True)
destination_dir = pathlib.Path(target_dir/'UCF-101')
destination_dir.mkdir(parents=True, exist_ok=True)
try:
bash_cmd = 'unrar ' + target_dir.as_posix() + '/UCF101.rar' + ' ' + target_dir.as_posix() + '/UCF-101'
print(bash_cmd)
process = subprocess.Popen(bash_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print(output)
except Exception as e:
print(e)
print()
bash_cmd = 'cp ' + target_dir.as_posix() + '/compressed/UCF101TrainTestSplits-RecognitionTask.zip ' + annotations_dir.as_posix() + '/UCF101TrainTestSplits-RecognitionTask.zip'
print(bash_cmd)
process = subprocess.Popen(bash_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if len(output) > 0: print(output)
if len(error) > 0: print(error)
print()
bash_cmd = 'unzip ' + annotations_dir .as_posix() + '/UCF101TrainTestSplits-RecognitionTask.zip -d ' + annotations_dir.as_posix()
print(bash_cmd)
process = subprocess.Popen(bash_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if len(output) > 0: print(output)
if len(error) > 0: print(error)
print()
bash_cmd = 'cp ' + target_dir.as_posix() + '/compressed/UCF101TrainTestSplits-DetectionTask.zip ' + annotations_dir.as_posix() + '/UCF101TrainTestSplits-DetectionTask.zip'
print(bash_cmd)
process = subprocess.Popen(bash_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if len(output) > 0: print(output)
if len(error) > 0: print(error)
print()
bash_cmd = 'unzip ' + annotations_dir.as_posix() + '/UCF101TrainTestSplits-DetectionTask.zip -d ' + annotations_dir.as_posix()
print(bash_cmd)
process = subprocess.Popen(bash_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if len(output) > 0: print(output)
if len(error) > 0: print(error)
print()
bash_cmd = 'rm ' + target_dir.as_posix() + '/annotations/*.zip'
print(bash_cmd)
process = subprocess.Popen(bash_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if len(output) > 0: print(output)
if len(error) > 0: print(error)
print()
print('[extractData] Done.')
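# Illustrative usage sketch (the base directory below is an assumption): list the
# training split and print the first video path with its action class.
def demo_ucf101_file_list():
    ucf = UCF101('../data/UCF101')
    (filenames, labels), class_names = ucf.getFileList(data_type='train')
    print(filenames[0], class_names[labels[0]])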
# Cell
class SingleFrameDataset(Dataset):
"""Single frame dataset for the UCF101."""
def __init__(self, dataset_path, training=True, transform=None):
"""
Args:
file_list: list of files as a numpy array.
labels: one entry per filename in the list of files as a numpy array.
train: flag to say whether train or test dataset is used.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
        self.training = training
ucf = UCF101(dataset_path)
X, class_names = ucf.getFileList(data_type='train' if training else 'test')
self.file_list, self.labels = X[0], X[1]
self.class_names = class_names
self.num_classes = len(self.class_names)
self.transform = transform
def getClassName(self, idx):
return self.class_names[idx]
def __len__(self):
return len(self.file_list)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
video_list = self.file_list[idx]
#video_list = self.file_list[idx % len(self)] # wraps up if an out of index idx is used.
avi = AVI(video_list)
frame = avi.getRandomFrame()
label = self.labels[idx]
label = np.array([label])
if self.transform:
# frame = frame.transpose(2,0,1)
frame = self.transform(frame)
return frame, label
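# Illustrative usage sketch (dataset path, batch size and transform are assumptions):
def demo_single_frame_loader():
    ds = SingleFrameDataset('../data/UCF101', training=True,
                            transform=transforms.ToTensor())
    dl = DataLoader(ds, batch_size=4, shuffle=True)
    frames, labels = next(iter(dl))  # frames: (4, C, H, W), labels: (4, 1)
    return frames.shape, labels.shape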
# Cell
class BatchShower:
def __init__(self, dl):
self.dl = dl
def showBatch(self, idx, scale=1):
"""Loops through the dataloader and shows only one batch (idx)"""
assert idx >= 0 and idx <= np.floor(len(self.dl.dataset)/self.dl.batch_size), "selected batch index out of batch size range: [0, %d]" % np.floor(len(self.dl.dataset)/self.dl.batch_size)
for i_batch, sample_batched in enumerate(self.dl):
# print(i_batch, sample_batched[0].size())
# observe the idx-th batch and stop
if i_batch == idx:
plt.figure(figsize=(10,10))
image, label = sample_batched[0], sample_batched[1]
class_name = self.dl.dataset.getClassName(sample_batched[1])
self.showThisBatch(image, label, scale)
print(class_name.tolist())
plt.axis('off')
plt.ioff()
plt.show()
break
def showThisBatch(self, images_batch, labels_batch, scale=1):
"""Show image for a batch of samples.
Must be tensors of size (bs x w x h x channels).
"""
batch_size = len(images_batch)
im_size = images_batch.size()
ncols = int(np.ceil(np.sqrt(batch_size)))
for i in range(batch_size):
ax = plt.subplot(ncols, ncols, i+1)
if type(images_batch[i]) == torch.Tensor:
frame = images_batch[i].data.numpy()
frame = frame/255.
if frame.shape[0] <= 3:
frame = frame.transpose(1, 2, 0)
frame_v_mean = np.mean(frame)
frame = scale*frame
frame[frame<0] = 0
if np.mean(frame) < 2:
frame[frame>1] = 1
else:
frame[frame>255] = 255
plt.imshow(frame)
# plt.tight_layout()
ax.axis('off')
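# Illustrative sketch: show the first batch of a DataLoader built over one of the
# datasets above.
def demo_show_batch(dl):
    BatchShower(dl).showBatch(idx=0, scale=1)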
# Cell
class SequenceDataset(Dataset):
"""Sequence based dataset for the UCF101.
Output is of shape:
seq_len, H, W, C
Note that when this is passed onto a DataLoader with toTensor() transform, it changes its shape to:
batch_size, seq_length, C, H, W
"""
def __init__(self, dataset_path, sequence_length, sample_interval=1, training=True, transform=None):
"""
Args:
file_list: list of files as a numpy array.
labels: one entry per filename in the list of files as a numpy array.
train: flag to say whether train or test dataset is used.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
        self.training = training
self.sequence_length = sequence_length
self.sample_interval = sample_interval
ucf = UCF101(dataset_path)
X, class_names = ucf.getFileList(data_type='train' if training else 'test')
self.file_list, self.labels = X[0], X[1]
self.class_names = class_names
self.num_classes = len(self.class_names)
self.transform = transform
def getClassName(self, idx):
return self.class_names[idx]
def __len__(self):
return len(self.file_list)
def __getitem__(self, idx):
"""
returns:
- sequence: list of frames of length self.sequence_length
"""
if torch.is_tensor(idx):
idx = idx.tolist()
#video_list = self.file_list[idx]
video_list = self.file_list[idx % len(self)] # wraps up if an out of index idx is used.
avi = AVI(video_list, verbose=False) # set verbose to True might help debugging.
frames = avi.getRandomSequence(self.sequence_length, self.sample_interval)
# frames is a numpy matrix of shape seq_len, H, W, C
label = self.labels[idx]
        label = np.array([label])
import os, os.path
import shutil
import numpy as np
import chroma.event as event
from chroma.tools import count_nonzero
from chroma.rootimport import ROOT
import array
# Check if we have already imported the ROOT class due to a user's
# rootlogon.C script
if not hasattr(ROOT, 'Vertex') or not hasattr(ROOT, 'Channel'):
print('Setting up ROOT datatypes.')
# Create .chroma directory if it doesn't exist
chroma_dir = os.path.expanduser('~/.chroma')
if not os.path.isdir(chroma_dir):
if os.path.exists(chroma_dir):
raise Exception('$HOME/.chroma file exists where directory should be')
else:
os.mkdir(chroma_dir)
# Check if latest ROOT file is present
package_root_C = os.path.join(os.path.dirname(__file__), 'root.C')
home_root_C = os.path.join(chroma_dir, 'root.C')
if not os.path.exists(home_root_C) or \
os.stat(package_root_C).st_mtime > os.stat(home_root_C).st_mtime:
shutil.copy2(src=package_root_C, dst=home_root_C)
# ACLiC problem with ROOT
# see http://root.cern.ch/phpBB3/viewtopic.php?f=3&t=14280&start=15
# no longer an issue for root 6+
# ROOT.gSystem.Load('libCint')
# Import this C file for access to data structure
ROOT.gROOT.ProcessLine('.L '+home_root_C+'+')
def tvector3_to_ndarray(vec):
'''Convert a ROOT.TVector3 into a numpy np.float32 array'''
return np.array((vec.X(), vec.Y(), vec.Z()), dtype=np.float32)
def make_photon_with_arrays(size):
'''Returns a new chroma.event.Photons object for `size` number of
photons with empty arrays set for all the photon attributes.'''
return event.Photons(pos=np.empty((size,3), dtype=np.float32),
dir=np.empty((size,3), dtype=np.float32),
pol=np.empty((size,3), dtype=np.float32),
wavelengths=np.empty(size, dtype=np.float32),
t=np.empty(size, dtype=np.float32),
flags=np.empty(size, dtype=np.uint32),
last_hit_triangles=np.empty(size, dtype=np.int32))
def root_vertex_to_python_vertex(vertex):
"Returns a chroma.event.Vertex object from a root Vertex object."
if len(vertex.step_x):
n = len(vertex.step_x)
steps = event.Steps(np.empty(n),np.empty(n),np.empty(n),np.empty(n),
                            np.empty(n),np.empty(n),np.empty(n)
"""
Proto
Contains the following library code useful for prototyping robotic algorithms:
- YAML
- TIME
- PROFILING
- MATHS
- LINEAR ALGEBRA
- GEOMETRY
- LIE
- TRANSFORM
- MATPLOTLIB
- CV
- DATASET
- FILTER
- STATE ESTIMATION
- CALIBRATION
- SIMULATION
- UNITTESTS
"""
import os
import sys
import glob
import math
import time
import copy
import random
import pickle
import json
import signal
from datetime import datetime
from pathlib import Path
from enum import Enum
from dataclasses import dataclass
from collections import namedtuple
from types import FunctionType
from typing import Optional
import cv2
import yaml
import numpy as np
import scipy
import scipy.sparse
import scipy.sparse.linalg
import pandas
import cProfile
from pstats import Stats
###############################################################################
# YAML
###############################################################################
def load_yaml(yaml_path):
""" Load YAML and return a named tuple """
assert yaml_path is not None
assert yaml_path != ""
# Load yaml_file
yaml_data = None
with open(yaml_path, "r") as stream:
yaml_data = yaml.safe_load(stream)
# Convert dict to named tuple
data = json.dumps(yaml_data) # Python dict to json
data = json.loads(
data, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))
return data
###############################################################################
# TIME
###############################################################################
def sec2ts(time_s):
""" Convert time in seconds to timestamp """
return int(time_s * 1e9)
def ts2sec(ts):
""" Convert timestamp to seconds """
return ts * 1e-9
###############################################################################
# PROFILING
###############################################################################
def profile_start():
""" Start profile """
prof = cProfile.Profile()
prof.enable()
return prof
def profile_stop(prof, **kwargs):
""" Stop profile """
key = kwargs.get('key', 'cumtime')
N = kwargs.get('N', 10)
stats = Stats(prof)
stats.strip_dirs()
stats.sort_stats(key).print_stats(N)
###############################################################################
# MATHS
###############################################################################
from math import pi
from math import isclose
from math import sqrt
# from math import floor
from math import cos
from math import sin
from math import tan
from math import acos
from math import atan
def rmse(errors):
""" Root Mean Squared Error """
return np.sqrt(np.mean(errors**2))
###############################################################################
# LINEAR ALGEBRA
###############################################################################
from numpy import rad2deg
from numpy import deg2rad
from numpy import sinc
from numpy import zeros
from numpy import ones
from numpy import eye
from numpy import trace
from numpy import diagonal as diag
from numpy import cross
from numpy.linalg import norm
from numpy.linalg import inv
from numpy.linalg import pinv
from numpy.linalg import matrix_rank as rank
from numpy.linalg import eig
from numpy.linalg import svd
from numpy.linalg import cholesky as chol
def normalize(v):
""" Normalize vector v """
n = np.linalg.norm(v)
if n == 0:
return v
return v / n
def full_rank(A):
""" Check if matrix A is full rank """
return rank(A) == A.shape[0]
def skew(vec):
""" Form skew-symmetric matrix from vector `vec` """
assert vec.shape == (3,) or vec.shape == (3, 1)
x, y, z = vec
return np.array([[0.0, -z, y], [z, 0.0, -x], [-y, x, 0.0]])
def skew_inv(A):
""" Form skew symmetric matrix vector """
assert A.shape == (3, 3)
return np.array([A[2, 1], A[0, 2], A[1, 0]])
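# Quick sketch of the identities the two helpers above encode:
# skew(a) @ b equals cross(a, b), and skew_inv(skew(a)) recovers a.
def demo_skew_identities():
    """ Demonstrate skew() / skew_inv() identities """
    a = np.array([1.0, 2.0, 3.0])
    b = np.array([-1.0, 0.5, 2.0])
    assert np.allclose(skew(a) @ b, cross(a, b))
    assert np.allclose(skew_inv(skew(a)), a)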
def fwdsubs(L, b):
    """
    Solving a lower triangular system by forward-substitution
    Input matrix L is an n by n lower triangular matrix
    Input vector b is n by 1
    Output vector x is the solution to the linear system
    L x = b
    """
    assert L.shape[1] == b.shape[0]
    n = b.shape[0]
    x = zeros((n, 1))
    for j in range(n):
        if L[j, j] == 0:
            raise RuntimeError('Matrix is singular!')
        x[j] = b[j] / L[j, j]
        b[j:n] = b[j:n] - L[j:n, j:j + 1] * x[j]
    return x
def bwdsubs(U, b):
    """
    Solving an upper triangular system by back-substitution
    Input matrix U is an n by n upper triangular matrix
    Input vector b is n by 1
    Output vector x is the solution to the linear system
    U x = b
    """
    assert U.shape[1] == b.shape[0]
    n = b.shape[0]
    x = zeros((n, 1))
    for j in reversed(range(n)):
        if U[j, j] == 0:
            raise RuntimeError('Matrix is singular!')
        x[j] = b[j] / U[j, j]
        b[0:j] = b[0:j] - U[0:j, j:j + 1] * x[j]
    return x
def solve_svd(A, b):
"""
Solve Ax = b with SVD
"""
# compute svd of A
U, s, Vh = svd(A)
# U diag(s) Vh x = b <=> diag(s) Vh x = U.T b = c
c = np.dot(U.T, b)
# diag(s) Vh x = c <=> Vh x = diag(1/s) c = w (trivial inversion of a diagonal matrix)
w = np.dot(np.diag(1 / s), c)
# Vh x = w <=> x = Vh.H w (where .H stands for hermitian = conjugate transpose)
x = np.dot(Vh.conj().T, w)
return x
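# Sketch: on a small well-conditioned system, solve_svd() should reproduce the
# result of a direct solve (the numbers below are arbitrary demo values).
def demo_solve_svd():
    """ Demonstrate solve_svd() on a 2x2 system """
    A = np.array([[3.0, 1.0], [1.0, 2.0]])
    b = np.array([9.0, 8.0])
    x = solve_svd(A, b)
    assert np.allclose(A @ x, b)
    return x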
def schurs_complement(H, g, m, r, precond=False):
    """ Schurs-complement """
    assert H.shape[0] == (m + r)
    # H = [Hmm, Hmr
    #      Hrm, Hrr];
    Hmm = H[0:m, 0:m]
    Hmr = H[0:m, m:]
    Hrm = Hmr.T
    Hrr = H[m:, m:]
    # g = [gmm, grr]
    gmm = g[:m]
    grr = g[m:]
    # Precondition Hmm
    if precond:
        Hmm = 0.5 * (Hmm + Hmm.T)
    # Invert Hmm
    assert rank(Hmm) == Hmm.shape[0]
    (w, V) = eig(Hmm)
    W_inv = np.diag(1.0 / w)
    Hmm_inv = V @ W_inv @ V.T
    # Schurs complement
    H_marg = Hrr - Hrm @ Hmm_inv @ Hmr
    g_marg = grr - Hrm @ Hmm_inv @ gmm
    return (H_marg, g_marg)
def is_pd(B):
"""Returns true when input is positive-definite, via Cholesky"""
try:
_ = chol(B)
return True
except np.linalg.LinAlgError:
return False
def nearest_pd(A):
"""Find the nearest positive-definite matrix to input
A Python/Numpy port of <NAME>'s `nearestSPD` MATLAB code [1], which
credits [2].
[1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
[2] <NAME>, "Computing a nearest symmetric positive semidefinite
matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
"""
B = (A + A.T) / 2
_, s, V = svd(B)
H = np.dot(V.T, np.dot(np.diag(s), V))
A2 = (B + H) / 2
A3 = (A2 + A2.T) / 2
if is_pd(A3):
return A3
spacing = np.spacing(np.linalg.norm(A))
# The above is different from [1]. It appears that MATLAB's `chol` Cholesky
# decomposition will accept matrixes with exactly 0-eigenvalue, whereas
# Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab
# for `np.spacing`), we use the above definition. CAVEAT: our `spacing`
# will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on
# the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas
# `spacing` will, for Gaussian random matrixes of small dimension, be on
# othe order of 1e-16. In practice, both ways converge, as the unit test
# below suggests.
I = np.eye(A.shape[0])
k = 1
while not is_pd(A3):
mineig = np.min(np.real(np.linalg.eigvals(A3)))
A3 += I * (-mineig * k**2 + spacing)
k += 1
return A3
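# Sketch mirroring the unit test mentioned above: projecting a random (generally
# indefinite) matrix should yield something that passes the Cholesky check.
def demo_nearest_pd():
    """ Demonstrate nearest_pd() """
    A = np.random.randn(4, 4)
    assert is_pd(nearest_pd(A))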
def matrix_equal(A, B, tol=1e-8, verbose=False):
    """ Compare matrices `A` and `B` """
    diff = A - B
    if len(diff.shape) == 1:
        for i in range(diff.shape[0]):
            if abs(diff[i]) > tol:
                if verbose:
                    print("A - B:")
                    print(diff)
                return False
    elif len(diff.shape) == 2:
        for i in range(diff.shape[0]):
            for j in range(diff.shape[1]):
                if abs(diff[i, j]) > tol:
                    if verbose:
                        print("A - B:")
                        print(diff)
                    return False
    return True
def plot_compare_matrices(title_A, A, title_B, B):
""" Plot compare matrices """
plt.matshow(A)
plt.colorbar()
plt.title(title_A)
plt.matshow(B)
plt.colorbar()
plt.title(title_B)
diff = A - B
plt.matshow(diff)
plt.colorbar()
plt.title(f"{title_A} - {title_B}")
print(f"max_coeff({title_A}): {np.max(np.max(A))}")
print(f"max_coeff({title_B}): {np.max(np.max(B))}")
print(f"min_coeff({title_A}): {np.min(np.min(A))}")
print(f"min_coeff({title_B}): {np.min(np.min(B))}")
print(f"max_diff: {np.max(np.max(np.abs(diff)))}")
plt.show()
def check_jacobian(jac_name, fdiff, jac, threshold, verbose=False):
""" Check jacobians """
# Check if numerical diff is same as analytical jacobian
if matrix_equal(fdiff, jac, threshold):
if verbose:
print(f"Check [{jac_name}] passed!")
return True
# Failed - print differences
if verbose:
fdiff_minus_jac = fdiff - jac
print(f"Check [{jac_name}] failed!")
print("-" * 60)
print("J_fdiff - J:")
print(np.round(fdiff_minus_jac, 4))
print()
print("J_fdiff:")
print(np.round(fdiff, 4))
print()
print("J:")
        print(np.round(jac, 4))
import crosscat.cython_code.CyclicComponentModel as ccm
import math
import random
import numpy
import six
from scipy.stats import vonmises
from crosscat.utils.general_utils import logmeanexp
import pdb
pi = math.pi
next_seed = lambda rng: rng.randrange(2147483647)
default_hyperparameters = dict(a=1.0, b=pi, kappa=4.0)
default_data_parameters = dict(mu=pi, kappa=4.0)
###############################################################################
# Input-checking and exception-handling functions
###############################################################################
def check_type_force_float(x, name):
"""
If an int is passed, convert it to a float. If some other type is passed,
raise an exception.
"""
if type(x) is int:
return float(x)
elif not isinstance(x, (float, numpy.float64)):
raise TypeError("%r should be a float" % (name,))
else:
return x
def check_data_type_column_data(X):
"""
Makes sure that X is a numpy array and that it is a column vector
"""
if type(X) is not numpy.ndarray:
raise TypeError("X should be type numpy.ndarray")
if len(X.shape) == 2 and X.shape[1] > 1:
raise TypeError("X should have a single column.")
def check_hyperparams_dict(hypers):
if type(hypers) is not dict:
raise TypeError("hypers should be a dict")
keys = ['a', 'b', 'kappa']
for key in keys:
if key not in hypers:
raise KeyError("missing key in hypers: %r" % (key,))
for key, value in six.iteritems(hypers):
if key not in keys:
raise KeyError("invalid hypers key: %r" % (key,))
if not isinstance(value, (float, numpy.float64)):
raise TypeError("%r should be float" % (key,))
if key in ['a', 'kappa']:
if value <= 0.0:
raise ValueError("hypers[%r] should be greater than 0" % (key,))
if key == 'b':
if value <= 0.0 or value >= 2*pi:
raise ValueError("hypers[%r] should be in [0,2*pi]" % (key,))
def check_model_params_dict(params):
if type(params) is not dict:
raise TypeError("params should be a dict")
keys = ['mu', 'kappa']
for key in keys:
if key not in params:
raise KeyError("missing key in params: %r" % (key,))
for key, value in six.iteritems(params):
if key not in keys:
raise KeyError("invalid params key: %r" % (key,))
if not isinstance(value, (float, numpy.float64)):
raise TypeError("%r should be float" % (key,))
if key == "kappa":
if value <= 0.0:
raise ValueError("kappa should be greater than 0")
elif key != "mu":
raise KeyError("Invalid params key: %r" % (key,))
else:
if value < 0.0 or value > 2*pi:
raise ValueError("mu should be in [0,2*pi]")
###############################################################################
# The class extension
###############################################################################
class p_CyclicComponentModel(ccm.p_CyclicComponentModel):
model_type = 'vonmises'
cctype = 'cyclic'
@classmethod
def from_parameters(cls, N, data_params=default_data_parameters, hypers=None, gen_seed=0):
"""
Initialize a continuous component model with sufficient statistics
generated from random data.
Inputs:
N: the number of data points
data_params: a dict with the following keys
mu: the mean of the data
kappa: the precision of the data
hypers: a dict with the following keys
a: the prior precision of the mean
b: the prior mean of the
kappa: precision parameter
gen_seed: an integer from which the rng is seeded
"""
check_model_params_dict(data_params)
data_kappa = data_params['kappa']
data_mean = data_params['mu']
rng = random.Random(gen_seed)
X = [ [rng.vonmisesvariate(data_mean-math.pi, data_kappa)+math.pi] for i in range(N)]
X = numpy.array(X)
check_data_type_column_data(X)
if hypers is None:
hypers = cls.draw_hyperparameters(X, n_draws=1, gen_seed=next_seed(rng))[0]
check_hyperparams_dict(hypers)
sum_sin_x = numpy.sum(numpy.sin(X))
sum_cos_x = numpy.sum(numpy.cos(X))
hypers['fixed'] = 0.0
return cls(hypers, float(N), sum_sin_x, sum_cos_x)
@classmethod
def from_data(cls, X, hypers=None, gen_seed=0):
"""
Initialize a continuous component model with sufficient statistics
generated from data X
Inputs:
X: a column of data (numpy)
hypers: dict with the following entries
a: the prior precision of the mean
b: the prior mean of the
kappa: precision parameter
gen_seed: a int to seed the rng
"""
check_data_type_column_data(X)
if type(gen_seed) is not int:
raise TypeError("gen_seed should be an int")
rng = random.Random(gen_seed)
if hypers is None:
hypers = cls.draw_hyperparameters(X, gen_seed=next_seed(rng))[0]
check_hyperparams_dict(hypers)
N = len(X)
sum_sin_x = numpy.sum(numpy.sin(X))
sum_cos_x = numpy.sum(numpy.cos(X))
hypers['fixed'] = 0.0
return cls(hypers, float(N), sum_sin_x, sum_cos_x)
def sample_parameters_given_hyper(self, gen_seed=0):
"""
Samples a Gaussian parameter given the current hyperparameters.
Inputs:
gen_seed: integer used to seed the rng
"""
if type(gen_seed) is not int:
raise TypeError("gen_seed should be an int")
        nprng = numpy.random.RandomState(gen_seed)
import numpy as np
from matplotlib.patches import Wedge, Polygon, RegularPolygon
from matplotlib.collections import PatchCollection
import matplotlib.pyplot as plt
class FootprintGeometry():
def __init__(self):
self._patches = []
self._xlim = 0
self._zenithAngleInDeg = 0
self._lgs = []
self._ngs = []
self._targets = []
self._dms = []
self._instrFoVInArcsec = None
self._layerAltitudeInMeter = 10000
self._telescopeRadiusInMeter = 4.1
self._drawMetapupil = True
def setInstrumentFoV(self, instrFoVInArcsec, instrRotInDeg=0):
self._instrFoVInArcsec = instrFoVInArcsec
self._instrRotInDeg = instrRotInDeg
def set_zenith_angle(self, zenithAngleInDeg):
self._zenithAngleInDeg = zenithAngleInDeg
def setLayerAltitude(self, altitudeInMeter):
self._layerAltitudeInMeter = altitudeInMeter
def setTelescopeRadiusInMeter(self, radiusInMeter):
self._telescopeRadiusInMeter = radiusInMeter
def addLgs(self,
thetaSkyInArcsec,
AzSkyInDeg):
self._lgs.append(
{'lRo': 0,
'lAz': 0,
'skyTheta': thetaSkyInArcsec,
'skyAz': AzSkyInDeg})
def addNgs(self, thetaSkyInArcsec, AzSkyInDeg):
self._ngs.append(
{'lRo': 0,
'lAz': 0,
'skyTheta': thetaSkyInArcsec,
'skyAz': AzSkyInDeg})
def addTarget(self, thetaSkyInArcsec, AzSkyInDeg):
self._targets.append(
{'lRo': 0,
'lAz': 0,
'skyTheta': thetaSkyInArcsec,
'skyAz': AzSkyInDeg})
def addDm(self, pitchOnLayer, nActs, size, rotationAngleInDeg):
self._dms.append(
{'lRo': 0,
'lAz': 0,
'pitch': pitchOnLayer,
'nActs': nActs,
'size': size,
'skyRot': rotationAngleInDeg})
def compute(self):
self._lgsL = []
if self._lgs:
for l in self._lgs:
ll = self._polToRect(l['lRo'], l['lAz'])
cc = self._centerOffset(l['skyTheta'], l['skyAz'],
self._layerAltitudeInMeter)
ra = self._lgsRadius()
self._lgsL.append(FootprintXYRadius(
ll[0] + cc[0], ll[1] + cc[1], ra))
self._ngsL = []
if self._ngs:
for l in self._ngs:
cc = self._centerOffset(l['skyTheta'], l['skyAz'],
self._layerAltitudeInMeter)
ra = self._telescopeRadiusInMeter
self._ngsL.append(FootprintXYRadius(cc[0], cc[1], ra))
self._targetsL = []
for l in self._targets:
cc = self._centerOffset(l['skyTheta'], l['skyAz'],
self._layerAltitudeInMeter)
ra = self._telescopeRadiusInMeter
self._targetsL.append(FootprintXYRadius(cc[0], cc[1], ra))
self._sciL = []
if self._instrFoVInArcsec:
for sciFovCornerDeg in np.array([45, 135, 225, 315]
) + self._instrRotInDeg:
cc = self._centerOffset(
self._instrFoVInArcsec / 2 * np.sqrt(2),
sciFovCornerDeg,
self._layerAltitudeInMeter)
ra = self._telescopeRadiusInMeter
self._sciL.append(
np.array([cc[0], cc[1], ra, sciFovCornerDeg]))
self._metapupilL = []
if self._lgs:
raLgs = np.max([np.linalg.norm((l.x, l.y)) + l.r
for l in self._lgsL])
else:
raLgs = 0
if self._ngs:
raNgs = np.max([np.linalg.norm((l.x, l.y)) + l.r
for l in self._ngsL])
else:
raNgs = 0
if self._targetsL:
raTargets = np.max([np.linalg.norm((l.x, l.y)) + l.r
for l in self._targetsL])
else:
raTargets = 0
ra = np.max([raLgs, raNgs, raTargets])
self._metapupilL.append(FootprintXYRadius(0, 0, ra))
self._dmsL = []
if self._dms:
for l in self._dms:
self._dmsL.append(l)
# ll = self._polToRect(l['lRo'], l['lAz'])
# cc = self._centerOffset(l['skyTheta'], l['skyAz'],
# self._layerAltitudeInMeter)
# ra = self._lgsRadius()
# self._dmsL.append(FootprintXYRadius(
# ll[0] + cc[0], ll[1] + cc[1], ra))
self._computePatches()
def getNgsFootprint(self):
return self._ngsL
def getLgsFootprint(self):
return self._lgsL
def getTargetFootprint(self):
return self._targetsL
def getMetapupilFootprint(self):
return self._metapupilL
def _computePatches(self):
self._patches = []
self._xlim = 0
for l in self._lgsL:
self._addAnnularFootprint(
l.x, l.y, l.r, 0.99 * l.r, color='y', alpha=0.5)
self._addAnnularFootprint(
l.x, l.y, l.r, 0.00 * l.r, color='y', alpha=0.1)
for l in self._ngsL:
self._addAnnularFootprint(
l.x, l.y, l.r, 0.99 * l.r, color='r', alpha=0.5)
for l in self._targetsL:
self._addAnnularFootprint(
l.x, l.y, l.r, 0.99 * l.r, color='b', alpha=0.5)
for l in self._sciL:
self._addAnnularFootprint(
l[0], l[1], l[2], 0.99 * l[2],
theta1=l[3] - 10, theta2=l[3] + 10, color='b')
if self._drawMetapupil:
for l in self._metapupilL:
self._addAnnularFootprint(
l.x, l.y, l.r, 0.99 * l.r, color='k')
for l in self._dmsL:
self._addDmFootprint(l, color='k', alpha=0.1)
def scienceFieldRadius(self, rTel, fovInArcsec, hInMeter):
return rTel + fovInArcsec / 2 * 4.848e-6 * hInMeter
def _polToRect(self, ro, azInDeg):
azInRad = azInDeg * np.pi / 180
return ro * np.array(
[np.cos(azInRad), np.sin(azInRad)])
def _centerOffset(self, thetaInArcsec, azInDeg, hInMeter):
return self._polToRect(
thetaInArcsec * 4.848e-6 * hInMeter, azInDeg)
def _lgsDistance(self):
return 90000 / np.cos(self._zenithAngleInDeg * np.pi / 180)
def _lgsRadius(self):
return self._telescopeRadiusInMeter * (
1 - self._layerAltitudeInMeter / self._lgsDistance())
def _addAnnularFootprint(
self, centerX, centerY,
radiusOut, radiusIn=0,
theta1=0, theta2=360,
color='b', alpha=1):
center = np.array([centerX, centerY])
self._patches.append(
Wedge(center, radiusOut, theta1, theta2,
width=(radiusOut - radiusIn), color=color, alpha=alpha))
self._xlim = np.maximum(self._xlim,
np.max(np.abs(center) + radiusOut))
def _addDmFootprint(self, l, color, alpha):
rotAngleRad = l['skyRot'] * np.pi / 180
for i in np.arange(-l['nActs'], l['nActs'] + 1):
x = i * l['pitch']
for j in np.arange(-l['nActs'], l['nActs'] + 1):
y = j * l['pitch']
                xR = x * np.cos(rotAngleRad)
from polare import Stroke
from unittest import TestCase
import numpy as np
import unittest
class TestStrokeBase(TestCase):
def setUp(self):
self.x = np.linspace(-1, 1, 10)
self.y = np.exp(self.x) + np.cos(np.pi * self.x) - 1
self.f1 = Stroke(self.x, self.y, "linear")
self.f2 = Stroke(self.x, self.y, "quadratic")
self.f3 = Stroke(self.x, self.y, "cubic")
self.xnew = np.linspace(-1, 1, 100)
self.ynew = np.exp(self.xnew) + np.cos(np.pi * self.xnew) - 1
self.xother = np.linspace(-1, 1, 10)
self.yother = np.sin(self.xother)
self.fother = Stroke(self.xother, self.yother, "cubic")
self.xothernew = np.linspace(-1, 1, 100)
self.yothernew = np.sin(self.xothernew)
def test_pos(self):
y1 = (+self.f1)(self.xnew)
y2 = (+self.f2)(self.xnew)
y3 = (+self.f3)(self.xnew)
self.assertTrue(np.allclose(self.ynew, y1, atol=0.1))
self.assertTrue(np.allclose(self.ynew, y2, atol=0.1))
self.assertTrue(np.allclose(self.ynew, y3, atol=0.1))
def test_neg(self):
y1 = (-self.f1)(self.xnew)
y2 = (-self.f2)(self.xnew)
y3 = (-self.f3)(self.xnew)
self.assertTrue(np.allclose(-self.ynew, y1, atol=0.1))
self.assertTrue(np.allclose(-self.ynew, y2, atol=0.1))
self.assertTrue(np.allclose(-self.ynew, y3, atol=0.1))
def test_add(self):
y1 = (self.f1 + 5)(self.xnew)
y2 = (self.f2 + 5)(self.xnew)
y3 = (self.f3 + 5)(self.xnew)
self.assertTrue(np.allclose(self.ynew + 5, y1, atol=0.1))
self.assertTrue(np.allclose(self.ynew + 5, y2, atol=0.1))
self.assertTrue(np.allclose(self.ynew + 5, y3, atol=0.1))
y1 = (self.f1 + 0)(self.xnew)
y2 = (self.f2 + 0)(self.xnew)
y3 = (self.f3 + 0)(self.xnew)
self.assertTrue(np.allclose(self.ynew, y1, atol=0.1))
self.assertTrue(np.allclose(self.ynew, y2, atol=0.1))
self.assertTrue(np.allclose(self.ynew, y3, atol=0.1))
yother = (self.f1 + self.fother)(self.xnew)
self.assertTrue(np.allclose(self.ynew + self.yothernew, yother, atol=0.1))
def test_radd(self):
y1 = (5 + self.f1)(self.xnew)
y2 = (5 + self.f2)(self.xnew)
y3 = (5 + self.f3)(self.xnew)
self.assertTrue(np.allclose(5 + self.ynew, y1, atol=0.1))
self.assertTrue(np.allclose(5 + self.ynew, y2, atol=0.1))
self.assertTrue(np.allclose(5 + self.ynew, y3, atol=0.1))
y1 = (0 + self.f1)(self.xnew)
y2 = (0 + self.f2)(self.xnew)
y3 = (0 + self.f3)(self.xnew)
self.assertTrue(np.allclose(self.ynew, y1, atol=0.1))
self.assertTrue(np.allclose(self.ynew, y2, atol=0.1))
self.assertTrue(np.allclose(self.ynew, y3, atol=0.1))
yother = (self.fother + self.f1)(self.xnew)
self.assertTrue(np.allclose(self.yothernew + self.ynew, yother, atol=0.1))
def test_sub(self):
y1 = (self.f1 - 5)(self.xnew)
y2 = (self.f2 - 5)(self.xnew)
y3 = (self.f3 - 5)(self.xnew)
self.assertTrue(np.allclose(self.ynew - 5, y1, atol=0.1))
self.assertTrue(np.allclose(self.ynew - 5, y2, atol=0.1))
self.assertTrue(np.allclose(self.ynew - 5, y3, atol=0.1))
y1 = (self.f1 - 0)(self.xnew)
y2 = (self.f2 - 0)(self.xnew)
y3 = (self.f3 - 0)(self.xnew)
self.assertTrue(np.allclose(self.ynew, y1, atol=0.1))
self.assertTrue(np.allclose(self.ynew, y2, atol=0.1))
self.assertTrue(np.allclose(self.ynew, y3, atol=0.1))
yother = (self.f1 - self.fother)(self.xnew)
self.assertTrue(np.allclose(self.ynew - self.yothernew, yother, atol=0.1))
def test_rsub(self):
y1 = (0 - self.f1)(self.xnew)
y2 = (0 - self.f2)(self.xnew)
y3 = (0 - self.f3)(self.xnew)
self.assertTrue(np.allclose(-self.ynew, y1, atol=0.1))
self.assertTrue(np.allclose(-self.ynew, y2, atol=0.1))
self.assertTrue(np.allclose(-self.ynew, y3, atol=0.1))
yother = (self.fother - self.f1)(self.xnew)
self.assertTrue(np.allclose(self.yothernew - self.ynew, yother, atol=0.1))
def test_mul(self):
y1 = (self.f1 * 5)(self.xnew)
y2 = (self.f2 * 5)(self.xnew)
y3 = (self.f3 * 5)(self.xnew)
self.assertTrue(np.allclose(self.ynew * 5, y1, atol=0.4))
self.assertTrue(np.allclose(self.ynew * 5, y2, atol=0.2))
self.assertTrue(np.allclose(self.ynew * 5, y3, atol=0.05))
y1 = self.f1 * 0
y2 = self.f2 * 0
y3 = self.f3 * 0
self.assertTrue(0 == y1)
self.assertTrue(0 == y2)
self.assertTrue(0 == y3)
yother = (self.f1 * self.fother)(self.xnew)
self.assertTrue(np.allclose(self.ynew * self.yothernew, yother, atol=0.1))
def test_rmul(self):
y1 = (5 * self.f1)(self.xnew)
y2 = (5 * self.f2)(self.xnew)
y3 = (5 * self.f3)(self.xnew)
self.assertTrue(np.allclose(5 * self.ynew, y1, atol=0.4))
self.assertTrue(np.allclose(5 * self.ynew, y2, atol=0.2))
self.assertTrue(np.allclose(5 * self.ynew, y3, atol=0.05))
y1 = 0 * self.f1
y2 = 0 * self.f2
y3 = 0 * self.f3
self.assertTrue(0 == y1)
self.assertTrue(0 == y2)
self.assertTrue(0 == y3)
yother = (self.fother * self.f1)(self.xnew)
self.assertTrue(np.allclose(self.yothernew * self.ynew, yother, atol=0.1))
def test_truediv(self):
y1 = (self.f1 / 5)(self.xnew)
y2 = (self.f2 / 5)(self.xnew)
y3 = (self.f3 / 5)(self.xnew)
self.assertTrue(np.allclose(self.ynew / 5, y1, atol=0.05))
self.assertTrue(np.allclose(self.ynew / 5, y2, atol=0.05))
self.assertTrue(np.allclose(self.ynew / 5, y3, atol=0.05))
def _raise_callable(a, b):
return a / b
self.assertRaises(ZeroDivisionError, _raise_callable, self.f1, 0)
self.assertRaises(ZeroDivisionError, _raise_callable, self.f2, 0)
self.assertRaises(ZeroDivisionError, _raise_callable, self.f3, 0)
yother = (self.f1 / self.fother)(self.xnew)
self.assertTrue(np.allclose(self.ynew / self.yothernew, yother, atol=5.5))
def test_rtruediv(self):
y1 = (5 / self.f1)(self.xnew[50:])
y2 = (5 / self.f2)(self.xnew[50:])
y3 = (5 / self.f3)(self.xnew[50:])
self.assertTrue(np.allclose(5 / self.ynew[50:], y1, atol=1.5))
self.assertTrue(np.allclose(5 / self.ynew[50:], y2, atol=1.5))
self.assertTrue(np.allclose(5 / self.ynew[50:], y3, atol=1.5))
y1 = 0 / self.f1
y2 = 0 / self.f2
y3 = 0 / self.f3
self.assertTrue(0 == y1)
self.assertTrue(0 == y2)
self.assertTrue(0 == y3)
yother = (self.fother / self.f1)(self.xnew[50:])
self.assertTrue(np.allclose(self.yothernew[50:] / self.ynew[50:], yother, atol=0.3))
def test_pow(self):
y1 = (self.f1 ** 5)(self.xnew)
y2 = (self.f2 ** 5)(self.xnew)
y3 = (self.f3 ** 5)(self.xnew)
self.assertTrue(np.allclose(self.ynew ** 5, y1, atol=2))
self.assertTrue(np.allclose(self.ynew ** 5, y2, atol=2))
self.assertTrue(np.allclose(self.ynew ** 5, y3, atol=2))
y1 = self.f1 ** 0
y2 = self.f2 ** 0
y3 = self.f3 ** 0
self.assertTrue(1 == y1)
self.assertTrue(1 == y2)
self.assertTrue(1 == y3)
yother = (abs(self.f1) ** abs(self.fother))(self.xnew)
self.assertTrue(np.allclose(abs(self.ynew) ** abs(self.yothernew), yother, atol=2))
def test_rpow(self):
y1 = (5 ** self.f1)(self.xnew)
y2 = (5 ** self.f2)(self.xnew)
y3 = (5 ** self.f3)(self.xnew)
self.assertTrue(np.allclose(5 ** self.ynew, y1, atol=2))
self.assertTrue(np.allclose(5 ** self.ynew, y2, atol=2))
        self.assertTrue(np.allclose(5 ** self.ynew, y3, atol=2))
import basix
import pytest
import numpy as np
elements = [
(basix.ElementFamily.P, [basix.LagrangeVariant.equispaced]),
(basix.ElementFamily.P, [basix.LagrangeVariant.gll_warped]),
(basix.ElementFamily.RT, []),
(basix.ElementFamily.BDM, []),
(basix.ElementFamily.N1E, []),
(basix.ElementFamily.N2E, []),
(basix.ElementFamily.Regge, []),
(basix.ElementFamily.HHJ, []),
(basix.ElementFamily.bubble, []),
(basix.ElementFamily.serendipity, [basix.LagrangeVariant.legendre, basix.DPCVariant.legendre]),
(basix.ElementFamily.DPC, [basix.DPCVariant.legendre]),
(basix.ElementFamily.CR, []),
]
def cross2d(x):
return [x[1], -x[0]]
def create_continuity_map_interval(map_type, start, end):
if map_type == basix.MapType.identity:
return lambda x: x
if map_type == basix.MapType.covariantPiola:
return lambda x: np.dot(x, end - start)
if map_type == basix.MapType.contravariantPiola:
return lambda x: np.dot(x, cross2d(end - start))
if map_type == basix.MapType.doubleCovariantPiola:
return lambda x: np.dot(start - end, np.dot(x, end - start))
if map_type == basix.MapType.doubleContravariantPiola:
return lambda x: np.dot(cross2d(end - start), np.dot(x, cross2d(end - start)))
raise NotImplementedError
def create_continuity_map_triangle(map_type, v0, v1, v2):
if map_type == basix.MapType.identity:
return lambda x: x
raise NotImplementedError
def create_continuity_map_quadrilateral(map_type, v0, v1, v2):
if map_type == basix.MapType.identity:
return lambda x: x
raise NotImplementedError
@pytest.mark.parametrize("degree", range(1, 5))
@pytest.mark.parametrize("element, variant", elements)
def test_continuity_interval_facet(degree, element, variant):
"""Test that basis functions between neighbouring cells of different types will be continuous."""
elements = {}
for cell in [basix.CellType.triangle, basix.CellType.quadrilateral]:
try:
elements[cell] = basix.create_element(element, cell, degree, *variant)
except RuntimeError:
pass
if len(elements) <= 1:
pytest.skip()
facets = [
[np.array([0, 0]), np.array([1, 0]), {basix.CellType.triangle: 2, basix.CellType.quadrilateral: 0}],
[np.array([0, 0]), np.array([0, 1]), {basix.CellType.triangle: 1, basix.CellType.quadrilateral: 1}],
]
for start, end, cellmap in facets:
points = np.array([start + i/10 * (end - start) for i in range(11)])
data = None
for c, e in elements.items():
tab = e.tabulate(0, points)[0]
continuity_map = create_continuity_map_interval(e.map_type, start, end)
entity_tab = [continuity_map(tab[:, i, :]) for i in e.entity_dofs[1][cellmap[c]]]
if data is None:
data = entity_tab
else:
                assert np.allclose(data, entity_tab)
from utils import normdata, myrmse
from sklearn.metrics import (
accuracy_score,
roc_curve,
auc,
roc_auc_score,
mean_squared_error,
)
import numpy as np
import random
import matplotlib.pyplot as plt
def performance_vs_confidence(
original_data,
imp_data,
missing_data,
testY,
test_idx,
total_uncertainty,
coeff_variation,
clf=None,
):
"""
Computes the performance vs confidence (i.e exclusions)
Args:
analysis_scores (dict): dict of different analysis scores
"""
df_mis = missing_data
testX = original_data
percents = np.linspace(0.01, 0.9, 10)
amounts = percents * testX.shape[0]
# sort based on variance
uncert = np.argsort(total_uncertainty)
# sort based on CV
cv_uncert = np.argsort(coeff_variation)[::-1]
uncert_rmses_retention = []
cv_rmses_retention = []
random_rmses_retention = []
y_score_retention = []
auc_retention = []
gt_y = []
acc_scores = []
# apply mask
true = testX[~missing_data.astype(bool)]
preds = imp_data[~missing_data.astype(bool)]
# oracle error
errors = np.abs(preds - true)
# sort based on error - oracle
uncert_oracle = np.argsort(errors)
rmse_oracle = []
for count, amount in enumerate(amounts):
idx = int(amount)
# Calculations and exclusions based on variance
excl = uncert[:-idx]
ori_data = testX[excl, :]
imputed_data = imp_data[excl, :]
data_m = np.array(df_mis != df_mis)[excl, :]
rmse = myrmse(
actual=ori_data, predicted=imputed_data, mask=~data_m.astype(bool)
)
uncert_rmses_retention.append(rmse)
# Calculations for oracle
if count > 0:
excl_oracle = uncert_oracle[: -int(amount)]
rmseval = mean_squared_error(true[excl_oracle], preds[excl_oracle])
rmse_oracle.append(rmseval)
else:
rmse_oracle.append(rmse)
excl_oracle = uncert_oracle[: -int(amount)]
rmseval = mean_squared_error(true[excl_oracle], preds[excl_oracle])
rmse_oracle.append(rmseval)
# if a classifier is specified apply the sortings for diff acc and auc
if clf:
y_preds = clf.predict(imputed_data[:, 0:-1])
y_scores = clf.predict_proba(imputed_data[:, 0:-1])[:, 1]
if len(np.unique(testY)) == 2:
auc_retention.append(
roc_auc_score(testY[excl], y_scores, multi_class="ovr")
)
y_score_retention.append(y_scores)
gt_y.append(testY[excl])
acc_scores.append(accuracy_score(testY[excl], y_preds))
# Calculations and exclusions based on CV
excl = cv_uncert[:-idx]
ori_data = testX[excl, :]
imputed_data = imp_data[excl, :]
data_m = np.array(df_mis != df_mis)[excl, :]
rmse = myrmse(
actual=ori_data, predicted=imputed_data, mask=~data_m.astype(bool)
)
cv_rmses_retention.append(rmse)
# Calculations and exclusions based on random
rand_excl = random.sample(range(len(uncert)), idx)
ori_data = testX[rand_excl, :]
imputed_data = imp_data[rand_excl, :]
data_m = np.array(df_mis != df_mis)[rand_excl, :]
rmse = myrmse(
actual=ori_data, predicted=imputed_data, mask=~data_m.astype(bool)
)
random_rmses_retention.append(rmse)
return (
uncert_rmses_retention,
cv_rmses_retention,
random_rmses_retention,
y_score_retention,
auc_retention,
gt_y,
acc_scores,
rmse_oracle[:-1],
)
def plot_rmse_conf_curve(analysis_scores, dataset, filename):
"""
Plots the RMSE Confidence-Exclusion curve
"""
plt.style.reload_library()
plt.style.use(["science", "ieee", "no-latex", "notebook", "grid", "vibrant"])
mean_uncert = np.mean(analysis_scores["uncert_rmses_retention"], axis=0)
std_uncert = np.std(analysis_scores["uncert_rmses_retention"], axis=0)
plt.plot(np.linspace(0, 1, 10), mean_uncert, label="Variance", marker="o")
plt.fill_between(
np.linspace(0, 1, 10),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
mean_uncert = np.mean(analysis_scores["random_rmses_retention"], axis=0)
std_uncert = np.std(analysis_scores["random_rmses_retention"], axis=0)
plt.plot(np.linspace(0, 1, 10), mean_uncert, label="Random", marker="o")
plt.fill_between(
np.linspace(0, 1, 10),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
mean_uncert = np.mean(analysis_scores["cv_rmses_retention"], axis=0)
std_uncert = np.std(analysis_scores["cv_rmses_retention"], axis=0)
plt.plot(np.linspace(0, 1, 10), mean_uncert, label="CV", marker="o")
plt.fill_between(
np.linspace(0, 1, 10),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
mean_uncert = np.mean(analysis_scores["rmse_oracle"], axis=0)
    std_uncert = np.std(analysis_scores["rmse_oracle"], axis=0)
import os
import numpy as np
from tvtk.api import tvtk, write_data
import sharpy.utils.cout_utils as cout
from sharpy.utils.solver_interface import solver, BaseSolver
import sharpy.utils.settings as settings
import sharpy.utils.algebra as algebra
@solver
class BeamPlot(BaseSolver):
"""
Plots beam to Paraview format
"""
solver_id = 'BeamPlot'
solver_classification = 'post-processor'
settings_types = dict()
settings_default = dict()
settings_description = dict()
settings_types['folder'] = 'str'
settings_default['folder'] = './output'
settings_description['folder'] = 'Output folder path'
settings_types['include_rbm'] = 'bool'
settings_default['include_rbm'] = True
settings_description['include_rbm'] = 'Include frame of reference rigid body motion'
settings_types['include_FoR'] = 'bool'
settings_default['include_FoR'] = False
settings_description['include_FoR'] = 'Include frame of reference variables'
settings_types['include_applied_forces'] = 'bool'
settings_default['include_applied_forces'] = True
settings_description['include_applied_forces'] = 'Write beam applied forces'
settings_types['include_applied_moments'] = 'bool'
settings_default['include_applied_moments'] = True
settings_description['include_applied_moments'] = 'Write beam applied moments'
settings_types['name_prefix'] = 'str'
settings_default['name_prefix'] = ''
settings_description['name_prefix'] = 'Name prefix for files'
settings_types['output_rbm'] = 'bool'
settings_default['output_rbm'] = True
settings_description['output_rbm'] = 'Write ``csv`` file with rigid body motion data'
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types, settings_default, settings_description)
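    # Illustrative sketch of how this post-processor is typically configured in a
    # SHARPy case (keys follow the settings table above; the values are example
    # assumptions, not requirements):
    #
    #   settings['BeamPlot'] = {'folder': './output',
    #                           'include_rbm': True,
    #                           'include_applied_forces': True,
    #                           'name_prefix': ''}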
def __init__(self):
self.settings = None
self.data = None
self.folder = ''
self.filename = ''
self.caller = None
def initialise(self, data, custom_settings=None, caller=None):
self.data = data
if custom_settings is None:
self.settings = data.settings[self.solver_id]
else:
self.settings = custom_settings
settings.to_custom_types(self.settings, self.settings_types, self.settings_default)
# create folder for containing files if necessary
if not os.path.exists(self.settings['folder']):
os.makedirs(self.settings['folder'])
self.folder = self.settings['folder'] + '/' + self.data.settings['SHARPy']['case'] + '/beam/'
if not os.path.exists(self.folder):
os.makedirs(self.folder)
self.filename = (self.folder +
self.settings['name_prefix'] +
'beam_' +
self.data.settings['SHARPy']['case'])
self.filename_for = (self.folder +
self.settings['name_prefix'] +
'for_' +
self.data.settings['SHARPy']['case'])
self.caller = caller
def run(self, online=False):
self.plot(online)
if not online:
self.write()
cout.cout_wrap('...Finished', 1)
return self.data
def write(self):
if self.settings['output_rbm']:
filename = self.filename + '_rbm_acc.csv'
timesteps = len(self.data.structure.timestep_info)
temp_matrix = np.zeros((timesteps, 6))
for it in range(timesteps):
if self.data.structure.timestep_info[it] is not None:
temp_matrix[it, :] = self.data.structure.timestep_info[it].for_acc
np.savetxt(filename, temp_matrix, delimiter=',')
def plot(self, online):
if not online:
for it in range(len(self.data.structure.timestep_info)):
if self.data.structure.timestep_info[it] is not None:
self.write_beam(it)
if self.settings['include_FoR']:
self.write_for(it)
else:
it = len(self.data.structure.timestep_info) - 1
self.write_beam(it)
if self.settings['include_FoR']:
self.write_for(it)
def write_beam(self, it):
it_filename = (self.filename +
'%06u' % it)
num_nodes = self.data.structure.num_node
num_elem = self.data.structure.num_elem
coords = np.zeros((num_nodes, 3))
conn = np.zeros((num_elem, 3), dtype=int)
node_id = np.zeros((num_nodes,), dtype=int)
elem_id = np.zeros((num_elem,), dtype=int)
coords_a_cell = np.zeros((num_elem, 3), dtype=int)
local_x = np.zeros((num_nodes, 3))
local_y = np.zeros((num_nodes, 3))
local_z = np.zeros((num_nodes, 3))
coords_a = np.zeros((num_nodes, 3))
app_forces = np.zeros((num_nodes, 3))
app_moment = np.zeros((num_nodes, 3))
forces_constraints_nodes = np.zeros((num_nodes, 3))
moments_constraints_nodes = np.zeros((num_nodes, 3))
if self.data.structure.timestep_info[it].in_global_AFoR:
tstep = self.data.structure.timestep_info[it]
else:
tstep = self.data.structure.timestep_info[it].copy()
tstep.whole_structure_to_global_AFoR(self.data.structure)
# aero2inertial rotation
aero2inertial = tstep.cga()
# coordinates of corners
coords = tstep.glob_pos(include_rbm=self.settings['include_rbm'])
# check if I can output gravity forces
with_gravity = False
try:
gravity_forces = tstep.gravity_forces[:]
gravity_forces_g = np.zeros_like(gravity_forces)
with_gravity = True
except AttributeError:
pass
# check if postproc dicts are present and count/prepare
with_postproc_cell = False
try:
tstep.postproc_cell
with_postproc_cell = True
except AttributeError:
pass
with_postproc_node = False
try:
tstep.postproc_node
with_postproc_node = True
except AttributeError:
pass
# count number of arguments
postproc_cell_keys = tstep.postproc_cell.keys()
postproc_cell_vals = tstep.postproc_cell.values()
postproc_cell_scalar = []
postproc_cell_vector = []
postproc_cell_6vector = []
for k, v in tstep.postproc_cell.items():
_, cols = v.shape
if cols == 1:
raise NotImplementedError('scalar cell types not supported in beamplot (Easy to implement)')
# postproc_cell_scalar.append(k)
elif cols == 3:
postproc_cell_vector.append(k)
elif cols == 6:
postproc_cell_6vector.append(k)
else:
raise AttributeError('Only scalar and 3-vector types supported in beamplot')
# count number of arguments
postproc_node_keys = tstep.postproc_node.keys()
postproc_node_vals = tstep.postproc_node.values()
postproc_node_scalar = []
postproc_node_vector = []
postproc_node_6vector = []
for k, v in tstep.postproc_node.items():
_, cols = v.shape
if cols == 1:
raise NotImplementedError('scalar node types not supported in beamplot (Easy to implement)')
# postproc_cell_scalar.append(k)
elif cols == 3:
postproc_node_vector.append(k)
elif cols == 6:
postproc_node_6vector.append(k)
else:
raise AttributeError('Only scalar and 3-vector types supported in beamplot')
for i_node in range(num_nodes):
i_elem = self.data.structure.node_master_elem[i_node, 0]
i_local_node = self.data.structure.node_master_elem[i_node, 1]
node_id[i_node] = i_node
v1 = np.array([1., 0, 0])
v2 = np.array([0., 1, 0])
v3 = np.array([0., 0, 1])
cab = algebra.crv2rotation(
tstep.psi[i_elem, i_local_node, :])
local_x[i_node, :] = np.dot(aero2inertial, np.dot(cab, v1))
local_y[i_node, :] = np.dot(aero2inertial, np.dot(cab, v2))
local_z[i_node, :] = np.dot(aero2inertial, np.dot(cab, v3))
if i_local_node == 2:
coords_a_cell[i_elem, :] = tstep.pos[i_node, :]
coords_a[i_node, :] = tstep.pos[i_node, :]
# applied forces
cab = algebra.crv2rotation(tstep.psi[i_elem, i_local_node, :])
app_forces[i_node, :] = np.dot(aero2inertial,
np.dot(cab,
tstep.steady_applied_forces[i_node, 0:3]+
tstep.unsteady_applied_forces[i_node, 0:3]))
app_moment[i_node, :] = np.dot(aero2inertial,
np.dot(cab,
tstep.steady_applied_forces[i_node, 3:6]+
tstep.unsteady_applied_forces[i_node, 3:6]))
forces_constraints_nodes[i_node, :] = np.dot(aero2inertial,
np.dot(cab,
tstep.forces_constraints_nodes[i_node, 0:3]))
moments_constraints_nodes[i_node, :] = np.dot(aero2inertial,
np.dot(cab,
tstep.forces_constraints_nodes[i_node, 3:6]))
if with_gravity:
gravity_forces_g[i_node, 0:3] = np.dot(aero2inertial,
gravity_forces[i_node, 0:3])
gravity_forces_g[i_node, 3:6] = np.dot(aero2inertial,
gravity_forces[i_node, 3:6])
for i_elem in range(num_elem):
conn[i_elem, :] = self.data.structure.elements[i_elem].reordered_global_connectivities
elem_id[i_elem] = i_elem
ug = tvtk.UnstructuredGrid(points=coords)
ug.set_cells(tvtk.Line().cell_type, conn)
ug.cell_data.scalars = elem_id
ug.cell_data.scalars.name = 'elem_id'
counter = 1
if with_postproc_cell:
for k in postproc_cell_vector:
ug.cell_data.add_array(tstep.postproc_cell[k])
ug.cell_data.get_array(counter).name = k + '_cell'
counter += 1
for k in postproc_cell_6vector:
for i in range(0, 2):
ug.cell_data.add_array(tstep.postproc_cell[k][:, 3*i:3*(i+1)])
ug.cell_data.get_array(counter).name = k + '_' + str(i) + '_cell'
counter += 1
ug.cell_data.add_array(coords_a_cell)
ug.cell_data.get_array(counter).name = 'coords_a_elem'
counter += 1
ug.point_data.scalars = node_id
ug.point_data.scalars.name = 'node_id'
point_vector_counter = 1
ug.point_data.add_array(local_x, 'vector')
ug.point_data.get_array(point_vector_counter).name = 'local_x'
point_vector_counter += 1
ug.point_data.add_array(local_y, 'vector')
ug.point_data.get_array(point_vector_counter).name = 'local_y'
point_vector_counter += 1
ug.point_data.add_array(local_z, 'vector')
ug.point_data.get_array(point_vector_counter).name = 'local_z'
point_vector_counter += 1
ug.point_data.add_array(coords_a, 'vector')
ug.point_data.get_array(point_vector_counter).name = 'coords_a'
if self.settings['include_applied_forces']:
point_vector_counter += 1
ug.point_data.add_array(app_forces, 'vector')
ug.point_data.get_array(point_vector_counter).name = 'app_forces'
point_vector_counter += 1
ug.point_data.add_array(forces_constraints_nodes, 'vector')
ug.point_data.get_array(point_vector_counter).name = 'forces_constraints_nodes'
if with_gravity:
point_vector_counter += 1
ug.point_data.add_array(gravity_forces_g[:, 0:3], 'vector')
ug.point_data.get_array(point_vector_counter).name = 'gravity_forces'
if self.settings['include_applied_moments']:
point_vector_counter += 1
ug.point_data.add_array(app_moment, 'vector')
ug.point_data.get_array(point_vector_counter).name = 'app_moments'
point_vector_counter += 1
ug.point_data.add_array(moments_constraints_nodes, 'vector')
ug.point_data.get_array(point_vector_counter).name = 'moments_constraints_nodes'
if with_gravity:
point_vector_counter += 1
ug.point_data.add_array(gravity_forces_g[:, 3:6], 'vector')
ug.point_data.get_array(point_vector_counter).name = 'gravity_moments'
if with_postproc_node:
for k in postproc_node_vector:
point_vector_counter += 1
ug.point_data.add_array(tstep.postproc_node[k])
ug.point_data.get_array(point_vector_counter).name = k + '_point'
for k in postproc_node_6vector:
for i in range(0, 2):
point_vector_counter += 1
ug.point_data.add_array(tstep.postproc_node[k][:, 3*i:3*(i+1)])
ug.point_data.get_array(point_vector_counter).name = k + '_' + str(i) + '_point'
write_data(ug, it_filename)
def write_for(self, it):
it_filename = (self.filename_for +
'%06u' % it)
forces_constraints_FoR = np.zeros((self.data.structure.num_bodies, 3))
moments_constraints_FoR = np.zeros((self.data.structure.num_bodies, 3))
# TODO: what should I do with the forces of the quaternion?
# aero2inertial rotation
aero2inertial = self.data.structure.timestep_info[it].cga()
# coordinates of corners
FoR_coords = np.zeros((self.data.structure.num_bodies, 3))
if self.settings['include_rbm']:
offset = | np.zeros((3,)) | numpy.zeros |
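# A minimal, illustrative sketch of the rotation used above: algebra.crv2rotation
# turns the Cartesian rotation vector psi into a 3x3 rotation matrix, which is then
# applied to the unit vectors to get local_x/local_y/local_z. Assuming the usual
# rotation-vector convention, SciPy's Rotation gives an equivalent matrix (shown
# here only as a stand-in, not as SHARPy's own implementation):
import numpy as np
from scipy.spatial.transform import Rotation

psi_demo = np.array([0.0, 0.0, np.pi / 2])             # 90 degrees about the z axis
cab_demo = Rotation.from_rotvec(psi_demo).as_matrix()  # 3x3 rotation matrix
print(cab_demo @ np.array([1.0, 0.0, 0.0]))            # ~[0, 1, 0]: rotated unit x axis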
# Forward Propagation Algorithm By Using Activation Function
import numpy as np
# Define rectified linear unit function
def relu(inp):
out = max(0, inp)
return out
input_data = np.array([2,3])
# Using dictionary in order to save weights of hidden and output layer
weights = { 'node0': np.array([1,1]),
'node1': np.array([-1,1]),
'output': np.array([2,-1])}
node0_input = (input_data * weights['node0']).sum()
node0_output = relu(node0_input)
# Note: .sum() here is the NumPy ndarray method that adds up all elements of the product, not the Python built-in
node1_input = (input_data * weights['node1']).sum()
node1_output = relu(node1_input)
hidden_layer_values = | np.array([node0_output,node1_output]) | numpy.array |
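# To finish the forward pass for illustration: the hidden-layer activations are
# combined with the output weights in the same multiply-and-sum fashion as above
# (a sketch of the remaining step, not part of the sample being completed).
output = (hidden_layer_values * weights['output']).sum()
print(output)  # relu(5)*2 + relu(1)*(-1) = 9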
#!/usr/bin/env python
#
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for testing the pyxir numpy runtime
"""
import unittest
import numpy as np
from pyxir.shapes import TensorShape, TupleShape
from pyxir.runtime.numpy.rt_layer_np import *
class TestXfLayerNpPool2d(unittest.TestCase):
def test_max_pool2d_layer_basic(self):
layers = [
InputLayer(
name='input',
shape=TensorShape([1, 2, 3, 3]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 2, 3, 3])],
subgraph=None
),
PoolingLayer(
name='pool1',
shape=TensorShape([1, 2, 2, 2]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 2, 3, 3])],
subgraph=None,
attrs={
'data_layout': 'NCHW'
},
op='Max',
ksize=[2, 2],
paddings=[[0, 0], [0, 0], [0, 0], [0, 0]],
strides=[1, 1, 1, 1]
)
]
inputs = {
'input': np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[1, -1, 0], [3, 0, -5], [0, 1, 8]]],
dtype=np.float32).reshape((1, 2, 3, 3))
}
for layer in layers:
inpts = [inputs[name] for name in layer.inputs]
outpt = layer.forward_exec(inpts)
inputs[layer.name] = outpt
expected_outpt = np.array([[
[[5., 6.], [8., 9.]],
[[3., 0.], [3., 8.]]
]])
np.testing.assert_array_equal(outpt, expected_outpt)
def test_max_pool2d_layer_padding(self):
layers = [
InputLayer(
name='input',
shape=TensorShape([1, 2, 2, 2]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 2, 2, 2])],
subgraph=None
),
PoolingLayer(
name='pool1',
shape=TensorShape([1, 2, 2, 2]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 2, 2, 2])],
subgraph=None,
attrs={
'data_layout': 'NCHW'
},
op='Max',
ksize=[2, 2],
paddings=[[0, 0], [0, 0], [1, 0], [1, 0]],
strides=[1, 1, 1, 1]
)
]
inputs = {
'input': np.array([[[1, -2], [8, 9]],
[[1, -1], [0, 1]]],
dtype=np.float32).reshape((1, 2, 2, 2))
}
for layer in layers:
inpts = [inputs[name] for name in layer.inputs]
outpt = layer.forward_exec(inpts)
inputs[layer.name] = outpt
expected_outpt = np.array([[
[[1., 1.], [8., 9.]],
[[1., 1.], [1., 1.]]
]])
np.testing.assert_array_equal(outpt, expected_outpt)
def test_max_pool2d_layer_stride2(self):
layers = [
InputLayer(
name='input',
shape=TensorShape([1, 2, 3, 3]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 2, 3, 3])],
subgraph=None
),
PoolingLayer(
name='pool1',
shape=TensorShape([1, 2, 2, 2]),
dtype='float32',
inputs=['input'],
input_shapes=[TensorShape([1, 2, 3, 3])],
subgraph=None,
attrs={
'data_layout': 'NCHW'
},
op='Max',
ksize=[3, 3],
paddings=[[0, 0], [0, 0], [1, 1], [1, 1]],
strides=[1, 1, 2, 2]
)
]
inputs = {
'input': np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[1, -1, 0], [3, 0, -5], [0, 1, 8]]],
dtype=np.float32).reshape((1, 2, 3, 3))
}
for layer in layers:
inpts = [inputs[name] for name in layer.inputs]
outpt = layer.forward_exec(inpts)
inputs[layer.name] = outpt
expected_outpt = np.array([[
[[5., 6.], [8., 9.]],
[[3., 0.], [3., 8.]]
]])
| np.testing.assert_array_equal(outpt, expected_outpt) | numpy.testing.assert_array_equal |
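# A tiny NumPy reference for the valid-padding, stride-1 max pooling used in the
# first test above, handy for verifying the hard-coded expected_outpt by hand
# (illustrative helper only; the tests themselves rely on PoolingLayer):
def ref_maxpool2d(x, k):
    # x: (H, W) single channel, valid padding, stride 1
    h, w = x.shape
    return np.array([[x[i:i + k, j:j + k].max()
                      for j in range(w - k + 1)]
                     for i in range(h - k + 1)])

chan0 = np.arange(1, 10, dtype=np.float32).reshape(3, 3)
print(ref_maxpool2d(chan0, 2))  # [[5. 6.] [8. 9.]] -- first channel of expected_outpt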
import numpy as np
import cv2 as cv
from Data_Augmentation.image_transformer import ImageTransformer
from Data_Augmentation.utility import getTheBoundRect
import sys
import random
padding=50
class SampImgModifier:
def __init__(self,image,size,lower,upper,bgcolor):
self.height=size[0]+padding*2
self.width=size[1]+padding*2
self.channels=size[2]
self.image = bgcolor* np.ones((self.height,self.width,self.channels),np.uint8)
self.image[padding:(self.height-padding),padding:(self.width-padding)]=np.copy(image[0:size[0],0:size[1]])
self.modifiedFlag=0
self.lower=lower
self.upper=upper
self.maskImage=cv.inRange(self.image,lower,upper)
self.modifiedImg=np.copy(self.image)
def addGaussianNoise(self,noiseMean,noiseVariance):
noiseSigma = noiseVariance ** 0.5
foregrndPix = (np.where(self.maskImage == 0))
if(self.modifiedFlag==1):
height,width,_=np.shape(self.modifiedImg)
gaussImg = np.random.normal(noiseMean, noiseSigma, (height, width, self.channels))
self.modifiedImg= np.float32(self.modifiedImg)
self.modifiedImg[foregrndPix] = self.modifiedImg[foregrndPix] + gaussImg[foregrndPix]
self.modifiedImg=np.uint8(self.modifiedImg)
else:
gaussImg = np.random.normal(noiseMean, noiseSigma, (self.height, self.width, self.channels))
self.modifiedImg = np.float32(self.image)
self.modifiedImg[foregrndPix] = self.modifiedImg[foregrndPix] + gaussImg[foregrndPix]
self.modifiedImg = np.uint8(self.modifiedImg)
self.modifiedFlag = 1
def addMedianNoise(self,percentPixel,percentSalt):
foregroundPix=np.where(self.maskImage==0)
s=np.size(foregroundPix)/2
numPixels=int(percentPixel*s)
allCoord=np.array(range(0,int(s)))
random.shuffle(allCoord)
salt_end=int(percentSalt*numPixels)
indices = np.zeros((np.shape(foregroundPix)), np.uint64)
indices[0] = np.array([foregroundPix[0]])
indices[1] = np.array([foregroundPix[1]])
        salt_pixels=tuple(map(tuple,indices[:,allCoord[0:salt_end]]))
        pepper_pixels=tuple(map(tuple,indices[:,allCoord[salt_end:numPixels]]))
if (self.modifiedFlag == 1):
self.modifiedImg[salt_pixels]=[255,255,255]
self.modifiedImg[pepper_pixels]= [0, 0, 0]
else:
self.modifiedImg= | np.copy(self.image) | numpy.copy |
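# A minimal usage sketch for the class above (the image, thresholds and noise
# parameters are made up for illustration): build a modifier around a dummy
# foreground patch and add zero-mean Gaussian noise to the non-background pixels.
if __name__ == '__main__':
    sample = np.full((64, 64, 3), 40, np.uint8)        # dummy dark foreground
    lower = np.array([250, 250, 250])                  # background colour range
    upper = np.array([255, 255, 255])
    mod = SampImgModifier(sample, sample.shape, lower, upper, bgcolor=255)
    mod.addGaussianNoise(noiseMean=0, noiseVariance=25.0)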
"""
This file was taken from: https://github.com/bharat-b7/MultiGarmentNetwork
"""
import cv2
import numpy as np
from scipy import sparse as sp
def laplacian(v, f):
n = len(v)
v_a = f[:, 0]
v_b = f[:, 1]
v_c = f[:, 2]
ab = v[v_a] - v[v_b]
bc = v[v_b] - v[v_c]
ca = v[v_c] - v[v_a]
cot_a = -1 * (ab * ca).sum(axis=1) / np.sqrt(np.sum(np.cross(ab, ca) ** 2, axis=-1))
cot_b = -1 * (bc * ab).sum(axis=1) / np.sqrt(np.sum(np.cross(bc, ab) ** 2, axis=-1))
cot_c = -1 * (ca * bc).sum(axis=1) / np.sqrt(np.sum(np.cross(ca, bc) ** 2, axis=-1))
I = np.concatenate((v_a, v_c, v_a, v_b, v_b, v_c))
J = np.concatenate((v_c, v_a, v_b, v_a, v_c, v_b))
W = 0.5 * np.concatenate((cot_b, cot_b, cot_c, cot_c, cot_a, cot_a))
L = sp.csr_matrix((W, (I, J)), shape=(n, n))
L = L - sp.spdiags(L * np.ones(n), 0, n, n)
return L
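# Quick, illustrative check of the cotangent Laplacian above on a single right
# triangle: every row of L sums to zero because the diagonal subtracts the row sum.
demo_v = np.array([[0.0, 0.0, 0.0],
                   [1.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0]])
demo_f = np.array([[0, 1, 2]])
print(laplacian(demo_v, demo_f).toarray().sum(axis=1))  # ~[0. 0. 0.]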
def get_hres(v, f):
"""
Get an upsampled version of the mesh.
OUTPUT:
- nv: new vertices
- nf: faces of the upsampled
- mapping: mapping from low res to high res
"""
from opendr.topology import loop_subdivider
(mapping, nf) = loop_subdivider(v, f)
nv = mapping.dot(v.ravel()).reshape(-1, 3)
return (nv, nf, mapping)
def barycentric_coordinates(p, q, u, v):
"""
Calculate barycentric coordinates of the given point
:param p: a given point
:param q: triangle vertex
:param u: triangle vertex
:param v: triangle vertex
:return: 1X3 ndarray with the barycentric coordinates of p
"""
v0 = u - q
v1 = v - q
v2 = p - q
d00 = v0.dot(v0)
d01 = v0.dot(v1)
d11 = v1.dot(v1)
d20 = v2.dot(v0)
d21 = v2.dot(v1)
denom = d00 * d11 - d01 * d01
y = (d11 * d20 - d01 * d21) / denom
z = (d00 * d21 - d01 * d20) / denom
x = 1.0 - z - y
return | np.array([x, y, z]) | numpy.array |
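# Usage sketch for barycentric_coordinates on the unit right triangle: the point
# (0.25, 0.25) decomposes as 0.5*q + 0.25*u + 0.25*v, so the function should
# return [0.5, 0.25, 0.25].
q_demo = np.array([0.0, 0.0])
u_demo = np.array([1.0, 0.0])
v_demo = np.array([0.0, 1.0])
p_demo = np.array([0.25, 0.25])
print(barycentric_coordinates(p_demo, q_demo, u_demo, v_demo))  # [0.5  0.25 0.25]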
from __future__ import division
import torch
import torch.nn as nn
import os
import numpy as np
from scipy.ndimage import imread
import h5py
from network.net_track1 import make_model
from tools.utils import save_matv73, AverageMeter
class args:
    # Configuration holder for the network hyper-parameters; the fields are
    # expected to be set before make_model(args) is called.
    n_resgroups = None
    n_resblocks = None
    n_feats = None
    n_reduction = None
def cal_mrae(target,img_res):
diff = target - img_res
abs_diff = np.abs(diff)
relative_abs_diff = np.divide(abs_diff,target + 1)
MRAEs = np.mean(relative_abs_diff)
# ax = sns.heatmap(MRAEs[:,:,1],vmin=0, vmax=1)
return MRAEs
def postprocess(img_res):
img_res = torch.clamp(img_res*65535,max=65535,min=0)
img_res = torch.round(img_res)
img_res = np.squeeze(np.transpose(torch.Tensor.cpu(img_res).detach().numpy(),[3,2,1,0]),axis=3)
return img_res
def self_ensemble(model,input_data,target):
input_data1 = input_data
input_data2 = np.flip(input_data,2)
input_data3 = np.rot90(input_data1, k=1, axes=(2, 1))
input_data4 = np.rot90(input_data1, k=2, axes=(2, 1))
input_data5 = np.rot90(input_data1, k=3, axes=(2, 1))
input_data6 = np.rot90(input_data2, k=1, axes=(2, 1))
input_data7 = np.rot90(input_data2, k=2, axes=(2, 1))
input_data8 = np.rot90(input_data2, k=3, axes=(2, 1))
input_data1 = np.expand_dims(input_data1, axis=0).copy()
input_data2 = np.expand_dims(input_data2, axis=0).copy()
input_data3 = | np.expand_dims(input_data3, axis=0) | numpy.expand_dims |
import time
import numpy as np
from keras import models
from keras.layers import Dense, Activation
from keras.utils import Sequence
import tcn
def _preproccess(actions, labels):
    # Gesture labels (in Chinese): start, pause, put down, right, left, flip up (standard), flip down (standard)
    needed = ['开始', '暂停', '放下', '向右', '向左', '上翻_标准', '下翻_标准']
results = []
for action in actions:
for i, label in enumerate(needed):
if label in labels[action[2]]:
action[2] = i
results.append(action)
break
return np.array(results), needed
def preproccess(keypoints, actions):
    # Subtract each action's start index from its end index to get the length of every action
    length = actions[..., 1] - actions[..., 0]
    # Filter out actions that are too long, too short, or even of negative length
    actions = actions[np.logical_and(4 < length, length < 64)]
    # Convert the result into a data structure that is convenient for machine learning
n = keypoints.shape[0]
y = np.zeros(n, dtype='int32')
w = np.zeros(n, dtype='float32')
for i, (start, last, action) in enumerate(actions):
for j in range(start, last + 1):
y[j] = action
w[j] = (j - start + 1) / (last - start + 1)
    # Start the actual preprocessing
epsilon = 1e-7
    # Ignore the confidence channel; keep only the (x, y) coordinates
x = keypoints[..., :2].copy()
x -= x.mean(1, keepdims=True)
x /= x.max(1, keepdims=True) - x.min(1, keepdims=True) + epsilon
    # Definition: the end point of a natural 'put down' gesture is a valid cut point, which makes it easy to shuffle the actions
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.split.html
cut = actions[actions[:, 2] == 2][:, 1]
x = np.split(x, cut)
y = | np.split(y, cut) | numpy.split |
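# Illustration of the cut-and-shuffle idea described in the comments above, on a
# toy array: splitting at the cut indices yields chunks that can be permuted and
# re-concatenated without breaking any single gesture (toy values only).
toy = np.arange(10)
toy_cut = np.array([3, 7])                       # pretend these are 'put down' end points
chunks = np.split(toy, toy_cut)                  # [0..2], [3..6], [7..9]
order = np.random.permutation(len(chunks))
print(np.concatenate([chunks[i] for i in order]))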
# encoding: utf-8
#
# @Author: <NAME>, <NAME>
# @Date: Nov 15, 2021
# @Filename: ism.py
# @License: BSD 3-Clause
# @Copyright: <NAME>, <NAME>
import os.path
from astropy import units as u
from astropy import constants as c
import numpy as np
from astropy.io import fits, ascii
from astropy.table import Table
from scipy.special import sph_harm
from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_scales
from astropy.coordinates import SkyCoord
from astropy.modeling.models import Sersic2D
from dataclasses import dataclass
import sys
if (sys.version_info[0]+sys.version_info[1]/10.) < 3.8:
from backports.cached_property import cached_property
else:
from functools import cached_property
from scipy.ndimage.interpolation import map_coordinates
from scipy.interpolate import interp1d, interp2d
import lvmdatasimulator
from lvmdatasimulator import log
import progressbar
from joblib import Parallel, delayed
from astropy.convolution import convolve_fft, kernels
from lvmdatasimulator.utils import calc_circular_mask, convolve_array, set_default_dict_values, \
ism_extinction, check_overlap, assign_units
fluxunit = u.erg / (u.cm ** 2 * u.s * u.arcsec ** 2)
velunit = u.km / u.s
def brightness_inhomogeneities_sphere(harm_amplitudes, ll, phi_cur, theta_cur, rho, med, radius, thickness):
"""
Auxiliary function producing the inhomogeneities on the brightness distribution for the Cloud of Bubble objects
using the spherical harmonics.
"""
brt = theta_cur * 0
for m in np.arange(-ll, ll + 1):
brt += (harm_amplitudes[m + ll * (ll + 1) - 1] * sph_harm(m, ll, phi_cur, theta_cur).real * med *
(1 - np.sqrt(abs(rho.value ** 2 / radius.value ** 2 - (1 - thickness / 2) ** 2))))
return brt
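# Reference point for the spherical-harmonic sum above: the l=0, m=0 harmonic is
# the constant 1/(2*sqrt(pi)), which is a quick way to sanity-check sph_harm calls.
from scipy.special import sph_harm
print(sph_harm(0, 0, 0.0, 0.0).real)  # ~0.2820948 = 1 / (2*sqrt(pi))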
def sphere_brt_in_line(brt_3d, rad_3d, rad_model, flux_model):
"""
Auxiliary function computing the brightness of the Cloud or Bubble at given radii and in given line
according to the Cloudy models
"""
p = interp1d(rad_model, flux_model, fill_value='extrapolate', assume_sorted=True)
return p(rad_3d) * brt_3d
def interpolate_sphere_to_cartesian(spherical_array, x_grid=None, y_grid=None, z_grid=None,
rad_grid=None, theta_grid=None, phi_grid=None, pxscale=1. * u.pc):
"""
Auxiliary function to project the brightness or velocities from the spherical to cartesian coordinates
"""
x, y, z = np.meshgrid(x_grid, y_grid, z_grid, indexing='ij')
phi_c, theta_c, rad_c = xyz_to_sphere(x, y, z, pxscale=pxscale)
ir = interp1d(rad_grid, np.arange(len(rad_grid)), bounds_error=False)
ith = interp1d(theta_grid, np.arange(len(theta_grid)))
iphi = interp1d(phi_grid, np.arange(len(phi_grid)))
new_ir = ir(rad_c.ravel())
new_ith = ith(theta_c.ravel())
new_iphi = iphi(phi_c.ravel())
cart_data = map_coordinates(spherical_array, np.vstack([new_ir, new_ith, new_iphi]),
order=1, mode='constant', cval=0)
return cart_data.reshape([len(x_grid), len(y_grid), len(z_grid)]).T
def limit_angle(value, bottom_limit=0, top_limit=np.pi):
"""
Auxiliary function to limit the angle values to the range of [0, pi]
"""
value[value < bottom_limit] += (top_limit - bottom_limit)
value[value > top_limit] -= (top_limit - bottom_limit)
return value
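# limit_angle simply wraps values back into [bottom_limit, top_limit]; with the
# default [0, pi] limits, out-of-range entries are shifted by one period:
print(limit_angle(np.array([-0.5, 1.0, 3.5])))  # ~[2.642 1.    0.358]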
def xyz_to_sphere(x, y, z, pxscale=1. * u.pc):
"""
Auxiliary function to map the coordinates from cartesian to spherical system
"""
phi_c = np.arctan2(y, x)
rad_c = (np.sqrt(x ** 2 + y ** 2 + z ** 2))
rad_c[rad_c == 0 * u.pc] = 1e-3 * pxscale
theta_c = (np.arccos(z / rad_c))
phi_c = limit_angle(phi_c, 0 * u.radian, 2 * np.pi * u.radian)
theta_c = limit_angle(theta_c, 0 * u.radian, np.pi * u.radian)
return phi_c, theta_c, rad_c
def find_model_id(file=lvmdatasimulator.CLOUDY_MODELS,
check_id=None, params=lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']):
"""
Checks the input parameters of the pre-computed Cloudy model and return corresponding index in the grid
"""
with fits.open(file) as hdu:
if check_id is None:
if params is None:
check_id = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
log.warning(f'Default Cloudy model will be used (id = {check_id})')
else:
summary_table = Table(hdu['Summary'].data)
indexes = np.arange(len(summary_table)).astype(int)
rec_table = np.ones(shape=len(summary_table), dtype=bool)
def closest(rec, prop, val):
unique_col = np.unique(summary_table[prop][rec])
if isinstance(val, str):
res = unique_col[unique_col == val]
if len(res) == 0:
return ""
return res
else:
return unique_col[np.argsort(np.abs(unique_col - val))[0]]
for p in params:
if p not in summary_table.colnames or params[p] is None or \
((isinstance(params[p], float) or isinstance(params[p], int)) and ~np.isfinite(params[p])):
continue
rec_table = rec_table & (summary_table[p] == closest(indexes, p, params[p]))
indexes = np.flatnonzero(rec_table)
if len(indexes) == 0:
break
if len(indexes) == 0 or len(indexes) == len(summary_table):
check_id = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
log.warning('Input parameters do not correspond to any pre-computed Cloudy model.'
'Default Cloudy model will be used (id = {0})'.format(check_id))
elif len(indexes) == 1:
check_id = summary_table['Model_ID'][indexes[0]]
for p in params:
if p not in summary_table.colnames or params[p] is None or \
((isinstance(params[p], float) or
isinstance(params[p], int)) and ~np.isfinite(params[p])):
continue
if params[p] != summary_table[p][indexes[0]]:
log.warning(f'Use the closest pre-computed Cloudy model with id = {check_id}')
break
else:
check_id = summary_table['Model_ID'][indexes[0]]
log.warning(f'Select one of the closest pre-computed Cloudy model with id = {check_id}')
#
# for cur_ext in range(len(hdu)):
# if cur_ext == 0:
# continue
# found = False
# for p in params:
# if p == 'id':
# continue
# precision = 1
# if p == 'Z':
# precision = 2
# if np.round(params[p], precision) != np.round(hdu[cur_ext].header[p], precision):
# break
# else:
# found = True
# if found:
# return cur_ext, check_id
# check_id = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
# log.warning('Input parameters do not correspond to any pre-computed Cloudy model.'
# 'Default Cloudy model will be used (id = {0})'.format(check_id))
extension_index = None
while extension_index is None:
extension_index = [cur_ext for cur_ext in range(len(hdu)) if (
check_id == hdu[cur_ext].header.get('MODEL_ID'))]
if len(extension_index) == 0:
if check_id == lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']:
log.warning('Model_ID = {0} is not found in the Cloudy models grid. '
'Use the first one in the grid instead'.format(check_id))
extension_index = 1
else:
log.warning('Model_ID = {0} is not found in the Cloudy models grid. '
'Use default ({1}) instead'.format(check_id,
lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']))
check_id = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
extension_index = None
else:
extension_index = extension_index[0]
return extension_index, check_id
@dataclass
class Nebula:
"""
Base class defining properties of every nebula type.
By itself it describes the rectangular nebula (e.g. DIG)
    Constructed nebula has 4 dimensions, where the 4th one describes its appearance in different lines
    (if spectrum_id is None, or if it is a dark nebula => only one line)
"""
xc: int = None # Center of the region in the field of view, pix
yc: int = None # Center of the region in the field of view, pix
x0: int = 0 # Coordinates of the bottom-left corner in the field of view, pix
y0: int = 0 # Coordinates of the bottom-left corner in the field of view, pix
pix_width: int = None # full width of cartesian grid, pix (should be odd)
pix_height: int = None # full height of cartesian grid, pix (should be odd)
width: u.pc = 0 * u.pc # width of the nebula in pc (not used if pix_width is set up)
height: u.pc = 0 * u.pc # height of the nebula in pc (not used if pix_height is set up)
pxscale: u.pc = 0.01 * u.pc # pixel size in pc
spectrum_id: int = None # ID of a template Cloudy emission spectrum for this nebula
n_brightest_lines: int = None # limit the number of the lines to the first N brightest
sys_velocity: velunit = 0 * velunit # Systemic velocity
turbulent_sigma: velunit = 10 * velunit # Velocity dispersion due to turbulence; included in calculations of LSF
max_brightness: fluxunit = 1e-15 * fluxunit
max_extinction: u.mag = 0 * u.mag
perturb_scale: int = 0 * u.pc # Spatial scale of correlated perturbations
perturb_amplitude: float = 0.1 # Maximal amplitude of perturbations
_npix_los: int = 1 # full size along line of sight in pixels
nchunks: int = -1 # number of chuncks to use for the convolution. If negative, select automatically
vel_gradient: (velunit / u.pc) = 0 # velocity gradient along the nebula
vel_pa: u.degree = 0 # Position angle of the kinematical axis (for the velocity gradient or rotation velocity)
def __post_init__(self):
self._assign_all_units()
self._assign_position_params()
self._ref_line_id = 0
self.linerat_constant = True # True if the ratio of line fluxes shouldn't change across the nebula
def _assign_all_units(self):
whole_list_properties = ['pxscale', 'sys_velocity', 'turbulent_sigma', 'max_brightness', 'max_extinction',
'perturb_scale', 'radius', 'PA', 'length', 'width', 'vel_gradient', 'r_eff',
'vel_rot', 'expansion_velocity', 'spectral_axis', 'vel_pa']
whole_list_units = [u.pc, velunit, velunit, fluxunit, u.mag, u.pc, u.pc, u.degree, u.pc, u.pc,
(velunit / u.pc), u.kpc, velunit, velunit, velunit, u.degree]
cur_list_properties = []
cur_list_units = []
for prp, unit in zip(whole_list_properties, whole_list_units):
if hasattr(self, prp):
cur_list_properties.append(prp)
cur_list_units.append(unit)
assign_units(self, cur_list_properties, cur_list_units)
def _assign_position_params(self, conversion_type='rect'):
if conversion_type == 'rect':
for v in ['height', 'width']:
if self.__getattribute__(f'pix_{v}') is None:
val = np.round((self.__getattribute__(v) / self.pxscale).value / 2.).astype(int) * 2 + 1
else:
val = np.round(self.__getattribute__(f'pix_{v}') / 2.).astype(int) * 2 + 1
setattr(self, f'pix_{v}', val)
elif conversion_type == 'ellipse':
self.pix_width = (np.round(np.abs(self.radius / self.pxscale * np.sin(self.PA)) +
np.abs(self.radius / self.pxscale *
self.ax_ratio * np.cos(self.PA))).astype(int) * 2 + 1).value
self.pix_height = (np.round(np.abs(self.radius / self.pxscale * np.cos(self.PA)) +
np.abs(self.radius / self.pxscale *
self.ax_ratio * np.sin(self.PA))).astype(int) * 2 + 1).value
elif conversion_type == 'galaxy':
self.pix_width = (np.round(np.abs(self.r_max * np.sin(self.PA)) +
np.abs(self.r_max * self.ax_ratio * np.cos(self.PA))).astype(int) * 2 + 1).value
self.pix_height = (np.round(np.abs(self.r_max * np.cos(self.PA)) +
np.abs(self.r_max * self.ax_ratio * np.sin(self.PA))).astype(int) * 2 + 1).value
elif conversion_type == 'cylinder':
self.pix_width = (np.ceil((self.length * np.abs(np.sin(self.PA)) +
self.width * np.abs(np.cos(self.PA))) / self.pxscale / 2.
).astype(int) * 2 + 1).value
self.pix_height = (np.ceil((self.length * np.abs(np.cos(self.PA)) +
self.width * np.abs(np.sin(self.PA))) / self.pxscale / 2.
).astype(int) * 2 + 1).value
if (self.xc is not None) and (self.yc is not None):
self.x0 = self.xc - np.round((self.pix_width - 1) / 2).astype(int)
self.y0 = self.yc - np.round((self.pix_height - 1) / 2).astype(int)
elif (self.x0 is not None) and (self.y0 is not None):
self.xc = self.x0 + np.round((self.pix_width - 1) / 2).astype(int)
self.yc = self.y0 + np.round((self.pix_height - 1) / 2).astype(int)
@cached_property
def _cartesian_x_grid(self):
return np.arange(self.pix_width) * self.pxscale
@cached_property
def _cartesian_y_grid(self):
return np.arange(self.pix_height) * self.pxscale
@cached_property
def _cartesian_z_grid(self):
return np.arange(self._npix_los) * self.pxscale
@cached_property
def _max_density(self):
return self.max_extinction * (1.8e21 / (u.cm ** 2 * u.mag))
@cached_property
def _brightness_3d_cartesian(self):
"""
Method to obtain the brightness (or density) distribution of the nebula in cartesian coordinates
"""
brt = np.ones(shape=(self.pix_height, self.pix_width, self._npix_los), dtype=float) / self._npix_los
if (self.perturb_scale > 0) and (self.perturb_amplitude > 0):
pertscale = (self.perturb_scale / self.pxscale).value
perturb = np.random.uniform(-1, 1, (self.pix_height, self.pix_width)
) * self.perturb_amplitude / self._npix_los
xx, yy = np.meshgrid(np.arange(self.pix_width), np.arange(self.pix_height))
f = np.exp(-2 * (xx ** 2 + yy ** 2) / pertscale)
perturb = 4 / np.sqrt(np.pi) / pertscale * np.fft.ifft2(np.fft.fft2(perturb) * np.fft.fft2(f)).real
brt += (perturb[:, :, None] - np.median(perturb))
return brt
@cached_property
def _brightness_4d_cartesian(self):
"""
Derive the brightness (or density) distribution of the nebula for each emission line in cartesian coordinates
"""
if self.spectrum_id is None or self.linerat_constant:
flux_ratios = np.array([1.])
else:
with fits.open(lvmdatasimulator.CLOUDY_MODELS) as hdu:
flux_ratios = hdu[self.spectrum_id].data[1:, 1]
index_ha = np.flatnonzero(hdu[self.spectrum_id].data[1:, 0] == 6562.81)
if self.n_brightest_lines is not None and \
(self.n_brightest_lines > 0) and (self.n_brightest_lines < len(flux_ratios)):
indexes_sorted = np.argsort(flux_ratios)[::-1]
flux_ratios = flux_ratios[indexes_sorted[: self.n_brightest_lines]]
index_ha = np.flatnonzero(hdu[self.spectrum_id].data[1:, 0][indexes_sorted] == 6562.81)
if len(index_ha) == 1:
self._ref_line_id = index_ha[0]
return self._brightness_3d_cartesian[None, :, :, :] * flux_ratios[:, None, None, None]
@cached_property
def brightness_skyplane(self):
"""
Project the 3D nebula onto sky plane (for emission or continuum sources)
"""
if self.max_brightness > 0:
norm_max = self.max_brightness
else:
norm_max = 1
map2d = np.nansum(self._brightness_3d_cartesian, 2)
return map2d / np.nanmax(map2d) * norm_max
@cached_property
def brightness_skyplane_lines(self):
"""
Project the 3D emission nebula line onto sky plane (return images in each emission line)
"""
if self.max_brightness > 0:
map2d = np.nansum(self._brightness_4d_cartesian, 3)
return map2d / np.nanmax(map2d[self._ref_line_id, :, :]) * self.max_brightness
else:
return None
@cached_property
def extinction_skyplane(self):
"""
Project the 3D nebula onto sky plane (for dark clouds)
"""
if self.max_extinction > 0:
map2d = | np.nansum(self._brightness_3d_cartesian, 2) | numpy.nansum |
import numpy as np
from ..utils.sampling import Region
def mk_gal(gal,
save=False,
verbose=False,
mstar_min=1e9,
den_lim=1e6,
den_lim2=5e6,
rmin = -1,
Rgal_to_reff=5.0,
Rgal_max = 50,
method_com="catalog",
method_cov="catalog",
method_member="Reff",
follow_bp=False,
unit_conversion="code",
convert_time=False):
"""
    Determine if this is a legitimate galaxy and re-center its components.
This routine consists of three parts:
1) decide if the system is dense enough.
2) determine member components
3) re-center components and do simple calculations (com, cov, total mass, ...)
Raw star/DM/gas data are only rough representations of a galaxy.
But not much have to be thrown away, either.
Parameters
----------
Rgal_to_reff:
Galaxy radius = Reff * Rgal_to_reff.
By default, Rgal = 5*Reff.
(E's have smaller R_t_r, L's have larger.)
save: False
verbose:False
mstar_min:1e9
den_lim:1e6
den_lim2:5e6
rmin : -1
Rgal_to_reff:5.0
Rgal_max : 50
Maximum possible size of a galaxy.
method_com:"catalog"
method_cov:"catalog"
method_member:"Reff"
follow_bp:False
unit_conversion:"code"
convert_time:
default = False
because it is more efficient to create an timecoverter instance once (load table) and
convert multiple galaxies altogether.
Notes
-----
1. Since now (as of 2016.03) galaxy calculation is based on the GalaxyMaker results,
let's just believe and minimize redundant processes such as determining the center of mass,
system velocity, and checking the presence of (unwanted) substructure.
2. Assuming all catalog in the code units.
3. dr, bin size in determining the radial profile cut scales with exponential factor.
Without scaling, 0.5kpc bin size is too large for ~1kpc galaxies at high-z.
4. The outer part of a galaxy (or halo stars) is sometimes critical.
It is especially true when measuring the total angular momentum of a galaxy (or halo).
On the other hand, including background stars as a member of a galaxy is
a source of errors, particularly for satellite galaxies. So be very smart
to choose the Rmax. (Or it is not a good idea to use Rmax at all.)
"""
# Need halo information
assert (gal.gcat is not None), ("Need a catalog,"
"use Galaxy.set_halo() and provide x,y,z,vx,vy,vz,r,rvir, at least"
"Units are.. ?? ")
pbx = gal.info.pboxsize
# galaxy center from GalaxyMaker. - good enough.
xc = gal.gcat["x"]
yc = gal.gcat["y"]
zc = gal.gcat["z"]
if verbose:
print("Galaxy center : {} {} {} using {}".format(xc, yc, zc, method_com))
vxc = gal.gcat["vx"]
vyc = gal.gcat["vy"]
vzc = gal.gcat["vz"]
if verbose:
print("Velocity center : {} {} {} using {}".format(vxc,vyc,vzc,method_cov))
star = gal.star
# re-center position first.
#if star is not None:
# star["x"] = (star["x"] - xc)*1e3
# star["y"] = (star["y"] - yc)*1e3
# star["z"] = (star["z"] - zc)*1e3
# star["m"] *= 1e11#gal.info.msun
if verbose: print("star x", gal.star["x"])
dm = gal.dm
if dm is not None:
gal._has_dm = True
dm["x"] = (dm["x"] - xc)*1e3
dm["y"] = (dm["y"] - yc)*1e3
dm["z"] = (dm["z"] - zc)*1e3
dm["m"] *= gal.info.msun
cell = gal.cell
# Don't convert cell units here,
# COPY only relevant cells and then modify them.
if cell is not None:
gal.__has_cell = True
assert (gal._has_star or gal._has_dm or gal._has_cell), ("At least"
"one of three(star, dm, gas) component is needed")
# all tests passed.
if verbose:
print("Making a galaxy:", gal.meta.id)
print("SAVE:", save)
print("Halo size:", gal.gcat['rvir'])
rgal_tmp = min([gal.gcat['r'] * 1e3, Rgal_max]) # gcat["rvir"] in kpc
if verbose:
print("Rgal_tmp", rgal_tmp)
print("gal.debug",gal.debug)
dense_enough = radial_profile_cut(gal, star['x'], star['y'], star['m'],
den_lim=den_lim, den_lim2=den_lim2,
mag_lim=25,
nbins=int(rgal_tmp/0.5),
dr=0.5 * gal.info.aexp,
rmax=rgal_tmp,
debug=gal.debug)
if not dense_enough:
print("Not dense enough")
return False
if method_com=="catalog":
gal.meta.xc, gal.meta.yc, gal.meta.zc = gal.header["xg"]
dd = (np.square(star['x']) +
np.square(star['y']) +
np.square(star['z']))
if method_cov=="close_member":
        i_close = np.argsort(dd)[:len(star) // 2] # closer half of the members
gal.meta.vxc = np.average(star["vx"][i_close])
gal.meta.vyc = np.average(star["vy"][i_close])
gal.meta.vzc = np.average(star["vz"][i_close])
elif method_cov=="catalog":
gal.meta.vxc, gal.meta.vyc, gal.meta.vzc = gal.header["vg"]
# Membership
ind = np.where(dd < gal.meta.rgal**2)[0]# in kpc unit
gal.star = star[ind]
gal.star["vx"] -= gal.meta.vxc
gal.star["vy"] -= gal.meta.vyc
gal.star["vz"] -= gal.meta.vzc
if gal.debug:
print(f"method_COV = {method_cov}")
print('[galaxy.Galaxy.mk_gal] mimax vx :',
min(gal.star['vx']),
max(gal.star['vx']))
gal.meta.nstar = len(ind)
gal.meta.mstar = sum(gal.star['m'])
if gal.meta.mstar < mstar_min:
print("Not enough stars: {:.2e} Msun".format(gal.meta.mstar))
print("{} Aborting... \n".format(len(gal.star['m'])))
gal.meta.star = False
return False
gal.meta.Rgal_to_reff = gal.meta.rgal / gal.meta.reff
# should not surpass rr_tmp, over where another galaxy might be.
# Test if the outer annulus has significant amount of stars
# -> it shouldn't.
if star is not None:
nstar_tot = len(star['x'][ind])
if verbose: print("nstar tot:", nstar_tot)
if verbose: print("Store stellar particle")
if 'time' in gal.star.dtype.names and convert_time:
from utils.cosmology import Timeconvert
tc = Timeconvert(gal.info)
gal.star['time'] = tc.time2gyr(gal.star['time'],
z_now = gal.info.zred)
# VERY arbitrary..
rgal_tmp = gal.meta.Rgal_to_reff *gal.meta.reff
gal.region = Region(xc=gal.meta.xc,
yc=gal.meta.yc,
zc=gal.meta.zc,
radius = gal.meta.rgal)
if gal.debug:
print('[galaxy.Galaxy.mk_gal] meta.v[x,y,z]c',
gal.meta.vxc, gal.meta.vyc, gal.meta.vzc)
print('[galaxy.Galaxy.mk_gal] mima vx 2',
min(gal.star['vx']), max(gal.star['vx']))
if dm is not None:
if method_member == "Reff":
idm = np.where( np.square(dm["x"] - gal.meta.xc) +
np.square(dm["y"] - gal.meta.yc) +
np.square(dm["z"] - gal.meta.zc) <= np.square(rgal_tmp))[0]
elif method_member == "v200":
# Although the velocity is redefined later,
# particle membership is fixed at this point.
idm = np.where( np.square(dm["vx"] - gal.meta.vxc / gal.info.kms)+
np.square(dm["vy"] - gal.meta.vyc / gal.info.kms)+
np.square(dm["vz"] - gal.meta.vzc / gal.info.kms) <= np.square(200**2))[0]
gal._convert_unit("dm", unit_conversion)
if cell is not None:
dtype_cell = [('x', '<f8'), ('y', '<f8'), ('z', '<f8'),
('dx', '<f8'), ('rho', '<f8'),
('vx', '<f8'), ('vy', '<f8'), ('vz', '<f8'),
('temp', '<f8')]#, ('metal', '<f8')]
if "var5" in cell.dtype.names:
if len(cell.dtype.names) < 12:
dtype_cell.append(("metal", "<f8"))
else:
print("[mk_gal] Warning...")
print("[mk_gal] Don't know what to do with all the hydro-variables:")
print("[mk_gal] ",cell.dtype)
print("[mk_gal] Ignoring anyting after the temperature field.")
if "cpu" in cell.dtype.names:
dtype_cell.append(('cpu', '<f8'))
if verbose: print("Cell is NOT none")
icell = np.where(np.square(cell["x"] - (xc/pbx + 0.5)) +
np.square(cell["y"] - (yc/pbx + 0.5)) +
np.square(cell["z"] - (zc/pbx + 0.5)) <= np.square(rgal_tmp))[0]
#gal._add_cell(cell, icell)
#gal._convert_unit("cell", unit_conversion)
gal.cell = np.recarray(len(icell), dtype=dtype_cell)
gal.cell['x'] = (cell['x'][icell] - 0.5) * pbx * 1e3 - xc
        gal.cell['y'] = (cell['y'][icell] - 0.5) * pbx * 1e3 - yc
        gal.cell['z'] = (cell['z'][icell] - 0.5) * pbx * 1e3 - zc
gal.cell['dx'] = cell['dx'][icell] * pbx * 1000
gal.cell['rho'] = cell['var0'][icell]
gal.cell['vx'] = cell['var1'][icell] * gal.info.kms - gal.meta.vxc
gal.cell['vy'] = cell['var2'][icell] * gal.info.kms - gal.meta.vyc
gal.cell['vz'] = cell['var3'][icell] * gal.info.kms - gal.meta.vzc
gal.cell['temp'] = cell['var4'][icell]
if "var5" in cell.dtype.names:
gal.cell['metal'] = cell['var5'][icell]
if "cpu" in cell.dtype.names:
gal.cell['cpu'] = cell['cpu'][icell]
print("cell x, final", cell["x"])
gal.cal_mgas()
# Some more sophistications.
"""
print("Rgal = 4 * Reff = ", rgal_tmp * gal.info.pboxsize * 1000)
# Save sink particle as a BH, not cloud particles.
"""
return True
def extract_cold_gas(gg, rmax = 180, dr = 5, rmin = 10):
"""
Measure radial profile and returns indices of cells inside r_min,
where r_min is the local minima of radial MASS profile.
-> should I use density profile instead?
"""
from scipy.signal import argrelmin
# radial profile.
if not hasattr(gg,"cell"):
print("No cell found")
return
cold_cell = gg.cell[rho_t_cut(gg.cell, gg.info)]
rr = np.sqrt(np.square(cold_cell["x"])+\
np.square(cold_cell["y"])+\
np.square(cold_cell["z"]))
i_sort = np.argsort(rr)
r_sorted = rr[i_sort]
mm = cold_cell["dx"]**3 * cold_cell["var0"]
m_sorted = mm[i_sort]
rmax = min([np.max(rr), rmax])
# Note 1.
# Depends on the cell resolution. How about 8 * dx_min?
# Larger dx will count in small satellites,
# while smaller dx will make the measurement sensitive to density fluctuations.
nbins= int(rmax/dr)
frequency, bins = np.histogram(r_sorted, bins = nbins, range=[0, rmax])
bin_centers = bins[:-1] + 0.5 * dr # remove the rightmost boundary.
m_radial = np.zeros(nbins)
ibins = np.concatenate((np.zeros(1,dtype=int), np.cumsum(frequency)))
for i in range(nbins):
m_radial[i] = np.sum(m_sorted[ibins[i]:ibins[i+1]])
# Check stellar surface density
sig_at_r = m_radial[i]/(2 * np.pi * bin_centers[i] * dr)
# Find local minimum
# If there is flat zeros, take the first zero.
# If not, use scipy.argrelmin
i_zero = np.argmax(m_radial==0)
if i_zero > 0:
ind_min = i_zero -1
else:
ind_min= argrelmin(m_radial)[0] -1 # 1D array for 1D input.
        ind_min = ind_min[np.argmax(ind_min * dr > rmin)]
# Note 2.
# If the minimum is farther than rmin=10kpc,
# I assume that is correct.
gg.cell = cold_cell[rr < bin_centers[ind_min]]
gg.mgas_cold = np.sum(gg.cell["var0"]*gg.cell["dx"]**3)
gg.cold_gas_profile = dict(profile=m_radial[:ind_min],dr=dr)
def rho_t_cut(cell, info, clip_sigma=0):
"""
Extract galactic cold gas following Torrey+12 criterion.
Assume cells in the original (code) unit.
"""
# Var0 in Msun h^2 kpc^-3 unit.
kpc_in_cm = 3.08567758e21
msun_in_g = 1.99e33
gcc2this_unit = kpc_in_cm**3/msun_in_g
if clip_sigma > 0:
pass
#Do sigma clipping..
return np.log10(cell["var4"]/cell["var0"]*info.unit_T2) < 6 + 0.25*np.log10((cell["var0"]*info.unit_d)*gcc2this_unit*1e-10) #
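# A minimal usage sketch for rho_t_cut: 'info' only needs the unit_T2 and unit_d
# attributes used above, so a small namespace object with placeholder unit values
# (assumed here, not taken from the original pipeline) is enough to run it.
from types import SimpleNamespace

demo_cells = np.zeros(2, dtype=[("var0", "f8"), ("var4", "f8")])
demo_cells["var0"] = [1e-2, 1e-6]    # code-unit densities
demo_cells["var4"] = [1e2, 1e6]      # code-unit pressure/temperature proxy
demo_info = SimpleNamespace(unit_T2=1.0, unit_d=1.0)  # placeholder conversions
print(rho_t_cut(demo_cells, demo_info))  # boolean mask selecting the "cold" cells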
def radial_profile_cut(gal, xx, yy, mm,
den_lim=1e6, den_lim2=5e6,
mag_lim=25, nbins=100, rmax=20, dr=0.5,
debug=False):
"""
Determine the system velocity and size of the galaxy (stellar).
What about the gas disk size?
    The system velocity determined as np.average(vx[i_close]) sometimes fails,
which may indicate that this function fails to extract reliable member stars.
This occurs more frequently with high-z or high resolution data.
Todo
----
1. Adaptive member determination over varying resolution and redshift.
2. Similar functionality is provided by galaxy.get_radius.
"""
rr = np.sqrt(np.square(xx) + np.square(yy))# in kpc unit
if debug:
print("min(rr) {}\n max(rr){}\n min(xx){}\n max(xx){}".format(
min(rr), max(rr), min(xx), max(xx)))
# Mass weight.
i_sort = np.argsort(rr)
r_sorted = rr[i_sort]
m_sorted = mm[i_sort]
rmax = min([np.max(rr), rmax])
nbins = int(rmax/dr)
if nbins < 3:
print("Too small size \n # of stars:", len(rr))
return False
frequency, bins = np.histogram(r_sorted, bins = nbins, range=[0, rmax])
bin_centers = bins[:-1] + 0.5 * dr # remove the rightmost boundary.
m_radial = np.zeros(nbins)
ibins = np.concatenate((np.zeros(1,dtype=int), np.cumsum(frequency)))
i_r_cut1 = nbins -1 # Maximum value
# on rare occasions, a galaxy's stellar surface density
# never crosses the density limit. Then i_r_cut1 = last index.
for i in range(nbins):
m_radial[i] = np.sum(m_sorted[ibins[i]:ibins[i+1]])
# Check stellar surface density
sig_at_r = m_radial[i]/(2 * np.pi * bin_centers[i] * dr)
if debug:
print(sig_at_r, den_lim)
if sig_at_r < den_lim:
i_r_cut1 = i-1
break
#i_r_cut2= np.argmax(m_radial/(2 * np.pi * bin_centers * dr) < den_lim2)
# If for some reason central region is less dense,
# profile can end at the first index.
# Instead coming from backward, search for the point the opposite condition satisfied.
if debug:
print(rmax, nbins)
print("frequency", frequency)
print("bins", bins)
print("ibins", ibins)
print("bin centers", bin_centers)
print("m_radial", m_radial)
den_radial_inverse = m_radial[::-1]/(2 * np.pi * bin_centers[::-1] * dr)
if debug: print("den_radial_inverse", den_radial_inverse)
if max(den_radial_inverse) < 2 * den_lim2:
np.set_printoptions(precision=3)
print("radial density :",den_radial_inverse)
return False
i_r_cut2=len(m_radial) - np.argmax(den_radial_inverse > den_lim2) -1
if debug:
print("[galaxy.Galaxy.radial_profile_cut] m_radial \n", m_radial)
print("[galaxy.Galaxy.radial_profile_cut] den_radial_inverse \n", den_radial_inverse)
print("[galaxy.Galaxy.radial_profile_cut] i_r_cut2", i_r_cut2)
mtot2 = sum(m_radial[:i_r_cut2])
mtot1 = sum(m_radial[:i_r_cut1])
i_reff2 = np.argmax(np.cumsum(m_sorted) > (0.5*mtot2))
i_reff1 = np.argmax(np.cumsum(m_sorted) > (0.5*mtot1))
gal.meta.reff2 = r_sorted[i_reff2]
gal.meta.reff = r_sorted[i_reff1]
gal.meta.rgal2 = max([bin_centers[i_r_cut2],4*gal.meta.reff2])
gal.meta.rgal = max([bin_centers[i_r_cut1],4*gal.meta.reff])#bin_centers[i_r_cut1]
# It is not wrong for BCGs to have very large Reff(~50kpc).
# But referring the average velocity of stellar particles inside 50kpc
# as the system velocity is WRONG.
# If 1Reff is huge, try smaller aperture when measuring the system velocity.
if debug:
print("[galaxy.Galaxy.radial_profile_cut] mtot, mtot2", mtot1, mtot2)
i_close = i_sort[:np.argmax( | np.cumsum(m_sorted) | numpy.cumsum |
import numpy as np
from scipy.spatial.transform import Rotation as R
import magpylib as magpy
from magpylib._src.exceptions import MagpylibBadUserInput
from magpylib._src.exceptions import MagpylibMissingInput
###########################################################
###########################################################
# OBJECT INPUTS
def test_input_objects_position_good():
"""good input: magpy.Sensor(position=inp)"""
goods = [
(1, 2, 3),
(0, 0, 0),
((1, 2, 3), (2, 3, 4)),
[(2, 3, 4)],
[2, 3, 4],
[[2, 3, 4], [3, 4, 5]],
[(2, 3, 4), (3, 4, 5)],
np.array((1, 2, 3)),
np.array(((1, 2, 3), (2, 3, 4))),
]
for good in goods:
sens = magpy.Sensor(position=good)
np.testing.assert_allclose(sens.position, np.squeeze(np.array(good)))
def test_input_objects_position_bad():
"""bad input: magpy.Sensor(position=inp)"""
bads = [
(1, 2),
(1, 2, 3, 4),
[(1, 2, 3, 4)] * 2,
(((1, 2, 3), (1, 2, 3)), ((1, 2, 3), (1, 2, 3))),
"x",
["x", "y", "z"],
dict(woot=15),
True,
]
for bad in bads:
np.testing.assert_raises(MagpylibBadUserInput, magpy.Sensor, bad)
def test_input_objects_pixel_good():
"""good input: magpy.Sensor(pixel=inp)"""
goods = [
(1, -2, 3),
(0, 0, 0),
((1, 2, 3), (2, 3, 4)),
(((1, 2, 3), (2, -3, 4)), ((1, 2, 3), (2, 3, 4))),
[(2, 3, 4)],
[2, 3, 4],
[[-2, 3, 4], [3, 4, 5]],
[[[2, 3, 4], [3, 4, 5]]] * 4,
[(2, 3, 4), (3, 4, 5)],
np.array((1, 2, -3)),
np.array(((1, -2, 3), (2, 3, 4))),
]
for good in goods:
sens = magpy.Sensor(pixel=good)
np.testing.assert_allclose(sens.pixel, good)
def test_input_objects_pixel_bad():
"""bad input: magpy.Sensor(pixel=inp)"""
bads = [
(1, 2),
(1, 2, 3, 4),
[(1, 2, 3, 4)] * 2,
"x",
["x", "y", "z"],
dict(woot=15),
True,
]
for bad in bads:
np.testing.assert_raises(MagpylibBadUserInput, magpy.Sensor, (0, 0, 0), bad)
def test_input_objects_orientation_good():
"""good input: magpy.Sensor(orientation=inp)"""
goods = [
None,
(0.1, 0.2, 0.3),
(0, 0, 0),
[(0.1, 0.2, 0.3)],
[(0.1, 0.2, 0.3)] * 5,
]
for good in goods:
if good is None:
sens = magpy.Sensor(orientation=None)
np.testing.assert_allclose(sens.orientation.as_rotvec(), (0, 0, 0))
else:
sens = magpy.Sensor(orientation=R.from_rotvec(good))
np.testing.assert_allclose(
sens.orientation.as_rotvec(), np.squeeze(np.array(good))
)
def test_input_objects_orientation_bad():
"""bad input: magpy.Sensor(orientation=inp)"""
bads = [
(1, 2),
(1, 2, 3, 4),
[(1, 2, 3, 4)] * 2,
"x",
["x", "y", "z"],
dict(woot=15),
True,
]
for bad in bads:
np.testing.assert_raises(
MagpylibBadUserInput, magpy.Sensor, (0, 0, 0), (0, 0, 0), bad
)
def test_input_objects_current_good():
"""good input: magpy.current.Loop(inp)"""
goods = [
None,
0,
1,
1.2,
np.array([1, 2, 3])[1],
-1,
-1.123,
True,
]
for good in goods:
src = magpy.current.Loop(good)
if good is None:
assert src.current is None
else:
np.testing.assert_allclose(src.current, good)
def test_input_objects_current_bad():
"""bad input: magpy.current.Loop(inp)"""
bads = [
(1, 2),
[(1, 2, 3, 4)] * 2,
"x",
["x", "y", "z"],
dict(woot=15),
]
for bad in bads:
np.testing.assert_raises(MagpylibBadUserInput, magpy.current.Loop, bad)
def test_input_objects_diameter_good():
"""good input: magpy.current.Loop(diameter=inp)"""
goods = [
None,
0,
1,
1.2,
np.array([1, 2, 3])[1],
True,
]
for good in goods:
src = magpy.current.Loop(diameter=good)
if good is None:
assert src.diameter is None
else:
np.testing.assert_allclose(src.diameter, good)
def test_input_objects_diameter_bad():
"""bad input: magpy.current.Loop(diameter=inp)"""
bads = [
(1, 2),
[(1, 2, 3, 4)] * 2,
"x",
["x", "y", "z"],
dict(woot=15),
-1,
-1.123,
]
for bad in bads:
with np.testing.assert_raises(MagpylibBadUserInput):
magpy.current.Loop(diameter=bad)
def test_input_objects_vertices_good():
"""good input: magpy.current.Line(vertices=inp)"""
goods = [
None,
((0, 0, 0), (0, 0, 0)),
((1, 2, 3), (2, 3, 4)),
[(2, 3, 4), (-1, -2, -3)] * 2,
[[2, 3, 4], [3, 4, 5]],
np.array(((1, 2, 3), (2, 3, 4))),
]
for good in goods:
src = magpy.current.Line(vertices=good)
if good is None:
assert src.vertices is None
else:
np.testing.assert_allclose(src.vertices, good)
def test_input_objects_vertices_bad():
"""bad input: magpy.current.Line(vertices=inp)"""
bads = [
(1, 2),
[(1, 2, 3, 4)] * 2,
[(1, 2, 3)],
"x",
["x", "y", "z"],
dict(woot=15),
0,
-1.123,
True,
]
for bad in bads:
with np.testing.assert_raises(MagpylibBadUserInput):
magpy.current.Line(vertices=bad)
def test_input_objects_magnetization_moment_good():
"""
good input:
magpy.magnet.Cuboid(magnetization=inp),
magpy.misc.Dipole(moment=inp)
"""
goods = [
None,
(1, 2, 3),
(0, 0, 0),
[-1, -2, -3],
np.array((1, 2, 3)),
]
for good in goods:
src = magpy.magnet.Cuboid(good)
src2 = magpy.misc.Dipole(good)
if good is None:
assert src.magnetization is None
assert src2.moment is None
else:
np.testing.assert_allclose(src.magnetization, good)
np.testing.assert_allclose(src2.moment, good)
def test_input_objects_magnetization_moment_bad():
"""
bad input:
magpy.magnet.Cuboid(magnetization=inp),
magpy.misc.Dipole(moment=inp)
"""
bads = [
(1, 2),
[1, 2, 3, 4],
[(1, 2, 3)] * 2,
np.array([(1, 2, 3)] * 2),
"x",
["x", "y", "z"],
dict(woot=15),
0,
-1.123,
True,
]
for bad in bads:
with np.testing.assert_raises(MagpylibBadUserInput):
magpy.magnet.Cuboid(magnetization=bad)
with np.testing.assert_raises(MagpylibBadUserInput):
magpy.misc.Dipole(moment=bad)
def test_input_objects_dimension_cuboid_good():
"""good input: magpy.magnet.Cuboid(dimension=inp)"""
goods = [
None,
(1, 2, 3),
[11, 22, 33],
np.array((1, 2, 3)),
]
for good in goods:
src = magpy.magnet.Cuboid(dimension=good)
if good is None:
assert src.dimension is None
else:
np.testing.assert_allclose(src.dimension, good)
def test_input_objects_dimension_cuboid_bad():
"""bad input: magpy.magnet.Cuboid(dimension=inp)"""
bads = [
[-1, 2, 3],
(0, 1, 2),
(1, 2),
[1, 2, 3, 4],
[(1, 2, 3)] * 2,
np.array([(1, 2, 3)] * 2),
"x",
["x", "y", "z"],
dict(woot=15),
0,
True,
]
for bad in bads:
with np.testing.assert_raises(MagpylibBadUserInput):
magpy.magnet.Cuboid(dimension=bad)
def test_input_objects_dimension_cylinder_good():
"""good input: magpy.magnet.Cylinder(dimension=inp)"""
goods = [
None,
(1, 2),
[11, 22],
np.array((1, 2)),
]
for good in goods:
src = magpy.magnet.Cylinder(dimension=good)
if good is None:
assert src.dimension is None
else:
np.testing.assert_allclose(src.dimension, good)
def test_input_objects_dimension_cylinder_bad():
"""bad input: magpy.magnet.Cylinder(dimension=inp)"""
bads = [
[-1, 2],
(0, 1),
(1,),
[1, 2, 3],
[(1, 2)] * 2,
np.array([(2, 3)] * 2),
"x",
["x", "y"],
dict(woot=15),
0,
True,
]
for bad in bads:
with np.testing.assert_raises(MagpylibBadUserInput):
magpy.magnet.Cylinder(dimension=bad)
def test_input_objects_dimension_cylinderSegment_good():
"""good input: magpy.magnet.CylinderSegment(dimension=inp)"""
goods = [
None,
(0, 2, 3, 0, 50),
(1, 2, 3, 40, 50),
[11, 22, 33, 44, 360],
[11, 22, 33, -44, 55],
np.array((1, 2, 3, 4, 5)),
[11, 22, 33, -44, -33],
(0, 2, 3, -10, 0),
]
for good in goods:
src = magpy.magnet.CylinderSegment(dimension=good)
if good is None:
assert src.dimension is None
else:
np.testing.assert_allclose(src.dimension, good)
def test_input_objects_dimension_cylinderSegment_bad():
"""good input: magpy.magnet.CylinderSegment(dimension=inp)"""
bads = [
(1, 2, 3, 4),
(1, 2, 3, 4, 5, 6),
(0, 0, 3, 4, 5),
(2, 1, 3, 4, 5),
(-1, 2, 3, 4, 5),
(1, 2, 0, 4, 5),
(1, 2, -1, 4, 5),
(1, 2, 3, 5, 4),
[(1, 2, 3, 4, 5)] * 2,
np.array([(1, 2, 3, 4, 5)] * 2),
"x",
["x", "y", "z", 1, 2],
dict(woot=15),
0,
True,
]
for bad in bads:
with np.testing.assert_raises(MagpylibBadUserInput):
magpy.magnet.CylinderSegment(dimension=bad)
def test_input_objects_field_func_good():
"""good input: magpy.misc.CustomSource(field_func=f)"""
# pylint: disable=unused-argument
# init empty = None
src = magpy.misc.CustomSource()
np.testing.assert_raises(MagpylibMissingInput, src.getB, (1, 2, 3))
np.testing.assert_raises(MagpylibMissingInput, src.getH, (1, 2, 3))
# None
src = magpy.misc.CustomSource(field_func=None)
np.testing.assert_raises(MagpylibMissingInput, src.getB, (1, 2, 3))
np.testing.assert_raises(MagpylibMissingInput, src.getH, (1, 2, 3))
# acceptable func with B and H return
def f(field, observers):
"""3 in 3 out"""
return observers
src = magpy.misc.CustomSource(field_func=f)
np.testing.assert_allclose(src.getB((1, 2, 3)), (1, 2, 3))
np.testing.assert_allclose(src.getH((1, 2, 3)), (1, 2, 3))
# acceptable func with only B return
def ff(field, observers):
"""3 in 3 out"""
if field == "B":
return observers
return None
src = magpy.misc.CustomSource(field_func=ff)
np.testing.assert_allclose(src.getB((1, 2, 3)), (1, 2, 3))
np.testing.assert_raises(MagpylibMissingInput, src.getH, (1, 2, 3))
# acceptable func with only B return
def fff(field, observers):
"""3 in 3 out"""
if field == "H":
return observers
return None
src = magpy.misc.CustomSource(field_func=fff)
np.testing.assert_raises(MagpylibMissingInput, src.getB, (1, 2, 3))
np.testing.assert_allclose(src.getH((1, 2, 3)), (1, 2, 3))
def test_input_objects_field_func_bad():
"""bad input: magpy.misc.CustomSource(field_func=f)"""
# pylint: disable=unused-argument
# non callable
np.testing.assert_raises(MagpylibBadUserInput, magpy.misc.CustomSource, 1)
# bad arg names
def ff(fieldd, observers, whatever):
"""ff"""
np.testing.assert_raises(MagpylibBadUserInput, magpy.misc.CustomSource, ff)
# no ndarray return on B
def fff(field, observers):
"""fff"""
if field == "B":
return 1
np.testing.assert_raises(MagpylibBadUserInput, magpy.misc.CustomSource, fff)
# no ndarray return on H
def ffff(field, observers):
"""ffff"""
if field == "H":
return 1
return observers
np.testing.assert_raises(MagpylibBadUserInput, magpy.misc.CustomSource, ffff)
# bad return shape on B
def g(field, observers):
"""g"""
if field == "B":
return np.array([1, 2, 3])
np.testing.assert_raises(MagpylibBadUserInput, magpy.misc.CustomSource, g)
# bad return shape on H
def gg(field, observers):
"""gg"""
if field == "H":
return np.array([1, 2, 3])
return observers
np.testing.assert_raises(MagpylibBadUserInput, magpy.misc.CustomSource, gg)
###########################################################
###########################################################
# DISPLAY
def test_input_show_zoom_bad():
"""bad show zoom inputs"""
x = magpy.Sensor()
bads = [
(1, 2, 3),
-1,
]
for bad in bads:
np.testing.assert_raises(MagpylibBadUserInput, magpy.show, x, zoom=bad)
def test_input_show_animation_bad():
"""bad show animation inputs"""
x = magpy.Sensor()
bads = [
(1, 2, 3),
-1,
]
for bad in bads:
np.testing.assert_raises(MagpylibBadUserInput, magpy.show, x, animation=bad)
def test_input_show_backend_bad():
"""bad show backend inputs"""
x = magpy.Sensor()
bads = [
(1, 2, 3),
-1,
"x",
True,
]
for bad in bads:
np.testing.assert_raises(MagpylibBadUserInput, magpy.show, x, backend=bad)
def test_input_show_missing_parameters1():
"""missing inputs"""
s = magpy.magnet.Cuboid()
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.magnet.Cylinder()
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.magnet.CylinderSegment()
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.magnet.Sphere()
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.current.Loop()
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.current.Line()
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.misc.Dipole()
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
def test_input_show_missing_parameters2():
"""missing inputs"""
s = magpy.magnet.Cuboid(dimension=(1, 2, 3))
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.magnet.Cylinder(dimension=(1, 2))
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.magnet.CylinderSegment(dimension=(1, 2, 3, 4, 5))
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.magnet.Sphere(diameter=1)
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.current.Loop(diameter=1)
| np.testing.assert_raises(MagpylibMissingInput, magpy.show, s) | numpy.testing.assert_raises |
from collections import defaultdict
import cPickle as pickle
import os
import time
import numpy as np
import tensorflow as tf
from sym_net import SymNet
from util import *
# Data
tf.app.flags.DEFINE_string('train_txt_fp', '', 'Training dataset txt file with a list of pickled song files')
tf.app.flags.DEFINE_string('valid_txt_fp', '', 'Eval dataset txt file with a list of pickled song files')
tf.app.flags.DEFINE_string('test_txt_fp', '', 'Test dataset txt file with a list of pickled song files')
tf.app.flags.DEFINE_string('sym_rnn_pretrain_model_ckpt_fp', '', 'File path to model checkpoint with only sym weights')
tf.app.flags.DEFINE_string('model_ckpt_fp', '', 'File path to model checkpoint if resuming or eval')
# Features
tf.app.flags.DEFINE_string('sym_in_type', 'onehot', 'Either \'onehot\' or \'bagofarrows\'')
tf.app.flags.DEFINE_string('sym_out_type', 'onehot', 'Either \'onehot\' or \'bagofarrows\'')
tf.app.flags.DEFINE_integer('sym_narrows', 4, 'Number or arrows in data')
tf.app.flags.DEFINE_integer('sym_narrowclasses', 4, 'Number or arrow classes in data')
tf.app.flags.DEFINE_integer('sym_embedding_size', 32, '')
tf.app.flags.DEFINE_bool('audio_z_score', False, 'If true, train and test on z-score of validation data')
tf.app.flags.DEFINE_integer('audio_deviation_max', 0, '')
tf.app.flags.DEFINE_integer('audio_context_radius', -1, 'Past and future context per training example')
tf.app.flags.DEFINE_integer('audio_nbands', 0, 'Number of bands per frame')
tf.app.flags.DEFINE_integer('audio_nchannels', 0, 'Number of channels per frame')
tf.app.flags.DEFINE_bool('feat_meas_phase', False, '')
tf.app.flags.DEFINE_bool('feat_meas_phase_cos', False, '')
tf.app.flags.DEFINE_bool('feat_meas_phase_sin', False, '')
tf.app.flags.DEFINE_bool('feat_beat_phase', False, '')
tf.app.flags.DEFINE_bool('feat_beat_phase_cos', False, '')
tf.app.flags.DEFINE_bool('feat_beat_phase_sin', False, '')
tf.app.flags.DEFINE_bool('feat_beat_diff', False, '')
tf.app.flags.DEFINE_bool('feat_beat_diff_next', False, '')
tf.app.flags.DEFINE_bool('feat_beat_abs', False, '')
tf.app.flags.DEFINE_bool('feat_time_diff', False, '')
tf.app.flags.DEFINE_bool('feat_time_diff_next', False, '')
tf.app.flags.DEFINE_bool('feat_time_abs', False, '')
tf.app.flags.DEFINE_bool('feat_prog_diff', False, '')
tf.app.flags.DEFINE_bool('feat_prog_abs', False, '')
tf.app.flags.DEFINE_bool('feat_diff_feet', False, '')
tf.app.flags.DEFINE_bool('feat_diff_aps', False, '')
tf.app.flags.DEFINE_integer('feat_beat_phase_nquant', 0, '')
tf.app.flags.DEFINE_integer('feat_beat_phase_max_nwraps', 0, '')
tf.app.flags.DEFINE_integer('feat_meas_phase_nquant', 0, '')
tf.app.flags.DEFINE_integer('feat_meas_phase_max_nwraps', 0, '')
tf.app.flags.DEFINE_string('feat_diff_feet_to_id_fp', '', '')
tf.app.flags.DEFINE_string('feat_diff_coarse_to_id_fp', '', '')
tf.app.flags.DEFINE_bool('feat_diff_dipstick', False, '')
tf.app.flags.DEFINE_string('feat_freetext_to_id_fp', '', '')
tf.app.flags.DEFINE_integer('feat_bucket_beat_diff_n', None, '')
tf.app.flags.DEFINE_float('feat_bucket_beat_diff_max', None, '')
tf.app.flags.DEFINE_integer('feat_bucket_time_diff_n', None, '')
tf.app.flags.DEFINE_float('feat_bucket_time_diff_max', None, '')
# Network params
tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size for training')
tf.app.flags.DEFINE_integer('nunroll', 1, '')
tf.app.flags.DEFINE_string('cnn_filter_shapes', '', 'CSV 3-tuples of filter shapes (time, freq, n)')
tf.app.flags.DEFINE_string('cnn_pool', '', 'CSV 2-tuples of pool amounts (time, freq)')
tf.app.flags.DEFINE_integer('cnn_dim_reduction_size', -1, '')
tf.app.flags.DEFINE_float('cnn_dim_reduction_keep_prob', 1.0, '')
tf.app.flags.DEFINE_string('cnn_dim_reduction_nonlin', '', '')
tf.app.flags.DEFINE_string('rnn_cell_type', 'lstm', '')
tf.app.flags.DEFINE_integer('rnn_size', 0, '')
tf.app.flags.DEFINE_integer('rnn_nlayers', 0, '')
tf.app.flags.DEFINE_float('rnn_keep_prob', 1.0, '')
tf.app.flags.DEFINE_string('dnn_sizes', '', 'CSV sizes for dense layers')
tf.app.flags.DEFINE_float('dnn_keep_prob', 1.0, '')
# Training params
tf.app.flags.DEFINE_float('grad_clip', 0.0, 'Clip gradients to this value if greater than 0')
tf.app.flags.DEFINE_string('opt', 'sgd', 'One of \'sgd\'')
tf.app.flags.DEFINE_float('lr', 1.0, 'Learning rate')
tf.app.flags.DEFINE_float('lr_decay_rate', 1.0, 'Multiply learning rate by this value every epoch')
tf.app.flags.DEFINE_integer('lr_decay_delay', 0, '')
tf.app.flags.DEFINE_integer('nbatches_per_ckpt', 100, 'Save model weights every N batches')
tf.app.flags.DEFINE_integer('nbatches_per_eval', 10000, 'Evaluate model every N batches')
tf.app.flags.DEFINE_integer('nepochs', 0, 'Number of training epochs, negative means train continuously')
tf.app.flags.DEFINE_string('experiment_dir', '', 'Directory for temporary training files and model weights')
# Eval params
# Generate params
tf.app.flags.DEFINE_string('generate_fp', '', '')
tf.app.flags.DEFINE_string('generate_vocab_fp', '', '')
FLAGS = tf.app.flags.FLAGS
dtype = tf.float32
def main(_):
assert FLAGS.experiment_dir
do_train = FLAGS.nepochs != 0 and bool(FLAGS.train_txt_fp)
do_valid = bool(FLAGS.valid_txt_fp)
do_train_eval = do_train and do_valid
do_eval = bool(FLAGS.test_txt_fp)
do_generate = bool(FLAGS.generate_fp)
# Load data
print('Loading data')
train_data, valid_data, test_data = open_dataset_fps(FLAGS.train_txt_fp, FLAGS.valid_txt_fp, FLAGS.test_txt_fp)
# Calculate validation metrics
if FLAGS.audio_z_score:
z_score_fp = os.path.join(FLAGS.experiment_dir, 'valid_mean_std.pkl')
if do_valid and not os.path.exists(z_score_fp):
print('Calculating validation metrics')
mean_per_band, std_per_band = calc_mean_std_per_band(valid_data)
with open(z_score_fp, 'wb') as f:
pickle.dump((mean_per_band, std_per_band), f)
else:
print('Loading validation metrics')
with open(z_score_fp, 'rb') as f:
mean_per_band, std_per_band = pickle.load(f)
# Sanitize data
for data in [train_data, valid_data, test_data]:
apply_z_norm(data, mean_per_band, std_per_band)
# Flatten the data into chart references for easier iteration
print('Flattening datasets into charts')
charts_train = flatten_dataset_to_charts(train_data)
charts_valid = flatten_dataset_to_charts(valid_data)
charts_test = flatten_dataset_to_charts(test_data)
# Filter charts that are too short
charts_train_len = len(charts_train)
charts_train = list(filter(lambda x: x.get_nannotations() >= FLAGS.nunroll, charts_train))  # materialize so len() below also works under Python 3
if len(charts_train) != charts_train_len:
print('{} charts too small for training'.format(charts_train_len - len(charts_train)))
print('Train set: {} charts, valid set: {} charts, test set: {} charts'.format(len(charts_train), len(charts_valid), len(charts_test)))
# Load ID maps
diff_feet_to_id = None
if FLAGS.feat_diff_feet_to_id_fp:
diff_feet_to_id = load_id_dict(FLAGS.feat_diff_feet_to_id_fp)
diff_coarse_to_id = None
if FLAGS.feat_diff_coarse_to_id_fp:
diff_coarse_to_id = load_id_dict(FLAGS.feat_diff_coarse_to_id_fp)
freetext_to_id = None
if FLAGS.feat_freetext_to_id_fp:
freetext_to_id = load_id_dict(FLAGS.feat_freetext_to_id_fp)
# Create feature config
feats_config = {
'meas_phase': FLAGS.feat_meas_phase,
'meas_phase_cos': FLAGS.feat_meas_phase_cos,
'meas_phase_sin': FLAGS.feat_meas_phase_sin,
'beat_phase': FLAGS.feat_beat_phase,
'beat_phase_cos': FLAGS.feat_beat_phase_cos,
'beat_phase_sin': FLAGS.feat_beat_phase_sin,
'beat_diff': FLAGS.feat_beat_diff,
'beat_diff_next': FLAGS.feat_beat_diff_next,
'beat_abs': FLAGS.feat_beat_abs,
'time_diff': FLAGS.feat_time_diff,
'time_diff_next': FLAGS.feat_time_diff_next,
'time_abs': FLAGS.feat_time_abs,
'prog_diff': FLAGS.feat_prog_diff,
'prog_abs': FLAGS.feat_prog_abs,
'diff_feet': FLAGS.feat_diff_feet,
'diff_aps': FLAGS.feat_diff_aps,
'beat_phase_nquant': FLAGS.feat_beat_phase_nquant,
'beat_phase_max_nwraps': FLAGS.feat_beat_phase_max_nwraps,
'meas_phase_nquant': FLAGS.feat_meas_phase_nquant,
'meas_phase_max_nwraps': FLAGS.feat_meas_phase_max_nwraps,
'diff_feet_to_id': diff_feet_to_id,
'diff_coarse_to_id': diff_coarse_to_id,
'freetext_to_id': freetext_to_id,
'bucket_beat_diff_n': FLAGS.feat_bucket_beat_diff_n,
'bucket_time_diff_n': FLAGS.feat_bucket_time_diff_n
}
nfeats = 0
for feat in feats_config.values():
if feat is None:
continue
if isinstance(feat, dict):
nfeats += max(feat.values()) + 1
else:
nfeats += int(feat)
nfeats += 1 if FLAGS.feat_beat_phase_max_nwraps > 0 else 0
nfeats += 1 if FLAGS.feat_meas_phase_max_nwraps > 0 else 0
nfeats += 1 if FLAGS.feat_bucket_beat_diff_n > 0 else 0
nfeats += 1 if FLAGS.feat_bucket_time_diff_n > 0 else 0
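# For orientation, a minimal worked example of the feature-count bookkeeping above
# (flag values are hypothetical): with feat_beat_diff=True, feat_diff_feet=True and a
# diff_feet_to_id map of {1: 0, 2: 1, 3: 2}, each boolean contributes 1 dimension and the
# id map contributes max(ids)+1 = 3 one-hot dimensions, so nfeats = 1 + 1 + 3 = 5 before
# the extra wrap/bucket dimensions added above.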
feats_config['diff_dipstick'] = FLAGS.feat_diff_dipstick
feats_config['audio_time_context_radius'] = FLAGS.audio_context_radius
feats_config['audio_deviation_max'] = FLAGS.audio_deviation_max
feats_config['bucket_beat_diff_max'] = FLAGS.feat_bucket_beat_diff_max
feats_config['bucket_time_diff_max'] = FLAGS.feat_bucket_time_diff_max
feats_config_eval = dict(feats_config)
feats_config_eval['audio_deviation_max'] = 0
print('Feature configuration (nfeats={}): {}'.format(nfeats, feats_config))
# Create model config
rnn_proj_init = tf.constant_initializer(0.0, dtype=dtype) if FLAGS.sym_rnn_pretrain_model_ckpt_fp else tf.uniform_unit_scaling_initializer(factor=1.0, dtype=dtype)
model_config = {
'nunroll': FLAGS.nunroll,
'sym_in_type': FLAGS.sym_in_type,
'sym_embedding_size': FLAGS.sym_embedding_size,
'sym_out_type': FLAGS.sym_out_type,
'sym_narrows': FLAGS.sym_narrows,
'sym_narrowclasses': FLAGS.sym_narrowclasses,
'other_nfeats': nfeats,
'audio_context_radius': FLAGS.audio_context_radius,
'audio_nbands': FLAGS.audio_nbands,
'audio_nchannels': FLAGS.audio_nchannels,
'cnn_filter_shapes': stride_csv_arg_list(FLAGS.cnn_filter_shapes, 3, int),
'cnn_init': tf.uniform_unit_scaling_initializer(factor=1.43, dtype=dtype),
'cnn_pool': stride_csv_arg_list(FLAGS.cnn_pool, 2, int),
'cnn_dim_reduction_size': FLAGS.cnn_dim_reduction_size,
'cnn_dim_reduction_init': tf.uniform_unit_scaling_initializer(factor=1.0, dtype=dtype),
'cnn_dim_reduction_nonlin': FLAGS.cnn_dim_reduction_nonlin,
'cnn_dim_reduction_keep_prob': FLAGS.cnn_dim_reduction_keep_prob,
'rnn_proj_init': rnn_proj_init,
'rnn_cell_type': FLAGS.rnn_cell_type,
'rnn_size': FLAGS.rnn_size,
'rnn_nlayers': FLAGS.rnn_nlayers,
'rnn_init': tf.random_uniform_initializer(-5e-2, 5e-2, dtype=dtype),
'rnn_keep_prob': FLAGS.rnn_keep_prob,
'dnn_sizes': stride_csv_arg_list(FLAGS.dnn_sizes, 1, int),
'dnn_init': tf.uniform_unit_scaling_initializer(factor=1.15, dtype=dtype),
'dnn_keep_prob': FLAGS.dnn_keep_prob,
'grad_clip': FLAGS.grad_clip,
'opt': FLAGS.opt,
}
print('Model configuration: {}'.format(model_config))
with tf.Graph().as_default(), tf.Session() as sess:
if do_train:
print('Creating train model')
with tf.variable_scope('model_ss', reuse=None):
model_train = SymNet(mode='train', batch_size=FLAGS.batch_size, **model_config)
if do_train_eval or do_eval:
print('Creating eval model')
with tf.variable_scope('model_ss', reuse=do_train):
eval_batch_size = FLAGS.batch_size
if FLAGS.rnn_size > 0 and FLAGS.rnn_nlayers > 0:
eval_batch_size = 1
model_eval = SymNet(mode='eval', batch_size=eval_batch_size, **model_config)
model_early_stop_xentropy_avg = tf.train.Saver(tf.global_variables(), max_to_keep=None)
model_early_stop_accuracy = tf.train.Saver(tf.global_variables(), max_to_keep=None)
if do_generate:
print('Creating generation model')
with tf.variable_scope('model_ss', reuse=do_train):
eval_batch_size = FLAGS.batch_size
model_gen = SymNet(mode='gen', batch_size=1, **model_config)
# Restore or init model
model_saver = tf.train.Saver(tf.global_variables())
if FLAGS.model_ckpt_fp:
print('Restoring model weights from {}'.format(FLAGS.model_ckpt_fp))
model_saver.restore(sess, FLAGS.model_ckpt_fp)
else:
print('Initializing model weights from scratch')
sess.run(tf.global_variables_initializer())
# Restore or init sym weights
if FLAGS.sym_rnn_pretrain_model_ckpt_fp:
print('Restoring pretrained weights from {}'.format(FLAGS.sym_rnn_pretrain_model_ckpt_fp))
var_list_old = list(filter(lambda x: 'nosym' not in x.name and 'cnn' not in x.name, tf.global_variables()))
pretrain_saver = tf.train.Saver(var_list_old)
pretrain_saver.restore(sess, FLAGS.sym_rnn_pretrain_model_ckpt_fp)
# Create summaries
if do_train:
summary_writer = tf.summary.FileWriter(FLAGS.experiment_dir, sess.graph)
epoch_mean_xentropy = tf.placeholder(tf.float32, shape=[], name='epoch_mean_xentropy')
epoch_mean_time = tf.placeholder(tf.float32, shape=[], name='epoch_mean_time')
epoch_var_xentropy = tf.placeholder(tf.float32, shape=[], name='epoch_var_xentropy')
epoch_var_time = tf.placeholder(tf.float32, shape=[], name='epoch_var_time')
epoch_time_total = tf.placeholder(tf.float32, shape=[], name='epoch_time_total')
epoch_summaries = tf.summary.merge([
tf.summary.scalar('epoch_mean_xentropy', epoch_mean_xentropy),
tf.summary.scalar('epoch_mean_time', epoch_mean_time),
tf.summary.scalar('epoch_var_xentropy', epoch_var_xentropy),
tf.summary.scalar('epoch_var_time', epoch_var_time),
tf.summary.scalar('epoch_time_total', epoch_time_total)
])
eval_metric_names = ['xentropy_avg', 'accuracy']
eval_metrics = {}
eval_summaries = []
for eval_metric_name in eval_metric_names:
name_mean = 'eval_mean_{}'.format(eval_metric_name)
name_var = 'eval_var_{}'.format(eval_metric_name)
ph_mean = tf.placeholder(tf.float32, shape=[], name=name_mean)
ph_var = tf.placeholder(tf.float32, shape=[], name=name_var)
summary_mean = tf.summary.scalar(name_mean, ph_mean)
summary_var = tf.summary.scalar(name_var, ph_var)
eval_summaries.append(tf.summary.merge([summary_mean, summary_var]))
eval_metrics[eval_metric_name] = (ph_mean, ph_var)
eval_time = tf.placeholder(tf.float32, shape=[], name='eval_time')
eval_time_summary = tf.summary.scalar('eval_time', eval_time)
eval_summaries = tf.summary.merge([eval_time_summary] + eval_summaries)
# Calculate epoch stuff
train_nexamples = sum([chart.get_nannotations() for chart in charts_train])
examples_per_batch = FLAGS.batch_size
examples_per_batch *= model_train.out_nunroll
batches_per_epoch = train_nexamples // examples_per_batch
nbatches = FLAGS.nepochs * batches_per_epoch
print('{} frames in data, {} batches per epoch, {} batches total'.format(train_nexamples, batches_per_epoch, nbatches))
# Init epoch
lr_summary = model_train.assign_lr(sess, FLAGS.lr)
summary_writer.add_summary(lr_summary, 0)
epoch_xentropies = []
epoch_times = []
batch_num = 0
eval_best_xentropy_avg = float('inf')
eval_best_accuracy = float('-inf')
while FLAGS.nepochs < 0 or batch_num < nbatches:
batch_time_start = time.time()
syms, feats_other, feats_audio, targets, target_weights = model_train.prepare_train_batch(charts_train, **feats_config)
feed_dict = {
model_train.syms: syms,
model_train.feats_other: feats_other,
model_train.feats_audio: feats_audio,
model_train.targets: targets,
model_train.target_weights: target_weights
}
batch_xentropy, _ = sess.run([model_train.avg_neg_log_lhood, model_train.train_op], feed_dict=feed_dict)
epoch_xentropies.append(batch_xentropy)
epoch_times.append(time.time() - batch_time_start)
batch_num += 1
if batch_num % batches_per_epoch == 0:
epoch_num = batch_num // batches_per_epoch
print('Completed epoch {}'.format(epoch_num))
lr_decay = FLAGS.lr_decay_rate ** max(epoch_num - FLAGS.lr_decay_delay, 0)
lr_summary = model_train.assign_lr(sess, FLAGS.lr * lr_decay)
summary_writer.add_summary(lr_summary, batch_num)
epoch_xentropy = np.mean(epoch_xentropies)
print('Epoch mean cross-entropy (nats) {}'.format(epoch_xentropy))
epoch_summary = sess.run(epoch_summaries, feed_dict={epoch_mean_xentropy: epoch_xentropy, epoch_mean_time: np.mean(epoch_times), epoch_var_xentropy: np.var(epoch_xentropies), epoch_var_time: np.var(epoch_times), epoch_time_total: np.sum(epoch_times)})
summary_writer.add_summary(epoch_summary, batch_num)
epoch_xentropies = []
epoch_times = []
if batch_num % FLAGS.nbatches_per_ckpt == 0:
print('Saving model weights...')
ckpt_fp = os.path.join(FLAGS.experiment_dir, 'onset_net_train')
model_saver.save(sess, ckpt_fp, global_step=tf.contrib.framework.get_or_create_global_step())
print('Done saving!')
if do_train_eval and batch_num % FLAGS.nbatches_per_eval == 0:
print('Evaluating...')
eval_start_time = time.time()
metrics = defaultdict(list)
for eval_chart in charts_valid:
if model_eval.do_rnn:
state = sess.run(model_eval.initial_state)
neg_log_prob_sum = 0.0
correct_predictions_sum = 0.0
weight_sum = 0.0
for syms, syms_in, feats_other, feats_audio, targets, target_weights in model_eval.eval_iter(eval_chart, **feats_config_eval):
feed_dict = {
model_eval.syms: syms_in,
model_eval.feats_other: feats_other,
model_eval.feats_audio: feats_audio,
model_eval.targets: targets,
model_eval.target_weights: target_weights
}
if model_eval.do_rnn:
feed_dict[model_eval.initial_state] = state
xentropies, correct_predictions, state = sess.run([model_eval.neg_log_lhoods, model_eval.correct_predictions, model_eval.final_state], feed_dict=feed_dict)
else:
xentropies, correct_predictions = sess.run([model_eval.neg_log_lhoods, model_eval.correct_predictions], feed_dict=feed_dict)
neg_log_prob_sum += np.sum(xentropies)
correct_predictions_sum += np.sum(correct_predictions)
weight_sum += np.sum(target_weights)
assert int(weight_sum) == eval_chart.get_nannotations()
xentropy_avg = neg_log_prob_sum / weight_sum
accuracy = correct_predictions_sum / weight_sum
metrics['xentropy_avg'].append(xentropy_avg)
metrics['accuracy'].append(accuracy)
metrics = {k: (np.mean(v), np.var(v)) for k, v in metrics.items()}
feed_dict = {}
results = []
for metric_name, (mean, var) in metrics.items():
feed_dict[eval_metrics[metric_name][0]] = mean
feed_dict[eval_metrics[metric_name][1]] = var
feed_dict[eval_time] = time.time() - eval_start_time
summary_writer.add_summary(sess.run(eval_summaries, feed_dict=feed_dict), batch_num)
xentropy_avg_mean = metrics['xentropy_avg'][0]
if xentropy_avg_mean < eval_best_xentropy_avg:
print('Xentropy {} better than previous {}'.format(xentropy_avg_mean, eval_best_xentropy_avg))
ckpt_fp = os.path.join(FLAGS.experiment_dir, 'onset_net_early_stop_xentropy_avg')
model_early_stop_xentropy_avg.save(sess, ckpt_fp, global_step=tf.contrib.framework.get_or_create_global_step())
eval_best_xentropy_avg = xentropy_avg_mean
accuracy_mean = metrics['accuracy'][0]
if accuracy_mean > eval_best_accuracy:
print('Accuracy {} better than previous {}'.format(accuracy_mean, eval_best_accuracy))
ckpt_fp = os.path.join(FLAGS.experiment_dir, 'onset_net_early_stop_accuracy')
model_early_stop_accuracy.save(sess, ckpt_fp, global_step=tf.contrib.framework.get_or_create_global_step())
eval_best_accuracy = accuracy_mean
print('Done evaluating')
if do_eval:
print('Evaluating...')
metrics = defaultdict(list)
for test_chart in charts_test:
if model_eval.do_rnn:
state = sess.run(model_eval.initial_state)
neg_log_prob_sum = 0.0
correct_predictions_sum = 0.0
weight_sum = 0.0
for syms, syms_in, feats_other, feats_audio, targets, target_weights in model_eval.eval_iter(test_chart, **feats_config_eval):
feed_dict = {
model_eval.syms: syms_in,
model_eval.feats_other: feats_other,
model_eval.feats_audio: feats_audio,
model_eval.targets: targets,
model_eval.target_weights: target_weights
}
if model_eval.do_rnn:
feed_dict[model_eval.initial_state] = state
xentropies, correct_predictions, state = sess.run([model_eval.neg_log_lhoods, model_eval.correct_predictions, model_eval.final_state], feed_dict=feed_dict)
else:
xentropies, correct_predictions = sess.run([model_eval.neg_log_lhoods, model_eval.correct_predictions], feed_dict=feed_dict)
neg_log_prob_sum += np.sum(xentropies)
correct_predictions_sum += np.sum(correct_predictions)
weight_sum += np.sum(target_weights)
assert int(weight_sum) == test_chart.get_nannotations()
xentropy_avg = neg_log_prob_sum / weight_sum
accuracy = correct_predictions_sum / weight_sum
metrics['perplexity'].append(np.exp(xentropy_avg))
metrics['xentropy_avg'].append(xentropy_avg)
metrics['accuracy'].append(accuracy)
metrics = {k: (np.mean(v), np.std(v), np.min(v), np.max(v)) for k, v in metrics.items()}
copy_pasta = []
for metric_name in ['xentropy_avg', 'perplexity', 'accuracy']:
metric_stats = metrics[metric_name]
copy_pasta += list(metric_stats)
print('{}: {}'.format(metric_name, metric_stats))
print('COPY PASTA:')
print(','.join([str(x) for x in copy_pasta]))
# TODO: This currently only works for VERY specific model (delta time LSTM)
if do_generate:
print('Generating...')
with open(FLAGS.generate_fp, 'r') as f:
step_times = [float(x) for x in f.read().split(',')]
with open(FLAGS.generate_vocab_fp, 'r') as f:
idx_to_sym = {i:k for i, k in enumerate(f.read().splitlines())}
def weighted_pick(weights):
t = np.cumsum(weights)
s = np.sum(weights)
return(int(np.searchsorted(t, np.random.rand(1) * s)))
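# A minimal sketch of what weighted_pick does (values are illustrative): for
# weights = [0.1, 0.2, 0.7], t = [0.1, 0.3, 1.0] and s = 1.0, a uniform draw in [0, s)
# mapped through searchsorted returns index 0, 1 or 2 with probability proportional to
# each weight, i.e. it samples from the categorical distribution defined by `weights`.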
# #! /usr/bin/env python
# Load Libraries
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pytest
from .. import api
# Fixtures.
@pytest.fixture
def create_dummy_dataset(n=50, expt_groups=6):
# Dummy dataset
Ns = n
dataset = list()
for seed in np.random.randint(low=100, high=1000, size=expt_groups):
np.random.seed(seed)
dataset.append(np.random.randn(Ns))
df = pd.DataFrame(dataset).T
# Create some upwards/downwards shifts.
for c in df.columns:
df.loc[:,c] =(df[c] * np.random.random()) + np.random.random()
# Turn columns into strings
df.columns = [str(c) for c in df.columns]
# Add gender column for color.
df['Gender'] = np.concatenate([np.repeat('Male', Ns//2),
np.repeat('Female', Ns//2)])
return df
@pytest.fixture
def get_swarm_yspans(coll, round_result=False, decimals=12):
"""
Given a matplotlib Collection, will obtain the y spans
for the collection. Will return None if this fails.
Modified from `get_swarm_spans` in plot_tools.py.
"""
_, y = np.array(coll.get_offsets()).T
try:
if round_result:
return np.around(y.min(), decimals), np.around(y.max(),decimals)
else:
return y.min(), y.max()
except ValueError:
return None
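# Usage sketch (assumed, for illustration only): given a seaborn swarmplot axis `ax`,
# get_swarm_yspans(ax.collections[0]) returns the (min, max) y-values of the plotted
# points in that collection, or None if the offsets cannot be read.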
# Start tests.
def test_swarmspan():
print('Testing swarmspan')
df = create_dummy_dataset()
for c in df.columns[1:-1]:
f1, swarmplt = plt.subplots(figsize=(10, 10))
sns.swarmplot(data=df[[df.columns[0], c]],
ax=swarmplt)
sns_yspans = []
for coll in swarmplt.collections:
sns_yspans.append(get_swarm_yspans(coll))
f2, b = api.plot(data=df,
fig_size=(12.5, 11),
idx=(df.columns[0], c))
dabest_yspans = []
for coll in f2.axes[0].collections:
dabest_yspans.append(get_swarm_yspans(coll))
for j, span in enumerate(sns_yspans):
assert span == pytest.approx(dabest_yspans[j])
def test_ylims():
print('Testing assignment of ylims')
df = create_dummy_dataset()
print('Testing assignment for Gardner-Altman plot')
rand_swarm_ylim2 = (np.random.randint(-7, 0), np.random.randint(0, 7))  # second element assumed; original line truncated
import cv2
import numpy as np
cap = cv2.VideoCapture("DSC09513.JPG")
def nothing(x):
pass
# Create a black image, a window
img = np.zeros((300,512,3), np.uint8)
cv2.namedWindow('image')
# create trackbars for color change
cv2.createTrackbar('lH','image',0,255,nothing)
cv2.createTrackbar('lS','image',0,255,nothing)
cv2.createTrackbar('lV','image',0,255,nothing)
cv2.createTrackbar('uH','image',0,255,nothing)
cv2.createTrackbar('uS','image',0,255,nothing)
cv2.createTrackbar('uV','image',0,255,nothing)
cv2.createTrackbar('l_or_u','image',0,1,nothing)
while(1):
lB = cv2.getTrackbarPos('lH','image')
lG = cv2.getTrackbarPos('lS','image')
lR = cv2.getTrackbarPos('lV','image')
uB = cv2.getTrackbarPos('uH','image')
uG = cv2.getTrackbarPos('uS','image')
uR = cv2.getTrackbarPos('uV','image')
s = cv2.getTrackbarPos('l_or_u','image')
if s == 1:
img[:] = [uB,uG,uR]
if s == 0:
img[:] = [lB,lG,lR]
cv2.imshow('image',img)
# Take each frame
_, frame = cap.read()
# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Convert entire BGR to HSV
BGRl = np.uint8([[[lB,lG,lR ]]])
BGRu = np.uint8([[[uB,uG,uR ]]])
import cv2
import numpy as np
from numpy.linalg import norm
import math
class hog:
cv_hog = None
cellx = celly = 8
bin_n = 16
def initialize(usegpu):
hog.cv_hog = cv2.HOGDescriptor()
return hog.bin_n
def describe(image):
cellx = hog.cellx
celly = hog.celly
cellxCount = image.shape[1] / cellx
cellyCount = image.shape[0] / celly
cutOffX = image.shape[1] - cellxCount * cellx
cutOffY = image.shape[0] - cellyCount * celly
image = image[cutOffY / 2:-cutOffY / 2, cutOffX / 2:-cutOffX / 2, :]
gx = cv2.Sobel(image[:, :, 0], cv2.CV_64F, 1, 0)
gy = cv2.Sobel(image[:, :, 0], cv2.CV_64F, 0, 1)
mag, ang = cv2.cartToPolar(gx, gy)
bin = np.int32(hog.bin_n * ang / (2 * np.pi) % hog.bin_n)
bin_cells = []
mag_cells = []
for i in range(image.shape[0] / celly):
for j in range(image.shape[1] / cellx):
bin_cells.append(bin[i * celly:i * celly + celly - 1, j * cellx:j * cellx + cellx - 1])
mag_cells.append(mag[i * celly:i * celly + celly - 1, j * cellx:j * cellx + cellx - 1])
eps = 1e-7
hists = [np.bincount(b.ravel(), m.ravel(), hog.bin_n) for b, m in zip(bin_cells, mag_cells)]
desc = np.zeros([hog.bin_n, image.shape[0] / celly, image.shape[1] / cellx])
for i in range(image.shape[0] / celly):
for j in range(image.shape[1] / cellx):
hist = hists[i * image.shape[1] / cellx + j]
hist /= hist.sum() + eps
hist = np.sqrt(hist)
"""
This module is an example of a barebones function plugin for napari
It implements the ``napari_experimental_provide_function`` hook specification.
see: https://napari.org/docs/dev/plugins/hook_specifications.html
Replace code below according to your needs.
"""
from __future__ import print_function, division
from typing import TYPE_CHECKING, DefaultDict
from unicodedata import name
import six
# import modules
import sys # input, output, errors, and files
import os # interacting with file systems
import time # getting time
import datetime
import inspect # get passed parameters
import yaml # parameter importing
import json # for importing tiff metadata
try:
import cPickle as pickle # loading and saving python objects
except:
import pickle
import numpy as np # numbers package
import struct # for interpretting strings as binary data
import re # regular expressions
from pprint import pprint # for human readable file output
import traceback # for error messaging
import warnings # error messaging
import copy # not sure this is needed
import h5py # working with HDF5 files
import pandas as pd
import networkx as nx
import collections
# scipy and image analysis
from scipy.signal import find_peaks_cwt # used in channel finding
from scipy.optimize import curve_fit # fitting ring profile
from scipy.optimize import leastsq # fitting 2d gaussian
from scipy import ndimage as ndi # labeling and distance transform
from skimage import io
from skimage import segmentation # used in make_masks and segmentation
from skimage.transform import rotate
from skimage.feature import match_template # used to align images
from skimage.feature import blob_log # used for foci finding
from skimage.filters import threshold_otsu, median # segmentation
from skimage import filters
from skimage import morphology # many functions is segmentation used from this
from skimage.measure import regionprops # used for creating lineages
from skimage.measure import profile_line # used for ring an nucleoid analysis
from skimage import util, measure, transform, feature
import tifffile as tiff
from sklearn import metrics
# deep learning
import tensorflow as tf # ignore message about how tf was compiled
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import models
from tensorflow.keras import losses
from tensorflow.keras import utils
from tensorflow.keras import backend as K
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # supress warnings
# Parralelization modules
import multiprocessing
from multiprocessing import Pool
# Plotting for debug
import matplotlib as mpl
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 12}
mpl.rc('font', **font)
mpl.rcParams['pdf.fonttype'] = 42
from matplotlib.patches import Ellipse
from pathlib import Path
import time
import matplotlib.pyplot as plt
# import modules
import os
import glob
import re
import numpy as np
import tifffile as tiff
import pims_nd2
from skimage import io, measure, morphology
import tifffile as tiff
from scipy import stats
from pprint import pprint # for human readable file output
import multiprocessing
from multiprocessing import Pool
import numpy as np
import warnings
from tensorflow.python.keras import models
from enum import Enum
import numpy as np
import multiprocessing
from multiprocessing import Pool
import os
from napari_plugin_engine import napari_hook_implementation
from skimage.filters import threshold_otsu # segmentation
from skimage import morphology # many functions is segmentation used from this
from skimage import segmentation # used in make_masks and segmentation
from scipy import ndimage as ndi # labeling and distance transform
import matplotlib.gridspec as gridspec
from skimage.exposure import rescale_intensity # for displaying in GUI
from skimage import io, morphology, segmentation
# import mm3_helpers as mm3
import napari
# This is the actual plugin function, where we export our function
# (The functions themselves are defined below)
@napari_hook_implementation
def napari_experimental_provide_function():
# we can return a single function
# or a tuple of (function, magicgui_options)
# or a list of multiple functions with or without options, as shown here:
#return [Segment, threshold, image_arithmetic]
return [Compile, ChannelPicker, Segment]
# 1. First example, a simple function that thresholds an image and creates a labels layer
def threshold(data: "napari.types.ImageData", threshold: int) -> "napari.types.LabelsData":
"""Threshold an image and return a mask."""
return (data > threshold).astype(int)
# print a warning
def warning(*objs):
print(time.strftime("%H:%M:%S WARNING:", time.localtime()), *objs, file=sys.stderr)
# print information
def information(*objs):
print(time.strftime("%H:%M:%S", time.localtime()), *objs, file=sys.stdout)
def julian_day_number():
"""
Need this to solve a bug in pims_nd2.nd2reader.ND2_Reader instance initialization.
The bug is in /usr/local/lib/python2.7/site-packages/pims_nd2/ND2SDK.py in function `jdn_to_datetime_local`, when the year number in the metadata (self._lim_metadata_desc) is not in the correct range. This causes a problem when calling self.metadata.
https://en.wikipedia.org/wiki/Julian_day
"""
dt=datetime.datetime.now()
tt=dt.timetuple()
jdn=(1461.*(tt.tm_year + 4800. + (tt.tm_mon - 14.)/12))/4. + (367.*(tt.tm_mon - 2. - 12.*((tt.tm_mon -14.)/12)))/12. - (3.*((tt.tm_year + 4900. + (tt.tm_mon - 14.)/12.)/100.))/4. + tt.tm_mday - 32075
return jdn
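# Sanity check (reference value, not from this codebase): the Julian day number of
# 1 January 2000 is 2451545, so values returned here should be in the ~2.45e6 range.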
def get_plane(filepath):
pattern = r'(c\d+).tif'
res = re.search(pattern,filepath)
if (res != None):
return res.group(1)
else:
return None
def get_fov(filepath):
pattern = r'xy(\d+)\w*.tif'
res = re.search(pattern,filepath)
if (res != None):
return int(res.group(1))
else:
return None
def get_time(filepath):
pattern = r't(\d+)xy\w+.tif'
res = re.search(pattern,filepath)
if (res != None):
return np.int_(res.group(1))
else:
return None
# loads and image stack from TIFF or HDF5 using mm3 conventions
def load_stack(fov_id, peak_id, color='c1', image_return_number=None):
'''
Loads an image stack.
Supports reading TIFF stacks or HDF5 files.
Parameters
----------
fov_id : int
The FOV id
peak_id : int
The peak (channel) id. Dummy None value in case color='empty'
color : str
The image stack type to return. Can be:
c1 : phase stack
cN : where n is an integer for arbitrary color channel
sub : subtracted images
seg : segmented images
empty : get the empty channel for this fov, slightly different
Returns
-------
image_stack : np.ndarray
The image stack through time. Shape is (t, y, x)
'''
# things are slightly different for empty channels
if 'empty' in color:
if params['output'] == 'TIFF':
img_filename = params['experiment_name'] + '_xy%03d_%s.tif' % (fov_id, color)
with tiff.TiffFile(os.path.join(params['empty_dir'],img_filename)) as tif:
img_stack = tif.asarray()
if params['output'] == 'HDF5':
with h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r') as h5f:
img_stack = h5f[color][:]
return img_stack
# load normal images for either TIFF or HDF5
if params['output'] == 'TIFF':
if color[0] == 'c':
img_dir = params['chnl_dir']
elif 'sub' in color:
img_dir = params['sub_dir']
elif 'foci' in color:
img_dir = params['foci_seg_dir']
elif 'seg' in color:
img_dir = params['seg_dir']
img_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, color)
with tiff.TiffFile(os.path.join(img_dir, img_filename)) as tif:
img_stack = tif.asarray()
if params['output'] == 'HDF5':
with h5py.File(os.path.join(params['hdf5_dir'], 'xy%03d.hdf5' % fov_id), 'r') as h5f:
# normal naming
# need to use [:] to get a copy, else it references the closed hdf5 dataset
img_stack = h5f['channel_%04d/p%04d_%s' % (peak_id, peak_id, color)][:]
return img_stack
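# Usage sketch (fov/peak ids are hypothetical): load_stack(1, 25, color='c1') returns the
# phase-contrast stack for FOV 1, channel peak 25 as an np.ndarray of shape (t, y, x);
# color='seg' or 'sub' would load the segmented or subtracted stacks instead.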
# load the time table and add it to the global params
def load_time_table():
'''Add the time table dictionary to the params global dictionary.
This is so it can be used during Cell creation.
'''
# try first for yaml, then for pkl
try:
with open(os.path.join(params['ana_dir'], 'time_table.yaml'), 'rb') as time_table_file:
params['time_table'] = yaml.safe_load(time_table_file)
except:
with open(os.path.join(params['ana_dir'], 'time_table.pkl'), 'rb') as time_table_file:
params['time_table'] = pickle.load(time_table_file)
return
# function for loading the channel masks
def load_channel_masks():
'''Load channel masks dictionary. Should be .yaml but try pickle too.
'''
information("Loading channel masks dictionary.")
# try loading from .yaml before .pkl
try:
information('Path:', os.path.join(params['ana_dir'], 'channel_masks.yaml'))
with open(os.path.join(params['ana_dir'], 'channel_masks.yaml'), 'r') as cmask_file:
channel_masks = yaml.safe_load(cmask_file)
except:
warning('Could not load channel masks dictionary from .yaml.')
try:
information('Path:', os.path.join(params['ana_dir'], 'channel_masks.pkl'))
with open(os.path.join(params['ana_dir'], 'channel_masks.pkl'), 'rb') as cmask_file:
channel_masks = pickle.load(cmask_file)
except ValueError:
warning('Could not load channel masks dictionary from .pkl.')
return channel_masks
# function for loading the specs file
def load_specs():
'''Load specs file which indicates which channels should be analyzed, used as empties, or ignored.'''
try:
with open(os.path.join(params['ana_dir'], 'specs.yaml'), 'r') as specs_file:
specs = yaml.safe_load(specs_file)
except:
try:
with open(os.path.join(params['ana_dir'], 'specs.pkl'), 'rb') as specs_file:
specs = pickle.load(specs_file)
except ValueError:
warning('Could not load specs file.')
return specs
### functions for dealing with raw TIFF images
# get params is the major function which processes raw TIFF images
def get_initial_tif_params(image_filename):
'''This is a function for getting the information
out of an image for later trap identification, cropping, and aligning with Unet. It loads a tiff file and pulls out the image metadata.
it returns a dictionary like this for each image:
'filename': image_filename,
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jdn' : image_metadata['jdn'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'plane_names' : image_metadata['plane_names'] # list of plane names
Called by
mm3_Compile.py __main__
Calls
mm3.extract_metadata
mm3.find_channels
'''
try:
# open up file and get metadata
with tiff.TiffFile(os.path.join(params['TIFF_dir'],image_filename)) as tif:
image_data = tif.asarray()
#print(image_data.shape) # uncomment for debug
#if len(image_data.shape) == 2:
# img_shape = [image_data.shape[0],image_data.shape[1]]
#else:
img_shape = [image_data.shape[1],image_data.shape[2]]
plane_list = [str(i+1) for i in range(image_data.shape[0])]
#print(plane_list) # uncomment for debug
if params['TIFF_source'] == 'elements':
image_metadata = get_tif_metadata_elements(tif)
elif params['TIFF_source'] == 'nd2ToTIFF':
image_metadata = get_tif_metadata_nd2ToTIFF(tif)
else:
image_metadata = get_tif_metadata_filename(tif)
information('Analyzed %s' % image_filename)
# return the file name, the data for the channels in that image, and the metadata
return {'filepath': os.path.join(params['TIFF_dir'], image_filename),
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jd' : image_metadata['jd'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'planes' : plane_list, # list of plane names
'shape' : img_shape} # image shape x y in pixels
except:
warning('Failed get_params for ' + image_filename.split("/")[-1])
print(sys.exc_info()[0])
print(sys.exc_info()[1])
print(traceback.print_tb(sys.exc_info()[2]))
return {'filepath': os.path.join(params['TIFF_dir'],image_filename), 'analyze_success': False}
# get params is the major function which processes raw TIFF images
def get_tif_params(image_filename, find_channels=True):
'''This is a damn important function for getting the information
out of an image. It loads a tiff file, pulls out the image data, and the metadata,
including the location of the channels if flagged.
it returns a dictionary like this for each image:
'filename': image_filename,
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jdn' : image_metadata['jdn'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'plane_names' : image_metadata['plane_names'] # list of plane names
'channels': cp_dict, # dictionary of channel locations, in the case of Unet-based channel segmentation, it's a dictionary of channel labels
Called by
mm3_Compile.py __main__
Calls
mm3.extract_metadata
mm3.find_channels
'''
try:
# open up file and get metadata
with tiff.TiffFile(os.path.join(params['TIFF_dir'],image_filename)) as tif:
image_data = tif.asarray()
if params['TIFF_source'] == 'elements':
image_metadata = get_tif_metadata_elements(tif)
elif params['TIFF_source'] == 'nd2ToTIFF':
image_metadata = get_tif_metadata_nd2ToTIFF(tif)
else:
image_metadata = get_tif_metadata_filename(tif)
# look for channels if flagged
if find_channels:
# fix the image orientation and get the number of planes
image_data = fix_orientation(image_data)
# if the image data has more than 1 plane restrict image_data to phase,
# which should have highest mean pixel data
if len(image_data.shape) > 2:
#ph_index = np.argmax([np.mean(image_data[ci]) for ci in range(image_data.shape[0])])
ph_index = int(params['phase_plane'][1:]) - 1
image_data = image_data[ph_index]
# get shape of single plane
img_shape = [image_data.shape[0], image_data.shape[1]]
# find channels on the processed image
chnl_loc_dict = find_channel_locs(image_data)
information('Analyzed %s' % image_filename)
# return the file name, the data for the channels in that image, and the metadata
return {'filepath': os.path.join(params['TIFF_dir'], image_filename),
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jd' : image_metadata['jd'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'planes' : image_metadata['planes'], # list of plane names
'shape' : img_shape, # image shape x y in pixels
# 'channels' : {1 : {'A' : 1, 'B' : 2}, 2 : {'C' : 3, 'D' : 4}}}
'channels' : chnl_loc_dict} # dictionary of channel locations
except:
warning('Failed get_params for ' + image_filename.split("/")[-1])
print(sys.exc_info()[0])
print(sys.exc_info()[1])
print(traceback.print_tb(sys.exc_info()[2]))
return {'filepath': os.path.join(params['TIFF_dir'],image_filename), 'analyze_success': False}
# finds metadata in a tiff image which has been exported with Nikon Elements.
def get_tif_metadata_elements(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
This is for tiff files exported by Nikon Elements as a stacked tiff, one per time point.
tif is an opened tif file (using the package tifffile)
arguments:
fname (tifffile.TiffFile): TIFF file object from which data will be extracted
returns:
dictionary of values:
'jdn' (float)
'x' (float)
'y' (float)
'plane_names' (list of strings)
Called by
mm3.Compile
'''
# image Metadata
idata = { 'fov': -1,
't' : -1,
'jd': -1 * 0.0,
'x': -1 * 0.0,
'y': -1 * 0.0,
'planes': []}
# get the fov and t simply from the file name
idata['fov'] = int(tif.fname.split('xy')[1].split('.tif')[0])
idata['t'] = int(tif.fname.split('xy')[0].split('t')[-1])
# a page is a plane, or stack, in the tiff. The other metadata is hidden down in there.
for page in tif:
for tag in page.tags.values():
#print("Checking tag",tag.name,tag.value)
t = tag.name, tag.value
t_string = u""
time_string = u""
# Interesting tag names: 65330, 65331 (binary data; good stuff), 65332
# we want to work with the tag named 65331
# if the tag name is not in the set of tags we find interesting then skip this cycle of the loop
if tag.name not in ('65331', '65332', 'strip_byte_counts', 'image_width', 'orientation', 'compression', 'new_subfile_type', 'fill_order', 'max_sample_value', 'bits_per_sample', '65328', '65333'):
#print("*** " + tag.name)
#print(tag.value)
pass
#if tag.name == '65330':
# return tag.value
if tag.name in ('65331'):
# make infolist a list of the tag values 0 to 65535 by zipping up a paired list of two bytes, at two byte intervals
# note that 0X100 is hex for 256
infolist = [a+b*0x100 for a,b in zip(tag.value[0::2], tag.value[1::2])]
# get char values for each element in infolist
for c_entry in range(0, len(infolist)):
# the element corresponds to an ascii char for a letter or bracket (and a few other things)
if infolist[c_entry] < 127 and infolist[c_entry] > 64:
# add the letter to the unicode string t_string
t_string += chr(infolist[c_entry])
#elif infolist[c_entry] == 0:
# continue
else:
t_string += " "
# this block will find the dTimeAbsolute and print the subsequent integers
# index 170 is counting seconds, and rollover of index 170 leads to increment of index 171
# rollover of index 171 leads to increment of index 172
# get the position of the array by finding the index of the t_string at which dTimeAbsolute is listed; note that 2*len(dTimeAbsolute)=26
#print(t_string)
arraypos = t_string.index("dXPos") * 2 + 16
xarr = tag.value[arraypos:arraypos+4]
b = ''.join(chr(i) for i in xarr)
idata['x'] = float(struct.unpack('<f', b)[0])
arraypos = t_string.index("dYPos") * 2 + 16
yarr = tag.value[arraypos:arraypos+4]
b = ''.join(chr(i) for i in yarr)
idata['y'] = float(struct.unpack('<f', b)[0])
arraypos = t_string.index("dTimeAbsolute") * 2 + 26
shortarray = tag.value[arraypos+2:arraypos+10]
b = ''.join(chr(i) for i in shortarray)
idata['jd'] = float(struct.unpack('<d', b)[0])
# extract plane names
il = [a+b*0x100 for a,b in zip(tag.value[0::2], tag.value[1::2])]
li = [a+b*0x100 for a,b in zip(tag.value[1::2], tag.value[2::2])]
strings = list(zip(il, li))
allchars = ""
for c_entry in range(0, len(strings)):
if 31 < strings[c_entry][0] < 127:
allchars += chr(strings[c_entry][0])
elif 31 < strings[c_entry][1] < 127:
allchars += chr(strings[c_entry][1])
else:
allchars += " "
allchars = re.sub(' +',' ', allchars)
words = allchars.split(" ")
planes = []
for idx in [i for i, x in enumerate(words) if x == "sOpticalConfigName"]:
planes.append(words[idx+1])
idata['planes'] = planes
return idata
# finds metadata in a tiff image which has been exported with nd2ToTIFF.py.
def get_tif_metadata_nd2ToTIFF(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
This is for tiff files exported by the mm3 script mm3_nd2ToTIFF.py. All the metadata
is found in that script and saved in json format to the tiff, so it is simply extracted here.
Parameters:
tif: TIFF file object from which data will be extracted
Returns:
dictionary of values:
'fov': int,
't' : int,
'jdn' (float)
'x' (float)
'y' (float)
'planes' (list of strings)
Called by
mm3_Compile.get_tif_params
'''
# get the first page of the tiff and pull out image description
# this dictionary should be in the above form
for tag in tif.pages[0].tags:
if tag.name=="ImageDescription":
idata=tag.value
break
#print(idata)
idata = json.loads(idata)
return idata
# Finds metadata from the filename
def get_tif_metadata_filename(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
This just gets the tiff metadata from the filename and is a backup option for when the format of the embedded metadata is not known.
Parameters:
tif: TIFF file object from which data will be extracted
Returns:
dictionary of values:
'fov': int,
't' : int,
'jdn' (float)
'x' (float)
'y' (float)
Called by
mm3_Compile.get_tif_params
'''
idata = {'fov' : get_fov(tif.filename), # fov id
't' : get_time(tif.filename), # time point
'jd' : -1 * 0.0, # absolute julian time
'x' : -1 * 0.0, # x position on stage [um]
'y' : -1 * 0.0} # y position on stage [um]
return idata
# make a lookup time table for converting nominal time to elapsed time in seconds
def make_time_table(analyzed_imgs):
'''
Loops through the analyzed images and uses the jd time in the metadata to find the elapsed
time in seconds that each picture was taken. This is later used for more accurate elongation
rate calculation.
Parameters
---------
analyzed_imgs : dict
The output of get_tif_params.
params['use_jd'] : boolean
If set to True, 'jd' time will be used from the image metadata to use to create time table. Otherwise the 't' index will be used, and the parameter 'seconds_per_time_index' will be used from the parameters.yaml file to convert to seconds.
Returns
-------
time_table : dict
Look up dictionary with keys for the FOV and then the time point.
'''
information('Making time table...')
# initialize
time_table = {}
first_time = float('inf')
# need to go through the data once to find the first time
for iname, idata in six.iteritems(analyzed_imgs):
if params['use_jd']:
if idata['jd'] < first_time:
first_time = idata['jd']
else:
if idata['t'] < first_time:
first_time = idata['t']
# init dictionary for specific times per FOV
if idata['fov'] not in time_table:
time_table[idata['fov']] = {}
for iname, idata in six.iteritems(analyzed_imgs):
if params['use_jd']:
# convert jd time to elapsed time in seconds
t_in_seconds = np.around((idata['jd'] - first_time) * 24*60*60, decimals=0).astype('uint32')
else:
t_in_seconds = np.around((idata['t'] - first_time) * params['moviemaker']['seconds_per_time_index'], decimals=0).astype('uint32')
time_table[int(idata['fov'])][int(idata['t'])] = int(t_in_seconds)
# save to .pkl. This pkl will be loaded into the params
# with open(os.path.join(params['ana_dir'], 'time_table.pkl'), 'wb') as time_table_file:
# pickle.dump(time_table, time_table_file, protocol=pickle.HIGHEST_PROTOCOL)
# with open(os.path.join(params['ana_dir'], 'time_table.txt'), 'w') as time_table_file:
# pprint(time_table, stream=time_table_file)
with open(os.path.join(params['ana_dir'], 'time_table.yaml'), 'w') as time_table_file:
yaml.dump(data=time_table, stream=time_table_file, default_flow_style=False, tags=None)
information('Time table saved.')
return time_table
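# Illustrative shape of the returned time table (numbers are made up): for two FOVs
# imaged once a minute it would look like
# {1: {0: 0, 1: 60, 2: 120}, 2: {0: 0, 1: 60, 2: 120}},
# i.e. time_table[fov_id][t_index] -> elapsed seconds since the first image.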
# saves traps sliced via Unet
def save_tiffs(imgDict, analyzed_imgs, fov_id):
savePath = os.path.join(params['experiment_directory'],
params['analysis_directory'],
params['chnl_dir'])
img_names = [key for key in analyzed_imgs.keys()]
image_params = analyzed_imgs[img_names[0]]
for peak,img in six.iteritems(imgDict):
img = img.astype('uint16')
if not os.path.isdir(savePath):
os.mkdir(savePath)
for planeNumber in image_params['planes']:
channel_filename = os.path.join(savePath, params['experiment_name'] + '_xy{0:0=3}_p{1:0=4}_c{2}.tif'.format(fov_id, peak, planeNumber))
io.imsave(channel_filename, img[:,:,:,int(planeNumber)-1])
# slice_and_write cuts up the image files one at a time and writes them out to tiff stacks
def tiff_stack_slice_and_write(images_to_write, channel_masks, analyzed_imgs):
'''Writes out 4D stacks of TIFF images per channel.
Loads all tiffs from an FOV into memory and then slices all time points at once.
Called by
__main__
'''
# make an array of images and then concatenate them into one big stack
image_fov_stack = []
# go through list of images and get the file path
for n, image in enumerate(images_to_write):
# analyzed_imgs dictionary will be found in main scope. [0] is the key, [1] is jd
image_params = analyzed_imgs[image[0]]
information("Loading %s." % image_params['filepath'].split('/')[-1])
if n == 1:
# declare identification variables for saving using first image
fov_id = image_params['fov']
# load the tif and store it in array
with tiff.TiffFile(image_params['filepath']) as tif:
image_data = tif.asarray()
# channel finding was also done on images after orientation was fixed
image_data = fix_orientation(image_data)
# add additional axis if the image is flat
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
# change axis so it goes Y, X, Plane
image_data = np.rollaxis(image_data, 0, 3)
# add it to list. The images should be in time order
image_fov_stack.append(image_data)
# concatenate the list into one big ass stack
image_fov_stack = np.stack(image_fov_stack, axis=0)
# cut out the channels as per channel masks for this fov
for peak, channel_loc in six.iteritems(channel_masks[fov_id]):
#information('Slicing and saving channel peak %s.' % channel_filename.split('/')[-1])
information('Slicing and saving channel peak %d.' % peak)
# channel masks should only contain ints, but you can use this for hard fix
# for i in range(len(channel_loc)):
# for j in range(len(channel_loc[i])):
# channel_loc[i][j] = int(channel_loc[i][j])
# slice out channel.
# The function should recognize the shape length as 4 and cut all time points
channel_stack = cut_slice(image_fov_stack, channel_loc)
# save a different time stack for all colors
for color_index in range(channel_stack.shape[3]):
# this is the filename for the channel
# # chnl_dir and p will be looked for in the scope above (__main__)
channel_filename = os.path.join(params['chnl_dir'], params['experiment_name'] + '_xy%03d_p%04d_c%1d.tif' % (fov_id, peak, color_index+1))
# save stack
tiff.imsave(channel_filename, channel_stack[:,:,:,color_index], compress=4)
return
# saves traps sliced via Unet to an hdf5 file
def save_hdf5(imgDict, img_names, analyzed_imgs, fov_id, channel_masks):
'''Writes out 4D stacks of images to an HDF5 file.
Called by
mm3_Compile.py
'''
savePath = params['hdf5_dir']
if not os.path.isdir(savePath):
os.mkdir(savePath)
img_times = [analyzed_imgs[key]['t'] for key in img_names]
img_jds = [analyzed_imgs[key]['jd'] for key in img_names]
fov_ids = [analyzed_imgs[key]['fov'] for key in img_names]
# get image_params from first image from current fov
image_params = analyzed_imgs[img_names[0]]
# establish some variables for hdf5 attributes
fov_id = image_params['fov']
x_loc = image_params['x']
y_loc = image_params['y']
image_shape = image_params['shape']
image_planes = image_params['planes']
fov_channel_masks = channel_masks[fov_id]
with h5py.File(os.path.join(savePath,'{}_xy{:0=2}.hdf5'.format(params['experiment_name'],fov_id)), 'w', libver='earliest') as h5f:
# add in metadata for this FOV
# these attributes should be common for all channel
h5f.attrs.create('fov_id', fov_id)
h5f.attrs.create('stage_x_loc', x_loc)
h5f.attrs.create('stage_y_loc', y_loc)
h5f.attrs.create('image_shape', image_shape)
# encoding is because HDF5 has problems with numpy unicode
h5f.attrs.create('planes', [plane.encode('utf8') for plane in image_planes])
h5f.attrs.create('peaks', sorted([key for key in imgDict.keys()]))
# this is for things that change across time, for these create a dataset
img_names = np.asarray(img_names)
img_names = np.expand_dims(img_names, 1)
img_names = img_names.astype('S100')
h5ds = h5f.create_dataset(u'filenames', data=img_names,
chunks=True, maxshape=(None, 1), dtype='S100',
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times', data=np.expand_dims(img_times, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times_jd', data=np.expand_dims(img_jds, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
# cut out the channels as per channel masks for this fov
for peak,channel_stack in six.iteritems(imgDict):
channel_stack = channel_stack.astype('uint16')
# create group for this trap
h5g = h5f.create_group('channel_%04d' % peak)
# add attribute for peak_id, channel location
h5g.attrs.create('peak_id', peak)
channel_loc = fov_channel_masks[peak]
h5g.attrs.create('channel_loc', channel_loc)
# save a different dataset for all colors
for color_index in range(channel_stack.shape[3]):
# create the dataset for the image. Review docs for these options.
h5ds = h5g.create_dataset(u'p%04d_c%1d' % (peak, color_index+1),
data=channel_stack[:,:,:,color_index],
chunks=(1, channel_stack.shape[1], channel_stack.shape[2]),
maxshape=(None, channel_stack.shape[1], channel_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# h5ds.attrs.create('plane', image_planes[color_index].encode('utf8'))
# write the data even though we have more to write (free up memory)
h5f.flush()
return
# same thing as tiff_stack_slice_and_write but do it for hdf5
def hdf5_stack_slice_and_write(images_to_write, channel_masks, analyzed_imgs):
'''Writes out 4D stacks of TIFF images to an HDF5 file.
Called by
__main__
'''
# make an array of images and then concatenate them into one big stack
image_fov_stack = []
# make arrays for filenames and times
image_filenames = []
image_times = [] # times is still an integer but may be indexed arbitrarily
image_jds = [] # jds = julian dates (times)
# go through list of images, load and fix them, and create arrays of metadata
for n, image in enumerate(images_to_write):
image_name = image[0] # [0] is the key, [1] is jd
# analyzed_imgs dictionary will be found in main scope.
image_params = analyzed_imgs[image_name]
information("Loading %s." % image_params['filepath'].split('/')[-1])
# add information to metadata arrays
image_filenames.append(image_name)
image_times.append(image_params['t'])
image_jds.append(image_params['jd'])
# declare identification variables for saving using first image
if n == 1:
# same across fov
fov_id = image_params['fov']
x_loc = image_params['x']
y_loc = image_params['y']
image_shape = image_params['shape']
image_planes = image_params['planes']
# load the tif and store it in array
with tiff.TiffFile(image_params['filepath']) as tif:
image_data = tif.asarray()
# channel finding was also done on images after orientation was fixed
image_data = fix_orientation(image_data)
# add additional axis if the image is flat
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
# change axis so it goes Y, X, Plane
image_data = np.rollaxis(image_data, 0, 3)
# add it to list. The images should be in time order
image_fov_stack.append(image_data)
# concatenate the list into one big ass stack
image_fov_stack = np.stack(image_fov_stack, axis=0)
# create the HDF5 file for the FOV, first time this is being done.
with h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'w', libver='earliest') as h5f:
# add in metadata for this FOV
# these attributes should be common for all channel
h5f.attrs.create('fov_id', fov_id)
h5f.attrs.create('stage_x_loc', x_loc)
h5f.attrs.create('stage_y_loc', y_loc)
h5f.attrs.create('image_shape', image_shape)
# encoding is because HDF5 has problems with numpy unicode
h5f.attrs.create('planes', [plane.encode('utf8') for plane in image_planes])
h5f.attrs.create('peaks', sorted(channel_masks[fov_id].keys()))
# this is for things that change across time, for these create a dataset
h5ds = h5f.create_dataset(u'filenames', data=np.expand_dims(image_filenames, 1),
chunks=True, maxshape=(None, 1), dtype='S100',
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times', data=np.expand_dims(image_times, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times_jd', data=np.expand_dims(image_jds, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
# cut out the channels as per channel masks for this fov
for peak, channel_loc in six.iteritems(channel_masks[fov_id]):
#information('Slicing and saving channel peak %s.' % channel_filename.split('/')[-1])
information('Slicing and saving channel peak %d.' % peak)
# create group for this channel
h5g = h5f.create_group('channel_%04d' % peak)
# add attribute for peak_id, channel location
h5g.attrs.create('peak_id', peak)
h5g.attrs.create('channel_loc', channel_loc)
# channel masks should only contain ints, but you can use this for a hard fix
# for i in range(len(channel_loc)):
# for j in range(len(channel_loc[i])):
# channel_loc[i][j] = int(channel_loc[i][j])
# slice out channel.
# The function should recognize the shape length as 4 and cut all time points
channel_stack = cut_slice(image_fov_stack, channel_loc)
# save a different dataset for all colors
for color_index in range(channel_stack.shape[3]):
# create the dataset for the image. Review docs for these options.
h5ds = h5g.create_dataset(u'p%04d_c%1d' % (peak, color_index+1),
data=channel_stack[:,:,:,color_index],
chunks=(1, channel_stack.shape[1], channel_stack.shape[2]),
maxshape=(None, channel_stack.shape[1], channel_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# h5ds.attrs.create('plane', image_planes[color_index].encode('utf8'))
# write the data even though we have more to write (free up memory)
h5f.flush()
return
def tileImage(img, subImageNumber):
divisor = int(np.sqrt(subImageNumber))
M = img.shape[0]//divisor
N = img.shape[0]//divisor
print(img.shape, M, N, divisor, subImageNumber)
ans = ([img[x:x+M,y:y+N] for x in range(0,img.shape[0],M) for y in range(0,img.shape[1],N)])
tiles=[]
for m in ans:
if m.shape[0]==512 and m.shape[1]==512:
tiles.append(m)
tiles=np.asarray(tiles)
#print(tiles)
return(tiles)
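# A minimal sketch of the expected tiling (sizes assumed): a 2048x2048 image with
# subImageNumber=16 is cut into a 4x4 grid of 512x512 tiles, and only tiles that are
# exactly 512x512 survive the filter above, so the returned array has shape (16, 512, 512).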
def get_weights(img, subImageNumber):
divisor = int(np.sqrt(subImageNumber))
M = img.shape[0]//divisor
N = img.shape[0]//divisor
weights = np.ones((img.shape[0],img.shape[1]),dtype='uint8')
for i in range(divisor-1):
weights[(M*(i+1))-25:(M*(i+1)+25),:] = 0
weights[:,(N*(i+1))-25:(N*(i+1)+25)] = 0
return(weights)
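# What get_weights produces (a sketch): an array of ones with 50-pixel-wide zero bands
# (25 px on either side) centred on every internal tile boundary, so that predictions
# near crop seams can be down-weighted when the tiles are stitched back together.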
def permute_image(img, trap_align_metadata):
# are there three dimensions?
if len(img.shape) == 3:
if img.shape[0] < 3: # for tifs with fewer than three imaging channels, the first dimension separates channels
# img = np.transpose(img, (1,2,0))
img = img[trap_align_metadata['phase_plane_index'],:,:] # grab just the phase channel
else:
img = img[:,:,trap_align_metadata['phase_plane_index']] # grab just the phase channel
return(img)
def imageConcatenatorFeatures(imgStack, subImageNumber = 64):
rowNumPerImage = int(np.sqrt(subImageNumber)) # here I'm assuming our large images are square, with equal number of crops in each dimension
#print(rowNumPerImage)
imageNum = int(imgStack.shape[0]/subImageNumber) # total number of sub-images divided by the number of sub-images in each original large image
iterNum = int(imageNum*rowNumPerImage)
imageDims = int(np.sqrt(imgStack.shape[1]*imgStack.shape[2]*subImageNumber))
featureNum = int(imgStack.shape[3])
bigImg = np.zeros(shape=(imageNum, imageDims, imageDims, featureNum), dtype='float32') # create array to store reconstructed images
featureRowDicts = []
for j in range(featureNum):
rowDict = {}
for i in range(iterNum):
baseNum = int(i*iterNum/imageNum)
# concatenate columns of 256x256 images to build each 256x2048 row
rowDict[i] = np.column_stack((imgStack[baseNum,:,:,j],imgStack[baseNum+1,:,:,j],
imgStack[baseNum+2,:,:,j], imgStack[baseNum+3,:,:,j]))#,
#imgStack[baseNum+4,:,:,j],imgStack[baseNum+5,:,:,j],
#imgStack[baseNum+6,:,:,j],imgStack[baseNum+7,:,:,j]))
featureRowDicts.append(rowDict)
for j in range(featureNum):
for i in range(imageNum):
baseNum = int(i*rowNumPerImage)
# concatenate appropriate 256x2048 rows to build a 2048x2048 image and place it into bigImg
bigImg[i,:,:,j] = np.row_stack((featureRowDicts[j][baseNum],featureRowDicts[j][baseNum+1],
featureRowDicts[j][baseNum+2],featureRowDicts[j][baseNum+3]))#,
#featureRowDicts[j][baseNum+4],featureRowDicts[j][baseNum+5],
#featureRowDicts[j][baseNum+6],featureRowDicts[j][baseNum+7]))
return(bigImg)
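# Reassembly sketch (shapes assumed): with subImageNumber=16, imgStack holds 512x512
# crops in row-major order, four per row; the columns of each group of four are stacked
# into a 512x2048 strip and four strips are stacked into the 2048x2048 output, one plane
# per feature channel. Note that the column/row stacking above is hard-coded for a 4x4 grid.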
def imageConcatenatorFeatures2(imgStack, subImageNumber = 81):
rowNumPerImage = int(np.sqrt(subImageNumber)) # here I'm assuming our large images are square, with equal number of crops in each dimension
imageNum = int(imgStack.shape[0]/subImageNumber) # total number of sub-images divided by the number of sub-images in each original large image
iterNum = int(imageNum*rowNumPerImage)
imageDims = int(np.sqrt(imgStack.shape[1]*imgStack.shape[2]*subImageNumber))
featureNum = int(imgStack.shape[3])
bigImg = np.zeros(shape=(imageNum, imageDims, imageDims, featureNum), dtype='float32') # create array to store reconstructed images
featureRowDicts = []
for j in range(featureNum):
rowDict = {}
for i in range(iterNum):
baseNum = int(i*iterNum/imageNum)
# concatenate columns of 256x256 images to build each 256x2048 row
rowDict[i] = np.column_stack((imgStack[baseNum,:,:,j],imgStack[baseNum+1,:,:,j],
imgStack[baseNum+2,:,:,j], imgStack[baseNum+3,:,:,j],
imgStack[baseNum+4,:,:,j]))#,imgStack[baseNum+5,:,:,j],
#imgStack[baseNum+6,:,:,j],imgStack[baseNum+7,:,:,j],
#imgStack[baseNum+8,:,:,j]))
featureRowDicts.append(rowDict)
for j in range(featureNum):
for i in range(imageNum):
baseNum = int(i*rowNumPerImage)
# concatenate appropriate 256x2048 rows to build a 2048x2048 image and place it into bigImg
bigImg[i,:,:,j] = np.row_stack((featureRowDicts[j][baseNum],featureRowDicts[j][baseNum+1],
featureRowDicts[j][baseNum+2],featureRowDicts[j][baseNum+3],
featureRowDicts[j][baseNum+4]))#,featureRowDicts[j][baseNum+5],
#featureRowDicts[j][baseNum+6],featureRowDicts[j][baseNum+7],
#featureRowDicts[j][baseNum+8]))
return(bigImg)
def get_weights_array(arr=np.zeros((2048,2048)), shiftDistance=128, subImageNumber=64, padSubImageNumber=81):
originalImageWeights = get_weights(arr, subImageNumber=subImageNumber)
shiftLeftWeights = np.pad(originalImageWeights, pad_width=((0,0),(0,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))[:,shiftDistance:]
shiftRightWeights = np.pad(originalImageWeights, pad_width=((0,0),(shiftDistance,0)),
mode='constant', constant_values=((0,0),(0,0)))[:,:(-1*shiftDistance)]
shiftUpWeights = np.pad(originalImageWeights, pad_width=((0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[shiftDistance:,:]
shiftDownWeights = np.pad(originalImageWeights, pad_width=((shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[:(-1*shiftDistance),:]
expandedImageWeights = get_weights(np.zeros((arr.shape[0]+2*shiftDistance,arr.shape[1]+2*shiftDistance)), subImageNumber=padSubImageNumber)[shiftDistance:-shiftDistance,shiftDistance:-shiftDistance]
allWeights = np.stack((originalImageWeights, expandedImageWeights, shiftUpWeights, shiftDownWeights, shiftLeftWeights,shiftRightWeights), axis=-1)
stackWeights = np.stack((allWeights,allWeights),axis=0)
stackWeights = np.stack((stackWeights,stackWeights,stackWeights),axis=3)
return(stackWeights)
# predicts locations of channels in an image using deep learning model
def get_frame_predictions(img,model,stackWeights, shiftDistance=256, subImageNumber=16, padSubImageNumber=25, debug=False):
pred = predict_first_image_channels(img, model, shiftDistance=shiftDistance,
subImageNumber=subImageNumber, padSubImageNumber=padSubImageNumber, debug=debug)[0,...]
# print(pred.shape)
if debug:
print(pred.shape)
compositePrediction = np.average(pred, axis=3, weights=stackWeights)
# print(compositePrediction.shape)
padSize = (compositePrediction.shape[0]-img.shape[0])//2
compositePrediction = util.crop(compositePrediction,((padSize,padSize),
(padSize,padSize),
(0,0)))
# print(compositePrediction.shape)
return(compositePrediction)
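# Illustrative sketch (not part of the pipeline): the composite prediction above is a weighted
# average over the last (shift) axis of the stacked per-shift predictions, computed with
# np.average and a weights array of the same shape. The array shapes and weight values below
# are toy assumptions for demonstration only.
def _example_weighted_composite():
    import numpy as np
    preds = np.random.rand(8, 8, 3, 6)    # (rows, cols, classes, shifted predictions)
    weights = np.ones_like(preds)
    weights[..., 0] *= 2                  # up-weight the unshifted prediction
    composite = np.average(preds, axis=3, weights=weights)
    assert composite.shape == (8, 8, 3)
    return composite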
def apply_median_filter_normalize(imgs):
selem = morphology.disk(3)
for i in range(imgs.shape[0]):
# Store sample
tmpImg = imgs[i,:,:,0]
medImg = median(tmpImg, selem)
tmpImg = medImg/np.max(medImg)
tmpImg = np.expand_dims(tmpImg, axis=-1)
imgs[i,:,:,:] = tmpImg
return(imgs)
def predict_first_image_channels(img, model,
subImageNumber=16, padSubImageNumber=25,
shiftDistance=128, batchSize=1,
debug=False):
imgSize = img.shape[0]
padSize = (2048-imgSize)//2 # how much to pad on each side to get up to 2048x2048?
imgStack = np.pad(img, pad_width=((padSize,padSize),(padSize,padSize)),
mode='constant', constant_values=((0,0),(0,0))) # pad the images to make them 2048x2048
# pad the stack by 128 pixels on each side to get complementary crops that I can run the network on. This
# should help me fill in low-confidence regions where the crop boundaries were for the original image
imgStackExpand = np.pad(imgStack, pad_width=((shiftDistance,shiftDistance),(shiftDistance,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))
imgStackShiftRight = np.pad(imgStack, pad_width=((0,0),(0,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))[:,shiftDistance:]
imgStackShiftLeft = np.pad(imgStack, pad_width=((0,0),(shiftDistance,0)),
mode='constant', constant_values=((0,0),(0,0)))[:,:-shiftDistance]
imgStackShiftDown = np.pad(imgStack, pad_width=((0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[shiftDistance:,:]
imgStackShiftUp = np.pad(imgStack, pad_width=((shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[:-shiftDistance,:]
#print(imgStackShiftUp.shape)
crops = tileImage(imgStack, subImageNumber=subImageNumber)
print("Crops: ", crops.shape)
crops = np.expand_dims(crops, -1)
data_gen_args = {'batch_size':params['compile']['channel_prediction_batch_size'],
'n_channels':1,
'normalize_to_one':True,
'shuffle':False}
predict_gen_args = {'verbose':1,
'use_multiprocessing':True,
'workers':params['num_analyzers']}
img_generator = TrapSegmentationDataGenerator(crops, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
prediction = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
#print(prediction.shape)
cropsExpand = tileImage(imgStackExpand, subImageNumber=padSubImageNumber)
cropsExpand = np.expand_dims(cropsExpand, -1)
img_generator = TrapSegmentationDataGenerator(cropsExpand, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionExpand = imageConcatenatorFeatures2(predictions, subImageNumber=padSubImageNumber)
predictionExpand = util.crop(predictionExpand, ((0,0),(shiftDistance,shiftDistance),(shiftDistance,shiftDistance),(0,0)))
#print(predictionExpand.shape)
cropsShiftLeft = tileImage(imgStackShiftLeft, subImageNumber=subImageNumber)
cropsShiftLeft = np.expand_dims(cropsShiftLeft, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftLeft, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionLeft = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionLeft = np.pad(predictionLeft, pad_width=((0,0),(0,0),(0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:,shiftDistance:,:]
#print(predictionLeft.shape)
cropsShiftRight = tileImage(imgStackShiftRight, subImageNumber=subImageNumber)
cropsShiftRight = np.expand_dims(cropsShiftRight, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftRight, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionRight = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionRight = np.pad(predictionRight, pad_width=((0,0),(0,0),(shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:,:(-1*shiftDistance),:]
#print(predictionRight.shape)
cropsShiftUp = tileImage(imgStackShiftUp, subImageNumber=subImageNumber)
#print(cropsShiftUp.shape)
cropsShiftUp = np.expand_dims(cropsShiftUp, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftUp, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionUp = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionUp = np.pad(predictionUp, pad_width=((0,0),(0,shiftDistance),(0,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,shiftDistance:,:,:]
#print(predictionUp.shape)
cropsShiftDown = tileImage(imgStackShiftDown, subImageNumber=subImageNumber)
cropsShiftDown = np.expand_dims(cropsShiftDown, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftDown, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionDown = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionDown = np.pad(predictionDown, pad_width=((0,0),(shiftDistance,0),(0,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:(-1*shiftDistance),:,:]
#print(predictionDown.shape)
allPredictions = np.stack((prediction, predictionExpand,
predictionUp, predictionDown,
predictionLeft, predictionRight), axis=-1)
return(allPredictions)
# takes initial U-net centroids for trap locations, and creates bounding boxes for each trap at the defined height and width
def get_frame_trap_bounding_boxes(trapLabels, trapProps, trapAreaThreshold=2000, trapWidth=27, trapHeight=256):
badTrapLabels = [reg.label for reg in trapProps if reg.area < trapAreaThreshold] # filter out small "trap" regions
goodTraps = trapLabels.copy()
for label in badTrapLabels:
goodTraps[goodTraps == label] = 0 # re-label bad traps as background (0)
goodTrapProps = measure.regionprops(goodTraps)
trapCentroids = [(int(np.round(reg.centroid[0])),int(np.round(reg.centroid[1]))) for reg in goodTrapProps] # get centroids as integers
trapBboxes = []
for centroid in trapCentroids:
rowIndex = centroid[0]
colIndex = centroid[1]
minRow = rowIndex-trapHeight//2
maxRow = rowIndex+trapHeight//2
minCol = colIndex-trapWidth//2
maxCol = colIndex+trapWidth//2
if trapWidth % 2 != 0:
maxCol += 1
coordArray = np.array([minRow,maxRow,minCol,maxCol])
# remove any traps at edges of image (rows checked against image height, columns against image width)
if np.any(np.array([minRow,maxRow]) > goodTraps.shape[0]) or np.any(np.array([minCol,maxCol]) > goodTraps.shape[1]):
continue
if np.any(coordArray < 0):
continue
trapBboxes.append((minRow,minCol,maxRow,maxCol))
return(trapBboxes)
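# Illustrative sketch (not part of the pipeline): each trap bounding box above is built by
# centering a fixed-size window on the trap centroid; odd widths get one extra column on the
# right, and boxes with out-of-range coordinates (traps at the image edge) are discarded. The
# centroid and trap dimensions below are toy values.
def _example_bbox_from_centroid():
    rowIndex, colIndex = 120, 200
    trapHeight, trapWidth = 256, 27
    minRow, maxRow = rowIndex - trapHeight // 2, rowIndex + trapHeight // 2
    minCol, maxCol = colIndex - trapWidth // 2, colIndex + trapWidth // 2
    if trapWidth % 2 != 0:
        maxCol += 1       # keep the full requested width for odd trapWidth
    return (minRow, minCol, maxRow, maxCol)   # (-8, 187, 248, 214): would be skipped (negative row)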
# crops trap regions from each frame using the bounding boxes (which may already include alignment shifts)
def crop_traps(fileNames, trapProps, labelledTraps, bboxesDict, trap_align_metadata):
frameNum = trap_align_metadata['frame_count']
channelNum = trap_align_metadata['plane_number']
trapImagesDict = {key:np.zeros((frameNum,
trap_align_metadata['trap_height'],
trap_align_metadata['trap_width'],
channelNum)) for key in bboxesDict}
trapClosedEndPxDict = {}
flipImageDict = {}
trapMask = labelledTraps
for frame in range(frameNum):
if (frame+1) % 20 == 0:
print("Cropping trap regions for frame number {} of {}.".format(frame+1, frameNum))
imgPath = os.path.join(params['experiment_directory'],params['image_directory'],fileNames[frame])
fullFrameImg = io.imread(imgPath)
if len(fullFrameImg.shape) == 3:
if fullFrameImg.shape[0] < 3: # for tifs with fewer than three imaging channels, the first dimension separates channels
fullFrameImg = np.transpose(fullFrameImg, (1,2,0))
trapClosedEndPxDict[fileNames[frame]] = {key:{} for key in bboxesDict.keys()}
for key in trapImagesDict.keys():
bbox = bboxesDict[key][frame]
trapImagesDict[key][frame,:,:,:] = fullFrameImg[bbox[0]:bbox[2],bbox[1]:bbox[3],:]
#tmpImg = np.reshape(fullFrameImg[trapMask==key], (trapHeight,trapWidth,channelNum))
if frame == 0:
medianProfile = np.median(trapImagesDict[key][frame,:,:,0],axis=1) # per-row median intensity profile of the trap
maxIntensityRow = np.argmax(medianProfile)
if maxIntensityRow > trap_align_metadata['trap_height']//2:
flipImageDict[key] = 0
else:
flipImageDict[key] = 1
if flipImageDict[key] == 1:
trapImagesDict[key][frame,:,:,:] = trapImagesDict[key][frame,::-1,:,:]
trapClosedEndPxDict[fileNames[frame]][key]['closed_end_px'] = bbox[0]
trapClosedEndPxDict[fileNames[frame]][key]['open_end_px'] = bbox[2]
else:
trapClosedEndPxDict[fileNames[frame]][key]['closed_end_px'] = bbox[2]
trapClosedEndPxDict[fileNames[frame]][key]['open_end_px'] = bbox[0]
continue
return(trapImagesDict, trapClosedEndPxDict)
# gets shifted bounding boxes to crop traps through time
def shift_bounding_boxes(bboxesDict, shifts, imgSize):
bboxesShiftDict = {}
for key in bboxesDict.keys():
bboxesShiftDict[key] = []
bboxes = bboxesDict[key]
for i in range(shifts.shape[0]):
if i == 0:
bboxesShiftDict[key].append(bboxes)
else:
minRow = bboxes[0]+shifts[i,0]
minCol = bboxes[1]+shifts[i,1]
maxRow = bboxes[2]+shifts[i,0]
maxCol = bboxes[3]+shifts[i,1]
bboxesShiftDict[key].append((minRow,
minCol,
maxRow,
maxCol))
if np.any(np.asarray([minRow,minCol,maxRow,maxCol]) < 0):
print("channel {} removed: out of frame".format(key))
del bboxesShiftDict[key]
break
if np.any(np.asarray([minRow,minCol,maxRow,maxCol]) > imgSize):
print("channel {} removed: out of frame".format(key))
del bboxesShiftDict[key]
break
return(bboxesShiftDict)
# finds the location of channels in a tif
def find_channel_locs(image_data):
'''Finds the location of channels from a phase contrast image. The channels are returned in
a dictionary where the key is the x position of the channel in pixels and the value is a
dictionary with the open and closed end positions in y, in pixels.
Called by
mm3_Compile.get_tif_params
'''
# declare temp variables from yaml parameter dict.
chan_w = params['compile']['channel_width']
chan_sep = params['compile']['channel_separation']
crop_wp = int(params['compile']['channel_width_pad'] + chan_w/2)
chan_snr = params['compile']['channel_detection_snr']
# Detect peaks in the x projection (i.e. find the channels)
projection_x = image_data.sum(axis=0).astype(np.int32)
# find_peaks_cwt is a function which attempts to find the peaks in a 1-D array by
# convolving it with a wave. here the wave is the default Mexican hat wave
# but the minimum signal to noise ratio is specified
# *** The range here should be a parameter or changed to a fraction.
peaks = find_peaks_cwt(projection_x, np.arange(chan_w-5,chan_w+5), min_snr=chan_snr)
# If the left-most peak position is within half of a channel separation,
# discard the channel from the list.
if peaks[0] < (chan_sep / 2):
peaks = peaks[1:]
# If the difference between the right-most peak position and the right edge
# of the image is less than half of a channel separation, discard the channel.
if image_data.shape[1] - peaks[-1] < (chan_sep / 2):
peaks = peaks[:-1]
# Find the average channel ends for the y-projected image
projection_y = image_data.sum(axis=1)
# find derivative, must use int32 because it was unsigned 16b before.
proj_y_d = np.diff(projection_y.astype(np.int32))
# use the top third to look for the closed end: the pixel location of the highest derivative
onethirdpoint_y = int(projection_y.shape[0]/3.0)
default_closed_end_px = proj_y_d[:onethirdpoint_y].argmax()
# use bottom third to look for open end, pixel location of lowest deriv
twothirdpoint_y = int(projection_y.shape[0]*2.0/3.0)
default_open_end_px = twothirdpoint_y + proj_y_d[twothirdpoint_y:].argmin()
default_length = default_open_end_px - default_closed_end_px # used for checks
# go through peaks and assign information
# dict for channel dimensions
chnl_loc_dict = {}
# key is peak location, value is dict with {'closed_end_px': px, 'open_end_px': px}
for peak in peaks:
# set defaults
chnl_loc_dict[peak] = {'closed_end_px': default_closed_end_px,
'open_end_px': default_open_end_px}
# redo the previous y projection finding with just this channel
channel_slice = image_data[:, peak-crop_wp:peak+crop_wp]
slice_projection_y = channel_slice.sum(axis = 1)
slice_proj_y_d = np.diff(slice_projection_y.astype(np.int32))
slice_closed_end_px = slice_proj_y_d[:onethirdpoint_y].argmax()
slice_open_end_px = twothirdpoint_y + slice_proj_y_d[twothirdpoint_y:].argmin()
slice_length = slice_open_end_px - slice_closed_end_px
# check if these values make sense. If so, use them. If not, use default
# make sure length is not more than 15 pixels bigger or smaller than default
# *** This 15 should probably be a parameter or at least changed to a fraction.
if slice_length + 15 < default_length or slice_length - 15 > default_length:
continue
# make sure ends are greater than 15 pixels from image edge
if slice_closed_end_px < 15 or slice_open_end_px > image_data.shape[0] - 15:
continue
# if you made it to this point then update the entry
chnl_loc_dict[peak] = {'closed_end_px' : slice_closed_end_px,
'open_end_px' : slice_open_end_px}
return chnl_loc_dict
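# Illustrative sketch (not part of the pipeline): find_peaks_cwt locates peaks in a 1-D signal
# by convolving it with wavelets over a range of widths, as used above on the x-projection of
# the phase image. The synthetic projection, widths, and SNR below are assumptions for
# demonstration only, not the pipeline's parameters.
def _example_find_peaks_cwt():
    import numpy as np
    from scipy.signal import find_peaks_cwt
    x = np.arange(500)
    projection = np.zeros(500, dtype=float)
    for center in range(50, 500, 100):   # fake channels every ~100 px
        projection += np.exp(-0.5 * ((x - center) / 5.0) ** 2)
    peaks = find_peaks_cwt(projection, np.arange(5, 15), min_snr=1.0)
    return peaks                         # approximate positions of the bumps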
# make masks from initial set of images (same images as clusters)
def make_masks(analyzed_imgs):
'''
Make masks goes through the channel locations in the image metadata and builds a consensus
mask for each image per fov, which it returns as a dictionary named channel_masks.
The keys in this dictionary are fov ids, and each value is another dictionary. That dict's keys are channel locations (peaks) and its values are [2][2] arrays:
[[minrow, maxrow],[mincol, maxcol]] of pixel locations designating the corners of each mask
for each channel on the whole image.
One important consequence of this function is that the channel ids and the size of the
channel slices are decided now. Updates to masks must coordinate with these values.
Parameters
analyzed_imgs : dict
image information created by get_params
Returns
channel_masks : dict
dictionary of consensus channel masks.
Called By
mm3_Compile.py
Calls
'''
information("Determining initial channel masks...")
# declare temp variables from yaml parameter dict.
crop_wp = int(params['compile']['channel_width_pad'] + params['compile']['channel_width']/2)
chan_lp = int(params['compile']['channel_length_pad'])
# initialize dictionary
channel_masks = {}
# get the size of the images (hope they are the same)
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
image_rows = img_v['shape'][0] # number of rows (y pixels)
image_cols = img_v['shape'][1] # number of columns (x pixels)
break # just need one; breaking out early means the whole dict doesn't have to load
# get the fov ids
fovs = []
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
if img_v['fov'] not in fovs:
fovs.append(img_v['fov'])
# max width and length across all fovs. channels will get expanded by these values
# this is important for later updates to the masks, which should be the same
max_chnl_mask_len = 0
max_chnl_mask_wid = 0
# for each fov make a channel_mask dictionary from consensus mask
for fov in fovs:
# initialize the dict and consensus mask
channel_masks_1fov = {} # dict which holds channel masks {peak : [[y1, y2],[x1,x2]],...}
consensus_mask = np.zeros([image_rows, image_cols]) # mask for labeling
# bring up information for each image
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
# skip this one if it is not of the current fov
if img_v['fov'] != fov:
continue
# for each channel in each image make a single mask
img_chnl_mask = np.zeros([image_rows, image_cols])
# and add the channel mask to it
for chnl_peak, peak_ends in six.iteritems(img_v['channels']):
# pull out the peak location and top and bottom location
# and expand by padding (more padding done later for width)
x1 = max(chnl_peak - crop_wp, 0)
x2 = min(chnl_peak + crop_wp, image_cols)
y1 = max(peak_ends['closed_end_px'] - chan_lp, 0)
y2 = min(peak_ends['open_end_px'] + chan_lp, image_rows)
# add it to the mask for this image
img_chnl_mask[y1:y2, x1:x2] = 1
# add it to the consensus mask
consensus_mask += img_chnl_mask
# Normalize consensus mask between 0 and 1.
consensus_mask = consensus_mask.astype('float32') / float(np.amax(consensus_mask))
# label the consensus mask; each connected region becomes one channel.
# ndi.label returns (labeled_array, num_regions); the [0] keeps just the labeled array.
consensus_mask = ndi.label(consensus_mask)[0]
# go through each label
for label in np.unique(consensus_mask):
if label == 0: # label zero is the background
continue
binary_core = consensus_mask == label
# clean up the rough edges
poscols = np.any(binary_core, axis = 0) # column positions where true (any)
posrows = np.any(binary_core, axis = 1) # row positions where true (any)
# channel_id given by horizontal position
# this is important. later updates to the positions will have to check
# if their channels contain this median value to match up
channel_id = int(np.median(np.where(poscols)[0]))
# store the edge locations of the channel mask in the dictionary. Will be ints
min_row = np.min(np.where(posrows)[0])
max_row = np.max(np.where(posrows)[0])
min_col = np.min(np.where(poscols)[0])
max_col = np.max(np.where(poscols)[0])
# if the min/max cols are within the image bounds,
# add the mask, as 4 points, to the dictionary
if min_col > 0 and max_col < image_cols:
channel_masks_1fov[channel_id] = [[min_row, max_row], [min_col, max_col]]
# find the largest channel width and height while you go round
max_chnl_mask_len = int(max(max_chnl_mask_len, max_row - min_row))
max_chnl_mask_wid = int(max(max_chnl_mask_wid, max_col - min_col))
# add channel_mask dictionary to the fov dictionary, use copy to play it safe
channel_masks[fov] = channel_masks_1fov.copy()
# update all channel masks to be the max size
cm_copy = channel_masks.copy()
for fov, peaks in six.iteritems(channel_masks):
# f_id = int(fov)
for peak, chnl_mask in six.iteritems(peaks):
# p_id = int(peak)
# just add length to the open end (bottom of image, low column)
if chnl_mask[0][1] - chnl_mask[0][0] != max_chnl_mask_len:
cm_copy[fov][peak][0][1] = chnl_mask[0][0] + max_chnl_mask_len
# enlarge widths around the middle, but make sure you don't get floats
if chnl_mask[1][1] - chnl_mask[1][0] != max_chnl_mask_wid:
wid_diff = max_chnl_mask_wid - (chnl_mask[1][1] - chnl_mask[1][0])
if wid_diff % 2 == 0:
cm_copy[fov][peak][1][0] = max(chnl_mask[1][0] - wid_diff/2, 0)
cm_copy[fov][peak][1][1] = min(chnl_mask[1][1] + wid_diff/2, image_cols - 1)
else:
cm_copy[fov][peak][1][0] = max(chnl_mask[1][0] - (wid_diff-1)/2, 0)
cm_copy[fov][peak][1][1] = min(chnl_mask[1][1] + (wid_diff+1)/2, image_cols - 1)
# convert all values to ints
chnl_mask[0][0] = int(chnl_mask[0][0])
chnl_mask[0][1] = int(chnl_mask[0][1])
chnl_mask[1][0] = int(chnl_mask[1][0])
chnl_mask[1][1] = int(chnl_mask[1][1])
# cm_copy[fov][peak] = {'y_top': chnl_mask[0][0],
# 'y_bot': chnl_mask[0][1],
# 'x_left': chnl_mask[1][0],
# 'x_right': chnl_mask[1][1]}
# print(type(cm_copy[fov][peak][1][0]), cm_copy[fov][peak][1][0])
#save the channel mask dictionary to a pickle and a text file
# with open(os.path.join(params['ana_dir'], 'channel_masks.pkl'), 'wb') as cmask_file:
# pickle.dump(cm_copy, cmask_file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(params['ana_dir'], 'channel_masks.txt'), 'w') as cmask_file:
pprint(cm_copy, stream=cmask_file)
with open(os.path.join(params['ana_dir'], 'channel_masks.yaml'), 'w') as cmask_file:
yaml.dump(data=cm_copy, stream=cmask_file, default_flow_style=False, tags=None)
information("Channel masks saved.")
return cm_copy
# get each fov_id, peak_id, frame's mask bounding box from bounding boxes arrived at by convolutional neural network
def make_channel_masks_CNN(bboxes_dict):
'''
The keys in this dictionary are peak_ids and the values of each is an array of shape (frameNumber,2,2):
Each frameNumber's 2x2 slice of the array represents the given peak_id's [[minrow, maxrow],[mincol, maxcol]].
One important consequence of this function is that the channel ids and the size of the
channel slices are decided now. Updates to masks must coordinate with these values.
Parameters
bboxes_dict : dict
dictionary of per-peak, per-frame bounding boxes from the CNN trap detection
Returns
channel_masks : dict
dictionary of channel masks (per-frame bounding box arrays for each peak).
Called By
mm3_Compile.py
Calls
'''
# initialize the new channel_masks dict
channel_masks = {}
# reorder elements of tuples in bboxes_dict to match [[minrow, maxrow], [mincol, maxcol]] convention above
peak_ids = [peak_id for peak_id in bboxes_dict.keys()]
peak_ids.sort()
bbox_array = np.zeros((len(bboxes_dict[peak_ids[0]]),2,2), dtype='uint16')
for peak_id in peak_ids:
# get each frame's bounding boxes for the given peak_id
frame_bboxes = bboxes_dict[peak_id]
for frame_index in range(len(frame_bboxes)):
# replace the values in bbox_array with the proper ones from frame_bboxes
minrow = frame_bboxes[frame_index][0]
maxrow = frame_bboxes[frame_index][2]
mincol = frame_bboxes[frame_index][1]
maxcol = frame_bboxes[frame_index][3]
bbox_array[frame_index,0,0] = minrow
bbox_array[frame_index,0,1] = maxrow
bbox_array[frame_index,1,0] = mincol
bbox_array[frame_index,1,1] = maxcol
channel_masks[peak_id] = bbox_array
return(channel_masks)
### functions about trimming, padding, and manipulating images
# define function for flipping the images on an FOV by FOV basis
def fix_orientation(image_data):
'''
Fix the orientation. The standard direction for channels to open to is down.
called by
process_tif
get_params
'''
# user parameter indicates how things should be flipped
image_orientation = params['compile']['image_orientation']
# if this is just a phase image, give it an extra layer so the rest of the code is fine
flat = False # flag for if the image is flat or multiple levels
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
flat = True
# setting image_orientation to 'auto' will use autodetection
if image_orientation == "auto":
# use 'phase_plane' to find the phase plane in image_data, assuming c1, c2, c3... naming scheme here.
try:
ph_channel = int(re.search('[0-9]', params['phase_plane']).group(0)) - 1
except:
# Pick the plane to analyze with the highest mean px value (should be phase)
ph_channel = np.argmax([np.mean(image_data[ci]) for ci in range(image_data.shape[0])])
# flip based on the index of the highest average row value
# this should be closer to the opening
if np.argmax(image_data[ph_channel].mean(axis = 1)) < image_data[ph_channel].shape[0] / 2:
image_data = image_data[:,::-1,:]
else:
pass # no need to do anything
# flip if up is chosen
elif image_orientation == "up":
return image_data[:,::-1,:]
# do not flip the images if "down" is the specified image orientation
elif image_orientation == "down":
pass
if flat:
image_data = image_data[0] # just return that first layer
return image_data
# cuts out channels from the image
def cut_slice(image_data, channel_loc):
'''Takes an image and cuts out the channel based on the slice location
slice location is the list with the peak information, in the form
[[y1, y2],[x1, x2]]. Returns the channel slice as a numpy array.
The numpy array will be a stack if there are multiple planes.
if you want to slice all the channels from a picture with the channel_masks
dictionary use a loop like this:
for channel_loc in channel_masks[fov_id]: # fov_id is the fov of the image
channel_slice = cut_slice(image_pixel_data, channel_loc)
# ... do something with the slice
NOTE: this function will try to determine what the shape of your
image is and slice accordingly. It expects the images are in the order
[t, x, y, c]. It assumes images with three dimensions are [x, y, c] not
[t, x, y].
'''
# case where image is in form [x, y]
if len(image_data.shape) == 2:
# make slice object
channel_slicer = np.s_[channel_loc[0][0]:channel_loc[0][1],
channel_loc[1][0]:channel_loc[1][1]]
# case where image is in form [x, y, c]
elif len(image_data.shape) == 3:
channel_slicer = np.s_[channel_loc[0][0]:channel_loc[0][1],
channel_loc[1][0]:channel_loc[1][1],:]
# case where image in form [t, x , y, c]
elif len(image_data.shape) == 4:
channel_slicer = np.s_[:,channel_loc[0][0]:channel_loc[0][1],
channel_loc[1][0]:channel_loc[1][1],:]
# slice based on appropriate slicer object.
channel_slice = image_data[channel_slicer]
# pad y of channel if slice happened to be outside of image
y_difference = (channel_loc[0][1] - channel_loc[0][0]) - channel_slice.shape[1]
if y_difference > 0:
paddings = [[0, 0], # t
[0, y_difference], # y
[0, 0], # x
[0, 0]] # c
channel_slice = np.pad(channel_slice, paddings, mode='edge')
return channel_slice
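# Illustrative sketch (not part of the pipeline): np.s_ builds reusable slice objects, which is
# how cut_slice handles 2-, 3-, and 4-dimensional stacks with a single indexing expression.
# The stack shape and slice bounds below are arbitrary toy values.
def _example_slice_object():
    import numpy as np
    stack = np.zeros((10, 100, 50, 2))       # [t, y, x, c]
    slicer = np.s_[:, 20:80, 10:40, :]       # slice object, reusable on any stack
    assert stack[slicer].shape == (10, 60, 30, 2)
    return slicer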
# calculate cross correlation between pixels in channel stack
def channel_xcorr(fov_id, peak_id):
'''
Function calculates the cross correlation of images in a
stack to the first image in the stack. The output is an
array that is the length of the stack with the best cross
correlation between that image and the first image.
The very first value should be 1.
'''
pad_size = params['subtract']['alignment_pad']
# Use this number of images to calculate cross correlations
number_of_images = 20
# load the phase contrast images
image_data = load_stack(fov_id, peak_id, color=params['phase_plane'])
# if there are more images than number_of_images, use number_of_images images evenly
# spaced across the range
if image_data.shape[0] > number_of_images:
spacing = int(image_data.shape[0] / number_of_images)
image_data = image_data[::spacing,:,:]
if image_data.shape[0] > number_of_images:
image_data = image_data[:number_of_images,:,:]
# we will compare all images to this one, needs to be padded to account for image drift
first_img = np.pad(image_data[0,:,:], pad_size, mode='reflect')
xcorr_array = [] # array holds cross correlation values
for img in image_data:
# use match_template to find all cross correlations for the
# current image against the first image.
xcorr_array.append(np.max(match_template(first_img, img)))
return xcorr_array
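# Illustrative sketch (not part of the pipeline): match_template slides a template over a
# (padded) reference image and returns a normalized cross-correlation map; the maximum of that
# map is the similarity score collected above. The toy arrays below are assumptions for
# demonstration only.
def _example_match_template_score():
    import numpy as np
    from skimage.feature import match_template
    rng = np.random.default_rng(0)
    img = rng.random((80, 40))
    padded = np.pad(img, 10, mode='reflect')         # reference enlarged to tolerate drift
    score = np.max(match_template(padded, img))      # close to 1.0 for a self-match
    return score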
### functions about subtraction
# average empty channels from stacks, making another TIFF stack
def average_empties_stack(fov_id, specs, color='c1', align=True):
'''Takes the fov file name and the peak names of the designated empties,
averages them and saves the image
Parameters
fov_id : int
FOV number
specs : dict
specifies whether a channel should be analyzed (1), used for making
an average empty (0), or ignored (-1).
color : string
Which plane to use.
align : boolean
Flag that is passed to the worker function average_empties, indicates
whether images should be aligned before averaging (use False for fluorescent images)
Returns
True if successful.
Saves empty stack to analysis folder
'''
information("Creating average empty channel for FOV %d." % fov_id)
# get peak ids of empty channels for this fov
empty_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 0: # 0 means it should be used for empty
empty_peak_ids.append(peak_id)
empty_peak_ids = sorted(empty_peak_ids) # sort for repeatability
# depending on how many empties there are choose what to do
# if there is no empty the user is going to have to copy another empty stack
if len(empty_peak_ids) == 0:
information("No empty channel designated for FOV %d." % fov_id)
return False
# if there is just one then you can just copy that channel
elif len(empty_peak_ids) == 1:
peak_id = empty_peak_ids[0]
information("One empty channel (%d) designated for FOV %d." % (peak_id, fov_id))
# load the one phase contrast as the empties
avg_empty_stack = load_stack(fov_id, peak_id, color=color)
# but if there is more than one empty you need to align and average them per timepoint
elif len(empty_peak_ids) > 1:
# load the image stacks into memory
empty_stacks = [] # list which holds phase image stacks of designated empties
for peak_id in empty_peak_ids:
# load data and append to list
image_data = load_stack(fov_id, peak_id, color=color)
empty_stacks.append(image_data)
information("%d empty channels designated for FOV %d." % (len(empty_stacks), fov_id))
# go through time points and create list of averaged empties
avg_empty_stack = [] # list will later be concatenated into a numpy array
time_points = range(image_data.shape[0]) # index is time
for t in time_points:
# get images from one timepoint at a time and send to alignment and averaging
imgs = [stack[t] for stack in empty_stacks]
avg_empty = average_empties(imgs, align=align) # function is in mm3
avg_empty_stack.append(avg_empty)
# concatenate list and then save out to tiff stack
avg_empty_stack = np.stack(avg_empty_stack, axis=0)
# save out data
if params['output'] == 'TIFF':
# make new name and save it
empty_filename = params['experiment_name'] + '_xy%03d_empty_%s.tif' % (fov_id, color)
tiff.imsave(os.path.join(params['empty_dir'],empty_filename), avg_empty_stack, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# delete the dataset if it exists (important for debug)
if 'empty_%s' % color in h5f:
del h5f[u'empty_%s' % color]
# the empty channel should be its own dataset
h5ds = h5f.create_dataset(u'empty_%s' % color,
data=avg_empty_stack,
chunks=(1, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
maxshape=(None, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# give attribute which says which channels contribute
h5ds.attrs.create('empty_channels', empty_peak_ids)
h5f.close()
information("Saved empty channel for FOV %d." % fov_id)
return True
# averages a list of empty channels
def average_empties(imgs, align=True):
'''
This function averages a set of images (empty channels) and returns a single image
of the same size. It first aligns the images to the first image before averaging.
Alignment is done by enlarging the first image using reflect padding.
Subsequent images are then aligned to this image and the offset recorded.
These images are padded such that they are the same size as the first (padded) image but
with the image in the correct (aligned) place. Reflect padding is again used.
The images are then placed in a stack and averaged. This image is trimmed so it is the size
of the original images
Called by
average_empties_stack
'''
aligned_imgs = [] # list contains the aligned, padded images
if align:
# pixel size to use for padding (amount that alignment could be off)
pad_size = params['subtract']['alignment_pad']
for n, img in enumerate(imgs):
# if this is the first image, pad it and add it to the stack
if n == 0:
ref_img = np.pad(img, pad_size, mode='reflect') # padded reference image
aligned_imgs.append(ref_img)
# otherwise align this image to the first padded image
else:
# find correlation between a convolution of img against the padded reference
match_result = match_template(ref_img, img)
# find index of highest correlation (relative to top left corner of img)
y, x = np.unravel_index(np.argmax(match_result), match_result.shape)
# pad img so it aligns and is the same size as reference image
pad_img = np.pad(img, ((y, ref_img.shape[0] - (y + img.shape[0])),
(x, ref_img.shape[1] - (x + img.shape[1]))), mode='reflect')
aligned_imgs.append(pad_img)
else:
# don't align, just link the names to go forward easily
aligned_imgs = imgs
# stack the aligned data along 3rd axis
aligned_imgs = np.dstack(aligned_imgs)
# get a mean image along 3rd axis
avg_empty = np.nanmean(aligned_imgs, axis=2)
# trim off the padded edges (only if images were aligned, otherwise there was no padding)
if align:
avg_empty = avg_empty[pad_size:-1*pad_size, pad_size:-1*pad_size]
# change type back to unsigned 16 bit not floats
avg_empty = avg_empty.astype(dtype='uint16')
return avg_empty
# this function is used when one FOV doesn't have an empty
def copy_empty_stack(from_fov, to_fov, color='c1'):
'''Copy an empty stack from one FOV to another'''
# load empty stack from one FOV
information('Loading empty stack from FOV {} to save for FOV {}.'.format(from_fov, to_fov))
avg_empty_stack = load_stack(from_fov, 0, color='empty_{}'.format(color))
# save out data
if params['output'] == 'TIFF':
# make new name and save it
empty_filename = params['experiment_name'] + '_xy%03d_empty_%s.tif' % (to_fov, color)
tiff.imsave(os.path.join(params['empty_dir'],empty_filename), avg_empty_stack, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % to_fov), 'r+')
# delete the dataset if it exists (important for debug)
if 'empty_%s' % color in h5f:
del h5f[u'empty_%s' % color]
# the empty channel should be its own dataset
h5ds = h5f.create_dataset(u'empty_%s' % color,
data=avg_empty_stack,
chunks=(1, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
maxshape=(None, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# give attribute which says which channels contribute. Just put 0
h5ds.attrs.create('empty_channels', [0])
h5f.close()
information("Saved empty channel for FOV %d." % to_fov)
# Do subtraction for an fov over many timepoints
def subtract_fov_stack(fov_id, specs, color='c1', method='phase'):
'''
For a given FOV, loads the precomputed empty stack and does subtraction on
all peaks in the FOV designated to be analyzed
Parameters
----------
color : string, 'c1', 'c2', etc.
This is the channel to subtract. It will be appended to the word empty.
Called by
mm3_Subtract.py
Calls
mm3.subtract_phase
'''
information('Subtracting peaks for FOV %d.' % fov_id)
# load empty stack (feed dummy peak number to get the empty)
avg_empty_stack = load_stack(fov_id, 0, color='empty_{}'.format(color))
# determine which peaks are to be analyzed
ana_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1: # 1 means analyze (0 means it should be used for empty, -1 is ignore)
ana_peak_ids.append(peak_id)
ana_peak_ids = sorted(ana_peak_ids) # sort for repeatability
information("Subtracting %d channels for FOV %d." % (len(ana_peak_ids), fov_id))
# just return if there are no peaks to analyze
if not ana_peak_ids:
return False
# load images for the peak and get phase images
for peak_id in ana_peak_ids:
information('Subtracting peak %d.' % peak_id)
image_data = load_stack(fov_id, peak_id, color=color)
# make a list for all time points to send to a multiprocessing pool
# list will be the length of image_data, with tuples of (image, empty)
subtract_pairs = zip(image_data, avg_empty_stack)
# # set up multiprocessing pool to do subtraction. Should wait until finished
# pool = Pool(processes=params['num_analyzers'])
# if method == 'phase':
# subtracted_imgs = pool.map(subtract_phase, subtract_pairs, chunksize=10)
# elif method == 'fluor':
# subtracted_imgs = pool.map(subtract_fluor, subtract_pairs, chunksize=10)
# pool.close() # tells the process nothing more will be added.
# pool.join() # blocks script until everything has been processed and workers exit
# linear loop for debug
subtracted_imgs = [subtract_phase(subtract_pair) for subtract_pair in subtract_pairs]
# stack them up along a time axis
subtracted_stack = np.stack(subtracted_imgs, axis=0)
# save out the subtracted stack
if params['output'] == 'TIFF':
sub_filename = params['experiment_name'] + '_xy%03d_p%04d_sub_%s.tif' % (fov_id, peak_id, color)
tiff.imsave(os.path.join(params['sub_dir'],sub_filename), subtracted_stack, compress=4) # save it
if fov_id==1 and peak_id<50:
napari.current_viewer().add_image(subtracted_stack, name='Subtracted' + '_xy1_p'+str(peak_id)+'_sub_'+str(color)+'.tif', visible=True)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put subtracted channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_sub_%s' % (peak_id, color) in h5g:
del h5g['p%04d_sub_%s' % (peak_id, color)]
h5ds = h5g.create_dataset(u'p%04d_sub_%s' % (peak_id, color),
data=subtracted_stack,
chunks=(1, subtracted_stack.shape[1], subtracted_stack.shape[2]),
maxshape=(None, subtracted_stack.shape[1], subtracted_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
information("Saved subtracted channel %d." % peak_id)
if params['output'] == 'HDF5':
h5f.close()
return True
# subtracts one phase contrast image from another.
def subtract_phase(image_pair):
'''subtract_phase aligns and subtracts an empty channel image from a channel image.
Modified from subtract_phase_only by jt on 20160511
The subtracted image returned is the same size as the image given. It may however include
data points around the edge that are meaningless but not marked.
We align the empty channel to the phase channel, then subtract.
Parameters
image_pair : tuple of length two with; (image, empty_mean)
Returns
channel_subtracted : np.array
The subtracted image
Called by
subtract_fov_stack
'''
# get out data and pad
cropped_channel, empty_channel = image_pair # [channel slice, empty slice]
# this is for aligning the empty channel to the cell channel.
### Pad cropped channel.
pad_size = params['subtract']['alignment_pad'] # pixel size to use for padding (amount that alignment could be off)
padded_chnl = np.pad(cropped_channel, pad_size, mode='reflect')
# ### Align channel to empty using match template.
# use match template to get a correlation array and find the position of maximum overlap
match_result = match_template(padded_chnl, empty_channel)
# get row and colum of max correlation value in correlation array
y, x = np.unravel_index(np.argmax(match_result), match_result.shape)
# pad the empty channel according to alignment to be overlayed on padded channel.
empty_paddings = [[y, padded_chnl.shape[0] - (y + empty_channel.shape[0])],
[x, padded_chnl.shape[1] - (x + empty_channel.shape[1])]]
aligned_empty = np.pad(empty_channel, empty_paddings, mode='reflect')
# now trim it off so it is the same size as the original channel
aligned_empty = aligned_empty[pad_size:-1*pad_size, pad_size:-1*pad_size]
### Compute the difference between the empty and channel phase contrast images
# subtract cropped cell image from empty channel.
channel_subtracted = aligned_empty.astype('int32') - cropped_channel.astype('int32')
# channel_subtracted = cropped_channel.astype('int32') - aligned_empty.astype('int32')
# just zero out anything less than 0. This is what Sattar does
channel_subtracted[channel_subtracted < 0] = 0
channel_subtracted = channel_subtracted.astype('uint16') # change back to 16bit
return channel_subtracted
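# Illustrative sketch (not part of the pipeline): the core of the phase subtraction is a signed
# difference followed by clipping negatives to zero and casting back to uint16. The small
# arrays below are toy values for demonstration only.
def _example_subtract_and_clip():
    import numpy as np
    empty = np.array([[100, 200], [300, 400]], dtype='uint16')
    channel = np.array([[150, 150], [250, 500]], dtype='uint16')
    diff = empty.astype('int32') - channel.astype('int32')   # signed so negatives are possible
    diff[diff < 0] = 0                                        # zero out negative values
    return diff.astype('uint16')                              # [[0, 50], [50, 0]]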
# subtract one fluorescence image from another.
def subtract_fluor(image_pair):
''' subtract_fluor does a simple subtraction of one image to another. Unlike subtract_phase,
there is no alignment. Also, the empty channel is subtracted from the full channel.
Parameters
image_pair : tuple of length two with; (image, empty_mean)
Returns
channel_subtracted : np.array
The subtracted image.
Called by
subtract_fov_stack
'''
# get out data and pad
cropped_channel, empty_channel = image_pair # [channel slice, empty slice]
# check frame size of cropped channel and background, always keep crop channel size the same
crop_size = np.shape(cropped_channel)[:2]
empty_size = np.shape(empty_channel)[:2]
if crop_size != empty_size:
if crop_size[0] > empty_size[0] or crop_size[1] > empty_size[1]:
pad_row_length = max(crop_size[0] - empty_size[0], 0) # prevent negatives
pad_column_length = max(crop_size[1] - empty_size[1], 0)
empty_channel = np.pad(empty_channel,
[[np.int(.5*pad_row_length), pad_row_length-np.int(.5*pad_row_length)],
[np.int(.5*pad_column_length), pad_column_length-np.int(.5*pad_column_length)],
[0,0]], 'edge')
# mm3.information('size adjusted 1')
empty_size = np.shape(empty_channel)[:2]
if crop_size[0] < empty_size[0] or crop_size[1] < empty_size[1]:
empty_channel = empty_channel[:crop_size[0], :crop_size[1],]
### Compute the difference between the empty and channel phase contrast images
# subtract cropped cell image from empty channel.
channel_subtracted = cropped_channel.astype('int32') - empty_channel.astype('int32')
# channel_subtracted = cropped_channel.astype('int32') - aligned_empty.astype('int32')
# just zero out anything less than 0.
channel_subtracted[channel_subtracted < 0] = 0
channel_subtracted = channel_subtracted.astype('uint16') # change back to 16bit
return channel_subtracted
### functions that deal with segmentation and lineages
# Do segmentation for an channel time stack
def segment_chnl_stack(fov_id, peak_id):
'''
For a given fov and peak (channel), do segmentation for all images in the
subtracted .tif stack.
Called by
mm3_Segment.py
Calls
mm3.segment_image
'''
information('Segmenting FOV %d, channel %d.' % (fov_id, peak_id))
# load subtracted images
sub_stack = load_stack(fov_id, peak_id, color='sub_{}'.format(params['phase_plane']))
# set up multiprocessing pool to do segmentation. Will do everything before going on.
#pool = Pool(processes=params['num_analyzers'])
# send the 3d array to multiprocessing
#segmented_imgs = pool.map(segment_image, sub_stack, chunksize=8)
#pool.close() # tells the process nothing more will be added.
#pool.join() # blocks script until everything has been processed and workers exit
# image by image for debug
segmented_imgs = []
for sub_image in sub_stack:
segmented_imgs.append(segment_image(sub_image))
# stack them up along a time axis
segmented_imgs = np.stack(segmented_imgs, axis=0)
segmented_imgs = segmented_imgs.astype('uint8')
# save out the segmented stack
if params['output'] == 'TIFF':
seg_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['seg_img'])
tiff.imsave(os.path.join(params['seg_dir'],seg_filename),
segmented_imgs, compress=5)
if fov_id==1 and peak_id<50:
napari.current_viewer().add_image(segmented_imgs, name='Segmented' + '_xy1_p'+str(peak_id)+'_sub_'+str(params['seg_img'])+'.tif', visible=True)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put segmented channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_%s' % (peak_id, params['seg_img']) in h5g:
del h5g['p%04d_%s' % (peak_id, params['seg_img'])]
h5ds = h5g.create_dataset(u'p%04d_%s' % (peak_id, params['seg_img']),
data=segmented_imgs,
chunks=(1, segmented_imgs.shape[1], segmented_imgs.shape[2]),
maxshape=(None, segmented_imgs.shape[1], segmented_imgs.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
h5f.close()
information("Saved segmented channel %d." % peak_id)
return True
# segmentation algorithm
def segment_image(image):
'''Segments a subtracted image and returns a labeled image
Parameters
image : a ndarray which is an image. This should be the subtracted image
Returns
labeled_image : a ndarray which is also an image. Labeled values, which
should correspond to cells, all have the same integer value starting with 1.
Non labeled area should have value zero.
'''
# load in segmentation parameters
OTSU_threshold = params['segment']['otsu']['OTSU_threshold']
first_opening_size = params['segment']['otsu']['first_opening_size']
distance_threshold = params['segment']['otsu']['distance_threshold']
second_opening_size = params['segment']['otsu']['second_opening_size']
min_object_size = params['segment']['otsu']['min_object_size']
# threshold image
try:
thresh = threshold_otsu(image) # finds optimal OTSU threshold value
except:
return np.zeros_like(image)
threshholded = image > OTSU_threshold*thresh # will create binary image
# if there are no cells, good to clear the border
# because otherwise the OTSU is just for random bullshit, most
# likely on the side of the image
threshholded = segmentation.clear_border(threshholded)
# Opening = erosion then dilation.
# opening smooths images, breaks isthmuses, and eliminates protrusions.
# "opens" dark gaps between bright features.
morph = morphology.binary_opening(threshholded, morphology.disk(first_opening_size))
# if this image is empty at this point (likely if there were no cells), just return
# zero array
if np.amax(morph) == 0:
return np.zeros_like(image)
### Calculate distance matrix, use as markers for random walker (diffusion watershed)
# Generate the markers based on distance to the background
distance = ndi.distance_transform_edt(morph)
# threshold distance image
distance_thresh = np.zeros_like(distance)
distance_thresh[distance < distance_threshold] = 0
distance_thresh[distance >= distance_threshold] = 1
# do an extra opening on the distance
distance_opened = morphology.binary_opening(distance_thresh,
morphology.disk(second_opening_size))
# remove artifacts connected to image border
cleared = segmentation.clear_border(distance_opened)
# remove small objects. remove_small_objects wants a
# labeled image and will fail if there is only one label. Return a zero image in that case.
# could have used try/except but remove_small_objects loves to issue warnings.
cleared, label_num = morphology.label(cleared, connectivity=1, return_num=True)
if label_num > 1:
cleared = morphology.remove_small_objects(cleared, min_size=min_object_size)
else:
# if there are no labels, then just return the cleared image as it is zero
return np.zeros_like(image)
# relabel now that small objects and labels on edges have been cleared
markers = morphology.label(cleared, connectivity=1)
# just break if there is no label
if np.amax(markers) == 0:
return np.zeros_like(image)
# the binary image for the watershed, which uses the unmodified OTSU threshold
threshholded_watershed = threshholded
threshholded_watershed = segmentation.clear_border(threshholded_watershed)
# label using the random walker (diffusion watershed) algorithm
try:
# set anything outside of OTSU threshold to -1 so it will not be labeled
markers[threshholded_watershed == 0] = -1
# here is the main algorithm
labeled_image = segmentation.random_walker(-1*image, markers)
# put negative values back to zero for proper image
labeled_image[labeled_image == -1] = 0
except:
return np.zeros_like(image)
return labeled_image
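# Illustrative sketch (not part of the pipeline): segment_image combines an Otsu threshold, a
# Euclidean distance transform to generate markers, and random-walker labeling. The standalone
# snippet below shows just the threshold + distance-transform marker step on a synthetic blob;
# the image and the distance cutoff are toy assumptions, not the pipeline's parameters.
def _example_otsu_distance_markers():
    import numpy as np
    from scipy import ndimage as ndi
    from skimage.filters import threshold_otsu
    img = np.zeros((40, 40))
    img[10:30, 15:25] = 100.0                   # one bright fake "cell"
    binary = img > threshold_otsu(img)          # Otsu splits background from the blob
    distance = ndi.distance_transform_edt(binary)
    markers, n = ndi.label(distance > 2)        # seeds well inside the blob
    return markers, n                           # n == 1 for this single blob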
# loss functions for model
def dice_coeff(y_true, y_pred):
smooth = 1.
# Flatten
y_true_f = tf.reshape(y_true, [-1])
y_pred_f = tf.reshape(y_pred, [-1])
intersection = tf.reduce_sum(y_true_f * y_pred_f)
score = (2. * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
return score
def dice_loss(y_true, y_pred):
loss = 1 - dice_coeff(y_true, y_pred)
return loss
def bce_dice_loss(y_true, y_pred):
loss = losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
return loss
def tversky_loss(y_true, y_pred):
alpha = 0.5
beta = 0.5
ones = K.ones((512,512,3)) #K.ones(K.shape(y_true))
p0 = y_pred # proba that voxels are class i
p1 = ones-y_pred # proba that voxels are not class i
g0 = y_true
g1 = ones-y_true
num = K.sum(p0*g0, (0,1,2))
den = num + alpha*K.sum(p0*g1,(0,1,2)) + beta*K.sum(p1*g0,(0,1,2))
T = K.sum(num/den) # when summing over classes, T has dynamic range [0 Ncl]
Ncl = K.cast(K.shape(y_true)[-1], 'float32')
return Ncl-T
def cce_tversky_loss(y_true, y_pred):
loss = losses.categorical_crossentropy(y_true, y_pred) + tversky_loss(y_true, y_pred)
return loss
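# Illustrative sketch (not part of the pipeline): the Dice coefficient above is
# 2*|intersection| / (|y_true| + |y_pred|) with a smoothing term. The NumPy version below
# mirrors the TensorFlow implementation on toy binary masks; the values are assumptions for
# demonstration only.
def _example_dice_numpy():
    import numpy as np
    smooth = 1.0
    y_true = np.array([1, 1, 0, 0], dtype=float)
    y_pred = np.array([1, 0, 0, 0], dtype=float)
    intersection = np.sum(y_true * y_pred)
    dice = (2.0 * intersection + smooth) / (np.sum(y_true) + np.sum(y_pred) + smooth)
    return dice          # (2*1 + 1) / (2 + 1 + 1) = 0.75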
def get_pad_distances(unet_shape, img_height, img_width):
'''Finds padding and trimming sizes to make the input image the same as the size expected by the U-net model.
Padding is split evenly between left/right and top/bottom of the image. Trimming is only done from the right or bottom.
'''
half_width_pad = (unet_shape[1]-img_width)/2
if half_width_pad > 0:
left_pad = int(np.floor(half_width_pad))
right_pad = int(np.ceil(half_width_pad))
right_trim = 0
else:
left_pad = 0
right_pad = 0
right_trim = img_width - unet_shape[1]
half_height_pad = (unet_shape[0]-img_height)/2
if half_height_pad > 0:
top_pad = int(np.floor(half_height_pad))
bottom_pad = int(np.ceil(half_height_pad))
bottom_trim = 0
else:
top_pad = 0
bottom_pad = 0
bottom_trim = img_height - unet_shape[0]
pad_dict = {'top_pad' : top_pad,
'bottom_pad' : bottom_pad,
'right_pad' : right_pad,
'left_pad' : left_pad,
'bottom_trim' : bottom_trim,
'right_trim' : right_trim}
return pad_dict
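# Illustrative sketch (not part of the pipeline): for a U-net expecting 256x32 inputs, a 230x36
# channel would be padded 13 px on top and bottom and trimmed 4 px from the right. The shapes
# below are arbitrary toy values, not the trained model's real input size.
def _example_pad_distances():
    pads = get_pad_distances((256, 32), img_height=230, img_width=36)
    assert pads['top_pad'] == 13 and pads['bottom_pad'] == 13
    assert pads['right_trim'] == 4 and pads['bottom_trim'] == 0
    return pads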
#@profile
def segment_cells_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model):
batch_size = params['segment']['batch_size']
cellClassThreshold = params['segment']['cell_class_threshold']
if cellClassThreshold == 'None': # yaml imports None as a string
cellClassThreshold = False
min_object_size = params['segment']['min_object_size']
# arguments to data generator
# data_gen_args = {'batch_size':batch_size,
# 'n_channels':1,
# 'normalize_to_one':False,
# 'shuffle':False}
# arguments to predict_generator
predict_args = dict(use_multiprocessing=True,
workers=params['num_analyzers'],
verbose=1)
for peak_id in ana_peak_ids:
information('Segmenting peak {}.'.format(peak_id))
img_stack = load_stack(fov_id, peak_id, color=params['phase_plane'])
if params['segment']['normalize_to_one']:
med_stack = np.zeros(img_stack.shape)
selem = morphology.disk(1)
for frame_idx in range(img_stack.shape[0]):
tmpImg = img_stack[frame_idx,...]
med_stack[frame_idx,...] = median(tmpImg, selem)
# robust normalization of peak's image stack to 1
max_val = np.max(med_stack)
img_stack = img_stack/max_val
img_stack[img_stack > 1] = 1
# trim and pad image to correct size
img_stack = img_stack[:, :unet_shape[0], :unet_shape[1]]
img_stack = np.pad(img_stack,
((0,0),
(pad_dict['top_pad'],pad_dict['bottom_pad']),
(pad_dict['left_pad'],pad_dict['right_pad'])),
mode='constant')
img_stack = np.expand_dims(img_stack, -1) # TF expects images to be 4D
# set up image generator
# image_generator = CellSegmentationDataGenerator(img_stack, **data_gen_args)
image_datagen = ImageDataGenerator()
image_generator = image_datagen.flow(x=img_stack,
batch_size=batch_size,
shuffle=False) # keep same order
# predict cell locations. This has multiprocessing built in but I need to mess with the parameters to see how to best utilize it. ***
predictions = model.predict_generator(image_generator, **predict_args)
# post processing
# remove padding including the added last dimension
predictions = predictions[:, pad_dict['top_pad']:unet_shape[0]-pad_dict['bottom_pad'],
pad_dict['left_pad']:unet_shape[1]-pad_dict['right_pad'], 0]
# pad back in case the image had been trimmed
predictions = np.pad(predictions,
((0,0),
(0,pad_dict['bottom_trim']),
(0,pad_dict['right_trim'])),
mode='constant')
if params['segment']['save_predictions']:
pred_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['pred_img'])
if not os.path.isdir(params['pred_dir']):
os.makedirs(params['pred_dir'])
int_preds = (predictions * 255).astype('uint8')
tiff.imsave(os.path.join(params['pred_dir'], pred_filename),
int_preds, compress=4)
# binarize and label (if there is a threshold value; otherwise, save a grayscale image for debugging)
if cellClassThreshold:
predictions[predictions >= cellClassThreshold] = 1
predictions[predictions < cellClassThreshold] = 0
predictions = predictions.astype('uint8')
segmented_imgs = np.zeros(predictions.shape, dtype='uint8')
# process and label each frame of the channel
for frame in range(segmented_imgs.shape[0]):
# get rid of small holes
predictions[frame,:,:] = morphology.remove_small_holes(predictions[frame,:,:], min_object_size)
# get rid of small objects.
predictions[frame,:,:] = morphology.remove_small_objects(morphology.label(predictions[frame,:,:], connectivity=1), min_size=min_object_size)
# remove labels which touch the border
predictions[frame,:,:] = segmentation.clear_border(predictions[frame,:,:])
# relabel now
segmented_imgs[frame,:,:] = morphology.label(predictions[frame,:,:], connectivity=1)
else: # in this case just scale the 0 to 1 float image up to an integer grayscale image (0 to 100) for debugging
information('Converting predictions to grayscale.')
segmented_imgs = np.around(predictions * 100)
# both binary and grayscale should be 8bit. This may be ensured above and is unnecessary
segmented_imgs = segmented_imgs.astype('uint8')
# save out the segmented stacks
if params['output'] == 'TIFF':
seg_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['seg_img'])
tiff.imsave(os.path.join(params['seg_dir'], seg_filename),
segmented_imgs, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put segmented channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_%s' % (peak_id, params['seg_img']) in h5g:
del h5g['p%04d_%s' % (peak_id, params['seg_img'])]
h5ds = h5g.create_dataset(u'p%04d_%s' % (peak_id, params['seg_img']),
data=segmented_imgs,
chunks=(1, segmented_imgs.shape[1], segmented_imgs.shape[2]),
maxshape=(None, segmented_imgs.shape[1], segmented_imgs.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
h5f.close()
#@profile
def segment_fov_unet(fov_id, specs, model, color=None):
'''
Segments the channels from one fov using the U-net CNN model.
Parameters
----------
fov_id : int
specs : dict
model : TensorFlow model
'''
information('Segmenting FOV {} with U-net.'.format(fov_id))
if color is None:
color = params['phase_plane']
# load segmentation parameters
unet_shape = (params['segment']['trained_model_image_height'],
params['segment']['trained_model_image_width'])
### determine stitching of images.
# need channel shape, specifically the width. load first for example
# this assumes that all channels are the same size for this FOV, which they should
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
break # just break out with the current peak_id
img_stack = load_stack(fov_id, peak_id, color=color)
img_height = img_stack.shape[1]
img_width = img_stack.shape[2]
pad_dict = get_pad_distances(unet_shape, img_height, img_width)
# determine how many channels we have to analyze for this FOV
ana_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
ana_peak_ids.append(peak_id)
ana_peak_ids.sort() # sort for repeatability
#ana_peak_ids = ana_peak_ids[:2]
segment_cells_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model)
information("Finished segmentation for FOV {}.".format(fov_id))
return
def segment_foci_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model):
# batch_size = params['foci']['batch_size']
focusClassThreshold = params['foci']['focus_threshold']
if focusClassThreshold == 'None': # yaml imports None as a string
focusClassThreshold = False
# arguments to data generator
data_gen_args = {'batch_size':params['foci']['batch_size'],
'n_channels':1,
'normalize_to_one':False,
'shuffle':False}
# arguments to predict_generator
predict_args = dict(use_multiprocessing=False,
# workers=params['num_analyzers'],
verbose=1)
for peak_id in ana_peak_ids:
information('Segmenting foci in peak {}.'.format(peak_id))
# print(peak_id) # debugging a shape error at some traps
img_stack = load_stack(fov_id, peak_id, color=params['foci']['foci_plane'])
# pad image to correct size
img_stack = np.pad(img_stack,
((0,0),
(pad_dict['top_pad'],pad_dict['bottom_pad']),
(pad_dict['left_pad'],pad_dict['right_pad'])),
mode='constant')
img_stack = np.expand_dims(img_stack, -1)
# set up image generator
image_generator = FocusSegmentationDataGenerator(img_stack, **data_gen_args)
# predict foci locations.
predictions = model.predict_generator(image_generator, **predict_args)
# post processing
# remove padding including the added last dimension
predictions = predictions[:, pad_dict['top_pad']:unet_shape[0]-pad_dict['bottom_pad'],
pad_dict['left_pad']:unet_shape[1]-pad_dict['right_pad'], 0]
if params['foci']['save_predictions']:
pred_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['pred_img'])
if not os.path.isdir(params['foci_pred_dir']):
os.makedirs(params['foci_pred_dir'])
int_preds = (predictions * 255).astype('uint8')
tiff.imsave(os.path.join(params['foci_pred_dir'], pred_filename),
int_preds, compress=4)
# binarized and label (if there is a threshold value, otherwise, save a grayscale for debug)
if focusClassThreshold:
predictions[predictions >= focusClassThreshold] = 1
predictions[predictions < focusClassThreshold] = 0
predictions = predictions.astype('uint8')
segmented_imgs = np.zeros(predictions.shape, dtype='uint8')
# process and label each frame of the channel
for frame in range(segmented_imgs.shape[0]):
# get rid of small holes
# predictions[frame,:,:] = morphology.remove_small_holes(predictions[frame,:,:], min_object_size)
# get rid of small objects.
# predictions[frame,:,:] = morphology.remove_small_objects(morphology.label(predictions[frame,:,:], connectivity=1), min_size=min_object_size)
# remove labels which touch the border
predictions[frame,:,:] = segmentation.clear_border(predictions[frame,:,:])
# relabel now
segmented_imgs[frame,:,:] = morphology.label(predictions[frame,:,:], connectivity=2)
else: # otherwise, scale the 0-to-1 float predictions up to a 0-to-100 grayscale image for inspection
information('Converting predictions to grayscale.')
segmented_imgs = np.around(predictions * 100)
# both binary and grayscale output should be 8-bit; this may already be ensured above
segmented_imgs = segmented_imgs.astype('uint8')
# save out the segmented stacks
if params['output'] == 'TIFF':
seg_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['seg_img'])
tiff.imsave(os.path.join(params['foci_seg_dir'], seg_filename),
segmented_imgs, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put segmented channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_%s' % (peak_id, params['seg_img']) in h5g:
del h5g['p%04d_%s' % (peak_id, params['seg_img'])]
h5ds = h5g.create_dataset(u'p%04d_%s' % (peak_id, params['seg_img']),
data=segmented_imgs,
chunks=(1, segmented_imgs.shape[1], segmented_imgs.shape[2]),
maxshape=(None, segmented_imgs.shape[1], segmented_imgs.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
h5f.close()
def segment_fov_foci_unet(fov_id, specs, model, color=None):
'''
Segments foci in the channels from one fov using the U-net CNN model.
Parameters
----------
fov_id : int
specs : dict
model : TensorFlow model
'''
information('Segmenting FOV {} with U-net.'.format(fov_id))
if color is None:
color = params['phase_plane']
# load segmentation parameters
unet_shape = (params['segment']['trained_model_image_height'],
params['segment']['trained_model_image_width'])
### determine stitching of images.
# need channel shape, specifically the width. load first for example
# this assumes that all channels are the same size for this FOV, which they should be
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
break # just break out with the current peak_id
img_stack = load_stack(fov_id, peak_id, color=color)
img_height = img_stack.shape[1]
img_width = img_stack.shape[2]
# find padding and trimming distances
pad_dict = get_pad_distances(unet_shape, img_height, img_width)
# timepoints = img_stack.shape[0]
# determine how many channels we have to analyze for this FOV
ana_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
ana_peak_ids.append(peak_id)
ana_peak_ids.sort() # sort for repeatability
segment_foci_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model)
information("Finished segmentation for FOV {}.".format(fov_id))
return
# class for image generation for predicting cell locations in phase-contrast images
class CellSegmentationDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self,
img_array,
batch_size=32,
n_channels=1,
shuffle=False,
normalize_to_one=False):
'Initialization'
self.dim = (img_array.shape[1], img_array.shape[2])
self.batch_size = batch_size
self.img_array = img_array
self.img_number = img_array.shape[0]
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(1)
def __len__(self):
'Denotes the number of batches per epoch'
return(int(np.ceil(self.img_number / self.batch_size)))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
array_list_temp = [self.img_array[k,:,:,0] for k in indexes]
# Generate data
X = self.__data_generation(array_list_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.img_number)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, array_list_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels))
# Generate data
for i in range(self.batch_size):
# Store sample
try:
tmpImg = array_list_temp[i]
except IndexError:
X = X[:i,...]
break
# ensure image is uint8
if tmpImg.dtype=="uint16":
tmpImg = tmpImg / 2**16 * 2**8
tmpImg = tmpImg.astype('uint8')
if self.normalize_to_one:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
medImg = median(tmpImg, self.selem)
tmpImg = tmpImg/np.max(medImg)
tmpImg[tmpImg > 1] = 1
X[i,:,:,0] = tmpImg
return (X)
class TemporalCellDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self,
fileName,
batch_size=32,
dim=(32,32,32),
n_channels=1,
n_classes=10,
shuffle=False,
normalize_to_one=False):
'Initialization'
self.dim = dim
self.batch_size = batch_size
self.fileName = fileName
self.n_channels = n_channels
self.n_classes = n_classes
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(1)
def __len__(self):
'Denotes the number of batches per epoch'
# the full image stack is handled as a single batch
return 1
def __getitem__(self, index):
'Generate one batch of data'
# Generate data
X = self.__data_generation()
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
pass
def __data_generation(self):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.dim[2], self.n_channels))
full_stack = io.imread(self.fileName)
if full_stack.dtype=="uint16":
full_stack = full_stack / 2**16 * 2**8
full_stack = full_stack.astype('uint8')
img_height = full_stack.shape[1]
img_width = full_stack.shape[2]
pad_dict = get_pad_distances(self.dim, img_height, img_width)
full_stack = np.pad(full_stack,
((0,0),
(pad_dict['top_pad'],pad_dict['bottom_pad']),
(pad_dict['left_pad'],pad_dict['right_pad'])
),
mode='constant')
full_stack = full_stack.transpose(1,2,0)
# Generate data
for i in range(self.batch_size):
if i == 0:
tmpImg = np.zeros((self.dim[0], self.dim[1], self.dim[2], 1))
tmpImg[:,:,0,0] = full_stack[:,:,0]
for j in range(1,self.dim[2]):
tmpImg[:,:,j,0] = full_stack[:,:,j]
elif i == (self.batch_size - 1):
tmpImg = np.zeros((self.dim[0], self.dim[1], self.dim[2], 1))
tmpImg[:,:,-1,0] = full_stack[:,:,-1]
for j in range(self.dim[2]-1):
tmpImg[:,:,j,0] = full_stack[:,:,j]
else:
tmpImg = np.zeros((self.dim[0], self.dim[1], self.dim[2], 1))
tmpImg[:,:,:,0] = full_stack[:,:,(i-1):(i+2)]
X[i,:,:,:,:] = tmpImg
return X
# class for image generation for predicting cell locations in phase-contrast images
class FocusSegmentationDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self,
img_array,
batch_size=32,
n_channels=1,
shuffle=False,
normalize_to_one=False):
'Initialization'
self.dim = (img_array.shape[1], img_array.shape[2])
self.batch_size = batch_size
self.img_array = img_array
self.img_number = img_array.shape[0]
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(1)
def __len__(self):
'Denotes the number of batches per epoch'
return(int(np.ceil(self.img_number / self.batch_size)))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
array_list_temp = [self.img_array[k,:,:,0] for k in indexes]
# Generate data
X = self.__data_generation(array_list_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.img_number)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, array_list_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels), 'uint16')
if self.normalize_to_one:
max_pixels = []
# Generate data
for i in range(self.batch_size):
# Store sample
try:
tmpImg = array_list_temp[i]
if self.normalize_to_one:
# tmpMedian = filters.median(tmpImg, self.selem)
tmpMax = np.max(tmpImg)
max_pixels.append(tmpMax)
except IndexError:
X = X[:i,...]
break
# ensure image is uint8
# if tmpImg.dtype=="uint16":
# tmpImg = tmpImg / 2**16 * 2**8
# tmpImg = tmpImg.astype('uint8')
# if self.normalize_to_one:
# with warnings.catch_warnings():
# warnings.simplefilter('ignore')
# medImg = median(tmpImg, self.selem)
# tmpImg = tmpImg/np.max(medImg)
# tmpImg[tmpImg > 1] = 1
X[i,:,:,0] = tmpImg
if self.normalize_to_one:
channel_max = np.max(max_pixels) / (2**8 - 1)
# print("Channel max: {}".format(channel_max))
# print("Array max: {}".format(np.max(X)))
X = X/channel_max
# print("Normalized array max: {}".format(np.max(X)))
X[X > 1] = 1
return (X)
# class for image generation for predicting trap locations in phase-contrast images
class TrapSegmentationDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self, img_array, batch_size=32,
n_channels=1, normalize_to_one=False, shuffle=False):
'Initialization'
self.dim = (img_array.shape[1], img_array.shape[2])
self.img_number = img_array.shape[0]
self.img_array = img_array
self.batch_size = batch_size
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(3)
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.ceil(self.img_number / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
array_list_temp = [self.img_array[k,:,:,0] for k in indexes]
# Generate data
X = self.__data_generation(array_list_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.img_number)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, array_list_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels))
# Generate data
for i in range(self.batch_size):
# Store sample
try:
tmpImg = array_list_temp[i]
except IndexError:
X = X[:i,...]
break
if self.normalize_to_one:
medImg = median(tmpImg, self.selem)
tmpImg = medImg/np.max(medImg)
X[i,:,:,0] = tmpImg
return (X)
# class for image generation for classifying traps as good, empty, out-of-focus, or defective
class TrapKymographPredictionDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self, list_fileNames, batch_size=32, dim=(32,32,32), n_channels=1,
n_classes=10, shuffle=False):
'Initialization'
self.dim = dim
self.batch_size = batch_size
self.list_fileNames = list_fileNames
self.n_channels = n_channels
self.n_classes = n_classes
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.ceil(len(self.list_fileNames) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
list_fileNames_temp = [self.list_fileNames[k] for k in indexes]
# Generate data
X = self.__data_generation(list_fileNames_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_fileNames))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, list_fileNames_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels))
# Generate data
for i, fName in enumerate(list_fileNames_temp):
# Store sample
tmpImg = io.imread(fName)
tmpImgShape = tmpImg.shape
if tmpImgShape[0] < self.dim[0]:
t_end = tmpImgShape[0]
else:
t_end = self.dim[0]
X[i,:t_end,:,:] = np.expand_dims(tmpImg[:t_end,:,tmpImg.shape[-1]//2], axis=-1)
return X
def absolute_diff(y_true, y_pred):
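# penalizes mismatch in total segmented area: |sum(y_pred) - sum(y_true)|, normalized by the number of pixels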
y_true_sum = K.sum(y_true)
y_pred_sum = K.sum(y_pred)
diff = K.abs(y_pred_sum - y_true_sum)/tf.to_float(tf.size(y_true))
return diff
def all_loss(y_true, y_pred):
loss = losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred) + absolute_diff(y_true, y_pred)
return loss
def absolute_dice_loss(y_true, y_pred):
loss = dice_loss(y_true, y_pred) + absolute_diff(y_true, y_pred)
return loss
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
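# f2_m and f_precision_m below are F-beta scores: (1+beta^2)*precision*recall / (beta^2*precision + recall).
# beta=2 weights recall more heavily than precision; beta=0.5 weights precision more heavily.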
def f2_m(y_true, y_pred, beta=2):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
numer = (1+beta**2)*recall*precision
denom = recall + (beta**2)*precision + K.epsilon()
return numer/denom
def f_precision_m(y_true, y_pred, beta=0.5):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
numer = (1+beta**2)*recall*precision
denom = recall + (beta**2)*precision + K.epsilon()
return numer/denom
# finds lineages for all peaks in a fov
def make_lineages_fov(fov_id, specs):
'''
For a given fov, create the lineages from the segmented images.
Called by
mm3_Segment.py
Calls
mm3.make_lineage_chnl_stack
'''
ana_peak_ids = [] # channels to be analyzed
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1: # 1 means analyze
ana_peak_ids.append(peak_id)
ana_peak_ids = sorted(ana_peak_ids) # sort for repeatability
information('Creating lineage for FOV %d with %d channels.' % (fov_id, len(ana_peak_ids)))
# just break if there are no peaks to analyze
if not ana_peak_ids:
# returning empty dictionary will add nothing to current cells dictionary
return {}
# This is a list of tuples (fov_id, peak_id) to send to the Pool command
fov_and_peak_ids_list = [(fov_id, peak_id) for peak_id in ana_peak_ids]
# set up multiprocessing pool. will complete pool before going on
#pool = Pool(processes=params['num_analyzers'])
# create the lineages for each peak individually
# the output is a list of dictionaries
#lineages = pool.map(make_lineage_chnl_stack, fov_and_peak_ids_list, chunksize=8)
#pool.close() # tells the process nothing more will be added.
#pool.join() # blocks script until everything has been processed and workers exit
# This is the non-parallelized version (useful for debug)
lineages = []
for fov_and_peak_ids in fov_and_peak_ids_list:
lineages.append(make_lineage_chnl_stack(fov_and_peak_ids))
# combine all dictionaries into one dictionary
Cells = {} # create dictionary to hold all information
for cell_dict in lineages: # for all the other dictionaries in the list
Cells.update(cell_dict) # updates Cells with the entries in cell_dict
return Cells
# get number of cells in each frame and total number of pairwise interactions
def get_cell_counts(regionprops_list):
cell_count_list = [len(time_regions) for time_regions in regionprops_list]
interaction_count_list = []
for i,cell_count in enumerate(cell_count_list):
if i+1 == len(cell_count_list):
break
interaction_count_list.append(cell_count*cell_count_list[i+1])
total_cells = np.sum(cell_count_list)
total_interactions = np.sum(interaction_count_list)
return(total_cells, total_interactions, cell_count_list, interaction_count_list)
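# illustrative example: cell_count_list = [2, 3, 3] gives interaction_count_list = [2*3, 3*3] = [6, 9],
# so total_cells = 8 and total_interactions = 15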
# get cells' information for track prediction
def gather_interactions_and_events(regionprops_list):
total_cells, total_interactions, cell_count_list, interaction_count_list = get_cell_counts(regionprops_list)
# instantiate a 2x5 block for each pair of cells'
# min_y, max_y, centroid_y, area, and orientation
# in reality it would be much, much more efficient to
# look this information up in the data generator at run time
# for now, this will work
pairwise_cell_data = np.zeros((total_interactions,2,5,1))
# make a dictionary, the keys of which will be row indices so that we
# can quickly look up which timepoints/cells correspond to which
# rows of our model's output
pairwise_cell_lookup = {}
# populate arrays
interaction_count = 0
cell_count = 0
for frame, frame_regions in enumerate(regionprops_list):
for region in frame_regions:
cell_label = region.label
y,x = region.centroid
bbox = region.bbox
orientation = region.orientation
min_y = bbox[0]
max_y = bbox[2]
area = region.area
cell_label = region.label
cell_info = (min_y, max_y, y, area, orientation)
cell_count += 1
try:
frame_plus_one_regions = regionprops_list[frame+1]
except IndexError as e:
# print(e)
break
for region_plus_one in frame_plus_one_regions:
paired_cell_label = region_plus_one.label
y,x = region_plus_one.centroid
bbox = region_plus_one.bbox
min_y = bbox[0]
max_y = bbox[2]
area = region_plus_one.area
orientation = region_plus_one.orientation
paired_cell_label = region_plus_one.label
pairwise_cell_data[interaction_count,0,:,0] = cell_info
pairwise_cell_data[interaction_count,1,:,0] = (min_y, max_y, y, area, orientation)
pairwise_cell_lookup[interaction_count] = {'frame':frame, 'cell_label':cell_label, 'paired_cell_label':paired_cell_label}
interaction_count += 1
return(pairwise_cell_data, pairwise_cell_lookup)
# look up which cells are interacting according to the track model
def cell_interaction_lookup(predictions, lookup_table):
'''
Accepts prediction matrix and
'''
frame = []
cell_label = []
paired_cell_label = []
interaction_type = []
# loop over rows of predictions
for row_index in range(predictions.shape[0]):
row_predictions = predictions[row_index]
row_relationship = np.where(row_predictions > 0.95)[0]
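# np.where returns the indices of classes whose predicted probability exceeds 0.95;
# rows with no confident class are skipped below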
if row_relationship.size == 0:
continue
elif row_relationship[0] == 3:
continue
elif row_relationship[0] == 0:
interaction_type.append('migration')
elif row_relationship[0] == 1:
interaction_type.append('child')
elif row_relationship[0] == 2:
interaction_type.append('false_join')
frame.append(lookup_table[row_index]['frame'])
cell_label.append(lookup_table[row_index]['cell_label'])
paired_cell_label.append(lookup_table[row_index]['paired_cell_label'])
track_df = pd.DataFrame(data={'frame':frame,
'cell_label':cell_label,
'paired_cell_label':paired_cell_label,
'interaction_type':interaction_type})
return(track_df)
def get_tracking_model_dict():
model_dict = {}
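# models saved with custom losses/metrics must be reloaded with those functions passed via custom_objects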
if not 'migrate_model' in model_dict:
model_dict['migrate_model'] = models.load_model(params['tracking']['migrate_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
if not 'child_model' in model_dict:
model_dict['child_model'] = models.load_model(params['tracking']['child_model'],
custom_objects={'bce_dice_loss':bce_dice_loss,
'f2_m':f2_m})
if not 'appear_model' in model_dict:
model_dict['appear_model'] = models.load_model(params['tracking']['appear_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
if not 'die_model' in model_dict:
model_dict['die_model'] = models.load_model(params['tracking']['die_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
if not 'disappear_model' in model_dict:
model_dict['disappear_model'] = models.load_model(params['tracking']['disappear_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
if not 'born_model' in model_dict:
model_dict['born_model'] = models.load_model(params['tracking']['born_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
# if not 'zero_cell_model' in model_dict:
# model_dict['zero_cell_model'] = models.load_model(params['tracking']['zero_cell_model'],
# custom_objects={'absolute_dice_loss':absolute_dice_loss,
# 'f2_m':f2_m})
# if not 'one_cell_model' in model_dict:
# model_dict['one_cell_model'] = models.load_model(params['tracking']['one_cell_model'],
# custom_objects={'bce_dice_loss':bce_dice_loss,
# 'f2_m':f2_m})
# if not 'two_cell_model' in model_dict:
# model_dict['two_cell_model'] = models.load_model(params['tracking']['two_cell_model'],
# custom_objects={'all_loss':all_loss,
# 'f2_m':f2_m})
# if not 'geq_three_cell_model' in model_dict:
# model_dict['geq_three_cell_model'] = models.load_model(params['tracking']['geq_three_cell_model'],
# custom_objects={'bce_dice_loss':bce_dice_loss,
# 'f2_m':f2_m})
return(model_dict)
# Creates lineage for a single channel
def make_lineage_chnl_stack(fov_and_peak_id):
'''
Create the lineage for a set of segmented images for one channel. Start by making the
regions in the first time point potential cells. Go forward in time and map regions in
each timepoint to the potential cells in previous time points, building the life of a
cell. Basic checks are used, such as that regions should overlap and should grow
slightly but not shrink too much. If regions do not link back in time, discard them.
If two regions map to one previous region, check whether it is a sensible division event.
Parameters
----------
fov_and_peak_ids : tuple.
(fov_id, peak_id)
Returns
-------
Cells : dict
A dictionary of all the cells from this lineage, divided and undivided
'''
# load in parameters
# if leaf regions see no action for longer than this, drop them
lost_cell_time = params['track']['lost_cell_time']
# only cells with y positions below this value will receive the honor of becoming new
# cells, unless they are daughters of current cells
new_cell_y_cutoff = params['track']['new_cell_y_cutoff']
# only regions with labels less than or equal to this value will be considered to start cells
new_cell_region_cutoff = params['track']['new_cell_region_cutoff']
# get the specific ids from the tuple
fov_id, peak_id = fov_and_peak_id
# start time is the first time point for this series of TIFFs.
start_time_index = min(params['time_table'][fov_id].keys())
information('Creating lineage for FOV %d, channel %d.' % (fov_id, peak_id))
# load segmented data
image_data_seg = load_stack(fov_id, peak_id, color=params['track']['seg_img'])
# image_data_seg = load_stack(fov_id, peak_id, color='seg')
# Calculate all data for all time points.
# this list will be length of the number of time points
regions_by_time = [regionprops(label_image=timepoint) for timepoint in image_data_seg] # removed coordinates='xy'
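# each element of regions_by_time is a list of skimage RegionProperties objects,
# one per labeled cell in that frame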
# Set up data structures.
Cells = {} # Dict that holds all the cell objects, divided and undivided
cell_leaves = [] # cell ids of the current leaves of the growing lineage tree
# go through regions by timepoint and build lineages
# timepoints start with the index of the first image
for t, regions in enumerate(regions_by_time, start=start_time_index):
# if there are cell leaves who are still waiting to be linked, but
# too much time has passed, remove them.
for leaf_id in cell_leaves:
if t - Cells[leaf_id].times[-1] > lost_cell_time:
cell_leaves.remove(leaf_id)
# make all the regions leaves if there are no current leaves
if not cell_leaves:
for region in regions:
if region.centroid[0] < new_cell_y_cutoff and region.label <= new_cell_region_cutoff:
# Create cell and put in cell dictionary
cell_id = create_cell_id(region, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region, t, parent_id=None)
# add this id to the list of current leaves
cell_leaves.append(cell_id)
# Determine if the regions are children of current leaves
else:
### create mapping between regions and leaves
leaf_region_map = {}
leaf_region_map = {leaf_id : [] for leaf_id in cell_leaves}
# get the last y position of current leaves and create tuple with the id
current_leaf_positions = [(leaf_id, Cells[leaf_id].centroids[-1][0]) for leaf_id in cell_leaves]
# go through regions, they will come off in Y position order
for r, region in enumerate(regions):
# create tuple which is cell_id of closest leaf, distance
current_closest = (None, float('inf'))
# check this region against all positions of all current leaf regions,
# find the closest one in y.
for leaf in current_leaf_positions:
# calculate distance between region and leaf
y_dist_region_to_leaf = abs(region.centroid[0] - leaf[1])
# if the distance is closer than before, update
if y_dist_region_to_leaf < current_closest[1]:
current_closest = (leaf[0], y_dist_region_to_leaf)
# update map with the closest region
leaf_region_map[current_closest[0]].append((r, y_dist_region_to_leaf))
# go through the current leaf regions.
# limit to the closest two regions if more than two regions are linked to the leaf
for leaf_id, region_links in six.iteritems(leaf_region_map):
if len(region_links) > 2:
closest_two_regions = sorted(region_links, key=lambda x: x[1])[:2]
# but sort by region order so top region is first
closest_two_regions = sorted(closest_two_regions, key=lambda x: x[0])
# replace value in dictionary
leaf_region_map[leaf_id] = closest_two_regions
# for the discarded regions, put them as new leaves
# if they are near the closed end of the channel
discarded_regions = sorted(region_links, key=lambda x: x[1])[2:]
for discarded_region in discarded_regions:
region = regions[discarded_region[0]]
if region.centroid[0] < new_cell_y_cutoff and region.label <= new_cell_region_cutoff:
cell_id = create_cell_id(region, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region, t, parent_id=None)
cell_leaves.append(cell_id) # add to leaves
else:
# since the regions are ordered, none of the remaining will pass
break
### iterate over the leaves, looking to see what regions connect to them.
for leaf_id, region_links in six.iteritems(leaf_region_map):
# if there is just one suggested descendant,
# see if it checks out and append the data
if len(region_links) == 1:
region = regions[region_links[0][0]] # grab the region from the list
# check if the pairing makes sense based on size and position
# this function returns true if things are okay
if check_growth_by_region(Cells[leaf_id], region):
# grow the cell by the region in this case
Cells[leaf_id].grow(region, t)
# there may be two daughters, or maybe there is just one child and a new cell
elif len(region_links) == 2:
# grab these two daughters
region1 = regions[region_links[0][0]]
region2 = regions[region_links[1][0]]
# check_division returns 3 if cell divided,
# 1 if first region is just the cell growing and the second is trash
# 2 if the second region is the cell, and the first is trash
# or 0 if it cannot be determined.
check_division_result = check_division(Cells[leaf_id], region1, region2)
if check_division_result == 3:
# create two new cells and divide the mother
daughter1_id = create_cell_id(region1, t, peak_id, fov_id)
daughter2_id = create_cell_id(region2, t, peak_id, fov_id)
Cells[daughter1_id] = Cell(daughter1_id, region1, t,
parent_id=leaf_id)
Cells[daughter2_id] = Cell(daughter2_id, region2, t,
parent_id=leaf_id)
Cells[leaf_id].divide(Cells[daughter1_id], Cells[daughter2_id], t)
# remove mother from current leaves
cell_leaves.remove(leaf_id)
# add the daughter ids to list of current leaves if they pass cutoffs
if region1.centroid[0] < new_cell_y_cutoff and region1.label <= new_cell_region_cutoff:
cell_leaves.append(daughter1_id)
if region2.centroid[0] < new_cell_y_cutoff and region2.label <= new_cell_region_cutoff:
cell_leaves.append(daughter2_id)
# 1 means that daughter 1 is just a continuation of the mother
# The other region should become a new leaf if it passes the requirements
elif check_division_result == 1:
Cells[leaf_id].grow(region1, t)
if region2.centroid[0] < new_cell_y_cutoff and region2.label <= new_cell_region_cutoff:
cell_id = create_cell_id(region2, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region2, t, parent_id=None)
cell_leaves.append(cell_id) # add to leaves
# ditto for 2
elif check_division_result == 2:
Cells[leaf_id].grow(region2, t)
if region1.centroid[0] < new_cell_y_cutoff and region1.label <= new_cell_region_cutoff:
cell_id = create_cell_id(region1, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region1, t, parent_id=None)
cell_leaves.append(cell_id) # add to leaves
# return the dictionary with all the cells
return Cells
### Cell class and related functions
# this is the object that holds all information for a detection
class Detection():
'''
The Detection is a single detection in a single frame.
'''
# initialize (birth) the cell
def __init__(self, detection_id, region, t):
'''The detection must be given a unique detection_id and passed the region
information from the segmentation
Parameters
__________
detection_id : str
detection_id is a string in the form fXpXtXrX
f is 3 digit FOV number
p is 4 digit peak number
t is 4 digit time point
r is region label for that segmentation
Use the function create_detection_id to return a proper string.
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
'''
# create all the attributes
# id
self.id = detection_id
# identification convenience
self.fov = int(detection_id.split('f')[1].split('p')[0])
self.peak = int(detection_id.split('p')[1].split('t')[0])
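# e.g. a detection_id like 'f001p0014t0100r02' (illustrative) parses to fov=1 and peak=14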
self.t = t
self.cell_count = 1
# self.abs_times = [params['time_table'][self.fov][t]] # elapsed time in seconds
if region is not None:
self.label = region.label
self.bbox = region.bbox
self.area = region.area
# calculating cell length and width by using the Feret diameter. These values are in pixels
length_tmp, width_tmp = feretdiameter(region)
if length_tmp == None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.length = length_tmp
self.width = width_tmp
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volume = (length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 + (4/3) * np.pi * (width_tmp/2)**3
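# illustrative example: length=20 px, width=10 px gives (20-10)*pi*(10/2)**2 + (4/3)*pi*(10/2)**3,
# i.e. roughly 785.4 + 523.6 = 1309 px^3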
# angle of the fitted ellipsoid and centroid location
self.orientation = region.orientation
self.centroid = region.centroid
else:
self.label = None
self.bbox = None
self.area = None
# calculating cell length and width by using the Feret diameter. These values are in pixels
length_tmp, width_tmp = (None, None)
self.length = None
self.width = None
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volume = None
# angle of the fitted ellipsoid and centroid location
self.orientation = None
self.centroid = None
# this is the object that holds all information for a cell
class Cell():
'''
The Cell class is one cell that has been born. It is not necessarily a cell that
has divided.
'''
# initialize (birth) the cell
def __init__(self, cell_id, region, t, parent_id=None):
'''The cell must be given a unique cell_id and passed the region
information from the segmentation
Parameters
__________
cell_id : str
cell_id is a string in the form fXpXtXrX
f is 3 digit FOV number
p is 4 digit peak number
t is 4 digit time point at time of birth
r is region label for that segmentation
Use the function create_cell_id to return a proper string.
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
parent_id : str
id of the parent if there is one.
'''
# create all the attributes
# id
self.id = cell_id
# identification convenience
self.fov = int(cell_id.split('f')[1].split('p')[0])
self.peak = int(cell_id.split('p')[1].split('t')[0])
self.birth_label = int(cell_id.split('r')[1])
# parent id may be none
self.parent = parent_id
# daughters is updated when cell divides
# if this is none then the cell did not divide
self.daughters = None
# birth and division time
self.birth_time = t
self.division_time = None # filled out if cell divides
# the following information is on a per timepoint basis
self.times = [t]
self.abs_times = [params['time_table'][self.fov][t]] # elapsed time in seconds
self.labels = [region.label]
self.bboxes = [region.bbox]
self.areas = [region.area]
# calculating cell length and width by using the Feret diameter. These values are in pixels
length_tmp, width_tmp = feretdiameter(region)
if length_tmp == None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths = [length_tmp]
self.widths = [width_tmp]
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volumes = [(length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3]
# angle of the fitted ellipsoid and centroid location
self.orientations = [region.orientation]
self.centroids = [region.centroid]
# these are special attributes, as they include information from the daughters upon division
# computed upon division
self.times_w_div = None
self.lengths_w_div = None
self.widths_w_div = None
# this information is the "production" information that
# we want to extract at the end. Some of this is for convenience.
# This is only filled out if a cell divides.
self.sb = None # in um
self.sd = None # this should be combined lengths of daughters, in um
self.delta = None
self.tau = None
self.elong_rate = None
self.septum_position = None
self.width = None
self.death = None
def grow(self, region, t):
'''Append data from a region to this cell.
use cell.times[-1] to get most current value'''
self.times.append(t)
self.abs_times.append(params['time_table'][self.fov][t])
self.labels.append(region.label)
self.bboxes.append(region.bbox)
self.areas.append(region.area)
# calculating cell length and width by using the Feret diameter
length_tmp, width_tmp = feretdiameter(region)
if length_tmp == None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths.append(length_tmp)
self.widths.append(width_tmp)
self.volumes.append((length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3)
self.orientations.append(region.orientation)
self.centroids.append(region.centroid)
def die(self, region, t):
'''
Annotate cell as dying from current t to next t.
'''
self.death = t
def divide(self, daughter1, daughter2, t):
'''Divide the cell and update stats.
daughter1 and daughter2 are instances of the Cell class.
daughter1 is the daughter closer to the closed end.'''
# put the daughter ids into the cell
self.daughters = [daughter1.id, daughter2.id]
# give this guy a division time
self.division_time = daughter1.birth_time
# update times
self.times_w_div = self.times + [self.division_time]
self.abs_times.append(params['time_table'][self.fov][self.division_time])
# flesh out the stats for this cell
# size at birth
self.sb = self.lengths[0] * params['pxl2um']
# force the division length to be the combined lengths of the daughters
self.sd = (daughter1.lengths[0] + daughter2.lengths[0]) * params['pxl2um']
# delta is here for convenience
self.delta = self.sd - self.sb
# generation time. Use more accurate times and convert to minutes
self.tau = np.float64((self.abs_times[-1] - self.abs_times[0]) / 60.0)
# include the data points from the daughters
self.lengths_w_div = [l * params['pxl2um'] for l in self.lengths] + [self.sd]
self.widths_w_div = [w * params['pxl2um'] for w in self.widths] + [((daughter1.widths[0] + daughter2.widths[0])/2) * params['pxl2um']]
# volumes for all timepoints, in um^3
self.volumes_w_div = []
for i in range(len(self.lengths_w_div)):
self.volumes_w_div.append((self.lengths_w_div[i] - self.widths_w_div[i]) *
np.pi * (self.widths_w_div[i]/2)**2 +
(4/3) * np.pi * (self.widths_w_div[i]/2)**3)
# calculate elongation rate.
try:
times = np.float64((np.array(self.abs_times) - self.abs_times[0]) / 60.0)
log_lengths = np.float64(np.log(self.lengths_w_div))
p = np.polyfit(times, log_lengths, 1) # this wants float64
self.elong_rate = p[0] * 60.0 # convert to hours
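# times are in minutes and lengths are log-transformed, so the fitted slope is the
# exponential elongation rate per minute; multiplying by 60 expresses it per hour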
except:
self.elong_rate = np.float64('NaN')
warning('Elongation rate calculation failed for {}.'.format(self.id))
# calculate the septum position as a number between 0 and 1
# which indicates the size of daughter closer to the closed end
# compared to the total size
self.septum_position = daughter1.lengths[0] / (daughter1.lengths[0] + daughter2.lengths[0])
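# e.g. a perfectly symmetric division gives septum_position = 0.5; values below 0.5
# mean the daughter nearer the closed end is the smaller one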
# calculate single width over cell's life
self.width = np.mean(self.widths_w_div)
# convert data to smaller floats. No need for float64
# see https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
convert_to = 'float16' # numpy datatype to convert to
self.sb = self.sb.astype(convert_to)
self.sd = self.sd.astype(convert_to)
self.delta = self.delta.astype(convert_to)
self.elong_rate = self.elong_rate.astype(convert_to)
self.tau = self.tau.astype(convert_to)
self.septum_position = self.septum_position.astype(convert_to)
self.width = self.width.astype(convert_to)
self.lengths = [length.astype(convert_to) for length in self.lengths]
self.lengths_w_div = [length.astype(convert_to) for length in self.lengths_w_div]
self.widths = [width.astype(convert_to) for width in self.widths]
self.widths_w_div = [width.astype(convert_to) for width in self.widths_w_div]
self.volumes = [vol.astype(convert_to) for vol in self.volumes]
self.volumes_w_div = [vol.astype(convert_to) for vol in self.volumes_w_div]
# note the float16 is hardcoded here
self.orientations = [np.float16(orientation) for orientation in self.orientations]
self.centroids = [(y.astype(convert_to), x.astype(convert_to)) for y, x in self.centroids]
def print_info(self):
'''prints information about the cell'''
print('id = %s' % self.id)
print('times = {}'.format(', '.join('{}'.format(t) for t in self.times)))
print('lengths = {}'.format(', '.join('{:.2f}'.format(l) for l in self.lengths)))
class CellTree():
def __init__(self):
self.cells = {}
self.scores = [] # probably needs to be different
self.score = 0
self.cell_id_list = []
def add_cell(self, cell):
self.cells[cell.id] = cell
self.cell_id_list.append(cell.id)
self.cell_id_list.sort()
def update_score(self):
pass
def get_cell(self, cell_id):
return(self.cells[cell_id])
def get_top_from_cell(self, cell_id):
pass
# this is the object that holds all information for a cell
class CellFromGraph():
'''
The CellFromGraph class is one cell that has been born.
It is not necessarily a cell that has divided.
'''
# initialize (birth) the cell
def __init__(self, cell_id, region, t, parent=None):
'''The cell must be given a unique cell_id and passed the region
information from the segmentation
Parameters
__________
cell_id : str
cell_id is a string in the form fXpXtXrX
f is 3 digit FOV number
p is 4 digit peak number
t is 4 digit time point at time of birth
r is region label for that segmentation
Use the function create_cell_id to return a proper string.
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
parent_id : str
id of the parent if there is one.
'''
# create all the attributes
# id
self.id = cell_id
# identification convenience
self.fov = int(cell_id.split('f')[1].split('p')[0])
self.peak = int(cell_id.split('p')[1].split('t')[0])
self.birth_label = int(region.label)
self.regions = [region]
# parent is a CellFromGraph object, can be None
self.parent = parent
# daughters is updated when cell divides
# if this is none then the cell did not divide
self.daughters = None
# birth and division time
self.birth_time = t
self.division_time = None # filled out if cell divides
# the following information is on a per timepoint basis
self.times = [t]
self.abs_times = [params['time_table'][self.fov][t]] # elapsed time in seconds
self.labels = [region.label]
self.bboxes = [region.bbox]
self.areas = [region.area]
# calculating cell length and width by using the Feret diameter. These values are in pixels
length_tmp, width_tmp = feretdiameter(region)
if length_tmp == None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths = [length_tmp]
self.widths = [width_tmp]
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volumes = [(length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3]
# angle of the fitted ellipsoid and centroid location
self.orientations = [region.orientation]
self.centroids = [region.centroid]
# these are special attributes, as they include information from the daughters upon division
# computed upon division
self.times_w_div = None
self.lengths_w_div = None
self.widths_w_div = None
# this information is the "production" information that
# we want to extract at the end. Some of this is for convenience.
# This is only filled out if a cell divides.
self.sb = None # in um
self.sd = None # this should be combined lengths of daughters, in um
self.delta = None
self.tau = None
self.elong_rate = None
self.septum_position = None
self.width = None
self.death = None
self.disappear = None
self.area_mean_fluorescence = {}
self.volume_mean_fluorescence = {}
self.total_fluorescence = {}
self.foci = {}
def __len__(self):
return(len(self.times))
def add_parent(self, parent):
self.parent = parent
def grow(self, region, t):
'''Append data from a region to this cell.
use cell.times[-1] to get most current value'''
self.times.append(t)
self.abs_times.append(params['time_table'][self.fov][t])
self.labels.append(region.label)
self.bboxes.append(region.bbox)
self.areas.append(region.area)
self.regions.append(region)
# calculating cell length and width by using the Feret diameter
length_tmp, width_tmp = feretdiameter(region)
if length_tmp == None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths.append(length_tmp)
self.widths.append(width_tmp)
self.volumes.append((length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3)
self.orientations.append(region.orientation)
self.centroids.append(region.centroid)
def die(self, region, t):
'''
Annotate cell as dying from current t to next t.
'''
self.death = t
def disappears(self, region, t):
'''
Annotate cell as disappearing from current t to next t.
'''
self.disappear = t
def add_daughter(self, daughter, t):
if self.daughters is None:
self.daughters = [daughter]
else:
self.daughters.append(daughter)
assert len(self.daughters) < 3, "Too many daughter cells in cell {}".format(self.id)
# sort daughters by y position, with smaller y-value first.
# this will cause the daughter closer to the closed end of the trap to be listed first.
self.daughters.sort(key=lambda cell: cell.centroids[0][0])
self.divide(t)
def divide(self, t):
'''Divide the cell and update stats.
daughter1 is the daughter closer to the closed end.'''
# put the daughter ids into the cell
# self.daughters = [daughter1.id, daughter2.id]
# give this guy a division time
self.division_time = self.daughters[0].birth_time
# update times
self.times_w_div = self.times + [self.division_time]
self.abs_times.append(params['time_table'][self.fov][self.division_time])
# flesh out the stats for this cell
# size at birth
self.sb = self.lengths[0] * params['pxl2um']
# force the division length to be the combined lengths of the daughters
self.sd = (self.daughters[0].lengths[0] + self.daughters[1].lengths[0]) * params['pxl2um']
# delta is here for convenience
self.delta = self.sd - self.sb
# generation time. Use more accurate times and convert to minutes
self.tau = np.float64((self.abs_times[-1] - self.abs_times[0]) / 60.0)
# include the data points from the daughters
self.lengths_w_div = [l * params['pxl2um'] for l in self.lengths] + [self.sd]
self.widths_w_div = [w * params['pxl2um'] for w in self.widths] + [((self.daughters[0].widths[0] + self.daughters[1].widths[0])/2) * params['pxl2um']]
# volumes for all timepoints, in um^3
self.volumes_w_div = []
for i in range(len(self.lengths_w_div)):
self.volumes_w_div.append((self.lengths_w_div[i] - self.widths_w_div[i]) *
np.pi * (self.widths_w_div[i]/2)**2 +
(4/3) * np.pi * (self.widths_w_div[i]/2)**3)
# calculate elongation rate.
try:
times = np.float64((np.array(self.abs_times) - self.abs_times[0]) / 60.0) # convert times to minutes
log_lengths = np.float64(np.log(self.lengths_w_div))
p = np.polyfit(times, log_lengths, 1) # this wants float64
self.elong_rate = p[0] * 60.0 # convert to hours
except:
self.elong_rate = np.float64('NaN')
warning('Elongation rate calculation failed for {}.'.format(self.id))
# calculate the septum position as a number between 0 and 1
# which indicates the size of daughter closer to the closed end
# compared to the total size
self.septum_position = self.daughters[0].lengths[0] / (self.daughters[0].lengths[0] + self.daughters[1].lengths[0])
# calculate single width over cell's life
self.width = np.mean(self.widths_w_div)
# convert data to smaller floats. No need for float64
# see https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
convert_to = 'float16' # numpy datatype to convert to
self.sb = self.sb.astype(convert_to)
self.sd = self.sd.astype(convert_to)
self.delta = self.delta.astype(convert_to)
self.elong_rate = self.elong_rate.astype(convert_to)
self.tau = self.tau.astype(convert_to)
self.septum_position = self.septum_position.astype(convert_to)
self.width = self.width.astype(convert_to)
self.lengths = [length.astype(convert_to) for length in self.lengths]
self.lengths_w_div = [length.astype(convert_to) for length in self.lengths_w_div]
self.widths = [width.astype(convert_to) for width in self.widths]
self.widths_w_div = [width.astype(convert_to) for width in self.widths_w_div]
self.volumes = [vol.astype(convert_to) for vol in self.volumes]
self.volumes_w_div = [vol.astype(convert_to) for vol in self.volumes_w_div]
# note the float16 is hardcoded here
self.orientations = [np.float16(orientation) for orientation in self.orientations]
self.centroids = [(y.astype(convert_to), x.astype(convert_to)) for y, x in self.centroids]
def add_focus(self, focus, t):
'''Adds a focus to the cell. See function foci_info_unet'''
self.foci[focus.id] = focus
def print_info(self):
'''prints information about the cell'''
print('id = %s' % self.id)
print('times = {}'.format(', '.join('{}'.format(t) for t in self.times)))
print('lengths = {}'.format(', '.join('{:.2f}'.format(l) for l in self.lengths)))
if self.daughters is not None:
print('daughters = {}'.format(', '.join('{}'.format(daughter.id) for daughter in self.daughters)))
if self.parent is not None:
print('parent = {}'.format(self.parent.id))
def make_wide_df(self):
data = {}
data['id'] = self.id
data['fov'] = self.fov
data['trap'] = self.peak
data['parent'] = self.parent
data['child1'] = None
data['child2'] = None
data['division_time'] = self.division_time
data['birth_label'] = self.birth_label
data['birth_time'] = self.birth_time
data['sb'] = self.sb
data['sd'] = self.sd
data['delta'] = self.delta
data['tau'] = self.tau
data['elong_rate'] = self.elong_rate
data['septum_position'] = self.septum_position
data['death'] = self.death
data['disappear'] = self.disappear
if self.daughters is not None:
data['child1'] = self.daughters[0]
if len(self.daughters) == 2:
data['child2'] = self.daughters[1]
df = pd.DataFrame(data, index=[self.id])
return(df)
def make_long_df(self):
data = {}
data['id'] = [self.id]*len(self.times)
data['times'] = self.times
data['length'] = self.lengths
data['volume'] = self.volumes
data['area'] = self.areas
# if a cell divides then there is one extra value in abs_times
if self.division_time is None:
data['seconds'] = self.abs_times
else:
data['seconds'] = self.abs_times[:-1]
# if there is fluorescence data, place it into the dataframe
if len(self.area_mean_fluorescence.keys()) != 0:
for fluorescence_channel in self.area_mean_fluorescence.keys():
data['{}_area_mean_fluorescence'.format(fluorescence_channel)] = self.area_mean_fluorescence[fluorescence_channel]
data['{}_volume_mean_fluorescence'.format(fluorescence_channel)] = self.volume_mean_fluorescence[fluorescence_channel]
data['{}_total_fluorescence'.format(fluorescence_channel)] = self.total_fluorescence[fluorescence_channel]
df = pd.DataFrame(data, index=data['id'])
return(df)
# this is the object that holds all information for a fluorescent focus
# this class can eventually be used in focus tracking, much like the Cell class
# is used for cell tracking
class Focus():
'''
The Focus class holds information on fluorescent foci.
A single focus can be present in multiple different cells.
'''
# initialize the focus
def __init__(self,
cell,
region,
seg_img,
intensity_image,
t):
'''The cell must be given a unique cell_id and passed the region
information from the segmentation
Parameters
__________
cell : a Cell object
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
seg_img : 2D numpy array
Labelled image of cell segmentations
intensity_image : 2D numpy array
Fluorescence image with foci
'''
# create all the attributes
# id
focus_id = create_focus_id(region,
t,
cell.peak,
cell.fov,
experiment_name=params['experiment_name'])
self.id = focus_id
# identification convenience
self.appear_label = int(region.label)
self.regions = [region]
self.fov = cell.fov
self.peak = cell.peak
# cell is a CellFromGraph object
# cells are added later using the .add_cell method
self.cells = [cell]
# daughters is updated when focus splits
# if this is none then the focus did not split
self.parent = None
self.daughters = None
self.merger_partner = None
# appearance and split time
self.appear_time = t
self.split_time = None # filled out if focus splits
# the following information is on a per timepoint basis
self.times = [t]
self.abs_times = [params['time_table'][cell.fov][t]] # elapsed time in seconds
self.labels = [region.label]
self.bboxes = [region.bbox]
self.areas = [region.area]
# calculating focus length and width by using the Feret diameter.
# These values are in pixels
# NOTE: in the future, update to straighten a focus and get straightened length/width
# print(region)
length_tmp = region.major_axis_length
width_tmp = region.minor_axis_length
# length_tmp, width_tmp = feretdiameter(region)
# if length_tmp == None:
# warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths = [length_tmp]
self.widths = [width_tmp]
# calculate focus volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volumes = [(length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3]
# angle of the fitted ellipsoid and centroid location
self.orientations = [region.orientation]
self.centroids = [region.centroid]
# special information for foci
self.elong_rate = None
self.disappear = None
self.area_mean_fluorescence = []
self.volume_mean_fluorescence = []
self.total_fluorescence = []
self.median_fluorescence = []
self.sd_fluorescence = []
self.disp_l = []
self.disp_w = []
self.calculate_fluorescence(seg_img, intensity_image, region)
def __len__(self):
return(len(self.times))
def __str__(self):
return(self.print_info())
def add_cell(self, cell):
self.cells.append(cell)
def add_parent_focus(self, parent):
self.parent = parent
def merge(self, partner):
self.merger_partner = partner
def grow(self,
region,
t,
seg_img,
intensity_image,
current_cell):
'''Append data from a region to this focus.
use self.times[-1] to get most current value.'''
if current_cell is not self.cells[-1]:
self.add_cell(current_cell)
self.times.append(t)
self.abs_times.append(params['time_table'][self.cells[-1].fov][t])
self.labels.append(region.label)
self.bboxes.append(region.bbox)
self.areas.append(region.area)
self.regions.append(region)
# calculating focus length and width by using the Feret diameter
length_tmp = region.major_axis_length
width_tmp = region.minor_axis_length
# length_tmp, width_tmp = feretdiameter(region)
# if length_tmp == None:
# warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths.append(length_tmp)
self.widths.append(width_tmp)
self.volumes.append((length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3)
self.orientations.append(region.orientation)
self.centroids.append(region.centroid)
self.calculate_fluorescence(seg_img, intensity_image, region)
def calculate_fluorescence(self,
seg_img,
intensity_image,
region):
total_fluor = np.sum(intensity_image[seg_img == region.label])
self.total_fluorescence.append(total_fluor)
self.area_mean_fluorescence.append(total_fluor/self.areas[-1])
self.volume_mean_fluorescence.append(total_fluor/self.volumes[-1])
self.median_fluorescence.append(np.median(intensity_image[seg_img == region.label]))
self.sd_fluorescence.append(np.std(intensity_image[seg_img == region.label]))
# get the focus' displacement from center of cell
# find x and y position relative to the whole image (convert from small box)
# calculate distance of foci from middle of cell (scikit image)
orientation = region.orientation
if orientation < 0:
orientation = np.pi+orientation
cell_idx = self.cells[-1].times.index(self.times[-1]) # final time in self.times is current time
cell_centroid = self.cells[-1].centroids[cell_idx]
focus_centroid = region.centroid
disp_y = (focus_centroid[0]-cell_centroid[0])*np.sin(orientation) - (focus_centroid[1]-cell_centroid[1])*np.cos(orientation)
disp_x = (focus_centroid[0]-cell_centroid[0])*np.cos(orientation) + (focus_centroid[1]-cell_centroid[1])*np.sin(orientation)
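# this rotates the focus offset from the cell centroid into the cell's own reference frame,
# so disp_y (stored in disp_l) approximates displacement along the cell's long axis and
# disp_x (stored in disp_w) along its short axis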
# append foci information to the list
self.disp_l = np.append(self.disp_l, disp_y)
self.disp_w = np.append(self.disp_w, disp_x)
def disappears(self, region, t):
'''
Annotate focus as disappearing from current t to next t.
'''
self.disappear = t
def add_daughter(self, daughter, t):
if self.daughters is None:
self.daughters = [daughter]
else:
self.daughters.append(daughter)
# sort daughters by y position, with smaller y-value first.
# this will cause the daughter closer to the closed end of the trap to be listed first.
self.daughters.sort(key=lambda focus: focus.centroids[0][0])
self.divide(t)
def divide(self, t):
'''Split the focus and update stats.
daughter1 is the daughter closer to the closed end.'''
# put the daughter ids into the focus
# self.daughters = [daughter1.id, daughter2.id]
# give this guy a division time
self.split_time = self.daughters[0].appear_time
# convert data to smaller floats. No need for float64
# see https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
convert_to = 'float16' # numpy datatype to convert to
self.lengths = [length.astype(convert_to) for length in self.lengths]
self.widths = [width.astype(convert_to) for width in self.widths]
self.volumes = [vol.astype(convert_to) for vol in self.volumes]
# note the float16 is hardcoded here
self.orientations = [np.float16(orientation) for orientation in self.orientations]
self.centroids = [(y.astype(convert_to), x.astype(convert_to)) for y, x in self.centroids]
def print_info(self):
'''prints information about the focus'''
print('id = %s' % self.id)
print('times = {}'.format(', '.join('{}'.format(t) for t in self.times)))
print('lengths = {}'.format(', '.join('{:.2f}'.format(l) for l in self.lengths)))
if self.daughters is not None:
print('daughters = {}'.format(', '.join('{}'.format(daughter.id) for daughter in self.daughters)))
if self.cells is not None:
print('cells = {}'.format([cell.id for cell in self.cells]))
def make_wide_df(self):
data = {}
data['id'] = self.id
data['cells'] = self.cells
data['parent'] = self.parent
data['child1'] = None
data['child2'] = None
# data['division_time'] = self.division_time
data['appear_label'] = self.appear_label
data['appear_time'] = self.appear_time
data['disappear'] = self.disappear
if self.daughters is not None:
data['child1'] = self.daughters[0]
if len(self.daughters) == 2:
data['child2'] = self.daughters[1]
df = pd.DataFrame(data, index=[self.id])
return(df)
def make_long_df(self):
data = {}
data['id'] = [self.id]*len(self.times)
data['time'] = self.times
# data['cell'] = self.cells
data['length'] = self.lengths
data['volume'] = self.volumes
data['area'] = self.areas
data['seconds'] = self.abs_times
data['area_mean_fluorescence'] = self.area_mean_fluorescence
data['volume_mean_fluorescence'] = self.volume_mean_fluorescence
data['total_fluorescence'] = self.total_fluorescence
data['median_fluorescence'] = self.median_fluorescence
data['sd_fluorescence'] = self.sd_fluorescence
data['disp_l'] = self.disp_l
data['disp_w'] = self.disp_w
# print(data['id'])
df = pd.DataFrame(data, index=data['id'])
return(df)
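# Hedged usage sketch (illustrative, not part of the pipeline itself): once foci have been
# tracked into a dictionary mapping focus id -> focus object (called `Foci` here only for
# illustration), the per-focus dataframes built above can be concatenated for analysis:
#
#   focus_wide_df = pd.concat([focus.make_wide_df() for focus in Foci.values()])
#   focus_long_df = pd.concat([focus.make_long_df() for focus in Foci.values()])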
class PredictTrackDataGenerator(utils.Sequence):
    '''Generates data for running tracking class predictions.
Input is a stack of labeled images'''
def __init__(self,
data,
batch_size=32,
dim=(4,5,9)):
'Initialization'
self.batch_size = batch_size
self.data = data
self.dim = dim
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.ceil(len(self.data) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate keys of the batch
batch_indices = self.indices[index*self.batch_size:(index+1)*self.batch_size]
# Generate data
X = self.__data_generation(batch_indices)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indices = np.arange(len(self.data))
def __data_generation(self, batch_indices):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
# shape is (batch_size, max_cell_num, frame_num, cell_feature_num, 1)
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.dim[2], 1))
# Generate data
        for batch_pos, idx in enumerate(batch_indices):
start_idx = idx-2
end_idx = idx+3
# print(start_idx, end_idx)
if start_idx < 0:
batch_frame_list = []
for empty_idx in range(abs(start_idx)):
batch_frame_list.append([])
batch_frame_list.extend(self.data[0:end_idx])
elif end_idx > len(self.data):
                batch_frame_list = self.data[start_idx:len(self.data)]
                for empty_idx in range(abs(end_idx - len(self.data))):
                    # pad with empty frames so every sample spans five frames, mirroring the start-of-stack case above
                    batch_frame_list.append([])
else:
batch_frame_list = self.data[start_idx:end_idx]
for i,frame_region_list in enumerate(batch_frame_list):
# shape is (max_cell_num, frame_num, cell_feature_num)
# tmp_x = np.zeros((self.dim[0], self.dim[1], self.dim[2]))
if not frame_region_list:
continue
for region_idx, region, in enumerate(frame_region_list):
y,x = region.centroid
bbox = region.bbox
orientation = region.orientation
min_y = bbox[0]
max_y = bbox[2]
min_x = bbox[1]
max_x = bbox[3]
area = region.area
length = region.major_axis_length
cell_label = region.label
cell_index = cell_label - 1
cell_info = (min_x, max_x, x, min_y, max_y, y, orientation, area, length)
if region_idx + 1 > self.dim[0]:
continue
# supplement tmp_x at (region_idx, )
# tmp_x[region_idx, i, :] = cell_info
                    X[batch_pos, cell_index, i, :, 0] = cell_info  # index by position within the batch, not by global index
return X
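# Hedged usage sketch: the generator above is a Keras Sequence, so it can be fed to a model's
# predict call (or predict_generator, depending on the Keras version). `regions_by_time`
# (per-frame lists of skimage regionprops) and `track_model` are illustrative names.
#
#   pred_gen = PredictTrackDataGenerator(regions_by_time, batch_size=32, dim=(4, 5, 9))
#   batch_predictions = track_model.predict(pred_gen)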
def get_greatest_score_info(first_node, second_node, graph):
'''A function that is useful for track linking
'''
score_names = [k for k in graph.get_edge_data(first_node, second_node).keys()]
pred_scores = [val['score'] for k,val in graph.get_edge_data(first_node, second_node).items()]
max_score_index = np.argmax(pred_scores)
max_name = score_names[max_score_index]
max_score = pred_scores[max_score_index]
return(max_name, max_score)
def get_score_by_type(first_node, second_node, graph, score_type='child'):
'''A function useful in track linking
'''
pred_score = graph.get_edge_data(first_node, second_node)[score_type]['score']
return(pred_score)
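# Hedged usage sketch for the two scoring helpers above, assuming `G` is a networkx
# MultiDiGraph whose parallel edges each carry a 'score' attribute keyed by edge type
# (as built by initialize_track_graph further below); the node ids are illustrative.
#
#   edge_type, edge_score = get_greatest_score_info(node_t1, node_t2, G)
#   child_score = get_score_by_type(node_t1, node_t2, G, score_type='child')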
def count_unvisited(G, experiment_name):
count = 0
for node_id in G.nodes:
if node_id.startswith(experiment_name):
if not G.nodes[node_id]['visited']:
count += 1
return(count)
def create_lineages_from_graph(graph,
graph_df,
fov_id,
peak_id,
):
'''
This function iterates through nodes in a graph of detections
to link the nodes as "CellFromGraph" objects, eventually
leading to the ultimate goal of returning
a CellTree object with each cell's information for the experiment.
For now it ignores the number of cells in a detection and simply
assumes a 1:1 relationship between detections and cell number.
'''
# iterate through all nodes in graph
# graph_score = 0
# track_dict = {}
# tracks = CellTree()
tracks = {}
for node_id in graph.nodes:
graph.nodes[node_id]['visited'] = False
graph_df['visited'] = False
    num_unvisited = count_unvisited(graph, params['experiment_name'])
    same_iter_num = 0
while num_unvisited > 0:
# which detection nodes are not yet visited
unvisited_detection_nodes = graph_df[(~(graph_df.visited) & graph_df.node_id.str.startswith(params['experiment_name']))]
# grab the first unvisited node_id from the dataframe
prior_node_id = unvisited_detection_nodes.iloc[0,1]
prior_node_time = graph.nodes[prior_node_id]['time']
prior_node_region = graph.nodes[prior_node_id]['region']
cell_id = create_cell_id(prior_node_region,
prior_node_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
current_cell = CellFromGraph(cell_id,
prior_node_region,
prior_node_time,
parent=None)
if not cell_id in tracks.keys():
tracks[cell_id] = current_cell
else:
current_cell = tracks[cell_id]
# for use later in establishing predecessors
current_node_id = prior_node_id
# set this detection's "visited" status to True in the graph and in the dataframe
graph.nodes[prior_node_id]['visited'] = True
graph_df.iloc[np.where(graph_df.node_id==prior_node_id)[0][0],3] = True
# build current_track list to this detection's node
current_track = collections.deque()
current_track.append(current_node_id)
predecessors_list = [k for k in graph.predecessors(prior_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
while len(unvisited_predecessors_list) != 0:
# initialize a scores array to select highest score from the available options
predecessor_scores = np.zeros(len(unvisited_predecessors_list))
# populate array with scores
for i in range(len(unvisited_predecessors_list)):
predecessor_node_id = unvisited_predecessors_list[i]
edge_type, edge_score = get_greatest_score_info(predecessor_node_id, current_node_id, graph)
predecessor_scores[i] = edge_score
# find highest score
max_index = np.argmax(predecessor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
current_node_id = unvisited_predecessors_list[max_index]
current_track.appendleft(current_node_id)
predecessors_list = [k for k in graph.predecessors(current_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
        while prior_node_id != 'B':
# which nodes succeed our current node?
successor_node_ids = [node_id for node_id in graph.successors(prior_node_id)]
# keep only the potential successor detections that have not yet been visited
unvisited_node_ids = []
for i,successor_node_id in enumerate(successor_node_ids):
# if it starts with params['experiment_name'], it is a detection node, and not born, appear, etc.
if successor_node_id.startswith(params['experiment_name']):
# if it has been used in the cell track graph, i.e., if 'visited' is True,
# move on. Otherwise, append to our list
if graph.nodes[successor_node_id]['visited']:
continue
else:
unvisited_node_ids.append(successor_node_id)
# if it doesn't start with params['experiment_name'], it is a born, appear, etc., and should always be appended
else:
unvisited_node_ids.append(successor_node_id)
# initialize a scores array to select highest score from the available options
successor_scores = np.zeros(len(unvisited_node_ids))
successor_edge_types = []
# populate array with scores
for i in range(len(unvisited_node_ids)):
successor_node_id = unvisited_node_ids[i]
edge_type, edge_score = get_greatest_score_info(prior_node_id, successor_node_id, graph)
successor_scores[i] = edge_score
successor_edge_types.append(edge_type)
# find highest score
max_score = np.max(successor_scores)
max_index = np.argmax(successor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
next_node_id = unvisited_node_ids[max_index]
max_edge_type = successor_edge_types[max_index]
# if the max_score in successor_scores isn't greater than log(0.1), just make the cell disappear for now.
if max_score < np.log(0.1):
max_edge_type = 'disappear'
next_node_id = [n_id for n_id in unvisited_node_ids if n_id.startswith('disappear')][0]
# if this is a division event, add child node as a new cell,
# add the new cell as a daughter to current_cell,
# add current_cell as a parent to new cell.
# Then, search for the second child cell, add it to current_cell, etc.
if max_edge_type == 'child':
new_cell_time = graph.nodes[next_node_id]['time']
new_cell_region = graph.nodes[next_node_id]['region']
new_cell_id = create_cell_id(new_cell_region,
new_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
new_cell = CellFromGraph(new_cell_id,
new_cell_region,
new_cell_time,
parent=current_cell)
tracks[new_cell_id] = new_cell
current_cell.add_daughter(new_cell, new_cell_time)
# initialize a scores array to select highest score from the available options
unvisited_detection_nodes = [unvisited_node_id for unvisited_node_id in unvisited_node_ids if unvisited_node_id.startswith(params['experiment_name'])]
child_scores = np.zeros(len(unvisited_detection_nodes))
# populate array with scores
for i in range(len(unvisited_detection_nodes)):
successor_node_id = unvisited_detection_nodes[i]
if successor_node_id == next_node_id:
child_scores[i] = -np.inf
continue
child_score = get_score_by_type(prior_node_id, successor_node_id, graph, score_type='child')
child_scores[i] = child_score
try:
second_daughter_score = np.max(child_scores)
# sometimes a second daughter doesn't exist: perhaps parent is at mouth of a trap and one
# daughter is lost to the central channel at division time. In this case, do the following:
if second_daughter_score < np.log(0.5):
current_cell = new_cell
else:
second_daughter_index = np.argmax(child_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
other_daughter_node_id = unvisited_detection_nodes[second_daughter_index]
other_daughter_cell_time = graph.nodes[other_daughter_node_id]['time']
other_daughter_cell_region = graph.nodes[other_daughter_node_id]['region']
other_daughter_cell_id = create_cell_id(other_daughter_cell_region,
other_daughter_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
other_daughter_cell = CellFromGraph(other_daughter_cell_id,
other_daughter_cell_region,
other_daughter_cell_time,
parent=current_cell)
tracks[other_daughter_cell_id] = other_daughter_cell
current_cell.add_daughter(other_daughter_cell, new_cell_time)
# now we remove current_cell, since it's done, and move on to one of the daughters
current_cell = new_cell
# sometimes a second daughter doesn't exist: perhaps parent is at mouth of a trap and one
# daughter is lost to the central channel at division time. In this case, do the following:
except IndexError:
current_cell = new_cell
# if this is a migration, grow the current_cell.
elif max_edge_type == 'migrate':
cell_time = graph.nodes[next_node_id]['time']
cell_region = graph.nodes[next_node_id]['region']
current_cell.grow(cell_region, cell_time)
# if the event represents death, kill the cell
elif max_edge_type == 'die':
if prior_node_id.startswith(params['experiment_name']):
death_time = graph.nodes[prior_node_id]['time']
death_region = graph.nodes[prior_node_id]['region']
current_cell.die(death_region, death_time)
# if the event represents disappearance, end the cell
elif max_edge_type == 'disappear':
if prior_node_id.startswith(params['experiment_name']):
disappear_time = graph.nodes[prior_node_id]['time']
disappear_region = graph.nodes[prior_node_id]['region']
current_cell.disappears(disappear_region, disappear_time)
# set the next node to 'visited'
graph.nodes[next_node_id]['visited'] = True
if next_node_id != 'B':
graph_df.iloc[np.where(graph_df.node_id==next_node_id)[0][0],3] = True
# reset prior_node_id to iterate to next frame and append node_id to current track
prior_node_id = next_node_id
if num_unvisited != count_unvisited(graph, params['experiment_name']):
same_iter_num = 0
else:
same_iter_num += 1
num_unvisited = count_unvisited(graph, params['experiment_name'])
print("{} detections remain unvisited.".format(num_unvisited))
if same_iter_num > 10:
print("WARNING: Ten iterations surpassed without decreasing the number of visited nodes.\n \
Breaking tracking loop now. You should probably not trust these results.")
break
return tracks
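# Hedged usage sketch: typical call order for the graph-based lineage reconstruction for one
# FOV/peak. `predictions_dict` and `regions_by_time` are assumed to have been produced by the
# tracking models beforehand; all names here are illustrative.
#
#   G, graph_df = initialize_track_graph(peak_id, fov_id, params['experiment_name'],
#                                        predictions_dict, regions_by_time)
#   tracks = create_lineages_from_graph(G, graph_df, fov_id, peak_id)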
def viterbi_create_lineages_from_graph(graph,
graph_df,
fov_id,
peak_id,
):
'''
This function iterates through nodes in a graph of detections
to link the nodes as "CellFromGraph" objects, eventually
leading to the ultimate goal of returning
a maximally-scoring CellTree object with each cell's information for the experiment.
For now it ignores the number of cells in a detection and simply
assumes a 1:1 relationship between detections and cell number.
'''
# iterate through all nodes in G
graph_score = 0
# track_dict = {}
tracks = CellTree()
    max_time = np.max([graph.nodes[node]['time'] for node in graph.nodes if 'time' in graph.nodes[node]])
print(max_time)
for node_id in graph.nodes:
graph.nodes[node_id]['visited'] = False
graph_df['visited'] = False
    num_unvisited = count_unvisited(graph, params['experiment_name'])
    same_iter_num = 0
for t in range(1,max_time+1):
if t > 1:
prior_time_nodes = time_nodes
        if t == 1:
            time_nodes = [node for node in graph.nodes if graph.nodes[node].get('time') == t]
        else:
            time_nodes = next_time_nodes
        if t != max_time:
            next_time_nodes = [node for node in graph.nodes if graph.nodes[node].get('time') == t+1]
for node in time_nodes:
pass
while num_unvisited > 0:
# which detection nodes are not yet visited
unvisited_detection_nodes = graph_df[(~(graph_df.visited) & graph_df.node_id.str.startswith(params['experiment_name']))]
# grab the first unvisited node_id from the dataframe
prior_node_id = unvisited_detection_nodes.iloc[0,1]
prior_node_time = graph.nodes[prior_node_id]['time']
prior_node_region = graph.nodes[prior_node_id]['region']
cell_id = create_cell_id(prior_node_region,
prior_node_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
current_cell = CellFromGraph(cell_id,
prior_node_region,
prior_node_time,
parent=None)
if not cell_id in tracks.cell_id_list:
tracks.add_cell(current_cell)
else:
current_cell = tracks.get_cell(cell_id)
# track_dict_key = prior_node_id
# for use later in establishing predecessors
current_node_id = prior_node_id
# set this detection's "visited" status to True in the graph and in the dataframe
graph.nodes[prior_node_id]['visited'] = True
graph_df.iloc[np.where(graph_df.node_id==prior_node_id)[0][0],3] = True
# build current_track list to this detection's node
current_track = collections.deque()
current_track.append(current_node_id)
predecessors_list = [k for k in graph.predecessors(prior_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
while len(unvisited_predecessors_list) != 0:
# initialize a scores array to select highest score from the available options
predecessor_scores = np.zeros(len(unvisited_predecessors_list))
# populate array with scores
for i in range(len(unvisited_predecessors_list)):
predecessor_node_id = unvisited_predecessors_list[i]
edge_type, edge_score = get_greatest_score_info(predecessor_node_id, current_node_id, graph)
predecessor_scores[i] = edge_score
# find highest score
max_index = np.argmax(predecessor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
current_node_id = unvisited_predecessors_list[max_index]
current_track.appendleft(current_node_id)
predecessors_list = [k for k in graph.predecessors(current_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
        while prior_node_id != 'B':
# which nodes succeed our current node?
successor_node_ids = [node_id for node_id in graph.successors(prior_node_id)]
# keep only the potential successor detections that have not yet been visited
unvisited_node_ids = []
for i,successor_node_id in enumerate(successor_node_ids):
# if it starts with params['experiment_name'], it is a detection node, and not born, appear, etc.
if successor_node_id.startswith(params['experiment_name']):
# if it has been used in the cell track graph, i.e., if 'visited' is True,
# move on. Otherwise, append to our list
if graph.nodes[successor_node_id]['visited']:
continue
else:
unvisited_node_ids.append(successor_node_id)
# if it doesn't start with params['experiment_name'], it is a born, appear, etc., and should always be appended
else:
unvisited_node_ids.append(successor_node_id)
# initialize a scores array to select highest score from the available options
successor_scores = np.zeros(len(unvisited_node_ids))
successor_edge_types = []
# populate array with scores
for i in range(len(unvisited_node_ids)):
successor_node_id = unvisited_node_ids[i]
edge_type, edge_score = get_greatest_score_info(prior_node_id, successor_node_id, graph)
successor_scores[i] = edge_score
successor_edge_types.append(edge_type)
# find highest score
max_index = np.argmax(successor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
next_node_id = unvisited_node_ids[max_index]
max_edge_type = successor_edge_types[max_index]
# if this is a division event, add child node as a new cell,
# add the new cell as a daughter to current_cell,
# add current_cell as a parent to new cell.
# Then, search for the second child cell, add it to current_cell, etc.
if max_edge_type == 'child':
new_cell_time = graph.nodes[next_node_id]['time']
new_cell_region = graph.nodes[next_node_id]['region']
new_cell_id = create_cell_id(new_cell_region,
new_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
new_cell = CellFromGraph(new_cell_id,
new_cell_region,
new_cell_time,
parent=current_cell)
tracks.add_cell(new_cell)
current_cell.add_daughter(new_cell, new_cell_time)
# print("First daughter", current_cell.id, new_cell.id)
# initialize a scores array to select highest score from the available options
unvisited_detection_nodes = [unvisited_node_id for unvisited_node_id in unvisited_node_ids if unvisited_node_id.startswith(params['experiment_name'])]
child_scores = np.zeros(len(unvisited_detection_nodes))
# populate array with scores
for i in range(len(unvisited_detection_nodes)):
successor_node_id = unvisited_detection_nodes[i]
if successor_node_id == next_node_id:
child_scores[i] = -np.inf
continue
child_score = get_score_by_type(prior_node_id, successor_node_id, graph, score_type='child')
child_scores[i] = child_score
# print(child_scores)
try:
second_daughter_index = np.argmax(child_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
other_daughter_node_id = unvisited_detection_nodes[second_daughter_index]
other_daughter_cell_time = graph.nodes[other_daughter_node_id]['time']
other_daughter_cell_region = graph.nodes[other_daughter_node_id]['region']
other_daughter_cell_id = create_cell_id(other_daughter_cell_region,
other_daughter_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
other_daughter_cell = CellFromGraph(other_daughter_cell_id,
other_daughter_cell_region,
other_daughter_cell_time,
parent=current_cell)
tracks.add_cell(other_daughter_cell)
current_cell.add_daughter(other_daughter_cell, new_cell_time)
# now we remove current_cell, since it's done, and move on to one of the daughters
current_cell = new_cell
# print("Second daughter", current_cell.parent.id, other_daughter_cell.id)
# sometimes a second daughter doesn't exist: perhaps parent is at mouth of a trap and one
# daughter is lost to the central channel at division time. In this case, do the following:
except IndexError:
current_cell = new_cell
# if this is a migration, grow the current_cell.
elif max_edge_type == 'migrate':
cell_time = graph.nodes[next_node_id]['time']
cell_region = graph.nodes[next_node_id]['region']
current_cell.grow(cell_region, cell_time)
# if the event represents death, kill the cell
elif max_edge_type == 'die':
if prior_node_id.startswith(params['experiment_name']):
death_time = graph.nodes[prior_node_id]['time']
death_region = graph.nodes[prior_node_id]['region']
current_cell.die(death_region, death_time)
# if the event represents disappearance, end the cell
elif max_edge_type == 'disappear':
if prior_node_id.startswith(params['experiment_name']):
disappear_time = graph.nodes[prior_node_id]['time']
disappear_region = graph.nodes[prior_node_id]['region']
current_cell.disappears(disappear_region, disappear_time)
# set the next node to 'visited'
graph.nodes[next_node_id]['visited'] = True
if next_node_id != 'B':
graph_df.iloc[np.where(graph_df.node_id==next_node_id)[0][0],3] = True
# reset prior_node_id to iterate to next frame and append node_id to current track
# current_track.append(next_node_id)
prior_node_id = next_node_id
# print(current_cell.id, current_cell.parent.id)
# track_dict[track_dict_key][:] = current_track
if num_unvisited != count_unvisited(graph, params['experiment_name']):
same_iter_num = 0
else:
same_iter_num += 1
num_unvisited = count_unvisited(graph, params['experiment_name'])
print("{} detections remain unvisited.".format(num_unvisited))
if same_iter_num > 10:
break
return(tracks)
def create_lineages_from_graph_2(graph,
graph_df,
fov_id,
peak_id,
):
'''
This function iterates through nodes in a graph of detections
to link the nodes as "CellFromGraph" objects, eventually
leading to the ultimate goal of returning
a CellTree object with each cell's information for the experiment.
For now it ignores the number of cells in a detection and simply
assumes a 1:1 relationship between detections and cell number.
'''
# iterate through all nodes in G
# graph_score = 0
# track_dict = {}
tracks = CellTree()
for node_id in graph.nodes:
graph.nodes[node_id]['visited'] = False
graph_df['visited'] = False
    num_unvisited = count_unvisited(graph, params['experiment_name'])
    same_iter_num = 0
while num_unvisited > 0:
# which detection nodes are not yet visited
unvisited_detection_nodes = graph_df[(~(graph_df.visited) & graph_df.node_id.str.startswith(params['experiment_name']))]
# grab the first unvisited node_id from the dataframe
prior_node_id = unvisited_detection_nodes.iloc[0,1]
prior_node_time = graph.nodes[prior_node_id]['time']
prior_node_region = graph.nodes[prior_node_id]['region']
cell_id = create_cell_id(prior_node_region,
prior_node_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
current_cell = CellFromGraph(cell_id,
prior_node_region,
prior_node_time,
parent=None)
if not cell_id in tracks.cell_id_list:
tracks.add_cell(current_cell)
else:
current_cell = tracks.get_cell(cell_id)
# track_dict_key = prior_node_id
# for use later in establishing predecessors
current_node_id = prior_node_id
# set this detection's "visited" status to True in the graph and in the dataframe
graph.nodes[prior_node_id]['visited'] = True
graph_df.iloc[np.where(graph_df.node_id==prior_node_id)[0][0],3] = True
# build current_track list to this detection's node
current_track = collections.deque()
current_track.append(current_node_id)
predecessors_list = [k for k in graph.predecessors(prior_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
while len(unvisited_predecessors_list) != 0:
# initialize a scores array to select highest score from the available options
predecessor_scores = np.zeros(len(unvisited_predecessors_list))
# populate array with scores
for i in range(len(unvisited_predecessors_list)):
predecessor_node_id = unvisited_predecessors_list[i]
edge_type, edge_score = get_greatest_score_info(predecessor_node_id, current_node_id, graph)
predecessor_scores[i] = edge_score
# find highest score
max_index = np.argmax(predecessor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
current_node_id = unvisited_predecessors_list[max_index]
current_track.appendleft(current_node_id)
predecessors_list = [k for k in graph.predecessors(current_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
        while prior_node_id != 'B':
# which nodes succeed our current node?
successor_node_ids = [node_id for node_id in graph.successors(prior_node_id)]
# keep only the potential successor detections that have not yet been visited
unvisited_node_ids = []
for i,successor_node_id in enumerate(successor_node_ids):
# if it starts with params['experiment_name'], it is a detection node, and not born, appear, etc.
if successor_node_id.startswith(params['experiment_name']):
# if it has been used in the cell track graph, i.e., if 'visited' is True,
# move on. Otherwise, append to our list
if graph.nodes[successor_node_id]['visited']:
continue
else:
unvisited_node_ids.append(successor_node_id)
# if it doesn't start with params['experiment_name'], it is a born, appear, etc., and should always be appended
else:
unvisited_node_ids.append(successor_node_id)
# initialize a scores array to select highest score from the available options
successor_scores = np.zeros(len(unvisited_node_ids))
successor_edge_types = []
# populate array with scores
for i in range(len(unvisited_node_ids)):
successor_node_id = unvisited_node_ids[i]
edge_type, edge_score = get_greatest_score_info(prior_node_id, successor_node_id, graph)
successor_scores[i] = edge_score
successor_edge_types.append(edge_type)
# find highest score
max_index = np.argmax(successor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
next_node_id = unvisited_node_ids[max_index]
max_edge_type = successor_edge_types[max_index]
# if this is a division event, add child node as a new cell,
# add the new cell as a daughter to current_cell,
# add current_cell as a parent to new cell.
# Then, search for the second child cell, add it to current_cell, etc.
if max_edge_type == 'child':
new_cell_time = graph.nodes[next_node_id]['time']
new_cell_region = graph.nodes[next_node_id]['region']
new_cell_id = create_cell_id(new_cell_region,
new_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
new_cell = CellFromGraph(new_cell_id,
new_cell_region,
new_cell_time,
parent=current_cell)
tracks.add_cell(new_cell)
current_cell.add_daughter(new_cell, new_cell_time)
# print("First daughter", current_cell.id, new_cell.id)
# initialize a scores array to select highest score from the available options
unvisited_detection_nodes = [unvisited_node_id for unvisited_node_id in unvisited_node_ids if unvisited_node_id.startswith(params['experiment_name'])]
child_scores = np.zeros(len(unvisited_detection_nodes))
# populate array with scores
for i in range(len(unvisited_detection_nodes)):
successor_node_id = unvisited_detection_nodes[i]
if successor_node_id == next_node_id:
child_scores[i] = -np.inf
continue
child_score = get_score_by_type(prior_node_id, successor_node_id, graph, score_type='child')
child_scores[i] = child_score
# print(child_scores)
try:
second_daughter_index = np.argmax(child_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
other_daughter_node_id = unvisited_detection_nodes[second_daughter_index]
other_daughter_cell_time = graph.nodes[other_daughter_node_id]['time']
other_daughter_cell_region = graph.nodes[other_daughter_node_id]['region']
other_daughter_cell_id = create_cell_id(other_daughter_cell_region,
other_daughter_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
other_daughter_cell = CellFromGraph(other_daughter_cell_id,
other_daughter_cell_region,
other_daughter_cell_time,
parent=current_cell)
tracks.add_cell(other_daughter_cell)
current_cell.add_daughter(other_daughter_cell, new_cell_time)
# now we remove current_cell, since it's done, and move on to one of the daughters
current_cell = new_cell
# print("Second daughter", current_cell.parent.id, other_daughter_cell.id)
# sometimes a second daughter doesn't exist: perhaps parent is at mouth of a trap and one
# daughter is lost to the central channel at division time. In this case, do the following:
except IndexError:
current_cell = new_cell
# if this is a migration, grow the current_cell.
elif max_edge_type == 'migrate':
cell_time = graph.nodes[next_node_id]['time']
cell_region = graph.nodes[next_node_id]['region']
current_cell.grow(cell_region, cell_time)
# if the event represents death, kill the cell
elif max_edge_type == 'die':
if prior_node_id.startswith(params['experiment_name']):
death_time = graph.nodes[prior_node_id]['time']
death_region = graph.nodes[prior_node_id]['region']
current_cell.die(death_region, death_time)
# if the event represents disappearance, end the cell
elif max_edge_type == 'disappear':
if prior_node_id.startswith(params['experiment_name']):
disappear_time = graph.nodes[prior_node_id]['time']
disappear_region = graph.nodes[prior_node_id]['region']
current_cell.disappears(disappear_region, disappear_time)
# set the next node to 'visited'
graph.nodes[next_node_id]['visited'] = True
if next_node_id != 'B':
graph_df.iloc[np.where(graph_df.node_id==next_node_id)[0][0],3] = True
# reset prior_node_id to iterate to next frame and append node_id to current track
# current_track.append(next_node_id)
prior_node_id = next_node_id
# print(current_cell.id, current_cell.parent.id)
# track_dict[track_dict_key][:] = current_track
if num_unvisited != count_unvisited(graph, params['experiment_name']):
same_iter_num = 0
else:
same_iter_num += 1
num_unvisited = count_unvisited(graph, params['experiment_name'])
print("{} detections remain unvisited.".format(num_unvisited))
if same_iter_num > 10:
break
return(tracks)
# obtain the length and width of a cell region using the Feret diameter
def feretdiameter(region):
'''
feretdiameter calculates the length and width of the binary region shape. The cell orientation
from the ellipsoid is used to find the major and minor axis of the cell.
See https://en.wikipedia.org/wiki/Feret_diameter.
'''
# y: along vertical axis of the image; x: along horizontal axis of the image;
# calculate the relative centroid in the bounding box (non-rotated)
# print(region.centroid)
y0, x0 = region.centroid
y0 = y0 - np.int16(region.bbox[0]) + 1
x0 = x0 - np.int16(region.bbox[1]) + 1
cosorient = np.cos(region.orientation)
sinorient = np.sin(region.orientation)
# print(cosorient, sinorient)
amp_param = 1.2 #amplifying number to make sure the axis is longer than actual cell length
# coordinates relative to bounding box
# r_coords = region.coords - [np.int16(region.bbox[0]), np.int16(region.bbox[1])]
# limit to perimeter coords. pixels are relative to bounding box
region_binimg = np.pad(region.image, 1, 'constant') # pad region binary image by 1 to avoid boundary non-zero pixels
distance_image = ndi.distance_transform_edt(region_binimg)
r_coords = np.where(distance_image == 1)
r_coords = list(zip(r_coords[0], r_coords[1]))
    # coordinates are already sorted by y. partition into top and bottom to search faster later
# if orientation > 0, L1 is closer to top of image (lower Y coord)
if region.orientation > 0:
L1_coords = r_coords[:int(np.round(len(r_coords)/4))]
L2_coords = r_coords[int(np.round(len(r_coords)/4)):]
else:
L1_coords = r_coords[int(np.round(len(r_coords)/4)):]
L2_coords = r_coords[:int(np.round(len(r_coords)/4))]
#####################
    # calculate cell length
L1_pt = np.zeros((2,1))
L2_pt = np.zeros((2,1))
    # define the two end points of the long axis line
# one pole.
L1_pt[1] = x0 + cosorient * 0.5 * region.major_axis_length*amp_param
L1_pt[0] = y0 - sinorient * 0.5 * region.major_axis_length*amp_param
# the other pole.
L2_pt[1] = x0 - cosorient * 0.5 * region.major_axis_length*amp_param
L2_pt[0] = y0 + sinorient * 0.5 * region.major_axis_length*amp_param
    # find the closest coordinate in the region to each of the two end points above
    # (i.e. near each pole); the distance between those two closest points is the length
# pt_L1 = r_coords[np.argmin([np.sqrt(np.power(Pt[0]-L1_pt[0],2) + np.power(Pt[1]-L1_pt[1],2)) for Pt in r_coords])]
# pt_L2 = r_coords[np.argmin([np.sqrt(np.power(Pt[0]-L2_pt[0],2) + np.power(Pt[1]-L2_pt[1],2)) for Pt in r_coords])]
try:
pt_L1 = L1_coords[np.argmin([np.sqrt(np.power(Pt[0]-L1_pt[0],2) + np.power(Pt[1]-L1_pt[1],2)) for Pt in L1_coords])]
pt_L2 = L2_coords[np.argmin([np.sqrt(np.power(Pt[0]-L2_pt[0],2) + np.power(Pt[1]-L2_pt[1],2)) for Pt in L2_coords])]
length = np.sqrt(np.power(pt_L1[0]-pt_L2[0],2) + np.power(pt_L1[1]-pt_L2[1],2))
    except Exception:
        # if no suitable perimeter points were found, leave the length undefined
        length = None
#####################
# calculate cell width
    # measure the width along 2 lines parallel to the short axis, offset from the centroid by 0.2*length along the long axis (near the quarter positions), to avoid measuring at midcell
# limit to points in each half
W_coords = []
if region.orientation > 0:
W_coords.append(r_coords[:int(np.round(len(r_coords)/2))]) # note the /2 here instead of /4
W_coords.append(r_coords[int(np.round(len(r_coords)/2)):])
else:
W_coords.append(r_coords[int(np.round(len(r_coords)/2)):])
W_coords.append(r_coords[:int(np.round(len(r_coords)/2))])
# starting points
x1 = x0 + cosorient * 0.5 * length*0.4
y1 = y0 - sinorient * 0.5 * length*0.4
x2 = x0 - cosorient * 0.5 * length*0.4
y2 = y0 + sinorient * 0.5 * length*0.4
W1_pts = np.zeros((2,2))
W2_pts = np.zeros((2,2))
# now find the ends of the lines
# one side
W1_pts[0,1] = x1 - sinorient * 0.5 * region.minor_axis_length*amp_param
W1_pts[0,0] = y1 - cosorient * 0.5 * region.minor_axis_length*amp_param
W1_pts[1,1] = x2 - sinorient * 0.5 * region.minor_axis_length*amp_param
W1_pts[1,0] = y2 - cosorient * 0.5 * region.minor_axis_length*amp_param
# the other side
W2_pts[0,1] = x1 + sinorient * 0.5 * region.minor_axis_length*amp_param
W2_pts[0,0] = y1 + cosorient * 0.5 * region.minor_axis_length*amp_param
W2_pts[1,1] = x2 + sinorient * 0.5 * region.minor_axis_length*amp_param
W2_pts[1,0] = y2 + cosorient * 0.5 * region.minor_axis_length*amp_param
    # for each quarter position, find the perimeter points closest to the two guide points and measure the distance between them
pt_W1 = np.zeros((2,2))
pt_W2 = np.zeros((2,2))
d_W = np.zeros((2,1))
i = 0
for W1_pt, W2_pt in zip(W1_pts, W2_pts):
# # find the points closest to the guide points
# pt_W1[i,0], pt_W1[i,1] = r_coords[np.argmin([np.sqrt(np.power(Pt[0]-W1_pt[0],2) + np.power(Pt[1]-W1_pt[1],2)) for Pt in r_coords])]
# pt_W2[i,0], pt_W2[i,1] = r_coords[np.argmin([np.sqrt(np.power(Pt[0]-W2_pt[0],2) + np.power(Pt[1]-W2_pt[1],2)) for Pt in r_coords])]
# find the points closest to the guide points
pt_W1[i,0], pt_W1[i,1] = W_coords[i][np.argmin([np.sqrt(np.power(Pt[0]-W1_pt[0],2) + np.power(Pt[1]-W1_pt[1],2)) for Pt in W_coords[i]])]
pt_W2[i,0], pt_W2[i,1] = W_coords[i][np.argmin([np.sqrt(np.power(Pt[0]-W2_pt[0],2) + np.power(Pt[1]-W2_pt[1],2)) for Pt in W_coords[i]])]
# calculate the actual width
d_W[i] = np.sqrt(np.power(pt_W1[i,0]-pt_W2[i,0],2) + np.power(pt_W1[i,1]-pt_W2[i,1],2))
i += 1
# take the average of the two at quarter positions
width = np.mean([d_W[0],d_W[1]])
return length, width
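# Hedged usage sketch: feretdiameter() operates on a single skimage regionprops region.
# `labeled_cell_image` is an illustrative labeled segmentation image.
#
#   from skimage import measure
#   regions = measure.regionprops(labeled_cell_image)
#   length, width = feretdiameter(regions[0])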
# take info and make string for cell id
def create_focus_id(region, t, peak, fov, experiment_name=None):
'''Make a unique focus id string for a new focus'''
if experiment_name is None:
focus_id = 'f{:0=2}p{:0=4}t{:0=4}r{:0=2}'.format(fov, peak, t, region.label)
else:
focus_id = '{}f{:0=2}p{:0=4}t{:0=4}r{:0=2}'.format(experiment_name, fov, peak, t, region.label)
return focus_id
# take info and make string for cell id
def create_cell_id(region, t, peak, fov, experiment_name=None):
'''Make a unique cell id string for a new cell'''
# cell_id = ['f', str(fov), 'p', str(peak), 't', str(t), 'r', str(region.label)]
if experiment_name is None:
cell_id = ['f', '%02d' % fov, 'p', '%04d' % peak, 't', '%04d' % t, 'r', '%02d' % region.label]
cell_id = ''.join(cell_id)
else:
cell_id = '{}f{:0=2}p{:0=4}t{:0=4}r{:0=2}'.format(experiment_name, fov, peak, t, region.label)
return cell_id
def create_detection_id(t, peak, fov, region_label, experiment_name=None, max_cell_number=6):
'''Make a unique cell id string for a new cell'''
# cell_id = ['f', str(fov), 'p', str(peak), 't', str(t), 'r', str(region.label)]
if experiment_name is None:
det_id = ['f', '%02d' % fov, 'p', '%04d' % peak, 't', '%04d' % t, 'r', '%02d' % region_label]
det_id = ''.join(det_id)
else:
det_id = '{}f{:0=2}p{:0=4}t{:0=4}r{:0=2}'.format(experiment_name, fov, peak, t, region_label)
return det_id
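# Hedged example of the id strings produced above (values are illustrative):
# with t=10, peak=25, fov=1 and a region whose label is 2,
#   create_cell_id(region, 10, 25, 1)                          -> 'f01p0025t0010r02'
#   create_cell_id(region, 10, 25, 1, experiment_name='exp01') -> 'exp01f01p0025t0010r02'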
def initialize_track_graph(peak_id,
fov_id,
experiment_name,
predictions_dict,
regions_by_time,
max_cell_number=6,
born_threshold=0.75,
appear_threshold=0.75):
detection_dict = {}
frame_num = predictions_dict['migrate_model_predictions'].shape[0]
ebunch = []
G = nx.MultiDiGraph()
# create common start point
G.add_node('A')
# create common end point
G.add_node('B')
last_frame = False
node_id_list = []
timepoint_list = []
region_label_list = []
for frame_idx in range(frame_num):
timepoint = frame_idx + 1
paired_detection_time = timepoint+1
# get detections for this frame
frame_regions_list = regions_by_time[frame_idx]
# if we're at the end of the imaging, make all cells migrate to node 'B'
if timepoint == frame_num:
last_frame = True
else:
paired_frame_regions_list = regions_by_time[frame_idx+1]
# get state change probabilities (class predictions) for this frame
frame_prediction_dict = {key:val[frame_idx,...] for key,val in predictions_dict.items() if key != 'general_model_predictions'}
# for i in range(len(predictions_dict['general_model_predictions'])):
# frame_general_prediction = predictions_dict['general_model_predictions'][]
# create the "will be born" and "will appear" nodes for this frame
prior_born_state = 'born_{:0=4}'.format(timepoint-1)
born_state = 'born_{:0=4}'.format(timepoint)
G.add_node(born_state, visited=False, time=timepoint)
prior_appear_state = 'appear_{:0=4}'.format(timepoint-1)
appear_state = 'appear_{:0=4}'.format(timepoint)
G.add_node(appear_state, visited=False, time=timepoint)
if frame_idx == 0:
ebunch.append(('A', appear_state, 'start', {'weight':appear_threshold, 'score':1*np.log(appear_threshold)}))
ebunch.append(('A', born_state, 'start', {'weight':born_threshold, 'score':1*np.log(born_threshold)}))
# create the "Dies" and "Disappeared" nodes to link from prior frame
prior_dies_state = 'dies_{:0=4}'.format(timepoint-1)
dies_state = 'dies_{:0=4}'.format(timepoint)
next_dies_state = 'dies_{:0=4}'.format(timepoint+1)
G.add_node(dies_state, visited=False, time=timepoint)
prior_disappear_state = 'disappear_{:0=4}'.format(timepoint-1)
disappear_state = 'disappear_{:0=4}'.format(timepoint)
next_disappear_state = 'disappear_{:0=4}'.format(timepoint+1)
G.add_node(disappear_state, visited=False, time=timepoint)
node_id_list.extend([born_state, dies_state, appear_state, disappear_state])
timepoint_list.extend([timepoint, timepoint, timepoint, timepoint])
region_label_list.extend([0,0,0,0])
if frame_idx > 0:
ebunch.append((prior_dies_state, dies_state, 'die', {'weight':1.1, 'score':1*np.log(1.1)})) # impossible to move out of dies track
ebunch.append((prior_disappear_state, disappear_state, 'disappear', {'weight':1.1, 'score':1*np.log(1.1)})) # impossible to move out of disappear track
ebunch.append((prior_born_state, born_state, 'born', {'weight':born_threshold, 'score':1*np.log(born_threshold)}))
ebunch.append((prior_appear_state, appear_state, 'appear', {'weight':appear_threshold, 'score':1*np.log(appear_threshold)}))
if last_frame:
ebunch.append((appear_state, 'B', 'end', {'weight':1, 'score':1*np.log(1)}))
ebunch.append((disappear_state, 'B', 'end', {'weight':1, 'score':1*np.log(1)}))
ebunch.append((born_state, 'B', 'end', {'weight':1, 'score':1*np.log(1)}))
ebunch.append((dies_state, 'B', 'end', {'weight':1, 'score':1*np.log(1)}))
for region_idx in range(max_cell_number):
# the tracking models assume there are 6 detections in each frame, regardless of how many
# are actually there. Therefore, this try/except logic will catch cases where there
# were fewer than 6 detections in a frame.
try:
region = frame_regions_list[region_idx]
region_label = region.label
except IndexError:
region = None
region_label = region_idx + 1
# create the name for this detection
detection_id = create_detection_id(timepoint,
peak_id,
fov_id,
region_label,
experiment_name=experiment_name)
det = Detection(detection_id, region, timepoint)
detection_dict[det.id] = det
if det.area is not None:
# if the detection represents a segmentation from our imaging, add its ID,
# which is also its key in detection_dict, as a node in G
G.add_node(det.id, visited=False, cell_count=1, region=region, time=timepoint)
timepoint_list.append(timepoint)
node_id_list.append(detection_id)
region_label_list.append(region.label)
# also set up all edges for this detection's node in our ebunch
# loop through prediction types and add each to the ebunch
for key,val in frame_prediction_dict.items():
if frame_idx == 0:
ebunch.append(('A', detection_id, 'start', {'weight':1, 'score':1*np.log(1)}))
if last_frame:
ebunch.append((detection_id, 'B', 'end', {'weight':1, 'score':1*np.log(1)}))
if val.shape[0] == max_cell_number ** 2:
continue
else:
frame_predictions = val
detection_prediction = frame_predictions[region_idx]
if key == 'appear_model_predictions':
if frame_idx == 0:
continue
elem = (prior_appear_state, detection_id, 'appear', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
elif 'born' in key:
if frame_idx == 0:
continue
elem = (prior_born_state, detection_id, 'born', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
elif 'zero_cell' in key:
G.nodes[det.id]['zero_cell_weight'] = detection_prediction
G.nodes[det.id]['zero_cell_score'] = 1*np.log(detection_prediction)
elif 'one_cell' in key:
G.nodes[det.id]['one_cell_weight'] = detection_prediction
                            G.nodes[det.id]['one_cell_score'] = 1*np.log(detection_prediction)
elif 'two_cell' in key:
G.nodes[det.id]['two_cell_weight'] = detection_prediction
                            G.nodes[det.id]['two_cell_score'] = 1*np.log(detection_prediction)
ebunch.append(elem)
else:
# if the array is cell_number^2, reshape it to cell_number x cell_number
# Then slice our detection's row and iterate over paired_cells
if val.shape[0] == max_cell_number**2:
frame_predictions = val.reshape((max_cell_number,max_cell_number))
detection_predictions = frame_predictions[region_idx,:]
# loop through paired detection predictions, test whether paired detection exists
# then append the edge to our ebunch
for paired_cell_idx in range(detection_predictions.size):
# attempt to grab the paired detection. If we get an IndexError, it doesn't exist.
try:
paired_detection = paired_frame_regions_list[paired_cell_idx]
except IndexError:
continue
# create the paired detection's id for use in our ebunch
paired_detection_id = create_detection_id(paired_detection_time,
peak_id,
fov_id,
paired_detection.label,
experiment_name=experiment_name)
paired_prediction = detection_predictions[paired_cell_idx]
if 'child_' in key:
child_weight = paired_prediction
elem = (detection_id, paired_detection_id, 'child', {'child_weight':child_weight, 'score':1*np.log(child_weight)})
ebunch.append(elem)
if 'migrate_' in key:
migrate_weight = paired_prediction
elem = (detection_id, paired_detection_id, 'migrate', {'migrate_weight':migrate_weight, 'score':1*np.log(migrate_weight)})
ebunch.append(elem)
# if 'interaction_' in key:
# interaction_weight = paired_prediction
# elem = (detection_id, paired_detection_id, 'interaction', {'weight':interaction_weight, 'score':1*np.log(interaction_weight)})
# ebunch.append(elem)
# if the array is cell_number long, do similar stuff as above.
elif val.shape[0] == max_cell_number:
frame_predictions = val
detection_prediction = frame_predictions[region_idx]
if key == 'appear_model_predictions':
if frame_idx == 0:
continue
# print("Linking {} to {}.".format(prior_appear_state, detection_id))
elem = (prior_appear_state, detection_id, 'appear', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
elif 'disappear_' in key:
if last_frame:
continue
# print("Linking {} to {}.".format(detection_id, next_disappear_state))
elem = (detection_id, next_disappear_state, 'disappear', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
elif 'born_' in key:
if frame_idx == 0:
continue
# print("Linking {} to {}.".format(prior_born_state, detection_id))
elem = (prior_born_state, detection_id, 'born', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
elif 'die_model' in key:
if last_frame:
continue
# print("Linking {} to {}.".format(detection_id, next_dies_state))
elem = (detection_id, next_dies_state, 'die', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
# the following classes aren't yet implemented
elif 'zero_cell' in key:
G.nodes[det.id]['zero_cell_weight'] = detection_prediction
G.nodes[det.id]['zero_cell_score'] = 1*np.log(detection_prediction)
elif 'one_cell' in key:
G.nodes[det.id]['one_cell_weight'] = detection_prediction
G.nodes[det.id]['one_cell_score'] = 1*np.log(detection_prediction)
elif 'two_cell' in key:
G.nodes[det.id]['two_cell_weight'] = detection_prediction
G.nodes[det.id]['two_cell_score'] = 1*np.log(detection_prediction)
ebunch.append(elem)
G.add_edges_from(ebunch)
graph_df = pd.DataFrame(data={'timepoint':timepoint_list,
'node_id':node_id_list,
'region_label':region_label_list})
return(G, graph_df)
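# Hedged usage sketch: after building the graph, the parallel edges between two detections can
# be inspected directly; attribute names follow what is set above and the node ids are
# illustrative.
#
#   G, graph_df = initialize_track_graph(peak_id, fov_id, params['experiment_name'],
#                                        predictions_dict, regions_by_time,
#                                        max_cell_number=6)
#   G.get_edge_data('exp01f01p0025t0001r01', 'exp01f01p0025t0002r01')  # dict keyed by edge type
#   graph_df.head()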
# function for a growing cell, used to calculate growth rate
def cell_growth_func(t, sb, elong_rate):
'''
Assumes you have taken log of the data.
It also allows the size at birth to be a free parameter, rather than fixed
at the actual size at birth (but still uses that as a guess)
Assumes natural log, not base 2 (though I think that makes less sense)
old form: sb*2**(alpha*t)
'''
return sb+elong_rate*t
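# Hedged sketch (assumes scipy is available): a small helper showing how the log-linear model
# above could be fit to one cell's lengths to estimate the elongation rate. It is illustrative
# only and is not called anywhere in this module.
def _fit_elongation_rate_sketch(times, lengths):
    '''Fit cell_growth_func to log(lengths) vs. time and return (log_sb, elong_rate).'''
    from scipy.optimize import curve_fit
    t = np.asarray(times, dtype=float)
    t = t - t[0]  # time since birth
    log_lengths = np.log(np.asarray(lengths, dtype=float))
    popt, _ = curve_fit(cell_growth_func, t, log_lengths, p0=[log_lengths[0], 0.01])
    return popt[0], popt[1]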
# functions for checking if a cell has divided or not
# this function should also take the variable t to
# weight the allowed changes by the difference in time as well
def check_growth_by_region(cell, region):
'''Checks to see if it makes sense
to grow a cell by a particular region'''
# load parameters for checking
max_growth_length = params['track']['max_growth_length']
min_growth_length = params['track']['min_growth_length']
max_growth_area = params['track']['max_growth_area']
min_growth_area = params['track']['min_growth_area']
# check if length is not too much longer
if cell.lengths[-1]*max_growth_length < region.major_axis_length:
return False
# check if it is not too short (cell should not shrink really)
if cell.lengths[-1]*min_growth_length > region.major_axis_length:
return False
# check if area is not too great
if cell.areas[-1]*max_growth_area < region.area:
return False
# check if area is not too small
    if cell.areas[-1]*min_growth_area > region.area:
return False
# # check if y position of region is within
# # the quarter positions of the bounding box
# lower_quarter = cell.bboxes[-1][0] + (region.major_axis_length / 4)
# upper_quarter = cell.bboxes[-1][2] - (region.major_axis_length / 4)
# if lower_quarter > region.centroid[0] or upper_quarter < region.centroid[0]:
# return False
# check if y position of region is within the bounding box of previous region
lower_bound = cell.bboxes[-1][0]
upper_bound = cell.bboxes[-1][2]
if lower_bound > region.centroid[0] or upper_bound < region.centroid[0]:
return False
# return true if you get this far
return True
# see if a cell has reasonably divided
def check_division(cell, region1, region2):
'''Checks to see if it makes sense to divide a
cell into two new cells based on two regions.
    Return 0 if nothing should happen and the regions should be ignored
Return 1 if cell should grow by region 1
Return 2 if cell should grow by region 2
Return 3 if cell should divide into the regions.'''
# load in parameters
max_growth_length = params['track']['max_growth_length']
min_growth_length = params['track']['min_growth_length']
# see if either region just could be continued growth,
# if that is the case then just return
# these shouldn't return true if the cells are divided
# as they would be too small
if check_growth_by_region(cell, region1):
return 1
if check_growth_by_region(cell, region2):
return 2
# make sure combined size of daughters is not too big
combined_size = region1.major_axis_length + region2.major_axis_length
# check if length is not too much longer
if cell.lengths[-1]*max_growth_length < combined_size:
return 0
# and not too small
if cell.lengths[-1]*min_growth_length > combined_size:
return 0
    # centroids of regions should be in the upper and lower half
    # of the mother's bounding box, respectively
# top region within top half of mother bounding box
if cell.bboxes[-1][0] > region1.centroid[0] or cell.centroids[-1][0] < region1.centroid[0]:
return 0
# bottom region with bottom half of mother bounding box
if cell.centroids[-1][0] > region2.centroid[0] or cell.bboxes[-1][2] < region2.centroid[0]:
return 0
# if you got this far then divide the mother
return 3
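# Hedged usage sketch: deciding what to do with two candidate regions in the next frame for an
# existing cell; names are illustrative and the grow call mirrors the cell-object methods used
# elsewhere in this module.
#
#   outcome = check_division(cell, region1, region2)
#   if outcome == 1:
#       cell.grow(region1, t)
#   elif outcome == 2:
#       cell.grow(region2, t)
#   elif outcome == 3:
#       pass  # create two daughter cells from region1/region2 and divide the mother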
### functions for pruning a dictionary of cells
# find cells with both a mother and two daughters
def find_complete_cells(Cells):
'''Go through a dictionary of cells and return another dictionary
that contains just those with a parent and daughters'''
Complete_Cells = {}
for cell_id in Cells:
if Cells[cell_id].daughters and Cells[cell_id].parent:
Complete_Cells[cell_id] = Cells[cell_id]
return Complete_Cells
# finds cells whose birth label is 1
def find_mother_cells(Cells):
'''Return only cells whose starting region label is 1.'''
Mother_Cells = {}
for cell_id in Cells:
if Cells[cell_id].birth_label == 1:
Mother_Cells[cell_id] = Cells[cell_id]
return Mother_Cells
def filter_foci(Foci, label, t, debug=False):
Filtered_Foci = {}
for focus_id, focus in Foci.items():
        # copy the times list so as not to update it in-place
        times = list(focus.times)
if debug:
print(times)
match_inds = [i for i,time in enumerate(times) if time == t]
labels = [focus.labels[idx] for idx in match_inds]
if label in labels:
Filtered_Foci[focus_id] = focus
return Filtered_Foci
def filter_cells(Cells, attr, val, idx=None, debug=False):
'''Return only cells whose designated attribute equals "val".'''
Filtered_Cells = {}
for cell_id, cell in Cells.items():
at_val = getattr(cell, attr)
if debug:
print(at_val)
print("Times: ", cell.times)
if idx is not None:
at_val = at_val[idx]
if at_val == val:
Filtered_Cells[cell_id] = cell
return Filtered_Cells
def filter_cells_containing_val_in_attr(Cells, attr, val):
'''Return only cells that have val in list attribute, attr.'''
Filtered_Cells = {}
for cell_id, cell in Cells.items():
at_list = getattr(cell, attr)
if val in at_list:
Filtered_Cells[cell_id] = cell
return Filtered_Cells
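# Hedged usage sketch for the pruning/filtering helpers above; `Cells` maps cell id -> cell
# object and the attribute names are illustrative.
#
#   Complete_Cells = find_complete_cells(Cells)
#   Mother_Cells = find_mother_cells(Cells)
#   fov1_cells = filter_cells(Cells, attr='fov', val=1)
#   cells_alive_at_t50 = filter_cells_containing_val_in_attr(Cells, attr='times', val=50)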
### functions for additional cell centric analysis
def compile_cell_info_df(Cells):
# count the number of rows that will be in the long dataframe
quant_fluor = False
long_df_row_number = 0
for cell in Cells.values():
# first time through, evaluate whether we quantified cells' fluorescence
if long_df_row_number == 0:
if len(cell.area_mean_fluorescence.keys()) != 0:
quant_fluor = True
fluorescence_channels = [k for k in cell.area_mean_fluorescence.keys()]
long_df_row_number += len(cell.times)
# initialize some arrays for filling with data
data = {
# ids can be up to 100 characters long
'id': np.chararray(long_df_row_number, itemsize=100),
'times': np.zeros(long_df_row_number, dtype='uint16'),
'lengths': np.zeros(long_df_row_number),
'volumes': np.zeros(long_df_row_number),
'areas': np.zeros(long_df_row_number),
'abs_times': np.zeros(long_df_row_number, dtype='uint32')
}
if quant_fluor:
for fluorescence_channel in fluorescence_channels:
data['{}_area_mean_fluorescence'.format(fluorescence_channel)] = np.zeros(long_df_row_number)
data['{}_volume_mean_fluorescence'.format(fluorescence_channel)] = np.zeros(long_df_row_number)
data['{}_total_fluorescence'.format(fluorescence_channel)] = np.zeros(long_df_row_number)
data = populate_focus_arrays(Cells, data, cell_quants=True)
long_df = pd.DataFrame(data=data)
wide_df_row_number = len(Cells)
data = {
# ids can be up to 100 characters long
'id': np.chararray(wide_df_row_number, itemsize=100),
'fov': np.zeros(wide_df_row_number, dtype='uint8'),
'peak': np.zeros(wide_df_row_number, dtype='uint16'),
'parent_id': np.chararray(wide_df_row_number, itemsize=100),
'child1_id': np.chararray(wide_df_row_number, itemsize=100),
'child2_id': np.chararray(wide_df_row_number, itemsize=100),
'division_time': np.zeros(wide_df_row_number),
'birth_label': np.zeros(wide_df_row_number, dtype='uint8'),
'birth_time': np.zeros(wide_df_row_number, dtype='uint16'),
'sb': np.zeros(wide_df_row_number),
'sd': | np.zeros(wide_df_row_number) | numpy.zeros |
import mmcv
import numpy as np
import os
from concurrent import futures as futures
from os import path as osp
class S3DISData(object):
"""S3DIS data.
Generate s3dis infos for s3dis_converter.
Args:
root_path (str): Root path of the raw data.
split (str): Set split type of the data. Default: 'Area_1'.
"""
def __init__(self, root_path, split='Area_1'):
self.root_dir = root_path
self.split = split
self.data_dir = osp.join(root_path,
'Stanford3dDataset_v1.2_Aligned_Version')
self.classes = [
'ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door',
'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter'
]
self.cat2label = {cat: self.classes.index(cat) for cat in self.classes}
self.label2cat = {self.cat2label[t]: t for t in self.cat2label}
self.cat_ids = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
self.cat_ids2class = {
cat_id: i
for i, cat_id in enumerate(list(self.cat_ids))
}
assert split in [
'Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_6'
]
self.sample_id_list = os.listdir(osp.join(self.data_dir,
split)) # conferenceRoom_1
for sample_id in self.sample_id_list:
if os.path.isfile(osp.join(self.data_dir, split, sample_id)):
self.sample_id_list.remove(sample_id)
def __len__(self):
return len(self.sample_id_list)
def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):
"""Get data infos.
This method gets information from the raw data.
Args:
num_workers (int): Number of threads to be used. Default: 4.
has_label (bool): Whether the data has label. Default: True.
sample_id_list (list[int]): Index list of the sample.
Default: None.
Returns:
infos (list[dict]): Information of the raw data.
"""
def process_single_scene(sample_idx):
print(f'{self.split} sample_idx: {sample_idx}')
info = dict()
pc_info = {
'num_features': 6,
'lidar_idx': f'{self.split}_{sample_idx}'
}
info['point_cloud'] = pc_info
pts_filename = osp.join(self.root_dir, 's3dis_data',
f'{self.split}_{sample_idx}_point.npy')
pts_instance_mask_path = osp.join(
self.root_dir, 's3dis_data',
f'{self.split}_{sample_idx}_ins_label.npy')
pts_semantic_mask_path = osp.join(
self.root_dir, 's3dis_data',
f'{self.split}_{sample_idx}_sem_label.npy')
points = np.load(pts_filename).astype(np.float32)
            pts_instance_mask = np.load(pts_instance_mask_path).astype(np.int64)
            pts_semantic_mask = np.load(pts_semantic_mask_path).astype(np.int64)
mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))
mmcv.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask'))
mmcv.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask'))
points.tofile(
osp.join(self.root_dir, 'points',
f'{self.split}_{sample_idx}.bin'))
pts_instance_mask.tofile(
osp.join(self.root_dir, 'instance_mask',
f'{self.split}_{sample_idx}.bin'))
pts_semantic_mask.tofile(
osp.join(self.root_dir, 'semantic_mask',
f'{self.split}_{sample_idx}.bin'))
info['pts_path'] = osp.join('points',
f'{self.split}_{sample_idx}.bin')
info['pts_instance_mask_path'] = osp.join(
'instance_mask', f'{self.split}_{sample_idx}.bin')
info['pts_semantic_mask_path'] = osp.join(
'semantic_mask', f'{self.split}_{sample_idx}.bin')
return info
sample_id_list = sample_id_list if sample_id_list is not None \
else self.sample_id_list
with futures.ThreadPoolExecutor(num_workers) as executor:
infos = executor.map(process_single_scene, sample_id_list)
return list(infos)
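# Hedged usage sketch: roughly how a data-preparation script might drive the class above to
# build per-area info files; the paths are illustrative.
#
#   s3dis_area1 = S3DISData(root_path='./data/s3dis', split='Area_1')
#   infos = s3dis_area1.get_infos(num_workers=4)
#   mmcv.dump(infos, './data/s3dis/s3dis_infos_Area_1.pkl', 'pkl')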
class S3DISSegData(object):
"""S3DIS dataset used to generate infos for semantic segmentation task.
Args:
data_root (str): Root path of the raw data.
ann_file (str): The generated scannet infos.
split (str): Set split type of the data. Default: 'train'.
num_points (int): Number of points in each data input. Default: 8192.
label_weight_func (function): Function to compute the label weight.
Default: None.
"""
def __init__(self,
data_root,
ann_file,
split='Area_1',
num_points=4096,
label_weight_func=None):
self.data_root = data_root
self.data_infos = mmcv.load(ann_file)
self.split = split
self.num_points = num_points
self.all_ids = np.arange(13) # all possible ids
self.cat_ids = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12]) # used for seg task
self.ignore_index = len(self.cat_ids)
        self.cat_id2class = np.ones((self.all_ids.shape[0],), dtype=np.int64) * \
self.ignore_index
for i, cat_id in enumerate(self.cat_ids):
self.cat_id2class[cat_id] = i
# label weighting function is taken from
# https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24
self.label_weight_func = (lambda x: 1.0 / np.log(1.2 + x)) if \
label_weight_func is None else label_weight_func
def get_seg_infos(self):
scene_idxs, label_weight = self.get_scene_idxs_and_label_weight()
save_folder = osp.join(self.data_root, 'seg_info')
mmcv.mkdir_or_exist(save_folder)
np.save(
osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'),
scene_idxs)
np.save(
osp.join(save_folder, f'{self.split}_label_weight.npy'),
label_weight)
print(f'{self.split} resampled scene index and label weight saved')
def _convert_to_label(self, mask):
"""Convert class_id in loaded segmentation mask to label."""
if isinstance(mask, str):
if mask.endswith('npy'):
mask = np.load(mask)
else:
                mask = np.fromfile(mask, dtype=np.int64)
label = self.cat_id2class[mask]
return label
def get_scene_idxs_and_label_weight(self):
"""Compute scene_idxs for data sampling and label weight for loss \
calculation.
We sample more times for scenes with more points. Label_weight is
inversely proportional to number of class points.
"""
num_classes = len(self.cat_ids)
num_point_all = []
label_weight = np.zeros((num_classes + 1, )) # ignore_index
for data_info in self.data_infos:
label = self._convert_to_label(
osp.join(self.data_root, data_info['pts_semantic_mask_path']))
num_point_all.append(label.shape[0])
class_count, _ = np.histogram(label, range(num_classes + 2))
label_weight += class_count
# repeat scene_idx for num_scene_point // num_sample_point times
sample_prob = np.array(num_point_all) / float(np.sum(num_point_all))
num_iter = int(np.sum(num_point_all) / float(self.num_points))
scene_idxs = []
for idx in range(len(self.data_infos)):
scene_idxs.extend([idx] * round(sample_prob[idx] * num_iter))
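        # Worked example with hypothetical numbers: num_points=4096 and two
        # scenes of 1e6 and 3e6 points give num_iter = int(4e6 / 4096) = 976
        # and sample_prob = [0.25, 0.75], so scene 0 contributes ~244 indices
        # and scene 1 ~732 indices to the resampled epoch.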
        scene_idxs = np.array(scene_idxs).astype(np.int32)
        # label weight is inversely proportional to the per-class point count;
        # drop the trailing ignore_index bin before normalizing
        label_weight = label_weight[:-1].astype(np.float32)
        label_weight = label_weight / label_weight.sum()
        label_weight = self.label_weight_func(label_weight).astype(np.float32)
        return scene_idxs, label_weight
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''astrokep.py - <NAME> (<EMAIL>) - 05/2016
Contains various useful tools for analyzing Kepler light curves.
'''
#############
## LOGGING ##
#############
import logging
from datetime import datetime
from traceback import format_exc
# setup a logger
LOGGER = None
LOGMOD = __name__
DEBUG = False
def set_logger_parent(parent_name):
globals()['LOGGER'] = logging.getLogger('%s.%s' % (parent_name, LOGMOD))
def LOGDEBUG(message):
if LOGGER:
LOGGER.debug(message)
elif DEBUG:
print('[%s - DBUG] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGINFO(message):
if LOGGER:
LOGGER.info(message)
else:
print('[%s - INFO] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGERROR(message):
if LOGGER:
LOGGER.error(message)
else:
print('[%s - ERR!] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGWARNING(message):
if LOGGER:
LOGGER.warning(message)
else:
print('[%s - WRN!] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGEXCEPTION(message):
if LOGGER:
LOGGER.exception(message)
else:
print(
'[%s - EXC!] %s\nexception was: %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message, format_exc()
)
)
#############
## IMPORTS ##
#############
from time import time as unixtime
import glob
import fnmatch
import sys
import os.path
try:
import cPickle as pickle
except:
import pickle
import gzip
import numpy as np
from numpy import nan as npnan, sum as npsum, abs as npabs, \
roll as nproll, isfinite as npisfinite, std as npstd, \
sign as npsign, sqrt as npsqrt, median as npmedian, \
array as nparray, percentile as nppercentile, \
polyfit as nppolyfit, var as npvar, max as npmax, min as npmin, \
log10 as nplog10, arange as nparange, pi as MPI, floor as npfloor, \
argsort as npargsort, cos as npcos, sin as npsin, tan as nptan, \
where as npwhere, linspace as nplinspace, \
zeros_like as npzeros_like, full_like as npfull_like, all as npall, \
correlate as npcorrelate, zeros as npzeros, ones as npones, \
column_stack as npcolumn_stack, in1d as npin1d, append as npappend, \
unique as npunique, argwhere as npargwhere, concatenate as npconcatenate
from numpy.polynomial.legendre import Legendre
from scipy.optimize import leastsq
from scipy.signal import medfilt
# FIXME: should probably add this to setup.py requirements
try:
from sklearn.ensemble import RandomForestRegressor
SKLEARN = True
except:
SKLEARN = False
from .lcmath import sigclip_magseries, find_lc_timegroups
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
try:
from astropy.io import fits as pyfits
except:
import pyfits
###########################################
## UTILITY FUNCTIONS FOR FLUXES AND MAGS ##
###########################################
def keplerflux_to_keplermag(keplerflux, f12=1.74e5):
'''
This converts the kepler flux in electrons/sec to kepler magnitude.
kepler mag/flux relation:
- fkep = (10.0**(-0.4*(kepmag - 12.0)))*f12
- f12 = 1.74e5 # electrons/sec
'''
kepmag = 12.0 - 2.5*nplog10(keplerflux/f12)
return kepmag
def keplermag_to_keplerflux(keplermag, f12=1.74e5):
'''
This converts the kepler mag back to kepler flux.
'''
kepflux = (10.0**(-0.4*(keplermag - 12.0)))*f12
return kepflux
def keplermag_to_sdssr(keplermag, kic_sdssg, kic_sdssr):
'''
convert from kepmag to SDSS r mag, we must know the sdssg of the target
(from UCAC4 or other transforms). this appears to be a very rough
transformation.
Get kic_sdssg and kic_sdssr from extension 0 of a Kepler llc.fits file.
'''
kic_sdssgr = kic_sdssg - kic_sdssr
if kic_sdssgr < 0.8:
kepsdssr = (keplermag - 0.2*kic_sdssg)/0.8
else:
kepsdssr = (keplermag - 0.1*kic_sdssg)/0.9
return kepsdssr
def flux_ppm_to_magnitudes(ppm):
'''
This converts Kepler's flux parts-per-million to magnitudes.
'''
return -2.5*nplog10(1.0 - ppm/1.0e6)
######################################################
## FUNCTIONS FOR READING KEPLER AND K2 LIGHT CURVES ##
######################################################
# this is the list of keys to pull out of the light curve FITS table
LCDATAKEYS = ['TIME','TIMECORR','CADENCENO',
'SAP_QUALITY',
'PSF_CENTR1','PSF_CENTR1_ERR','PSF_CENTR2','PSF_CENTR2_ERR',
'MOM_CENTR1','MOM_CENTR1_ERR','MOM_CENTR2','MOM_CENTR2_ERR']
LCSAPKEYS = ['SAP_FLUX','SAP_FLUX_ERR','SAP_BKG','SAP_BKG_ERR']
LCPDCKEYS = ['PDCSAP_FLUX','PDCSAP_FLUX_ERR']
# this is the list of keys to pull out of the light curve header
LCHEADERKEYS = ['TIMESYS','BJDREFI','BJDREFF',
'OBJECT','KEPLERID',
'RA_OBJ','DEC_OBJ','EQUINOX',
'EXPOSURE',
'CDPP3_0','CDPP6_0','CDPP12_0',
'PDCVAR','PDCMETHD','CROWDSAP','FLFRCSAP']
# this is the list of keys to pull out of the top header of the FITS
LCTOPKEYS = ['CHANNEL','SKYGROUP','MODULE','OUTPUT',
'QUARTER','SEASON','CAMPAIGN',
'DATA_REL','OBSMODE',
'PMRA','PMDEC','PMTOTAL','PARALLAX',
'GLON','GLAT',
'GMAG','RMAG','IMAG','ZMAG','D51MAG',
'JMAG','HMAG','KMAG','KEPMAG',
'GRCOLOR','JKCOLOR','GKCOLOR',
'TEFF','LOGG','FEH',
'EBMINUSV','AV','RADIUS','TMINDEX']
# this is the list of keys to pull out of the aperture part of the light curve
# we also pull out the whole pixel mask, which looks something like:
# array([[0, 1, 1, 1, 1, 1, 1, 0],
# [1, 1, 1, 3, 3, 1, 1, 1],
# [1, 1, 3, 3, 3, 3, 1, 1],
# [1, 1, 3, 3, 3, 3, 3, 1],
# [1, 1, 3, 3, 3, 3, 3, 1],
# [1, 1, 1, 1, 3, 3, 1, 1],
# [0, 1, 1, 1, 1, 1, 1, 0]], dtype=int32)
# where the value 3 means the actual pixels used to sum the flux for this
# particular object (the optimal aperture). 1 means the pixel was collected by
# the telescope, so its flux is available
# we use CDELT1 and CDELT2 below to get the pixel scale in arcsec/px
# it should be about 3.96 arcsec/pixel in most cases
LCAPERTUREKEYS = ['NPIXSAP','NPIXMISS','CDELT1','CDELT2']
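# Illustrative helper, not part of the original astrokep module: summarize the
# aperture pixel mask described in the comment above. It assumes the mask is
# the integer numpy array found in extension 2 of a Kepler light curve FITS
# file (the same array stored as 'lcaperture' by read_kepler_fitslc below).
def aperture_pixel_summary(lcaperture):
    '''Return (n_optimal, n_collected): pixels summed for photometry (value 3)
    and pixels collected by the spacecraft at all (value >= 1).'''
    n_optimal = int(npsum(lcaperture == 3))
    n_collected = int(npsum(lcaperture >= 1))
    return n_optimal, n_collected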
def read_kepler_fitslc(lcfits,
headerkeys=LCHEADERKEYS,
datakeys=LCDATAKEYS,
sapkeys=LCSAPKEYS,
pdckeys=LCPDCKEYS,
topkeys=LCTOPKEYS,
apkeys=LCAPERTUREKEYS,
normalize=False,
appendto=None):
'''This extracts the light curve from a single Kepler or K2 LC FITS file.
This works on the light curves available at MAST:
-> kplr{kepid}-{somedatething}_llc.fits files from the Kepler mission
-> ktwo{epicid}-c{campaign}_llc.fits files from the K2 mission
Returns an lcdict.
If normalize == True, then each component light curve's flux measurements
will be normalized to 1.0 by dividing out the median flux for the component
light curve.
If appendto is an lcdict, will append measurements to that dict. This is
used for consolidating light curves for the same object across different
files (quarters). The appending does not care about the time order. To
consolidate light curves in time order, use consolidate_kepler_fitslc below.
'''
# read the fits file
hdulist = pyfits.open(lcfits)
lchdr, lcdata = hdulist[1].header, hdulist[1].data
lctophdr, lcaperturehdr, lcaperturedata = (hdulist[0].header,
hdulist[2].header,
hdulist[2].data)
hdulist.close()
hdrinfo = {}
# now get the values we want from the header
for key in headerkeys:
if key in lchdr and lchdr[key] is not None:
hdrinfo[key.lower()] = lchdr[key]
else:
hdrinfo[key.lower()] = None
# get the number of detections
ndet = lchdr['NAXIS2']
# get the info from the topheader
for key in topkeys:
if key in lctophdr and lctophdr[key] is not None:
hdrinfo[key.lower()] = lctophdr[key]
else:
hdrinfo[key.lower()] = None
# get the info from the lcaperturehdr
for key in lcaperturehdr:
if key in lcaperturehdr and lcaperturehdr[key] is not None:
hdrinfo[key.lower()] = lcaperturehdr[key]
else:
hdrinfo[key.lower()] = None
# if we're appending to another lcdict
if appendto and isinstance(appendto, dict):
lcdict = appendto
lcdict['quarter'].append(hdrinfo['quarter'])
lcdict['season'].append(hdrinfo['season'])
lcdict['datarelease'].append(hdrinfo['data_rel'])
lcdict['obsmode'].append(hdrinfo['obsmode'])
lcdict['campaign'].append(hdrinfo['campaign'])
# we don't update the objectid
# update lcinfo
lcdict['lcinfo']['timesys'].append(hdrinfo['timesys'])
lcdict['lcinfo']['bjdoffset'].append(
hdrinfo['bjdrefi'] + hdrinfo['bjdreff']
)
lcdict['lcinfo']['exptime'].append(hdrinfo['exposure'])
lcdict['lcinfo']['lcaperture'].append(lcaperturedata)
lcdict['lcinfo']['aperpixused'].append(hdrinfo['npixsap'])
lcdict['lcinfo']['aperpixunused'].append(hdrinfo['npixmiss'])
lcdict['lcinfo']['pixarcsec'].append(
(npabs(hdrinfo['cdelt1']) +
npabs(hdrinfo['cdelt2']))*3600.0/2.0
)
lcdict['lcinfo']['channel'].append(hdrinfo['channel'])
lcdict['lcinfo']['skygroup'].append(hdrinfo['skygroup'])
lcdict['lcinfo']['module'].append(hdrinfo['module'])
lcdict['lcinfo']['output'].append(hdrinfo['output'])
lcdict['lcinfo']['ndet'].append(ndet)
# the objectinfo is not updated for the same object when appending to a
# light curve. FIXME: maybe it should be?
# update the varinfo for this light curve
lcdict['varinfo']['cdpp3_0'].append(hdrinfo['cdpp3_0'])
lcdict['varinfo']['cdpp6_0'].append(hdrinfo['cdpp6_0'])
lcdict['varinfo']['cdpp12_0'].append(hdrinfo['cdpp12_0'])
lcdict['varinfo']['pdcvar'].append(hdrinfo['pdcvar'])
lcdict['varinfo']['pdcmethod'].append(hdrinfo['pdcmethd'])
lcdict['varinfo']['aper_target_total_ratio'].append(hdrinfo['crowdsap'])
lcdict['varinfo']['aper_target_frac'].append(hdrinfo['flfrcsap'])
# update the light curve columns now
for key in datakeys:
if key.lower() in lcdict:
lcdict[key.lower()] = (
npconcatenate((lcdict[key.lower()], lcdata[key]))
)
for key in sapkeys:
if key.lower() in lcdict['sap']:
# normalize the current flux measurements if needed
                if normalize and key == 'SAP_FLUX':
LOGINFO('normalizing SAP_FLUX')
thislcdata = lcdata[key] / np.nanmedian(lcdata[key])
else:
thislcdata = lcdata[key]
lcdict['sap'][key.lower()] = (
npconcatenate((lcdict['sap'][key.lower()], thislcdata))
)
for key in pdckeys:
if key.lower() in lcdict['pdc']:
# normalize the current flux measurements if needed
                if normalize and key == 'PDCSAP_FLUX':
LOGINFO('normalizing PDCSAP_FLUX')
thislcdata = lcdata[key] / np.nanmedian(lcdata[key])
else:
thislcdata = lcdata[key]
lcdict['pdc'][key.lower()] = (
npconcatenate((lcdict['pdc'][key.lower()], thislcdata))
)
# append some of the light curve information into existing numpy arrays
# so we can sort on them later
lcdict['lc_channel'] = npconcatenate(
(lcdict['lc_channel'],
npfull_like(lcdata['TIME'],
hdrinfo['channel']))
)
lcdict['lc_skygroup'] = npconcatenate(
(lcdict['lc_skygroup'],
npfull_like(lcdata['TIME'],
hdrinfo['skygroup']))
)
lcdict['lc_module'] = npconcatenate(
(lcdict['lc_module'],
npfull_like(lcdata['TIME'],
hdrinfo['module']))
)
lcdict['lc_output'] = npconcatenate(
(lcdict['lc_output'],
npfull_like(lcdata['TIME'],
hdrinfo['output']))
)
lcdict['lc_quarter'] = npconcatenate(
(lcdict['lc_quarter'],
npfull_like(lcdata['TIME'],
hdrinfo['quarter']))
)
lcdict['lc_season'] = npconcatenate(
(lcdict['lc_season'],
npfull_like(lcdata['TIME'],
hdrinfo['season']))
)
lcdict['lc_campaign'] = npconcatenate(
(lcdict['lc_campaign'],
npfull_like(lcdata['TIME'],
hdrinfo['campaign']))
)
# otherwise, this is a new lcdict
else:
# form the lcdict
# the metadata is one-elem arrays because we might add on to them later
lcdict = {
'quarter':[hdrinfo['quarter']],
'season':[hdrinfo['season']],
'datarelease':[hdrinfo['data_rel']],
'campaign':[hdrinfo['campaign']], # this is None for KepPrime
'obsmode':[hdrinfo['obsmode']],
'objectid':hdrinfo['object'],
'lcinfo':{
'timesys':[hdrinfo['timesys']],
'bjdoffset':[hdrinfo['bjdrefi'] + hdrinfo['bjdreff']],
'exptime':[hdrinfo['exposure']],
'lcaperture':[lcaperturedata],
'aperpixused':[hdrinfo['npixsap']],
'aperpixunused':[hdrinfo['npixmiss']],
'pixarcsec':[(npabs(hdrinfo['cdelt1']) +
npabs(hdrinfo['cdelt2']))*3600.0/2.0],
'channel':[hdrinfo['channel']],
'skygroup':[hdrinfo['skygroup']],
'module':[hdrinfo['module']],
'output':[hdrinfo['output']],
'ndet':[ndet],
},
'objectinfo':{
'objectid':hdrinfo['object'], # repeated here for checkplot use
'keplerid':hdrinfo['keplerid'],
'ra':hdrinfo['ra_obj'],
'decl':hdrinfo['dec_obj'],
'pmra':hdrinfo['pmra'],
'pmdecl':hdrinfo['pmdec'],
'pmtotal':hdrinfo['pmtotal'],
'sdssg':hdrinfo['gmag'],
'sdssr':hdrinfo['rmag'],
'sdssi':hdrinfo['imag'],
'sdssz':hdrinfo['zmag'],
'kepmag':hdrinfo['kepmag'],
'teff':hdrinfo['teff'],
'logg':hdrinfo['logg'],
'feh':hdrinfo['feh'],
'ebminusv':hdrinfo['ebminusv'],
'extinction':hdrinfo['av'],
'starradius':hdrinfo['radius'],
'twomassuid':hdrinfo['tmindex'],
},
'varinfo':{
'cdpp3_0':[hdrinfo['cdpp3_0']],
'cdpp6_0':[hdrinfo['cdpp6_0']],
'cdpp12_0':[hdrinfo['cdpp12_0']],
'pdcvar':[hdrinfo['pdcvar']],
'pdcmethod':[hdrinfo['pdcmethd']],
'aper_target_total_ratio':[hdrinfo['crowdsap']],
'aper_target_frac':[hdrinfo['flfrcsap']],
},
'sap':{},
'pdc':{},
}
# get the LC columns
for key in datakeys:
lcdict[key.lower()] = lcdata[key]
for key in sapkeys:
lcdict['sap'][key.lower()] = lcdata[key]
for key in pdckeys:
lcdict['pdc'][key.lower()] = lcdata[key]
# turn some of the light curve information into numpy arrays so we can
# sort on them later
lcdict['lc_channel'] = npfull_like(lcdict['time'],
lcdict['lcinfo']['channel'][0])
lcdict['lc_skygroup'] = npfull_like(lcdict['time'],
lcdict['lcinfo']['skygroup'][0])
lcdict['lc_module'] = npfull_like(lcdict['time'],
lcdict['lcinfo']['module'][0])
lcdict['lc_output'] = npfull_like(lcdict['time'],
lcdict['lcinfo']['output'][0])
lcdict['lc_quarter'] = npfull_like(lcdict['time'],
lcdict['quarter'][0])
lcdict['lc_season'] = npfull_like(lcdict['time'],
lcdict['season'][0])
lcdict['lc_campaign'] = npfull_like(lcdict['time'],
lcdict['campaign'][0])
## END OF LIGHT CURVE CONSTRUCTION ##
# normalize the SAP and PDCSAP fluxes if needed
# FIXME: should we normalize the other stuff too?
if normalize:
lcdict['sap']['sap_flux'] = (
lcdict['sap']['sap_flux'] /
np.nanmedian(lcdict['sap']['sap_flux'])
)
lcdict['pdc']['pdcsap_flux'] = (
lcdict['pdc']['pdcsap_flux'] /
np.nanmedian(lcdict['pdc']['pdcsap_flux'])
)
# update the lcdict columns with the actual columns
lcdict['columns'] = (
[x.lower() for x in datakeys] +
['sap.%s' % x.lower() for x in sapkeys] +
['pdc.%s' % x.lower() for x in pdckeys] +
        ['lc_channel','lc_skygroup','lc_module',
         'lc_output','lc_quarter','lc_season','lc_campaign']
)
# return the lcdict at the end
return lcdict
def consolidate_kepler_fitslc(keplerid,
lcfitsdir,
normalize=True,
headerkeys=LCHEADERKEYS,
datakeys=LCDATAKEYS,
sapkeys=LCSAPKEYS,
pdckeys=LCPDCKEYS,
topkeys=LCTOPKEYS,
apkeys=LCAPERTUREKEYS):
'''This gets all light curves for the given keplerid in lcfitsdir.
Searches recursively in lcfitsdir for all of the files belonging to the
specified keplerid. Sorts the light curves by time. Returns an lcdict. This
is meant to be used for light curves across quarters.
NOTE: keplerid is an integer (without the leading zeros). This is usually
the KIC ID.
NOTE: if light curve time arrays contain nans, these and their associated
measurements will be sorted to the end of the final combined arrays.
If normalize == True, then each component light curve's SAP_FLUX and
PDCSAP_FLUX measurements will be normalized to 1.0 by dividing out the
median flux for the component light curve.
NOTE: The other flux related measurements, such as errors and backgrounds
WILL NOT be normalized (FIXME: for now).
'''
LOGINFO('looking for Kepler light curve FITS in %s for %s...' % (lcfitsdir,
keplerid))
# for Python 3.5 and up, use recursive glob, it appears to be absurdly
# faster than os.walk
if sys.version_info[:2] > (3,4):
matching = glob.glob(os.path.join(lcfitsdir,
'**',
'kplr%09i-*_llc.fits' % keplerid),
recursive=True)
LOGINFO('found %s files: %s' % (len(matching), repr(matching)))
# for Python < 3.5, use os.walk and glob
else:
# use the os.walk function to start looking for files in lcfitsdir
walker = os.walk(lcfitsdir)
matching = []
for root, dirs, files in walker:
for sdir in dirs:
searchpath = os.path.join(root,
sdir,
'kplr%09i-*_llc.fits' % keplerid)
foundfiles = glob.glob(searchpath)
if foundfiles:
matching.extend(foundfiles)
LOGINFO('found %s in dir: %s' % (repr(foundfiles),
os.path.join(root,sdir)))
# now that we've found everything, read them all in
if len(matching) > 0:
LOGINFO('consolidating...')
# the first file
consolidated = read_kepler_fitslc(matching[0],
headerkeys=headerkeys,
datakeys=datakeys,
sapkeys=sapkeys,
pdckeys=pdckeys,
topkeys=topkeys,
apkeys=apkeys,
normalize=normalize)
# get the rest of the files
for lcf in matching:
consolidated = read_kepler_fitslc(lcf,
appendto=consolidated,
headerkeys=headerkeys,
datakeys=datakeys,
sapkeys=sapkeys,
pdckeys=pdckeys,
topkeys=topkeys,
apkeys=apkeys,
normalize=normalize)
# get the sort indices
# we use time for the columns and quarters for the headers
LOGINFO('sorting by time...')
# NOTE: nans in time will be sorted to the end of the array
finiteind = npisfinite(consolidated['time'])
if npsum(finiteind) < consolidated['time'].size:
LOGWARNING('some time values are nan! '
'measurements at these times will be '
'sorted to the end of the column arrays.')
# get the sort index
column_sort_ind = npargsort(consolidated['time'])
# sort the columns by time
for col in consolidated['columns']:
if '.' in col:
key, subkey = col.split('.')
consolidated[key][subkey] = (
consolidated[key][subkey][column_sort_ind]
)
else:
consolidated[col] = consolidated[col][column_sort_ind]
# now sort the headers by quarters
header_sort_ind = npargsort(consolidated['quarter']).tolist()
# this is a bit convoluted, but whatever: list -> array -> list
for key in ('quarter', 'season', 'datarelease', 'obsmode'):
consolidated[key] = (
nparray(consolidated[key])[header_sort_ind].tolist()
)
for key in ('timesys','bjdoffset','exptime','lcaperture',
'aperpixused','aperpixunused','pixarcsec',
'channel','skygroup','module','output','ndet'):
consolidated['lcinfo'][key] = (
nparray(consolidated['lcinfo'][key])[header_sort_ind].tolist()
)
for key in ('cdpp3_0','cdpp6_0','cdpp12_0','pdcvar','pdcmethod',
'aper_target_total_ratio','aper_target_frac'):
consolidated['varinfo'][key] = (
nparray(consolidated['varinfo'][key])[header_sort_ind].tolist()
)
# finally, return the consolidated lcdict
return consolidated
# if we didn't find anything, complain
else:
LOGERROR('could not find any light curves '
'for %s in %s or its subdirectories' % (keplerid,
lcfitsdir))
return None
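# Minimal usage sketch (not part of the original module) showing the intended
# call order: consolidate all quarters for one KIC object, filter the result,
# and pickle it. filter_kepler_lcdict and kepler_lcdict_to_pkl are defined
# further down in this module.
def _example_consolidate_and_pickle(kicid, lcfitsdir, outfile=None):
    '''Illustrative only: consolidate, filter, and pickle one object's LCs.'''
    lcd = consolidate_kepler_fitslc(kicid, lcfitsdir, normalize=True)
    if lcd is None:
        return None
    filter_kepler_lcdict(lcd, filterflags=True, nanfilter='sap,pdc')
    return kepler_lcdict_to_pkl(lcd, outfile=outfile)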
########################
## READING K2 SFF LCs ##
########################
SFFTOPKEYS = LCTOPKEYS
SFFHEADERKEYS = LCHEADERKEYS + ['MASKTYPE','MASKINDE','NPIXSAP']
SFFDATAKEYS = ['T','FRAW','FCOR','ARCLENGTH','MOVING','CADENCENO']
def read_k2sff_lightcurve(lcfits):
'''
    This reads a K2 SFF (Vanderburg & Johnson 2014) light curve into an lcdict.
'''
# read the fits file
hdulist = pyfits.open(lcfits)
lchdr, lcdata = hdulist[1].header, hdulist[1].data
lctophdr = hdulist[0].header
hdulist.close()
hdrinfo = {}
# get the number of detections
ndet = lchdr['NAXIS2']
# get the info from the topheader
for key in SFFTOPKEYS:
if key in lctophdr and lctophdr[key] is not None:
hdrinfo[key.lower()] = lctophdr[key]
else:
hdrinfo[key.lower()] = None
# now get the values we want from the header
for key in SFFHEADERKEYS:
if key in lchdr and lchdr[key] is not None:
hdrinfo[key.lower()] = lchdr[key]
else:
hdrinfo[key.lower()] = None
# form the lcdict
# the metadata is one-elem arrays because we might add on to them later
lcdict = {
'quarter':[hdrinfo['quarter']],
'season':[hdrinfo['season']],
'datarelease':[hdrinfo['data_rel']],
'obsmode':[hdrinfo['obsmode']],
'objectid':hdrinfo['object'],
'campaign':[hdrinfo['campaign']],
'lcinfo':{
'timesys':[hdrinfo['timesys']],
'bjdoffset':[hdrinfo['bjdrefi'] + hdrinfo['bjdreff']],
'exptime':[hdrinfo['exposure']],
'lcapermaskidx':[hdrinfo['maskinde']],
'lcapermasktype':[hdrinfo['masktype']],
'aperpixused':[hdrinfo['npixsap']],
'aperpixunused':[None],
'pixarcsec':[None],
'channel':[hdrinfo['channel']],
'skygroup':[hdrinfo['skygroup']],
'module':[hdrinfo['module']],
'output':[hdrinfo['output']],
'ndet':[ndet],
},
'objectinfo':{
'keplerid':hdrinfo['keplerid'],
'ra':hdrinfo['ra_obj'],
'decl':hdrinfo['dec_obj'],
'pmra':hdrinfo['pmra'],
'pmdecl':hdrinfo['pmdec'],
'pmtotal':hdrinfo['pmtotal'],
'sdssg':hdrinfo['gmag'],
'sdssr':hdrinfo['rmag'],
'sdssi':hdrinfo['imag'],
'sdssz':hdrinfo['zmag'],
'kepmag':hdrinfo['kepmag'],
'teff':hdrinfo['teff'],
'logg':hdrinfo['logg'],
'feh':hdrinfo['feh'],
'ebminusv':hdrinfo['ebminusv'],
'extinction':hdrinfo['av'],
'starradius':hdrinfo['radius'],
'twomassuid':hdrinfo['tmindex'],
},
'varinfo':{
'cdpp3_0':[hdrinfo['cdpp3_0']],
'cdpp6_0':[hdrinfo['cdpp6_0']],
'cdpp12_0':[hdrinfo['cdpp12_0']],
'pdcvar':[hdrinfo['pdcvar']],
'pdcmethod':[hdrinfo['pdcmethd']],
'aptgttotrat':[hdrinfo['crowdsap']],
'aptgtfrac':[hdrinfo['flfrcsap']],
},
}
# get the LC columns
for key in SFFDATAKEYS:
lcdict[key.lower()] = lcdata[key]
# add some of the light curve information to the data arrays so we can sort
# on them later
lcdict['channel'] = npfull_like(lcdict['t'],
lcdict['lcinfo']['channel'][0])
lcdict['skygroup'] = npfull_like(lcdict['t'],
lcdict['lcinfo']['skygroup'][0])
lcdict['module'] = npfull_like(lcdict['t'],
lcdict['lcinfo']['module'][0])
lcdict['output'] = npfull_like(lcdict['t'],
lcdict['lcinfo']['output'][0])
lcdict['quarter'] = npfull_like(lcdict['t'],
lcdict['quarter'][0])
lcdict['season'] = npfull_like(lcdict['t'],
lcdict['season'][0])
lcdict['campaign'] = npfull_like(lcdict['t'],
lcdict['campaign'][0])
# update the lcdict columns with the actual columns
lcdict['columns'] = (
[x.lower() for x in SFFDATAKEYS] +
['channel','skygroup','module','output','quarter','season','campaign']
)
# return the lcdict at the end
return lcdict
##################
## INPUT/OUTPUT ##
##################
def kepler_lcdict_to_pkl(lcdict,
outfile=None):
'''This simply writes the lcdict to a pickle.
'''
if not outfile:
outfile = '%s-keplc.pkl' % lcdict['objectid'].replace(' ','-')
# we're using pickle.HIGHEST_PROTOCOL here, this will make Py3 pickles
# unreadable for Python 2.7
with open(outfile,'wb') as outfd:
pickle.dump(lcdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)
return os.path.abspath(outfile)
def read_kepler_pklc(picklefile):
'''This turns the pickled lightcurve back into an lcdict.
'''
try:
with open(picklefile, 'rb') as infd:
lcdict = pickle.load(infd)
except UnicodeDecodeError:
with open(picklefile,'rb') as infd:
lcdict = pickle.load(infd, encoding='latin1')
LOGWARNING('pickle %s was probably from Python 2 '
'and failed to load without using "latin1" encoding. '
'This is probably a numpy issue: '
                   'http://stackoverflow.com/q/11305790' % picklefile)
return lcdict
##########################
## KEPLER LC PROCESSING ##
##########################
def stitch_kepler_lcdict(lcdict):
'''
This stitches Kepler light curves together across quarters.
FIXME: implement this.
'''
def filter_kepler_lcdict(lcdict,
filterflags=True,
nanfilter='sap,pdc',
timestoignore=None):
'''This filters the Kepler light curve dict.
By default, this function removes points in the Kepler LC that have ANY
quality flags set. Also removes nans.
timestoignore is a list of tuples containing start and end times to mask:
[(time1_start, time1_end), (time2_start, time2_end), ...]
This function filters the dict IN PLACE!
'''
cols = lcdict['columns']
# filter all bad LC points as noted by quality flags
if filterflags:
nbefore = lcdict['time'].size
filterind = lcdict['sap_quality'] == 0
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][filterind]
else:
lcdict[col] = lcdict[col][filterind]
nafter = lcdict['time'].size
LOGINFO('applied quality flag filter, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
if nanfilter and nanfilter == 'sap,pdc':
notnanind = (
npisfinite(lcdict['sap']['sap_flux']) &
npisfinite(lcdict['pdc']['pdcsap_flux'])
)
elif nanfilter and nanfilter == 'sap':
notnanind = npisfinite(lcdict['sap']['sap_flux'])
elif nanfilter and nanfilter == 'pdc':
notnanind = npisfinite(lcdict['pdc']['pdcsap_flux'])
# remove nans from all columns
if nanfilter:
nbefore = lcdict['time'].size
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][notnanind]
else:
lcdict[col] = lcdict[col][notnanind]
nafter = lcdict['time'].size
LOGINFO('removed nans, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# exclude all times in timestoignore
if (timestoignore and
isinstance(timestoignore, list) and
len(timestoignore) > 0):
        exclind = npfull_like(lcdict['time'], True, dtype=bool)
        nbefore = exclind.size
        # build a boolean mask that is False inside every interval to ignore
        for ignoretime in timestoignore:
            time0, time1 = ignoretime[0], ignoretime[1]
            thismask = (lcdict['time'] > time0) & (lcdict['time'] < time1)
            exclind = exclind & ~thismask
# apply the masks
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][exclind]
else:
lcdict[col] = lcdict[col][exclind]
nafter = lcdict['time'].size
LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
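# Example of the timestoignore format accepted above (the interval values are
# hypothetical):
#
# filter_kepler_lcdict(lcd,
#                      filterflags=True,
#                      nanfilter='sap,pdc',
#                      timestoignore=[(255.0, 256.5), (571.2, 572.0)])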
###################
## KEPLER LC EPD ##
###################
def _epd_function(coeffs, fluxes, xcc, ycc, bgv, bge):
'''
This is the EPD function to fit.
'''
epdf = (
coeffs[0] +
coeffs[1]*npsin(2*MPI*xcc) + coeffs[2]*npcos(2*MPI*xcc) +
coeffs[3]*npsin(2*MPI*ycc) + coeffs[4]*npcos(2*MPI*ycc) +
coeffs[5]*npsin(4*MPI*xcc) + coeffs[6]*npcos(4*MPI*xcc) +
coeffs[7]*npsin(4*MPI*ycc) + coeffs[8]*npcos(4*MPI*ycc) +
coeffs[9]*bgv +
coeffs[10]*bge
)
return epdf
def _epd_residual(coeffs, fluxes, xcc, ycc, bgv, bge):
'''
This is the residual function to minimize using scipy.optimize.leastsq.
'''
f = _epd_function(coeffs, fluxes, xcc, ycc, bgv, bge)
residual = fluxes - f
return residual
def epd_kepler_lightcurve(lcdict,
xccol='mom_centr1',
yccol='mom_centr2',
timestoignore=None,
filterflags=True,
writetodict=True,
epdsmooth=5):
'''This runs EPD on the Kepler light curve.
Following Huang et al. 2015, we fit and subtract the following EPD function:
f = c0 +
c1*sin(2*pi*x) + c2*cos(2*pi*x) + c3*sin(2*pi*y) + c4*cos(2*pi*y) +
c5*sin(4*pi*x) + c6*cos(4*pi*x) + c7*sin(4*pi*y) + c8*cos(4*pi*y) +
c9*bgv + c10*bge
timestoignore is a list of tuples containing start and end times to mask
when fitting the EPD function:
[(time1_start, time1_end), (time2_start, time2_end), ...]
NOTES:
- this function returns times and mags by default
- by default, this function removes points in the Kepler LC that have ANY
quality flags set
if writetodict is set, adds the following columns to the lcdict:
epd_time = time array
epd_sapflux = uncorrected flux before EPD
epd_epdsapflux = corrected flux after EPD
epd_epdsapcorr = EPD flux corrections
epd_bkg = background array
epd_bkg_err = background errors array
epd_xcc = xcoord array
epd_ycc = ycoord array
epd_quality = quality flag array
and updates the 'columns' list in the lcdict as well.
'''
times, fluxes, background, background_err = (lcdict['time'],
lcdict['sap']['sap_flux'],
lcdict['sap']['sap_bkg'],
lcdict['sap']['sap_bkg_err'])
xcc = lcdict[xccol]
ycc = lcdict[yccol]
flags = lcdict['sap_quality']
# filter all bad LC points as noted by quality flags
if filterflags:
nbefore = times.size
filterind = flags == 0
times = times[filterind]
fluxes = fluxes[filterind]
background = background[filterind]
background_err = background_err[filterind]
xcc = xcc[filterind]
ycc = ycc[filterind]
flags = flags[filterind]
nafter = times.size
LOGINFO('applied quality flag filter, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# remove nans
find = (npisfinite(xcc) & npisfinite(ycc) &
npisfinite(times) & npisfinite(fluxes) &
npisfinite(background) & npisfinite(background_err))
nbefore = times.size
times = times[find]
fluxes = fluxes[find]
background = background[find]
background_err = background_err[find]
xcc = xcc[find]
ycc = ycc[find]
flags = flags[find]
nafter = times.size
LOGINFO('removed nans, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# exclude all times in timestoignore
if (timestoignore and
isinstance(timestoignore, list) and
len(timestoignore) > 0):
        exclind = npfull_like(times, True, dtype=bool)
        nbefore = times.size
        # build a boolean mask that is False inside every interval to ignore
        for ignoretime in timestoignore:
            time0, time1 = ignoretime[0], ignoretime[1]
            thismask = (times > time0) & (times < time1)
            exclind = exclind & ~thismask
# quantities after masks have been applied
times = times[exclind]
fluxes = fluxes[exclind]
background = background[exclind]
background_err = background_err[exclind]
xcc = xcc[exclind]
ycc = ycc[exclind]
flags = flags[exclind]
nafter = times.size
LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# now that we're all done, we can do EPD
# first, smooth the light curve
smoothedfluxes = medfilt(fluxes, epdsmooth)
# initial fit coeffs
initcoeffs = npones(11)
# fit the the smoothed mags and find better coeffs
leastsqfit = leastsq(_epd_residual,
initcoeffs,
args=(smoothedfluxes,
xcc, ycc,
background, background_err))
# if the fit succeeds, then get the EPD fluxes
if leastsqfit[-1] in (1,2,3,4):
fitcoeffs = leastsqfit[0]
epdfit = _epd_function(fitcoeffs,
fluxes,
xcc,
ycc,
background,
background_err)
epdfluxes = npmedian(fluxes) + fluxes - epdfit
# write these to the dictionary if requested
if writetodict:
lcdict['epd'] = {}
lcdict['epd']['time'] = times
lcdict['epd']['sapflux'] = fluxes
lcdict['epd']['epdsapflux'] = epdfluxes
lcdict['epd']['epdsapcorr'] = epdfit
lcdict['epd']['bkg'] = background
lcdict['epd']['bkg_err'] = background_err
lcdict['epd']['xcc'] = xcc
lcdict['epd']['ycc'] = ycc
lcdict['epd']['quality'] = flags
for newcol in ['epd.time','epd.sapflux',
'epd.epdsapflux','epd.epdsapcorr',
'epd.bkg','epd.bkg.err',
'epd.xcc','epd.ycc',
'epd.quality']:
if newcol not in lcdict['columns']:
lcdict['columns'].append(newcol)
return times, epdfluxes, fitcoeffs, epdfit
else:
LOGERROR('could not fit EPD function to light curve')
return None, None, None, None
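# Typical call pattern for the EPD correction above (illustrative only; `lcd`
# is assumed to be an lcdict from read_kepler_fitslc/consolidate_kepler_fitslc
# that has already been quality- and nan-filtered):
#
# etimes, epdflux, coeffs, epdfit = epd_kepler_lightcurve(lcd, epdsmooth=5)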
# FIXME: this is only available if sklearn is available. not sure if we should
# add yet another dependency
if SKLEARN:
def rfepd_kepler_lightcurve(lcdict,
xccol='mom_centr1',
yccol='mom_centr2',
timestoignore=None,
filterflags=True,
writetodict=True,
epdsmooth=23,
decorr='xcc,ycc',
nrftrees=200):
'''
This uses a RandomForestRegressor to fit and correct K2 light curves.
Fits the X and Y positions, and the background and background error.
timestoignore is a list of tuples containing start and end times to mask
when fitting the EPD function:
[(time1_start, time1_end), (time2_start, time2_end), ...]
By default, this function removes points in the Kepler LC that have ANY
quality flags set.
if writetodict is set, adds the following columns to the lcdict:
rfepd_time = time array
rfepd_sapflux = uncorrected flux before EPD
rfepd_epdsapflux = corrected flux after EPD
rfepd_epdsapcorr = EPD flux corrections
rfepd_bkg = background array
rfepd_bkg_err = background errors array
rfepd_xcc = xcoord array
rfepd_ycc = ycoord array
rfepd_quality = quality flag array
and updates the 'columns' list in the lcdict as well.
'''
times, fluxes, background, background_err = (
lcdict['time'],
lcdict['sap']['sap_flux'],
lcdict['sap']['sap_bkg'],
lcdict['sap']['sap_bkg_err']
)
xcc = lcdict[xccol]
ycc = lcdict[yccol]
flags = lcdict['sap_quality']
# filter all bad LC points as noted by quality flags
if filterflags:
nbefore = times.size
filterind = flags == 0
times = times[filterind]
fluxes = fluxes[filterind]
background = background[filterind]
background_err = background_err[filterind]
xcc = xcc[filterind]
ycc = ycc[filterind]
flags = flags[filterind]
nafter = times.size
LOGINFO('applied quality flag filter, ndet before = %s, '
'ndet after = %s'
% (nbefore, nafter))
# remove nans
find = (npisfinite(xcc) & npisfinite(ycc) &
npisfinite(times) & npisfinite(fluxes) &
npisfinite(background) & npisfinite(background_err))
nbefore = times.size
times = times[find]
fluxes = fluxes[find]
background = background[find]
background_err = background_err[find]
xcc = xcc[find]
ycc = ycc[find]
flags = flags[find]
nafter = times.size
LOGINFO('removed nans, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# exclude all times in timestoignore
if (timestoignore and
isinstance(timestoignore, list) and
len(timestoignore) > 0):
            exclind = npfull_like(times, True, dtype=bool)
            nbefore = times.size
            # build a boolean mask that is False inside every interval to ignore
            for ignoretime in timestoignore:
                time0, time1 = ignoretime[0], ignoretime[1]
                thismask = (times > time0) & (times < time1)
                exclind = exclind & ~thismask
# quantities after masks have been applied
times = times[exclind]
fluxes = fluxes[exclind]
background = background[exclind]
background_err = background_err[exclind]
xcc = xcc[exclind]
ycc = ycc[exclind]
flags = flags[exclind]
nafter = times.size
LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# now that we're all done, we can do EPD
# set up the regressor
RFR = RandomForestRegressor(n_estimators=nrftrees)
if decorr == 'xcc,ycc,bgv,bge':
# collect the features and target variable
features = npcolumn_stack((xcc,ycc,background,background_err))
elif decorr == 'xcc,ycc':
# collect the features and target variable
features = npcolumn_stack((xcc,ycc))
elif decorr == 'bgv,bge':
# collect the features and target variable
features = npcolumn_stack((background,background_err))
else:
LOGERROR("couldn't understand decorr, not decorrelating...")
return None
# smooth the light curve
if epdsmooth:
smoothedfluxes = medfilt(fluxes, epdsmooth)
else:
smoothedfluxes = fluxes
# fit, then generate the predicted values, then get corrected values
RFR.fit(features, smoothedfluxes)
flux_corrections = RFR.predict(features)
        corrected_fluxes = npmedian(fluxes) + fluxes - flux_corrections
import os
import numpy as np
from tensorflow import set_random_seed
from sklearn.preprocessing import MinMaxScaler
from keras import Input
from keras.layers import (Dense, LeakyReLU, Reshape,
Conv2D, Conv2DTranspose,
AveragePooling2D, BatchNormalization,
Flatten, Dropout)
from keras.models import Model
from keras.optimizers import RMSprop, Adam
from skimage.io import imread, imsave
from skimage.transform import resize, rescale
from sklearn.metrics import (roc_auc_score, log_loss,
confusion_matrix)
import matplotlib.pyplot as plt
from time import time
from keras.callbacks import TensorBoard
import keras.backend as K
config_params = {
'data_dir' : 'dsilt-ml-code/09 Generative Models/anime_faces_data/',
'save_dir' : 'dsilt-ml-code/09 Generative Models/anime_faces_generated/',
'random_seed': 14,
'latent_dim' : 50,
'image_pixel_height' : 64,
'image_pixel_width' : 64,
'image_color_channels' : 3,
'prob_of_flipped_label': 0.1,
'nbr_images_limit': 200,
'batch_size' : 20, #20
'nbr_epochs' : 10, #60
'epochs_per_network_b4_switching': 2,
'discriminator_learning_rate': 0.0002, #0.0002
'discriminator_momentum': 0.5,
'generator_learning_rate': 0.004, #0.0004
'generator_momentum': 0.5
}
np.random.seed(config_params['random_seed'])
set_random_seed(config_params['random_seed'])
callbacks = [
TensorBoard(
log_dir='dsilt-ml-code/09 Generative Models/logs/gan_{}'.format(time()),
histogram_freq=2,
batch_size=config_params['batch_size'],
write_graph=True,
write_grads=True,
write_images=True
)
]
# Build the generator
# Generator will take random input
gen_input = Input(shape=(config_params['latent_dim'],))
gen_l1 = Dense(1 * 32 * 32)(gen_input) # orig 128 * 32 * 32
gen_l1 = BatchNormalization()(gen_l1)
gen_l1 = LeakyReLU()(gen_l1)
gen_l1 = Reshape((32, 32, 1))(gen_l1)
gen_l2 = Conv2D(filters=256, kernel_size=(5, 5),
strides=(1, 1),
padding='same',
activation=None,
kernel_initializer='glorot_uniform',
bias_initializer='zeros')(gen_l1)
gen_l2 = BatchNormalization()(gen_l2)
gen_l2 = LeakyReLU()(gen_l2)
gen_l3 = Conv2DTranspose(filters=256, kernel_size=(4, 4),
strides=(2, 2), padding='same',
activation=None,
kernel_initializer='glorot_uniform',
bias_initializer='zeros')(gen_l2)
gen_l3 = BatchNormalization()(gen_l3)
gen_l3 = LeakyReLU()(gen_l3)
gen_l4 = Conv2D(filters=config_params['image_color_channels'],
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
activation='tanh',
kernel_initializer='glorot_uniform',
bias_initializer='zeros')(gen_l3)
generator = Model(inputs=gen_input,
outputs=gen_l4,
name='generator')
print(generator.summary())
# Do not compile generator with an optimizer or loss
# Build the discriminator
# Discriminator will take real and fake image input
dis_input = Input(shape=(config_params['image_pixel_height'],
config_params['image_pixel_width'],
config_params['image_color_channels']))
dis_l1 = Conv2D(filters=128, kernel_size=(5, 5),
strides=(1, 1),
padding='valid',
activation=None,
kernel_initializer='glorot_uniform',
bias_initializer='zeros')(dis_input)
dis_l1 = BatchNormalization()(dis_l1)
dis_l1 = LeakyReLU()(dis_l1)
dis_l1 = AveragePooling2D(pool_size=(2, 2),
strides=None,
padding='valid')(dis_l1)
dis_l1 = Flatten()(dis_l1)
dis_l1 = Dropout(0.4)(dis_l1)
dis_l2 = Dense(1, activation='sigmoid',
kernel_initializer='glorot_uniform',
bias_initializer='zeros')(dis_l1)
discriminator = Model(inputs=dis_input,
outputs=dis_l2,
name='discriminator')
print(discriminator.summary())
discriminator_optimizer = Adam(lr=config_params['discriminator_learning_rate'],
beta_1=config_params['discriminator_momentum'],
epsilon=None,
decay=0.0)
discriminator.compile(optimizer=discriminator_optimizer,
loss='binary_crossentropy',
metrics=['accuracy'])
# Bring everything together to create the GAN
gan_input = Input(shape=(config_params['latent_dim'],))
gan_output = discriminator(generator(gan_input))
gan = Model(inputs=gan_input,
outputs=gan_output,
name='gan')
gan_optimizer = Adam(lr=config_params['generator_learning_rate'],
beta_1=config_params['generator_momentum'],
epsilon=None,
decay=0.0)
gan.compile(optimizer=gan_optimizer,
loss='binary_crossentropy')
print(gan.summary())
# Prepare the real data
file_list = os.listdir(config_params['data_dir'])[:config_params['nbr_images_limit']]
print("Number of total samples:", len(file_list))
real_images = np.array([resize(imread(os.path.join(config_params['data_dir'], file_name)), (64, 64)) for file_name in file_list])
real_images /= np.max(real_images) # input images are already scaled, but never hurts to be sure
plt.imshow(real_images[0])
plt.show()
def smooth_labels(y, min_label=0, max_label=1):
"""
Converts data labels from hard to soft by
adding random noise to labels while keeping them
in the range of min_label-max_label
Arguments:
y: numpy array of labels
min_label: the minimum label value, usually 0
max_label: the maximum label value, usually 1
Returns:
numpy array of labels with random noise
"""
    noise = np.random.uniform(low=0.1, high=0.2, size=y.shape[0]).reshape(y.shape[0], 1)
    # push hard labels toward the middle: ones become ~0.8-0.9, zeros ~0.1-0.2
    smoothed = np.where(y > 0.5, np.subtract(y, noise), np.add(y, noise))
    return np.clip(smoothed, min_label, max_label)
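# Quick sanity check for the smoothing above (illustrative values): ones map to
# roughly 0.8-0.9 and zeros to roughly 0.1-0.2, e.g.
# smooth_labels(np.ones((4, 1))), smooth_labels(np.zeros((4, 1)))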
def prepare_real_data(real_input_data, nbr_samples, prob_of_flipped_label, apply_label_smoothing=False):
"""
Samples training data to create a mini-batch with labels
Arguments:
real_input_data: a numpy array of training data
nbr_samples: number of training samples in 1 batch
prob_of_flipped_label: percentage of labels to set incorrectly
(this helps the generator learn better)
apply_label_smoothing: if True, add random noise to label
(label smoothing should always be done after flipping)
Returns:
tuple: (numpy array of training data, numpy array of labels)
"""
sample_indices = np.random.randint(real_input_data.shape[0], size=nbr_samples)
real_samples = real_input_data[sample_indices,:]
real_labels = np.ones((nbr_samples, 1))
flipped_labels = np.array([1 if np.random.uniform() <= prob_of_flipped_label else 0 for x in range(nbr_samples)]).reshape(nbr_samples, 1)
real_labels = np.subtract(real_labels, flipped_labels)
    if apply_label_smoothing:
        real_labels = smooth_labels(real_labels)
return real_samples, real_labels
def scale_generated_fakes(x):
"""
Scales input to the range (0, 1)
Arguments:
x: a numpy array
Returns:
numpy array scaled to (0, 1)
"""
if np.min(x) < 0:
        return (x + np.min(x)*-1) / (np.max(x) + np.min(x)*-1)
import numpy as np
from rand_param_envs.walker2d_rand_params import Walker2DRandParamsEnv
from . import register_env
def read_log_params(log_file):
params_dict = {}
with open(log_file) as f:
lines = f.readlines()
cur_key = None
for line in lines:
if "'" in line:
if ")" in line:
last_entry = line.split(")")[0].split("[")[1].split("]")[0].split(",")
#print(last_entry)
last_entry_float = [float(s) for s in last_entry]
params_dict[cur_key].append(np.array(last_entry_float))
key = line.split("'")[1]
#print('key is %s' %key)
cur_key = key
params_dict[key] = []
if "(" in line:
first_entry = line.split("(")[1].split("[")[2].split("]")[0].split(",")
#print(first_entry)
first_entry_float = [float(s) for s in first_entry]
params_dict[cur_key].append(np.array(first_entry_float))
else:
entry = line.split("[")[1].split("]")[0].split(",")
entry_float = [float(s) for s in entry]
params_dict[cur_key].append(entry_float)
for key, value in params_dict.items():
        params_dict[key] = np.array(params_dict[key])
    return params_dict
#
# Read the trajectory file generated by gravity_test and plot it.
#
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from pylab import *
def loadTrajectory(file):
"""load the trajectory data from the file"""
import re
# format is et, x, y, z, vx, vy, vz
re1='(\\s+)' # White Space
re2='([+-]?\\d*\\.\\d+)(?![-+0-9\\.])' # Float
rg = re.compile(re1+re2+re1+re2+re1+re2+re1+re2+re1+re2+re1+re2+re1+re2,re.IGNORECASE|re.DOTALL)
x = []
y = []
z = []
f = open(file, 'r')
for line in f:
m = rg.search(line)
if m:
if (m.group(4)!=''):
x = append(x, float(m.group(4)))
y = append(y, float(m.group(6)))
z = append(z, float(m.group(8)))
return x,y,z
def axisEqual3D(ax):
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:,1] - extents[:,0]
centers = np.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize/2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
#load the trajectory [this is the output from the gravity test case]
x,y,z = loadTrajectory('../../bin/gravity_test.txt')
#open the figure:
fig = plt.figure(figsize=(20,20))
ax = fig.add_subplot(111, projection='3d')
#plot orbit:
ax.plot(x,y,z, label='trajectory')
ax.set_xlabel('x [km]')
ax.set_ylabel('y [km]')
ax.set_zlabel('z [km]')
#plot sphere:
# Note: matplotlib doesn't do the line-hiding correctly for the orbit...
if (False):
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = 6378.0 * np.outer(np.cos(u), np.sin(v))
y = 6378.0 * np.outer(np.sin(u), np.sin(v))
    z = 6378.0 * np.outer(np.ones(np.size(u)), np.cos(v))
import sys
import random
import argparse
import numpy as np
import sys
import os
import gc
import utils
from scipy.io import loadmat
from scipy import spatial
import meshio
from plyfile import PlyData
"""
Generate training data in the form of points for meshes in local neighborhoods.
"""
sys.setrecursionlimit(10000)
def ensure_dir_exists(d):
if not os.path.exists(d):
os.makedirs(d)
def generate_sample_counts(entries, total_count):
counts = np.zeros(len(entries), dtype=int)
for i in range(total_count):
ind = np.random.randint(len(entries))
counts[ind] += 1
return counts
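# Note: the loop above is equivalent to a single uniform multinomial draw, e.g.
# counts = np.random.multinomial(total_count, np.ones(len(entries)) / len(entries))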
def area_normals(verts, faces):
coords = verts[faces]
vec_A = coords[:, 1, :] - coords[:, 0, :]
vec_B = coords[:, 2, :] - coords[:, 0, :]
raw_normal = np.cross(vec_A, vec_B)
return raw_normal
def uniform_sample_surface(verts, faces, n_pts):
areaN = area_normals(verts, faces)
face_areas = 0.5 * np.linalg.norm(areaN, axis=-1)
# chose which faces
face_inds = np.random.choice(faces.shape[0], size=(n_pts,), replace=True, p=face_areas/np.sum(face_areas))
# Get barycoords for each sample
r1_sqrt = np.sqrt(np.random.rand(n_pts))
r2 = np.random.rand(n_pts)
bary_vals = np.zeros((n_pts, 3))
bary_vals[:, 0] = 1. - r1_sqrt
bary_vals[:, 1] = r1_sqrt * (1. - r2)
bary_vals[:, 2] = r1_sqrt * r2
return face_inds, bary_vals
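# Illustrative helper (not in the original script; the name is an assumption):
# convert the sampled face indices and barycentric weights into 3D points.
def barycentric_to_points(verts, faces, face_inds, bary_vals):
    corners = verts[faces[face_inds]]  # (n_pts, 3, 3): the three corners per sample
    return np.sum(corners * bary_vals[:, :, None], axis=1)  # barycentric average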
def get_samples(verts, faces, n_pts):
face_inds, bary_vals = uniform_sample_surface(verts, faces, n_pts)
# face_normals = igl.per_face_normals(verts, faces, np.array((0., 0., 0.,)))
areaN = area_normals(verts, faces)
    face_normals = areaN / np.linalg.norm(areaN, axis=-1)[:, np.newaxis]
import numpy as np
from mpi4py import MPI
from mpi4py_fft import PFFT, newDistArray, DistArray
import time
import copy as cp
def get_local_mesh(FFT, L):
"""Returns local mesh."""
X = np.ogrid[FFT.local_slice(False)]
N = FFT.global_shape()
for i in range(len(N)):
X[i] = (X[i]*L[i]/N[i])
X = [np.broadcast_to(x, FFT.shape(False)) for x in X]
return X
def get_local_wavenumbermesh(FFT, L):
"""Returns local wavenumber mesh."""
s = FFT.local_slice()
N = FFT.global_shape()
# Set wavenumbers in grid
k = [np.fft.fftfreq(n, 1./n).astype(int) for n in N[:-1]]
k.append(np.fft.rfftfreq(N[-1], 1./N[-1]).astype(int))
K = [ki[si] for ki, si in zip(k, s)]
Ks = np.meshgrid(*K, indexing='ij', sparse=True)
Lp = 2*np.pi/L
for i in range(ndim):
Ks[i] = (Ks[i]*Lp[i]).astype(float)
return [np.broadcast_to(k, FFT.shape(True)) for k in Ks]
comm = MPI.COMM_WORLD
subcomm = comm.Split()
print(subcomm)
nvars = 8
ndim = 2
axes = tuple(range(ndim))
N = np.array([nvars] * ndim, dtype=int)
print(N, axes)
fft = PFFT(subcomm, N, axes=axes, dtype=np.float64, slab=True)
# L = np.array([2*np.pi] * ndim, dtype=float)
L = np.array([1] * ndim, dtype=float)
print(fft.subcomm)
X = get_local_mesh(fft, L)
K = get_local_wavenumbermesh(fft, L)
K = np.array(K).astype(float)
K2 = np.sum(K*K, 0, dtype=float)
u = newDistArray(fft, False)
print(type(u))
print(u.subcomm)
uex = newDistArray(fft, False)
u[:] = np.sin(2 * np.pi * X[0]) * np.sin(2 * np.pi* X[1])
print(u.shape, X[0].shape)
# exit()
uex[:] = -2.0 * (2.0 * np.pi) ** 2 * np.sin(2 * np.pi * X[0]) * np.sin(2 * np.pi * X[1])
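# uex is the analytic check: the Laplacian of sin(2*pi*x)*sin(2*pi*y) on the
# unit square is -2*(2*pi)**2 * sin(2*pi*x)*sin(2*pi*y), which a spectral
# Laplacian (multiplication by -K2 in Fourier space) should reproduce.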
import matplotlib.pyplot as plt
import numpy as np
import utils.util as util
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize
plt.rcParams["figure.figsize"] = (10,10) # width x height
np.random.seed(0)
import wfdb
import os
import numpy as np
import tensorflow as tf
import sys
from utils import save_dict
import more_itertools as mit
model_path = 'ecg_ResNet.h5'
def getDataSet(sample_path, slice, label):
sig, fields = wfdb.rdsamp(sample_path)
sig = sig[:, 1]
slice_tmp = np.zeros(SIZE)
ann = wfdb.rdann(sample_path, 'atr')
Rlocation = ann.sample
Rclass = ann.symbol
Rclass = [0] * len(Rclass)
sample_descrip = fields['comments']
ann_note = np.array(ann.aux_note)
af_start = np.where((ann_note == '(AFIB') | (ann_note == '(AFL'))[0]
af_end = np.where(ann_note == '(N')[0]
end_points = []
for j in range(len(af_start)):
if sample_descrip == ['paroxysmal atrial fibrillation'] or sample_descrip == ['persistent atrial fibrillation']:
start_end = [[af_start[j], af_end[j]]]
end_points.extend(start_end)
if end_points != [] and sample_descrip == ['persistent atrial fibrillation']:
Rclass[end_points[j][0]:end_points[j][1] + 1] = np.ones(end_points[j][1] - end_points[j][0] + 1, int)
if end_points != [] and sample_descrip == ['paroxysmal atrial fibrillation']:
Rclass[end_points[j][0]:end_points[j][1] + 1] = np.ones(end_points[j][1] - end_points[j][0] + 1, int)
for i in range(1, len(Rlocation)):
slice_per_peak = sig[Rlocation[i-1] : Rlocation[i]]
label_per_peak = Rclass[i]
if len(slice_per_peak) <= SIZE:
slice_tmp[0:len(slice_per_peak)] = slice_per_peak
slice.append(slice_tmp)
label.append(label_per_peak)
else:
slice_tmp = slice_per_peak[0:len(slice_tmp)]
slice.append(slice_tmp)
label.append(label_per_peak)
return slice, label, Rlocation, Rclass
def Process(dataSet, labelSet):
dataSet = np.array(dataSet).reshape(-1, SIZE)
labelSet = np.array(labelSet).reshape(-1, 1)
train_ds = np.hstack((dataSet, labelSet))
X = train_ds[:, :SIZE].reshape(-1, SIZE, 1)
Y = train_ds[:, SIZE]
model = tf.keras.models.load_model(filepath=model_path)
Y_pred = model.predict(X)
    Y_pred = np.argmax(Y_pred, axis=1)
# -*- coding: utf-8 -*-
import numpy as np
import pytest
try:
from mgmetis.parmetis import part_kway
from mpi4py import MPI
comm = MPI.COMM_WORLD
has_mpi = True
except (ImportError, ModuleNotFoundError):
has_mpi = False
def create_graph(dtype=None):
# NOTE: test the example in the documentation
xadj = [int(x) for x in "0 2 5 8 11 13 16 20 24 28 31 33 36 39 42 44".split()]
adjncy = [
int(x)
for x in "1 5 0 2 6 1 3 7 2 4 8 3 9 0 6 10 1 5 7 11 2 6 8 12 3 7 9 13 4 8 14 5 11 6 10 12 7 11 13 8 12 14 9 13".split()
]
if dtype is None:
return xadj, adjncy
return np.asarray(xadj, dtype=dtype), np.asarray(adjncy, dtype=dtype)
def split_graph(rank, dtype=None):
xadj, adjncy = create_graph(int)
n = xadj.size // 2
if rank == 0:
xadj0 = xadj[: n + 1].copy()
adjs0 = adjncy[: xadj0[-1]].copy()
if dtype is None:
return list(xadj0), (adjs0)
return np.asarray(xadj0, dtype=dtype), np.asarray(adjs0, dtype=dtype)
xadj1 = xadj[n:].copy()
adjs1 = adjncy[xadj1[0] :].copy()
xadj1 -= xadj1[0]
if dtype is None:
return list(xadj1), list(adjs1)
    return np.asarray(xadj1, dtype=dtype), np.asarray(adjs1, dtype=dtype)
import time
import math
import numpy as np
import numpy.linalg as npl
from cvxopt import solvers as cvxSolvers
from cvxopt import matrix as cvxMatrix
from copy import deepcopy
import PyCommon.modules.Optimization.csQPOASES as cqp
from PyCommon.modules.Simulator import csDartModel as cdm
# import PyCommon.modules.pydart2 as pydart
import pydart2 as pydart
from PyCommon.modules.Motion import ysMotion as ym
def setTimeStamp(timeStamp, timeIndex, prevTime):
if timeIndex == 0:
prevTime = time.time()
if len(timeStamp) < timeIndex + 1:
timeStamp.append(0.)
curTime = time.time()
timeStamp[timeIndex] += curTime - prevTime
prevTime = curTime
timeIndex += 1
return timeStamp, timeIndex, prevTime
def makeFrictionCone(skeleton, world, model, bodyIDsToCheck, numFrictionBases):
"""
a number of basis is numFrictionBases+1
:type skeleton: ym.JointSkeleton
:type world: pydart.World
:type model: cdm.DartModel
:type bodyIDsToCheck: list[int]
:type numFrictionBases: int
:rtype:
"""
cBodyIds, cPositions, cPositionsLocal, cVelocities = model.getContactPoints(bodyIDsToCheck)
N = None
D = None
E = None
cNum = len(cBodyIds)
if cNum == 0:
return len(cBodyIds), cBodyIds, cPositions, cPositionsLocal, cVelocities, None, None, None, None, None
d = [None]*numFrictionBases # type: np.ndarray
DOFs = model.getDOFs()
for idx in range(len(cBodyIds)):
body = model.getBody(cBodyIds[idx])
# jacobian = body.world_jacobian(cPositionsLocal[idx])
jacobian = body.linear_jacobian(cPositionsLocal[idx])
n = np.array([[0., 1., 0.]]).T
JTn = np.dot(jacobian.T, n)
if N is None:
JTN = JTn.copy()
N = n.copy()
else:
JTN = np.hstack((JTN, JTn))
N = np.hstack((N, n))
# cVel = cVelocities[idx]
# offsetAngle = math.atan2(cVel[2], cVel[0])
offsetAngle = 0.
for i in range(numFrictionBases):
d[i] = np.array([[math.cos((2.*math.pi*i)/numFrictionBases), 0., math.sin((2.*math.pi*i)/numFrictionBases)]]).T
for i in range(numFrictionBases):
JTd = np.dot(jacobian.T, d[i])
if D is None:
JTD = JTd.copy()
D = d[i].copy()
else:
JTD = np.hstack((JTD, JTd))
D = np.hstack((D, d[i]))
E = np.zeros((cNum*numFrictionBases, cNum))
for cIdx in range(cNum):
for fcIdx in range(numFrictionBases):
E[cIdx*numFrictionBases + fcIdx][cIdx] = 1.
return len(cBodyIds), cBodyIds, cPositions, cPositionsLocal, cVelocities, JTN, JTD, E, N, D
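# Note on the blocks built above: JTN stacks J^T n (one contact-normal column
# per contact), JTD stacks J^T d_i for the numFrictionBases tangential basis
# directions of each contact, and E selects the friction components belonging
# to each contact. Together with the mu*I block they form the standard
# Stewart-Trinkle style LCP assembled in getLCPMatrix() below.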
def getLCPMatrix(world, model, invM, invMc, mu, tau, contactNum, contactPositions, JTN, JTD, E, factor=1.):
"""
:type world: pydart.World
:type model: cdm.DartModel
:type invM: np.ndarray
:type invMc: np.ndarray
:type mu: float
:type tau: np.ndarray
:type contactNum: int
:type contactPositions: list[np.ndarray]
:type JTN: np.ndarray
:type JTD: np.ndarray
:type E: np.ndarray
:type factor: float
:return:
"""
totalDOF = model.getTotalDOF()
h = model.GetTimeStep()
invh = 1./h
mus = mu * np.eye(contactNum) # type: np.ndarray
temp_NM = JTN.T.dot(invM)
temp_DM = JTD.T.dot(invM)
# pdb.set_trace()
# A =[ A11, A12, 0]
# [ A21, A22, E]
# [ mus, -E.T, 0]
A11 = h*temp_NM.dot(JTN)
A12 = h*temp_NM.dot(JTD)
A21 = h*temp_DM.dot(JTN)
A22 = h*temp_DM.dot(JTD)
A = np.concatenate(
(
factor * np.concatenate((A11, A12, np.zeros((A11.shape[0], E.shape[1]))), axis=1),
factor * np.concatenate((A21, A22, E), axis=1),
h * np.concatenate((mus, -E.T, np.zeros((mus.shape[0], E.shape[1]))), axis=1),
), axis=0
)
A = A + 0.1*np.eye(A.shape[0])
qdot_0 = np.asarray(model.skeleton.dq)
if tau is None:
tau = np.zeros(np.shape(qdot_0))
# non-penentration condition
# b1 = N.T.dot(qdot_0 - h*invMc) + h*temp_NM.dot(tau)
# improved non-penentration condition : add position condition
penDepth = 0.003
# penDepth = 0.005
bPenDepth = np.zeros(A11.shape[0])
for i in range(contactNum):
if abs(contactPositions[i][1]) > penDepth:
bPenDepth[i] = contactPositions[i][1] + penDepth
b1 = JTN.T.dot(qdot_0 - h*invMc) + h*temp_NM.dot(tau) + 0.5*invh * bPenDepth
b2 = JTD.T.dot(qdot_0 - h*invMc) + h*temp_DM.dot(tau)
b3 = np.zeros(mus.shape[0])
b = np.hstack((np.hstack((factor * b1, factor * b2)), b3))
return A, b
def calcLCPForces(motion, world, model, bodyIDsToCheck, mu, tau=None, numFrictionBases=8, solver='qp'):
"""
:type motion: ym.JointMotion
:type world: pydart.World
:type model: cdm.DartModel
:type bodyIDsToCheck: list[int]
:type mu: float
:type tau: np.ndarray
:type numFrictionBases: int
:type solver: str
:return:
"""
timeStamp = []
timeIndex = 0
prevTime = time.time()
# model = VpControlModel
contactNum, bodyIDs, contactPositions, contactPositionsLocal, contactVelocities, JTN, JTD, E, N, D \
= makeFrictionCone(motion[0].skeleton, world, model, bodyIDsToCheck, numFrictionBases)
if contactNum == 0:
return bodyIDs, contactPositions, contactPositionsLocal, None, None
timeStamp, timeIndex, prevTime = setTimeStamp(timeStamp, timeIndex, prevTime)
totalDOF = model.getTotalDOF()
invM = model.skeleton.inv_mass_matrix()
invMc = np.dot(invM, model.skeleton.coriolis_and_gravity_forces())
timeStamp, timeIndex, prevTime = setTimeStamp(timeStamp, timeIndex, prevTime)
# pdb.set_trace()
# A =[ A11, A12, 0]
# [ A21, A22, E]
# [ mus, -E.T, 0]
factor = 100.
A, b = getLCPMatrix(world, model, invM, invMc, mu, tau, contactNum, contactPositions, JTN, JTD, E, factor)
# lo = np.zeros(A.shape[0])
lo = 0.*np.ones(A.shape[0])
hi = 1000000. * np.ones(A.shape[0])
x = 0.*np.ones(A.shape[0])
# normalizeMatrix(A, b)
# print A[0]
if solver == 'qp':
# solve using cvxopt QP
# if True:
try:
Aqp = cvxMatrix(A+A.T)
bqp = cvxMatrix(b)
Gqp = cvxMatrix(np.vstack((-A, -np.eye(A.shape[0]))))
hqp = cvxMatrix(np.hstack((b.T, np.zeros(A.shape[0]))))
timeStamp, timeIndex, prevTime = setTimeStamp(timeStamp, timeIndex, prevTime)
cvxSolvers.options['show_progress'] = False
cvxSolvers.options['maxiters'] = 100
solution = cvxSolvers.qp(Aqp, bqp, Gqp, hqp)
xqp = np.array(solution['x']).flatten()
x = xqp.copy()
except ValueError as e:
print(e)
pass
normalForce = x[:contactNum]
tangenForce = x[contactNum:contactNum + numFrictionBases*contactNum]
# tangenForce = np.zeros_like(x[contactNum:contactNum + numFrictionBases*contactNum])
minTangenVel = x[contactNum + numFrictionBases*contactNum:]
# print minTangenVel
tangenForceDual = (np.dot(A,x)+b)[contactNum:contactNum+numFrictionBases*contactNum]
# print "hehe:", (np.dot(A,x)+b)[contactNum:contactNum+numFrictionBases*contactNum]
# print "hihi:", tangenForce
# print np.dot(tangenForce, tangenForceDual)
forces = []
for cIdx in range(contactNum):
force = np.zeros(3)
force[1] = normalForce[cIdx]
# contactTangenForce = tangenForce[numFrictionBases*cIdx:numFrictionBases*(cIdx+1)]
contactTangenForceDual = tangenForceDual[numFrictionBases*cIdx:numFrictionBases*(cIdx+1)]
fcIdx = np.argmax(tangenForce[cIdx*numFrictionBases:(cIdx+1)*numFrictionBases])
d = np.array((math.cos(2.*math.pi*fcIdx/numFrictionBases), 0., math.sin(2.*math.pi*fcIdx/numFrictionBases)))
force += tangenForce[cIdx*numFrictionBases + fcIdx] * d
# for fcIdx in range(numFrictionBases):
# d = np.array((math.cos(2.*math.pi*fcIdx/numFrictionBases), 0., math.sin(2.*math.pi*fcIdx/numFrictionBases)))
# force += tangenForce[cIdx*numFrictionBases + fcIdx] * d
# minBasisIdx = np.argmin(contactTangenForceDual)
# d = np.array((math.cos((2.*math.pi*minBasisIdx)/numFrictionBases), 0., math.sin((2.*math.pi*minBasisIdx)/numFrictionBases)))
# force += tangenForce[cIdx*numFrictionBases + minBasisIdx] * d
forces.append(force)
# repairForces(forces, contactPositions)
# print forces
timeStamp, timeIndex, prevTime = setTimeStamp(timeStamp, timeIndex, prevTime)
# debug
__HP__DEBUG__= False
if __HP__DEBUG__ and len(bodyIDs) ==4:
vpidx = 3
DOFs = model.getDOFs()
Jic = yjc.makeEmptyJacobian(DOFs, 1)
qdot_0 = ype.makeFlatList(totalDOF)
ype.flatten(model.getBodyRootDOFVelocitiesLocal(), qdot_0)
jointAxeses = model.getBodyRootDOFAxeses()
bodyidx = model.id2index(bodyIDs[vpidx])
contactJointMasks = [yjc.getLinkJointMask(motion[0].skeleton, bodyidx)]
jointPositions = model.getJointPositionsGlobal()
jointPositions[0] = model.getBodyPositionGlobal(0)
yjc.computeLocalRootJacobian(Jic, DOFs, jointPositions, jointAxeses, [contactPositions[vpidx]], contactJointMasks)
h = world.GetTimeStep()
vv = np.dot(Jic, qdot_0) - h * np.dot(Jic, invMc) + h * np.dot(Jic, np.dot(invM, tau))
for vpidxx in range(len(bodyIDs)):
bodyidx = model.id2index(bodyIDs[vpidxx])
contactJointMasks = [yjc.getLinkJointMask(motion[0].skeleton, bodyidx)]
yjc.computeLocalRootJacobian(Jic, DOFs, jointPositions, jointAxeses, [contactPositions[vpidxx]], contactJointMasks)
vv += h * np.dot(Jic, np.dot(invM, np.dot(Jic[:3].T, forces[vpidxx])))
print("vv:", vv[:3])
return bodyIDs, contactPositions, contactPositionsLocal, forces, timeStamp
def calcLCPbasicControl(
motion, world, model, bodyIDsToCheck, mu, totalForce, weights,
tau0=None, variableDofIdx=None, numFrictionBases=8):
"""
:type motion: ym.JointMotion
:type world: pydart.World
:type model: cdm.DartModel
:type bodyIDsToCheck: list[int]
:type mu: float
:type totalForce: list[float] | np.ndarray
:type tau0: np.ndarray
:type variableDofIdx: list[float] | np.ndarray
:type numFrictionBases: int
:return:
"""
# tau0 = None
# model = VpControlModel
# numFrictionBases = 8
contactNum, bodyIDs, contactPositions, contactPositionsLocal, contactVelocities, JTN, JTD, E, N, D \
= makeFrictionCone(motion[0].skeleton, world, model, bodyIDsToCheck, numFrictionBases)
if contactNum == 0:
return bodyIDs, contactPositions, contactPositionsLocal, None, None
wLCP = weights[0]
wTorque = weights[1]
wForce = weights[2]
totalDOF = model.getTotalDOF()
if variableDofIdx is None:
variableDofIdx = range(0, totalDOF)
variableDof = len(variableDofIdx)
# invM = np.zeros((totalDOF, totalDOF))
# invMc = np.zeros(totalDOF)
# model.getInverseEquationOfMotion(invM, invMc)
# M = model.skeleton.mass_matrix()
# invM = npl.inv(M)
invM = model.skeleton.inv_mass_matrix()
c = model.skeleton.coriolis_and_gravity_forces()
invMc = np.dot(invM, c)
h = world.time_step()
invh = 1./h
mus = mu * np.eye(contactNum) # type: np.ndarray
temp_NM = JTN.T.dot(invM)
temp_DM = JTD.T.dot(invM)
A00 = np.eye(variableDof)
A10 = h*temp_NM[:, variableDofIdx]
A11 = h*temp_NM.dot(JTN)
A12 = h*temp_NM.dot(JTD)
A20 = h*temp_DM[:, variableDofIdx]
A21 = h*temp_DM.dot(JTN)
A22 = h*temp_DM.dot(JTD)
factor = 1.
# A, b = getLCPMatrix(world, model, pinvM0, c, mu, tau0, contactNum, contactPositions, JTN, JTD, E, factor)
# A0 = np.concatenate((A00, np.zeros((A00.shape[0], A11.shape[1]+A12.shape[1]+E.shape[1]))), axis=1)
A0 = np.zeros((A00.shape[0], A00.shape[1] + A11.shape[1]+A12.shape[1]+E.shape[1]))
A1 = np.concatenate((A10, A11, A12, np.zeros((A11.shape[0], E.shape[1]))), axis=1)
A2 = np.concatenate((A20, A21, A22, E), axis=1)
A3 = np.concatenate((np.zeros((mus.shape[0], A00.shape[1])), 1.*mus, -1.*E.T, np.zeros((mus.shape[0], E.shape[1]))), axis=1)
A_ori = np.concatenate((A0,
wLCP*A1,
wLCP*A2,
wLCP*A3), axis=0) * factor
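# Layout of the unknown vector x (matching the slicing of the solution further below):
#   x[:variableDof]                        -> torques of the variable DOFs
#   x[variableDof:variableDof+contactNum]  -> normal contact forces
#   next numFrictionBases*contactNum entries -> tangential friction forces along the basis directions
#   last contactNum entries                -> minimal tangential velocity slack (minTangenVel)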
A = A_ori.copy()
# A = A_ori + 0.01 * np.diag(np.hstack((np.zeros(A0.shape[0]), np.ones(A1.shape[0]+A2.shape[0]), np.zeros(A3.shape[0]))))
# A = A_ori + 0.01 * np.eye(A_ori.shape[0])
# bx= h * (M*qdot_0 + tau - c)
# b =[N.T * Jc * invM * kx]
# [D.T * Jc * invM * kx]
# [0]
qdot_0 = model.skeleton.dq.copy()
if tau0 is None:
tau0 = np.zeros(np.shape(qdot_0))
# non-penetration condition
# b1 = N.T.dot(qdot_0 - h*invMc) + h*temp_NM.dot(tau)
# improved non-penetration condition : add position condition
penDepth = 0.003
bPenDepth = np.zeros(A1.shape[0])
for i in range(contactNum):
if abs(contactPositions[i][1]) > penDepth:
bPenDepth[i] = contactPositions[i][1] + penDepth
invM_tau0 = np.zeros_like(qdot_0)
if variableDof < totalDOF:
    # contribution of the fixed (non-variable) torques: drop the variable DOF entries of
    # tau0 and the corresponding columns of invM before multiplying
    _tau0 = np.delete(tau0, variableDofIdx)
    invM_tau0 += np.dot(np.delete(invM, variableDofIdx, axis=1), _tau0)
b0 = np.zeros(A00.shape[0])
b1 = JTN.T.dot(qdot_0 - h*invMc + h*invM_tau0) + 0.5*invh*bPenDepth
# b1 = JTN.T.dot(qdot_0 - h*invMc + h*invM_tau0) + .1*invh*bPenDepth
b2 = JTD.T.dot(qdot_0 - h*invMc + h*invM_tau0)
b3 = np.zeros(mus.shape[0])
b = np.hstack((wTorque*b0, wLCP*np.hstack((np.hstack((b1, b2)), b3)))) * factor
x = 100.*np.ones(A.shape[0])
zqp = np.zeros(A.shape[0])
Qfqp = None
pfqp = None
# for torque equality constraints computation
modelCom = model.getCOM()
rcN = np.zeros((3, N.shape[1]))
rcD = np.zeros((3, D.shape[1]))
for cIdx in range(len(contactPositions)):
r = contactPositions[cIdx] - modelCom
rcN[:3, cIdx] = np.cross(r, N[:3, cIdx])
for fbIdx in range(numFrictionBases):
dIdx = numFrictionBases * cIdx + fbIdx
rcD[:3, dIdx] = np.cross(r, D[:3, dIdx])
if True:
# if True:
try:
# Qqp = cvxMatrix(A+A.T)
# pqp = cvxMatrix(b)
# Qtauqp = np.hstack((np.dot(pinvM1[:6], np.hstack((JTN, JTD))), np.zeros_like(N[:6])))
# ptauqp = np.dot(pinvM1[:6], (-np.asarray(c)+np.asarray(tau0))) + np.asarray(tau0[:6])
Qtauqp = np.hstack((np.eye(variableDof), np.zeros((A00.shape[0], A11.shape[1]+A12.shape[1]+E.shape[1]))))
ptauqp = np.zeros(variableDof)
# Q2dotqp = np.hstack((np.dot(invM, np.concatenate((wTorque* np.eye(variableDof), JTN, JTD), axis=1)), np.zeros((A0.shape[0], E.shape[1])) ))
# p2dotqp = -invMc.copy()
# Qqp = cvxMatrix(2.*A + wTorque * np.dot(Qtauqp.T, Qtauqp))
# pqp = cvxMatrix(b + wTorque * np.dot(ptauqp.T, Qtauqp))
Qfqp = np.concatenate((np.zeros((3, variableDof)), N[:3], D[:3], np.zeros_like(N[:3])), axis=1)
pfqp = -totalForce[:3]
# Qfqp = np.concatenate((np.zeros((1, totalDOF)),N[1:2], D[1:2], np.zeros_like(N[1:2])), axis=1)
# pfqp = -totalForce[1:2]
# TODO:
# add momentum term
# QtauNormqp = np.hstack((np.dot(pinvM1, np.hstack((JTN, JTD))), np.zeros((pinvM1.shape[0], N.shape[1]))))
# ptauNormqp = np.dot(pinvM1, (-np.asarray(c)+np.asarray(tau0))) + np.asarray(tau0)
# QqNormqp = np.hstack((np.dot(pinvM0, np.hstack((JTN, JTD))), np.zeros((pinvM0.shape[0], N.shape[1]))))
# pqNormqp = np.dot(pinvM0, (-np.asarray(c)+np.asarray(tau0))) + np.asarray(tau0)
# Qqp = cvxMatrix(2.*A + wTorque * np.dot(Qfqp.T, Qfqp) + np.dot(QqNormqp.T, QqNormqp))
# pqp = cvxMatrix(b + wTorque * np.dot(pfqp.T, Qfqp) + np.dot(pqNormqp.T, QqNormqp))
# Qqp = cvxMatrix(2.*A + wForce * np.dot(Qfqp.T, Qfqp) + wTorque * np.dot(QtauNormqp.T, QtauNormqp))
# pqp = cvxMatrix(b + wForce * np.dot(pfqp.T, Qfqp) + wTorque * np.dot(ptauNormqp.T, QtauNormqp))
# objective : LCP
Qqp = cvxMatrix(A+A.T )
pqp = cvxMatrix(b)
QQ = A+A.T
pp = b.copy()
# objective : torque
if True:
Qqp += cvxMatrix(wTorque * np.dot(Qtauqp.T, Qtauqp) )
pqp += cvxMatrix(wTorque * np.dot(ptauqp.T, Qtauqp))
QQ += wTorque * np.dot(Qtauqp.T, Qtauqp)
pp += wTorque * np.dot(ptauqp.T, Qtauqp)
# objective : q2dot
if False:
Qqp += cvxMatrix(wTorque * np.dot(Q2dotqp.T, Q2dotqp) )
pqp += cvxMatrix(wTorque * np.dot(p2dotqp.T, Q2dotqp))
QQ += wTorque * np.dot(Q2dotqp.T, Q2dotqp)
pp += wTorque * np.dot(p2dotqp.T, Q2dotqp)
# objective : force
if True:
Qqp += cvxMatrix(wForce * np.dot(Qfqp.T, Qfqp) )
pqp += cvxMatrix(wForce * np.dot(pfqp.T, Qfqp))
QQ += wForce * np.dot(Qfqp.T, Qfqp)
pp += wForce * np.dot(pfqp.T, Qfqp)
equalConstForce = False
G = np.vstack((-A[variableDof:], -np.eye(A.shape[0])[variableDof:]))
hnp = np.hstack((b[variableDof:].T, np.zeros(A.shape[0])[variableDof:]))
# G = np.vstack((-A_ori[totalDOF:], -np.eye(A_ori.shape[0])[totalDOF:]))
# hnp = np.hstack((b[totalDOF:].T, np.zeros(A_ori.shape[0])[totalDOF:]))
if False and not equalConstForce:
# 3direction
# if not equalConstForce:
constMu = .1
constFric = totalForce[1]*constMu
totalForceMat = np.concatenate((np.zeros((6, totalDOF)), N, D, np.zeros_like(N)), axis=1)
G = np.concatenate((G, -totalForceMat[:3], totalForceMat[:3]), axis=0)
hnp = np.hstack((hnp, np.zeros(6)))
hnp[-6] = -totalForce[0] - constFric
hnp[-5] = -totalForce[1] * 0.9
hnp[-4] = -totalForce[2] - constFric
hnp[-3] = totalForce[0] + constFric
hnp[-2] = totalForce[1] * 1.1
hnp[-1] = totalForce[2] + constFric
if False and not equalConstForce:
# just normal direction
# if not equalConstForce:
constMu = .1
constFric = totalForce[1]*constMu
totalForceMat = np.concatenate((np.zeros((6, totalDOF)), N, D, np.zeros_like(N)), axis=1)
G = np.concatenate((G, -totalForceMat[1:2], totalForceMat[1:2]), axis=0)
hnp = np.hstack((hnp, np.zeros(2)))
hnp[-2] = -totalForce[1] * 0.9
hnp[-1] = totalForce[1] * 1.1
# G = np.vstack((G, np.hstack((np.ones((2, N.shape[1])), np.zeros((2, D.shape[1]+N.shape[1]))))))
# G[-2] *= -1.
# hnp = np.hstack((hnp, np.zeros(2)))
# hnp[-2] = -totalForce[1] * .9
# hnp[-1] = totalForce[1] * 1.1
# root torque 0 condition as inequality constraint
# Atauqp = np.hstack((np.dot(pinvM1, np.hstack((JTN, JTD))), np.zeros((pinvM1.shape[0], N.shape[1]))))
# btauqp = np.dot(pinvM1, (np.asarray(c)-np.asarray(tau0))) - np.array(tau0)
# G = np.concatenate((G, -Atauqp, Atauqp), axis=0)
# hnp = np.hstack((hnp, np.hstack((-btauqp, btauqp))))
# hnp[-2*pinvM1.shape[0]:] += 1. * np.ones(2*pinvM1.shape[0])
Gqp = cvxMatrix(G)
hqp = cvxMatrix(hnp)
# check correctness of equality constraint
# tau = np.dot(pinvM1, -c + tau0 + np.dot(JTN, normalForce) + np.dot(JTD, tangenForce))
# tau = pinvM1*JTN*theta + pinvM1*JTD*phi + pinvM1*tau0 - pinvM1*b + tau0
# Atauqp = np.hstack((np.dot(pinvM1[:6], np.hstack((JTN, JTD))), np.zeros_like(N[:6])))
# btauqp = np.dot(pinvM1[:6], (np.asarray(c)-np.asarray(tau0))) - np.asarray(tau0[:6])
# Atauqp = np.hstack((np.dot(pinvM1, np.hstack((JTN, JTD))), np.zeros((pinvM1.shape[0], N.shape[1]))))
# btauqp = np.dot(pinvM1, (np.asarray(c)-np.asarray(tau0))) - np.asarray(tau0)
Atauqp = np.hstack((np.eye(6), np.zeros((6, A.shape[1]-6))))
btauqp = np.zeros(6)
AextTorqp = np.concatenate((rcN, rcD, np.zeros_like(N[:3])), axis=1)
bextTorqp = totalForce[3:]
# Atauqp = np.vstack((Atauqp, AextTorqp))
# btauqp = np.hstack((btauqp, bextTorqp))
if equalConstForce:
Atauqp = cvxMatrix(np.vstack((np.concatenate((N[1:2], D[1:2], np.zeros(N[1:2].shape))), Atauqp)))
btauqp = cvxMatrix(np.hstack((np.array((totalForce[1],)), btauqp)))
# Atauqp = cvxMatrix(np.vstack((np.concatenate((N[1:2], D[1:2], np.zeros(N[1:2].shape), AextTorqp)), Atauqp)))
# btauqp = cvxMatrix(np.concatenate((np.array(totalForce[1]), bextTorqp, btauqp), axis=1))
Aqp = cvxMatrix(Atauqp)
bqp = cvxMatrix(btauqp)
cvxSolvers.options['show_progress'] = False
cvxSolvers.options['maxiters'] = 100
cvxSolvers.options['refinement'] = 1
cvxSolvers.options['kktsolver'] = "robust"
xqp = np.array(cvxSolvers.qp(Qqp, pqp, Gqp, hqp, Aqp, bqp)['x']).flatten()
x = xqp.copy()
# print "x: ", x
# zqp = np.dot(A_ori, xqp) + b
# zqp = np.dot(A, xqp) + b
# print "QP z: ", np.dot(xqp, zqp)
# if np.dot(xqp, zqp) < np.dot(x, z):
# bp::list qp(const object &H, const object &g, const object &A, const object &lb, const object &ub, const object &lbA, const object ubA, int nWSR)
# print qpos.qp
# lb = [-1000.]*(totalDOF-6)
# lb.extend([0.]*(A.shape[0]-totalDOF))
# xqpos = qpos.qp(QQ[6:, 6:], pp[6:], G[:, 6:], lb, None, None, hnp, 200, False, "NONE")
# xtmp = [0.]*6
# xtmp.extend(xqpos[:])
# x = np.array(xtmp)
# lb = [-1000.]*totalDOF
# lb.extend([0.]*(A.shape[0]-totalDOF))
# xqpos = qpos.qp(QQ, pp, G, lb, None, None, hnp, 200, False, "NONE")
# x = np.array(xqpos)
zqp = np.dot(A, x) + b
'''
cons = []
# for ii in range(A.shape[0]):
# cons.append({'type': 'eq',
# 'fun' : lambda xx: np.dot(Atauqp[i], xx)
# #,'jac' : lambda xx: Atauqp[i]
# })
for ii in range(G.shape[0]):
cons.append({'type':'ineq',
'fun' : lambda xx: -np.dot(G[:,6:][i], xx)+hnp[i]
#,'jac' : lambda xx: -G[i]
})
L-BFGS-B
TNC
COBYLA
SLSQP
res = spopt.minimize(lambda xx: np.dot(xx, .5*np.dot(QQ[6:, 6:], xx)+pp[6:]), xqp[6:],
# jac=lambda xx: np.dot(np.dot(QQ, xx)+pp),
method='SLSQP', constraints=cons, options={'disp': True})
# res = spopt.minimize(lambda xx: np.dot(xx, .5*np.dot(QQ, xx)+pp) , xqp)
print res.x
# print res.hess
# print res.message
'''
except ValueError as e:
print('LCPbasicControl!!', e)
pass
def refine(xx):
for i in range(len(xx)):
if xx[i] < 0.001:
xx[i] = 0.
return xx
_tau = x[:variableDof]
normalForce = x[variableDof:variableDof+contactNum]
tangenForce = x[variableDof+contactNum:variableDof+contactNum + numFrictionBases*contactNum]
minTangenVel = x[variableDof+contactNum + numFrictionBases*contactNum:]
tau = np.array(tau0).copy()
tau[variableDofIdx] = x[:variableDof]
# for i in range(len(tau)):
# tau[i] = 10.*x[i]
# print np.array(tau)
# zqp = np.dot(A, x)+b
lcpValue = np.dot(x[variableDof:], zqp[variableDof:])
tauValue = np.dot(_tau, _tau)
# Q2dotqpx = np.dot(Q2dotqp, x)+p2dotqp
# q2dotValue = np.dot(Q2dotqpx, Q2dotqpx)
Qfqpx = np.dot(Qfqp, x)+pfqp
forceValue = np.dot(Qfqpx, Qfqpx)
print("LCP value: ", wLCP, lcpValue/wLCP, lcpValue)
print("tau value: ", wTorque, tauValue, wTorque*tauValue)
# print "q2dot value: ", wTorque, q2dotValue, wTorque*q2dotValue
print("For value: ", wForce, forceValue, wForce*forceValue)
# print "x: ", x[totalDOF:]
# print "z: ", zqp[totalDOF:]
# print "b: ", b[totalDOF:]
# print "elevalue: ", np.multiply(x[totalDOF:], zqp[totalDOF:])
forces = []
for cIdx in range(contactNum):
force = np.zeros(3)
force[1] = normalForce[cIdx]
for fcIdx in range(numFrictionBases):
d = np.array((math.cos(2.*math.pi*fcIdx/numFrictionBases), 0., math.sin(2.*math.pi*fcIdx/numFrictionBases)))
force += tangenForce[cIdx*numFrictionBases + fcIdx] * d
# print force
forces.append(force)
# repairForces(forces, contactPositions)
# print forces
return bodyIDs, contactPositions, contactPositionsLocal, forces, tau
def calcLCPbasicControlHD(motion, world, model, bodyIDsToCheck,
mu, totalForce, weights, ddq0=None, variableDofIdx=None, numFrictionBases=8):
"""
:type motion: ym.JointMotion
:type world: pydart.World
:type model: cdm.DartModel
:type bodyIDsToCheck: list[int]
:type mu: float
:type ddq0: np.ndarray
:type variableDofIdx: np.ndarray
:type numFrictionBases: int
:return:
"""
# tau0 = None
# model = VpControlModel
# numFrictionBases = 8
contactNum, bodyIDs, contactPositions, contactPositionsLocal, contactVelocities, JTN, JTD, E, N, D \
= makeFrictionCone(motion[0].skeleton, world, model, bodyIDsToCheck, numFrictionBases)
if contactNum == 0:
return bodyIDs, contactPositions, contactPositionsLocal, None, None, None
DEBUG_MATSIZE = False
DEBUG_OBJVALUE = True
wLCP = weights[0]
wTorque = weights[1]
wForce = weights[2]
totalDOF = model.getTotalDOF()
# joint dofs except foot and root joint dofs
specifiedDofIdxTemp = list(range(6, model.getTotalDOF()))
for dofidx in variableDofIdx:
specifiedDofIdxTemp.remove(dofidx)
specifiedDofIdx = np.array(specifiedDofIdxTemp)
M = model.skeleton.mass_matrix()
# invM = model.skeleton.inv_mass_matrix()
invM = npl.inv(M)
c = model.skeleton.coriolis_and_gravity_forces()
invMc = np.dot(invM, c)
M00 = M[:6, :6]
M01 = M[:6, 6:]
M10 = M[6:, :6]
M11 = M[6:, 6:]
M11inv = npl.inv(M11)
Mschur = npl.inv(M00 - np.dot(M01, np.dot(M11inv, M10)))
# (A - BD^-1 C)^-1
# -(A - BD^-1 C)^-1 BD^-1
# -D^-1 C(A - BD^-1 C)^-1
# D^-1 + D^-1 C(A-BD^-1 C)^-1 B D^-1
invM00 = invM[:6, :6]
invM01 = invM[:6, 6:]
invM10 = invM[6:, :6]
invM11 = invM[6:, 6:]
hatM = np.vstack((np.dot(invM01, npl.inv(invM11)), np.eye(totalDOF-6)))
tildeM = invM[:6, :] - np.dot(hatM[:6, :], invM[6:, :])
hatMCon = hatM[:, specifiedDofIdx - 6]
hatMVar = hatM[:, variableDofIdx- 6]
Mcon = M[:, specifiedDofIdx]
Mvar = M[:, variableDofIdx]
variableDofNum = len(variableDofIdx)
h = world.time_step()
invh = 1./h
mus = mu * np.eye(contactNum) # type: np.ndarray
temp_NM = JTN[:6, :].T.dot(tildeM)
temp_DM = JTD[:6, :].T.dot(tildeM)
# A00 = np.eye(totalDOF)
A00 = np.eye(variableDofNum)
A10 = h*JTN.T.dot(hatMVar)
A11 = h*temp_NM.dot(JTN)
A12 = h*temp_NM.dot(JTD)
A20 = h*JTD.T.dot(hatMVar)
A21 = h*temp_DM.dot(JTN)
A22 = h*temp_DM.dot(JTD)
factor = 5.
# A, b = getLCPMatrix(world, model, pinvM0, c, mu, tau0, contactNum, contactPositions, JTN, JTD, E, factor)
# A0 = np.concatenate((A00, np.zeros((A00.shape[0], A11.shape[1]+A12.shape[1]+E.shape[1]))), axis=1)
A0 = np.zeros((A00.shape[0], A00.shape[1] + A11.shape[1]+A12.shape[1]+E.shape[1]))
A1 = np.concatenate((A10, A11, A12, np.zeros((A11.shape[0], E.shape[1]))), axis=1)
A2 = np.concatenate((A20, A21, A22, E), axis=1)
A3 = np.concatenate((np.zeros((mus.shape[0], A00.shape[1])), 1.*mus, -1.*E.T, np.zeros((mus.shape[0], E.shape[1]))), axis=1)
A_ori = np.concatenate((A0,
wLCP*A1,
wLCP*A2,
wLCP*A3), axis=0) * factor
A = A_ori.copy()
# A = A_ori + 0.01 * np.eye(A_ori.shape[0])*factor
# bx= h * (M*qdot_0 + tau - c)
# b =[N.T * Jc * invM * kx]
# [D.T * Jc * invM * kx]
# [0]
qdot_0 = model.skeleton.dq.copy()
# non-penetration condition
# b1 = N.T.dot(qdot_0 - h*invMc) + h*temp_NM.dot(tau)
# improved non-penetration condition : add position condition
penDepth = 0.003
bPenDepth = np.zeros(A1.shape[0])
for i in range(contactNum):
if abs(contactPositions[i][1]) > penDepth:
bPenDepth[i] = contactPositions[i][1] + penDepth
b0 = np.zeros(A00.shape[0])
b1 = JTN.T.dot(qdot_0 - h*invMc)# + 0.5*invh*bPenDepth
b2 = JTD.T.dot(qdot_0 - h*invMc)
b3 = np.zeros(mus.shape[0])
b = np.hstack((wTorque*b0, wLCP*np.hstack((np.hstack((b1, b2)), b3)))) * factor
x = 100.*np.ones(A.shape[0])
zqp = np.zeros(A.shape[0])
Qfqp = None
pfqp = None
# for external torque equality constraints computation
modelCom = model.getCOM()
rcN = np.zeros((3, N.shape[1]))
rcD = np.zeros((3, D.shape[1]))
for cIdx in range(len(contactPositions)):
r = contactPositions[cIdx] - modelCom
rcN[:3, cIdx] = np.cross(r, N[:3, cIdx])
for fbIdx in range(numFrictionBases):
dIdx = numFrictionBases * cIdx + fbIdx
rcD[:3, dIdx] = np.cross(r, D[:3, dIdx])
varToTauA = np.concatenate((Mvar + np.dot(M[:, :6], hatM[:6, :])[:, variableDofIdx-6],
np.dot(np.dot(M[:, :6], tildeM), JTN) - JTN,
np.dot(np.dot(M[:, :6], tildeM), JTD) - JTD,
np.zeros((totalDOF, E.shape[1]))), axis=1)
varToTaub = np.dot(Mcon + np.dot(M[:, :6], hatM[:6, :])[:, specifiedDofIdx-6], ddq0[6:]) \
+ np.dot(np.dot(M[:, :6], tildeM), c) - c
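# varToTauA / varToTaub relate the QP unknowns x to the full joint torque vector
# (tau = varToTauA.dot(x) + varToTaub is evaluated after the solve); their first six rows,
# corresponding to the root DOFs, are reused as the equality constraint of the QP below.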
if True:
# try:
Qtauqp = varToTauA[6:, :]
ptauqp = varToTaub[6:]
Qfqp = np.concatenate((np.zeros((3, variableDofNum)), N[:3], D[:3], np.zeros_like(N[:3])), axis=1)
pfqp = -totalForce[:3]
# objective : LCP
Qqp = cvxMatrix(A+A.T )
pqp = cvxMatrix(b)
QQ = A+A.T
pp = b.copy()
if DEBUG_MATSIZE:
print("matrix size:", Qqp.size)
print("rankLCP:", npl.matrix_rank(Qqp))
# objective : qvar
if True:
Qqvarqp = np.concatenate((np.eye(variableDofNum), np.zeros((variableDofNum, N.shape[1])),
np.zeros((variableDofNum, D.shape[1])), np.zeros((variableDofNum, N.shape[1]))),
axis=1)
Qqp += cvxMatrix(0.1 * wTorque * np.dot(Qqvarqp.T, Qqvarqp))
# objective : torque
if True:
Qqp += cvxMatrix(wTorque * np.dot(Qtauqp.T, Qtauqp) )
pqp += cvxMatrix(wTorque * np.dot(ptauqp.T, Qtauqp))
QQ += wTorque * np.dot(Qtauqp.T, Qtauqp)
pp += wTorque * np.dot(ptauqp.T, Qtauqp)
# objective : q2dot
if False:
Qqp += cvxMatrix(wTorque * np.dot(Q2dotqp.T, Q2dotqp) )
pqp += cvxMatrix(wTorque * np.dot(p2dotqp.T, Q2dotqp))
QQ += wTorque * np.dot(Q2dotqp.T, Q2dotqp)
pp += wTorque * np.dot(p2dotqp.T, Q2dotqp)
# objective : force
if True:
Qqp += cvxMatrix(wForce * np.dot(Qfqp.T, Qfqp) )
pqp += cvxMatrix(wForce * np.dot(pfqp.T, Qfqp))
QQ += wForce * np.dot(Qfqp.T, Qfqp)
pp += wForce * np.dot(pfqp.T, Qfqp)
if DEBUG_MATSIZE:
print("matrix size:", Qqp.size)
print("rankP:", npl.matrix_rank(Qqp))
equalConstForce = False
G = np.vstack((-A[variableDofNum:, :], -np.eye(A.shape[0])[variableDofNum:, :]))
hnp = np.hstack((b[variableDofNum:].T, np.zeros(A.shape[0])[variableDofNum:]))
Gqp = cvxMatrix(G)
hqp = cvxMatrix(hnp)
# check correctness of equality constraint
# Atauqp = np.hstack((np.eye(6), np.zeros((6, A.shape[1]-6))))
# btauqp = np.zeros((6))
Atauqp = varToTauA[:6, :]
btauqp = varToTaub[:6]
AextTorqp = np.concatenate((rcN, rcD, np.zeros_like(N[:3])), axis=1)
bextTorqp = totalForce[3:]
if DEBUG_MATSIZE:
print('rankG: ', npl.matrix_rank(G))
print('rankA: ', npl.matrix_rank(Atauqp))
# Atauqp = np.vstack((Atauqp, AextTorqp))
# btauqp = np.hstack((btauqp, bextTorqp))
Aqp = cvxMatrix(Atauqp)
bqp = cvxMatrix(btauqp)
if DEBUG_MATSIZE:
print('rankAll:', npl.matrix_rank(np.concatenate((Qqp, G, Atauqp), axis=0)))
print('expected rank:', variableDofNum+contactNum*10)
cvxSolvers.options['show_progress'] = False
cvxSolvers.options['maxiters'] = 100
cvxSolvers.options['refinement'] = 1
# cvxSolvers.options['kktsolver'] = "robust"
xqp = np.array(cvxSolvers.qp(Qqp, pqp, Gqp, hqp, Aqp, bqp)['x']).flatten()
x = xqp.copy()
# print "x: ", x
# zqp = np.dot(A_ori, xqp) + b
# zqp = np.dot(A, xqp) + b
# print "QP z: ", np.dot(xqp, zqp)
# if np.dot(xqp, zqp) < np.dot(x, z):
# bp::list qp(const object &H, const object &g, const object &A, const object &lb, const object &ub, const object &lbA, const object ubA, int nWSR)
# print qpos.qp
# lb = [-1000.]*(totalDOF-6)
# lb.extend([0.]*(A.shape[0]-totalDOF))
# xqpos = qpos.qp(QQ[6:, 6:], pp[6:], G[:, 6:], lb, None, None, hnp, 200, False, "NONE")
# xtmp = [0.]*6
# xtmp.extend(xqpos[:])
# x = np.array(xtmp)
# lb = [-1000.]*totalDOF
# lb.extend([0.]*(A.shape[0]-totalDOF))
# xqpos = qpos.qp(QQ, pp, G, lb, None, None, hnp, 200, False, "NONE")
# x = np.array(xqpos)
zqp = np.dot(A, x) + b
# except Exception, e:
# print('LCPbasicControl!!', e)
# pass
def refine(xx):
for i in range(len(xx)):
if xx[i] < 0.001:
xx[i] = 0.
return xx
qvar = x[:variableDofNum]
normalForce = x[variableDofNum:variableDofNum+contactNum]
tangenForce = x[variableDofNum+contactNum:variableDofNum+contactNum + numFrictionBases*contactNum]
minTangenVel = x[variableDofNum+contactNum + numFrictionBases*contactNum:]
# for i in range(len(tau)):
# tau[i] = 10.*x[i]
# print np.array(tau)
zqp = np.dot(A, x)+b
tau = np.dot(varToTauA, x) + varToTaub
lcpValue = np.dot(x[variableDofNum:], zqp[variableDofNum:])
qvarValue = np.dot(qvar, qvar)
tauValue = np.dot(tau, tau)
# Q2dotqpx = np.dot(Q2dotqp, x)+p2dotqp
# q2dotValue = np.dot(Q2dotqpx, Q2dotqpx)
Qfqpx = np.dot(Qfqp, x)+pfqp
forceValue = np.dot(Qfqpx, Qfqpx)
if DEBUG_OBJVALUE:
print("LCP value: ", wLCP, lcpValue/wLCP, lcpValue)
print("qvar valu: ", wTorque*.01, qvarValue, .01*wTorque*qvarValue)
print("tau value: ", wTorque, tauValue, wTorque*tauValue)
# print("q2dot value: ", wTorque, q2dotValue, wTorque*q2dotValue)
print("For value: ", wForce, forceValue, wForce*forceValue)
# print("x: ", x[totalDOF:])
# print("z: ", zqp[totalDOF:])
# print("b: ", b[totalDOF:])
# print("elevalue: ", np.multiply(x[totalDOF:], zqp[totalDOF:]))
forces = []
for cIdx in range(contactNum):
force = np.zeros(3)
force[1] = normalForce[cIdx]
for fcIdx in range(numFrictionBases):
d = np.array((math.cos(2.*math.pi*fcIdx/numFrictionBases), 0., math.sin(2.*math.pi*fcIdx/numFrictionBases)))
force += tangenForce[cIdx*numFrictionBases + fcIdx] * d
# print force
forces.append(force)
# repairForces(forces, contactPositions)
# print forces
return bodyIDs, contactPositions, contactPositionsLocal, forces, tau, qvar
#soft contact
def makeSoftFrictionCone(skeleton, world, model, bodyIDsToCheck, numFrictionBases, mu):
"""
the number of friction basis directions is numFrictionBases
:type skeleton: ym.JointSkeleton
:type world: pydart.World
:type model: cdm.DartModel
:type bodyIDsToCheck: list[int]
:type numFrictionBases: int
:rtype:
"""
cBodyIds, cPositions, cPositionsLocal, cVelocities = model.getContactPoints(bodyIDsToCheck)
N = None
V = None
J = None
cNum = len(cBodyIds)
if cNum == 0:
return len(cBodyIds), cBodyIds, cPositions, cPositionsLocal, cVelocities, None, None, None
DOFs = model.getDOFs()
for idx in range(len(cBodyIds)):
body = model.getBody(cBodyIds[idx])
# jacobian = body.world_jacobian(cPositionsLocal[idx])
jacobian = body.linear_jacobian(cPositionsLocal[idx])
if J is None:
J = jacobian.copy()
else:
J = np.vstack((J, jacobian))
n = np.zeros((1, 3*cNum))
n[:, 3*idx:3*idx+3] = np.array([[0., 1., 0.]])
if N is None:
N = n.copy()
else:
N = np.vstack((N, n))
Vi = np.zeros((3, numFrictionBases * cNum))
for i in range(numFrictionBases):
v_temp = np.array([mu * math.cos((2.*math.pi*i)/numFrictionBases), 1., mu * math.sin((2.*math.pi*i)/numFrictionBases)])
len_v_temp = npl.norm(v_temp)
Vi[:, idx*numFrictionBases + i:idx*numFrictionBases + i+1] \
= np.array([[v_temp[0]/len_v_temp, v_temp[1]/len_v_temp, v_temp[2]/len_v_temp]]).T
if V is None:
V = Vi.copy()
else:
V = np.vstack((V, Vi))
return len(cBodyIds), cBodyIds, cPositions, cPositionsLocal, cVelocities, J, V, N
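# The columns of V built above are unit vectors on the linearized friction cone of slope mu
# (normal component 1, tangential magnitude mu, then normalized). The standalone helper below
# is only an illustrative sketch of that construction and is not used elsewhere; its name is
# made up here, and it assumes the module-level imports of math and numpy.linalg (npl).
def _friction_cone_basis_example(mu, numFrictionBases=8):
    """Return a (3, numFrictionBases) array of unit vectors spanning the linearized friction cone."""
    basis = np.zeros((3, numFrictionBases))
    for i in range(numFrictionBases):
        v = np.array([mu * math.cos(2.*math.pi*i/numFrictionBases),
                      1.,
                      mu * math.sin(2.*math.pi*i/numFrictionBases)])
        basis[:, i] = v / npl.norm(v)   # normalize so every column has unit length
    return basis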
def getSoftMatrix(world, model, invM, mu, tau, contactNum, contactPositions, J, V, N, factor=1.):
"""
:type world: pydart.World
:type model: cdm.DartModel
:type invM: np.ndarray
:type mu: float
:type tau: np.ndarray
:type contactNum: int
:type contactPositions: list[np.ndarray]
:type J: np.ndarray
:type V: np.ndarray
:type N: np.ndarray
:type factor: float
:return:
"""
totalDOF = model.getTotalDOF()
h = model.GetTimeStep()
invh = 1./h
dq0 = np.asarray(model.skeleton.dq)
if tau is None:
tau = np.zeros(np.shape(dq0))
reg = 1.
# R = reg * np.eye(J.shape[0])
R = reg * np.eye(V.shape[1])
A_t = h * np.dot(J, np.dot(invM, J.T))
ddq_pure = np.dot(invM, tau - model.skeleton.coriolis_and_gravity_forces())
c_t = np.dot(J, dq0 + h*ddq_pure)
C0 = -np.eye(V.shape[1])
C1_temp = np.dot(N, A_t)
C1 = -np.dot(C1_temp, V)
c0 = np.zeros(V.shape[1])
c1 = np.dot(N, c_t) # - v_min
# from daseong's source,
# R : 0.01, epsilon: 0.1, kappa: 0.05, rho: 0.8
epsilon = 1.
kappa = 0.004
kp = (1. + epsilon) / (kappa*kappa)
kd = 2. * (1. + epsilon) / kappa
contactVels = np.dot(J[1::3, :], dq0)
vels = np.dot(J, dq0)
contactDepths = np.array([-contactPositions[i][1]-0.003 for i in range(len(contactPositions))])
# geom_vels[3 * geom_point_idx + 2] + h*(kp * m_contact[i].depth - kd * geom_vels[3*geom_point_idx + 2])
# v_min = contactVels + h*(kp * contactDepths - kd * contactVels)
# v_min = contactVels + h*kp * contactDepths
# v_min = h*(kp * contactDepths - kd * contactVels)
# v_min = h*(kp * contactDepths)
v_min = contactVels + invh * contactDepths
c1 -= v_min
v_star = np.zeros_like(c_t)
# v_star = -0.001* vels
v_star[1::3] = np.min(np.vstack((v_min, 0.001*np.ones_like(v_min))), axis=0)
import numpy as np
# scipy.stats.cramervonmises
# Suppose we want to test whether the data generated by
# scipy.stats.norm.rvs were in fact drawn from the
# standard normal distribution. We choose a significance
# level of alpha = 0.05.
from scipy import stats
rng = np.random.default_rng()
x = stats.norm.rvs(size=500, random_state=rng)
res = stats.cramervonmises(x, 'norm')
res.statistic, res.pvalue
#CramerVonMisesResult(statistic=0.1276613786697622, pvalue=0.46556649116631343)
# The p-value exceeds our chosen significance level,
# so we do not reject the null hypothesis that the observed sample
# is drawn from the standard normal distribution.
# Now, suppose we want to check whether the same samples
# shifted by 2.1 are consistent with having been drawn
# from a normal distribution with a mean of 2.
y = x + 2.1
res = stats.cramervonmises(y, 'norm', args=(2,))
#CramerVonMisesResult(statistic=0.7040268563073291, pvalue=0.012420322007088758)
# Here we use the args keyword to specify the mean (loc)
# of the normal distribution against which to test the data. This is equivalent to the
# following, in which we create a normal distribution with mean 2 and
# then pass its cdf method as an argument.
frozen_dist = stats.norm(loc=2)
res = stats.cramervonmises(y, frozen_dist.cdf)
res.statistic, res.pvalue
#(0.7040268563073291, 0.012420322007088758)
# In both cases, we would reject the null hypothesis that the observed
# sample is drawn from a normal distribution with a mean of 2
# (and default variance of 1) because the p-value of 0.01 is less than
# our chosen significance level.
# scipy.stats.cramervonmises_2samp
# Suppose we want to test whether two samples generated by scipy.stats.norm.rvs have the same distribution. We choose a significance level of alpha = 0.05.
from scipy import stats
rng = np.random.default_rng()
x = stats.norm.rvs(size=100, random_state=rng)
y = stats.norm.rvs(size=70, random_state=rng)
res = stats.cramervonmises_2samp(x, y)
res.statistic, res.pvalue #(0.12726890756302467, 0.47115054777270216)
# The p-value exceeds our chosen significance level, so we
# do not reject the null hypothesis that the observed samples are
# drawn from the same distribution.
# For small sample sizes, the exact p-values can be computed:
x = stats.norm.rvs(size=7, random_state=rng)
y = stats.t.rvs(df=2, size=6, random_state=rng)
res = stats.cramervonmises_2samp(x, y, method='exact')
res.statistic, res.pvalue #(0.042124542124541975, 0.9801864801864801)
# The p-value based on the asymptotic distribution is a good approximation
# even though the sample size is small.
res = stats.cramervonmises_2samp(x, y, method='asymptotic')
res.statistic, res.pvalue #(0.042124542124541975, 0.9937806294485269)
# Regardless of the method, the null hypothesis would not be rejected
# at the chosen significance level in this example.
x = stats.norm.rvs(size=700, random_state=rng)
y = stats.t.rvs(df=2, size=600, random_state=rng)
res = stats.cramervonmises_2samp(x, y)
print(res) #CramerVonMisesResult(statistic=0.6771188644688664, pvalue=0.014472209121915047)
#scipy.stats.kstest
from scipy import stats
rng = np.random.default_rng()
x = np.linspace(-15, 15, 9)
stats.kstest(x, 'norm')
# KstestResult(statistic=0.4443560271592436, pvalue=0.03885014008678778)
stats.kstest(stats.norm.rvs(size=100, random_state=rng), stats.norm.cdf)
# The lines above are equivalent to:
stats.kstest(stats.norm.rvs, 'norm', N=100)
# Testing t-distributed random variables against the normal distribution.
# With 100 degrees of freedom, the t distribution looks close to the normal distribution,
# and the KS test does not reject the hypothesis that the sample came from the normal distribution:
stats.kstest(stats.t.rvs(100, size=100, random_state=rng), 'norm')
# KstestResult(statistic=0.10694118810178882, pvalue=0.18878890547885985)
# With 3 degrees of freedom, the t distribution is sufficiently different from the normal distribution
# that we can reject the hypothesis that the sample came from the normal distribution at the
# 10% level:
stats.kstest(stats.t.rvs(3, size=100, random_state=rng), 'norm')
#KstestResult(statistic=0.11786287323060995, pvalue=0.11456645992107758)
#scipy.stats.ks_2samp
from scipy import stats
rng = np.random.default_rng()
n1 = 200  # size of the first sample
n2 = 300  # size of the second sample
# For a different distribution, we can reject the null hypothesis since the p-value is below 1%:
rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1, random_state=rng)
rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5, random_state=rng)
stats.ks_2samp(rvs1, rvs2)
# KstestResult(statistic=0.24, pvalue=1.5876939054582095e-06)
# For a slightly different distribution, we cannot reject the null hypothesis at an alpha of 10% or lower,
# since the p-value of 0.219 is above 10%.
rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0, random_state=rng)
stats.ks_2samp(rvs1, rvs3)
# KstestResult(statistic=0.095, pvalue=0.2192140768654085)
# For an identical distribution, we cannot reject the null hypothesis since the p-value is high, 41%:
rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0, random_state=rng)
stats.ks_2samp(rvs1, rvs4)
#scipy.stats.anderson_ksamp
from scipy import stats
rng = np.random.default_rng()
# The null hypothesis that the two random samples come from the same distribution can be rejected
# at the 5% level because the returned test value is greater than the critical value for 5% (1.961),
# but not at the 2.5% level. Interpolation gives an approximate significance level of 3.2%:
stats.anderson_ksamp([rng.normal(size=50),
rng.normal(loc=0.5, size=30)])
# p-value = significance_level = 0.07396028404997687
# The null hypothesis cannot be rejected for three samples from an identical distribution.
# The reported p-value (25%) has been capped and may not be very accurate (since it corresponds to the value 0.449,
# whereas the statistic is -0.731):
stats.anderson_ksamp([rng.normal(size=50),
rng.normal(size=30), rng.normal(size=20)])
#Anderson_ksampResult(statistic=-0.5917988120678772, critical_values=array([0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856, 4.07210043, 5.56419101]), significance_level=0.25)
#scipy.stats.ansari
from scipy.stats import ansari
rng = np.random.default_rng()
# For these examples, we create three random data sets. The first two, with sizes 35 and 25,
# are drawn from a normal distribution with mean 0 and standard deviation 2. The third data set has
# size 25 and is drawn from a normal distribution with standard deviation 1.25.
x1 = rng.normal(loc=0, scale=2, size=35)
x2 = rng.normal(loc=0, scale=2, size=25)
x3 = rng.normal(loc=0, scale=1.25, size=25)
# First, we apply ansari to x1 and x2. These samples are drawn from the same distribution, so
# we expect the Ansari-Bradley test not to lead us to conclude that the scales of the distributions differ.
ansari(x1, x2)
#AnsariResult(statistic=534.0, pvalue=0.811752031516162)
# With a p-value close to 1, we cannot conclude that there is a significant difference in scales (as expected).
# Now apply the test to x1 and x3:
ansari(x1, x3)
# AnsariResult(statistic=464.0, pvalue=0.01846645873767982)
# The probability of observing such an extreme value of the statistic under the null hypothesis of equal scales is only 1.84%.
# We take this as evidence against the null hypothesis in favor of the alternative: the scales of the distributions from which the
# samples were drawn are not equal.
# We can use the alternative parameter to perform a one-sided test. In the example above, the scale of x1 is greater than that of x3, so
# the ratio of the scales of x1 and x3 is greater than 1. This means the p-value when alternative='greater' should be close to 0 and,
# therefore, we should be able to reject the null hypothesis:
ansari(x1, x3, alternative='greater')
# As we can see, the p-value is indeed quite low. Using alternative='less' should
# therefore produce a large p-value:
ansari(x1, x3, alternative='less')
# scipy.stats.fligner
# Test whether the lists a, b and c come from populations with equal variances.
from scipy.stats import fligner
a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]
b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]
c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]
stat, p = fligner(a, b, c)
p #pvalue=0.00450826080004775
# The small p-value suggests that the populations do not have equal variances.
# This is not surprising, given that the sample variance of b is much larger than
# that of a and c:
[np.var(x, ddof=1) for x in [a, b, c]] #[0.007054444444444413, 0.13073888888888888, 0.008890000000000002]
# other variance tests
from scipy.stats import bartlett, levene
bartlett(a, b, c)
#BartlettResult(statistic=22.789434813726768, pvalue=1.1254782518834628e-05)
levene(a, b, c)
#LeveneResult(statistic=7.584952754501659, pvalue=0.002431505967249681)
#scipy.stats.jarque_bera
from scipy import stats
rng = np.random.default_rng()
x = rng.normal(0, 1, 100000)
jarque_bera_test = stats.jarque_bera(x)
jarque_bera_test
# Jarque_beraResult(statistic=3.3415184718131554, pvalue= 0.18810419594996775)
jarque_bera_test.statistic
# 3.3415184718131554
jarque_bera_test.pvalue
# 0.18810419594996775
# scipy.stats.kurtosistest
from scipy.stats import kurtosistest
kurtosistest(list(range(20)))
# KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.08804338332528348)
kurtosistest(list(range(20)), alternative='less')
# KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.04402169166264174)
kurtosistest(list(range(20)), alternative='greater')
# KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.9559783083373583)
rng = np.random.default_rng()
s = rng.normal(0, 1, 1000)
kurtosistest(s)
#KurtosistestResult(statistic=-0.3188545786000282, pvalue=0.7498367888656665)
# statsmodels package
import numpy as np
import pandas as pd
import statsmodels.api as sm
nsample = 100
x = np.linspace(0, 10, 100)
X = np.column_stack((x, x ** 2))
print(X[0:3])
beta = np.array([1, 0.1, 10])
e = np.random.normal(size=nsample)
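# The snippet above stops before fitting a model; a plausible continuation of the standard
# statsmodels OLS example (assumed here, it is not part of the original notes) would be:
X = sm.add_constant(X)          # prepend the intercept column
y = np.dot(X, beta) + e         # simulate the response with noise
model = sm.OLS(y, X)
results = model.fit()
print(results.summary())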
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Copyright (c) 2007-2020, Acoular Development Team.
#------------------------------------------------------------------------------
"""
This file contains all the functionality that is very expensive with regard to
computational cost. All functions are optimized via NUMBA.
"""
import numpy as np
import numba as nb
cachedOption = True # if True: saves the numba func as compiled func in sub directory
parallelOption = 'parallel' # if numba.guvectorize is used: 'CPU' for single threading; 'parallel' for multithreading; 'cuda' for calculating on GPU
# Formerly known as 'faverage'
@nb.njit([nb.complex128[:,:,:](nb.complex128[:,:,:], nb.complex128[:,:]),
nb.complex64[:,:,:](nb.complex64[:,:,:], nb.complex64[:,:])], cache=cachedOption)
def calcCSM(csm, SpecAllMics):
""" Adds a given spectrum to the Cross-Spectral-Matrix (CSM).
Here only the upper triangular matrix of the CSM is calculated. After
averaging over the various ensembles, the whole CSM is created via complex
conjugate transposition. This happens outside
(in :class:`PowerSpectra<acoular.spectra.PowerSpectra>`).
This method was called 'faverage' in acoular versions <= 16.5.
Parameters
----------
csm : complex128[nFreqs, nMics, nMics]
The cross spectral matrix which gets updated with the spectrum of the ensemble.
SpecAllMics : complex128[nFreqs, nMics]
Spectrum of the added ensemble at all Mics.
Returns
-------
csm : complex128[nFreqs, nMics, nMics]
    The updated cross spectral matrix (the input csm is modified in place and also returned).
"""
#==============================================================================
# It turned out that parallelizing brings no benefit when calling calcCSM once per
# ensemble (as is done at the moment). BUT it could be worth taking a closer
# look at parallelization when averaging over all ensembles inside this numba
# optimized function. See "vglOptimierungFAverage.py" for some information on
# the various implementations and their limitations.
#==============================================================================
nFreqs = csm.shape[0]
nMics = csm.shape[1]
for cntFreq in range(nFreqs):
for cntColumn in range(nMics):
temp = SpecAllMics[cntFreq, cntColumn].conjugate()
for cntRow in range(cntColumn + 1): # calculate upper triangular matrix (of every frequency-slice) only
csm[cntFreq, cntRow, cntColumn] += temp * SpecAllMics[cntFreq, cntRow]
return csm
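# A minimal usage sketch (assumed here, not part of acoular itself): average several ensemble
# spectra into the upper-triangular CSM and then complete the lower triangle by conjugate
# transposition, as PowerSpectra does outside of this routine.
def _csm_from_ensembles_example(spectra_per_ensemble):
    """spectra_per_ensemble: iterable of complex128[nFreqs, nMics] arrays."""
    spectra_per_ensemble = list(spectra_per_ensemble)
    nFreqs, nMics = spectra_per_ensemble[0].shape
    csm = np.zeros((nFreqs, nMics, nMics), dtype=np.complex128)
    for spec in spectra_per_ensemble:
        calcCSM(csm, spec)                    # accumulates the upper triangle (incl. diagonal)
    csm /= len(spectra_per_ensemble)          # average over ensembles
    # fill the lower triangle: the CSM is Hermitian, the diagonal must not be added twice
    lower = csm.conj().transpose(0, 2, 1).copy()
    for f in range(nFreqs):
        np.fill_diagonal(lower[f], 0.)
    return csm + lower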
def beamformerFreq(steerVecType, boolRemovedDiagOfCSM, normFactor, inputTupleSteer, inputTupleCsm):
""" Conventional beamformer in frequency domain. Use either a predefined
steering vector formulation (see Sarradj 2012) or pass your own
steering vector.
Parameters
----------
steerVecType : (one of the following strings: 'classic' (I), 'inverse' (II), 'true level' (III), 'true location' (IV), 'custom')
Either build the steering vector via the predefined formulations
I - IV (see :ref:`Sarradj, 2012<Sarradj2012>`) or pass it directly.
boolRemovedDiagOfCSM : bool
Should the diagonal of the csm be removed?
normFactor : float
In here both the signalenergy loss factor (due to removal of the csm diagonal) as well as
beamforming algorithm (music, capon, ...) dependent normalization factors are handled.
inputTupleSteer : contains the information needed to create the steering vector. It depends on steerVecType. There are 2 cases:
steerVecType != 'custom' :
inputTupleSteer = (distGridToArrayCenter, distGridToAllMics, waveNumber) , with
distGridToArrayCenter : float64[nGridpoints]
Distance of all gridpoints to the center of sensor array
distGridToAllMics : float64[nGridpoints, nMics]
Distance of all gridpoints to all sensors of array
waveNumber : float64
The wave number
steerVecType == 'custom' :
inputTupleSteer = steeringVector , with
steeringVector : complex128[nGridPoints, nMics]
The steering vector of each gridpoint for the same frequency as the CSM
inputTupleCsm : contains the measurement data. There are 2 cases:
perform standard CSM-beamformer:
inputTupleCsm = csm
csm : complex128[ nMics, nMics]
The cross spectral matrix for one frequency
perform beamformer on eigenvalue decomposition of csm:
inputTupleCsm = (eigValues, eigVectors) , with
eigValues : float64[nEV]
nEV is the number of eigenvalues which should be taken into account.
All passed eigenvalues will be evaluated.
eigVectors : complex128[nMics, nEV]
Eigen vectors corresponding to eigValues. All passed eigenvector slices will be evaluated.
Returns
-------
*Autopower spectrum beamforming map [nGridPoints]
*steer normalization factor [nGridPoints]... contains the values the autopower needs to be multiplied with, in order to
fullfill 'steer^H * steer = 1' as needed for functional beamforming.
Some Notes on the optimization of all subroutines
-------------------------------------------------
Reducing beamforming equation:
Let the csm be C and the steering vector be h, than, using Linear Albegra, the conventional beamformer can be written as
.. math:: B = h^H \\cdot C \\cdot h,
with ^H meaning the complex conjugated transpose.
When using that C is a hermitian matrix one can reduce the equation to
.. math:: B = h^H \\cdot C_D \\cdot h + 2 \\cdot Real(h^H \\cdot C_U \\cdot h),
where C_D and C_U are the diagonal part and upper part of C respectively.
Steering vector:
Theoretically the steering vector always includes the term "exp(distMicsGrid - distArrayCenterGrid)",
but as the steering vector gets multiplied with its complex conjugate in all beamformer routines,
the constant "distArrayCenterGrid" cancels out --> In order to save operations, it is not implemented.
Spectral decomposition of the CSM:
In Linear Algebra the spectral decomposition of the CSM matrix would be:
.. math:: CSM = \\sum_{i=1}^{nEigenvalues} \\lambda_i (v_i \\cdot v_i^H) ,
where lambda_i is the i-th eigenvalue and
v_i is the eigenvector[nEigVal,1] belonging to lambda_i and ^H denotes the complex conjugate transpose.
Using this, one must not build the whole CSM (which would be time consuming), but can drag the
steering vector into the sum of the spectral decomp. This saves a lot of operations.
Squares:
Seemingly "a * a" is slightly faster than "a**2" in numba
Square of abs():
Even though "a.real**2 + a.imag**2" would have fewer operations, modern processors seem to be optimized
for "a * a.conj" and the latter is slightly faster. Both versions are much faster than "abs(a)**2".
Using Cascading Sums:
When using the Spectral-Decomposition-Beamformer one could use numpy's cascading sums for the scalar product
"eigenVec.conj * steeringVector". BUT (at the moment) this only brings benefits in computation time for a very
small range of nMics (approx 250) --> Therefore it is not implemented here.
"""
boolIsEigValProb = isinstance(inputTupleCsm, tuple)# len(inputTupleCsm) > 1
# get the beamformer type (key-tuple = (isEigValProblem, formulationOfSteeringVector, RemovalOfCSMDiag))
beamformerDict = {(False, 'classic', False) : _freqBeamformer_Formulation1AkaClassic_FullCSM,
(False, 'classic', True) : _freqBeamformer_Formulation1AkaClassic_CsmRemovedDiag,
(False, 'inverse', False) : _freqBeamformer_Formulation2AkaInverse_FullCSM,
(False, 'inverse', True) : _freqBeamformer_Formulation2AkaInverse_CsmRemovedDiag,
(False, 'true level', False) : _freqBeamformer_Formulation3AkaTrueLevel_FullCSM,
(False, 'true level', True) : _freqBeamformer_Formulation3AkaTrueLevel_CsmRemovedDiag,
(False, 'true location', False) : _freqBeamformer_Formulation4AkaTrueLocation_FullCSM,
(False, 'true location', True) : _freqBeamformer_Formulation4AkaTrueLocation_CsmRemovedDiag,
(False, 'custom', False) : _freqBeamformer_SpecificSteerVec_FullCSM,
(False, 'custom', True) : _freqBeamformer_SpecificSteerVec_CsmRemovedDiag,
(True, 'classic', False) : _freqBeamformer_EigValProb_Formulation1AkaClassic_FullCSM,
(True, 'classic', True) : _freqBeamformer_EigValProb_Formulation1AkaClassic_CsmRemovedDiag,
(True, 'inverse', False) : _freqBeamformer_EigValProb_Formulation2AkaInverse_FullCSM,
(True, 'inverse', True) : _freqBeamformer_EigValProb_Formulation2AkaInverse_CsmRemovedDiag,
(True, 'true level', False) : _freqBeamformer_EigValProb_Formulation3AkaTrueLevel_FullCSM,
(True, 'true level', True) : _freqBeamformer_EigValProb_Formulation3AkaTrueLevel_CsmRemovedDiag,
(True, 'true location', False) : _freqBeamformer_EigValProb_Formulation4AkaTrueLocation_FullCSM,
(True, 'true location', True) : _freqBeamformer_EigValProb_Formulation4AkaTrueLocation_CsmRemovedDiag,
(True, 'custom', False) : _freqBeamformer_EigValProb_SpecificSteerVec_FullCSM,
(True, 'custom', True) : _freqBeamformer_EigValProb_SpecificSteerVec_CsmRemovedDiag}
coreFunc = beamformerDict[(boolIsEigValProb, steerVecType, boolRemovedDiagOfCSM)]
# prepare Input
if steerVecType == 'custom': # beamformer with custom steering vector
steerVec = inputTupleSteer
#nFreqs, nGridPoints = steerVec.shape[0], steerVec.shape[1]
nGridPoints = steerVec.shape[0]
else: # predefined beamformers (Formulation I - IV)
distGridToArrayCenter, distGridToAllMics, waveNumber = inputTupleSteer#[0], inputTupleSteer[1], inputTupleSteer[2]
if not isinstance(waveNumber, np.ndarray): waveNumber = np.array([waveNumber])
#nFreqs, nGridPoints = waveNumber.shape[0], distGridToAllMics.shape[0]
nGridPoints = distGridToAllMics.shape[0]
if boolIsEigValProb:
eigVal, eigVec = inputTupleCsm#[0], inputTupleCsm[1]
else:
csm = inputTupleCsm
# beamformer routine: parallelized over Gridpoints
beamformOutput = np.zeros(nGridPoints, np.float64)
steerNormalizeOutput = np.zeros_like(beamformOutput)
result = np.zeros(nGridPoints, np.float64)
normalHelp = np.zeros_like(result)
if steerVecType == 'custom': # beamformer with custom steering vector
if boolIsEigValProb:
coreFunc(eigVal, eigVec, steerVec, normFactor, result, normalHelp)
else:
coreFunc(csm, steerVec, normFactor, result, normalHelp)
else: # predefined beamformers (Formulation I - IV)
if boolIsEigValProb:
coreFunc(eigVal, eigVec, distGridToArrayCenter, distGridToAllMics, waveNumber, normFactor, result, normalHelp)
else:
coreFunc(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, normFactor, result, normalHelp)
beamformOutput = result
steerNormalizeOutput = normalHelp
return beamformOutput, steerNormalizeOutput
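# The docstring above reduces B = h^H C h to the diagonal part plus twice the real part of the
# upper-triangular product. The small self-check below (an illustrative sketch only, not used by
# acoular) verifies that identity numerically for a random Hermitian matrix.
def _hermitian_reduction_check_example(nMics=8, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(nMics, nMics) + 1j * rng.randn(nMics, nMics)
    C = np.dot(X, X.conj().T)                      # random Hermitian (positive semidefinite) matrix
    h = rng.randn(nMics) + 1j * rng.randn(nMics)
    full = np.dot(h.conj(), np.dot(C, h)).real     # direct evaluation of h^H C h
    upper = np.triu(C, k=1)                        # strictly upper triangular part of C
    reduced = (h.conj() * np.diag(C) * h).real.sum() \
              + 2. * np.dot(h.conj(), np.dot(upper, h)).real
    return np.isclose(full, reduced)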
#%% beamformers - steer * CSM * steer
@nb.guvectorize([(nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(m,m),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_Formulation1AkaClassic_FullCSM(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = csm.shape[0]
steerVec = np.zeros((nMics), np.complex128)
# building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl.
for cntMics in range(nMics):
expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics])
steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg))
# performing matrix-vector-multiplication (see bottom of information header of 'beamformerFreq)
scalarProd = 0.0
for cntMics in range(nMics):
leftVecMatrixProd = 0.0 + 0.0j
for cntMics2 in range(cntMics): # calculate 'steer^H * CSM' of upper-triangular-part of csm (without diagonal)
leftVecMatrixProd += csm[cntMics2, cntMics] * steerVec[cntMics2].conjugate()
scalarProd += 2 * (leftVecMatrixProd * steerVec[cntMics]).real # use that csm is Hermitian (lower triangular of csm can be reduced to factor '2')
scalarProd += (csm[cntMics, cntMics] * steerVec[cntMics].conjugate() * steerVec[cntMics]).real # include diagonal of csm
normalizeFactor = nMics # specific normalization of steering vector formulation
normalizeSteer[0] = 1.0 / nMics
result[0] = scalarProd / (normalizeFactor * normalizeFactor) * signalLossNormalization[0]
@nb.guvectorize([(nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(m,m),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_Formulation1AkaClassic_CsmRemovedDiag(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = csm.shape[0]
steerVec = np.zeros((nMics), np.complex128)
# building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl.
for cntMics in range(nMics):
expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics])
steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg))
# performing matrix-vector-multiplication (see bottom of information header of 'beamformerFreq')
scalarProd = 0.0
for cntMics in range(nMics):
leftVecMatrixProd = 0.0 + 0.0j
for cntMics2 in range(cntMics): # calculate 'steer^H * CSM' of upper-triangular-part of csm (without diagonal)
leftVecMatrixProd += csm[cntMics2, cntMics] * steerVec[cntMics2].conjugate()
scalarProd += 2 * (leftVecMatrixProd * steerVec[cntMics]).real # use that csm is Hermitian (lower triangular of csm can be reduced to factor '2')
normalizeFactor = nMics # specific normalization of steering vector formulation
normalizeSteer[0] = 1.0 / nMics
result[0] = scalarProd / (normalizeFactor * normalizeFactor) * signalLossNormalization[0]
@nb.guvectorize([(nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(m,m),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_Formulation2AkaInverse_FullCSM(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = csm.shape[0]
steerVec = np.zeros((nMics), np.complex128)
# building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl.
helpNormalize = 0.0
for cntMics in range(nMics):
helpNormalize += distGridToAllMics[cntMics] * distGridToAllMics[cntMics]
expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics])
steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) * distGridToAllMics[cntMics] # r_{t,i}-normalization is handled here
# performing matrix-vector-multiplication (see bottom of information header of 'beamformerFreq')
scalarProd = 0.0
for cntMics in range(nMics):
leftVecMatrixProd = 0.0 + 0.0j
for cntMics2 in range(cntMics): # calculate 'steer^H * CSM' of upper-triangular-part of csm (without diagonal)
leftVecMatrixProd += csm[cntMics2, cntMics] * steerVec[cntMics2].conjugate()
scalarProd += 2 * (leftVecMatrixProd * steerVec[cntMics]).real # use that csm is Hermitian (lower triangular of csm can be reduced to factor '2')
scalarProd += (csm[cntMics, cntMics] * steerVec[cntMics].conjugate() * steerVec[cntMics]).real # include diagonal of csm
normalizeFactor = nMics * distGridToArrayCenter[0] # specific normalization of steering vector formulation
normalizeFactorSquared = normalizeFactor * normalizeFactor
normalizeSteer[0] = helpNormalize / normalizeFactorSquared
result[0] = scalarProd / normalizeFactorSquared * signalLossNormalization[0]
@nb.guvectorize([(nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(m,m),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_Formulation2AkaInverse_CsmRemovedDiag(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = csm.shape[0]
steerVec = np.zeros((nMics), np.complex128)
# building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl.
helpNormalize = 0.0
for cntMics in range(nMics):
helpNormalize += distGridToAllMics[cntMics] * distGridToAllMics[cntMics]
expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics])
steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) * distGridToAllMics[cntMics] # r_{t,i}-normalization is handled here
# performing matrix-vector-multiplication (see bottom of information header of 'beamformerFreq')
scalarProd = 0.0
for cntMics in range(nMics):
leftVecMatrixProd = 0.0 + 0.0j
for cntMics2 in range(cntMics): # calculate 'steer^H * CSM' of upper-triangular-part of csm (without diagonal)
leftVecMatrixProd += csm[cntMics2, cntMics] * steerVec[cntMics2].conjugate()
scalarProd += 2 * (leftVecMatrixProd * steerVec[cntMics]).real # use that csm is Hermitian (lower triangular of csm can be reduced to factor '2')
normalizeFactor = nMics * distGridToArrayCenter[0] # specific normalization of steering vector formulation
normalizeFactorSquared = normalizeFactor * normalizeFactor
normalizeSteer[0] = helpNormalize / normalizeFactorSquared
result[0] = scalarProd / normalizeFactorSquared * signalLossNormalization[0]
@nb.guvectorize([(nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(m,m),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_Formulation3AkaTrueLevel_FullCSM(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = csm.shape[0]
steerVec = np.zeros((nMics), np.complex128)
# building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl.
helpNormalize = 0.0
for cntMics in range(nMics):
helpNormalize += 1.0 / (distGridToAllMics[cntMics] * distGridToAllMics[cntMics])
expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics])
steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) / distGridToAllMics[cntMics] # r_{t,i}-normalization is handled here
# performing matrix-vector-multiplication (see bottom of information header of 'beamformerFreq')
scalarProd = 0.0
for cntMics in range(nMics):
leftVecMatrixProd = 0.0 + 0.0j
for cntMics2 in range(cntMics): # calculate 'steer^H * CSM' of upper-triangular-part of csm (without diagonal)
leftVecMatrixProd += csm[cntMics2, cntMics] * steerVec[cntMics2].conjugate()
scalarProd += 2 * (leftVecMatrixProd * steerVec[cntMics]).real # use that csm is Hermitian (lower triangular of csm can be reduced to factor '2')
scalarProd += (csm[cntMics, cntMics] * steerVec[cntMics].conjugate() * steerVec[cntMics]).real # include diagonal of csm
normalizeFactor = distGridToArrayCenter[0] * helpNormalize # specific normalization of steering vector formulation
normalizeSteer[0] = 1.0 / (distGridToArrayCenter[0] * distGridToArrayCenter[0]) / helpNormalize
result[0] = scalarProd / (normalizeFactor * normalizeFactor) * signalLossNormalization[0]
@nb.guvectorize([(nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(m,m),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_Formulation3AkaTrueLevel_CsmRemovedDiag(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = csm.shape[0]
steerVec = np.zeros((nMics), np.complex128)
# building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl.
helpNormalize = 0.0
for cntMics in range(nMics):
helpNormalize += 1.0 / (distGridToAllMics[cntMics] * distGridToAllMics[cntMics])
expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics])
steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) / distGridToAllMics[cntMics] # r_{t,i}-normalization is handled here
# performing matrix-vector-multiplication (see bottom of information header of 'beamformerFreq')
scalarProd = 0.0
for cntMics in range(nMics):
leftVecMatrixProd = 0.0 + 0.0j
for cntMics2 in range(cntMics): # calculate 'steer^H * CSM' of upper-triangular-part of csm (without diagonal)
leftVecMatrixProd += csm[cntMics2, cntMics] * steerVec[cntMics2].conjugate()
scalarProd += 2 * (leftVecMatrixProd * steerVec[cntMics]).real # use that csm is Hermitian (lower triangular of csm can be reduced to factor '2')
normalizeFactor = distGridToArrayCenter[0] * helpNormalize # specific normalization of steering vector formulation
normalizeSteer[0] = 1.0 / (distGridToArrayCenter[0] * distGridToArrayCenter[0]) / helpNormalize
result[0] = scalarProd / (normalizeFactor * normalizeFactor) * signalLossNormalization[0]
@nb.guvectorize([(nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(m,m),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_Formulation4AkaTrueLocation_FullCSM(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = csm.shape[0]
steerVec = np.zeros((nMics), np.complex128)
# building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl.
helpNormalize = 0.0
for cntMics in range(nMics):
helpNormalize += 1.0 / (distGridToAllMics[cntMics] * distGridToAllMics[cntMics])
expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics])
steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) / distGridToAllMics[cntMics] # r_{t,i}-normalization is handled here
# performing matrix-vector-multiplication (see bottom of information header of 'beamformerFreq')
scalarProd = 0.0
for cntMics in range(nMics):
leftVecMatrixProd = 0.0 + 0.0j
for cntMics2 in range(cntMics): # calculate 'steer^H * CSM' of upper-triangular-part of csm (without diagonal)
leftVecMatrixProd += csm[cntMics2, cntMics] * steerVec[cntMics2].conjugate()
scalarProd += 2 * (leftVecMatrixProd * steerVec[cntMics]).real # use that csm is Hermitian (lower triangular of csm can be reduced to factor '2')
scalarProd += (csm[cntMics, cntMics] * steerVec[cntMics].conjugate() * steerVec[cntMics]).real # include diagonal of csm
normalizeFactor = nMics * helpNormalize # specific normalization of steering vector formulation
normalizeSteer[0] = 1.0 / nMics
result[0] = scalarProd / normalizeFactor * signalLossNormalization[0]
@nb.guvectorize([(nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(m,m),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_Formulation4AkaTrueLocation_CsmRemovedDiag(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = csm.shape[0]
steerVec = np.zeros((nMics), np.complex128)
# building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl.
helpNormalize = 0.0
for cntMics in range(nMics):
helpNormalize += 1.0 / (distGridToAllMics[cntMics] * distGridToAllMics[cntMics])
expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics])
steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) / distGridToAllMics[cntMics] # r_{t,i}-normalization is handled here
# performing matrix-vector-multiplication (see bottom of information header of 'beamformerFreq')
scalarProd = 0.0
for cntMics in range(nMics):
leftVecMatrixProd = 0.0 + 0.0j
for cntMics2 in range(cntMics): # calculate 'steer^H * CSM' of upper-triangular-part of csm (without diagonal)
leftVecMatrixProd += csm[cntMics2, cntMics] * steerVec[cntMics2].conjugate()
scalarProd += 2 * (leftVecMatrixProd * steerVec[cntMics]).real # use that csm is Hermitian (lower triangular of csm can be reduced to factor '2')
normalizeFactor = nMics * helpNormalize # specific normalization of steering vector formulation
normalizeSteer[0] = 1.0 / nMics
result[0] = scalarProd / normalizeFactor * signalLossNormalization[0]
@nb.guvectorize([(nb.complex128[:,:], nb.complex128[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(m,m),(m),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_SpecificSteerVec_FullCSM(csm, steerVec, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = csm.shape[0]
# performing matrix-vector-multiplication (see bottom of information header of 'beamformerFreq')
scalarProd = 0.0
helpNormalize = 0.0
for cntMics in range(nMics):
helpNormalize += steerVec[cntMics] * steerVec[cntMics].conjugate()
leftVecMatrixProd = 0.0 + 0.0j
for cntMics2 in range(cntMics): # calculate 'steer^H * CSM' of upper-triangular-part of csm (without diagonal)
leftVecMatrixProd += csm[cntMics2, cntMics] * steerVec[cntMics2].conjugate()
scalarProd += 2 * (leftVecMatrixProd * steerVec[cntMics]).real # use that csm is Hermitian (lower triangular of csm can be reduced to factor '2')
scalarProd += (csm[cntMics, cntMics] * steerVec[cntMics].conjugate() * steerVec[cntMics]).real # include diagonal of csm
normalizeSteer[0] = helpNormalize.real
result[0] = scalarProd * signalLossNormalization[0]
@nb.guvectorize([(nb.complex128[:,:], nb.complex128[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(m,m),(m),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_SpecificSteerVec_CsmRemovedDiag(csm, steerVec, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = csm.shape[0]
# performing matrix-vector-multiplication (see bottom of information header of 'beamformerFreq')
scalarProd = 0.0
helpNormalize = 0.0
for cntMics in range(nMics):
helpNormalize += steerVec[cntMics] * steerVec[cntMics].conjugate()
leftVecMatrixProd = 0.0 + 0.0j
for cntMics2 in range(cntMics): # calculate 'steer^H * CSM' of upper-triangular-part of csm (without diagonal)
leftVecMatrixProd += csm[cntMics2, cntMics] * steerVec[cntMics2].conjugate()
scalarProd += 2 * (leftVecMatrixProd * steerVec[cntMics]).real # use that csm is Hermitian (lower triangular of csm can be reduced to factor '2')
normalizeSteer[0] = helpNormalize.real
result[0] = scalarProd * signalLossNormalization[0]
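# --- Editorial illustration (not part of the original module) ---
# Every kernel above evaluates the real quadratic form Re(steer^H * CSM * steer),
# optionally with the CSM main diagonal removed, using only the upper triangle of
# the Hermitian CSM. The helper below is a plain NumPy reference of that
# computation for a single grid point and frequency; its name and use are
# assumptions of this sketch, intended only for cross-checking the Numba kernels.
def _numpy_reference_quadratic_form(csm, steerVec, removeDiag=False):
    # h^H C h is real for a Hermitian CSM; .real merely drops numerical noise
    quadForm = np.vdot(steerVec, csm @ steerVec).real
    if removeDiag:
        # subtract the main-diagonal contribution, as in the '_CsmRemovedDiag' kernels
        quadForm -= np.sum(np.diag(csm).real * np.abs(steerVec) ** 2)
    return quadForm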
#%% beamformers - Eigenvalue Problem
@nb.guvectorize([(nb.float64[:], nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(e),(m,e),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_EigValProb_Formulation1AkaClassic_FullCSM(eigVal, eigVec, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = distGridToAllMics.shape[0]
steerVec = np.zeros((nMics), np.complex128)
# building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl.
for cntMics in range(nMics):
expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics])
steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg))
    # performing matrix-vector-multiplication via spectral decomp. (see bottom of information header of 'beamformerFreq')
scalarProdFullCSM = 0.0
for cntEigVal in range(len(eigVal)):
scalarProdFullCSMperEigVal = 0.0 + 0.0j
for cntMics in range(nMics):
scalarProdFullCSMperEigVal += eigVec[cntMics, cntEigVal].conjugate() * steerVec[cntMics]
scalarProdFullCSMAbsSquared = (scalarProdFullCSMperEigVal * scalarProdFullCSMperEigVal.conjugate()).real
scalarProdFullCSM += scalarProdFullCSMAbsSquared * eigVal[cntEigVal]
normalizeFactor = nMics # specific normalization of steering vector formulation
normalizeSteer[0] = 1.0 / nMics
result[0] = scalarProdFullCSM / (normalizeFactor * normalizeFactor) * signalLossNormalization[0]
@nb.guvectorize([(nb.float64[:], nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(e),(m,e),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_EigValProb_Formulation1AkaClassic_CsmRemovedDiag(eigVal, eigVec, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = distGridToAllMics.shape[0]
steerVec = np.zeros((nMics), np.complex128)
# building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl.
for cntMics in range(nMics):
expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics])
steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg))
    # performing matrix-vector-multiplication via spectral decomp. (see bottom of information header of 'beamformerFreq')
scalarProdReducedCSM = 0.0
for cntEigVal in range(len(eigVal)):
scalarProdFullCSMperEigVal = 0.0 + 0.0j
scalarProdDiagCSMperEigVal = 0.0
for cntMics in range(nMics):
            temp1 = eigVec[cntMics, cntEigVal].conjugate() * steerVec[cntMics] # Don't call it 'expArg' like in the steer-loop, because expArg there is a float32 (not a double), which would cause errors of approx. 1e-8
scalarProdFullCSMperEigVal += temp1
scalarProdDiagCSMperEigVal += (temp1 * temp1.conjugate()).real
scalarProdFullCSMAbsSquared = (scalarProdFullCSMperEigVal * scalarProdFullCSMperEigVal.conjugate()).real
scalarProdReducedCSM += (scalarProdFullCSMAbsSquared - scalarProdDiagCSMperEigVal) * eigVal[cntEigVal]
normalizeFactor = nMics # specific normalization of steering vector formulation
normalizeSteer[0] = 1.0 / nMics
result[0] = scalarProdReducedCSM / (normalizeFactor * normalizeFactor) * signalLossNormalization[0]
@nb.guvectorize([(nb.float64[:], nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(e),(m,e),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_EigValProb_Formulation2AkaInverse_FullCSM(eigVal, eigVec, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = distGridToAllMics.shape[0]
steerVec = np.zeros((nMics), np.complex128)
# building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl.
helpNormalize = 0.0
for cntMics in range(nMics):
helpNormalize += distGridToAllMics[cntMics] * distGridToAllMics[cntMics]
expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics])
steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) * distGridToAllMics[cntMics] # r_{t,i}-normalization is handled here
    # performing matrix-vector-multiplication via spectral decomp. (see bottom of information header of 'beamformerFreq')
scalarProdFullCSM = 0.0
for cntEigVal in range(len(eigVal)):
scalarProdFullCSMperEigVal = 0.0 + 0.0j
for cntMics in range(nMics):
scalarProdFullCSMperEigVal += eigVec[cntMics, cntEigVal].conjugate() * steerVec[cntMics]
scalarProdFullCSMAbsSquared = (scalarProdFullCSMperEigVal * scalarProdFullCSMperEigVal.conjugate()).real
scalarProdFullCSM += scalarProdFullCSMAbsSquared * eigVal[cntEigVal]
normalizeFactor = nMics * distGridToArrayCenter[0] # specific normalization of steering vector formulation
normalizeFactorSquared = normalizeFactor * normalizeFactor
normalizeSteer[0] = helpNormalize / normalizeFactorSquared
result[0] = scalarProdFullCSM / normalizeFactorSquared * signalLossNormalization[0]
@nb.guvectorize([(nb.float64[:], nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(e),(m,e),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_EigValProb_Formulation2AkaInverse_CsmRemovedDiag(eigVal, eigVec, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = distGridToAllMics.shape[0]
steerVec = np.zeros((nMics), np.complex128)
# building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl.
helpNormalize = 0.0
for cntMics in range(nMics):
helpNormalize += distGridToAllMics[cntMics] * distGridToAllMics[cntMics]
expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics])
steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) * distGridToAllMics[cntMics] # r_{t,i}-normalization is handled here
    # performing matrix-vector-multiplication via spectral decomp. (see bottom of information header of 'beamformerFreq')
scalarProdReducedCSM = 0.0
for cntEigVal in range(len(eigVal)):
scalarProdFullCSMperEigVal = 0.0 + 0.0j
scalarProdDiagCSMperEigVal = 0.0
for cntMics in range(nMics):
            temp1 = eigVec[cntMics, cntEigVal].conjugate() * steerVec[cntMics] # Don't call it 'expArg' like in the steer-loop, because expArg there is a float32 (not a double), which would cause errors of approx. 1e-8
scalarProdFullCSMperEigVal += temp1
scalarProdDiagCSMperEigVal += (temp1 * temp1.conjugate()).real
scalarProdFullCSMAbsSquared = (scalarProdFullCSMperEigVal * scalarProdFullCSMperEigVal.conjugate()).real
scalarProdReducedCSM += (scalarProdFullCSMAbsSquared - scalarProdDiagCSMperEigVal) * eigVal[cntEigVal]
normalizeFactor = nMics * distGridToArrayCenter[0] # specific normalization of steering vector formulation
normalizeFactorSquared = normalizeFactor * normalizeFactor
normalizeSteer[0] = helpNormalize / normalizeFactorSquared
result[0] = scalarProdReducedCSM / normalizeFactorSquared * signalLossNormalization[0]
@nb.guvectorize([(nb.float64[:], nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(e),(m,e),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_EigValProb_Formulation3AkaTrueLevel_FullCSM(eigVal, eigVec, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = distGridToAllMics.shape[0]
steerVec = np.zeros((nMics), np.complex128)
# building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl.
helpNormalize = 0.0
for cntMics in range(nMics):
helpNormalize += 1.0 / (distGridToAllMics[cntMics] * distGridToAllMics[cntMics])
expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics])
steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) / distGridToAllMics[cntMics] # r_{t,i}-normalization is handled here
    # performing matrix-vector-multiplication via spectral decomp. (see bottom of information header of 'beamformerFreq')
scalarProdFullCSM = 0.0
for cntEigVal in range(len(eigVal)):
scalarProdFullCSMperEigVal = 0.0 + 0.0j
for cntMics in range(nMics):
scalarProdFullCSMperEigVal += eigVec[cntMics, cntEigVal].conjugate() * steerVec[cntMics]
scalarProdFullCSMAbsSquared = (scalarProdFullCSMperEigVal * scalarProdFullCSMperEigVal.conjugate()).real
scalarProdFullCSM += scalarProdFullCSMAbsSquared * eigVal[cntEigVal]
normalizeFactor = distGridToArrayCenter[0] * helpNormalize # specific normalization of steering vector formulation
normalizeSteer[0] = 1.0 / (distGridToArrayCenter[0] * distGridToArrayCenter[0]) / helpNormalize
result[0] = scalarProdFullCSM / (normalizeFactor * normalizeFactor) * signalLossNormalization[0]
@nb.guvectorize([(nb.float64[:], nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(e),(m,e),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_EigValProb_Formulation3AkaTrueLevel_CsmRemovedDiag(eigVal, eigVec, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = distGridToAllMics.shape[0]
steerVec = np.zeros((nMics), np.complex128)
# building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl.
helpNormalize = 0.0
for cntMics in range(nMics):
helpNormalize += 1.0 / (distGridToAllMics[cntMics] * distGridToAllMics[cntMics])
expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics])
steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) / distGridToAllMics[cntMics] # r_{t,i}-normalization is handled here
    # performing matrix-vector-multiplication via spectral decomp. (see bottom of information header of 'beamformerFreq')
scalarProdReducedCSM = 0.0
for cntEigVal in range(len(eigVal)):
scalarProdFullCSMperEigVal = 0.0 + 0.0j
scalarProdDiagCSMperEigVal = 0.0
for cntMics in range(nMics):
            temp1 = eigVec[cntMics, cntEigVal].conjugate() * steerVec[cntMics] # Don't call it 'expArg' like in the steer-loop, because expArg there is a float32 (not a double), which would cause errors of approx. 1e-8
scalarProdFullCSMperEigVal += temp1
scalarProdDiagCSMperEigVal += (temp1 * temp1.conjugate()).real
scalarProdFullCSMAbsSquared = (scalarProdFullCSMperEigVal * scalarProdFullCSMperEigVal.conjugate()).real
scalarProdReducedCSM += (scalarProdFullCSMAbsSquared - scalarProdDiagCSMperEigVal) * eigVal[cntEigVal]
normalizeFactor = distGridToArrayCenter[0] * helpNormalize # specific normalization of steering vector formulation
normalizeSteer[0] = 1.0 / (distGridToArrayCenter[0] * distGridToArrayCenter[0]) / helpNormalize
result[0] = scalarProdReducedCSM / (normalizeFactor * normalizeFactor) * signalLossNormalization[0]
@nb.guvectorize([(nb.float64[:], nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(e),(m,e),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_EigValProb_Formulation4AkaTrueLocation_FullCSM(eigVal, eigVec, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = distGridToAllMics.shape[0]
steerVec = np.zeros((nMics), np.complex128)
# building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl.
helpNormalize = 0.0
for cntMics in range(nMics):
helpNormalize += 1.0 / (distGridToAllMics[cntMics] * distGridToAllMics[cntMics])
expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics])
steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) / distGridToAllMics[cntMics] # r_{t,i}-normalization is handled here
    # performing matrix-vector-multiplication via spectral decomp. (see bottom of information header of 'beamformerFreq')
scalarProdFullCSM = 0.0
for cntEigVal in range(len(eigVal)):
scalarProdFullCSMperEigVal = 0.0 + 0.0j
for cntMics in range(nMics):
scalarProdFullCSMperEigVal += eigVec[cntMics, cntEigVal].conjugate() * steerVec[cntMics]
scalarProdFullCSMAbsSquared = (scalarProdFullCSMperEigVal * scalarProdFullCSMperEigVal.conjugate()).real
scalarProdFullCSM += scalarProdFullCSMAbsSquared * eigVal[cntEigVal]
normalizeFactor = nMics * helpNormalize # specific normalization of steering vector formulation
normalizeSteer[0] = 1.0 / nMics
result[0] = scalarProdFullCSM / normalizeFactor * signalLossNormalization[0]
@nb.guvectorize([(nb.float64[:], nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(e),(m,e),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_EigValProb_Formulation4AkaTrueLocation_CsmRemovedDiag(eigVal, eigVec, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = distGridToAllMics.shape[0]
steerVec = np.zeros((nMics), np.complex128)
# building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl.
helpNormalize = 0.0
for cntMics in range(nMics):
helpNormalize += 1.0 / (distGridToAllMics[cntMics] * distGridToAllMics[cntMics])
expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics])
steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) / distGridToAllMics[cntMics] # r_{t,i}-normalization is handled here
    # performing matrix-vector-multiplication via spectral decomp. (see bottom of information header of 'beamformerFreq')
scalarProdReducedCSM = 0.0
for cntEigVal in range(len(eigVal)):
scalarProdFullCSMperEigVal = 0.0 + 0.0j
scalarProdDiagCSMperEigVal = 0.0
for cntMics in range(nMics):
            temp1 = eigVec[cntMics, cntEigVal].conjugate() * steerVec[cntMics] # Don't call it 'expArg' like in the steer-loop, because expArg there is a float32 (not a double), which would cause errors of approx. 1e-8
scalarProdFullCSMperEigVal += temp1
scalarProdDiagCSMperEigVal += (temp1 * temp1.conjugate()).real
scalarProdFullCSMAbsSquared = (scalarProdFullCSMperEigVal * scalarProdFullCSMperEigVal.conjugate()).real
scalarProdReducedCSM += (scalarProdFullCSMAbsSquared - scalarProdDiagCSMperEigVal) * eigVal[cntEigVal]
normalizeFactor = nMics * helpNormalize # specific normalization of steering vector formulation
normalizeSteer[0] = 1.0 / nMics
result[0] = scalarProdReducedCSM / normalizeFactor * signalLossNormalization[0]
@nb.guvectorize([(nb.float64[:], nb.complex128[:,:], nb.complex128[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(e),(m,e),(m),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_EigValProb_SpecificSteerVec_FullCSM(eigVal, eigVec, steerVec, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = eigVec.shape[0]
# get h^H * h for normalization
helpNormalize = 0.0
for cntMics in range(nMics):
helpNormalize += steerVec[cntMics] * steerVec[cntMics].conjugate()
    # performing matrix-vector-multiplication via spectral decomp. (see bottom of information header of 'beamformerFreq')
scalarProdFullCSM = 0.0
for cntEigVal in range(len(eigVal)):
scalarProdFullCSMperEigVal = 0.0 + 0.0j
for cntMics in range(nMics):
scalarProdFullCSMperEigVal += eigVec[cntMics, cntEigVal].conjugate() * steerVec[cntMics]
scalarProdFullCSMAbsSquared = (scalarProdFullCSMperEigVal * scalarProdFullCSMperEigVal.conjugate()).real
scalarProdFullCSM += scalarProdFullCSMAbsSquared * eigVal[cntEigVal]
normalizeSteer[0] = helpNormalize.real
result[0] = scalarProdFullCSM * signalLossNormalization[0]
@nb.guvectorize([(nb.float64[:], nb.complex128[:,:], nb.complex128[:], nb.float64[:], nb.float64[:], nb.float64[:])],
'(e),(m,e),(m),()->(),()', nopython=True, target=parallelOption, cache=cachedOption)
def _freqBeamformer_EigValProb_SpecificSteerVec_CsmRemovedDiag(eigVal, eigVec, steerVec, signalLossNormalization, result, normalizeSteer):
# see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements.
nMics = eigVec.shape[0]
# get h^H * h for normalization
helpNormalize = 0.0
for cntMics in range(nMics):
helpNormalize += steerVec[cntMics] * steerVec[cntMics].conjugate()
    # performing matrix-vector-multiplication via spectral decomp. (see bottom of information header of 'beamformerFreq')
scalarProdReducedCSM = 0.0
for cntEigVal in range(len(eigVal)):
scalarProdFullCSMperEigVal = 0.0 + 0.0j
scalarProdDiagCSMperEigVal = 0.0
for cntMics in range(nMics):
temp1 = eigVec[cntMics, cntEigVal].conjugate() * steerVec[cntMics]
scalarProdFullCSMperEigVal += temp1
scalarProdDiagCSMperEigVal += (temp1 * temp1.conjugate()).real
scalarProdFullCSMAbsSquared = (scalarProdFullCSMperEigVal * scalarProdFullCSMperEigVal.conjugate()).real
scalarProdReducedCSM += (scalarProdFullCSMAbsSquared - scalarProdDiagCSMperEigVal) * eigVal[cntEigVal]
normalizeSteer[0] = helpNormalize.real
result[0] = scalarProdReducedCSM * signalLossNormalization[0]
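# --- Editorial illustration (not part of the original module) ---
# The 'EigValProb' kernels above use the spectral decomposition CSM = V * diag(eigVal) * V^H,
# so that steer^H * CSM * steer = sum_e eigVal_e * |v_e^H * steer|^2, with one
# eigenvector per column of eigVec. The helper below states that identity in plain
# NumPy; it is a hedged sketch for cross-checking, not part of the original API.
def _numpy_reference_spectral_quadratic_form(eigVal, eigVec, steerVec):
    projections = eigVec.conj().T @ steerVec           # v_e^H * steer for every eigenvalue
    return np.sum(eigVal * np.abs(projections) ** 2)   # equals Re(steer^H * CSM * steer)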
#%% Point - Spread - Function
def calcPointSpreadFunction(steerVecType, distGridToArrayCenter, distGridToAllMics, waveNumber, indSource, dtype):
""" Calculates the Point-Spread-Functions. Use either a predefined steering vector
formulation (see :ref:`Sarradj, 2012<Sarradj2012>`) or pass it your own steering vector.
Parameters
----------
steerVecType : (one of the following strings: 'classic' (I), 'inverse' (II), 'true level' (III), 'true location' (IV))
One of the predefined formulations I - IV (see :ref:`Sarradj, 2012<Sarradj2012>`).
distGridToArrayCenter : float64[nGridpoints]
Distance of all gridpoints to the center of sensor array
distGridToAllMics : float64[nGridpoints, nMics]
Distance of all gridpoints to all sensors of array
waveNumber : float64
The free field wave number.
indSource : a LIST of int (e.g. indSource=[5] is fine; indSource=5 doesn't work):
specifies which gridpoints should be assumed to be sources
        --> a separate PSF will be calculated for each source
dtype : either 'float64' or 'float32'
Determines the precision of the result. For big maps this could be worth downgrading.
Returns
-------
    Autopower spectrum PSF map : [nGridPoints, nSources] (for the single wave number passed per call)
Some Notes on the optimization of all subroutines
-------------------------------------------------
Reducing beamforming equation:
    Let the steering vector be h; then, using linear algebra, the PSF of a source point S would be
    .. math:: B = h^H \\cdot (a_S \\cdot a_S^H) \\cdot h,
    with ^H meaning the complex conjugate transpose and a_S the transfer function from the source to the grid point.
    The (...)-part equals the CSM that the source would produce via the chosen steering vector formulation.
Using (for example) tensor calculus, one can reduce the equation to:
.. math:: B = \\left| h^H \\cdot a_S \\right| ^ 2.
Steering vector:
    Theoretically the steering vector always includes the term "exp(distMicsGrid - distArrayCenterGrid)", but as the steering vector gets multiplied with its complex conjugate in
all beamformer routines, the constant "distArrayCenterGrid" cancels out --> In order to save operations, it is not implemented.
Squares:
Seemingly "a * a" is slightly faster than "a**2" in numba
Square of abs():
Even though "a.real**2 + a.imag**^2" would have fewer operations, modern processors seem to be optimized for "a * a.conj" and are slightly faster the latter way.
Both Versions are much faster than "abs(a)**2".
"""
# get the steering vector formulation
psfDict = {'classic' : _psf_Formulation1AkaClassic,
'inverse' : _psf_Formulation2AkaInverse,
'true level' : _psf_Formulation3AkaTrueLevel,
'true location' : _psf_Formulation4AkaTrueLocation}
coreFunc = psfDict[steerVecType]
# prepare input
nGridPoints = distGridToAllMics.shape[0]
nSources = len(indSource)
if not isinstance(waveNumber, np.ndarray): waveNumber = np.array([waveNumber])
# psf routine: parallelized over Gridpoints
psfOutput = np.zeros((nGridPoints, nSources), dtype=dtype)
coreFunc(distGridToArrayCenter,
distGridToAllMics,
distGridToArrayCenter[indSource],
distGridToAllMics[indSource, :],
waveNumber,
psfOutput)
return psfOutput
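# --- Editorial usage sketch (not part of the original module) ---
# Minimal example of how calcPointSpreadFunction could be called; the geometry
# and all numbers below are made up purely for illustration.
def _example_psf_usage():
    nGridPoints, nMics = 100, 32
    rng = np.random.default_rng(0)
    distGridToArrayCenter = rng.uniform(1.0, 2.0, nGridPoints)
    distGridToAllMics = rng.uniform(1.0, 2.0, (nGridPoints, nMics))
    waveNumber = 2 * np.pi * 2000.0 / 343.0        # k = 2*pi*f / c for f = 2 kHz
    indSource = [5]                                # PSF of grid point 5 (must be a list)
    return calcPointSpreadFunction('classic', distGridToArrayCenter,
                                   distGridToAllMics, waveNumber,
                                   indSource, 'float64')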
@nb.guvectorize([(nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:,:], nb.float64[:], nb.float64[:]),
(nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:,:], nb.float64[:], nb.float32[:])],
'(),(m),(s),(s,m),()->(s)', nopython=True, target=parallelOption, cache=cachedOption)
def _psf_Formulation1AkaClassic(distGridToArrayCenter, distGridToAllMics, distSourcesToArrayCenter, distSourcesToAllMics, waveNumber, result):
nMics = distGridToAllMics.shape[0]
for cntSources in range(len(distSourcesToArrayCenter)):
# see bottom of information header of 'calcPointSpreadFunction' for infos on the PSF calculation and speed improvements.
scalarProd = 0.0 + 0.0j
for cntMics in range(nMics):
expArg = np.float32(waveNumber[0] * (distGridToAllMics[cntMics] - distSourcesToAllMics[cntSources, cntMics]))
scalarProd += (np.cos(expArg) - 1j * np.sin(expArg)) / distSourcesToAllMics[cntSources, cntMics]
normalizeFactor = distSourcesToArrayCenter[cntSources] / nMics
scalarProdAbsSquared = (scalarProd * scalarProd.conjugate()).real
result[cntSources] = scalarProdAbsSquared * (normalizeFactor * normalizeFactor)
@nb.guvectorize([(nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:,:], nb.float64[:], nb.float64[:]),
(nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:,:], nb.float64[:], nb.float32[:])],
'(),(m),(s),(s,m),()->(s)', nopython=True, target=parallelOption, cache=cachedOption)
def _psf_Formulation2AkaInverse(distGridToArrayCenter, distGridToAllMics, distSourcesToArrayCenter, distSourcesToAllMics, waveNumber, result):
nMics = distGridToAllMics.shape[0]
for cntSources in range(len(distSourcesToArrayCenter)):
# see bottom of information header of 'calcPointSpreadFunction' for infos on the PSF calculation and speed improvements.
scalarProd = 0.0 + 0.0j
for cntMics in range(nMics):
expArg = np.float32(waveNumber[0] * (distGridToAllMics[cntMics] - distSourcesToAllMics[cntSources, cntMics]))
scalarProd += (np.cos(expArg) - 1j * np.sin(expArg)) / distSourcesToAllMics[cntSources, cntMics] * distGridToAllMics[cntMics]
normalizeFactor = distSourcesToArrayCenter[cntSources] / distGridToArrayCenter[0] / nMics
scalarProdAbsSquared = (scalarProd * scalarProd.conjugate()).real
result[cntSources] = scalarProdAbsSquared * (normalizeFactor * normalizeFactor)
@nb.guvectorize([(nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:,:], nb.float64[:], nb.float64[:]),
(nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:,:], nb.float64[:], nb.float32[:])],
'(),(m),(s),(s,m),()->(s)', nopython=True, target=parallelOption, cache=cachedOption)
def _psf_Formulation3AkaTrueLevel(distGridToArrayCenter, distGridToAllMics, distSourcesToArrayCenter, distSourcesToAllMics, waveNumber, result):
nMics = distGridToAllMics.shape[0]
for cntSources in range(len(distSourcesToArrayCenter)):
# see bottom of information header of 'calcPointSpreadFunction' for infos on the PSF calculation and speed improvements.
scalarProd = 0.0 + 0.0j
helpNormalizeGrid = 0.0
for cntMics in range(nMics):
expArg = np.float32(waveNumber[0] * (distGridToAllMics[cntMics] - distSourcesToAllMics[cntSources, cntMics]))
scalarProd += (np.cos(expArg) - 1j * np.sin(expArg)) / distSourcesToAllMics[cntSources, cntMics] / distGridToAllMics[cntMics]
helpNormalizeGrid += 1.0 / (distGridToAllMics[cntMics] * distGridToAllMics[cntMics])
normalizeFactor = distSourcesToArrayCenter[cntSources] / distGridToArrayCenter[0] / helpNormalizeGrid
scalarProdAbsSquared = (scalarProd * scalarProd.conjugate()).real
result[cntSources] = scalarProdAbsSquared * (normalizeFactor * normalizeFactor)
@nb.guvectorize([(nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:,:], nb.float64[:], nb.float64[:]),
(nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:,:], nb.float64[:], nb.float32[:])],
'(),(m),(s),(s,m),()->(s)', nopython=True, target=parallelOption, cache=cachedOption)
def _psf_Formulation4AkaTrueLocation(distGridToArrayCenter, distGridToAllMics, distSourcesToArrayCenter, distSourcesToAllMics, waveNumber, result):
nMics = distGridToAllMics.shape[0]
for cntSources in range(len(distSourcesToArrayCenter)):
# see bottom of information header of 'calcPointSpreadFunction' for infos on the PSF calculation and speed improvements.
scalarProd = 0.0 + 0.0j
helpNormalizeGrid = 0.0
for cntMics in range(nMics):
expArg = np.float32(waveNumber[0] * (distGridToAllMics[cntMics] - distSourcesToAllMics[cntSources, cntMics]))
scalarProd += (np.cos(expArg) - 1j * np.sin(expArg)) / distSourcesToAllMics[cntSources, cntMics] / distGridToAllMics[cntMics]
helpNormalizeGrid += 1.0 / (distGridToAllMics[cntMics] * distGridToAllMics[cntMics])
normalizeFactor = distSourcesToArrayCenter[cntSources]
scalarProdAbsSquared = (scalarProd * scalarProd.conjugate()).real
result[cntSources] = scalarProdAbsSquared * (normalizeFactor * normalizeFactor) / nMics / helpNormalizeGrid
# CURRENTLY NOT NEEDED, AS CUSTOM PSF WILL BE CALCULATED IN fbeamform.SteeringVector WITH THE USE OF Trait transfer
#@nb.guvectorize([(nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:,:], nb.float64[:], nb.float64[:]),
# (nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:,:], nb.float64[:], nb.float32[:])],
# '(),(m),(s),(s,m),()->(s)', nopython=True, target=parallelOption, cache=cachedOption)
#def _psf_SpecificSteerVec(steerVec, steerVecSources, result):
# nMics = len(steerVec)
# for cntSources in range(steerVecSources.shape[0]):
# # see bottom of information header of 'calcPointSpreadFunction' for infos on the PSF calculation and speed improvements.
# scalarProd = 0.0 + 0.0j
# for cntMics in range(nMics):
# scalarProd += steerVec[cntMics].conjugate() * steerVecSources[cntSources, cntMics]
# scalarProdAbsSquared = (scalarProd * scalarProd.conjugate()).real
# result[cntSources] = scalarProdAbsSquared
#%% Damas - Gauss Seidel
# Formerly known as 'gseidel'
@nb.guvectorize([#(nb.float32[:,:], nb.float32[:], nb.int64[:], nb.float64[:], nb.float32[:]),
(nb.float64[:,:], nb.float64[:], nb.int64[:], nb.float64[:], nb.float64[:]),
#(nb.float32[:,:], nb.float64[:], nb.int64[:], nb.float64[:], nb.float64[:]),
#(nb.float64[:,:], nb.float32[:], nb.int64[:], nb.float64[:], nb.float32[:])
],
'(g,g),(g),(),()->(g)', nopython=True, target=parallelOption, cache=cachedOption,fastmath=True)
def damasSolverGaussSeidel(A, dirtyMap, nIterations, relax, damasSolution):
""" Solves the DAMAS inverse problem via modified gauss seidel.
This is the original formulation from :ref:`Brooks and Humphreys, 2006<BrooksHumphreys2006>`.
Parameters
----------
A : float32/float64[nFreqs, nGridpoints, nGridpoints] (or float64[...])
The PSF build matrix (see :ref:`Brooks and Humphreys, 2006<BrooksHumphreys2006>`)
dirtyMap : float32/float64[nFreqs, nGridpoints] (or float64[...])
The conventional beamformer map
    nIterations : int64[scalar]
        Number of iterations the DAMAS solver performs.
    relax : float64[scalar]
        Relaxation parameter (=1.0 in :ref:`Brooks and Humphreys, 2006<BrooksHumphreys2006>`)
damasSolution : float32/float64[nFreqs, nGridpoints] (or float64[...])
starting solution
Returns
-------
    None : damasSolution is overwritten in place with the end result of the DAMAS iterative solver.
"""
# nGridPoints = len(dirtyMap)
# for cntIter in range(nIterations[0]):
# for cntGrid in range(nGridPoints):
# solHelp = np.float32(0)
# for cntGridHelp in range(cntGrid): # lower sum
# solHelp += A[cntGrid, cntGridHelp] * damasSolution[cntGridHelp]
# for cntGridHelp in range(cntGrid + 1, nGridPoints): # upper sum
# solHelp += A[cntGrid, cntGridHelp] * damasSolution[cntGridHelp]
# solHelp = (1 - relax[0]) * damasSolution[cntGrid] + relax[0] * (dirtyMap[cntGrid] - solHelp)
# if solHelp > 0.0:
# damasSolution[cntGrid] = solHelp
# else:
# damasSolution[cntGrid] = 0.0
nGridPoints = len(dirtyMap)
for cntIter in range(nIterations[0]):
for cntGrid in range(nGridPoints):
solHelp = 0.0
for cntGridHelp in range(nGridPoints): # full sum
solHelp += A[cntGrid, cntGridHelp] * damasSolution[cntGridHelp]
solHelp -= A[cntGrid, cntGrid] * damasSolution[cntGrid]
solHelp = (1 - relax[0]) * damasSolution[cntGrid] + relax[0] * (dirtyMap[cntGrid] - solHelp)
if solHelp > 0.0:
damasSolution[cntGrid] = solHelp
else:
damasSolution[cntGrid] = 0.0
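# --- Editorial usage sketch (not part of the original module) ---
# damasSolverGaussSeidel modifies 'damasSolution' in place. A minimal call for a
# single frequency could look like the helper below; treat the calling convention
# and the default values as a hedged sketch, not as the canonical usage.
def _example_damas_usage(psfMatrix, dirtyMap, nIterations=100, relax=1.0):
    damasSolution = np.zeros_like(dirtyMap)        # start the iteration from the zero map
    damasSolverGaussSeidel(psfMatrix, dirtyMap, nIterations, relax, damasSolution)
    return damasSolution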
#%% Transfer - Function
def calcTransfer(distGridToArrayCenter, distGridToAllMics, waveNumber):
""" Calculates the transfer functions between the various mics and gridpoints.
Parameters
----------
distGridToArrayCenter : float64[nGridpoints]
Distance of all gridpoints to the center of sensor array
distGridToAllMics : float64[nGridpoints, nMics]
Distance of all gridpoints to all sensors of array
waveNumber : complex128
        The wave number; it must be stored in the imaginary part.
    Returns
    -------
    The transfer functions as complex128[nGridPoints, nMics].
"""
nGridPoints, nMics = distGridToAllMics.shape[0], distGridToAllMics.shape[1]
    result = np.zeros((nGridPoints, nMics), np.complex128)
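    # NOTE (editorial): the body of this function is truncated at this point in
    # the present source. A plausible continuation, consistent with the docstring
    # ('wave number stored in the imag-part') and with a monopole transfer
    # function a_m = (r_0 / r_m) * exp(-j*k*(r_m - r_0)), is sketched below;
    # treat it as an assumption, not as the original implementation.
    for cntGrid in range(nGridPoints):
        r0 = distGridToArrayCenter[cntGrid]
        for cntMics in range(nMics):
            rm = distGridToAllMics[cntGrid, cntMics]
            expArg = waveNumber.imag * (rm - r0)
            result[cntGrid, cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) * r0 / rm
    return result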
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: <NAME> <<EMAIL>>
# Minor fixes by <NAME>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME>
# <NAME> <<EMAIL>>
# (parts based on earlier work by <NAME>)
#
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.special import logsumexp
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import deprecated
from .utils.extmath import safe_sparse_dot
from .utils.multiclass import _check_partial_fit_first_call
from .utils.validation import check_is_fitted, check_non_negative
from .utils.validation import _check_sample_weight
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB', 'ComplementNB',
'CategoricalNB']
class _BaseNB(ClassifierMixin, BaseEstimator, metaclass=ABCMeta):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape (n_classes, n_samples).
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
@abstractmethod
def _check_X(self, X):
"""To be overridden in subclasses with the actual checks.
Only used in predict* methods.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X
"""
check_is_fitted(self)
X = self._check_X(X)
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._check_X(X)
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(_BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via :meth:`partial_fit`.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by <NAME>, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like of shape (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
var_smoothing : float, default=1e-9
Portion of the largest variance of all features that is added to
variances for calculation stability.
.. versionadded:: 0.20
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
number of training samples observed in each class.
class_prior_ : ndarray of shape (n_classes,)
probability of each class.
classes_ : ndarray of shape (n_classes,)
class labels known to the classifier.
epsilon_ : float
absolute additive value to variances.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
sigma_ : ndarray of shape (n_classes, n_features)
Variance of each feature per class.
.. deprecated:: 1.0
`sigma_` is deprecated in 1.0 and will be removed in 1.2.
Use `var_` instead.
var_ : ndarray of shape (n_classes, n_features)
Variance of each feature per class.
.. versionadded:: 1.0
theta_ : ndarray of shape (n_classes, n_features)
mean of each feature per class.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, *, priors=None, var_smoothing=1e-9):
self.priors = priors
self.var_smoothing = var_smoothing
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
"""
X, y = self._validate_data(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
return self._validate_data(X, reset=False)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by <NAME>, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like of shape (number of Gaussians,)
Means for Gaussians in original set.
        var : array-like of shape (number of Gaussians,)
            Variances for Gaussians in original set.
        X : array-like of shape (n_samples, number of Gaussians)
            New data points to fold into the running mean and variance.
        sample_weight : array-like of shape (n_samples,), default=None
            Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like of shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like of shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_new * n_past / n_total) * (mu - new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
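    # Worked illustration of the update above (editorial note, not from the
    # original source): with n_past=2, mu=1.0, var=0.0 (old points [1, 1]) and
    # unweighted X = [[3], [5]], we get n_new=2, new_mu=4.0, new_var=1.0, hence
    # total_mu = (2*4 + 2*1) / 4 = 2.5 and
    # total_var = (2*0 + 2*1 + (2*2/4)*(1 - 4)**2) / 4 = 11 / 4 = 2.75,
    # which matches np.var([1, 1, 3, 5]) computed in a single pass.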
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit : bool, default=False
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
if _refit:
self.classes_ = None
first_call = _check_partial_fit_first_call(self, classes)
X, y = self._validate_data(X, y, reset=first_call)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
self.epsilon_ = self.var_smoothing * np.var(X, axis=0).max()
if first_call:
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.var_ = np.zeros((n_classes, n_features))
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
# Initialise the class prior
# Take into account the priors
if self.priors is not None:
priors = np.asarray(self.priors)
                # Check that the provided priors match the number of classes
if len(priors) != n_classes:
raise ValueError('Number of priors must match number of'
' classes.')
# Check that the sum is 1
if not np.isclose(priors.sum(), 1.0):
raise ValueError('The sum of the priors should be 1.')
                # Check that the priors are non-negative
if (priors < 0).any():
raise ValueError('Priors must be non-negative.')
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = np.zeros(len(self.classes_),
dtype=np.float64)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.var_[:, :] -= self.epsilon_
classes = self.classes_
unique_y = np.unique(y)
        unique_y_in_classes = np.in1d(unique_y, classes)
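        # NOTE (editorial): the remainder of this method is truncated in the
        # present source. The continuation below is a hedged reconstruction of
        # what scikit-learn does next (reject labels not declared in `classes`,
        # update the per-class running mean/variance, then refresh the class
        # priors); verify against the upstream implementation before relying on it.
        if not np.all(unique_y_in_classes):
            raise ValueError("The target label(s) %s in y do not exist in the "
                             "initial classes %s" %
                             (unique_y[~unique_y_in_classes], classes))
        for y_i in unique_y:
            i = classes.searchsorted(y_i)
            X_i = X[y == y_i, :]
            if sample_weight is not None:
                sw_i = sample_weight[y == y_i]
                N_i = sw_i.sum()
            else:
                sw_i = None
                N_i = X_i.shape[0]
            new_theta, new_sigma = self._update_mean_variance(
                self.class_count_[i], self.theta_[i, :], self.var_[i, :],
                X_i, sw_i)
            self.theta_[i, :] = new_theta
            self.var_[i, :] = new_sigma
            self.class_count_[i] += N_i
        self.var_[:, :] += self.epsilon_
        # update the empirical class priors only if none were provided,
        # taking sample_weight into account
        if self.priors is None:
            self.class_prior_ = self.class_count_ / self.class_count_.sum()
        return self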
# -*- coding: utf-8 -*-
# Created on Tue Dec 18 16:49:20 2019
# @author: arthurd
"""
HexGrid Module.
Generate Hexagonal grid. Cartesian-Hexagonal coordinates interaction.
"""
import numpy as np
import math
import matplotlib.pyplot as plt
from pyproj import Proj, Transformer
def hexbin_grid(bbox, side_length=1, proj_init=None, proj_out=None):
"""
Create a grid of hexagons.
See http://www.calculatorsoup.com/calculators/geometry-plane/polygon.php
Parameters
----------
bbox : Tuple
Box of the area to generate the hexagons.
Format : Lower X, Lower Y, Upper X, Upper Y.
side_length : float, optional
Side length of the hexagons. The default is 1.
proj_init : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_init refers to the starting coordinates system.
The default is None.
proj_out : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_out refers to the ending coordinates system.
The default is None.
Example
-------
If the bbox is in geographic coordinates, but the hexgrid should be computed
on the web mercator system.
Then,
>>> proj_init="epsg:4326"
>>> proj_out="epsg:3857"
Returns
-------
polygons : List
List of hexagons. An hexagons is a list of coordinates (tuple, Lat, Lon).
"""
startx = bbox[0]
starty = bbox[1]
endx = bbox[2]
endy = bbox[3]
proj = proj_init != proj_out
if proj:
transformer = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
startx, starty = transformer.transform(startx, starty)
endx, endy = transformer.transform(endx, endy)
# calculate coordinates of the hexagon points
p = side_length * 0.5
b = side_length * math.cos(math.radians(30))
w = b * 2
h = 2 * side_length
# offset start and end coordinates by hex widths and heights to guarantee coverage
startx = startx - w
starty = starty - h/2
endx = endx
endy = endy
origx = startx
# offsets for moving along and up rows
xoffset = b
yoffset = 3 * p
P1 = np.empty((0, 2))
P2 = np.empty((0, 2))
P3 = np.empty((0, 2))
P4 = np.empty((0, 2))
P5 = np.empty((0, 2))
P6 = np.empty((0, 2))
row = 0
while starty < endy:
if row % 2 == 0:
startx = origx + xoffset
else:
startx = origx + w
while startx <= endx:
p1 = [startx, starty + p]
p2 = [startx, starty + (3 * p)]
p3 = [startx + b, starty + h]
p4 = [startx + w, starty + (3 * p)]
p5 = [startx + w, starty + p]
p6 = [startx + b, starty]
P1 = np.vstack((P1, p1))
P2 = np.vstack((P2, p2))
P3 = np.vstack((P3, p3))
P4 = np.vstack((P4, p4))
P5 = np.vstack((P5, p5))
P6 = np.vstack((P6, p6))
startx += w
starty += yoffset
row += 1
if proj:
transformer = Transformer.from_proj(Proj(init=proj_out), Proj(init=proj_init))
lon1, lat1 = transformer.transform(P1[:,0], P1[:,1])
P1 = np.column_stack((lon1, lat1))
lon2, lat2 = transformer.transform(P2[:,0], P2[:,1])
P2 = np.column_stack((lon2, lat2))
lon3, lat3 = transformer.transform(P3[:,0], P3[:,1])
P3 = np.column_stack((lon3, lat3))
lon4, lat4 = transformer.transform(P4[:,0], P4[:,1])
P4 = np.column_stack((lon4, lat4))
lon5, lat5 = transformer.transform(P5[:,0], P5[:,1])
P5 = np.column_stack((lon5, lat5))
lon6, lat6 = transformer.transform(P6[:,0], P6[:,1])
P6 = np.column_stack((lon6, lat6))
polygons = []
for i in range(len(P1)):
hexagon = [(P1[i][0], P1[i][1]),
(P2[i][0], P2[i][1]),
(P3[i][0], P3[i][1]),
(P4[i][0], P4[i][1]),
(P5[i][0], P5[i][1]),
(P6[i][0], P6[i][1])]
polygons.append(hexagon)
return polygons
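# --- Editorial usage sketch (not part of the original module) ---
# Minimal example of generating a hexagon grid over a small geographic bounding
# box while computing the geometry in web mercator; the bbox and side length are
# made up for illustration.
def _example_hexbin_grid_usage():
    bbox = (2.30, 48.85, 2.36, 48.88)        # lower lon, lower lat, upper lon, upper lat
    polygons = hexbin_grid(bbox, side_length=500,
                           proj_init="epsg:4326", proj_out="epsg:3857")
    # each polygon is a list of six (lon, lat) tuples
    return polygons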
# =============================================================================
# DEPRECATED
# =============================================================================
# def get_size_hexgrid(bbox, side_length):
# startx, starty, endx, endy = bbox[0], bbox[1], bbox[2], bbox[3]
# # width & height of the bbox
# w = abs(endx - startx)
# h = abs(endy - starty)
# # parameters of the hexagon
# R = 0.5 * side_length * 1/(np.sin(np.deg2rad(30)))
# r = R * np.cos(np.deg2rad(30))
# # number of hexagons (vertical & horizontal)
# Nw = int((w + r)//(2*r)) + 1
# Nh = int((h + R)//(R + side_length/2)) + 1
#
# shorter_lines = 0 if (w > 2*(Nw - 1)*r) else 1
#
# return Nw, Nh, shorter_lines
# =============================================================================
def cartesian_to_hex(point, origin=(0, 0), side_length=1,
proj_init=None, proj_out=None):
"""
Convert cartesian coordinates to hexagonal coordinates system.
Parameters
----------
point : Tuple
Point in cartesian coordinates to convert.
origin : Tuple, optional
Origin of the hexagonal coordinates system.
The default is (0, 0).
side_length : Float, optional
Side length of the hexagons.
The default is 1.
proj_init : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_init refers to the starting coordinates system.
The default is None.
proj_out : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_out refers to the ending coordinates system.
The default is None.
Example :
If the bbox is in geographic coordinates, but the hexgrid should be computed
on the web mercator system.
Then,
>>> proj_init="epsg:4326"
>>> proj_out="epsg:3857"
Returns
-------
hex_coord : Tuple
Point's coordinates in hexagonal coordinates system.
"""
if proj_init != proj_out:
transformer = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
point = transformer.transform(point[0], point[1])
origin = transformer.transform(origin[0], origin[1])
mat = np.array([[np.sqrt(3)/3, -1/3],
[0 , 2/3 ]])
point = np.array(point)
hex_coord = np.dot(mat, point - origin)/side_length
return hex_coord
def hex_to_cartesian(hexa, origin=(0, 0), side_length=1,
proj_init=None, proj_out=None):
"""
Convert hexagonal coordinates to cartesian.
Parameters
----------
hexa : Tuple
Hexagonal coordinates.
origin : Tuple, optional
Origin of the hexagonal coordinates system.
The default is (0, 0).
side_length : Float, optional
Side length of the hexagons.
The default is 1.
proj_init : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_init refers to the starting coordinates system.
The default is None.
proj_out : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_out refers to the ending coordinates system.
The default is None.
Example :
If the bbox is in geographic coordinates, but the hexgrid should be computed
on the web mercator system.
Then,
>>> proj_init="epsg:4326"
>>> proj_out="epsg:3857"
Returns
-------
point : Tuple
Hexagon's coordinates in cartesian.
"""
mat = np.array([[np.sqrt(3), np.sqrt(3)/2],
[0 , 3/2 ]])
hex_coord = np.array(hexa)
cart_coord = side_length * np.dot(mat, hex_coord)
if proj_init != proj_out:
transformer = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
cart_coord = transformer.transform(cart_coord[0], cart_coord[1])
origin = transformer.transform(origin[0], origin[1])
return cart_coord + origin
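# Illustrative usage sketch, not part of the original module: it assumes no
# reprojection (proj_init == proj_out) and arbitrary example values. Since
# cartesian_to_hex and hex_to_cartesian use mutually inverse matrices, a point
# should survive the round trip up to floating-point error.
def _example_hex_round_trip():
    point = (3.2, 1.7)
    axial = cartesian_to_hex(point, origin=(0, 0), side_length=10)
    back = hex_to_cartesian(axial, origin=(0, 0), side_length=10)
    return np.allclose(back, point)  # expected: True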
def hexs_to_cartesians(Q, R, origin=(0, 0), side_length=1,
proj_init=None, proj_out=None):
"""
Convert a list of hexagonal coordinates to cartesian.
Parameters
----------
    Q : numpy 1D array
        Column indexes of the hexagons' axial coordinates.
    R : numpy 1D array
        Row indexes of the hexagons' axial coordinates.
origin : Tuple, optional
Origin of the hexagonal coordinates system.
The default is (0, 0).
side_length : Float, optional
Side length of the hexagons.
The default is 1.
proj_init : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_init refers to the starting coordinates system.
The default is None.
proj_out : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_out refers to the ending coordinates system.
The default is None.
Example :
If the bbox is in geographic coordinates, but the hexgrid should be computed
on the web mercator system.
Then,
>>> proj_init="epsg:4326"
>>> proj_out="epsg:3857"
Returns
-------
points : numpy 2D array
        Hexagons' centre points in cartesian coordinates.
"""
assert len(Q) == len(R), 'The dimension of Q and R indexes should be the same'
mat = np.array([[np.sqrt(3), np.sqrt(3)/2],
[0 , 3/2 ]])
hex_coord = np.vstack((Q, R))
cart_coord = side_length * np.dot(mat, hex_coord)
if proj_init != proj_out:
transformer = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
X, Y = transformer.transform(cart_coord[0], cart_coord[1])
cart_coord = np.vstack((X, Y))
origin = transformer.transform(origin[0], origin[1])
origin = np.vstack(([origin[0]] * len(Q), [origin[1]] * len(R)))
return cart_coord + origin
def cartesians_to_hexs(X, Y, origin=(0, 0), side_length=1,
proj_init=None, proj_out=None):
"""
Convert a list of cartesian points to hexagonal coordinates.
Parameters
----------
X : numpy 1D array
X indexes of cartesian points.
Y : numpy 1D array
Y indexes of cartesian points.
origin : Tuple, optional
Origin of the hexagonal coordinates system.
The default is (0, 0).
side_length : Float, optional
Side length of the hexagons.
The default is 1.
proj_init : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_init refers to the starting coordinates system.
The default is None.
proj_out : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_out refers to the ending coordinates system.
The default is None.
Example :
If the bbox is in geographic coordinates, but the hexgrid should be computed
on the web mercator system.
Then,
>>> proj_init="epsg:4326"
>>> proj_out="epsg:3857"
Returns
-------
hexagons : numpy 2D array
Cartesian points in hexagonal coordinates.
"""
# Test if the length of X and Y are equals
assert len(X) == len(Y), 'The dimension of X and Y should be the same'
if proj_init != proj_out:
transformer = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
X, Y = transformer.transform(X, Y)
origin = transformer.transform(origin[0], origin[1])
mat = np.array([[np.sqrt(3)/3, -1/3],
[0 , 2/3 ]])
coord = np.vstack((X, Y))
origin = np.vstack(([origin[0]] * len(X), [origin[1]] * len(Y)))
return np.dot(mat, coord - origin)/side_length
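# Illustrative sketch (not part of the original module, assumes no
# reprojection): the vectorised helpers mirror the scalar ones, so converting
# a batch of points to axial coordinates and back reproduces the inputs.
# The sample coordinates are arbitrary.
def _example_vectorised_round_trip():
    X = np.array([0.0, 5.0, -3.0])
    Y = np.array([0.0, 2.0, 7.0])
    Q, R = cartesians_to_hexs(X, Y, origin=(0, 0), side_length=4)
    X2, Y2 = hexs_to_cartesians(Q, R, origin=(0, 0), side_length=4)
    return np.allclose(X, X2) and np.allclose(Y, Y2)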
def nearest_hexagon(point, origin=(0, 0), side_length=1, proj_init=None, proj_out=None):
"""
Get the nearest hexagon center from a cartesian point.
Parameters
----------
point : Tuple
Cartesian point.
origin : Tuple, optional
Origin of the hexagonal coordinates system.
The default is (0, 0).
side_length : Float, optional
Side length of the hexagons.
The default is 1.
proj_init : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_init refers to the starting coordinates system.
The default is None.
proj_out : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_out refers to the ending coordinates system.
The default is None.
Example :
If the bbox is in geographic coordinates, but the hexgrid should be computed
on the web mercator system.
Then,
>>> proj_init="epsg:4326"
>>> proj_out="epsg:3857"
Returns
-------
hexagon_center : Tuple
Hexagonal coordinates of the nearest hexagon from point.
"""
if proj_init != proj_out:
transformer = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
point = transformer.transform(point[0], point[1])
origin = transformer.transform(origin[0], origin[1])
# Hexagonal coordinates
hex_coord = cartesian_to_hex(point, origin=origin, side_length=side_length)
# Cube coordinates
x = hex_coord[0]
z = hex_coord[1]
y = - x - z
# Rounding cube coordinates
rx = np.round(x)
ry = np.round(y)
rz = np.round(z)
x_diff = abs(rx - x)
y_diff = abs(ry - y)
z_diff = abs(rz - z)
if x_diff > y_diff and x_diff > z_diff:
rx = -ry - rz
elif y_diff > z_diff:
ry = -rx - rz
else:
rz = -rx - ry
# Hexagonal coordinates
q = rx
r = rz
return q, r
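# Illustrative sketch (not part of the original module, assumes no
# reprojection): a point slightly offset from a known hexagon centre should
# round back to that centre's axial (q, r) index through the cube-coordinate
# rounding above. The index (2, -1) and the 0.1 offset are arbitrary examples.
def _example_nearest_hexagon():
    centre = hex_to_cartesian((2, -1), origin=(0, 0), side_length=5)
    q, r = nearest_hexagon(centre + 0.1, origin=(0, 0), side_length=5)
    return (q, r) == (2.0, -1.0)  # expected: True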
def nearest_hexagons(X, Y, origin=(0, 0), side_length=1, proj_init=None, proj_out=None):
"""
    Get the nearest hexagon centers from a list of cartesian points.
Parameters
----------
X : numpy 1D array
X indexes of cartesian points.
Y : numpy 1D array
Y indexes of cartesian points.
origin : Tuple, optional
Origin of the hexagonal coordinates system.
The default is (0, 0).
side_length : Float, optional
Side length of the hexagons.
The default is 1.
proj_init : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_init refers to the starting coordinates system.
The default is None.
proj_out : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_out refers to the ending coordinates system.
The default is None.
Example :
If the bbox is in geographic coordinates, but the hexgrid should be computed
on the web mercator system.
Then,
>>> proj_init="epsg:4326"
>>> proj_out="epsg:3857"
Returns
-------
    Q, R : numpy 1D arrays
        Axial (q, r) indexes of the nearest hexagon for each input point.
"""
if proj_init != proj_out:
transformer = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
X, Y = transformer.transform(X, Y)
origin = transformer.transform(origin[0], origin[1])
hex_coord = cartesians_to_hexs(X, Y, origin=origin, side_length=side_length)
# Cube coordinates
X = hex_coord[0]
Z = hex_coord[1]
Y = - X - Z
# Rounding cube coordinates
rX = np.round(X)
rY = np.round(Y)
rZ = np.round(Z)
X_diff = abs(rX - X)
Y_diff = abs(rY - Y)
Z_diff = abs(rZ - Z)
for i in range(len(X)):
if X_diff[i] > Y_diff[i] and X_diff[i] > Z_diff[i]:
rX[i] = -rY[i] - rZ[i]
elif Y_diff[i] > Z_diff[i]:
rY[i] = -rX[i] - rZ[i]
else:
rZ[i] = -rX[i] - rY[i]
# Hexagonal coordinates
Q = rX
R = rZ
# # Cartesian coordinates
# X, Y = hexs_to_cartesians(Q, R, origin=origin, side_length=side_length)
# if proj:
# transformer = Transformer.from_proj(Proj(init=proj_out), Proj(init=proj_init))
# X, Y = transformer.transform(X, Y)
return Q, R
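# Illustrative sketch (not part of the original module, assumes no
# reprojection): snap a scatter of points to hexagon indexes and count how
# many fall into each cell, which is the typical hex-binning use of
# nearest_hexagons. Sizes and ranges are arbitrary example values.
def _example_hex_binning():
    rng = np.random.RandomState(0)
    X = rng.uniform(0, 50, size=200)
    Y = rng.uniform(0, 50, size=200)
    Q, R = nearest_hexagons(X, Y, origin=(0, 0), side_length=5)
    cells, counts = np.unique(np.column_stack((Q, R)), axis=0,
                              return_counts=True)
    return cells, counts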
def hexagon_coordinates(center, side_length=1, r=0.8660254037844389, R=1.0000000000000002,
proj_init=None, proj_out=None):
"""
    Get the hexagon's corner coordinates from its center.
Parameters
----------
center : Tuple
Center of the hexagon, in cartesian coordinates.
side_length : Float, optional
Side length of the hexagon.
The default is 1.
    r : Float, optional
        Inner radius (apothem). The default is 0.8660254037844389.
    R : Float, optional
        Outer radius (circumradius). The default is 1.0000000000000002.
proj_init : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_init refers to the starting coordinates system.
The default is None.
proj_out : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_out refers to the ending coordinates system.
The default is None.
Example :
If the bbox is in geographic coordinates, but the hexgrid should be computed
on the web mercator system.
Then,
>>> proj_init="epsg:4326"
>>> proj_out="epsg:3857"
Returns
-------
hexagon : List
        List of the hexagon's corner points, with the first corner repeated to close the ring.
"""
if side_length != 1 and r == 0.8660254037844389 and R == 1.0000000000000002:
R = 0.5 * side_length * 1/(np.sin(np.deg2rad(30)))
r = R * np.cos(np.deg2rad(30))
elif side_length == 1 and r != 0.8660254037844389 and R == 1.0000000000000002:
side_length = 2*r *np.tan(np.deg2rad(30))
R = 0.5 * side_length * 1/(np.sin(np.deg2rad(30)))
elif side_length == 1 and r == 0.8660254037844389 and R != 1.0000000000000002:
r = R * np.cos(np.deg2rad(30))
side_length = 2*r *np.tan(np.deg2rad(30))
proj = proj_init != proj_out
if proj:
transformer = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
center = transformer.transform(center[0], center[1])
point1 = [center[0], center[1] - R]
point2 = [center[0] + r, center[1] - side_length/2]
point3 = [center[0] + r, center[1] + side_length/2]
point4 = [center[0], center[1] + R]
point5 = [center[0] - r, center[1] + side_length/2]
point6 = [center[0] - r, center[1] - side_length/2]
if proj:
transformer = Transformer.from_proj(Proj(init=proj_out), Proj(init=proj_init))
point1 = transformer.transform(point1[0], point1[1])
point2 = transformer.transform(point2[0], point2[1])
point3 = transformer.transform(point3[0], point3[1])
point4 = transformer.transform(point4[0], point4[1])
point5 = transformer.transform(point5[0], point5[1])
point6 = transformer.transform(point6[0], point6[1])
return [point1, point2, point3, point4, point5, point6, point1]
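# Illustrative sketch (not part of the original module, assumes no
# reprojection): every corner returned by hexagon_coordinates lies at the
# outer radius R from the centre, and for a regular hexagon R equals the side
# length. The side length 3.0 is an arbitrary example value.
def _example_hexagon_corners():
    side = 3.0
    corners = np.array(hexagon_coordinates((0.0, 0.0), side_length=side))
    radii = np.hypot(corners[:, 0], corners[:, 1])
    return np.allclose(radii, side)  # expected: True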
def hexagons_coordinates(X, Y, side_length=1, r=0.8660254037844389, R=1.0000000000000002,
proj_init=None, proj_out=None):
"""
    Get the hexagons' corner coordinates from a list of centers.
Parameters
----------
X : numpy 1D array
X indexes of cartesian center.
Y : numpy 1D array
Y indexes of cartesian center.
side_length : Float, optional
Side length of the hexagon.
The default is 1.
    r : Float, optional
        Inner radius (apothem). The default is 0.8660254037844389.
    R : Float, optional
        Outer radius (circumradius). The default is 1.0000000000000002.
proj_init : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_init refers to the starting coordinates system.
The default is None.
proj_out : String, optional
If working with coordinates and the hexagons need to be calculated in another
coordinates system, proj_out refers to the ending coordinates system.
The default is None.
Example :
If the bbox is in geographic coordinates, but the hexgrid should be computed
on the web mercator system.
Then,
>>> proj_init="epsg:4326"
>>> proj_out="epsg:3857"
Returns
-------
hexagons : List
        List of hexagons, each given by its corner coordinates.
"""
if side_length != 1 and r == 0.8660254037844389 and R == 1.0000000000000002:
R = 0.5 * side_length * 1/(np.sin( | np.deg2rad(30) | numpy.deg2rad |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt;
from scipy.spatial.distance import *
from scipy.sparse.linalg import eigs
import pandas as pd
import csv
import pickle as pkl
#Compute the similarity matrix
def compute_similarities(X):
similarities = np.zeros((X.shape[0],X.shape[0]))
similarities = squareform(pdist(X,'euclidean'))
return similarities
#Get the K-NN for each element
def KNN(K,similarities):
I = (np.argsort(similarities,1))
I=I[:,1:K+1]
return I
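#Illustrative sketch (not part of the original script): chain the similarity
#matrix and the K nearest-neighbour indexes on a toy 2-D point set. The
#points and K=2 are arbitrary example values.
def _example_knn():
    X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [2.0, 2.0]])
    I = KNN(2, compute_similarities(X))
    return I  # I[i] holds the indexes of the 2 closest points to X[i]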
#Compute the reconstruction weights W
def compute_W(I,X,D,K):
W=np.zeros((X.shape[0],X.shape[0]))
for i in range(0,X.shape[0]):
Z=X[I[i,:],:]
Z=Z-X[i,:]
C=np.dot(Z,np.transpose(Z))
if K>D:
C=C+10**(-3)*np.trace(C)*np.eye(C.shape[0])
w=np.linalg.solve(C,np.transpose(np.ones(C.shape[0])))
W[i,I[i,:]]=w/ | np.sum(w) | numpy.sum |
import os
import numpy as np
import pytest
import fitsio
import piff
from .._se_image import _get_wcs_inverse, SEImageSlice
SE_DIMS_CUT = 512
def test_se_image_get_wcs_inverse_caches(se_image_data, coadd_image_data):
_get_wcs_inverse.cache_clear()
psf_mod = piff.PSF.read(se_image_data['source_info']['piff_path'])
se_im = SEImageSlice(
source_info=se_image_data['source_info'],
psf_model=psf_mod,
wcs=se_image_data['eu_wcs'],
wcs_position_offset=1,
wcs_color=0,
psf_kwargs={"GI_COLOR": 0.61},
noise_seeds=[10],
mask_tape_bumps=False,
)
se_im._im_shape = (512, 512)
_get_wcs_inverse(
coadd_image_data['eu_wcs'],
coadd_image_data['position_offset'],
se_im,
se_im._im_shape,
8,
)
assert _get_wcs_inverse.cache_info().hits == 0
_get_wcs_inverse(
coadd_image_data['eu_wcs'],
coadd_image_data['position_offset'],
se_im,
se_im._im_shape,
8,
)
assert _get_wcs_inverse.cache_info().hits == 1
se_im = SEImageSlice(
source_info=se_image_data['source_info'],
psf_model=psf_mod,
wcs=se_image_data['eu_wcs'],
wcs_position_offset=1,
wcs_color=0,
psf_kwargs={"GI_COLOR": 0.61},
noise_seeds=[10],
mask_tape_bumps=False,
)
se_im._im_shape = (512, 512)
_get_wcs_inverse(
coadd_image_data['eu_wcs'],
coadd_image_data['position_offset'],
se_im,
se_im._im_shape,
8,
)
assert _get_wcs_inverse.cache_info().hits == 2
@pytest.mark.skipif(
os.environ.get('TEST_DESDATA', None) is None,
reason=(
'SEImageSlice can only be tested if '
'test data is at TEST_DESDATA'))
def test_se_image_get_wcs_inverse_pixmappy(se_image_data, coadd_image_data):
coadd_wcs = coadd_image_data['eu_wcs']
se_wcs = piff.PSF.read(
se_image_data['source_info']['piff_path']
).wcs[se_image_data['source_info']['ccdnum']]
# this hack mocks up an esutil-like interface to the pixmappy WCS
def se_image2sky(x, y):
if np.ndim(x) == 0 and np.ndim(y) == 0:
is_scalar = True
else:
is_scalar = False
# the factor of +1 here converts from zero to one indexed
ra, dec = se_wcs._radec(
(np.atleast_1d(x) - se_wcs.x0 +
se_image_data['source_info']['position_offset']),
(np.atleast_1d(y) - se_wcs.y0 +
se_image_data['source_info']['position_offset']),
c=0.61,
)
np.degrees(ra, out=ra)
np.degrees(dec, out=dec)
if is_scalar:
return ra[0], dec[0]
else:
return ra, dec
se_wcs.image2sky = se_image2sky
# we use the bit mask to exclude tape bumps and edges
bmask = fitsio.read(
se_image_data['source_info']['bmask_path'],
ext=se_image_data['source_info']['bmask_ext'])
bmask = bmask[:SE_DIMS_CUT, :SE_DIMS_CUT]
    # the approximate inversion assumes zero-indexed positions in and out
_get_wcs_inverse.cache_clear()
wcs_inv = _get_wcs_inverse(
coadd_wcs,
coadd_image_data['position_offset'],
se_wcs,
(SE_DIMS_CUT, SE_DIMS_CUT),
8,
)[0]
assert _get_wcs_inverse.cache_info().hits == 0
wcs_inv = _get_wcs_inverse(
coadd_wcs,
coadd_image_data['position_offset'],
se_wcs,
(SE_DIMS_CUT, SE_DIMS_CUT),
8,
)[0]
assert _get_wcs_inverse.cache_info().hits == 1
rng = np.random.RandomState(seed=100)
# all of the positions here are zero indexed
x_se = rng.uniform(low=-0.5, high=bmask.shape[1]-0.5, size=1000)
y_se = rng.uniform(low=-0.5, high=bmask.shape[0]-0.5, size=1000)
x_se_pix = (x_se + 0.5).astype(np.int64)
y_se_pix = (y_se + 0.5).astype(np.int64)
bmask_vals = bmask[y_se_pix, x_se_pix]
# we demand good interpolations where there are
# 1) no suspect pixels (eg tape bumps for Y3, bit 2048)
# 2) no edges (bit 512)
# 3) not in the 64 pixels around the edge of the CCD
buff = 64
ok_pix = (
((bmask_vals & 2048) == 0) &
((bmask_vals & 512) == 0) &
(x_se_pix >= buff) &
(x_se_pix < bmask.shape[1] - buff) &
(y_se_pix >= buff) &
(y_se_pix < bmask.shape[0] - buff))
coadd_pos = coadd_wcs.sky2image(*se_wcs.image2sky(x_se, y_se))
# outputs are one-indexed so we convert back
coadd_x = coadd_pos[0] - coadd_image_data['position_offset']
coadd_y = coadd_pos[1] - coadd_image_data['position_offset']
x_se_intp, y_se_intp = wcs_inv(coadd_x, coadd_y)
err = np.sqrt(
((x_se - x_se_intp) * 0.263)**2 +
((y_se - y_se_intp) * 0.263)**2)
# everything should be finite
assert np.all(np.isfinite(err)), np.sum(~ | np.isfinite(err) | numpy.isfinite |
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sbn
import pandas as pd
import torch
from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, MultivariateNormalVariable, DeterministicVariable
from brancher.transformations import TriangularLinear, Sigmoid, Bias, PlanarFlow
from brancher import inference
import brancher.functions as BF
# N repetitions
N_rep = 15 #15
# Data list
condition_list = [lambda t: (t < 10 or t > 20), lambda t: (t < 0 or t > 20), lambda t: True]
condition_label = ["Bridge", "Full", "Past"]
N_itr_PC = 400 #1500 #1000
N_itr_MF = 2000
N_itr_MN = 6000
N_itr_NN = 400
N_smpl = 50
optimizer = "Adam"
lr = 0.05#0.05
lr_mn = 0.005
N_ELBO_smpl = 1000
for cond, label in zip(condition_list, condition_label):
Lk1 = []
Lk2 = []
Lk3 = []
Lk4 = []
MSE1 = []
MSE2 = []
MSE3 = []
MSE4 = []
for rep in range(N_rep):
print("Repetition: {}".format(rep))
# Probabilistic model #
T = 30 #30
dt = 0.02
driving_noise = 0.5
measure_noise = 2. #1.
s = 10.
r = 28.
b = 8 / 3.
x0 = NormalVariable(0., driving_noise, 'x0')
h0 = NormalVariable(0., driving_noise, 'h0')
z0 = NormalVariable(0., driving_noise, 'z0')
x = [x0]
h = [h0]
z = [z0]
y = []
x_names = ["x0"]
h_names = ["h0"]
z_names = ["z0"]
y_names = ["y0"]
y_range = [t for t in range(T) if cond(t)]
if 0 in y_range:
y0 = NormalVariable(x0, measure_noise, 'y0')
for t in range(1, T):
x_names.append("x{}".format(t))
h_names.append("h{}".format(t))
z_names.append("z{}".format(t))
new_x = x[t - 1] + dt * s * (h[t - 1] - x[t - 1])
new_h = h[t - 1] + dt * (x[t - 1] * (r - z[t - 1]) - h[t - 1])
new_z = z[t - 1] + dt * (x[t - 1] * h[t - 1] - b * z[t - 1])
x.append(NormalVariable(new_x, np.sqrt(dt) * driving_noise, x_names[t]))
h.append(NormalVariable(new_h, np.sqrt(dt) * driving_noise, h_names[t]))
z.append(NormalVariable(new_z, np.sqrt(dt) * driving_noise, z_names[t]))
if t in y_range:
y_name = "y{}".format(t)
y_names.append(y_name)
y.append(NormalVariable(x[t], measure_noise, y_name))
AR_model = ProbabilisticModel(x + y + z + h)
# Generate data #
data = AR_model._get_sample(number_samples=1)
time_series = [float(data[yt].data) for yt in y]
ground_truth = [float(data[xt].data) for xt in x]
# Observe data #
[yt.observe(data[yt][:, 0, :]) for yt in y]
# Structured variational distribution #
mx0 = DeterministicVariable(value=0., name="mx0", learnable=True)
Qx = [NormalVariable(mx0, 5*driving_noise, 'x0', learnable=True)]
Qx_mean = [RootVariable(0., 'x0_mean', learnable=True)]
Qxlambda = [RootVariable(-1., 'x0_lambda', learnable=True)]
mh0 = DeterministicVariable(value=0., name="mh0", learnable=True)
Qh = [NormalVariable(mh0, 5*driving_noise, 'h0', learnable=True)]
Qh_mean = [RootVariable(0., 'h0_mean', learnable=True)]
Qhlambda = [RootVariable(-1., 'h0_lambda', learnable=True)]
mz0 = DeterministicVariable(value=0., name="mz0", learnable=True)
Qz = [NormalVariable(mz0, 5*driving_noise, 'z0', learnable=True)]
Qz_mean = [RootVariable(0., 'z0_mean', learnable=True)]
Qzlambda = [RootVariable(-1., 'z0_lambda', learnable=True)]
for t in range(1, T):
Qx_mean.append(RootVariable(0, x_names[t] + "_mean", learnable=True))
Qxlambda.append(RootVariable(-1., x_names[t] + "_lambda", learnable=True))
Qh_mean.append(RootVariable(0, h_names[t] + "_mean", learnable=True))
Qhlambda.append(RootVariable(1., h_names[t] + "_lambda", learnable=True))
Qz_mean.append(RootVariable(0, z_names[t] + "_mean", learnable=True))
Qzlambda.append(RootVariable(1., z_names[t] + "_lambda", learnable=True))
new_x = Qx[t - 1] + dt * s * (Qh[t - 1] - Qx[t - 1])
new_h = Qh[t - 1] + dt * (Qx[t - 1] * (r - Qz[t - 1]) - Qh[t - 1])
new_z = Qz[t - 1] + dt * (Qx[t - 1] * Qh[t - 1] - b * Qz[t - 1])
Qx.append(NormalVariable(BF.sigmoid(Qxlambda[t]) * new_x + (1 - BF.sigmoid(Qxlambda[t])) * Qx_mean[t],
2*driving_noise, x_names[t], learnable=True))
Qh.append(NormalVariable(BF.sigmoid(Qhlambda[t]) * new_h + (1 - BF.sigmoid(Qhlambda[t])) * Qh_mean[t],
2*driving_noise, h_names[t], learnable=True))
Qz.append(NormalVariable(BF.sigmoid(Qzlambda[t]) * new_z + (1 - BF.sigmoid(Qzlambda[t])) * Qz_mean[t],
2*driving_noise, z_names[t], learnable=True))
variational_posterior = ProbabilisticModel(Qx + Qh + Qz)
AR_model.set_posterior_model(variational_posterior)
# Inference #
inference.perform_inference(AR_model,
number_iterations=N_itr_PC,
number_samples=N_smpl,
optimizer=optimizer,
lr=lr)
loss_list1 = AR_model.diagnostics["loss curve"]
# ELBO
#ELBO1.append(float(AR_model.estimate_log_model_evidence(N_ELBO_smpl).detach().numpy()))
#print("PC {}".format(ELBO1[-1]))
# MSE
posterior_samples = AR_model._get_posterior_sample(2000)
x_mean1 = []
lower_bound1 = []
upper_bound1 = []
for xt in x:
x_posterior_samples = posterior_samples[xt].detach().numpy().flatten()
mean = np.mean(x_posterior_samples)
sd = np.sqrt(np.var(x_posterior_samples))
x_mean1.append(mean)
lower_bound1.append(mean - sd)
upper_bound1.append(mean + sd)
MSE = np.mean((np.array(ground_truth) - np.array(x_mean1)) ** 2)
var = 0.5 * (np.array(upper_bound1) - np.array(lower_bound1)) ** 2
Lk = np.mean(
0.5 * (np.array(ground_truth) - np.array(x_mean1)) ** 2 / var + 0.5 * np.log(var) + 0.5 * np.log(
2 * np.pi))
print("PC MSE {}".format(MSE))
print("PC lk {}".format(Lk))
MSE1.append(MSE)
Lk1.append(Lk)
# Mean field
Qx = [NormalVariable(0., driving_noise, 'x0', learnable=True)]
Qh = [NormalVariable(0., driving_noise, 'h0', learnable=True)]
Qz = [NormalVariable(0., driving_noise, 'z0', learnable=True)]
for t in range(1, T):
Qx.append(NormalVariable(0., driving_noise, x_names[t], learnable=True))
Qh.append(NormalVariable(0., driving_noise, h_names[t], learnable=True))
Qz.append(NormalVariable(0., driving_noise, z_names[t], learnable=True))
variational_posterior = ProbabilisticModel(Qx + Qh + Qz)
AR_model.set_posterior_model(variational_posterior)
# Inference #
inference.perform_inference(AR_model,
number_iterations=N_itr_MF,
number_samples=N_smpl,
optimizer=optimizer,
lr=lr)
loss_list2 = AR_model.diagnostics["loss curve"]
# ELBO
#ELBO2.append(float(AR_model.estimate_log_model_evidence(N_ELBO_smpl).detach().numpy()))
#print("MF {}".format(ELBO2[-1]))
# MSE
posterior_samples = AR_model._get_posterior_sample(2000)
x_mean2 = []
h_mean2 = []
z_mean2 = []
lower_bound2 = []
upper_bound2 = []
for xt, ht, zt in zip(x, h, z):
x_posterior_samples = posterior_samples[xt].detach().numpy().flatten()
h_posterior_samples = posterior_samples[ht].detach().numpy().flatten()
z_posterior_samples = posterior_samples[zt].detach().numpy().flatten()
mean = np.mean(x_posterior_samples)
h_mean = np.mean(h_posterior_samples)
z_mean = np.mean(z_posterior_samples)
sd = np.sqrt(np.var(x_posterior_samples))
x_mean2.append(mean)
h_mean2.append(h_mean)
z_mean2.append(z_mean)
lower_bound2.append(mean - sd)
upper_bound2.append(mean + sd)
MSE = np.mean((np.array(ground_truth) - np.array(x_mean2)) ** 2)
var = 0.5 * (np.array(upper_bound2) - np.array(lower_bound2)) ** 2
Lk = np.mean(
0.5 * (np.array(ground_truth) - np.array(x_mean2)) ** 2 / var + 0.5 * np.log(var) + 0.5 * np.log(
2 * np.pi))
print("MF MSE {}".format(MSE))
print("MF lk {}".format(Lk))
MSE2.append(MSE)
Lk2.append(Lk)
QV = MultivariateNormalVariable(loc=np.zeros((3*T,)),
scale_tril=0.1*np.identity(3*T),
name="V",
learnable=True)
Qx = [DeterministicVariable(QV[0], 'x0')]
Qh = [DeterministicVariable(QV[0], 'h0')]
Qz = [DeterministicVariable(QV[0], 'z0')]
for t in range(1, T):
Qx.append(DeterministicVariable(x_mean2[t] + QV[t], x_names[t]))
Qh.append(DeterministicVariable(h_mean2[t] + QV[T + t], h_names[t]))
Qz.append(DeterministicVariable(z_mean2[t] + QV[2*T + t], z_names[t]))
variational_posterior = ProbabilisticModel(Qx + Qh + Qz)
AR_model.set_posterior_model(variational_posterior)
# Inference #
inference.perform_inference(AR_model,
number_iterations=N_itr_MN,
number_samples=N_smpl,
optimizer=optimizer,
lr=lr_mn)
loss_list3 = AR_model.diagnostics["loss curve"]
# ELBO
#ELBO3.append(float(AR_model.estimate_log_model_evidence(N_ELBO_smpl).detach().numpy()))
#print("MN {}".format(ELBO3[-1]))
# MSE
posterior_samples = AR_model._get_posterior_sample(2000)
x_mean3 = []
lower_bound3 = []
upper_bound3 = []
for xt in x:
x_posterior_samples = posterior_samples[xt].detach().numpy().flatten()
mean = np.mean(x_posterior_samples)
sd = np.sqrt(np.var(x_posterior_samples))
x_mean3.append(mean)
lower_bound3.append(mean - sd)
upper_bound3.append(mean + sd)
MSE = np.mean((np.array(ground_truth) - np.array(x_mean3)) ** 2)
var = 0.5 * ( | np.array(upper_bound3) | numpy.array |
import pip
try:
__import__('math')
except ImportError:
pip.main([ 'install', 'math' ])
try:
__import__('pandas')
except ImportError:
pip.main([ 'install', 'pandas' ])
try:
__import__('scipy')
except ImportError:
pip.main([ 'install', 'scipy' ])
try:
__import__('matplotlib')
except ImportError:
pip.main([ 'install', 'matplotlib' ])
try:
__import__('networkx')
except ImportError:
pip.main([ 'install', 'networkx' ])
try:
__import__('numpy')
except ImportError:
pip.main([ 'install', 'numpy' ])
try:
__import__('datetime')
except ImportError:
pip.main([ 'install', 'datetime' ])
import math
import numpy as np
import pandas as pd
from scipy.stats import cauchy
import random
import matplotlib.pyplot as plt
import networkx as nx
from numpy.random import choice as np_choice
random_matrix = pd.DataFrame([[int(random.random() * 100) for _ in range(100)]
for _ in range(100)])
random_matrix.to_csv('random_matrix.csv', header=True, index=False)
random_matrix = pd.read_csv('random_matrix.csv')
spisok = random_matrix.values.tolist()
def simulated_annealing(dist, n, t0):
"""
    Function implementing the simulated annealing algorithm.
    :param dist: list -- weight matrix
    :param n: int -- length of the route (number of cities)
    :param t0: int -- starting temperature
"""
def temperatura(k, t):
"""
        Compute the temperature for the simulated annealing algorithm.
        :param k: int -- number of cities
        :param t: int -- temperature
        :return t/k: float -- coefficient needed to compute
        the next temperature
"""
return t / k
way = [element for element in range(n)]
rand0 = [element for element in range(1, n)]
tk = 1
m = 1
s = 0
x0 = 0.1
x = [x0]
t = t0
s_list = []
while t > tk:
sp = 0
t = temperatura(m, t0)
x.append(random.uniform(0, 1))
way_p = [way[j] for j in range(n)]
rand = random.sample(rand0, 2)
way_p[rand[0]], way_p[rand[1]] = way_p[rand[1]], way_p[rand[0]]
for j in range(n - 1):
sp = sp + dist[way_p[j]][way_p[j + 1]]
sp = sp + dist[way_p[0]][way_p[-1]]
if m == 1 or sp < s:
s = sp
way = [way_p[j] for j in range(n)]
else:
p = math.exp(-(sp - s) / t)
if x[m - 1] < p:
x[m - 1], x[m] = x[m], x[m - 1]
s = sp
way = [way_p[j] for j in range(n)]
m += 1
s_list.append(s)
way.append(way[0])
return way, s, m, s_list
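# Illustrative usage sketch (not part of the original script): run the
# annealer on a small random distance matrix. The 5 cities and the starting
# temperature 10 are arbitrary example values.
def _example_simulated_annealing():
    n = 5
    dist = [[0 if i == j else random.uniform(1, 10) for j in range(n)]
            for i in range(n)]
    way, s, m, s_list = simulated_annealing(dist, n, 10)
    # way is the visiting order (closed tour), s its total length
    return way, s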
def inlet():
"""
    Input function for choosing how the weight matrix is specified.
    :return dist: list -- weight matrix
"""
def file():
"""
        Read a csv file and fill the matrix with the values
        taken from it.
        :return matrix_1: list -- matrix read from the csv file
"""
import csv
matrix_1 = []
name = input("Введите названи файла. Например, city.csv: ")
with open(name) as file:
reader = csv.reader(file, delimiter=';', quotechar=',')
for row in reader:
matrix_1.append(row)
matrix_1 = [[float(matrix_1[i][j]) for j in range(len(matrix_1))]
for i in range(len(matrix_1))]
return matrix_1
def random_dist(k):
"""
        Generate a random weight matrix.
        :param k: int -- number of cities
        :return d: list -- generated matrix
"""
d = [[0 if elem == j else random.uniform(0, 10) for j in range(k)]
for elem in range(k)]
for elem in range(k):
print(d[elem])
return d
def matr(m, n):
"""
        Fill a matrix with user-entered elements.
        :param m: int -- number of rows in the matrix
        :param n: int -- number of columns in the matrix
        :return matrix: list -- matrix filled with elements
"""
def el_int(el):
"""
            Check the type of an entered matrix element (integer).
            Returns True if the number is an integer, False otherwise.
            :param el: matrix element
"""
try:
int(el)
return True
except ValueError:
return False
def el_float(el):
"""
            Check the type of an entered matrix element (float).
            Returns True if the number is a float, False otherwise.
            :param el: matrix element
"""
try:
float(el)
return True
except ValueError:
return False
def el_complex(el):
"""
            Check the type of an entered matrix element (complex).
            Returns True if the number is complex, False otherwise.
            :param el: matrix element
"""
try:
complex(el)
return True
except ValueError:
return False
def rev_complex(h):
"""
            Convert a complex number to its normal form, i.e. a + i*b.
            Example: if you entered -j + 1, the function converts it to 1 - j
            :param h: str -- matrix element
            :return h_rev: str -- converted element
"""
h_rev = ''
sep = 0
if h[0] == '+' or h[0] == '-':
for element_matr in range(1, len(h)):
if h[element_matr] == '+' or h[element_matr] == '-':
sep = element_matr
break
h_rev = h[sep:len(h)] + h[0:sep]
else:
for element_matr in range(0, len(h)):
if h[element_matr] == '+' or h[element_matr] == '-':
sep = element_matr
break
h_rev = h[sep:len(h)] + '+' + h[0:sep]
return (h_rev)
matrix = []
        print('Enter the elements of each matrix row separated by spaces:')
for elem_matr in range(0, m):
a = []
row = input()
row = row.split(' ')
matrix.append(row)
if len(row) != n:
                print('Incorrect number of elements in the matrix row.')
exit()
for j in range(0, n):
el = matrix[elem_matr][j]
k = 0
while k == 0:
if el_int(el) is True:
matrix[elem_matr][j] = int(el)
k = 1
else:
if el_float(el) is True:
matrix[elem_matr][j] = float(el)
k = 1
else:
if el_complex(el) is True:
matrix[elem_matr][j] = complex(el)
k = 1
else:
if el_complex(rev_complex(el)) is True:
matrix[elem_matr][j] = complex(
rev_complex(el))
k = 1
else:
                                    el = input('Invalid input format. '
                                               'Please re-enter '
                                               'element [{}, '
                                               '{}]: '.format(elem_matr, j))
return (matrix)
print("Ввод данных")
length = int(input("Введите: 1 - для считывания файла с устройства, "
"2 - для случайной генерации, "
"3 - для ввода матрицы с клавиатуры\n"))
if length == 1:
dist = file()
if length == 2:
k = int(input("Введите количество городов: "))
dist = random_dist(k)
if length == 3:
k = int(input("Введите количество городов: "))
dist = matr(k, k)
return dist
class AntColony(object):
"""
Класс для нахождения оптимального пути алгоритмом Муравьиной колонии.
"""
def __init__(self, distances, n_ants, n_best, n_iterations,
decay, alpha=1, beta=1):
"""
        Replace 0 entries in the distance matrix with inf.
        :param distances: list -- weight matrix
        :param n_ants: int -- number of ants
        :param n_best: int
        :param n_iterations: int -- number of iterations
        :param decay: float
        :param alpha: int -- weight given to the pheromone trails
        :param beta: int -- weight given to the path length
"""
i = 0
j = 0
while i < len(distances):
while j < len(distances):
if distances[i][j] == 0:
distances[i][j] = np.inf
i += 1
j += 1
else:
continue
self.distances = | np.array(distances) | numpy.array |
import gym
from gym.spaces import Box
from gym.utils import seeding
import numpy as np
from .world import World
from .agents import Car, Building, Pedestrian, Painting
from .geometry import Point
import time
class Scenario1(gym.Env):
def __init__(self):
self.seed(0) # just in case we forget seeding
self.init_ego = Car(Point(20, 20), heading = np.pi/2)
self.init_ego.velocity = Point(1., 0.)
self.collision_point = Point(20, 90)
self.target = Point(20, 120)
self.wall = Point(25, 80)
self.dt = 0.1
self.T = 40
self.initiate_world()
self.reset()
def initiate_world(self):
self.world = World(self.dt, width = 120, height = 120, ppm = 5)
self.world.add(Building(Point(72.5, 107.5), Point(95, 25)))
self.world.add(Building(Point(7.5, 107.5), Point(15, 25)))
self.world.add(Building(Point(7.5, 40), Point(15, 80)))
self.world.add(Building(Point(72.5, 40), Point(95, 80)))
def reset(self):
self.ego = self.init_ego.copy()
self.ego.min_speed = 0.
self.ego.max_speed = 20.
self.ego_reaction_time = 0.6
self.add_noise()
self.world.reset()
self.world.add(self.ego)
return self._get_obs()
def close(self):
self.world.close()
def add_noise(self):
self.ego.center += Point(0, 20*self.np_random.rand() - 10)
self.ego_reaction_time += self.np_random.rand() - 0.5
@property
def observation_space(self):
low = np.array([0, 0, -600, 0])
high= np.array([self.target.y + self.ego.max_speed*self.dt, self.ego.max_speed, 80, 0])
return Box(low=low, high=high)
@property
def action_space(self):
return Box(low=np.array([-3.5]), high=np.array([2.]))
def seed(self, seed):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def get_ego_control(self,policy_no=0):
ttc_ego = (self.collision_point.y - self.ego.y) / np.abs(self.ego.yp + 1e-8)
ttc_adv = np.inf
if policy_no==0: # aggressive
if ttc_ego < 0.05 or ttc_adv < 0:
return np.array([0, 1.95 + 0.05*self.np_random.rand()], dtype=np.float32)
elif ttc_ego < ttc_adv - 0.1:
return np.array([0, np.minimum(2.0, np.maximum(1.2, self.ego.inputAcceleration + self.np_random.rand()*0.2 - 0.1))], dtype=np.float32)
else:
return np.array([0, -3.25-np.random.rand()*0.25], dtype=np.float32)
elif policy_no==1: # cautious
ttw_ego = (self.wall.y - self.ego.y)/ | np.abs(self.ego.yp + 1e-8) | numpy.abs |
# Code adapted from https://github.com/ycjungSubhuman/python-warp-ui
import typing
import os
import argparse
import cv2
import numpy as np
from matplotlib.backend_bases import Event, KeyEvent
from matplotlib.image import AxesImage
from matplotlib.collections import PathCollection
from matplotlib.quiver import Quiver
import matplotlib.pyplot as plt
import torch
import sys
sys.path.append("..")
from adaptiveStrokeNet import JohnsonAdaptiveStroke, utils
from typing import Any, List, Union
from vtk_tps import create_vtk_thin_spline_warp
PATH_OUTPUT = 'out.png'
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', type=str, help='input image path')
parser.add_argument('-r', '--reference', type=str,
help='reference image path')
parser.add_argument('-s', '--points_start', type=str,
help='input image keypoint .npy file (optional)')
parser.add_argument('-e', '--points_end', type=str,
help='reference image keypoint .npy file (optional)')
parser.add_argument('-o', '--output', type=str,
help='output image path (default="out.png")')
args = parser.parse_args()
assert args.input is not None
assert args.reference is not None
return args
def plot_df(axis: plt.Axes, df: np.ndarray, scale: int = 8) -> Quiver:
return axis.quiver(df[::-scale, ::scale, 0], -df[::-scale, ::scale, 1],
units='xy', scale=(1/scale), angles='xy')
def plot_image(axis: plt.Axes, image: np.ndarray) -> AxesImage:
return axis.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
def get_grid(width: int, height: int) -> np.ndarray:
xs = np.stack([np.arange(0, width) for _ in range(height)]).astype(np.float32)
ys = np.stack([np.arange(0, height) for _ in range(width)]).astype(np.float32).T
return np.concatenate([xs[..., np.newaxis], ys[..., np.newaxis]], 2)
def calc_df(
width: int,
height: int,
ptrs_from: List[np.ndarray],
ptrs_to: List[np.ndarray]):
num_disps = min(len(ptrs_from), len(ptrs_to))
grid = get_grid(width, height)
tps = cv2.createThinPlateSplineShapeTransformer()
arr_src = np.expand_dims(np.array(ptrs_from), 0)
arr_dst = np.expand_dims(np.array(ptrs_to), 0)
matches = [cv2.DMatch(i, i, 0) for i in range(num_disps)]
tps.estimateTransformation(arr_src, arr_dst, matches)
grid_warped = tps.applyTransformation(
grid.reshape(1, -1, 2))[1].reshape(height, width, 2)
return grid_warped
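# Illustrative sketch (not part of the original tool): estimate a dense
# displacement field from four hand-picked control-point pairs. The point
# values and the 64x64 grid size are arbitrary example numbers.
def _example_calc_df():
    ptrs_from = [np.array(p, dtype=np.float32)
                 for p in [[10, 10], [50, 10], [50, 50], [10, 50]]]
    ptrs_to = [np.array(p, dtype=np.float32)
               for p in [[12, 10], [50, 12], [48, 50], [10, 48]]]
    df = calc_df(64, 64, ptrs_from, ptrs_to)
    # df[y, x] holds the warped (x, y) coordinate of each grid pixel
    return df.shape  # expected: (64, 64, 2)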
def save_result_warp(
path_output: str,
image: np.ndarray,
df_forward: np.ndarray,
df_backward: np.ndarray,
ptrs_start: List[np.ndarray],
ptrs_end: List[np.ndarray]):
postfix_df_forward = '.df.forward.npy'
postfix_df_backward = '.df.backward.npy'
postfix_ptrs_start = '.ptrs.start.npy'
postfix_ptrs_end = '.ptrs.end.npy'
assert image.shape[:2] == df_forward.shape[:2]
assert image.shape[:2] == df_backward.shape[:2]
cv2.imwrite(path_output, image)
np.save(path_output+postfix_df_forward, df_forward)
np.save(path_output+postfix_df_backward, df_backward)
np.save(path_output+postfix_ptrs_start, np.stack(ptrs_start))
np.save(path_output+postfix_ptrs_end, np.stack(ptrs_end))
class WarpingWindow:
def __init__(self,
image_in: np.ndarray,
image_ref: np.ndarray,
path_output: str):
self.image_in = image_in
self.image_warp = image_in
self.image_ref = image_ref
self.path_output = path_output
self.ptrs_start_control = []
self.ptr_start_selected = None
self.ptrs_end_control = []
self.ptr_end_selected = None
self.add_border_points(4)
self.fig_root, self.axes = plt.subplots(1, 3)
self.axes[0].set_title('set source points')
self.axes[1].set_title('set target points')
self.axes[2].set_title('warp output')
self.fig_root.canvas.mpl_connect('button_press_event', self._callback_press)
self.fig_root.canvas.mpl_connect('button_release_event', self._callback_release)
self.fig_root.canvas.mpl_connect('motion_notify_event', self._callback_motion)
self.fig_root.canvas.mpl_connect('key_press_event', self._callback_key)
self.axesimage_warp = plot_image(self.axes[2], self.image_in)
self.scatter_in = self.axes[0].scatter([], [], s=5, c='#0000ff')
self.fig_nst_output, self.nst_output_ax = plt.subplots(1, 1)
self.nst_output_ax.set_title('Reversible Warping output')
self.nst_output_im = self.nst_output_ax.imshow(self.image_in)
self.scatter_out = self.axes[1].scatter([], [], s=5, c='#ff0000')
self.scatter_warp = self.axes[2].scatter([], [], s=5, c='#ff0000')
self.device = torch.device("cuda")
plot_image(self.axes[0], self.image_in)
plot_image(self.axes[1], self.image_ref)
STYLE = "giovanni"
model_path = [f.path for f in os.scandir(f"{os.getcwd()}/../models/adaptiveStroke/" + STYLE) if f.name.endswith(".model")][0]
style_model = JohnsonAdaptiveStroke()
state_dict = torch.load(model_path)
style_model.eval()
style_model.load_state_dict(state_dict)
style_model.to(self.device)
self.style_model = style_model
def add_border_points(self, num_pts=50):
w_pts = np.linspace(0,1,num=num_pts,endpoint=False)
h_pts = np.linspace(0,1,num=num_pts,endpoint=False)
border_pts = [np.array([0, 0]), np.array([1, 0]), np.array([0, 1]), np.array([1, 1])]
for i in range(1, num_pts):
border_pts.append(np.array([h_pts[i], 0]))
border_pts.append(np.array([h_pts[i], 1]))
border_pts.append( | np.array([0, w_pts[i]]) | numpy.array |
import argparse
from datetime import datetime
import numpy as np
import random
import tensorflow as tf
import socket
import os
import sys
import h5py
import struct
BASE_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASE_DIR) # model
sys.path.append(os.path.join(BASE_DIR, 'models'))
sys.path.append(os.path.join(BASE_DIR, 'data'))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
sys.path.append(os.path.join(BASE_DIR, 'preprocessing'))
import model_normalization as model
from concurrent.futures import ThreadPoolExecutor
import data_sdf_h5_queue # as data
import create_file_lst
slim = tf.contrib.slim
lst_dir, cats, all_cats, raw_dirs = create_file_lst.get_all_info()
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str, default='0', help='GPU to use [default: GPU 0]')
parser.add_argument('--max_epoch', type=int, default=1, help='Epoch to run [default: 201]')
parser.add_argument('--img_h', type=int, default=137, help='Image Height')
parser.add_argument('--img_w', type=int, default=137, help='Image Width')
parser.add_argument('--batch_size', type=int, default=1, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate', type=float, default=1e-4, help='Initial learning rate [default: 0.001]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.9, help='Decay rate for lr decay [default: 0.7]')
parser.add_argument('--num_classes', type=int, default=1024, help='vgg dim')
parser.add_argument('--num_points', type=int, default=1, help='Point Number [default: 2048]')
parser.add_argument('--sdf_res', type=int, default=64, help='sdf grid')
parser.add_argument('--mask_tp', type=str, default="neg_two_sides")
parser.add_argument('--mask_rt', type=int, default=40000)
parser.add_argument('--alpha', action='store_true')
parser.add_argument('--rot', action='store_true')
parser.add_argument('--tanh', action='store_true')
parser.add_argument('--cat_limit', type=int, default=168000, help="balance each category, 1500 * 24 = 36000")
parser.add_argument('--multi_view', action='store_true')
parser.add_argument('--num_sample_points', type=int, default=1, help='Sample Point Number [default: 2048]')
parser.add_argument('--log_dir', default='checkpoint/exp_200', help='Log dir [default: log]')
parser.add_argument('--test_lst_dir', default=lst_dir, help='test mesh data list')
parser.add_argument('--iso', type=float, default=0.0, help='iso value')
parser.add_argument('--threedcnn', action='store_true')
parser.add_argument('--img_feat_onestream', action='store_true')
parser.add_argument('--img_feat_twostream', action='store_true')
parser.add_argument('--category', default="all", help='Which single class to train on [default: None]')
parser.add_argument('--binary', action='store_true')
parser.add_argument('--create_obj', action='store_true', help="create_obj or test accuracy on test set")
parser.add_argument('--store', action='store_true')
parser.add_argument('--view_num', type=int, default=24, help="how many views do you want to create for each obj")
parser.add_argument('--cam_est', action='store_true', help="if you are using the estimated camera image h5")
parser.add_argument('--augcolorfore', action='store_true')
parser.add_argument('--augcolorback', action='store_true')
parser.add_argument('--backcolorwhite', action='store_true')
FLAGS = parser.parse_args()
print('pid: %s'%(str(os.getpid())))
print(FLAGS)
EPOCH_CNT = 0
NUM_POINTS = FLAGS.num_points
BATCH_SIZE = FLAGS.batch_size
RESOLUTION = FLAGS.sdf_res+1
TOTAL_POINTS = RESOLUTION * RESOLUTION * RESOLUTION
if FLAGS.img_feat_twostream:
SPLIT_SIZE = int(np.ceil(TOTAL_POINTS / 214669.0))
elif FLAGS.threedcnn :
SPLIT_SIZE = 1
else:
SPLIT_SIZE = int(np.ceil(TOTAL_POINTS / 274625.0))
NUM_SAMPLE_POINTS = int(np.ceil(TOTAL_POINTS / SPLIT_SIZE))
GPU_INDEX = FLAGS.gpu
PRETRAINED_MODEL_PATH = FLAGS.log_dir
LOG_DIR = FLAGS.log_dir
SDF_WEIGHT = 10.
os.environ["CUDA_VISIBLE_DEVICES"] = GPU_INDEX
if not os.path.exists(LOG_DIR): os.makedirs(LOG_DIR)
RESULT_PATH = os.path.join(LOG_DIR, 'test_results_allpts')
if FLAGS.cam_est:
RESULT_OBJ_PATH = os.path.join(LOG_DIR, 'test_objs', "camest_"
+ str(RESOLUTION) + "_" + str(FLAGS.iso))
print("RESULT_OBJ_PATH: ",RESULT_OBJ_PATH)
else:
RESULT_OBJ_PATH = os.path.join(LOG_DIR, 'test_objs', str(RESOLUTION) + "_" + str(FLAGS.iso))
if not os.path.exists(RESULT_PATH): os.mkdir(RESULT_PATH)
if not os.path.exists(RESULT_OBJ_PATH): os.makedirs(RESULT_OBJ_PATH, exist_ok=True)
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_test.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
IMG_SIZE = FLAGS.img_h
HOSTNAME = socket.gethostname()
print("HOSTNAME:", HOSTNAME)
VV = False
VV = VV and (HOSTNAME == "ubuntu")
TEST_LISTINFO = []
cat_ids = []
cats_limit = {}
if FLAGS.category == "all":
for key, value in cats.items():
cat_ids.append(value)
cats_limit[value] = 0
else:
cat_ids.append(cats[FLAGS.category])
cats_limit[cats[FLAGS.category]] = 0
for cat_id in cat_ids:
test_lst = os.path.join(FLAGS.test_lst_dir, cat_id+"_test.lst")
with open(test_lst, 'r') as f:
lines = f.read().splitlines()
for line in lines:
render_list = random.sample(range(24), FLAGS.view_num)
for render in render_list:
cats_limit[cat_id] += 1
TEST_LISTINFO += [(cat_id, line.strip(), render)]
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
if FLAGS.threedcnn:
info = {'rendered_dir': raw_dirs["renderedh5_dir_v2"],
'sdf_dir': raw_dirs["3dnnsdf_dir"]}
elif FLAGS.img_feat_onestream or FLAGS.img_feat_twostream:
info = {'rendered_dir': raw_dirs["renderedh5_dir"],
'sdf_dir': raw_dirs["sdf_dir"]}
if FLAGS.cam_est:
info['rendered_dir']= raw_dirs["renderedh5_dir_est"]
else:
info = {'rendered_dir': raw_dirs["renderedh5_dir_v2"],
'sdf_dir': raw_dirs['sdf_dir_v2']}
TEST_DATASET = data_sdf_h5_queue.Pt_sdf_img(FLAGS,
listinfo=TEST_LISTINFO, info=info, cats_limit=cats_limit, shuffle=False)
print(info)
def create():
log_string(LOG_DIR)
input_pls = model.placeholder_inputs(BATCH_SIZE, NUM_POINTS, (IMG_SIZE, IMG_SIZE),
num_sample_pc=NUM_SAMPLE_POINTS, scope='inputs_pl', FLAGS=FLAGS)
is_training_pl = tf.placeholder(tf.bool, shape=())
print(is_training_pl)
batch = tf.Variable(0, name='batch')
print("--- Get model and loss")
# Get model and loss
end_points = model.get_model(input_pls, NUM_POINTS, is_training_pl, bn=False,FLAGS=FLAGS)
loss, end_points = model.get_loss(end_points,
sdf_weight=SDF_WEIGHT, num_sample_points=NUM_SAMPLE_POINTS, FLAGS=FLAGS)
# Create a session
gpu_options = tf.GPUOptions() # per_process_gpu_memory_fraction=0.99
config = tf.ConfigProto(gpu_options=gpu_options)
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
init = tf.global_variables_initializer()
sess.run(init)
######### Loading Checkpoint ###############
saver = tf.train.Saver([v for v in tf.get_collection_ref(tf.GraphKeys.GLOBAL_VARIABLES) if
('lr' not in v.name) and ('batch' not in v.name)])
ckptstate = tf.train.get_checkpoint_state(PRETRAINED_MODEL_PATH)
if ckptstate is not None:
LOAD_MODEL_FILE = os.path.join(PRETRAINED_MODEL_PATH, os.path.basename(ckptstate.model_checkpoint_path))
try:
# load_model(sess, PRETRAINED_PN_MODEL_FILE, ['refpc_reconstruction','sdfprediction','vgg_16'], strict=True)
with NoStdStreams():
saver.restore(sess, LOAD_MODEL_FILE)
print("Model loaded in file: %s" % LOAD_MODEL_FILE)
except:
print("Fail to load overall modelfile: %s" % PRETRAINED_MODEL_PATH)
###########################################
ops = {'input_pls': input_pls,
'is_training_pl': is_training_pl,
'loss': loss,
'step': batch,
'end_points': end_points}
TEST_DATASET.start()
test_one_epoch(sess, ops)
TEST_DATASET.shutdown()
class NoStdStreams(object):
def __init__(self,stdout = None, stderr = None):
self.devnull = open(os.devnull,'w')
self._stdout = stdout or self.devnull or sys.stdout
self._stderr = stderr or self.devnull or sys.stderr
def __enter__(self):
self.old_stdout, self.old_stderr = sys.stdout, sys.stderr
self.old_stdout.flush(); self.old_stderr.flush()
sys.stdout, sys.stderr = self._stdout, self._stderr
def __exit__(self, exc_type, exc_value, traceback):
self._stdout.flush(); self._stderr.flush()
sys.stdout = self.old_stdout
sys.stderr = self.old_stderr
self.devnull.close()
def test_one_epoch(sess, ops):
""" ops: dict mapping from string to tf ops """
is_training = False
# Shuffle train samples
num_batches = int(len(TEST_DATASET)) // FLAGS.batch_size
print('num_batches', num_batches)
loss_all = 0
log_string(str(datetime.now()))
losses = {}
for lossname in ops['end_points']['losses'].keys():
losses[lossname] = 0
with ThreadPoolExecutor(max_workers=4) as executor:
for batch_idx in range(num_batches):
batch_data = TEST_DATASET.fetch()
extra_pts = np.zeros((1, SPLIT_SIZE * NUM_SAMPLE_POINTS - TOTAL_POINTS, 3), dtype=np.float32)
batch_points = np.zeros((SPLIT_SIZE, 0, NUM_SAMPLE_POINTS, 3), dtype=np.float32)
if not FLAGS.threedcnn:
for b in range(BATCH_SIZE):
print(batch_data)
sdf_params = batch_data['sdf_params'][b]
x_ = np.linspace(sdf_params[0], sdf_params[3], num=RESOLUTION)
y_ = np.linspace(sdf_params[1], sdf_params[4], num=RESOLUTION)
z_ = np.linspace(sdf_params[2], sdf_params[5], num=RESOLUTION)
z, y, x = np.meshgrid(z_, y_, x_, indexing='ij')
x = | np.expand_dims(x, 3) | numpy.expand_dims |
from __future__ import print_function
import numpy as np
import scipy.sparse as sp
from six import string_types
from .Utils.SolverUtils import *
from . import Utils
norm = np.linalg.norm
__all__ = [
'Minimize', 'Remember', 'SteepestDescent', 'BFGS', 'GaussNewton',
'InexactGaussNewton', 'ProjectedGradient', 'NewtonRoot',
'StoppingCriteria', 'IterationPrinters'
]
SolverICG = SolverWrapI(sp.linalg.cg, checkAccuracy=False)
class StoppingCriteria(object):
"""docstring for StoppingCriteria"""
iteration = {
"str": "%d : maxIter = %3d <= iter = %3d",
"left": lambda M: M.maxIter, "right": lambda M: M.iter,
"stopType": "critical"
}
iterationLS = {
"str": "%d : maxIterLS = %3d <= iterLS = %3d",
"left": lambda M: M.maxIterLS, "right": lambda M: M.iterLS,
"stopType": "critical"
}
armijoGoldstein = {
"str": "%d : ft = %1.4e <= alp*descent = %1.4e",
"left": lambda M: M._LS_ft,
"right": lambda M: M.f + M.LSreduction * M._LS_descent,
"stopType": "optimal"
}
tolerance_f = {
"str": "%d : |fc-fOld| = %1.4e <= tolF*(1+|f0|) = %1.4e",
"left": lambda M: 1 if M.iter==0 else abs(M.f-M.f_last),
"right": lambda M: 0 if M.iter==0 else M.tolF*(1+abs(M.f0)),
"stopType": "optimal"
}
moving_x = {
"str": "%d : |xc-x_last| = %1.4e <= tolX*(1+|x0|) = %1.4e",
"left": lambda M: 1 if M.iter==0 else norm(M.xc-M.x_last),
"right": lambda M: 0 if M.iter==0 else M.tolX*(1+norm(M.x0)),
"stopType": "optimal"
}
tolerance_g = {
"str": "%d : |proj(x-g)-x| = %1.4e <= tolG = %1.4e",
"left": lambda M: norm(M.projection(M.xc - M.g) - M.xc),
"right": lambda M: M.tolG,
"stopType": "optimal"
}
norm_g = {
"str": "%d : |proj(x-g)-x| = %1.4e <= 1e3*eps = %1.4e",
"left": lambda M: norm(M.projection(M.xc - M.g) - M.xc),
"right": lambda M: 1e3*M.eps,
"stopType": "critical"
}
bindingSet = {
"str": "%d : probSize = %3d <= bindingSet = %3d",
"left": lambda M: M.xc.size,
"right": lambda M: np.sum(M.bindingSet(M.xc)),
"stopType": "critical"
}
bindingSet_LS = {
"str": "%d : probSize = %3d <= bindingSet = %3d",
"left": lambda M: M._LS_xt.size,
"right": lambda M: np.sum(M.bindingSet(M._LS_xt)),
"stopType": "critical"
}
phi_d_target_Minimize = {
"str": "%d : phi_d = %1.4e <= phi_d_target = %1.4e ",
"left": lambda M: M.parent.phi_d,
"right": lambda M: M.parent.phi_d_target,
"stopType": "critical"
}
phi_d_target_Inversion = {
"str": "%d : phi_d = %1.4e <= phi_d_target = %1.4e ",
"left": lambda I: I.phi_d, "right": lambda I: I.phi_d_target,
"stopType": "critical"
}
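# Illustrative sketch (not part of SimPEG): stoppers are plain dicts whose
# "left"/"right" callables are evaluated against the optimization object and
# compared by Utils.checkStoppers, so a user-defined stopper can be appended
# to Minimize.stoppers in the same format. The 1e-12 threshold below is an
# arbitrary example value.
example_small_gradient_stopper = {
    "str": "%d : |g| = %1.4e <= 1e-12 = %1.4e",
    "left": lambda M: norm(M.g),
    "right": lambda M: 1e-12,
    "stopType": "critical"
}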
class IterationPrinters(object):
"""docstring for IterationPrinters"""
iteration = {
"title": "#", "value": lambda M: M.iter, "width": 5, "format": "%3d"
}
f = {
"title": "f", "value": lambda M: M.f, "width": 10, "format": "%1.2e"
}
norm_g = {
"title": "|proj(x-g)-x|",
"value": lambda M: norm(M.projection(M.xc - M.g) - M.xc),
"width": 15, "format": "%1.2e"
}
totalLS = {
"title": "LS", "value": lambda M: M.iterLS, "width": 5, "format": "%d"
}
iterationLS = {
"title": "#", "value": lambda M: (M.iter, M.iterLS), "width": 5,
"format": "%3d.%d"
}
LS_ft = {
"title": "ft", "value": lambda M: M._LS_ft, "width": 10,
"format": "%1.2e"
}
LS_t = {
"title": "t", "value": lambda M: M._LS_t, "width": 10,
"format": "%0.5f"
}
LS_armijoGoldstein = {
"title": "f + alp*g.T*p",
"value": lambda M: M.f + M.LSreduction*M._LS_descent, "width": 16,
"format": "%1.2e"
}
itType = {
"title": "itType", "value": lambda M: M._itType, "width": 8,
"format": "%s"
}
aSet = {
"title": "aSet", "value": lambda M: np.sum(M.activeSet(M.xc)),
"width": 8, "format": "%d"
}
bSet = {
"title": "bSet", "value": lambda M: np.sum(M.bindingSet(M.xc)),
"width": 8, "format": "%d"
}
comment = {
"title": "Comment", "value": lambda M: M.comment, "width": 12,
"format": "%s"
}
beta = {
"title": "beta", "value": lambda M: M.parent.beta, "width": 10,
"format": "%1.2e"
}
phi_d = {
"title": "phi_d", "value": lambda M: M.parent.phi_d, "width": 10,
"format": "%1.2e"
}
phi_m = {
"title": "phi_m", "value": lambda M: M.parent.phi_m, "width": 10,
"format": "%1.2e"
}
class Minimize(object):
"""
Minimize is a general class for derivative based optimization.
"""
name = "General Optimization Algorithm" #: The name of the optimization algorithm
maxIter = 20 #: Maximum number of iterations
maxIterLS = 10 #: Maximum number of iterations for the line-search
maxStep = np.inf #: Maximum step possible, used in scaling before the line-search.
LSreduction = 1e-4 #: Expected decrease in the line-search
LSshorten = 0.5 #: Line-search step is shortened by this amount each time.
tolF = 1e-1 #: Tolerance on function value decrease
tolX = 1e-1 #: Tolerance on norm(x) movement
tolG = 1e-1 #: Tolerance on gradient norm
eps = 1e-5 #: Small value
stopNextIteration = False #: Stops the optimization program nicely.
debug = False #: Print debugging information
debugLS = False #: Print debugging information for the line-search
comment = '' #: Used by some functions to indicate what is going on in the algorithm
counter = None #: Set this to a SimPEG.Utils.Counter() if you want to count things
parent = None #: This is the parent of the optimization routine.
def __init__(self, **kwargs):
self.stoppers = [
StoppingCriteria.tolerance_f, StoppingCriteria.moving_x,
StoppingCriteria.tolerance_g, StoppingCriteria.norm_g,
StoppingCriteria.iteration
]
self.stoppersLS = [
StoppingCriteria.armijoGoldstein, StoppingCriteria.iterationLS
]
self.printers = [
IterationPrinters.iteration, IterationPrinters.f,
IterationPrinters.norm_g, IterationPrinters.totalLS
]
self.printersLS = [
IterationPrinters.iterationLS, IterationPrinters.LS_ft,
IterationPrinters.LS_t, IterationPrinters.LS_armijoGoldstein
]
Utils.setKwargs(self, **kwargs)
@property
def callback(self):
return getattr(self, '_callback', None)
@callback.setter
def callback(self, value):
if self.callback is not None:
print(
'The callback on the {0!s} Optimization was '
                'replaced.'.format(self.__class__.__name__)
)
self._callback = value
@Utils.timeIt
def minimize(self, evalFunction, x0):
"""minimize(evalFunction, x0)
Minimizes the function (evalFunction) starting at the location x0.
:param callable evalFunction: function handle that evaluates: f, g, H = F(x)
:param numpy.ndarray x0: starting location
:rtype: numpy.ndarray
:return: x, the last iterate of the optimization algorithm
evalFunction is a function handle::
(f[, g][, H]) = evalFunction(x, return_g=False, return_H=False )
def evalFunction(x, return_g=False, return_H=False):
out = (f,)
if return_g:
out += (g,)
if return_H:
out += (H,)
return out if len(out) > 1 else out[0]
The algorithm for general minimization is as follows::
startup(x0)
printInit()
while True:
doStartIteration()
f, g, H = evalFunction(xc)
printIter()
if stoppingCriteria(): break
p = findSearchDirection()
p = scaleSearchDirection(p)
xt, passLS = modifySearchDirection(p)
if not passLS:
xt, caught = modifySearchDirectionBreak(p)
if not caught: return xc
doEndIteration(xt)
printDone()
finish()
return xc
"""
self.evalFunction = evalFunction
self.startup(x0)
self.printInit()
print('x0 has any nan: {:b}'.format(np.any(np.isnan(x0))))
while True:
self.doStartIteration()
self.f, self.g, self.H = evalFunction(
self.xc, return_g=True, return_H=True
)
self.printIter()
if self.stoppingCriteria():
break
self.searchDirection = self.findSearchDirection()
del self.H #: Doing this saves memory, as it is not needed in the rest of the computations.
p = self.scaleSearchDirection(self.searchDirection)
xt, passLS = self.modifySearchDirection(p)
if not passLS:
xt, caught = self.modifySearchDirectionBreak(p)
if not caught:
return self.xc
self.doEndIteration(xt)
if self.stopNextIteration:
break
self.printDone()
self.finish()
return self.xc
@Utils.callHooks('startup')
def startup(self, x0):
"""
**startup** is called at the start of any new minimize call.
This will set::
x0 = x0
xc = x0
iter = iterLS = 0
:param numpy.ndarray x0: initial x
:rtype: None
:return: None
"""
self.iter = 0
self.iterLS = 0
self.stopNextIteration = False
x0 = self.projection(x0) # ensure that we start of feasible.
self.x0 = x0
self.xc = x0
self.f_last = np.nan
self.x_last = x0
@Utils.count
@Utils.callHooks('doStartIteration')
def doStartIteration(self):
"""doStartIteration()
**doStartIteration** is called at the start of each minimize
iteration.
:rtype: None
:return: None
"""
pass
def printInit(self, inLS=False):
"""
**printInit** is called at the beginning of the optimization
routine.
If there is a parent object, printInit will check for a
parent.printInit function and call that.
"""
pad = ' '*10 if inLS else ''
name = self.name if not inLS else self.nameLS
Utils.printTitles(
self, self.printers if not inLS else self.printersLS, name, pad
)
@Utils.callHooks('printIter')
def printIter(self, inLS=False):
"""
**printIter** is called directly after function evaluations.
If there is a parent object, printIter will check for a
parent.printIter function and call that.
"""
pad = ' '*10 if inLS else ''
Utils.printLine(
self, self.printers if not inLS else self.printersLS, pad=pad
)
def printDone(self, inLS=False):
"""
**printDone** is called at the end of the optimization routine.
If there is a parent object, printDone will check for a
parent.printDone function and call that.
"""
pad = ' '*10 if inLS else ''
stop, done = (
(' STOP! ', ' DONE! ') if not inLS else
('----------------', ' End Linesearch ')
)
stoppers = self.stoppers if not inLS else self.stoppersLS
Utils.printStoppers(self, stoppers, pad='', stop=stop, done=done)
@Utils.callHooks('finish')
def finish(self):
"""finish()
**finish** is called at the end of the optimization.
:rtype: None
:return: None
"""
pass
def stoppingCriteria(self, inLS=False):
if self.iter == 0:
self.f0 = self.f
self.g0 = self.g
return Utils.checkStoppers(
self, self.stoppers if not inLS else self.stoppersLS
)
@Utils.timeIt
@Utils.callHooks('projection')
def projection(self, p):
"""projection(p)
projects the search direction.
by default, no projection is applied.
:param numpy.ndarray p: searchDirection
:rtype: numpy.ndarray
:return: p, projected search direction
"""
return p
@Utils.timeIt
def findSearchDirection(self):
"""findSearchDirection()
**findSearchDirection** should return an approximation of:
.. math::
H p = - g
Where you are solving for the search direction, p
The default is:
.. math::
H = I
p = - g
And corresponds to SteepestDescent.
The latest function evaluations are present in::
self.f, self.g, self.H
:rtype: numpy.ndarray
:return: p, Search Direction
"""
return -self.g
@Utils.count
def scaleSearchDirection(self, p):
"""scaleSearchDirection(p)
**scaleSearchDirection** should scale the search direction if
appropriate.
Set the parameter **maxStep** in the minimize object, to scale back
the gradient to a maximum size.
:param numpy.ndarray p: searchDirection
:rtype: numpy.ndarray
:return: p, Scaled Search Direction
"""
        if self.maxStep < np.abs(p).max():
            p = self.maxStep*p/np.abs(p).max()
return p
nameLS = "Armijo linesearch" #: The line-search name
@Utils.timeIt
def modifySearchDirection(self, p):
"""modifySearchDirection(p)
**modifySearchDirection** changes the search direction based on
some sort of linesearch or trust-region criteria.
        By default, an Armijo backtracking linesearch is performed with the
        following parameters:
            * maxIterLS, the maximum number of linesearch iterations
            * LSreduction, the expected reduction, default: 1e-4
            * LSshorten, how much the step is shortened each iteration, default: 0.5
        If the linesearch completes and a descent direction is found,
        passLS is returned as True.
        Otherwise, modifySearchDirectionBreak is called.
:param numpy.ndarray p: searchDirection
:rtype: tuple
:return: (xt, passLS) numpy.ndarray, bool
"""
# Projected Armijo linesearch
self._LS_t = 1
self.iterLS = 0
while self.iterLS < self.maxIterLS:
self._LS_xt = self.projection(self.xc + self._LS_t*p)
self._LS_ft = self.evalFunction(
self._LS_xt, return_g=False, return_H=False
)
            self._LS_descent = np.inner(self.g, self._LS_xt - self.xc)  # measured on the projected step (xt - xc), which accounts for t and for the projection
if self.stoppingCriteria(inLS=True):
break
self.iterLS += 1
self._LS_t = self.LSshorten*self._LS_t
if self.debugLS:
if self.iterLS == 1: self.printInit(inLS=True)
self.printIter(inLS=True)
if self.debugLS and self.iterLS > 0:
self.printDone(inLS=True)
return self._LS_xt, self.iterLS < self.maxIterLS
@Utils.count
def modifySearchDirectionBreak(self, p):
"""modifySearchDirectionBreak(p)
Code is called if modifySearchDirection fails
to find a descent direction.
The search direction is passed as input and
this function must pass back both a new searchDirection,
and if the searchDirection break has been caught.
        By default, no additional work is done, and False is returned,
        indicating that the break was not caught.
:param numpy.ndarray p: searchDirection
:rtype: tuple
:return: (xt, breakCaught) numpy.ndarray, bool
"""
self.printDone(inLS=True)
print('The linesearch got broken. Boo.')
return p, False
@Utils.count
@Utils.callHooks('doEndIteration')
def doEndIteration(self, xt):
"""doEndIteration(xt)
**doEndIteration** is called at the end of each minimize iteration.
By default, function values and x locations are shuffled to store 1
past iteration in memory.
self.xc must be updated in this code.
:param numpy.ndarray xt: tested new iterate that ensures a descent direction.
:rtype: None
:return: None
"""
# store old values
self.f_last = self.f
self.x_last, self.xc = self.xc, xt
self.iter += 1
if self.debug:
self.printDone()
if self.callback is not None:
self.callback(xt)
def save(self, group):
group.setArray('searchDirection', self.searchDirection)
if getattr(self, 'parent', None) is None:
group.setArray('x', self.xc)
else: # Assume inversion is the parent
group.attrs['phi_d'] = self.parent.phi_d
group.attrs['phi_m'] = self.parent.phi_m
group.attrs['beta'] = self.parent.beta
group.setArray('m', self.xc)
group.setArray('dpred', self.parent.dpred)
class Remember(object):
"""
This mixin remembers all the things you tend to forget.
You can remember parameters directly, naming the str in Minimize,
or pass a tuple with the name and the function that takes Minimize.
For Example::
opt.remember('f',('norm_g', lambda M: np.linalg.norm(M.g)))
opt.minimize(evalFunction, x0)
opt.recall('f')
The param name (str) can also be located in the parent (if no conflicts),
and it will be looked up by default.
"""
_rememberThese = []
def remember(self, *args):
self._rememberThese = args
def recall(self, param):
assert param in self._rememberList, (
"You didn't tell me to remember " + param +
", you gotta tell me what to remember!"
)
return self._rememberList[param]
def _startupRemember(self, x0):
self._rememberList = {}
for param in self._rememberThese:
if isinstance(param, string_types):
self._rememberList[param] = []
elif isinstance(param, tuple):
self._rememberList[param[0]] = []
def _doEndIterationRemember(self, *args):
for param in self._rememberThese:
if isinstance(param, string_types):
if self.debug: print('Remember is remembering: ' + param)
val = getattr(self, param, None)
if val is None and getattr(self, 'parent', None) is not None:
# Look to the parent for the param if not found here.
val = getattr(self.parent, param, None)
self._rememberList[param].append( val )
elif isinstance(param, tuple):
if self.debug: print('Remember is remembering: ' + param[0])
self._rememberList[param[0]].append( param[1](self) )
class ProjectedGradient(Minimize, Remember):
name = 'Projected Gradient'
maxIterCG = 5
tolCG = 1e-1
lower = -np.inf
upper = np.inf
def __init__(self,**kwargs):
super(ProjectedGradient, self).__init__(**kwargs)
self.stoppers.append(StoppingCriteria.bindingSet)
self.stoppersLS.append(StoppingCriteria.bindingSet_LS)
self.printers.extend([
IterationPrinters.itType, IterationPrinters.aSet,
IterationPrinters.bSet, IterationPrinters.comment
])
def _startup(self, x0):
# ensure bound vectors are the same size as the model
if type(self.lower) is not np.ndarray:
self.lower = np.ones_like(x0)*self.lower
if type(self.upper) is not np.ndarray:
self.upper = np.ones_like(x0)*self.upper
self.explorePG = True
self.exploreCG = False
self.stopDoingPG = False
self._itType = 'SD'
self.comment = ''
self.aSet_prev = self.activeSet(x0)
@Utils.count
def projection(self, x):
"""projection(x)
Make sure we are feasible.
"""
return np.median(np.c_[self.lower, x, self.upper], axis=1)
@Utils.count
def activeSet(self, x):
"""activeSet(x)
If we are on a bound
"""
return np.logical_or(x == self.lower, x == self.upper)
@Utils.count
def inactiveSet(self, x):
"""inactiveSet(x)
The free variables.
"""
return np.logical_not(self.activeSet(x))
@Utils.count
def bindingSet(self, x):
"""bindingSet(x)
If we are on a bound and the negative gradient points away from the
feasible set.
Optimality condition. (Satisfies Kuhn-Tucker) MoreToraldo91
"""
        bind_low = np.logical_and(x == self.lower, self.g >= 0)
        bind_up = np.logical_and(x == self.upper, self.g <= 0)
        return np.logical_or(bind_low, bind_up)
@Utils.timeIt
def findSearchDirection(self):
"""findSearchDirection()
Finds the search direction based on either CG or steepest descent.
"""
self.aSet_prev = self.activeSet(self.xc)
allBoundsAreActive = sum(self.aSet_prev) == self.xc.size
if self.debug:
print('findSearchDirection: stopDoingPG: ', self.stopDoingPG)
if self.debug:
print('findSearchDirection: explorePG: ', self.explorePG)
if self.debug:
print('findSearchDirection: exploreCG: ', self.exploreCG)
if self.debug:
print('findSearchDirection: aSet', np.sum(self.activeSet(self.xc)))
if self.debug:
print(
'findSearchDirection: bSet', np.sum(self.bindingSet(self.xc))
)
if self.debug:
print(
'findSearchDirection: allBoundsAreActive: ', allBoundsAreActive
)
if self.explorePG or not self.exploreCG or allBoundsAreActive:
if self.debug:
print('findSearchDirection.PG: doingPG')
self._itType = 'SD'
p = -self.g
else:
if self.debug:
print('findSearchDirection.CG: doingCG')
# Reset the max decrease each time you do a CG iteration
self.f_decrease_max = -np.inf
self._itType = '.CG.'
iSet = self.inactiveSet(self.xc) # The inactive set (free variables)
bSet = self.bindingSet(self.xc)
shape = (self.xc.size, np.sum(iSet))
v = np.ones(shape[1])
i = np.where(iSet)[0]
j = np.arange(shape[1])
if self.debug:
print('findSearchDirection.CG: Z.shape', shape)
Z = sp.csr_matrix((v, (i, j)), shape=shape)
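            # Z injects the free (inactive) variables into the full space, so
            # Z.T * H * Z in reduceHess below is the Hessian restricted to the
            # inactive set, and CG runs only over those variables.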
def reduceHess(v):
# Z is tall and skinny
return Z.T*(self.H*(Z*v))
operator = sp.linalg.LinearOperator(
(shape[1], shape[1]), reduceHess, dtype=self.xc.dtype
)
p, info = sp.linalg.cg(
operator, -Z.T*self.g, tol=self.tolCG, maxiter=self.maxIterCG
)
p = Z*p # bring up to full size
# aSet_after = self.activeSet(self.xc+p)
return p
@Utils.timeIt
def _doEndIteration_ProjectedGradient(self, xt):
"""_doEndIteration_ProjectedGradient(xt)"""
aSet = self.activeSet(xt)
bSet = self.bindingSet(xt)
        self.explorePG = not np.all(aSet == self.aSet_prev)  # explore projected gradient
self.exploreCG = np.all(aSet == bSet) # explore conjugate gradient
f_current_decrease = self.f_last - self.f
self.comment = ''
if self.iter < 1:
# Note that this is reset on every CG iteration.
self.f_decrease_max = -np.inf
else:
self.f_decrease_max = max(self.f_decrease_max, f_current_decrease)
self.stopDoingPG = f_current_decrease < 0.25 * self.f_decrease_max
if self.stopDoingPG:
self.comment = 'Stop SD'
self.explorePG = False
self.exploreCG = True
# implement 3.8, MoreToraldo91
# self.eta_2 * max_decrease where max decrease
# if true go to CG
# don't do too many steps of PG in a row.
if self.debug:
print(
'doEndIteration.ProjGrad, f_current_decrease: ',
f_current_decrease
)
if self.debug:
print(
'doEndIteration.ProjGrad, f_decrease_max: ',
self.f_decrease_max
)
if self.debug:
print('doEndIteration.ProjGrad, stopDoingSD: ', self.stopDoingPG)
class BFGS(Minimize, Remember):
name = 'BFGS'
nbfgs = 10
def __init__(self, **kwargs):
Minimize.__init__(self, **kwargs)
@property
def bfgsH0(self):
"""
Approximate Hessian used in preconditioning the problem.
Must be a SimPEG.Solver
"""
if getattr(self, '_bfgsH0', None) is None:
print("""
Default solver: SolverDiag is being used in bfgsH0
"""
)
self._bfgsH0 = SolverDiag(sp.identity(self.xc.size))
return self._bfgsH0
@bfgsH0.setter
def bfgsH0(self, value):
self._bfgsH0 = value
def _startup_BFGS(self, x0):
self._bfgscnt = -1
self._bfgsY = np.zeros((x0.size, self.nbfgs))
self._bfgsS = np.zeros((x0.size, self.nbfgs))
if not np.any([p is IterationPrinters.comment for p in self.printers]):
self.printers.append(IterationPrinters.comment)
def bfgs(self, d):
n = self._bfgscnt
nn = ktop = min(self._bfgsS.shape[1], n)
return self.bfgsrec(ktop, n, nn, self._bfgsS, self._bfgsY, d)
def bfgsrec(self, k, n, nn, S, Y, d):
"""BFGS recursion"""
if k < 0:
d = self.bfgsH0 * d # Assume that bfgsH0 is a SimPEG.Solver
else:
            khat = 0 if nn == 0 else np.mod(n - nn + k, nn)
            gamma = np.vdot(S[:, khat], d)/np.vdot(Y[:, khat], S[:, khat])
#!/usr/bin/env python3
# Standard library
import datetime as dt
import functools
import json
import logging as log
import os
import re
import warnings
from multiprocessing import Pool
# Third-party
import matplotlib as mpl
import matplotlib.pyplot as plt
import netCDF4 as nc4
import numpy as np
import scipy as sp
import shapely.geometry as geo
from descartes.patch import PolygonPatch
from matplotlib import cm
from mpl_toolkits.basemap import Basemap
try:
from numpy.ma.core import MaskedArrayFutureWarning
except ImportError:
MaskedArrayFutureWarning = None # type: ignore
# Local
from ..utils.netcdf import nc_prepare_file
from ..utils.spatial import path_along_domain_boundary
from .utilities_misc import Domain
from .utilities_misc import Field2D
from .utilities_misc import inds2lonlat
from .utilities_misc import order_dict
__all__ = []
# Plot precip
PRECIP_LEVELS_PSEUDO_LOG_ORIG = np.array(
[
0.1,
0.2,
1.0,
2.0,
4.0,
6.0,
10.0,
20.0,
40.0,
60.0,
]
)
PRECIP_LEVELS_PSEUDO_LOG = np.array(
[
0.1,
0.22,
0.46,
1,
2.2,
4.6,
10,
22,
46,
100,
]
)
PRECIP_LEVELS_PSEUDO_LOG_NARROW = np.array(
[
1,
1.5,
2.2,
3.2,
4.6,
7,
10,
15,
22,
32,
] # 46,
)
PRECIP_LEVELS_LOG = 10 ** np.arange(-1, 2.1, 0.2)
PRECIP_LEVELS_LOG_NARROW = 10 ** np.arange(0, 1.6, 0.1)
assert len(PRECIP_LEVELS_LOG) == 16
# Precip NCL colormap 'precip2_17lev'
# src: www.ncl.ucar.edu/Document/Graphics/ColorTables/precip2_17lev.shtml
PRECIP_COLORS_RGB_RADAR = [
(100, 100, 100),
(150, 130, 150),
(4, 2, 252),
(4, 142, 44),
(4, 254, 4),
(252, 254, 4),
(252, 202, 4),
(252, 126, 4),
(252, 26, 4),
(172, 2, 220),
]
PRECIP_COLORS_HEX_RADAR = [
"{:02X}{:02X}{:02X}".format(r, g, b) for r, g, b in PRECIP_COLORS_RGB_RADAR
]
PRECIP_COLORS_RGB_MCH17 = [
(255, 255, 255),
# (235, 246, 255),
(214, 226, 255),
(181, 201, 255),
(142, 178, 255),
(127, 150, 255),
(114, 133, 248),
(99, 112, 248),
(0, 158, 30),
(60, 188, 61),
(179, 209, 110),
(185, 249, 110),
(255, 249, 19),
(255, 163, 9),
(229, 0, 0),
(189, 0, 0),
(129, 0, 0),
# ( 0, 0, 0),
]
PRECIP_COLORS_HEX_MCH17 = [
"{:02X}{:02X}{:02X}".format(r, g, b) for r, g, b in PRECIP_COLORS_RGB_RADAR
]
def create_cmap_precip(
colors_rgb=PRECIP_COLORS_RGB_RADAR,
levels=PRECIP_LEVELS_PSEUDO_LOG,
over="black",
lognorm=False,
):
"""Create precipitation colormap."""
if len(levels) != len(colors_rgb):
err = ("numbers of precip levels and colors differ: {} != {}").format(
len(levels), len(colors_rgb)
)
raise ValueError(err)
if lognorm:
levels = np.log10(levels)
cols = np.array(colors_rgb) / 255
fct = lambda l: (l - levels[0]) / (levels[-1] - levels[0])
cols_cmap = [(fct(l), c) for l, c in zip(levels, cols)]
cmap = mpl.colors.LinearSegmentedColormap.from_list("precip", cols_cmap)
cmap.set_under("white", alpha=0)
cmap.set_over(over)
return cmap
cmap_precip_pseudo_log = create_cmap_precip(
PRECIP_COLORS_RGB_RADAR, PRECIP_LEVELS_PSEUDO_LOG
)
cmap_precip_pseudo_log__lognorm = create_cmap_precip(
PRECIP_COLORS_RGB_RADAR, PRECIP_LEVELS_PSEUDO_LOG, lognorm=True
)
cmap_precip_pseudo_log_narrow__lognorm = create_cmap_precip(
PRECIP_COLORS_RGB_RADAR, PRECIP_LEVELS_PSEUDO_LOG_NARROW, lognorm=True
)
cmap_precip_log = create_cmap_precip(PRECIP_COLORS_RGB_MCH17, PRECIP_LEVELS_LOG)
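# Usage sketch (illustrative, not from the original code; axis/field names are
# assumptions): the *__lognorm maps expect log10-transformed fields and levels,
# mirroring how plot_precip() handles logtrans=True below, e.g.
#
#     levels = PRECIP_LEVELS_PSEUDO_LOG
#     cmap = cmap_precip_pseudo_log__lognorm
#     ax.contourf(lon, lat, np.log10(precip), levels=np.log10(levels),
#                 cmap=cmap, extend="max")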
def plot_precip(
outfile,
title,
fld,
lon=None,
lat=None,
*,
grid=None,
levels=None,
topo=None,
cmap_topo="terrain",
cmap=None,
clabel=None,
map_limits=None,
logtrans=False,
title_standalone=False,
cbar_standalone=False,
cbar_extend="max",
cbar_orientation="horizontal",
cbar_ticklabel_rotation=None,
cbar_ticklabel_offset=0,
cbar_ticklabel_stride=1,
draw_gridlines=True,
title_x=0.5,
title_y=1.02,
dpi=300,
title_fs=12,
fsS=14,
fsM=16,
fsL=18,
fsScale=1,
):
if title_standalone or cbar_standalone:
outfile = outfile.replace(".png", ".plot.png")
print("plot " + outfile)
if lon is None or lat is None:
if grid is None:
raise ValueError("must pass lon/lat or grid")
lon, lat = grid["lon"], grid["lat"]
n_levels_default = 10
auto_levels = levels is None
fsS *= fsScale
fsM *= fsScale
fsL *= fsScale
fig, ax = plt.subplots()
w_standalone = 0.6 * fig.get_size_inches()[0]
m = setup_map_crclim(
lon,
lat,
ax=ax,
lw_coasts=2,
map_limits=map_limits,
draw_gridlines=draw_gridlines,
)
mlon, mlat = m(lon, lat)
if topo is not None:
# Plot topography
# SR_TMP<
topo_mode = "color"
# SR_TMP>
if topo_mode == "color":
levels_topo = np.arange(0, 4001, 500)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=MaskedArrayFutureWarning)
ax.contourf(mlon, mlat, topo, levels=levels_topo, cmap=cmap_topo)
elif topo_mode == "contour":
levels_topo = np.arange(0, 4001, 1000)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=MaskedArrayFutureWarning)
ax.contour(
mlon, mlat, topo, levels=levels_topo, colors="black", linewidths=0.5
)
else:
raise ValueError("invalid topography plot mode: " + topo_mode)
if auto_levels and logtrans:
# Try to derive levels somewhat smartly
# If it fails, leave it to matplotlib
try:
logmin = np.log10(np.percentile(fld[fld > 0], 1))
logmax = np.log10(np.percentile(fld[fld > 0], 99))
if logmin == logmax:
levels = None
else:
levels = 10 ** np.linspace(logmin, logmax, n_levels_default)
        except Exception:
levels = None
if not logtrans:
fld_plt = fld
levels_plt = levels
else:
# Use logarithmic contour levels
# Manual transformation rather than LogNorm() to allow 'extend'
with np.errstate(divide="ignore"):
fld_plt = np.where(fld > 0, np.log10(fld), np.nan)
levels_plt = np.log10(levels) if levels is not None else None
# Plot field
_lvls = n_levels_default if auto_levels else levels_plt
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=MaskedArrayFutureWarning)
p = ax.contourf(mlon, mlat, fld_plt, _lvls, cmap=cmap, extend=cbar_extend)
if levels_plt is None:
# Extract levels that matplotlib computed on its own
levels_plt = np.asarray(p.levels)
levels = 10 ** levels_plt if logtrans else levels_plt
# Determine how to format colorbar labels
if all(
int(lvl) == float(lvl)
for lvl in levels[cbar_ticklabel_offset::cbar_ticklabel_stride]
):
sigdig = None
stripzero = False
else:
# sigdig = max([2, len(str(int(max(levels))))])
sigdig = max(
[2, max([len("{:f}".format(l).strip("0").split(".")[1]) for l in levels])]
)
stripzero = True
# Add colorbar (optionally as standalone plot)
outfile_cbar = outfile.replace(".plot.png", ".cbar.png")
plot_cbar(
levels,
fig=fig,
p=p,
standalone=cbar_standalone,
label=clabel,
levels_plt=levels_plt,
sigdig=sigdig,
stripzero=stripzero,
cmap=cmap,
extend=cbar_extend,
outfile=outfile_cbar,
orientation=cbar_orientation,
w=w_standalone,
dpi=dpi,
ticklabel_rotation=cbar_ticklabel_rotation,
tick_offset=cbar_ticklabel_offset,
tick_stride=cbar_ticklabel_stride,
fsS=fsS,
fsM=fsM,
fsL=fsL,
fsScale=1,
)
if title:
# Add title (optionally as standalone plot)
if not title_standalone:
ax.set_title(title, fontsize=title_fs, x=title_x, y=title_y)
else:
outfile_title = outfile.replace(".plot.png", ".title.png")
plot_title_standalone(
title, outfile_title, fs=title_fs, w=w_standalone, dpi=dpi
)
fig.savefig(outfile, bbox_inches="tight", dpi=dpi)
plt.close("all")
def plot_title_standalone(title, outfile, *, w=6, dpi=None, fs=12):
"""Save plot title to separate file."""
fig, ax = plt.subplots(figsize=(w, w / 20))
ax.axis("off")
ax.text(
0.5,
-1.0,
title,
transform=fig.transFigure,
fontsize=fs,
horizontalalignment="center",
verticalalignment="bottom",
)
fig.savefig(outfile, bbox_inches="tight", dpi=dpi)
def plot_cbar(
levels,
*,
levels_plt=None,
levels_con=None,
levels_con_inds=None,
fig=None,
p=None,
w=6,
dpi=None,
standalone=False,
fmt=None,
sigdig=None,
stripzero=False,
outfile=None,
align_ticklabels="left",
tick_offset=0,
tick_stride=1,
cmap=None,
label=None,
extend=None,
orientation="horizontal",
ticklabel_rotation=None,
fsS=14,
fsM=16,
fsL=18,
fsScale=1,
):
fsS *= fsScale
fsM *= fsScale
fsL *= fsScale
if levels_plt is None:
levels_plt = levels
# Select and format tickmark labels
if align_ticklabels == "left":
cb_ticks = levels_plt[tick_offset::tick_stride]
cb_ticklabels = format_ticklabels(
levels[tick_offset::tick_stride],
fmt=fmt,
sigdig=sigdig,
stripzero=stripzero,
)
elif align_ticklabels == "right":
cb_ticks = levels_plt[::-1][tick_offset::tick_stride][::-1]
cb_ticklabels = format_ticklabels(
levels[::-1][tick_offset::tick_stride][::-1],
fmt=fmt,
sigdig=sigdig,
stripzero=stripzero,
)
else:
err = "invalid tickmark label alignment '{}'".format(align_ticklabels)
raise ValueError(err)
kwas_plt = dict(levels=levels_plt, cmap=cmap, extend=extend)
kwas_cb = dict(ticks=cb_ticks, orientation=orientation, extend=extend)
if not standalone:
# SR_TMP<
if levels_con is not None:
raise NotImplementedError("levels_con and not standalone")
if levels_con_inds is not None:
raise NotImplementedError("levels_con_inds and not standalone")
# SR_TMP>
# Add cbar to plot
if orientation == "horizontal":
kwas_cb.update(dict(shrink=0.55, pad=0.04))
elif orientation == "vertical":
kwas_cb.update(dict(shrink=0.85)) # , pad=0.04))
cb = fig.colorbar(p, **kwas_cb)
cb.set_label(label, size=fsM)
_kwas = dict(rotation=ticklabel_rotation, fontsize=fsS)
if orientation == "horizontal":
cb.ax.set_xticklabels(cb_ticklabels, **_kwas)
elif orientation == "vertical":
cb.ax.set_yticklabels(cb_ticklabels, **_kwas)
else:
# Plot cbar to separate file
save_cbar_standalone(
outfile,
kwas_plt,
kwas_cb,
w=w,
dpi=dpi,
levels_con=levels_con,
levels_con_inds=levels_con_inds,
label=label,
ticklabels=cb_ticklabels,
ticklabel_rotation=ticklabel_rotation,
fsS=fsS,
fsM=fsM,
)
def save_cbar_standalone(
outfile,
kwas_cont,
kwas_cbar,
*,
label=None,
ticklabels=None,
w=6,
dpi=None,
ticklabel_rotation=None,
levels_con=None,
levels_con_inds=None,
fsS=14,
fsM=16,
):
orientation = kwas_cbar.get("orientation")
fig, ax = plt.subplots(figsize=(w, w / 6))
ax.axis("off")
gs = mpl.gridspec.GridSpec(2, 1, bottom=0.6, height_ratios=[0, 1])
ax0, ax1 = fig.add_subplot(gs[0]), fig.add_subplot(gs[1])
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=MaskedArrayFutureWarning)
p = ax0.contourf(
[[0, 1], [0, 1]], [[0, 1], [0, 1]], [[0, 0], [0, 0]], **kwas_cont
)
ax0.set_visible(False)
cb = fig.colorbar(p, cax=ax1, **kwas_cbar)
if label is not None:
cb.set_label(label, size=fsM)
_kwas = dict(rotation=ticklabel_rotation, fontsize=fsS)
if orientation == "horizontal":
cb.ax.set_xticklabels(ticklabels, **_kwas)
elif orientation == "vertical":
cb.ax.set_yticklabels(ticklabels, **_kwas)
else:
cb.ax.set_xticklabels(ticklabels, **_kwas)
cb.ax.set_yticklabels(ticklabels, **_kwas)
if levels_con is not None:
# Add contour levels
levels = kwas_cont["levels"]
for lvl in levels_con:
lvl_01 = (lvl - levels[0]) / (levels[-1] - levels[0])
if orientation == "horizontal":
cb.ax.axvline(lvl_01, c="black")
elif orientation == "vertical":
cb.ax.axhline(lvl_01, c="black")
else:
raise ValueError("must pass orientation alongsize levels_con")
if levels_con_inds is not None:
# Add contour levels based on color levels
levels = kwas_cont["levels"]
for ind in levels_con_inds:
lvl_01 = ind * 1 / (len(levels) - 1)
if orientation == "horizontal":
cb.ax.axvline(lvl_01, c="black")
elif orientation == "vertical":
cb.ax.axhline(lvl_01, c="black")
else:
raise ValueError("must pass orientation alongsize levels_con_inds")
fig.savefig(outfile, dpi=dpi)
def format_ticklabels(labels, fmt=None, stripzero=False, sigdig=None):
if fmt is None:
fmt = "{:g}"
if sigdig is not None and not (isinstance(sigdig, int) and sigdig > 0):
raise ValueError("sigdig not a positive number: {}".format(sigdig))
labels_fmtd = []
for label in labels:
if not sigdig:
try:
label_fmtd = fmt.format(label)
except TypeError:
label_fmtd = label
else:
if label == 0:
label_fmtd = "0"
if sigdig > 1:
# label_fmtd = label_fmtd+"."+"0"*(sigdig - 1)
label_fmtd = "0.0"
else:
_f = 10 ** (sigdig + 1 - np.ceil(np.log10(label)))
try:
label_fmtd = "{:g}".format(int(label * _f + 0.5) / _f)
except ValueError:
# E.g., in case of NaN
label_fmtd = str(label * _f)
# Append zeros if necessary
if "." in label_fmtd:
pre, post = label_fmtd.split(".")
else:
pre, post = label_fmtd, ""
if pre == "0":
n = len(post.lstrip("0"))
else:
n = len(pre) + len(post)
if n < sigdig:
post += "0" * (sigdig - n)
label_fmtd = "{}.{}".format(pre, post)
if stripzero and label != 0:
            # Remove leading zero before the decimal point
label_fmtd = label_fmtd.lstrip("0")
labels_fmtd.append(label_fmtd)
return labels_fmtd
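# Rough example of the intent (assumed, not from the original code): with
# sigdig=2 and stripzero=True, inputs such as [0.1, 0.22, 1.0] come back as
# two-significant-digit strings along the lines of ['.10', '.22', '1.0'].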
# INPUT
def import_lonlat(lonlat_file, lon_name="lon", lat_name="lat"):
"""Import the fields 'lon' and 'lat' from an NPZ archive."""
try:
with np.load(lonlat_file) as f:
lon, lat = f[lon_name], f[lat_name]
except KeyError as e:
err = "Field {} not found in file {}".format(e, lonlat_file)
raise IOError(err)
except Exception as e:
err = "Error reading lon/lat file: {}({})".format(e.__class__.__name__, e)
raise IOError(err)
else:
return lon, lat
def import_tracks(
cls_reader,
infiles,
lon,
lat,
domain=None,
smoothing_sigma=None,
return_config_id=False,
return_config_tracker=False,
):
"""Import tracks along with the features from a JSON file."""
if domain is None:
domain = Domain(list(zip(*path_along_domain_boundary(lon, lat))))
reader = cls_reader(domain=domain)
tracks = []
config_id = None
config_track = None
for infile in sorted(infiles):
data = reader.read_file(infile, include_tracker_config=return_config_tracker)
# Make sure the identification configs match
try:
data["CONFIG"]["IDENTIFY"]
except KeyError:
log.warning("CONFIG/IDENTIFY not found in {}".format(infile))
if "CONFIG" in data:
if config_id is None:
config_id = data["CONFIG"]["IDENTIFY"]
else:
if config_id != data["CONFIG"]["IDENTIFY"]:
msg = "CONIG/IDENTIFY in {} differs from previous!".format(infile)
log.warning(msg)
# Make sure the tracking configs match
if return_config_tracker:
try:
config_track = data["CONFIG"]["TRACKER"]
except KeyError:
log.warning("CONFIG/TRACKER not found in {}".format(infile))
else:
if config_track != data["CONFIG"]["TRACKER"]:
msg = "CONIG/TRACKER in {} differs from previous!".format(infile)
log.warning(msg)
# Extract tracks
new_tracks = data["TRACKS"]
tracks.extend(new_tracks)
log.info("read {} tracks from file {}".format(len(new_tracks), infile))
results = [tracks]
if return_config_id:
results.append(config_id)
if return_config_tracker:
results.append(config_track)
return results
def write_old_tracks(
outfile, tracks, cls_writer_json, cls_writer_bin, configs=None, block_order=None
):
outfile_bin = outfile.replace(".json", ".npz")
    # Extract contours and minima (necessary to re-build cyclones)
features = [f for t in tracks for f in t.features() if not f.is_mean()]
contours = [c for f in features for c in f.contours()]
minima = [m for f in features for m in f.minima()]
# Initialize and configure JSON writer
writer_json = cls_writer_json()
writer_json.set_config(save_paths=False, contour_path_file=outfile_bin)
if block_order:
writer_json.set_config(block_order=block_order)
# If given, add configs
if configs:
for name, config in configs.items():
writer_json.add_config({name: config})
# Add tracks etc.
writer_json.add_tracks(tracks)
writer_json.add_contours(contours)
writer_json.add_points("MINIMA", minima)
# Write JSON file
writer_json.write_file(outfile)
# Write contour paths to binary
writer_bin = cls_writer_bin()
writer_bin.write_contour_path_file(outfile_bin, contours)
# INPUT: NETCDF/NPZ
def read_topography(input_file, field_name):
"""Read the topography field from the respective input file."""
log.info("read topography field {n} from {f}".format(n=field_name, f=input_file))
try:
# Try netCDF file
with nc4.Dataset(input_file, "r") as fi:
lon = fi["lon"][:]
lat = fi["lat"][:]
fld_raw = fi[field_name][0] # strip leading time dimension
except Exception:
# Try NPZ archive
try:
with np.load(input_file) as f:
fld_raw = f[field_name]
lon = f["lon"]
lat = f["lat"]
except IOError:
err = "Cannot import file (unknown format): {}".format(input_file)
raise IOError(err) from None
fld = Field2D(fld_raw, lon, lat, name=field_name)
return fld
# INPUT: JSON
class IOReaderJsonBase:
def __init__(self):
self._header = {}
def read_file(self, filename, **kwas):
self._data_path = os.path.dirname(os.path.abspath(filename))
with open(filename) as f:
jstr = f.read()
jdat = self.read_string(jstr, **kwas)
return jdat
def get_header(self):
return self._header
def _json_remove_excessive_newlines(jstr, ind, n):
"""Remove newline before lines with a certain indent.
Problem:
The JSON writer either inserts no newline characters at all, or
after every entry. The former is impractical, the latter blows up
JSON files containing large sets of data (e.g. contour coordinates).
Solution:
To keep the JSON file structure clear, keep newlines before lines
with an indent of up to N spaces. Newline characters before every
line with more indent are removed.
Arguments:
- jstr: Indented JSON string.
- ind: Number of spaces per level indent.
- n: Lowest level for which newlines are retained.
"""
# Remove all newlines for >N spaces indent.
rx1 = "\n {{{n}, }}(?=[^ ])".format(n=n * ind)
# Remove newline before closing bracket of list entries at indent N
rx2 = "\n {{{n}}}(?=[\]}}])".format(n=(n - 1) * ind)
jstr = re.sub(rx1, "", jstr)
jstr = re.sub(rx2, "", jstr)
return jstr
# INPUT: BINARY
class IOReaderBinaryBase:
def __init__(self):
pass
# OUTPUT: JSON
class IOWriterJsonBase:
def __init__(self):
self._cache = {}
self._header = {}
def write_file(self, filename):
"""Write the cached data to a file.
Arguments:
- filename: Name of the output JSON file.
"""
jstr = self.write_string()
with open(filename, mode="w") as f:
f.write(jstr)
def write_string(self):
"""Merge the list of cached JSON strings into a single one.
The various write methods create stand-alone json blocks as strings.
Merge the blocks into one block.
If the property block_order is set (list of names), the blocks
in the list are written in the respective order.
"""
if len(self._cache) == 0:
raise ValueError("Nothing to write!")
# Order the blocks (alphabetically if not specified otherwise)
if not "block_order" in self._header:
block_list_raw = {k: v for k, v in sorted(self._cache.items())}
else:
block_list_raw = {}
names_all = list(self._cache.keys())
for name in self._header["block_order"]:
if name in names_all:
block_list_raw[name] = self._cache[name]
names_all.remove(name)
for name in names_all:
block_list_raw[name] = self._cache[name]
# Turn the objects in each block into JSON strings
block_list = [
self._write_string_method(name)(objects)
for name, objects in block_list_raw.items()
]
# Add the header block
block_list.insert(0, self.write_header())
# Make the stand-alone blocks appendable
block_list[:-1] = [re.sub(r"\n\}\Z", ", ", b) for b in block_list[:-1]]
block_list[1:] = [re.sub(r"\A{\n", "", b) for b in block_list[1:]]
# Return the blocks as one
return "\n".join(block_list)
def write_header(self):
"""Write the header information to a JSON string."""
name = "HEADER"
header = {name: order_dict(self._header)}
jstr = json.dumps(header, indent=2)
jstr = _json_remove_excessive_newlines(jstr, 2, 3)
return jstr
def set_config(self, **kwas):
"""Add configuration parameters to the HEADER block."""
for key, val in kwas.items():
if key not in self.__class__.valid_header_param_list:
msg = (
"Invalid HEADER parameter '{k}'." " Valid parameters:\n {pl}"
).format(k=key, pl="\n ".join(self.__class__.valid_header_param_list))
raise ValueError(msg)
self._header[key] = val
def _add_to_cache(self, name, objs):
if isinstance(objs, dict):
if name not in self._cache:
self._cache[name] = {}
self._cache[name].update(objs)
else:
# SR_TMP<
try:
objs = sorted(objs, key=lambda o: o.id())
except TypeError:
objs = sorted(objs, key=lambda o: o.id)
# SR_TMP>
if name not in self._cache:
self._cache[name] = []
self._cache[name].extend(objs)
def write_string_objs_info(
self, name, objs, ind=2, max_ind_lvl=3, tags=None, **kwas
):
json_dict = {}
if tags:
json_dict[name] = {}
for tag in tags:
json_dict[name][tag] = []
for obj in sorted(objs):
tag = obj.type
json_dict[name][tag].append(obj.get_info())
max_ind_lvl += 2
else:
json_dict[name] = []
for obj in sorted(objs):
json_dict[name].append(obj.get_info(**kwas))
jstr = json.dumps(json_dict, indent=ind)
jstr = _json_remove_excessive_newlines(jstr, ind, max_ind_lvl)
return jstr
def add_config(self, config):
self._add_to_cache("CONFIG", config)
def write_string_config(self, config):
"""Write the config dict to JSON.
The formatted JSON data is returned as a string.
Arguments:
- config: The config dict.
"""
name = "CONFIG"
jdat = {name: {}}
for name_conf, conf in sorted(config.items()):
log.info(
"write config section {nsc} to {nc}".format(nc=name, nsc=name_conf)
)
jdat[name][name_conf] = order_dict(config[name_conf])
jstr = json.dumps(jdat, indent=2)
return jstr
# Abstract methods (must be overridded by subclasses)
def _write_string_method(self, name, register=None):
if not register:
err = (
"IOWriterJsonBase._write_string_method must be overridden "
"by subclass!"
)
raise NotImplementedError(err)
for key, fct in register.items():
if key.endswith("*"):
if name.startswith(key[:-1]):
return fct
if key == name:
return fct
err = "No write_string_* method found for '{}'".format(name)
raise ValueError(err)
# OUTPUT: BINARY
class IOWriterBinaryBase:
def __init__(self):
pass
class FeatureTrackIOWriterBinary(IOWriterBinaryBase):
def __init__(self):
super().__init__()
def write_feature_path_file(self, file_name, features):
data = {str(feature.id()): feature.shell for feature in features}
np.savez_compressed(file_name, **data)
# OUTPUT: PLOTTING
def plot_histogram(
outfile,
title,
data,
nbins=None,
normalize=False,
scale_factor=None,
    xrange=None,
    **kwargs,
):
"""Create a histogram plot.
Arguments:
- outfile: name of output file (incl. suffix)
- title: plot title
- data: data to plot
Optional arguments:
- nbins: number of bins
- normalize: normalize y data (divide by total)
- scale_factor: factor the histogram values are multiplied with
- xrange: data range for x-axis
    - **kwargs: all other arguments are passed on to "bar_plot"
"""
kwargs_hist = {}
if nbins:
kwargs_hist["bins"] = nbins
if xrange:
kwargs_hist["range"] = xrange
hist, bins = np.histogram(data, **kwargs_hist)
width = 0.8 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
if normalize:
hist = hist.astype(float) / float(len(data))
if scale_factor:
hist = hist.astype(float) * scale_factor
title += " (scaled by {:.3f})".format(scale_factor)
bar_plot(outfile, title, center, hist, width=width, **kwargs)
def plot_histogram_2d(
outfile,
title,
xdata,
ydata,
xnbins=None,
ynbins=None,
xrange=None,
yrange=None,
normalize=False,
scale_factor=None,
**kwargs,
):
"""Create a 2D histogram plot.
Arguments:
- outfile: name of output file (incl. suffix)
- title: plot title
- xdata: data to plot (x axis)
- ydata: data to plot (y axis)
Optional arguments:
- xnbins: number of bins
- ynbins: number of bins
- xrange: data range for x-axis
- yrange: data range for y-axis
- normalize: normalize y data (divide by total)
- scale_factor: factor the histogram values are multiplied with
    - **kwargs: all other arguments are passed on to "color_plot"
"""
if xrange is None:
        xrange = [xdata.min(), xdata.max()]
if yrange is None:
yrange = [ydata.min(), ydata.max()]
range = [xrange, yrange]
if xnbins is None:
xnbins = 10
if ynbins is None:
ynbins = 10
nbins = [xnbins, ynbins]
    hist, xbins, ybins = np.histogram2d(xdata, ydata, bins=nbins, range=range)
if normalize:
hist = hist.astype(float) / float(len(xdata) + len(ydata))
if scale_factor:
hist = hist.astype(float) * scale_factor
title += " (scaled by {:.3f})".format(scale_factor)
color_plot(outfile, title, hist, xbins, ybins, **kwargs)
def color_plot(
outfile,
title,
data,
*,
verbose=True,
lon=None,
lat=None,
domain=None,
xbins=None,
ybins=None,
zbounds=None,
levels=None,
zticks=None,
zticklabels=None,
cmap=None,
cbar_over=False,
cbar_under=False,
add_colorbar=True,
domain_boundary=None,
):
if verbose:
print("plot {}".format(outfile))
if cmap is None:
cmap = cm.Greys
fig, ax = plt.subplots()
extent = None
    if xbins is not None and ybins is not None:
extent = [xbins[0], xbins[-1], ybins[0], ybins[-1]]
if cbar_over and cbar_under:
extend = "both"
elif cbar_over:
extend = "max"
elif cbar_under:
extend = "min"
else:
extend = "neither"
vmin, vmax = None, None
if levels is not None:
vmin, vmax = levels[0], levels[-1]
if lon is None or lat is None:
norm = None
if zbounds:
norm = mpl.colors.BoundaryNorm(zbounds, cmap.N)
im = plt.imshow(
data,
interpolation="nearest",
origin="low",
cmap=cmap,
extent=extent,
vmin=vmin,
vmax=vmax,
norm=norm,
)
ax.images.append(im)
else:
im = ax.contourf(
lon,
lat,
data,
levels=levels,
cmap=cmap,
extend=extend,
vmin=vmin,
vmax=vmax,
latlon=True,
)
m = setup_map_crclim(lon, lat, ax=ax, map_limits=domain)
if domain_boundary:
px, py = domain_boundary
ax.plot(px, py, linewidth=1, color="black")
if add_colorbar:
cbar = plt.colorbar(im, extend=extend)
if zticks:
cbar.set_ticks(zticks)
if zticklabels:
cbar.set_ticklabels(zticklabels)
ax.set_aspect("auto")
ax.set_title(title)
fig.savefig(outfile)
plt.close()
def bar_plot(
outfile,
title,
center,
hist,
width=None,
xticks=None,
yticks=None,
xlabels=None,
ylabels=None,
xticks2=None,
yticks2=None,
xlabels2=None,
ylabels2=None,
xrange=None,
yrange=None,
xscale=None,
yscale=None,
verbose=True,
):
"""Create a bar plot.
Arguments:
- outfile: name of output file (incl. suffix)
- title: plot title
- center: Center points of the bars
- hist: histogram data to plot
Optional arguments:
- width: width of the bars
    - plot_bars: plot the data using vertical bars or lines
- xrange: data range for x-axis
- xticks: list of tickmarks for bottom x-axis
- yticks: list of tickmarks for left y-axis
- xlabels: list of tickmark labels for bottom x-axis
- ylabels: list of tickmark labels for left y-axis
- xticks2: list of tickmarks for bottom x-axis
- yticks2: list of tickmarks for right y-axis
- xlabels2: list of tickmark labels for top x-axis
- ylabels2: list of tickmark labels for right y-axis
- xscale: scale of x axis
- yscale: scale of y axis
- verbose: verbosity switch
"""
if verbose:
print("plot {}".format(outfile))
fig, ax = plt.subplots()
ax.bar(center, hist, align="center", width=width)
ax.set_title(title)
ax.grid(True)
    # Set axis ranges and scales
if xrange:
ax.set_xlim(xrange)
if yrange:
ax.set_ylim(yrange)
if xscale:
ax.set_xscale(xscale)
if yscale:
ax.set_yscale(yscale)
# Set tick marks and labels
if xticks:
ax.set_xticks(xticks)
if xlabels:
ax.set_xticklabels(xlabels)
if yticks:
ax.set_yticks(yticks)
if ylabels:
ax.set_yticklabels(ylabels)
# Add second x-axis on top
    if any(i for i in [xticks2, xlabels2]):
ax2 = ax.twiny()
ax2.set_xbound(ax.get_xbound())
ax2.set_xlim(ax.get_xlim())
ax2.grid(True, linestyle="-")
if xticks2:
ax2.set_xticks(xticks2)
if xlabels2:
ax2.set_xticklabels(xlabels2)
title_pos_y = ax.title.get_position()[1]
ax.title.set_y(title_pos_y + 0.05)
# add second y-axis on the right
if any(i for i in [yticks2, ylabels2]):
raise NotImplementedError
fig.savefig(outfile)
def xy_plot(
outfile,
title,
data_x,
data_y,
*,
type="scatter",
multi=False,
color=None,
verbose=True,
xlabel=None,
ylabel=None,
xlabel2=None,
ylabel2=None,
xticks=None,
yticks=None,
xlabels=None,
ylabels=None,
xticks2=None,
yticks2=None,
xlabels2=None,
ylabels2=None,
xrange=None,
yrange=None,
xscale=None,
yscale=None,
):
if verbose:
print("plot {}".format(outfile))
fig, ax = plt.subplots()
if type == "scatter":
symbol = "o"
elif type == "line":
symbol = ""
else:
err = "Invalid plot type {}".format(type)
raise ValueError(err)
# Plot data
if not multi:
data_x, data_y = [data_x], [data_y]
for dx, dy in zip(data_x, data_y):
        lines = ax.plot(dx, dy, symbol)
        if color:
            for line in lines:
                line.set_color(color)
# Set axis ranges and scales
if xrange:
ax.set_xlim(xrange)
if yrange:
ax.set_ylim(yrange)
if xscale:
ax.set_xscale(xscale)
if yscale:
ax.set_yscale(yscale)
# Set tick marks and labels
if xticks:
ax.set_xticks(xticks)
if xlabels:
ax.set_xticklabels(xlabels)
if yticks:
ax.set_yticks(yticks)
if ylabels:
ax.set_yticklabels(ylabels)
if xlabel:
ax.set_xlabel(xlabel)
if ylabel:
ax.set_ylabel(ylabel)
# Add second x-axis on top
    if any(i for i in [xticks2, xlabels2, xlabel2]):
ax2 = ax.twiny()
ax2.set_xbound(ax.get_xbound())
ax2.set_xlim(ax.get_xlim())
ax2.grid(True, linestyle="-")
if xticks2:
ax2.set_xticks(xticks2)
if xlabels2:
ax2.set_xticklabels(xlabels2)
if xlabel2:
ax2.set_xlabel(xlabel2)
title_pos_y = ax.title.get_position()[1]
ax.title.set_y(title_pos_y + 0.05)
# add second y-axis on the right
if any(i for i in [yticks2, ylabels2]):
raise NotImplementedError
ax.set_title(title)
fig.savefig(outfile)
def reduce_colormap(cmap, name=None, n=20, first=None, last=None):
n_colors = 256
    indices = np.round(np.linspace(0, n_colors - 1, n)).astype(int)
colors = [cmap(i) for i in indices]
if first is not None:
colors[0] = first
if last is not None:
colors[-1] = last
return mpl.colors.ListedColormap(colors, name=name)
class InFileTimestep:
"""Represents an input file at a certain timestep.
To read a file, a timestep is passed, which is used to complete the file
name from a template where only the timestep information is missing.
"""
def __init__(self, tmpl, fct, ts_format="%Y%m%d%H", **kwas):
assert isinstance(tmpl, str)
self.tmpl = tmpl
self.fct = fct
self.ts_format = ts_format
self.kwas = kwas
self._ifile_prev = None
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.tmpl)
def __eq__(self, other):
if self.tmpl == other.tmpl:
return True
return False
def __hash__(self):
return id(self)
def read_fields(self, ts, names, lon, lat, **kwas):
ifile = self.get_infile(ts)
if self._ifile_prev == ifile:
return None
log.info(
"[{}] read {} from {}".format(
ts.strftime(self.ts_format), ", ".join(names), ifile
)
)
kwas.update(self.kwas)
try:
fields = self.fct(ifile, names, lon=lon, lat=lat, **kwas)
except Exception as e:
err = "Cannot read fields [{}] from {}: {}({})".format(
", ".join(names), ifile, e.__class__.__name__, e
)
raise IOError(err)
self._ifile_prev = ifile
return dict(zip(names, fields))
def get_infile(self, ts):
sts = ts.strftime(self.ts_format)
yyyy, mm, dd, hh, nn = sts[:4], sts[4:6], sts[6:8], sts[8:10], sts[10:12]
ifile = self.tmpl.format(YYYY=yyyy, MM=mm, DD=dd, HH=hh, NN=nn)
return ifile
class InField:
def __init__(
self,
name,
ifile,
fnames,
pp_fct=None,
*,
assoc=None,
del_old=True,
infile_lonlat=None,
**kwas,
):
self.name = name
self.ifile = ifile
self.fnames = [fnames] if isinstance(fnames, str) else fnames
self.pp_fct = pp_fct
self.assoc = assoc
# SR_TMP<
if infile_lonlat is None:
self.lon = None
self.lat = None
else:
self.lon, self.lat = self._read_lonlat(infile_lonlat)
# SR_TMP>
self.pp_kwas = kwas
self.del_old = del_old
self._raw_data = None
self._data = None
def __repr__(self):
return "{}({}: {}, {})".format(
self.__class__.__name__, self.name, self.ifile, self.fnames
)
def __eq__(self, other):
if (
self.name == other.name
and self.ifile == other.ifile
and self.assoc == other.assoc
and self.fnames == other.fnames
and
# self._raw_data == other._raw_data):
np.array_equal(self._raw_data, other._raw_data)
):
return True
return False
@classmethod
def manager(cls, *args, **kwas):
return InFieldManager(*args, **kwas)
@classmethod
def track(cls, *args, **kwas):
return InField_Track(*args, **kwas)
def _read_lonlat(self, infile):
with nc4.Dataset(infile, "r") as fi:
lon = fi["lon"][:]
lat = fi["lat"][:]
return lon, lat
def data(self, ts):
if not isinstance(self._data, dict):
return self._data
if len(self._data) == 0:
return None
# Make sure types of timesteps match (int/datetime)
_some_ts = next(iter(self._data.keys()))
if isinstance(ts, dt.datetime):
if not isinstance(_some_ts, dt.datetime):
# Convert datetime to int
ts = int(ts.strftime(self.ifile.ts_format))
elif isinstance(_some_ts, dt.datetime):
# Convert int to datetime
ts = dt.datetime.strptime(str(ts), self.ifile.ts_format)
if self.del_old:
# Remove old fields
for key_ts in [k for k in self._data.keys()]:
if key_ts < ts:
del self._data[key_ts]
return self._data.get(ts)
def preproc(self, timestep):
self.pp_kwas["assoc"] = self.assoc
self.pp_kwas["ts"] = timestep
self.pp_kwas["fnames"] = self.fnames
if self._data is None:
old_data = None
else:
assert len(self.fnames) == 1
old_data = self._data
if self.pp_fct:
# Use custom preproc function with arguments
new_data = self.pp_fct(self._raw_data, old_data=old_data, **self.pp_kwas)
elif self.pp_kwas:
# Use default preproc function with arguments
new_data = self.pp_base(self._raw_data, old_data=old_data, **self.pp_kwas)
elif len(self._raw_data) == 1:
# No arguments, no preproc!
new_data = next(iter(self._raw_data.values()))
else:
err = (
"For multiple fields, a preprocessing function must "
"be provided to reduce the fields to a single field"
)
raise Exception(err)
self._data = new_data
@classmethod
def pp_base(
cls,
fld,
conversion_factor=None,
smoothing_sigma=None,
slice=None,
minval=None,
maxval=None,
**kwas,
):
"""Basic pre-processing of a single field."""
if len(fld) != 1:
err = "pp_base can only handle one field ({} passed)".format(len(fld))
raise ValueError(err)
fld = next(iter(fld.values()))
if slice:
fld = slice(fld)
if conversion_factor:
fld = fld * conversion_factor
if smoothing_sigma:
fld = sp.ndimage.gaussian_filter(fld, sigma=smoothing_sigma, order=0)
if minval is not None:
fld[fld < minval] = np.nan
if maxval is not None:
fld[fld > maxval] = np.nan
return fld
class InField_Track(InField):
def __init__(self, *args, **kwas):
kwas["del_old"] = kwas.get("del_old", True)
super().__init__(*args, **kwas)
def preproc(self, timestep):
super().preproc(timestep)
tracks, config_id, config_tracks = self._data
ts_start = min([track.ts_start() for track in tracks])
ts_end = max([track.ts_end() for track in tracks])
tracks_ts = select_tracks_ts(tracks, ts_start, ts_end)
self._data = tracks_ts
class InFieldManager:
"""Manage input fields used for plotting."""
def __init__(self):
self.elements = []
self.files = {}
def __repr__(self):
return "{}({} files, {} elements)".format(
self.__class__.__name__, len(self.files), len(self.elements)
)
def __iter__(self):
return iter(self.elements)
def n(self):
return len(self.elements)
def update(self, other):
"""Update managers with one or more others."""
# Deal with multiple others (i.e. a list of Managers)
# Note: sequence hard-coded as list; support for arbitrary
# containers could be implemented in case it's ever necessary
if isinstance(other, list):
for other_ in other:
self.update(other_)
return
# If not a list, other must be the same class as self
elif not isinstance(other, self.__class__):
err = "invalid class of other (must be one of [{}]): {}".format(
", ".join([c.__name__ for c in [self.__class__, list]]),
other.__class__.__name__,
)
raise ValueError(err)
# Update files dict and elements list
# Note: Need to manually compare the InFileTimestep keys ('==')
# because 'is' comparison won't work (__eq__ vs. __hash__)
for key_other, val_other in other.files.items():
for key_self, val_self in self.files.items():
if key_self == key_other:
del self.files[key_self]
for vs in val_self:
self.elements.remove(vs[0])
break
self.files[key_other] = val_other
for vs in val_other:
self.elements.append(vs[0])
def add_elements(self, elements):
for element in elements:
            self.add_element(element)
def add_element(self, element):
self.elements.append(element)
field = element.field
if field:
# Store input file of field
if field.ifile:
if field.ifile not in self.files:
self.files[field.ifile] = []
self.files[field.ifile].append((element, 0))
# Store input file of associated field
if field.assoc:
if field.assoc.ifile:
if field.assoc.ifile not in self.files:
self.files[field.assoc.ifile] = []
self.files[field.assoc.ifile].append((element, 1))
def read_data(self, ts, lon, lat, trim_bnd_n, **kwas):
"""For every field, read the data from file."""
# Loop over input files
fields_priority = []
fields_other = []
for file, plot_elements in self.files.items():
# Collect field names
fnames = []
for element, type_ in plot_elements:
if type_ == 0:
fnames.extend(element.field.fnames)
elif type_ == 1:
fnames.extend(element.field.assoc.fnames)
fnames = sorted(set(fnames))
# Read data (if it's not in memory already)
try:
new_data = file.read_fields(
ts, fnames, lon, lat, trim_bnd_n=trim_bnd_n, **kwas
)
except Exception as e:
err = "error reading file {}: {}({})".format(file, type(e).__name__, e)
raise Exception(err)
if new_data is None:
continue
# Add new data
for element, type_ in plot_elements:
if type_ == 0:
field = element.field
if field not in fields_priority and field not in fields_other:
fields_other.append(field)
elif type_ == 1:
field = element.field.assoc
if field in fields_other:
fields_other.remove(field)
if field not in fields_priority:
fields_priority.append(field)
if field._raw_data is None:
field._raw_data = {}
for fname in field.fnames:
field._raw_data[fname] = new_data[fname]
# Post-process the new raw data
# First, make sure all fields which are associates of others have
# been post-processed, as these are used for post-processing those
# fields which they are associates of
for field in fields_priority:
field.preproc(ts)
for field in fields_other:
field.preproc(ts)
class PlotElement:
def __init__(self, name=None, field=None, **kwas):
if "fld" in kwas:
msg = (
"{}: initialized with argument 'fld'; " "did you mean 'field'?"
).format(self.__class__.__name__)
log.warning(msg)
self.name = name if name else "noname"
self.field = field
self.pltkw = kwas.pop("pltkw", {})
self.kwas_misc = kwas # SR_TMP
def __repr__(self):
return "{}({}: {})".format(self.__class__.__name__, self.name, self.field)
def __eq__(self, other):
if self.name == other.name and self.field == other.field:
return True
return False
@classmethod
def contour(cls, *args, **kwas):
return PlotElement_Contour(*args, **kwas)
@classmethod
def color(cls, *args, **kwas):
return PlotElement_Color(*args, **kwas)
@classmethod
def shading(cls, *args, **kwas):
return PlotElement_Shading(*args, **kwas)
@classmethod
def line(cls, *args, **kwas):
return PlotElement_Line(*args, **kwas)
@classmethod
def feature_shading(cls, *args, **kwas):
return PlotElement_FeatureShading(*args, **kwas)
@classmethod
def feature_contour(cls, *args, **kwas):
return PlotElement_FeatureContour(*args, **kwas)
@classmethod
def feature_track_old(cls, *args, **kwas):
return PlotElement_FeatureTrack_Old(*args, **kwas)
@classmethod
def feature_track(cls, *args, **kwas):
return PlotElement_FeatureTrack(*args, **kwas)
def derive(self, name=None, **kwas):
if name is None:
name = "derived({})".format(self.name)
kwas_other = dict(field=self.field)
kwas_other.update(self._derive_kwas())
for key, val in kwas.copy().items():
if isinstance(val, dict) and key in kwas_other:
tmp = kwas.pop(key)
kwas_other[key].update(val)
kwas_other.update(self.kwas_misc)
kwas_other.update(kwas)
other = self.__class__(name, **kwas_other)
return other
def _derive_kwas(self):
kwas = dict(
pltkw=self.pltkw.copy(),
)
try:
kwas["cbar"] = (self.cbar_kwas.copy(),)
except AttributeError:
pass
return kwas
class PlotElement_Contour(PlotElement):
def __init__(self, *args, **kwas):
super().__init__(*args, **kwas)
def plot(self, ax, m, mlon, mlat, ts, **kwas):
if self.field.data(ts) is None:
return
if np.nansum(self.field.data(ts)) > 0:
ax.contour(mlon, mlat, self.field.data(ts), **self.pltkw)
class PlotElement_Color(PlotElement):
def __init__(self, *args, cbar=None, **kwas):
if "cmap" in kwas:
kwas["pltkw"]["cmap"] = kwas.pop("cmap")
super().__init__(*args, **kwas)
self.add_cbar = bool(cbar)
try:
self.cbar_kwas = {k: v for k, v in cbar.items()}
except AttributeError:
self.cbar_kwas = {}
self.labelsize = self.cbar_kwas.pop("labelsize", 20)
# SR_TMP<
deprec = ["color_under", "color_over", "alpha_under", "alpha_over"]
if any(i in kwas for i in deprec):
err = "arguments deprecated: {}".format(deprec)
raise ValueError(err)
# SR_TMP>
def plot(self, ax, m, mlon, mlat, ts, **kwas):
if self.field.data(ts) is None:
return
p = ax.contourf(mlon, mlat, self.field.data(ts), **self.pltkw)
if self.add_cbar:
cbar = ax.figure.colorbar(p, **self.cbar_kwas)
cbar.ax.tick_params(labelsize=self.labelsize)
if "label" in self.cbar_kwas:
cbar.set_label(self.cbar_kwas["label"], size=self.labelsize)
# cbar.ax.set_xlabel(self.cbar_kwas["label"], size=self.labelsize)
def _derive_kwas(self):
return dict(
cbar=self.cbar_kwas.copy(),
pltkw=self.pltkw.copy(),
)
class PlotElement_Shading(PlotElement):
def __init__(self, *args, lower=None, upper=None, **kwas):
super().__init__(*args, **kwas)
if lower is None and upper is None:
err = "{}: either lower or upper threshold must be passed".format(
self.__class__.__name__
)
raise ValueError(err)
self.lower = lower
self.upper = upper
def plot(self, ax, m, mlon, mlat, ts, **kwas):
if self.field is None:
err = "cannot plot {}: field is None".format(self.name)
raise Exception(err)
if self.field.data(ts) is None:
return
lower = self.field.data(ts).min() if self.lower is None else self.lower
upper = self.field.data(ts).max() if self.upper is None else self.upper
p = ax.contourf(
mlon,
mlat,
self.field.data(ts),
levels=[lower, upper],
vmin=lower,
vmax=upper,
**self.pltkw,
)
def _derive_kwas(self):
return dict(
pltkw=self.pltkw.copy(),
)
class PlotElement_Line(PlotElement):
def __init__(self, *args, **kwas):
super().__init__(*args, **kwas)
def plot(self, ax, m, mlon, mlat, ts, **kwas):
if self.field.data(ts) is None:
return
for fid, line in self.field.data(ts).items():
px, py = line
if m:
px, py = m(px, py)
ax.plot(px, py, **self.pltkw)
class PlotElement_FeatureShading(PlotElement):
def __init__(self, *args, **kwas):
super().__init__(*args, **kwas)
def plot(self, ax, m, mlon, mlat, ts, **kwas):
if self.field.data(ts) is None:
return
if self.pltkw.get("color") is None:
return
features = self.field.data(ts)
if features is not None:
for feature in features:
ax_add_feature_shading(
ax,
m,
feature,
mlon=mlon,
mlat=mlat,
convert_lonlat=True,
**self.pltkw,
)
class PlotElement_FeatureContour(PlotElement):
def __init__(self, *args, cmode=None, pltkw=None, cmap=None, cbar=None, **kwas):
super().__init__(*args, **kwas)
self.cmode = cmode
self.pltkw = {} if pltkw is None else pltkw
self.cmap = cmap
self.cbarkw = {} if cbar is None else cbar.copy()
def plot(self, ax, m, mlon, mlat, ts, *, label_features, label_events, **kwas):
# Prepare some parameters
vmin = self.kwas_misc.get("vmin", 0)
vmax = self.kwas_misc.get("vmax")
if self.cmode is not None and not vmax:
features = self.field.data(ts)
if features is not None:
if self.cmode == "size":
vmax = max([f.n for f in features])
elif self.cmode == "size/log10":
vmax = max([np.log10(f.n) for f in features])
else:
err = "cmode {}: vmax".format(self.cmode)
raise NotImplementedError(err)
# Select features by timestep
if isinstance(ts, dt.datetime):
ts_int = int(ts.strftime(self.ts_format))
else:
ts_int = int(ts)
features = self.field.data(ts)
if features is None:
features = []
else:
features = [f for f in features if f.timestep == ts_int]
for feature in features:
# Determine color of outline
if self.cmode is None:
pass
elif self.cmode.startswith("size"):
if not self.cmap:
raise Exception(
"{}({}): missing cmap".format(
self.__class__.__name__, self.name
)
)
if self.cmode == "size":
n = feature.n
elif self.cmode == "size/log10":
                    n = np.log10(feature.n)
import scipy.optimize as opt
import numpy as np
import pylab as plt
#define model function and pass independant variables x and y as a list
def twoD_Gaussian(xy, amplitude, xo, yo, sigma_x, sigma_y, theta, offset):
x = xy[0]
y = xy[1]
xo = float(xo)
yo = float(yo)
    # The coefficients below are reconstructed (assumed standard rotated
    # 2-D Gaussian parameterization); the source breaks off mid-expression
    # at the definition of `a`.
    a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
    b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
    c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
    g = offset + amplitude*np.exp(-(a*((x-xo)**2) + 2*b*(x-xo)*(y-yo)
                                    + c*((y-yo)**2)))
    return g.ravel()
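# Usage sketch (illustrative; array names and sizes are assumptions): fit the
# model to a flattened 2-D image with scipy.optimize.curve_fit.
#
#     x, y = np.meshgrid(np.arange(201), np.arange(201))
#     initial_guess = (3, 100, 100, 20, 40, 0, 10)
#     popt, pcov = opt.curve_fit(twoD_Gaussian, (x, y), data.ravel(),
#                                p0=initial_guess)
#     fit = twoD_Gaussian((x, y), *popt).reshape(201, 201)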
import libjevois as jevois
import cv2 as cv
import numpy as np
import sys
## Object detection and recognition using OpenCV Deep Neural Networks (DNN)
#
# This module runs an object detection deep neural network using the OpenCV DNN
# library. Detection networks analyze a whole scene and produce a number of
# bounding boxes around detected objects, together with identity labels
# and confidence scores for each detected box.
#
# This module supports detection networks implemented in TensorFlow, Caffe,
# Darknet, Torch, etc as supported by the OpenCV DNN module.
#
# Included with the standard JeVois distribution are:
#
# - OpenCV Face Detector, Caffe model
# - MobileNet + SSD trained on Pascal VOC (20 object classes), Caffe model
# - MobileNet + SSD trained on Coco (80 object classes), TensorFlow model
# - MobileNet v2 + SSD trained on Coco (80 object classes), TensorFlow model
# - Darknet Tiny YOLO v3 trained on Coco (80 object classes), Darknet model
# - Darknet Tiny YOLO v2 trained on Pascal VOC (20 object classes), Darknet model
#
# See the module's constructor (__init__) code and select a value for \b model to switch network. Object categories are
# as follows:
# - The 80 COCO object categories are: person, bicycle, car, motorbike, aeroplane, bus, train, truck, boat,
#   traffic light, fire hydrant, stop sign, parking meter, bench, bird, cat, dog, horse, sheep, cow, elephant, bear,
#   zebra, giraffe, backpack, umbrella, handbag, tie, suitcase, frisbee, skis, snowboard, sports ball, kite,
#   baseball bat, baseball glove, skateboard, surfboard, tennis racket, bottle, wine glass, cup, fork, knife, spoon,
#   bowl, banana, apple, sandwich, orange, broccoli, carrot, hot dog, pizza, donut, cake, chair, sofa, pottedplant,
#   bed, diningtable, toilet, tvmonitor, laptop, mouse, remote, keyboard, cell phone, microwave, oven, toaster, sink,
#   refrigerator, book, clock, vase, scissors, teddy bear, hair drier, toothbrush.
#
# - The 20 Pascal-VOC object categories are: aeroplane, bicycle, bird, boat, bottle, bus, car, cat, chair, cow,
# diningtable, dog, horse, motorbike, person, pottedplant, sheep, sofa, train, tvmonitor.
#
# Sometimes it will make mistakes! The performance of yolov3-tiny is about 33.1% correct (mean average precision) on
# the COCO test set. The OpenCV Face Detector is quite fast and robust!
#
# This module is adapted from the sample OpenCV code:
# https://github.com/opencv/opencv/blob/master/samples/dnn/object_detection.py
#
# More pre-trained models are available on github in opencv_extra
#
#
# @author <NAME>
#
# @videomapping YUYV 640 502 20.0 YUYV 640 480 20.0 JeVois PyDetectionDNN
# @email <EMAIL>
# @address 880 W 1st St Suite 807, Los Angeles CA 90012, USA
# @copyright Copyright (C) 2018 by <NAME>
# @mainurl http://jevois.org
# @supporturl http://jevois.org
# @otherurl http://jevois.org
# @license GPL v3
# @distribution Unrestricted
# @restrictions None
# @ingroup modules
class GarbageTracker:
# ####################################################################################################
## Constructor
def __init__(self):
self.confThreshold = 0.5 # Confidence threshold (0..1), higher for stricter detection confidence.
self.nmsThreshold = 0.4 # Non-maximum suppression threshold (0..1), higher to remove more duplicate boxes.
self.inpWidth = 160 # Resized image width passed to network
self.inpHeight = 120 # Resized image height passed to network
self.scale = 2/255 # Value scaling factor applied to input pixels
self.mean = [127.5, 127.5, 127.5] # Mean BGR value subtracted from input image
self.rgb = True # True if model expects RGB inputs, otherwise it expects BGR
self.bbox = None
self.tracker = cv.TrackerKCF_create()
# Select one of the models:
#model = 'Face' # OpenCV Face Detector, Caffe model
#model = 'MobileNetV2SSD' # MobileNet v2 + SSD trained on Coco (80 object classes), TensorFlow model
#model = 'MobileNetSSD' # MobileNet + SSD trained on Pascal VOC (20 object classes), Caffe model
model = 'MobileNetSSDcoco' # MobileNet + SSD trained on Coco (80 object classes), TensorFlow model
#model = 'YOLOv3' # Darknet Tiny YOLO v3 trained on Coco (80 object classes), Darknet model
#model = 'YOLOv2' # Darknet Tiny YOLO v2 trained on Pascal VOC (20 object classes), Darknet model
# You should not have to edit anything beyond this point.
backend = cv.dnn.DNN_BACKEND_DEFAULT
target = cv.dnn.DNN_TARGET_CPU
self.classes = None
classnames = None
if (model == 'MobileNetSSD'):
classnames = '/jevois/share/darknet/yolo/data/voc.names'
modelname = '/jevois/share/opencv-dnn/detection/MobileNetSSD_deploy.caffemodel'
configname = '/jevois/share/opencv-dnn/detection/MobileNetSSD_deploy.prototxt'
self.rgb = False
elif (model == 'MobileNetV2SSD'):
classnames = '/jevois/share/darknet/yolo/data/coco.names'
modelname = '/jevois/share/opencv-dnn/detection/ssd_mobilenet_v2_coco_2018_03_29.pb'
configname = '/jevois/share/opencv-dnn/detection/ssd_mobilenet_v2_coco_2018_03_29.pbtxt'
elif (model == 'MobileNetSSDcoco'):
classnames = '/jevois/share/darknet/yolo/data/coconew.names'
modelname = '/jevois/share/opencv-dnn/detection/ssd_mobilenet_v1_coco_2017_11_17.pb'
configname = '/jevois/share/opencv-dnn/detection/ssd_mobilenet_v1_coco_2017_11_17.pbtxt'
self.rgb = False
self.nmsThreshold = 0.1
elif (model == 'YOLOv3'):
classnames = '/jevois/share/darknet/yolo/data/coco.names'
modelname = '/jevois/share/darknet/yolo/weights/yolov3-tiny.weights'
configname = '/jevois/share/darknet/yolo/cfg/yolov3-tiny.cfg'
elif (model == 'YOLOv2'):
classnames = '/jevois/share/darknet/yolo/data/voc.names'
modelname = '/jevois/share/darknet/yolo/weights/yolov2-tiny-voc.weights'
configname = '/jevois/share/darknet/yolo/cfg/yolov2-tiny-voc.cfg'
self.inpWidth = 320
self.inpHeight = 240
else:
classnames = '/jevois/share/opencv-dnn/detection/opencv_face_detector.classes'
modelname = '/jevois/share/opencv-dnn/detection/opencv_face_detector.caffemodel'
configname = '/jevois/share/opencv-dnn/detection/opencv_face_detector.prototxt'
self.scale = 1.0
self.mean = [104.0, 177.0, 123.0]
self.rgb = False
# Load names of classes
if classnames:
with open(classnames, 'rt') as f:
self.classes = f.read().rstrip('\n').split('\n')
# Load a network
self.net = cv.dnn.readNet(modelname, configname)
self.net.setPreferableBackend(backend)
self.net.setPreferableTarget(target)
self.timer = jevois.Timer('Neural detection', 10, jevois.LOG_DEBUG)
self.model = model
garbageclasses = ["shoe", "hat", "eye glasses", "frisbee",
"bottle", "plate", "wine glass", "cup", "fork", "spoon", "bowl",
"banana", "apple", "sandwich", "orange", "broccoli", "carrot", "fruit",
"hotdog", "pizza", "donut", "cake",
"vase", "scissors", "toothbrush", "cardboard", "napkin",
"net", "paper", "plastic", "straw"]
self.garbageclasses = garbageclasses
# ####################################################################################################
## Get names of the network's output layers
def getOutputsNames(self, net):
layersNames = self.net.getLayerNames()
return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# ####################################################################################################
## Analyze and draw boxes, object names, and confidence scores
def postprocess(self, frame, outs):
frameHeight = frame.shape[0]
frameWidth = frame.shape[1]
out_center_x, out_center_y = frameWidth/2, frameHeight/2
def track(classId, conf, box):
# Track the last box on the list
if self.bbox is None:
left = box[0]
top = box[1]
width = box[2]
height = box[3]
self.bbox = (left, top, width, height)  # OpenCV trackers expect an (x, y, w, h) box
ok = self.tracker.init(frame, self.bbox)
drawPred(classId, conf, left, top, left + width, top + height, color=(255, 255, 255))
else:
ok, self.bbox = self.tracker.update(frame)
if ok:
p1 = (int(self.bbox[0]), int(self.bbox[1]))
p2 = (int(self.bbox[0] + self.bbox[2]), int(self.bbox[1] + self.bbox[3]))
drawPred(classId, conf, p1[0], p1[1], p2[0], p2[1], color=(255, 0, 0))
else:
self.bbox = None
def drawPred(classId, conf, left, top, right, bottom, color=(0,255,0)):
# Draw a bounding box.
cv.rectangle(frame, (left, top), (right, bottom), color, 2)
label = '%.2f' % (conf * 100)
# Print a label of class.
if self.classes:
if (classId >= len(self.classes)):
label = 'Oooops id=%d: %s' % (classId, label)
else:
label = '%s: %s' % (self.classes[classId], label)
labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.4, 1)
top = max(top, labelSize[1])
cv.rectangle(frame, (left, top - labelSize[1]-2), (left + labelSize[0], top + baseLine),
(255, 255, 255), cv.FILLED)
cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0))
def tellRobot(out_center_x, out_center_y, serial_format="XY"):
if self.bbox is None:
jevois.sendSerial("stop")
else:
box_center_x, box_center_y = self.bbox[0] + self.bbox[2]/2, self.bbox[1] + self.bbox[3]/2
if serial_format == "XY":
if out_center_x < box_center_x:
move_x = box_center_x - out_center_x
elif box_center_x < out_center_x:
move_x = out_center_x - box_center_x
elif box_center_x == out_center_x:
move_x = 0
if out_center_y < box_center_y:
move_y = box_center_y - out_center_y
elif box_center_y < out_center_y:
move_y = out_center_y - box_center_y
elif box_center_y == out_center_y:
move_y = 0
if move_x < 100:
move_x = 100
if move_y < 100:
move_y = 100
jevois.sendSerial("move {} {}".format(int(move_x), int(move_y)))
else:
jevois.sendSerial("Invalid Serial Format")
layerNames = self.net.getLayerNames()
lastLayerId = self.net.getLayerId(layerNames[-1])
lastLayer = self.net.getLayer(lastLayerId)
classIds = []
confidences = []
boxes = []
if self.net.getLayer(0).outputNameToIndex('im_info') != -1: # Faster-RCNN or R-FCN
# Network produces output blob with a shape 1x1xNx7 where N is a number of
# detections and an every detection is a vector of values
# [batchId, classId, confidence, left, top, right, bottom]
for out in outs:
for detection in out[0, 0]:
classId = int(detection[1]) - 1
confidence = detection[2]
is_garbage = self.classes[classId] in self.garbageclasses
if (confidence > self.confThreshold) and (is_garbage):
left = int(detection[3])
top = int(detection[4])
right = int(detection[5])
bottom = int(detection[6])
width = right - left + 1
height = bottom - top + 1
classIds.append(classId) # Skip background label
confidences.append(float(confidence))
boxes.append([left, top, width, height])
elif lastLayer.type == 'DetectionOutput':
# Network produces output blob with a shape 1x1xNx7 where N is a number of
# detections and an every detection is a vector of values
# [batchId, classId, confidence, left, top, right, bottom]
for out in outs:
for detection in out[0, 0]:
classId = int(detection[1]) - 1
confidence = detection[2]
is_garbage = self.classes[classId] in self.garbageclasses
if (confidence > self.confThreshold) and (is_garbage):
left = int(detection[3] * frameWidth)
top = int(detection[4] * frameHeight)
right = int(detection[5] * frameWidth)
bottom = int(detection[6] * frameHeight)
width = right - left + 1
height = bottom - top + 1
classIds.append(classId) # Skip background label
confidences.append(float(confidence))
boxes.append([left, top, width, height])
elif lastLayer.type == 'Region':
# Network produces output blob with a shape NxC where N is a number of
# detected objects and C is a number of classes + 4 where the first 4
# numbers are [center_x, center_y, width, height]
classIds = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
classId = np.argmax(scores)
confidence = scores[classId]
is_garbage = self.classes[classId] in self.garbageclasses
if (confidence > self.confThreshold) and (is_garbage):
center_x = int(detection[0] * frameWidth)
center_y = int(detection[1] * frameHeight)
width = int(detection[2] * frameWidth)
height = int(detection[3] * frameHeight)
left = int(center_x - width / 2)
top = int(center_y - height / 2)
classIds.append(classId)
confidences.append(float(confidence))
boxes.append([left, top, width, height])
else:
jevois.LERROR('Unknown output layer type: ' + lastLayer.type)
return
indices = cv.dnn.NMSBoxes(boxes, confidences, self.confThreshold, self.nmsThreshold)
for i in indices:
i = i[0]
box = boxes[i]
left = box[0]
top = box[1]
width = box[2]
height = box[3]
# drawPred(classIds[i], confidences[i], left, top, left + width, top + height, color=(0, 255, 0))
track(classIds[i], confidences[i], box)
# Tell the robot what to do
tellRobot(out_center_x, out_center_y, "XY")
# ####################################################################################################
## JeVois main processing function
def process(self, inframe, outframe):
frame = inframe.getCvBGR()
self.timer.start()
frameHeight = frame.shape[0]
frameWidth = frame.shape[1]
out_center_x, out_center_y = frameWidth/2, frameHeight/2
# Create a 4D blob from a frame.
blob = cv.dnn.blobFromImage(frame, self.scale, (self.inpWidth, self.inpHeight), self.mean, self.rgb, crop=False)
# Run a model
self.net.setInput(blob)
if self.net.getLayer(0).outputNameToIndex('im_info') != -1: # Faster-RCNN or R-FCN
frame = cv.resize(frame, (self.inpWidth, self.inpHeight))
self.net.setInput(np.array([self.inpHeight, self.inpWidth, 1.6], dtype=np.float32), 'im_info')
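# A minimal desktop sketch (not part of the JeVois module above) of the same OpenCV DNN
# detection pipeline: build a blob, run the network, and filter boxes with non-maximum
# suppression. Model/config paths are placeholders; the class-id offset mirrors the code above.
#
#   net = cv.dnn.readNet('/path/to/model.weights', '/path/to/config')   # placeholder paths
#   detections = detect_objects(frame, net)
def detect_objects(frame, net, conf_threshold=0.5, nms_threshold=0.4,
                   inp_size=(300, 300), scale=1.0 / 127.5, mean=(127.5, 127.5, 127.5)):
    """Return a list of (class_id, confidence, [x, y, w, h]) for one BGR frame."""
    h, w = frame.shape[:2]
    blob = cv.dnn.blobFromImage(frame, scale, inp_size, mean, swapRB=False, crop=False)
    net.setInput(blob)
    out = net.forward()  # SSD-style DetectionOutput: 1x1xNx7 [batch, class, conf, l, t, r, b]
    class_ids, confidences, boxes = [], [], []
    for detection in out[0, 0]:
        confidence = float(detection[2])
        if confidence > conf_threshold:
            left, top = int(detection[3] * w), int(detection[4] * h)
            right, bottom = int(detection[5] * w), int(detection[6] * h)
            class_ids.append(int(detection[1]) - 1)
            confidences.append(confidence)
            boxes.append([left, top, right - left + 1, bottom - top + 1])
    keep = cv.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
    return [(class_ids[i], confidences[i], boxes[i]) for i in np.array(keep).flatten()]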
import numpy as np
import numpy.testing as npt
from stumpy import scraamp, aamp, config
from stumpy.scraamp import prescraamp
import pytest
import naive
test_data = [
(
np.array([9, 8100, -60, 7], dtype=np.float64),
np.array([584, -11, 23, 79, 1001, 0, -19], dtype=np.float64),
),
(
np.random.uniform(-1000, 1000, [8]).astype(np.float64),
np.random.uniform(-1000, 1000, [64]).astype(np.float64),
),
]
window_size = [8, 16, 32]
substitution_locations = [(slice(0, 0), 0, -1, slice(1, 3), [0, 3])]
substitution_values = [np.nan, np.inf]
percentages = [(0.01, 0.1, 1.0)]
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_prescraamp_self_join(T_A, T_B):
for p in [1.0, 2.0, 3.0]:
m = 3
zone = int(np.ceil(m / 4))
for s in range(1, zone + 1):
seed = np.random.randint(100000)
np.random.seed(seed)
ref_P, ref_I = naive.prescraamp(T_B, m, T_B, s=s, exclusion_zone=zone, p=p)
np.random.seed(seed)
comp_P, comp_I = prescraamp(T_B, m, s=s, p=p)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_prescraamp_A_B_join(T_A, T_B):
for p in [1.0, 2.0, 3.0]:
m = 3
zone = int(np.ceil(m / 4))
for s in range(1, zone + 1):
seed = np.random.randint(100000)
np.random.seed(seed)
ref_P, ref_I = naive.prescraamp(T_A, m, T_B, s=s, p=p)
np.random.seed(seed)
comp_P, comp_I = prescraamp(T_A, m, T_B=T_B, s=s, p=p)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_prescraamp_A_B_join_swap(T_A, T_B):
m = 3
zone = int(np.ceil(m / 4))
for s in range(1, zone + 1):
seed = np.random.randint(100000)
np.random.seed(seed)
ref_P, ref_I = naive.prescraamp(T_B, m, T_A, s=s)
np.random.seed(seed)
comp_P, comp_I = prescraamp(T_B, m, T_B=T_A, s=s)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("m", window_size)
def test_prescraamp_self_join_larger_window(T_A, T_B, m):
if len(T_B) > m:
zone = int(np.ceil(m / 4))
for s in range(1, zone + 1):
seed = np.random.randint(100000)
np.random.seed(seed)
ref_P, ref_I = naive.prescraamp(T_B, m, T_B, s=s, exclusion_zone=zone)
np.random.seed(seed)
comp_P, comp_I = prescraamp(T_B, m, s=s)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
def test_scraamp_int_input():
with pytest.raises(TypeError):
scraamp(
np.arange(10), 5, ignore_trivial=True, percentage=1.0, pre_scraamp=False
)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("percentages", percentages)
def test_scraamp_self_join(T_A, T_B, percentages):
m = 3
zone = int(np.ceil(m / 4))
for p in [1.0, 2.0, 3.0]:
for percentage in percentages:
seed = np.random.randint(100000)
np.random.seed(seed)
ref_mp = naive.scraamp(T_B, m, T_B, percentage, zone, False, None, p=p)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
np.random.seed(seed)
approx = scraamp(
T_B,
m,
ignore_trivial=True,
percentage=percentage,
pre_scraamp=False,
p=p,
)
approx.update()
comp_P = approx.P_
comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("percentages", percentages)
def test_scraamp_A_B_join(T_A, T_B, percentages):
m = 3
for p in [1.0, 2.0, 3.0]:
for percentage in percentages:
seed = np.random.randint(100000)
np.random.seed(seed)
ref_mp = naive.scraamp(T_A, m, T_B, percentage, None, False, None, p=p)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
np.random.seed(seed)
approx = scraamp(
T_A,
m,
T_B,
ignore_trivial=False,
percentage=percentage,
pre_scraamp=False,
p=p,
)
approx.update()
comp_P = approx.P_
comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("percentages", percentages)
def test_scraamp_A_B_join_swap(T_A, T_B, percentages):
m = 3
for percentage in percentages:
seed = np.random.randint(100000)
np.random.seed(seed)
ref_mp = naive.scraamp(T_B, m, T_A, percentage, None, False, None)
ref_P = ref_mp[:, 0]
# ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
np.random.seed(seed)
approx = scraamp(
T_B, m, T_A, ignore_trivial=False, percentage=percentage, pre_scraamp=False
)
approx.update()
comp_P = approx.P_
# comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("m", window_size)
@pytest.mark.parametrize("percentages", percentages)
def test_scraamp_self_join_larger_window(T_A, T_B, m, percentages):
if len(T_B) > m:
zone = int(np.ceil(m / 4))
for percentage in percentages:
seed = np.random.randint(100000)
np.random.seed(seed)
ref_mp = naive.scraamp(T_B, m, T_B, percentage, zone, False, None)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
np.random.seed(seed)
approx = scraamp(
T_B, m, ignore_trivial=True, percentage=percentage, pre_scraamp=False
)
approx.update()
comp_P = approx.P_
comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_scraamp_self_join_full(T_A, T_B):
m = 3
zone = int(np.ceil(m / 4))
ref_mp = naive.aamp(T_B, m, exclusion_zone=zone)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
approx = scraamp(T_B, m, ignore_trivial=True, percentage=1.0, pre_scraamp=False)
approx.update()
comp_P = approx.P_
comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
ref_mp = aamp(T_B, m, ignore_trivial=True)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_scraamp_A_B_join_full(T_A, T_B):
m = 3
ref_mp = naive.aamp(T_A, m, T_B=T_B)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
approx = scraamp(
T_A, m, T_B, ignore_trivial=False, percentage=1.0, pre_scraamp=False
)
approx.update()
comp_P = approx.P_
comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
ref_mp = aamp(T_A, m, T_B=T_B, ignore_trivial=False)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_scraamp_A_B_join_full_swap(T_A, T_B):
m = 3
ref_mp = naive.aamp(T_B, m, T_B=T_A)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
approx = scraamp(
T_B, m, T_A, ignore_trivial=False, percentage=1.0, pre_scraamp=False
)
approx.update()
comp_P = approx.P_
comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("m", window_size)
def test_scraamp_self_join_full_larger_window(T_A, T_B, m):
if len(T_B) > m:
zone = int(np.ceil(m / 4))
ref_mp = naive.aamp(T_B, m, exclusion_zone=zone)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
approx = scraamp(T_B, m, ignore_trivial=True, percentage=1.0, pre_scraamp=False)
approx.update()
comp_P = approx.P_
comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("percentages", percentages)
def test_scraamp_plus_plus_self_join(T_A, T_B, percentages):
m = 3
zone = int(np.ceil(m / 4))
for p in [1.0, 2.0, 3.0]:
for s in range(1, zone + 1):
for percentage in percentages:
seed = np.random.randint(100000)
np.random.seed(seed)
ref_P, ref_I = naive.prescraamp(
T_B, m, T_B, s=s, exclusion_zone=zone, p=p
)
ref_mp = naive.scraamp(T_B, m, T_B, percentage, zone, True, s, p=p)
for i in range(ref_mp.shape[0]):
if ref_P[i] < ref_mp[i, 0]:
ref_mp[i, 0] = ref_P[i]
ref_mp[i, 1] = ref_I[i]
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
# ref_left_I = ref_mp[:, 2]
# ref_right_I = ref_mp[:, 3]
np.random.seed(seed)
approx = scraamp(
T_B,
m,
ignore_trivial=True,
percentage=percentage,
pre_scraamp=True,
s=s,
p=p,
)
approx.update()
comp_P = approx.P_
comp_I = approx.I_
# comp_left_I = approx.left_I_
# comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
# npt.assert_almost_equal(ref_left_I, comp_left_I)
# npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("percentages", percentages)
def test_scraamp_plus_plus_A_B_join(T_A, T_B, percentages):
m = 3
zone = int(np.ceil(m / 4))
for p in [1.0, 2.0, 3.0]:
for s in range(1, zone + 1):
for percentage in percentages:
seed = np.random.randint(100000)
np.random.seed(seed)
ref_P, ref_I = naive.prescraamp(T_A, m, T_B, s=s, p=p)
ref_mp = naive.scraamp(T_A, m, T_B, percentage, None, False, None, p=p)
for i in range(ref_mp.shape[0]):
if ref_P[i] < ref_mp[i, 0]:
ref_mp[i, 0] = ref_P[i]
ref_mp[i, 1] = ref_I[i]
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
approx = scraamp(
T_A,
m,
T_B,
ignore_trivial=False,
percentage=percentage,
pre_scraamp=True,
s=s,
p=p,
)
approx.update()
comp_P = approx.P_
comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_scraamp_plus_plus_self_join_full(T_A, T_B):
m = 3
zone = int(np.ceil(m / 4))
ref_mp = naive.aamp(T_B, m, exclusion_zone=zone)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
approx = scraamp(
T_B, m, ignore_trivial=True, percentage=1.0, pre_scraamp=True, s=zone
)
approx.update()
comp_P = approx.P_
comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_scraamp_plus_plus_A_B_join_full(T_A, T_B):
m = 3
zone = int(np.ceil(m / 4))
ref_mp = naive.aamp(T_A, m, T_B=T_B)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
approx = scraamp(
T_A, m, T_B=T_B, ignore_trivial=False, percentage=1.0, pre_scraamp=True, s=zone
)
approx.update()
comp_P = approx.P_
comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_scraamp_plus_plus_A_B_join_full_swap(T_A, T_B):
m = 3
zone = int(np.ceil(m / 4))
ref_mp = naive.aamp(T_B, m, T_B=T_A)
ref_P = ref_mp[:, 0]
ref_I = ref_mp[:, 1]
ref_left_I = ref_mp[:, 2]
ref_right_I = ref_mp[:, 3]
approx = scraamp(
T_B, m, T_B=T_A, ignore_trivial=False, percentage=1.0, pre_scraamp=True, s=zone
)
approx.update()
comp_P = approx.P_
comp_I = approx.I_
comp_left_I = approx.left_I_
comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
npt.assert_almost_equal(ref_left_I, comp_left_I)
npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("percentages", percentages)
def test_scraamp_constant_subsequence_self_join(percentages):
T = np.concatenate((np.zeros(20, dtype=np.float64), np.ones(5, dtype=np.float64)))
m = 3
zone = int(np.ceil(m / 4))
for percentage in percentages:
seed = np.random.randint(100000)
np.random.seed(seed)
ref_mp = naive.scraamp(T, m, T, percentage, zone, False, None)
ref_P = ref_mp[:, 0]
# ref_I = ref_mp[:, 1]
# ref_left_I = ref_mp[:, 2]
# ref_right_I = ref_mp[:, 3]
np.random.seed(seed)
approx = scraamp(
T, m, ignore_trivial=True, percentage=percentage, pre_scraamp=False
)
approx.update()
comp_P = approx.P_
# comp_I = approx.I_
# comp_left_I = approx.left_I_
# comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P)
# npt.assert_almost_equal(ref_I, comp_I)
# npt.assert_almost_equal(ref_left_I, comp_left_I)
# npt.assert_almost_equal(ref_right_I, comp_right_I)
@pytest.mark.parametrize("percentages", percentages)
def test_scraamp_identical_subsequence_self_join(percentages):
identical = np.random.rand(8)
T = np.random.rand(20)
T[1 : 1 + identical.shape[0]] = identical
T[11 : 11 + identical.shape[0]] = identical
m = 3
zone = int(np.ceil(m / 4))
for percentage in percentages:
seed = np.random.randint(100000)
np.random.seed(seed)
ref_mp = naive.scraamp(T, m, T, percentage, zone, False, None)
ref_P = ref_mp[:, 0]
# ref_I = ref_mp[:, 1]
# ref_left_I = ref_mp[:, 2]
# ref_right_I = ref_mp[:, 3]
np.random.seed(seed)
approx = scraamp(
T, m, ignore_trivial=True, percentage=percentage, pre_scraamp=False
)
approx.update()
comp_P = approx.P_
# comp_I = approx.I_
# comp_left_I = approx.left_I_
# comp_right_I = approx.right_I_
naive.replace_inf(ref_P)
naive.replace_inf(comp_P)
npt.assert_almost_equal(ref_P, comp_P, decimal=config.STUMPY_TEST_PRECISION)
from functools import reduce
from copy import copy
from time import time
import numpy as np
import numpy.random as npr
import numpy.linalg as la
import scipy.linalg as sla
from scipy.linalg import solve_discrete_lyapunov, solve_discrete_are
from utility.matrixmath import vec, mat, mdot, matmul_lr, specrad, dlyap, dare, dare_gain
from quadtools import quadblock, quadstack, unquadblock, unquadstack
class LinearSystem:
def __init__(self, A, B, C, a, Aa, b, Bb, c, Cc, Q, W):
self.A = A
self.B = B
self.C = C
self.a = a
self.b = b
self.c = c
self.Aa = Aa
self.Bb = Bb
self.Cc = Cc
self.Q = Q
self.W = W
self.n = A.shape[0]
self.m = B.shape[1]
self.p = C.shape[0]
@property
def data(self):
return self.A, self.B, self.C, self.a, self.Aa, self.b, self.Bb, self.c, self.Cc, self.Q, self.W
@property
def dims(self):
return self.n, self.m, self.p
@property
def AB(self):
return np.block([self.A, self.B])
@property
def AC(self):
return np.block([[self.A], [self.C]])
class LinearSystemControlled(LinearSystem):
def __init__(self, system, K, L):
super().__init__(*system.data)
self.K = K
self.L = L
# Zeros matrices
self.Zn = np.zeros([self.n, self.n])
@property
def BK(self):
return self.B @ self.K
@property
def LC(self):
return self.L @ self.C
@property
def F(self):
return self.A + self.BK - self.LC
@property
def Phi_aug(self):
return np.block([[self.A, self.BK],
[self.LC, self.F]])
@property
def AK(self):
return self.A + self.BK
@property
def AL(self):
return self.A - self.LC
@property
def IK(self):
return np.block([[np.eye(self.n)], [self.K]])
@property
def IL(self):
return np.block([np.eye(self.n), self.L])
@property
def QK(self):
return matmul_lr(self.IK.T, self.Q)
@property
def WL(self):
return matmul_lr(self.IL, self.W)
@property
def IK_aug(self):
return sla.block_diag(np.eye(self.n), self.K)
# _*_ coding: utf-8 _*_
"""
Calculate grid derivative.
"""
import numpy as np
from dk_met_base.arr import conform_dims
def center_finite_diff_n(grid, dim=1, r=None, map_scale=None,
cyclic=False, second=False):
"""
Performs a centered finite difference operation on the given dimension.
using:
Central finite difference scheme second order for first derivatives
(u[i+1]-u[i-1])/(2dx)
Central finite difference scheme second order for second derivatives
(u[i+1]+u[i-1]-2*u[i])/(dx*dx)
reference:
http://www.cfm.brown.edu/people/jansh/resources/APMA1180/fd.pdf
notice: for second derivatives, ensure equal interval.
:param grid: a multi-dimensional numpy array.
:param r: A scalar, one-dimensional, or multi-dimensional array containing
the coordinates along which grid is to be differenced. It does not
need to be equally spaced from a computational point of view.
>scalar: r assumed to be the (constant) distance between
adjacent points.
>one-dimensional (and the same size as the dimension of
grid): applied to all dimensions of grid.
>multi-dimensional: then it must be the same size as grid.
:param dim: A scalar integer indicating which dimension of grid to
calculate the center finite difference on.
Dimension numbering starts at 1, default=1.
:param map_scale: map scale coefficient, a scalar, one-dimensional,
or multi-dimensional array like r.
:param cyclic: cyclic or periodic boundary.
:param second: calculate second derivatives, default is first derivatives.
:return: finite difference array.
"""
# move the specified dimension to the last axis
p = np.arange(grid.ndim)
p[-1] = dim - 1
p[dim-1] = -1
grid = np.transpose(grid, p)
# construct shift vector
sf = np.arange(grid.ndim)
sf[0] = -1
sb = np.arange(grid.ndim)
sb[0] = 1
# check coordinates
if r is not None:
if np.size(r) == 1:
rr = np.arange(grid.shape[0], dtype=float) * r
else:
rr = r
if np.ndim(rr) == 1:
rr = conform_dims(grid.shape, rr, [0])
else:
rr = np.transpose(rr, p)
if map_scale is not None: # check map scale
mps = map_scale
if np.ndim(mps) == 1:
mps = conform_dims(grid.shape, mps, [0])
if np.ndim(mps) > 1:
mps = np.transpose(mps, p)
rr *= mps
#
# Compute center finite difference
#
# first derivative
if not second:
# value difference
dgrid = np.roll(grid, -1, -1) - np.roll(grid, 1, -1)
# grid space
if r is not None:
drr = np.roll(rr, -1, -1) - np.roll(rr, 1, -1)
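# Standalone illustration (not part of this module) of the centred schemes quoted in the
# docstring above, written with np.roll in the same style as the function body. The test
# signal, spacing and tolerance are arbitrary choices for the demonstration.
def _centered_diff_demo():
    dx = 0.01
    x = np.arange(0.0, 2.0 * np.pi, dx)
    u = np.sin(x)
    dudx = (np.roll(u, -1) - np.roll(u, 1)) / (2.0 * dx)          # (u[i+1]-u[i-1])/(2dx)
    d2udx2 = (np.roll(u, -1) + np.roll(u, 1) - 2.0 * u) / dx**2   # (u[i+1]+u[i-1]-2*u[i])/(dx*dx)
    # away from the wrap-around end points the results match cos(x) and -sin(x)
    assert np.allclose(dudx[1:-1], np.cos(x)[1:-1], atol=1e-3)
    assert np.allclose(d2udx2[1:-1], -np.sin(x)[1:-1], atol=1e-3)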
from __future__ import print_function, division
import os, sys, warnings, platform
from time import time
import numpy as np
if "PyPy" not in platform.python_implementation():
from scipy.io import loadmat, savemat
from Florence.Tensor import makezero, itemfreq, unique2d, in2d
from Florence.Utils import insensitive
from .vtk_writer import write_vtu
try:
import meshpy.triangle as triangle
has_meshpy = True
except ImportError:
has_meshpy = False
from .HigherOrderMeshing import *
from .NodeArrangement import *
from .GeometricPath import *
from warnings import warn
from copy import deepcopy
"""
Mesh class providing most of the pre-processing functionalities of the Core module
<NAME> - 13/06/2015
"""
class Mesh(object):
"""Mesh class provides the following functionalities:
1. Generating higher order meshes based on a linear mesh, for tris, tets, quads and hexes
2. Generating linear tri and tet meshes based on meshpy back-end
3. Generating linear tri meshes based on distmesh back-end
4. Finding boundary edges and faces for tris and tets, in case they are not provided by the mesh generator
5. Reading Salome meshes in binary (.dat/.txt/etc) format
6. Reading gmsh files .msh
7. Checking for node numbering order of elements and fixing it if desired
8. Writing meshes to unstructured vtk file format (.vtu) in xml and binary formats,
including high order elements
"""
def __init__(self, element_type=None):
super(Mesh, self).__init__()
# self.faces and self.edges ARE BOUNDARY FACES
# AND BOUNDARY EDGES, RESPECTIVELY
self.degree = None
self.ndim = None
self.edim = None
self.nelem = None
self.nnode = None
self.elements = None
self.points = None
self.corners = None
self.edges = None
self.faces = None
self.element_type = element_type
self.face_to_element = None
self.edge_to_element = None
self.boundary_edge_to_element = None
self.boundary_face_to_element = None
self.all_faces = None
self.all_edges = None
self.interior_faces = None
self.interior_edges = None
# TYPE OF BOUNDARY FACES/EDGES
self.boundary_element_type = None
# FOR GEOMETRICAL CURVES/SURFACES
self.edge_to_curve = None
self.face_to_surface = None
self.spatial_dimension = None
self.reader_type = None
self.reader_type_format = None
self.reader_type_version = None
self.writer_type = None
self.filename = None
# self.has_meshpy = has_meshpy
def SetElements(self,arr):
self.elements = arr
def SetPoints(self,arr):
self.points = arr
def SetEdges(self,arr):
self.edges = arr
def SetFaces(self,arr):
self.faces = arr
def GetElements(self):
return self.elements
def GetPoints(self):
return self.points
def GetEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
self.GetEdgesTri()
elif self.element_type == "quad":
self.GetEdgesQuad()
elif self.element_type == "pent":
self.GetEdgesPent()
elif self.element_type == "tet":
self.GetEdgesTet()
elif self.element_type == "hex":
self.GetEdgesHex()
else:
raise ValueError('Type of element not understood')
return self.all_edges
def GetBoundaryEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
self.GetBoundaryEdgesTri()
elif self.element_type == "quad":
self.GetBoundaryEdgesQuad()
elif self.element_type == "pent":
self.GetBoundaryEdgesPent()
elif self.element_type == "tet":
self.GetBoundaryEdgesTet()
elif self.element_type == "hex":
self.GetBoundaryEdgesHex()
else:
raise ValueError('Type of element not understood')
return self.edges
def GetInteriorEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
self.GetInteriorEdgesTri()
elif self.element_type == "quad":
self.GetInteriorEdgesQuad()
elif self.element_type == "pent":
self.GetInteriorEdgesPent()
elif self.element_type == "tet":
self.GetInteriorEdgesTet()
elif self.element_type == "hex":
self.GetInteriorEdgesHex()
else:
raise ValueError('Type of element not understood')
return self.interior_edges
def GetFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
self.GetFacesTet()
elif self.element_type == "hex":
self.GetFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.all_faces
def GetBoundaryFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
self.GetBoundaryFacesTet()
elif self.element_type == "hex":
self.GetBoundaryFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.faces
def GetInteriorFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
self.GetInteriorFacesTet()
elif self.element_type == "hex":
self.GetInteriorFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.interior_faces
def GetElementsEdgeNumbering(self):
assert self.element_type is not None
if self.element_type == "tri":
return self.GetElementsEdgeNumberingTri()
elif self.element_type == "quad":
return self.GetElementsEdgeNumberingQuad()
else:
raise ValueError('Type of element not understood')
return self.edge_to_element
def GetElementsWithBoundaryEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
return self.GetElementsWithBoundaryEdgesTri()
elif self.element_type == "quad":
return self.GetElementsWithBoundaryEdgesQuad()
else:
raise ValueError('Type of element not understood')
return self.boundary_edge_to_element
def GetElementsFaceNumbering(self):
assert self.element_type is not None
if self.element_type == "tet":
return self.GetElementsFaceNumberingTet()
elif self.element_type == "hex":
return self.GetElementsFaceNumberingHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.face_to_element
def GetElementsWithBoundaryFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
return self.GetElementsWithBoundaryFacesTet()
elif self.element_type == "hex":
return self.GetElementsWithBoundaryFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.boundary_face_to_element
@property
def Bounds(self):
"""Returns bounds of a mesh i.e. the minimum and maximum coordinate values
in every direction
"""
assert self.points is not None
if self.points.shape[1] == 3:
bounds = np.array([[np.min(self.points[:,0]),
np.min(self.points[:,1]),
np.min(self.points[:,2])],
[np.max(self.points[:,0]),
np.max(self.points[:,1]),
np.max(self.points[:,2])]])
makezero(bounds)
return bounds
elif self.points.shape[1] == 2:
bounds = np.array([[np.min(self.points[:,0]),
np.min(self.points[:,1])],
[np.max(self.points[:,0]),
np.max(self.points[:,1])]])
makezero(bounds)
return bounds
elif self.points.shape[1] == 1:
bounds = np.array([[np.min(self.points[:,0])],
[np.max(self.points[:,0])]])
makezero(bounds)
return bounds
else:
raise ValueError("Invalid dimension for mesh coordinates")
def GetEdgesTri(self):
"""Find all edges of a triangular mesh.
Sets all_edges property and returns it
returns:
arr: numpy ndarray of all edges"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1]==2 and p > 1:
pass
else:
return self.all_edges
node_arranger = NodeArrangementTri(p-1)[0]
# CHECK IF FACES ARE ALREADY AVAILABLE
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1 and self.all_edges.shape[1] == p+1:
warn("Mesh edges seem to be already computed. I am going to recompute them")
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
edges = np.zeros((3*self.elements.shape[0],p+1),dtype=np.uint64)
edges[:self.elements.shape[0],:] = self.elements[:,node_arranger[0,:]]
edges[self.elements.shape[0]:2*self.elements.shape[0],:] = self.elements[:,node_arranger[1,:]]
edges[2*self.elements.shape[0]:,:] = self.elements[:,node_arranger[2,:]]
# REMOVE DUPLICATES
edges, idx = unique2d(edges,consider_sort=True,order=False,return_index=True)
edge_to_element = np.zeros((edges.shape[0],2),np.int64)
edge_to_element[:,0] = idx % self.elements.shape[0]
edge_to_element[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_to_element
# DO NOT SET all_edges IF THE CALLER FUNCTION IS GetBoundaryEdgesTet
import inspect
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)[1][3]
if calframe != "GetBoundaryEdgesTet":
self.all_edges = edges
return edges
def GetBoundaryEdgesTri(self):
"""Find boundary edges (lines) of triangular mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
node_arranger = NodeArrangementTri(p-1)[0]
# CONCATENATE ALL THE EDGES MADE FROM ELEMENTS
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]]),axis=0)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT NECESSARY INDEED - IT IS JUST FOR RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]
self.edges = self.edges.astype(np.uint64)
self.boundary_edge_to_element = boundary_edge_to_element
return self.edges
def GetInteriorEdgesTri(self):
"""Computes interior edges of a triangular mesh
returns:
interior_edges ndarray of interior edges
edge_flags ndarray of edge flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_edges,np.ndarray):
self.GetEdgesTri()
if not isinstance(self.edges,np.ndarray):
self.GetBoundaryEdgesTri()
sorted_all_edges = np.sort(self.all_edges,axis=1)
sorted_boundary_edges = np.sort(self.edges,axis=1)
x = []
for i in range(self.edges.shape[0]):
current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],
self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])
interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)
pos_interior_edges = np.where(interior_edges==0)[0]
if pos_interior_edges.shape[0] != 0:
x.append(pos_interior_edges)
edge_aranger = np.arange(self.all_edges.shape[0])
edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])
interior_edges = self.all_edges[edge_aranger,:]
# GET FLAGS FOR BOUNDARY AND INTERIOR
edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)
edge_flags[edge_aranger] = 0
self.interior_edges = interior_edges
return interior_edges, edge_flags
def GetFacesTet(self):
"""Find all faces (surfaces) in the tetrahedral mesh (boundary & interior).
Sets all_faces property and returns it
returns:
arr: numpy ndarray of all faces
"""
# DETERMINE DEGREE
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_faces,np.ndarray):
if self.all_faces.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_faces.shape[1] == 3 and p > 1:
pass
else:
return self.all_faces
node_arranger = NodeArrangementTet(p-1)[0]
fsize = int((p+1.)*(p+2.)/2.)
# GET ALL FACES FROM THE ELEMENT CONNECTIVITY
faces = np.zeros((4*self.elements.shape[0],fsize),dtype=np.uint64)
faces[:self.elements.shape[0],:] = self.elements[:,node_arranger[0,:]]
faces[self.elements.shape[0]:2*self.elements.shape[0],:] = self.elements[:,node_arranger[1,:]]
faces[2*self.elements.shape[0]:3*self.elements.shape[0],:] = self.elements[:,node_arranger[2,:]]
faces[3*self.elements.shape[0]:,:] = self.elements[:,node_arranger[3,:]]
# REMOVE DUPLICATES
self.all_faces, idx = unique2d(faces,consider_sort=True,order=False,return_index=True)
face_to_element = np.zeros((self.all_faces.shape[0],2),np.int64)
face_to_element[:,0] = idx % self.elements.shape[0]
face_to_element[:,1] = idx // self.elements.shape[0]
self.face_to_element = face_to_element
return self.all_faces
def GetEdgesTet(self):
"""Find all edges (lines) of tetrahedral mesh (boundary & interior)"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1] == 2 and p > 1:
pass
else:
return self.all_edges
# FIRST GET BOUNDARY FACES
if isinstance(self.all_faces,np.ndarray):
if self.all_faces.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_faces.shape[1] == 3 and p > 1:
self.GetFacesTet()
else:
self.GetFacesTet()
# BUILD A 2D MESH
tmesh = Mesh()
# tmesh = deepcopy(self)
tmesh.element_type = "tri"
tmesh.elements = self.all_faces
tmesh.nelem = tmesh.elements.shape[0]
del tmesh.faces
del tmesh.points
# COMPUTE ALL EDGES
self.all_edges = tmesh.GetEdgesTri()
return self.all_edges
def GetBoundaryFacesTet(self):
"""Find boundary faces (surfaces) of a tetrahedral mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.faces,np.ndarray):
if self.faces.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.faces.shape[1] == 3 and p > 1:
pass
else:
return
node_arranger = NodeArrangementTet(p-1)[0]
# CONCATENATE ALL THE FACES MADE FROM ELEMENTS
all_faces = np.concatenate((self.elements[:,:3],self.elements[:,[0,1,3]],
self.elements[:,[0,2,3]],self.elements[:,[1,2,3]]),axis=0)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_faces,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY FACES
freqs_inv = itemfreq(inv)
faces_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.faces = uniques[faces_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_face_to_element = np.zeros((faces_ext_flags.shape[0],2),dtype=np.int64)
# THE FOLLOWING WILL COMPUTE FACES BASED ON SORTING AND NOT TAKING INTO ACCOUNT
# THE ELEMENT CONNECTIVITY
# boundary_face_to_element[:,0] = np.remainder(idx[faces_ext_flags],self.elements.shape[0])
# boundary_face_to_element[:,1] = np.floor_divide(idx[faces_ext_flags],self.elements.shape[0])
# OR EQUIVALENTLY
# boundary_face_to_element[:,0] = idx[faces_ext_flags] % self.elements.shape[0]
# boundary_face_to_element[:,1] = idx[faces_ext_flags] // self.elements.shape[0]
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT NECESSARY INDEED - IT IS JUST FOR RE-ARRANGEMENT OF FACES
all_faces_in_faces = in2d(all_faces,self.faces,consider_sort=True)
all_faces_in_faces = np.where(all_faces_in_faces==True)[0]
# boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)
boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]
boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.faces = self.elements[boundary_face_to_element[:,0][:,None],node_arranger[boundary_face_to_element[:,1],:]]
self.faces = self.faces.astype(np.uint64)
self.boundary_face_to_element = boundary_face_to_element
def GetBoundaryEdgesTet(self):
"""Find boundary edges (lines) of tetrahedral mesh.
Note that for tetrahedrals this function is more robust than Salome's default edge generator
"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
# FIRST GET BOUNDARY FACES
if not isinstance(self.faces,np.ndarray):
self.GetBoundaryFacesTet()
# BUILD A 2D MESH
tmesh = Mesh()
tmesh.element_type = "tri"
tmesh.elements = self.faces
tmesh.nelem = tmesh.elements.shape[0]
del tmesh.faces
del tmesh.points
# ALL THE EDGES CORRESPONDING TO THESE BOUNDARY FACES ARE BOUNDARY EDGES
self.edges = tmesh.GetEdgesTri()
def GetInteriorFacesTet(self):
"""Computes interior faces of a tetrahedral mesh
returns:
interior_faces ndarray of interior faces
face_flags 1D array of face flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_faces,np.ndarray):
self.GetFacesTet()
if not isinstance(self.faces,np.ndarray):
self.GetBoundaryFacesTet()
face_flags = in2d(self.all_faces.astype(self.faces.dtype),self.faces,consider_sort=True)
face_flags[face_flags==True] = 1
face_flags[face_flags==False] = 0
interior_faces = self.all_faces[face_flags==False,:]
return interior_faces, face_flags
def GetInteriorEdgesTet(self):
"""Computes interior edges of a tetrahedral mesh
returns:
interior_edges ndarray of interior edges
edge_flags 1D array of edge flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_edges,np.ndarray):
self.GetEdgesTet()
if not isinstance(self.edges,np.ndarray):
self.GetBoundaryEdgesTet()
edge_flags = in2d(self.all_edges.astype(self.edges.dtype),self.edges,consider_sort=True)
edge_flags[edge_flags==True] = 1
edge_flags[edge_flags==False] = 0
interior_edges = self.all_edges[edge_flags==False,:]
self.interior_edges = interior_edges
return interior_edges, edge_flags
def GetEdgesQuad(self):
"""Find all edges of a quadrilateral mesh.
Sets all_edges property and returns it
returns:
arr: numpy ndarray of all edges"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1]==2 and p > 1:
pass
else:
return self.all_edges
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.uint64)
# REMOVE DUPLICATES
edges, idx = unique2d(edges,consider_sort=True,order=False,return_index=True)
edge_to_element = np.zeros((edges.shape[0],2),np.int64)
edge_to_element[:,0] = idx % self.elements.shape[0]
edge_to_element[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_to_element
# DO NOT SET all_edges IF THE CALLER FUNCTION IS GetBoundaryEdgesHex
import inspect
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)[1][3]
if calframe != "GetBoundaryEdgesHex":
self.all_edges = edges
return edges
def GetBoundaryEdgesQuad(self):
"""Find boundary edges (lines) of a quadrilateral mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.uint64)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT NECESSARY INDEED - IT IS JUST FOR RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]
self.edges = self.edges.astype(np.uint64)
self.boundary_edge_to_element = boundary_edge_to_element
return self.edges
def GetInteriorEdgesQuad(self):
"""Computes interior edges of a quadrilateral mesh
returns:
interior_edges ndarray of interior edges
edge_flags ndarray of edge flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_edges,np.ndarray):
self.GetEdgesQuad()
if not isinstance(self.edges,np.ndarray):
self.GetBoundaryEdgesQuad()
sorted_all_edges = np.sort(self.all_edges,axis=1)
sorted_boundary_edges = np.sort(self.edges,axis=1)
x = []
for i in range(self.edges.shape[0]):
current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],
self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])
interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)
pos_interior_edges = np.where(interior_edges==0)[0]
if pos_interior_edges.shape[0] != 0:
x.append(pos_interior_edges)
edge_aranger = np.arange(self.all_edges.shape[0])
edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])
interior_edges = self.all_edges[edge_aranger,:]
# GET FLAGS FOR BOUNDARY AND INTERIOR
edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)
edge_flags[edge_aranger] = 0
self.interior_edges = interior_edges
return interior_edges, edge_flags
def GetFacesHex(self):
"""Find all faces (surfaces) in the hexahedral mesh (boundary & interior).
Sets all_faces property and returns it
returns:
arr: numpy ndarray of all faces
"""
# DETERMINE DEGREE
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_faces,np.ndarray):
if self.all_faces.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_faces.shape[1] == 4 and p > 1:
pass
else:
return self.all_faces
node_arranger = NodeArrangementHex(p-1)[0]
fsize = int((p+1)**2)
# GET ALL FACES FROM THE ELEMENT CONNECTIVITY
faces = np.concatenate((np.concatenate((
np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],
self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),
self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),
self.elements[:,node_arranger[5,:]]),axis=0).astype(np.int64)
# REMOVE DUPLICATES
self.all_faces, idx = unique2d(faces,consider_sort=True,order=False,return_index=True)
face_to_element = np.zeros((self.all_faces.shape[0],2),np.int64)
face_to_element[:,0] = idx % self.elements.shape[0]
face_to_element[:,1] = idx // self.elements.shape[0]
self.face_to_element = face_to_element
return self.all_faces
def GetEdgesHex(self):
"""Find all edges (lines) of hexahedral mesh (boundary & interior)"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1] == 2 and p > 1:
pass
else:
return self.all_edges
# FIRST GET BOUNDARY FACES
if not isinstance(self.all_faces,np.ndarray):
self.GetFacesHex()
# BUILD A 2D MESH
tmesh = Mesh()
# tmesh = deepcopy(self)
tmesh.element_type = "quad"
tmesh.elements = self.all_faces
tmesh.nelem = tmesh.elements.shape[0]
del tmesh.faces
del tmesh.points
# COMPUTE ALL EDGES
self.all_edges = tmesh.GetEdgesQuad()
return self.all_edges
def GetBoundaryFacesHex(self):
"""Find boundary faces (surfaces) of a hexahedral mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.faces,np.ndarray):
if self.faces.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.faces.shape[1] == 4 and p > 1:
pass
else:
return
node_arranger = NodeArrangementHex(p-1)[0]
# CONCATENATE ALL THE FACES MADE FROM ELEMENTS
all_faces = np.concatenate((np.concatenate((
np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],
self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),
self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),
self.elements[:,node_arranger[5,:]]),axis=0).astype(np.int64)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_faces,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY FACES
freqs_inv = itemfreq(inv)
faces_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.faces = uniques[faces_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_face_to_element = np.zeros((faces_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF FACES
all_faces_in_faces = in2d(all_faces,self.faces,consider_sort=True)
all_faces_in_faces = np.where(all_faces_in_faces==True)[0]
# boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)
boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]
boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.faces = self.elements[boundary_face_to_element[:,0][:,None],node_arranger[boundary_face_to_element[:,1],:]]
self.faces = self.faces.astype(np.uint64)
self.boundary_face_to_element = boundary_face_to_element
def GetBoundaryEdgesHex(self):
"""Find boundary edges (lines) of hexahedral mesh.
"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF ONLY THE LINEAR VERSION HAS BEEN COMPUTED AND p > 1, RECOMPUTE THE HIGHER ORDER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
# FIRST GET BOUNDARY FACES
if not isinstance(self.faces,np.ndarray):
self.GetBoundaryFacesHex()
# BUILD A 2D MESH
tmesh = Mesh()
tmesh.element_type = "quad"
tmesh.elements = self.faces
tmesh.nelem = tmesh.elements.shape[0]
del tmesh.faces
del tmesh.points
# ALL THE EDGES CORRESPONDING TO THESE BOUNDARY FACES ARE BOUNDARY EDGES
self.edges = tmesh.GetEdgesQuad()
def GetInteriorFacesHex(self):
"""Computes interior faces of a hexahedral mesh
returns:
interior_faces ndarray of interior faces
face_flags 1D array of face flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_faces,np.ndarray):
self.GetFacesHex()
if not isinstance(self.faces,np.ndarray):
self.GetBoundaryFacesHex()
face_flags = in2d(self.all_faces.astype(self.faces.dtype),self.faces,consider_sort=True)
face_flags[face_flags==True] = 1
face_flags[face_flags==False] = 0
interior_faces = self.all_faces[face_flags==False,:]
return interior_faces, face_flags
def GetInteriorEdgesHex(self):
"""Computes interior faces of a hexahedral mesh
returns:
interior_edges ndarray of interior edges
edge_flags 1D array of edge flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_edges,np.ndarray):
self.GetEdgesHex()
if not isinstance(self.edges,np.ndarray):
self.GetBoundaryEdgesHex()
edge_flags = in2d(self.all_edges.astype(self.edges.dtype),self.edges,consider_sort=True)
edge_flags[edge_flags==True] = 1
edge_flags[edge_flags==False] = 0
interior_edges = self.all_edges[edge_flags==False,:]
self.interior_edges = interior_edges
return interior_edges, edge_flags
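# USAGE SKETCH (illustrative comment, not part of the original source): assuming
# `mesh` is an instance of this Mesh class with a hexahedral connectivity already
# assigned to mesh.elements/mesh.points, the topology computed by the methods
# above can be queried as:
#
#     all_faces = mesh.GetFacesHex()                       # boundary + interior faces
#     mesh.GetBoundaryFacesHex()                           # sets mesh.faces
#     interior_faces, face_flags = mesh.GetInteriorFacesHex()
#     interior_edges, edge_flags = mesh.GetInteriorEdgesHex()
#     # face_flags/edge_flags are 1 for boundary entities and 0 for interior ones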
def GetEdgesPent(self):
"""Find the all edges of a pentagonal mesh.
Sets all_edges property and returns it
returns:
arr: numpy ndarray of all edges"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF ONLY THE LINEAR VERSION HAS BEEN COMPUTED AND p > 1, RECOMPUTE THE HIGHER ORDER VERSION
if self.all_edges.shape[1]==2 and p > 1:
pass
else:
return self.all_edges
node_arranger = np.array([
[0,1],
[1,2],
[2,3],
[3,4],
[4,0],
])
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]],
self.elements[:,node_arranger[4,:]]),axis=0).astype(np.uint64)
# REMOVE DUPLICATES
edges, idx = unique2d(edges,consider_sort=True,order=False,return_index=True)
edge_to_element = np.zeros((edges.shape[0],2),np.int64)
edge_to_element[:,0] = idx % self.elements.shape[0]
edge_to_element[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_to_element
self.all_edges = edges
return edges
def GetBoundaryEdgesPent(self):
"""Find boundary edges (lines) of a pentagonal mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF ONLY THE LINEAR VERSION HAS BEEN COMPUTED AND p > 1, RECOMPUTE THE HIGHER ORDER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
node_arranger = np.array([
[0,1],
[1,2],
[2,3],
[3,4],
[4,0],
])
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]],
self.elements[:,node_arranger[4,:]]),axis=0).astype(np.uint64)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]
self.edges = self.edges.astype(np.uint64)
self.boundary_edge_to_element = boundary_edge_to_element
return self.edges
def GetInteriorEdgesPent(self):
"""Computes interior edges of a pentagonal mesh
returns:
interior_edges ndarray of interior edges
edge_flags ndarray of edge flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_edges,np.ndarray):
self.GetEdgesPent()
if not isinstance(self.edges,np.ndarray):
self.GetBoundaryEdgesPent()
sorted_all_edges = np.sort(self.all_edges,axis=1)
sorted_boundary_edges = np.sort(self.edges,axis=1)
x = []
for i in range(self.edges.shape[0]):
current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],
self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])
interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)
pos_interior_edges = np.where(interior_edges==0)[0]
if pos_interior_edges.shape[0] != 0:
x.append(pos_interior_edges)
edge_aranger = np.arange(self.all_edges.shape[0])
edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])
interior_edges = self.all_edges[edge_aranger,:]
# GET FLAGS FOR BOUNDARY AND INTERIOR
edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)
edge_flags[edge_aranger] = 0
self.interior_edges = interior_edges
return interior_edges, edge_flags
def GetHighOrderMesh(self,p=1, silent=True, **kwargs):
"""Given a linear tri, tet, quad or hex mesh compute high order mesh based on it.
This is a static method linked to the HigherOrderMeshing module"""
if not isinstance(p,int):
raise ValueError("p must be an integer")
else:
if p < 1:
raise ValueError("Value of p={} is not acceptable. Provide p>=1.".format(p))
if self.degree is None:
self.InferPolynomialDegree()
C = p-1
if 'C' in kwargs.keys():
if kwargs['C'] != p - 1:
raise ValueError("Did not understand the specified interpolation degree of the mesh")
del kwargs['C']
# DO NOT COMPUTE IF ALREADY COMPUTED FOR THE SAME ORDER
if self.degree == None:
self.degree = self.InferPolynomialDegree()
if self.degree == p:
return
# SITUATIONS WHEN ANOTHER HIGH ORDER MESH IS REQUIRED, WITH ONE HIGH
# ORDER MESH ALREADY AVAILABLE
if self.degree != 1 and self.degree - 1 != C:
dum = self.GetLinearMesh(remap=True)
self.__dict__.update(dum.__dict__)
if not silent:
print('Generating p = '+str(C+1)+' mesh based on the linear mesh...')
t_mesh = time()
# BUILD A NEW MESH BASED ON THE LINEAR MESH
if self.element_type == 'line':
nmesh = HighOrderMeshLine(C,self,**kwargs)
if self.element_type == 'tri':
if self.edges is None:
self.GetBoundaryEdgesTri()
# nmesh = HighOrderMeshTri(C,self,**kwargs)
nmesh = HighOrderMeshTri_SEMISTABLE(C,self,**kwargs)
elif self.element_type == 'tet':
# nmesh = HighOrderMeshTet(C,self,**kwargs)
nmesh = HighOrderMeshTet_SEMISTABLE(C,self,**kwargs)
elif self.element_type == 'quad':
if self.edges is None:
self.GetBoundaryEdgesQuad()
nmesh = HighOrderMeshQuad(C,self,**kwargs)
elif self.element_type == 'hex':
nmesh = HighOrderMeshHex(C,self,**kwargs)
self.points = nmesh.points
self.elements = nmesh.elements.astype(np.uint64)
if isinstance(self.corners,np.ndarray):
# NOT NECESSARY BUT GENERIC
self.corners = nmesh.corners.astype(np.uint64)
if isinstance(self.edges,np.ndarray):
self.edges = nmesh.edges.astype(np.uint64)
if isinstance(self.faces,np.ndarray):
if isinstance(nmesh.faces,np.ndarray):
self.faces = nmesh.faces.astype(np.uint64)
self.nelem = nmesh.nelem
self.nnode = self.points.shape[0]
self.element_type = nmesh.info
self.degree = C+1
self.ChangeType()
if not silent:
print('Finished generating the high order mesh. Time taken', time()-t_mesh,'sec')
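# USAGE SKETCH (illustrative comment, not part of the original source): assuming
# `mesh` holds a linear tri/tet/quad/hex mesh, a p = 2 version of the same mesh
# can be generated in place with:
#
#     mesh.GetHighOrderMesh(p=2, silent=False)
#     # mesh.points and mesh.elements are overwritten with the p = 2 connectivity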
def EdgeLengths(self,which_edges='boundary'):
"""Computes length of edges, for 2D and 3D meshes
which_edges: [str] 'boundary' for boundary edges only
and 'all' for all edges
"""
assert self.points is not None
assert self.element_type is not None
lengths = None
if which_edges == 'boundary':
if self.edges is None:
self.GetBoundaryEdges()
edge_coords = self.points[self.edges[:,:2],:]
lengths = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
elif which_edges == 'all':
if self.all_edges is None:
self.GetEdges()
edge_coords = self.points[self.all_edges[:,:2],:]
lengths = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
return lengths
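# USAGE SKETCH (illustrative comment, not part of the original source): edge
# length statistics are a common mesh size measure, e.g.
#
#     h_max = mesh.EdgeLengths(which_edges='boundary').max()
#     h_min = mesh.EdgeLengths(which_edges='all').min()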
def Lengths(self,):
"""Computes length of all types of elements
"""
self.__do_essential_memebers_exist__()
if self.element_type == "line":
coords = self.points[self.elements[:,:2],:]
lengths = np.linalg.norm(coords[:,1,:] - coords[:,0,:],axis=1)
else:
self.GetEdges()
coord = self.all_edges
coords = self.points[self.elements[:,:2],:]
lengths = np.linalg.norm(coords[:,1,:] - coords[:,0,:],axis=1)
return lengths
def Areas(self, with_sign=False, gpoints=None):
"""Find areas of all 2D elements [tris, quads].
For 3D elements returns surface areas of all faces
input:
with_sign: [str] compute with/without sign
gpoints: [ndarray] given coordinates to use instead of
self.points
returns: 1D array of nelem x 1 containing areas
"""
assert self.elements is not None
assert self.element_type is not None
if gpoints is None:
assert self.points is not None
gpoints = self.points
if self.element_type == "tri":
points = np.ones((gpoints.shape[0],3),dtype=np.float64)
points[:,:2] = gpoints
# FIND AREAS OF ALL THE ELEMENTS
area = 0.5*np.linalg.det(points[self.elements[:,:3],:])
elif self.element_type == "quad":
# NODE ORDERING IS IRRELEVANT, AS IT IS THESE AREAS
# WHICH DETERMINE NODE ORDERING
# AREA OF QUAD ABCD = AREA OF ABC + AREA OF ACD
points = np.ones((gpoints.shape[0],3),dtype=np.float64)
points[:,:2] = gpoints
# FIND AREAS ABC
area0 = np.linalg.det(points[self.elements[:,:3],:])
# FIND AREAS ACD
area1 = np.linalg.det(points[self.elements[:,[0,2,3]],:])
# FIND AREAS OF ALL THE ELEMENTS
area = 0.5*(area0+area1)
elif self.element_type == "tet":
# GET ALL THE FACES
faces = self.GetFacesTet()
points = np.ones((gpoints.shape[0],3),dtype=np.float64)
points[:,:2]=gpoints[:,:2]
area0 = np.linalg.det(points[faces[:,:3],:])
points[:,:2]=gpoints[:,[2,0]]
area1 = np.linalg.det(points[faces[:,:3],:])
points[:,:2]=gpoints[:,[1,2]]
area2 = np.linalg.det(points[faces[:,:3],:])
area = 0.5*np.linalg.norm(area0+area1+area2)
elif self.element_type == "hex":
from Florence.Tensor import unique2d
C = self.InferPolynomialDegree() - 1
area = 0
node_arranger = NodeArrangementHex(C)[0]
for i in range(node_arranger.shape[0]):
# print node_arranger[i,:]
# AREA OF FACES
points = np.ones((gpoints.shape[0],3),dtype=np.float64)
if i==0 or i==1:
points[:,:2] = gpoints[:,:2]
elif i==2 or i==3:
points[:,:2] = gpoints[:,[0,2]]
elif i==4 or i==5:
points[:,:2] = gpoints[:,1:]
# FIND AREAS ABC
area0 = np.linalg.det(points[self.elements[:,node_arranger[i,:3]],:])
# FIND AREAS ACD
area1 = np.linalg.det(points[self.elements[:,node_arranger[i,1:]],:])
# FIND AREAS OF ALL THE ELEMENTS
area += 0.5*np.linalg.norm(area0+area1)
# print area
raise ValueError('Hex areas implementation requires further checks')
else:
raise NotImplementedError("Computing areas for", self.element_type, "elements not implemented yet")
if with_sign is False:
if self.element_type == "tri" or self.element_type == "quad":
area = np.abs(area)
elif self.element_type == "tet":
raise NotImplementedError('Numbering order of tetrahedral faces could not be determined')
return area
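# USAGE SKETCH (illustrative comment, not part of the original source): for a 2D
# tri/quad mesh the signed areas can be used to detect inverted elements, e.g.
#
#     signed_areas = mesh.Areas(with_sign=True)
#     inverted_elements = np.where(signed_areas < 0)[0]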
def Volumes(self, with_sign=False, gpoints=None):
"""Find Volumes of all 3D elements [tets, hexes]
input:
with_sign: [str] compute with/without sign
gpoints: [ndarray] given coordinates to use instead of
self.points
returns: 1D array of nelem x 1 containing volumes
"""
assert self.elements is not None
assert self.element_type is not None
if self.points.shape[1] == 2:
raise ValueError("2D mesh does not have volume")
if gpoints is None:
assert self.points is not None
gpoints = self.points
if self.element_type == "tet":
a = gpoints[self.elements[:,0],:]
b = gpoints[self.elements[:,1],:]
c = gpoints[self.elements[:,2],:]
d = gpoints[self.elements[:,3],:]
det_array = np.dstack((a-d,b-d,c-d))
# FIND VOLUME OF ALL THE ELEMENTS
volume = 1./6.*np.linalg.det(det_array)
elif self.element_type == "hex":
# Refer: https://en.wikipedia.org/wiki/Parallelepiped
a = gpoints[self.elements[:,0],:]
b = gpoints[self.elements[:,1],:]
c = gpoints[self.elements[:,3],:]
d = gpoints[self.elements[:,4],:]
det_array = np.dstack((b-a,c-a,d-a))
# FIND VOLUME OF ALL THE ELEMENTS
volume = np.linalg.det(det_array)
else:
raise NotImplementedError("Computing volumes for", self.element_type, "elements not implemented yet")
if with_sign is False:
volume = np.abs(volume)
return volume
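# USAGE SKETCH (illustrative comment, not part of the original source): per-element
# and total measures for a tet/hex mesh
#
#     vols = mesh.Volumes()          # unsigned volumes, one entry per element
#     total_volume = vols.sum()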
def Sizes(self, with_sign=False):
"""Computes the size of elements for all element types.
This is a generic method that returns lengths for 1D, areas for 2D and volumes for 3D elements.
It works for planar and curved elements
"""
self.__do_essential_memebers_exist__()
try:
from Florence import DisplacementFormulation
except ImportError:
raise ValueError("This functionality requires Florence's support")
if self.element_type != "line":
# FOR LINE ELEMENTS THIS APPROACH DOES NOT WORK AS JACOBIAN IS NOT WELL DEFINED
formulation = DisplacementFormulation(self)
sizes = np.zeros(self.nelem)
if not with_sign:
for elem in range(self.nelem):
LagrangeElemCoords = self.points[self.elements[elem,:],:]
sizes[elem] = formulation.GetVolume(formulation.function_spaces[0],
LagrangeElemCoords, LagrangeElemCoords, False, elem=elem)
else:
for elem in range(self.nelem):
LagrangeElemCoords = self.points[self.elements[elem,:],:]
sizes[elem] = formulation.GetSignedVolume(formulation.function_spaces[0],
LagrangeElemCoords, LagrangeElemCoords, False, elem=elem)
return sizes
else:
warn("Sizes of line elements could be incorrect if the mesh is curvilinear")
return self.Lengths()
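# USAGE SKETCH (illustrative comment, not part of the original source; requires
# Florence's DisplacementFormulation as noted in the method above):
#
#     sizes = mesh.Sizes()           # lengths/areas/volumes depending on element type
#     smallest_element = sizes.argmin()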
def AspectRatios(self,algorithm='edge_based'):
"""Compute aspect ratio of the mesh element-by-element.
For 2D meshes the aspect ratio is defined as the ratio of the maximum edge length
to the minimum edge length.
For 3D meshes aspect ratio can be either length or area based.
input:
algorithm: [str] 'edge_based' or 'face_based'
returns:
aspect_ratio: [1D array] of size (self.nelem) containing aspect ratio of elements
"""
assert self.points is not None
assert self.element_type is not None
aspect_ratio = None
if algorithm == 'edge_based':
if self.element_type == "tri":
edge_coords = self.points[self.elements[:,:3],:]
AB = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
AC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,0,:],axis=1)
BC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,1,:],axis=1)
minimum = np.minimum(np.minimum(AB,AC),BC)
maximum = np.maximum(np.maximum(AB,AC),BC)
aspect_ratio = 1.0*maximum/minimum
elif self.element_type == "quad":
edge_coords = self.points[self.elements[:,:4],:]
AB = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
BC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,1,:],axis=1)
CD = np.linalg.norm(edge_coords[:,3,:] - edge_coords[:,2,:],axis=1)
DA = np.linalg.norm(edge_coords[:,0,:] - edge_coords[:,3,:],axis=1)
minimum = np.minimum(np.minimum(np.minimum(AB,BC),CD),DA)
maximum = np.maximum(np.maximum(np.maximum(AB,BC),CD),DA)
aspect_ratio = 1.0*maximum/minimum
elif self.element_type == "tet":
edge_coords = self.points[self.elements[:,:4],:]
AB = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
AC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,0,:],axis=1)
AD = np.linalg.norm(edge_coords[:,3,:] - edge_coords[:,0,:],axis=1)
BC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,1,:],axis=1)
BD = np.linalg.norm(edge_coords[:,3,:] - edge_coords[:,1,:],axis=1)
CD = np.linalg.norm(edge_coords[:,3,:] - edge_coords[:,2,:],axis=1)
minimum = np.minimum(np.minimum(np.minimum(np.minimum(np.minimum(AB,AC),AD),BC),BD),CD)
maximum = np.maximum(np.maximum(np.maximum(np.maximum(np.maximum(AB,AC),AD),BC),BD),CD)
aspect_ratio = 1.0*maximum/minimum
elif self.element_type == "hex":
edge_coords = self.points[self.elements[:,:8],:]
AB = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
BC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,1,:],axis=1)
CD = np.linalg.norm(edge_coords[:,3,:] - edge_coords[:,2,:],axis=1)
DA = np.linalg.norm(edge_coords[:,0,:] - edge_coords[:,3,:],axis=1)
minimum0 = np.minimum(np.minimum(np.minimum(AB,BC),CD),DA)
maximum0 = np.maximum(np.maximum(np.maximum(AB,BC),CD),DA)
AB = np.linalg.norm(edge_coords[:,5,:] - edge_coords[:,4,:],axis=1)
BC = np.linalg.norm(edge_coords[:,6,:] - edge_coords[:,5,:],axis=1)
CD = np.linalg.norm(edge_coords[:,7,:] - edge_coords[:,6,:],axis=1)
DA = np.linalg.norm(edge_coords[:,4,:] - edge_coords[:,7,:],axis=1)
minimum1 = np.minimum(np.minimum(np.minimum(AB,BC),CD),DA)
maximum1 = np.maximum(np.maximum(np.maximum(AB,BC),CD),DA)
AB = np.linalg.norm(edge_coords[:,4,:] - edge_coords[:,0,:],axis=1)
BC = np.linalg.norm(edge_coords[:,5,:] - edge_coords[:,1,:],axis=1)
CD = np.linalg.norm(edge_coords[:,6,:] - edge_coords[:,2,:],axis=1)
DA = np.linalg.norm(edge_coords[:,7,:] - edge_coords[:,3,:],axis=1)
minimum2 = np.minimum(np.minimum(np.minimum(AB,BC),CD),DA)
maximum2 = np.maximum(np.maximum(np.maximum(AB,BC),CD),DA)
minimum = np.minimum(minimum0,np.minimum(minimum1,minimum2))
maximum = np.maximum(maximum0,np.maximum(maximum1,maximum2))
aspect_ratio = 1.0*maximum/minimum
elif self.element_type == "line":
raise ValueError("Line elments do no have aspect ratio")
elif algorithm == 'face_based':
raise NotImplementedError("Face/area based aspect ratio is not implemented yet")
return aspect_ratio
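# USAGE SKETCH (illustrative comment, not part of the original source): a quick
# mesh quality report based on the edge-based aspect ratio computed above
#
#     ar = mesh.AspectRatios()
#     print(ar.min(), ar.mean(), ar.max())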
def FaceNormals(self):
"""Computes outward unit normals on faces.
This is a generic method for all element types apart from lines. If the mesh lies in the 2D plane
then the unit outward normals will point in the Z direction. If the mesh is of quad or tri type but
embedded in 3D space, this will still compute the correct unit normals. Outwardness can only
be guaranteed for volume meshes.
This method is different from the method self.Normals() as the latter can compute normals
for 1D/2D elements in-plane
"""
self.__do_memebers_exist__()
points = np.copy(self.points)
if points.shape[1] < 3:
dum = np.zeros((points.shape[0],3))
dum[:,:points.shape[1]] = points
points = dum
if self.element_type == "tet" or self.element_type == "hex":
faces = self.faces
elif self.element_type == "tri" or self.element_type == "quad":
faces = self.elements
else:
raise ValueError("Cannot compute face normals on {}".format(self.element_type))
face_coords = self.points[faces[:,:3],:]
p1p0 = face_coords[:,1,:] - face_coords[:,0,:]
p2p0 = face_coords[:,2,:] - face_coords[:,0,:]
normals = np.cross(p1p0,p2p0)
norm_normals = np.linalg.norm(normals,axis=1)
normals[:,0] /= norm_normals
normals[:,1] /= norm_normals
normals[:,2] /= norm_normals
# CHECK IF THE NORMAL IS OUTWARD - FOR LINES DIRECTIONALITY DOES NOT MATTER
if self.element_type == "tet" or self.element_type == "hex":
self.GetElementsWithBoundaryFaces()
meds = self.Medians()
face_element_meds = meds[self.boundary_face_to_element[:,0],:]
p1pm = face_coords[:,1,:] - face_element_meds
# IF THE DOT PRODUCT OF NORMALS AND EDGE-MED NODE VECTOR IS NEGATIVE THEN FLIP
_check = np.einsum("ij,ij->i",normals,p1pm)
normals[np.less(_check,0.)] = -normals[np.less(_check,0.)]
return normals
def Normals(self, show_plot=False):
"""Computes unit outward normals to the boundary for all element types.
Unity and outwardness are guaranteed
"""
self.__do_memebers_exist__()
ndim = self.InferSpatialDimension()
if self.element_type == "tet" or self.element_type == "hex":
normals = self.FaceNormals()
elif self.element_type == "tri" or self.element_type == "quad" or self.element_type == "line":
if self.points.shape[1] == 3:
normals = self.FaceNormals()
else:
if self.element_type == "tri" or self.element_type == "quad":
edges = self.edges
elif self.element_type == "line":
edges = self.elements
edge_coords = self.points[edges[:,:2],:]
p1p0 = edge_coords[:,1,:] - edge_coords[:,0,:]
normals = np.zeros_like(p1p0)
normals[:,0] = -p1p0[:,1]
normals[:,1] = p1p0[:,0]
norm_normals = np.linalg.norm(normals,axis=1)
normals[:,0] /= norm_normals
normals[:,1] /= norm_normals
# CHECK IF THE NORMAL IS OUTWARD - FOR LINES DIRECTIONALITY DOES NOT MATTER
if self.element_type == "tri" or self.element_type == "quad":
self.GetElementsWithBoundaryEdges()
meds = self.Medians()
edge_element_meds = meds[self.boundary_edge_to_element[:,0],:]
p1pm = edge_coords[:,1,:] - edge_element_meds
# IF THE DOT PRODUCT OF NORMALS AND EDGE-MED NODE VECTOR IS NEGATIVE THEN FLIP
_check = np.einsum("ij,ij->i",normals,p1pm)
normals[np.less(_check,0.)] = -normals[np.less(_check,0.)]
if show_plot:
if ndim == 2:
mid_edge_coords = 0.5*(edge_coords[:,1,:] + edge_coords[:,0,:])
import matplotlib.pyplot as plt
figure = plt.figure()
self.SimplePlot(figure=figure, show_plot=False)
q = plt.quiver(mid_edge_coords[:,0], mid_edge_coords[:,1],
normals[:,0], normals[:,1],
color='Teal', headlength=5, width=0.004)
plt.axis('equal')
plt.axis('off')
plt.tight_layout()
plt.show()
elif ndim == 3:
mid_face_coords = np.sum(self.points[self.faces,:3],axis=1)/self.faces.shape[1]
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
from mayavi import mlab
figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))
self.SimplePlot(figure=figure, show_plot=False)
mlab.quiver3d(mid_face_coords[:,0], mid_face_coords[:,1], mid_face_coords[:,2],
normals[:,0], normals[:,1], normals[:,2],
color=(0.,128./255,128./255),line_width=2)
mlab.show()
return normals
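# USAGE SKETCH (illustrative comment, not part of the original source): unit
# outward normals on the boundary of the mesh
#
#     normals = mesh.Normals()       # one unit normal per boundary face (3D) or edge (2D)
#     # rows have unit length: np.linalg.norm(normals, axis=1) is ~1 everywhere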
def Angles(self, degrees=True):
"""Compute angles of 2D meshes. Strictly 2D meshes and linear elements.
If the mesh is curved the angles would be inaccurate
input:
degrees [bool] if True returns angles in degrees
otherwise in radians
returns:
angles [2D array] of angles per element. Angles are
computed per element so every element will
have as many angles as its nodes
"""
self.__do_essential_memebers_exist__()
if self.InferElementalDimension() != 2:
raise ValueError("Angles can be computed only for 2D elements")
if self.InferSpatialDimension() != 2:
raise ValueError("Angles can be computed only in 2-dimensional plane")
nodeperelem = self.InferNumberOfNodesPerLinearElement()
angles = np.zeros((self.nelem, nodeperelem))
norm = lambda x: np.linalg.norm(x,axis=1)
edge_coords = self.points[self.elements[:,:],:]
if self.element_type == "tri":
AB = edge_coords[:,1,:] - edge_coords[:,0,:]
AC = edge_coords[:,2,:] - edge_coords[:,0,:]
BC = edge_coords[:,2,:] - edge_coords[:,1,:]
angles[:,0] = np.einsum("ij,ij->i",AB,AC) / (norm(AB)*norm(AC))
angles[:,1] = np.einsum("ij,ij->i",AC,BC) / (norm(AC)*norm(BC))
angles[:,2] = np.einsum("ij,ij->i",BC,-AB)/ (norm(BC)*norm(AB))
angles = np.arccos(angles)
elif self.element_type == "quad":
AB = edge_coords[:,1,:] - edge_coords[:,0,:]
BC = edge_coords[:,2,:] - edge_coords[:,1,:]
CD = edge_coords[:,3,:] - edge_coords[:,2,:]
DA = edge_coords[:,0,:] - edge_coords[:,3,:]
angles[:,0] = np.einsum("ij,ij->i",AB,BC) / (norm(AB)*norm(BC))
angles[:,1] = np.einsum("ij,ij->i",BC,CD) / (norm(BC)*norm(CD))
angles[:,2] = np.einsum("ij,ij->i",CD,DA) / (norm(CD)*norm(DA))
angles[:,3] = np.einsum("ij,ij->i",DA,-AB)/ (norm(DA)*norm(AB))
angles = np.arccos(angles)
if degrees:
angles *= 180/np.pi
return angles
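# USAGE SKETCH (illustrative comment, not part of the original source): internal
# angles of a linear 2D mesh, e.g. to flag badly shaped triangles
#
#     angles = mesh.Angles(degrees=True)        # shape (nelem, nodes per element)
#     skewed = np.where(angles.min(axis=1) < 15.)[0]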
def BoundingBoxes(self, show_plot=False, figure=None):
"""Computes a bounding box for every element.
This method complements the Bounds method/property in that it computes
the bounds for every individual element
returns:
bboxes [3D array] of nelem x ndim x ndim of bounding
boxes for every element
"""
self.__do_essential_memebers_exist__()
ndim = self.InferSpatialDimension()
all_elem_coords = self.points[self.elements]
mins = all_elem_coords.min(axis=1)
maxs = all_elem_coords.max(axis=1)
bboxes = np.zeros((2*self.nelem,self.points.shape[1]))
bboxes[::2] = mins
bboxes[1::2] = maxs
bboxes = bboxes.reshape(self.nelem,2,self.points.shape[1])
if show_plot:
if ndim == 3:
point_generator = lambda bbox: np.array([
[ bbox[0,0], bbox[0,1], bbox[0,2] ],
[ bbox[1,0], bbox[0,1], bbox[0,2] ],
[ bbox[1,0], bbox[1,1], bbox[0,2] ],
[ bbox[0,0], bbox[1,1], bbox[0,2] ],
[ bbox[0,0], bbox[0,1], bbox[1,2] ],
[ bbox[1,0], bbox[0,1], bbox[1,2] ],
[ bbox[1,0], bbox[1,1], bbox[1,2] ],
[ bbox[0,0], bbox[1,1], bbox[1,2] ]
])
elif ndim == 2:
point_generator = lambda bbox: np.array([
[ bbox[0,0], bbox[0,1] ],
[ bbox[1,0], bbox[0,1] ],
[ bbox[1,0], bbox[1,1] ],
[ bbox[0,0], bbox[1,1] ]
])
nsize = 4 if ndim ==2 else 8
ranger = np.arange(nsize)
bmesh = Mesh()
bmesh.element_type = "quad" if ndim ==2 else "hex"
bmesh.elements = np.arange(self.nelem*nsize).reshape(self.nelem,nsize)
bmesh.points = np.zeros((self.nelem*nsize,ndim))
bmesh.nelem = self.nelem
bmesh.nnode = bmesh.points.shape[0]
for i in range(0,self.nelem):
bmesh.points[i*nsize:(i+1)*nsize,:] = point_generator(bboxes[i])
if ndim == 2:
import matplotlib.pyplot as plt
if figure is None:
figure = plt.figure()
self.SimplePlot(figure=figure, show_plot=False)
bmesh.SimplePlot(figure=figure, show_plot=False, edge_color='r')
plt.show()
else:
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
from mayavi import mlab
if figure is None:
figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))
self.SimplePlot(figure=figure, show_plot=False)
bmesh.SimplePlot(figure=figure, show_plot=False, plot_faces=False, edge_color='r')
mlab.show()
return bboxes
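# USAGE SKETCH (illustrative comment, not part of the original source): per-element
# axis-aligned bounding boxes
#
#     bboxes = mesh.BoundingBoxes()             # shape (nelem, 2, ndim): [mins; maxs]
#     extents = bboxes[:,1,:] - bboxes[:,0,:]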
def Medians(self, geometric=True):
"""Computes median of the elements tri, tet, quad, hex based on the interpolation function
input:
geometric [Bool] geometrically computes median without relying on FEM bases
returns:
median: [ndarray] of median of elements
bases_at_median: [1D array] of (p=1) bases at median
"""
self.__do_essential_memebers_exist__()
median = None
if geometric == True:
median = np.sum(self.points[self.elements,:],axis=1)/self.elements.shape[1]
return median
else:
try:
from Florence.FunctionSpace import Tri, Tet
from Florence.QuadratureRules import FeketePointsTri, FeketePointsTet
except ImportError:
raise ImportError("This functionality requires florence's support")
if self.element_type == "tri":
eps = FeketePointsTri(2)
middle_point_isoparametric = eps[6,:]
if not np.isclose(sum(middle_point_isoparametric),-0.6666666):
raise ValueError("Median of triangle does not match [-0.3333,-0.3333]. "
"Did you change your nodal spacing or interpolation functions?")
hpBases = Tri.hpNodal.hpBases
bases_for_middle_point = hpBases(0,middle_point_isoparametric[0],
middle_point_isoparametric[1])[0]
median = np.einsum('ijk,j',self.points[self.elements[:,:3],:],bases_for_middle_point)
elif self.element_type == "tet":
middle_point_isoparametric = FeketePointsTet(3)[21]
if not np.isclose(sum(middle_point_isoparametric),-1.5):
raise ValueError("Median of tetrahedral does not match [-0.5,-0.5,-0.5]. "
"Did you change your nodal spacing or interpolation functions?")
# C = self.InferPolynomialDegree() - 1
hpBases = Tet.hpNodal.hpBases
bases_for_middle_point = hpBases(0,middle_point_isoparametric[0],
middle_point_isoparametric[1],middle_point_isoparametric[2])[0]
median = np.einsum('ijk,j',self.points[self.elements[:,:4],:],bases_for_middle_point)
else:
raise NotImplementedError('Median for {} elements not implemented yet'.format(self.element_type))
return median, bases_for_middle_point
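# USAGE SKETCH (illustrative comment, not part of the original source): geometric
# element centroids (medians)
#
#     medians = mesh.Medians()                  # shape (nelem, ndim)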
def FindElementContainingPoint(self, point, algorithm="fem", find_parametric_coordinate=True,
scaling_factor=5., tolerance=1.0e-7, maxiter=20, use_simple_bases=False, return_on_geometric_finds=False,
initial_guess=None, initial_guesses=None, restart=False):
"""Find which element does a point lie in using specificed algorithm.
The FEM isoparametric coordinate of the point is returned as well.
If the isoparametric coordinate of the point is not required, issue find_parametric_coordinate=False
input:
point: [tuple] XYZ of enquiry point
algorithm: [str] either 'fem' or 'geometric'. The 'fem' algorithm uses k-d tree
search to get the right bounding box around as few elements as possible.
The size of the box can be specified by the user through the keyword scaling_factor.
The geometric algorithm is a lot more stable and converges much quicker.
The geometric algorithm first identifies the right element using a volume check,
then tries all possible combinations of initial guesses to get the FEM
isoparametric point. Trying all possible combinations with FEM can be potentially
more costly since the bounding box size can be large.
return_on_geometric_finds:
[bool] if the geometric algorithm is chosen and this option is on, then the indices
of the elements are returned as soon as the volume check is done and no further
checks are performed. This is useful when searching for points that are meant to
be in the interior of the elements rather than at the boundaries or nodes;
otherwise the number of elements returned by the geometric algorithm can be
more than one
return:
element_index [int/1D array of ints] element(s) containing the point.
If the point is shared between many elements a 1D array is returned
iso_parametric_point [1D array] the parametric coordinate of the point within the element.
return only if find_parametric_coordinate=True
"""
if restart:
if initial_guesses is None:
if self.element_type == "pent":
initial_guesses = np.array([
[0.,0.],
[1.,0.],
[1.,0.5],
[0.5,1.],
[0.,1.],
])
else:
raise ValueError("restart option for this element type is only supported if initial_guesses are available")
for i in range(initial_guesses.shape[0]):
ret_val = self.FindElementContainingPoint(point, algorithm=algorithm,
find_parametric_coordinate=find_parametric_coordinate,
scaling_factor=scaling_factor, tolerance=tolerance, maxiter=maxiter,
use_simple_bases=use_simple_bases, return_on_geometric_finds=return_on_geometric_finds,
initial_guess=initial_guesses[i,:], restart=False)
if ret_val[1] is not None:
break
return ret_val
self.__do_essential_memebers_exist__()
C = self.InferPolynomialDegree() - 1
if C > 0:
warn("Note that finding a point within higher order curved mesh is not supported yet")
if C > 0 and algorithm == "geometric":
warn("High order meshes are not supported using geometric algorithim. I am going to operate on linear mesh")
if use_simple_bases:
raise ValueError("Simple bases for high order elements are not available")
return
ndim = self.InferSpatialDimension()
assert len(point) == ndim
from Florence.FunctionSpace import PointInversionIsoparametricFEM
candidate_element, candidate_piso = None, None
if self.element_type == "tet" and algorithm == "fem":
algorithm = "geometric"
if algorithm == "fem":
scaling_factor = float(scaling_factor)
max_h = self.EdgeLengths().max()
# max_h=1.
# FOR CURVED ELEMENTS
# max_h = self.LargestSegment().max()
# GET A BOUNDING BOX AROUND THE POINT, n TIMES LARGER THAN MAXIMUM h, WHERE n is the SCALING FACTOR
if ndim==3:
bounding_box = (point[0]-scaling_factor*max_h,
point[1]-scaling_factor*max_h,
point[2]-scaling_factor*max_h,
point[0]+scaling_factor*max_h,
point[1]+scaling_factor*max_h,
point[2]+scaling_factor*max_h)
elif ndim==2:
bounding_box = (point[0]-scaling_factor*max_h,
point[1]-scaling_factor*max_h,
point[0]+scaling_factor*max_h,
point[1]+scaling_factor*max_h)
# SELECT ELEMENTS ONLY WITHIN THE BOUNDING BOX
mesh = deepcopy(self)
idx_kept_element = self.RemoveElements(bounding_box)[1]
if ndim==3:
for i in range(self.nelem):
coord = self.points[self.elements[i,:],:]
p_iso, converged = PointInversionIsoparametricFEM(self.element_type, C, coord, point,
tolerance=tolerance, maxiter=maxiter, verbose=True, use_simple_bases=use_simple_bases,
initial_guess=initial_guess)
if converged:
# if p_iso[0] >= -1. and p_iso[0] <=1. and \
# p_iso[1] >= -1. and p_iso[1] <=1. and \
# p_iso[2] >= -1. and p_iso[2] <=1. :
if (p_iso[0] > -1. or np.isclose(p_iso[0],-1.,rtol=tolerance)) and \
(p_iso[0] < 1. or np.isclose(p_iso[0], 1.,rtol=tolerance)) and \
(p_iso[1] > -1. or np.isclose(p_iso[1],-1.,rtol=tolerance)) and \
(p_iso[1] < 1. or np.isclose(p_iso[1], 1.,rtol=tolerance)) and \
(p_iso[2] > -1. or np.isclose(p_iso[2],-1.,rtol=tolerance)) and \
(p_iso[2] < 1. or np.isclose(p_iso[2], 1.,rtol=tolerance)) :
candidate_element, candidate_piso = i, p_iso
break
elif ndim==2:
for i in range(self.nelem):
coord = self.points[self.elements[i,:],:]
p_iso, converged = PointInversionIsoparametricFEM(self.element_type, C, coord, point,
tolerance=tolerance, maxiter=maxiter, verbose=True, use_simple_bases=use_simple_bases,
initial_guess=initial_guess)
# if p_iso[0] >= -1. and p_iso[0] <=1. and \
# p_iso[1] >= -1. and p_iso[1] <=1.:
# candidate_element, candidate_piso = i, p_iso
# break
if (p_iso[0] > -1. or np.isclose(p_iso[0],-1.,rtol=tolerance)) and \
(p_iso[0] < 1. or np.isclose(p_iso[0], 1.,rtol=tolerance)) and \
(p_iso[1] > -1. or np.isclose(p_iso[1],-1.,rtol=tolerance)) and \
(p_iso[1] < 1. or np.isclose(p_iso[1], 1.,rtol=tolerance)) :
candidate_element, candidate_piso = i, p_iso
break
self.__update__(mesh)
# print(candidate_element)
if candidate_element is not None:
candidate_element = idx_kept_element[candidate_element]
if find_parametric_coordinate:
return candidate_element, candidate_piso
else:
return candidate_element
else:
if self.element_type == "tet":
from Florence.QuadratureRules.FeketePointsTet import FeketePointsTet
initial_guesses = FeketePointsTet(C)
def GetVolTet(a0,b0,c0,d0):
det_array = np.dstack((a0-d0,b0-d0,c0-d0))
# FIND VOLUME OF ALL THE ELEMENTS
volume = 1./6.*np.abs(np.linalg.det(det_array))
return volume
a = self.points[self.elements[:,0],:]
b = self.points[self.elements[:,1],:]
c = self.points[self.elements[:,2],:]
d = self.points[self.elements[:,3],:]
o = np.tile(point,self.nelem).reshape(self.nelem,a.shape[1])
# TOTAL VOLUME
vol = self.Volumes()
# PARTS' VOLUMES
vol0 = GetVolTet(a,b,c,o)
vol1 = GetVolTet(a,b,o,d)
vol2 = GetVolTet(a,o,c,d)
vol3 = GetVolTet(o,b,c,d)
criterion_check = vol0+vol1+vol2+vol3-vol
elems = np.isclose(criterion_check,0.,rtol=tolerance)
elems_idx = np.where(elems==True)[0]
elif self.element_type == "quad":
from Florence.QuadratureRules.GaussLobattoPoints import GaussLobattoPointsQuad
initial_guesses = GaussLobattoPointsQuad(C)
def GetAreaQuad(a0,b0,c0,d0):
# AREA OF QUAD ABCD = AREA OF ABC + AREA OF ACD
a00 = np.ones((a0.shape[0],3),dtype=np.float64); a00[:,:2] = a0
b00 = np.ones((b0.shape[0],3),dtype=np.float64); b00[:,:2] = b0
c00 = np.ones((c0.shape[0],3),dtype=np.float64); c00[:,:2] = c0
d00 = np.ones((d0.shape[0],3),dtype=np.float64); d00[:,:2] = d0
# FIND AREAS ABC
area0 = np.abs(np.linalg.det(np.dstack((a00,b00,c00))))
# FIND AREAS ACD
area1 = np.abs(np.linalg.det(np.dstack((a00,c00,d00))))
# FIND AREAS OF ALL THE ELEMENTS
area = 0.5*(area0+area1)
return area
a = self.points[self.elements[:,0],:]
b = self.points[self.elements[:,1],:]
c = self.points[self.elements[:,2],:]
d = self.points[self.elements[:,3],:]
o = np.tile(point,self.nelem).reshape(self.nelem,a.shape[1])
# TOTAL VOLUME
vol = self.Areas()
# PARTS' VOLUMES - DONT CHANGE THE ORDERING OF SPECIALLY vol1
vol0 = GetAreaQuad(o,c,b,a)
vol1 = GetAreaQuad(o,a,d,c)
criterion_check = vol0+vol1-vol
elems = np.isclose(criterion_check,0.,rtol=tolerance)
elems_idx = np.where(elems==True)[0]
else:
raise NotImplementedError("Geometric algorithm for {} elements not implemented yet".format(self.element_type))
if return_on_geometric_finds:
return elems_idx
for i in range(len(elems_idx)):
coord = self.points[self.elements[elems_idx[i],:],:]
# TRY ALL POSSIBLE INITIAL GUESSES - THIS IS CHEAP AS THE SEARCH SPACE CONTAINS ONLY A
# FEW ELEMENTS
for guess in initial_guesses:
p_iso, converged = PointInversionIsoparametricFEM(self.element_type, C, coord, point,
tolerance=tolerance, maxiter=maxiter, verbose=True,
use_simple_bases=use_simple_bases, initial_guess=guess)
if converged:
break
if converged:
candidate_element, candidate_piso = elems_idx[i], p_iso
break
if find_parametric_coordinate:
return candidate_element, candidate_piso
else:
return candidate_element
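# USAGE SKETCH (illustrative comment, not part of the original source): locating
# the element that contains a physical point, together with the isoparametric
# coordinate of that point within the element
#
#     elem, p_iso = mesh.FindElementContainingPoint((0.5, 0.5))
#     # the point must have as many entries as the spatial dimension of the mesh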
def AverageJacobian(self):
"""Computes average Jacobian of elements for all element types over a mesh
This is a generic method that for 1D=lengths, for 2D=areas and for 3D=volumes.
It works for planar and curved elements
"""
self.__do_essential_memebers_exist__()
try:
from Florence import DisplacementFormulation
except ImportError:
raise ValueError("This functionality requires Florence's support")
if self.element_type != "line":
# FOR LINE ELEMENTS THIS APPROACH DOES NOT WORK AS JACOBIAN IS NOT WELL DEFINED
formulation = DisplacementFormulation(self)
sizes = np.zeros(self.nelem)
for elem in range(self.nelem):
LagrangeElemCoords = self.points[self.elements[elem,:],:]
sizes[elem] = formulation.GetAverageJacobian(formulation.function_spaces[0],
LagrangeElemCoords, LagrangeElemCoords, False, elem=elem)
return sizes.mean()
else:
raise ValueError("Not implemented for 1D elements")
def LargestSegment(self, smallest_element=True, nsamples=30,
plot_segment=False, plot_element=False, figure=None, save=False, filename=None):
"""Finds the largest segment that can fit in an element. For curvilinear elements
this measure can be used as (h) for h-refinement studies
input:
smallest_element [bool] if the largest segment size is to be computed in the
smallest element (i.e. element with the smallest area in 2D or
smallest volume in 3D). Default is True. If False, then the largest
segment in the largest element will be computed.
nsamples: [int] number of sample points along the curved
edges of the elements. The maximum distance between
all combinations of these points is the largest
segment
plot_segment: [bool] plots the segment on top of the [curved/straight] mesh
plot_element: [bool] plots the straight/curved element to which the segment
belongs
figure: [an instance of matplotlib/mayavi.mlab figure for 2D/3D]
save: [bool] whether to save the figure or not
filename: [str] file name for the figure to be saved
returns:
largest_segment_length [float] maximum segment length that can be fit within either the
smallest or the largest element, depending on smallest_element
"""
self.__do_memebers_exist__()
if self.element_type == "hex" or self.element_type == "tet":
quantity = self.Volumes()
elif self.element_type == "quad" or self.element_type == "tri":
quantity = self.Areas()
if smallest_element:
omesh = self.GetLocalisedMesh(quantity.argmin())
else:
omesh = self.GetLocalisedMesh(quantity.argmax())
try:
from Florence.PostProcessing import PostProcess
except:
raise ImportError('This function requires florence PostProcessing module')
if save:
if filename is None:
raise ValueError("No file name provided. I am going to write one the current directory")
filename = PWD(__file__) + "/output.png"
if self.element_type == "tri":
tmesh = PostProcess.TessellateTris(omesh,np.zeros_like(omesh.points),
plot_edges=True, interpolation_degree=nsamples)
elif self.element_type == "quad":
tmesh = PostProcess.TessellateQuads(omesh,np.zeros_like(omesh.points),
plot_edges=True, interpolation_degree=nsamples)
elif self.element_type == "tet":
tmesh = PostProcess.TessellateTets(omesh,np.zeros_like(omesh.points),
plot_edges=True, interpolation_degree=nsamples)
elif self.element_type == "hex":
tmesh = PostProcess.TessellateHexes(omesh,np.zeros_like(omesh.points),
plot_edges=True, interpolation_degree=nsamples)
ndim = omesh.InferSpatialDimension()
nnode = tmesh.points.shape[0]
largest_segment_lengths = []
nodes = np.array((1,ndim))
for i in range(nnode):
tiled_points = np.tile(tmesh.points[i,:][:,None],nnode).T
segment_lengths = np.linalg.norm(tmesh.points - tiled_points, axis=1)
largest_segment_lengths.append(segment_lengths.max())
nodes = np.vstack((nodes, np.array([i,segment_lengths.argmax()])[None,:]))
largest_segment_lengths = np.array(largest_segment_lengths)
nodes = nodes[1:,:]
largest_segment_length = largest_segment_lengths.max()
corresponding_nodes = nodes[largest_segment_lengths.argmax(),:]
if plot_segment:
segment_coords = tmesh.points[corresponding_nodes,:]
if ndim==2:
import matplotlib.pyplot as plt
if figure == None:
figure = plt.figure()
if plot_element:
if omesh.element_type == "tri":
PostProcess.CurvilinearPlotTri(omesh,
np.zeros_like(omesh.points),plot_points=True,
figure=figure, interpolation_degree=nsamples, show_plot=False)
elif omesh.element_type == "quad":
PostProcess.CurvilinearPlotQuad(omesh,
np.zeros_like(omesh.points),plot_points=True,
figure=figure, interpolation_degree=nsamples, show_plot=False)
tmesh.SimplePlot(figure=figure,show_plot=False)
if save:
plt.savefig(filename,bbox_inches="tight",dpi=300)
plt.show()
elif ndim==3:
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
from mayavi import mlab
if figure is None:
figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))
if plot_element:
if omesh.element_type == "tet":
PostProcess.CurvilinearPlotTet(omesh,
np.zeros_like(omesh.points),plot_points=True, point_radius=0.13,
figure=figure, interpolation_degree=nsamples, show_plot=False)
elif omesh.element_type == "hex":
PostProcess.CurvilinearPlotHex(omesh,
np.zeros_like(omesh.points),plot_points=True,
figure=figure, interpolation_degree=nsamples, show_plot=False)
tmesh.GetEdges()
edge_coords = tmesh.points[np.unique(tmesh.all_edges),:]
mlab.triangular_mesh(tmesh.points[:,0],tmesh.points[:,1],tmesh.points[:,2],
tmesh.elements, representation='wireframe', color=(0,0,0))
# # mlab.points3d(edge_coords[:,0],edge_coords[:,1],edge_coords[:,2],color=(1., 99/255., 71./255), scale_factor=0.03)
# # mlab.plot3d(segment_coords[:,0],segment_coords[:,1],segment_coords[:,2], color=(227./255, 66./255, 52./255))
mlab.points3d(edge_coords[:,0],edge_coords[:,1],edge_coords[:,2],color=(1., 99/255., 71./255), scale_factor=0.17)
mlab.plot3d(segment_coords[:,0],segment_coords[:,1],segment_coords[:,2],
color=(227./255, 66./255, 52./255), line_width=10., representation="wireframe")
if save:
mlab.savefig(filename,dpi=300)
mlab.show()
return largest_segment_length
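# USAGE SKETCH (illustrative comment, not part of the original source; requires
# Florence's PostProcessing module as noted above): an h-measure for curvilinear
# meshes based on the largest segment that fits in the smallest element
#
#     h = mesh.LargestSegment(nsamples=30)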
def CheckNodeNumbering(self,change_order_to='retain', verbose=True):
"""Checks for node numbering order of the imported mesh. Mesh can be tri or tet
input:
change_order_to: [str] {'clockwise','anti-clockwise','retain'} changes the order to clockwise,
anti-clockwise or retains the numbering order - default is 'retain'
output:
original_order: [str] {'clockwise','anti-clockwise','retain'} returns the original numbering order"""
self.__do_essential_memebers_exist__()
# CHECK IF IT IS LINEAR MESH
nodeperelem = self.InferNumberOfNodesPerLinearElement()
assert self.elements.shape[1] == nodeperelem
quantity = np.array([])
if self.element_type == "tri":
quantity = self.Areas(with_sign=True)
elif self.element_type == "quad":
quantity = self.Areas(with_sign=True)
elif self.element_type == "tet":
quantity = self.Volumes(with_sign=True)
elif self.element_type == "hex":
quantity = self.Volumes(with_sign=True)
original_order = ''
# CHECK NUMBERING
if (quantity > 0).all():
original_order = 'anti-clockwise'
if change_order_to == 'clockwise':
self.elements = np.fliplr(self.elements)
elif (quantity < 0).all():
original_order = 'clockwise'
if change_order_to == 'anti-clockwise':
self.elements = np.fliplr(self.elements)
else:
original_order = 'mixed'
if change_order_to == 'clockwise':
self.elements[quantity>0,:] = np.fliplr(self.elements[quantity>0,:])
elif change_order_to == 'anti-clockwise':
self.elements[quantity<0,:] = np.fliplr(self.elements[quantity<0,:])
if original_order == 'anti-clockwise':
print(u'\u2713'.encode('utf8')+b' : ','Imported mesh has',original_order,'node ordering')
else:
print(u'\u2717'.encode('utf8')+b' : ','Imported mesh has',original_order,'node ordering')
return original_order
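# USAGE SKETCH (illustrative comment, not part of the original source): enforce
# anti-clockwise (positive) node numbering on the mesh
#
#     original_order = mesh.CheckNodeNumbering(change_order_to='anti-clockwise')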
def GetElementsEdgeNumberingTri(self):
"""Finds edges of elements and their flags saying which edge they are [0,1,2].
At most a triangle can have all its three edges on the boundary.
output:
edge_elements: [2D array] nedges x 2 array mapping every edge to the element
it belongs to [column 0] and its local edge number within that element [column 1]
Note that this method sets the self.edge_to_element to edge_elements,
so the return value is not strictly necessary
"""
if isinstance(self.edge_to_element,np.ndarray):
if self.edge_to_element.shape[0] > 1:
return self.edge_to_element
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
if self.all_edges is None:
self.GetEdgesTri()
all_edges = np.concatenate((self.elements[:,:2],self.elements[:,[1,2]],
self.elements[:,[2,0]]),axis=0).astype(np.int64)
all_edges, idx = unique2d(all_edges,consider_sort=True,order=False, return_index=True)
edge_elements = np.zeros((all_edges.shape[0],2),dtype=np.int64)
edge_elements[:,0] = idx % self.elements.shape[0]
edge_elements[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_elements
return self.edge_to_element
def GetElementsWithBoundaryEdgesTri(self):
"""Finds elements which have edges on the boundary.
At most an element can have all its three edges on the boundary.
output:
edge_elements: [2D array] array containing elements which have edge
on the boundary [column 0] and a flag stating which edges they are [column 1]
"""
if isinstance(self.boundary_edge_to_element,np.ndarray):
if self.boundary_edge_to_element.shape[1] > 1 and self.boundary_edge_to_element.shape[0] > 1:
return self.boundary_edge_to_element
# DO NOT COMPUTE EDGES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES
assert self.elements is not None
assert self.edges is not None
edge_elements = np.zeros((self.edges.shape[0],2),dtype=np.int64)
# FIND WHICH EDGE NODES ARE IN WHICH ELEMENT
for i in range(self.edges.shape[0]):
x = []
for j in range(2):
x.append(np.where(self.elements[:,:3]==self.edges[i,j])[0])
# FIND WHICH ELEMENTS CONTAIN ALL EDGE NODES - FOR INTERIOR EDGES
# THERE CAN BE MORE THAN ONE ELEMENT CONTAINING ALL EDGE NODES
z = x[0]
for k in range(1,len(x)):
z = np.intersect1d(x[k],z)
# CHOOSE ONLY ONE OF THESE ELEMENTS
edge_elements[i,0] = z[0]
# IN WHICH COLUMNS OF THAT ELEMENT ARE THE EDGE NODES LOCATED
cols = np.array([np.where(self.elements[z[0],:]==self.edges[i,0])[0],
np.where(self.elements[z[0],:]==self.edges[i,1])[0]
])
cols = np.sort(cols.flatten())
if cols[0] == 0 and cols[1] == 1:
edge_elements[i,1] = 0
elif cols[0] == 1 and cols[1] == 2:
edge_elements[i,1] = 1
elif cols[0] == 0 and cols[1] == 2:
edge_elements[i,1] = 2
self.boundary_edge_to_element = edge_elements
return edge_elements
def GetElementsWithBoundaryFacesTet(self):
"""Finds elements which have faces on the boundary.
At most a tetrahedral element can have all its four faces on the boundary.
output:
boundary_face_to_element: [2D array] array containing elements which have face
on the boundary [column 0] and a flag stating which faces they are [column 1]
"""
if self.boundary_face_to_element is not None:
return self.boundary_face_to_element
# DO NOT COMPUTE FACES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES
assert self.elements is not None
assert self.faces is not None
# THIS METHOD ALWAYS RETURNS THE FACE TO ELEMENT ARRAY, AND DOES NOT CHECK
# IF THIS HAS BEEN COMPUTED BEFORE, THE REASON BEING THAT THE FACES CAN COME
# EXTERNALLY WHOSE ARRANGEMENT WOULD NOT CORRESPOND TO THE ONE USED INTERNALLY
# HENCE THIS MAPPING BECOMES NECESSARY
all_faces = np.concatenate((self.elements[:,:3],self.elements[:,[0,1,3]],
self.elements[:,[0,2,3]],self.elements[:,[1,2,3]]),axis=0).astype(self.faces.dtype)
all_faces_in_faces = in2d(all_faces,self.faces[:,:3],consider_sort=True)
all_faces_in_faces = np.where(all_faces_in_faces==True)[0]
boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)
boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]
boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]
# SO FAR WE HAVE COMPUTED THE ELEMENTS THAT CONTAIN FACES, HOWEVER
# NOTE THAT WE STILL HAVE NOT COMPUTED A MAPPING BETWEEN ELEMENTS AND
# FACES. WE ONLY KNOW WHICH ELEMENTS CONTAIN FACES FROM in2d.
# WE NEED TO FIND THIS MAPPING NOW
C = self.InferPolynomialDegree() - 1
node_arranger = NodeArrangementTet(C)[0]
# WE NEED TO DO THIS DUMMY RECONSTRUCTION OF FACES BASED ON ELEMENTS
faces = self.elements[boundary_face_to_element[:,0][:,None],
node_arranger[boundary_face_to_element[:,1],:]].astype(self.faces.dtype)
# CHECK FOR THIS CONDITION AS ARRANGEMENT IS NO LONGER MAINTAINED
assert np.sum(faces[:,:3].astype(np.int64) - self.faces[:,:3].astype(np.int64)) == 0
# NOW GET THE ROW MAPPING BETWEEN OLD FACES AND NEW FACES
from Florence.Tensor import shuffle_along_axis
row_mapper = shuffle_along_axis(faces[:,:3],self.faces[:,:3],consider_sort=True)
# UPDATE THE MAP
boundary_face_to_element[:,:] = boundary_face_to_element[row_mapper,:]
self.boundary_face_to_element = boundary_face_to_element
return self.boundary_face_to_element
def GetElementsFaceNumberingTet(self):
"""Finds which faces belong to which elements and which faces of the elements
they are e.g. 0, 1, 2 or 3.
output:
face_elements: [2D array] nfaces x 2 array mapping every face to the element
it belongs to [column 0] and its local face number within that element [column 1]
Note that this method also sets the self.face_to_element to face_elements,
so the return value is not strictly necessary
"""
if isinstance(self.face_to_element,np.ndarray):
if self.face_to_element.shape[0] > 1:
return self.face_to_element
assert self.elements is not None
# GET ALL FACES FROM ELEMENT CONNECTIVITY
if self.all_faces is None:
self.GetFacesTet()
all_faces = np.concatenate((self.elements[:,:3],self.elements[:,[0,1,3]],
self.elements[:,[0,2,3]],self.elements[:,[1,2,3]]),axis=0).astype(np.int64)
_,idx = unique2d(all_faces,consider_sort=True,order=False, return_index=True)
face_elements = np.zeros((self.all_faces.shape[0],2),dtype=np.int64)
face_elements[:,0] = idx % self.elements.shape[0]
face_elements[:,1] = idx // self.elements.shape[0]
self.face_to_element = face_elements
return self.face_to_element
def ArrangeFacesTet(self):
"""Arranges all the faces of tetrahedral elements
with triangular type node ordering """
if self.all_faces is None:
self.all_faces = self.GetFacesTet()
if self.face_to_element is None:
self.GetElementsFaceNumberingTet()
# DETERMINE DEGREE
p = self.InferPolynomialDegree()
node_arranger = NodeArrangementTet(p-1)[0]
# for i in range(self.face_to_element.shape[0]):
# self.all_faces = self.elements[self.face_to_element[i,0],node_arranger[self.face_to_element[i,1],:]]
self.all_faces = self.elements[self.face_to_element[:,0][:,None],node_arranger[self.face_to_element[:,1],:]]
def GetElementsEdgeNumberingQuad(self):
"""Finds edges of elements and their flags saying which edge they are [0,1,2,3].
At most a quad can have all its four edges on the boundary.
output:
edge_elements: [2D array] nedges x 2 array mapping every edge to the element
it belongs to [column 0] and its local edge number within that element [column 1]
Note that this method sets the self.edge_to_element to edge_elements,
so the return value is not strictly necessary
"""
if isinstance(self.edge_to_element,np.ndarray):
if self.edge_to_element.shape[0] > 1:
return self.edge_to_element
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
if self.all_edges is None:
self.GetEdgesQuad()
p = self.InferPolynomialDegree()
# FIND WHICH FACE NODES ARE IN WHICH ELEMENT
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.int64)
all_edges, idx = unique2d(all_edges,consider_sort=True,order=False, return_index=True)
edge_elements = np.zeros((all_edges.shape[0],2),dtype=np.int64)
# edge_elements = np.zeros((self.edges.shape[0],2),dtype=np.int64)
edge_elements[:,0] = idx % self.elements.shape[0]
edge_elements[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_elements
return self.edge_to_element
def GetElementsWithBoundaryEdgesQuad(self):
"""Finds elements which have edges on the boundary.
At most a quad can have all its four edges on the boundary.
output:
boundary_edge_to_element: [2D array] array containing elements which have an edge
on the boundary [column 0] and a flag stating which edges they are [column 1]
"""
if isinstance(self.boundary_edge_to_element,np.ndarray):
if self.boundary_edge_to_element.shape[1] > 1 and self.boundary_edge_to_element.shape[0] > 1:
return self.boundary_edge_to_element
# DO NOT COMPUTE EDGES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES
assert self.elements is not None
assert self.edges is not None
p = self.InferPolynomialDegree()
# FIND WHICH FACE NODES ARE IN WHICH ELEMENT
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(self.edges.dtype)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.boundary_edge_to_element = boundary_edge_to_element
return self.boundary_edge_to_element
def GetElementsWithBoundaryFacesHex(self):
"""Finds elements which have faces on the boundary.
At most a hexahedral element can have all its six faces on the boundary.
output:
boundary_face_to_element: [2D array] array containing elements which have face
on the boundary [column 0] and a flag stating which faces they are [column 1]
"""
# DO NOT COMPUTE FACES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES
assert self.elements is not None
assert self.faces is not None
if self.boundary_face_to_element is not None:
return self.boundary_face_to_element
# THIS METHOD ALWAYS RETURNS THE FACE TO ELEMENT ARRAY, AND DOES NOT CHECK
# IF THIS HAS BEEN COMPUTED BEFORE, THE REASON BEING THAT THE FACES CAN COME
# EXTERNALLY WHOSE ARRANGEMENT WOULD NOT CORRESPOND TO THE ONE USED INTERNALLY
# HENCE THIS MAPPING BECOMES NECESSARY
C = self.InferPolynomialDegree() - 1
node_arranger = NodeArrangementHex(C)[0]
all_faces = np.concatenate((np.concatenate((
np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],
self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),
self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),
self.elements[:,node_arranger[5,:]]),axis=0).astype(self.faces.dtype)
all_faces_in_faces = in2d(all_faces,self.faces[:,:4],consider_sort=True)
all_faces_in_faces = np.where(all_faces_in_faces==True)[0]
boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)
boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]
boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]
# SO FAR WE HAVE COMPUTED THE ELEMENTS THAT CONTAIN FACES, HOWEVER
# NOTE THAT WE STILL HAVE NOT COMPUTED A MAPPING BETWEEN ELEMENTS AND
# FACES. WE ONLY KNOW WHICH ELEMENTS CONTAIN FACES FROM in2d.
# WE NEED TO FIND THIS MAPPING NOW
# WE NEED TO DO THIS DUMMY RECONSTRUCTION OF FACES BASED ON ELEMENTS
faces = self.elements[boundary_face_to_element[:,0][:,None],
node_arranger[boundary_face_to_element[:,1],:]].astype(self.faces.dtype)
# CHECK FOR THIS CONDITION AS ARRANGEMENT IS NO LONGER MAINTAINED
assert np.sum(faces[:,:4].astype(np.int64) - self.faces[:,:4].astype(np.int64)) == 0
# NOW GET THE ROW MAPPING BETWEEN OLD FACES AND NEW FACES
from Florence.Tensor import shuffle_along_axis
row_mapper = shuffle_along_axis(faces[:,:4],self.faces[:,:4],consider_sort=True)
# UPDATE THE MAP
boundary_face_to_element[:,:] = boundary_face_to_element[row_mapper,:]
self.boundary_face_to_element = boundary_face_to_element
return self.boundary_face_to_element
def GetElementsFaceNumberingHex(self):
"""Finds which faces belong to which elements and which faces of the elements
they are e.g. 0, 1, 2 or 3.
output:
face_elements: [2D array] nfaces x 2 array mapping each face to an element
containing it [column 0] and its local face number within that element [column 1]
Note that this method also sets the self.face_to_element to face_elements,
so the return value is not strictly necessary
"""
if isinstance(self.face_to_element,np.ndarray):
if self.face_to_element.shape[0] > 1:
return self.face_to_element
assert self.elements is not None
# GET ALL FACES FROM ELEMENT CONNECTIVITY
if self.all_faces is None:
self.GetFacesHex()
C = self.InferPolynomialDegree() - 1
node_arranger = NodeArrangementHex(C)[0]
all_faces = np.concatenate((np.concatenate((
np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],
self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),
self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),
self.elements[:,node_arranger[5,:]]),axis=0).astype(self.all_faces.dtype)
_,idx = unique2d(all_faces,consider_sort=True,order=False, return_index=True)
face_elements = np.zeros((self.all_faces.shape[0],2),dtype=np.int64)
face_elements[:,0] = idx % self.elements.shape[0]
face_elements[:,1] = idx // self.elements.shape[0]
self.face_to_element = face_elements
return self.face_to_element
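# A note on the flat-index decoding above: the stacked face array is built block by
# block (face 0 of every element, then face 1 of every element, and so on), so a row
# index r returned by unique2d maps back to (element, local face) by modulo and
# integer division. With hypothetical numbers for illustration:
#
#   nelem = 4
#   r = 9                                       # row index into the stacked face array
#   elem, local_face = r % nelem, r // nelem    # -> element 1, local face 2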
def ArrangeFacesHex(self):
"""Arranges all the faces of hexahedral elements
with quadrilateral type node ordering """
if self.all_faces is None:
self.all_faces = self.GetFacesHex()
if self.face_to_element is None:
self.GetElementsFaceNumberingHex()
# DETERMINE DEGREE
p = self.InferPolynomialDegree()
node_arranger = NodeArrangementHex(p-1)[0]
self.all_faces = self.elements[self.face_to_element[:,0][:,None],node_arranger[self.face_to_element[:,1],:]]
def GetNodeCommonality(self):
"""Finds the elements sharing a node.
The return values are linked lists [lists of numpy arrays].
Each numpy array within the list gives the elements that contain a given node.
As a result the size of the linked list is nnode
outputs:
els: [list of numpy arrays] element numbers containing nodes
pos: [list of numpy arrays] elemental positions of the nodes
flat_pos: [list of numpy arrays] positions of the nodes in the
flattened element connectivity
"""
self.__do_essential_memebers_exist__()
elements = self.elements.ravel()
idx_sort = np.argsort(elements)
sorted_elements = elements[idx_sort]
vals, idx_start = np.unique(sorted_elements, return_index=True)
# Sets of indices
flat_pos = np.split(idx_sort, idx_start[1:])
els = np.split(idx_sort // int(self.elements.shape[1]), idx_start[1:])
pos = np.split(idx_sort % int(self.elements.shape[1]), idx_start[1:])
# In case one wants to return only the duplicates i.e. filter keeping only items occurring more than once
# vals, idx_start, count = np.unique(sorted_elements, return_counts=True, return_index=True)
# vals = vals[count > 1]
# res = filter(lambda x: x.size > 1, res)
return els, pos, flat_pos
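# Illustrative sketch of the argsort/unique/split pattern used above, as a standalone
# NumPy snippet (values made up for demonstration):
#
#   import numpy as np
#   elements = np.array([[0, 1, 2], [1, 2, 3]])            # two triangles
#   flat = elements.ravel()
#   order = np.argsort(flat)
#   vals, starts = np.unique(flat[order], return_index=True)
#   els = np.split(order // elements.shape[1], starts[1:])
#   pos = np.split(order % elements.shape[1], starts[1:])
#   # els[1] -> array([0, 1]): node 1 is shared by elements 0 and 1
#   # pos[1] -> array([1, 0]): it sits at local position 1 in element 0 and 0 in element 1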
def Read(self, filename=None, element_type="tri", reader_type=None, reader_type_format=None,
reader_type_version=None, order=0, read_surface_info=False, **kwargs):
"""Convenience mesh reader method to dispatch call to subsequent apporpriate methods"""
if reader_type != 'read_separate':
if not isinstance(filename,str):
raise ValueError("filename must be a string")
return
if reader_type is None:
if filename.split('.')[-1] == "msh":
reader_type = "gmsh"
elif filename.split('.')[-1] == "obj":
reader_type = "obj"
elif filename.split('.')[-1] == "unv":
reader_type = "unv"
elif filename.split('.')[-1] == "fro":
reader_type = "fro"
elif filename.split('.')[-1] == "dat":
for key in kwargs.keys():
inkey = insensitive(key)
if "connectivity" in inkey and "delimiter" not in inkey:
reader_type = "read_separate"
break
if reader_type is None:
raise ValueError("Mesh file format was not undertood. Please specify it using reader_type keyword")
self.filename = filename
self.reader_type = reader_type
self.reader_type_format = reader_type_format
self.reader_type_version = reader_type_version
if self.reader_type == 'salome':
self.ReadSalome(filename, element_type=element_type, read_surface_info=read_surface_info)
elif reader_type == 'GID':
self.ReadGIDMesh(filename, element_type, order)
elif self.reader_type == 'gmsh':
self.ReadGmsh(filename, element_type=element_type, read_surface_info=read_surface_info)
elif self.reader_type == 'obj':
self.ReadOBJ(filename, element_type=element_type, read_surface_info=read_surface_info)
elif self.reader_type == 'fenics':
self.ReadFenics(filename, element_type)
elif self.reader_type == 'vtu':
self.ReadVTK(filename)
elif self.reader_type == 'unv':
self.ReadUNV(filename, element_type)
elif self.reader_type == 'fro':
self.ReadFRO(filename, element_type)
elif self.reader_type == 'read_separate':
# READ MESH FROM SEPARATE FILES FOR CONNECTIVITY AND COORDINATES
from Florence.Utils import insensitive
# return insensitive(kwargs.keys())
for key in kwargs.keys():
inkey = insensitive(key)
if "connectivity" in inkey and "delimiter" not in inkey:
connectivity_file = kwargs.get(key)
if "coordinate" in insensitive(key) and "delimiter" not in inkey:
coordinates_file = kwargs.get(key)
self.ReadSeparate(connectivity_file,coordinates_file,element_type,
delimiter_connectivity=',',delimiter_coordinates=',')
elif self.reader_type == 'ReadHDF5':
self.ReadHDF5(filename)
self.nnode = self.points.shape[0]
# MAKE SURE MESH DATA IS CONTIGUOUS
self.points = np.ascontiguousarray(self.points)
self.elements = np.ascontiguousarray(self.elements)
return
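# Typical dispatch behaviour of Read, shown on hypothetical file names (the mesh
# object and the files are assumptions for illustration only):
#
#   mesh.Read(filename="wing.msh", element_type="tet")     # extension .msh -> ReadGmsh
#   mesh.Read(filename="shell.obj", element_type="tri")    # extension .obj -> ReadOBJ
#   mesh.Read(filename="frame.unv", element_type="quad")   # extension .unv -> ReadUNV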
def ReadSalome(self, filename, element_type="tri", read_surface_info=False):
"""Salome .dat format mesh reader"""
if element_type == "line":
el = "102"
bel = ""
elif element_type == "tri":
el = "203"
bel = "102"
elif element_type == "quad":
el = "204"
bel = "102"
elif element_type == "tet":
el = "304"
bel = "203"
elif element_type == "hex":
el = "308"
bel = "204"
if read_surface_info is True and element_type == "line":
warn("No surface info for lines. I am going to ignore this")
read_surface_info = False
with open(filename,'r') as f:
lines = f.readlines()
info = lines[0].rstrip().split()
self.nnode = int(info[0])
all_nelem = int(info[1])
nodes = lines[1:self.nnode+1]
points = []
for line in nodes:
points.append([float(i) for i in line.rstrip().split()[1:4]])
self.points = np.array(points,copy=True)
self.nnode = self.points.shape[0]
edges, faces, elements = [], [], []
for counter in range(self.nnode+1,len(lines)):
line = lines[counter].rstrip().split()
if read_surface_info:
if bel == line[1]:
faces.append([int(i) for i in line[2:]])
if el == line[1]:
elements.append([int(i) for i in line[2:]])
self.element_type = element_type
self.elements = np.array(elements,dtype=np.int64,copy=True) - 1
self.nelem = self.elements.shape[0]
if self.nelem == 0:
raise ValueError("file does not contain {} elements".format(element_type))
ndim = self.InferSpatialDimension()
if self.element_type == "tri" or self.element_type == "quad":
self.GetEdges()
self.GetBoundaryEdges()
elif self.element_type == "tet" or self.element_type == "hex":
self.GetFaces()
self.GetBoundaryFaces()
self.GetBoundaryEdges()
def ReadSeparate(self,connectivity_file,coordinates_file,mesh_type, edges_file = None, faces_file = None,
delimiter_connectivity=' ',delimiter_coordinates=' ', delimiter_edges=' ', delimiter_faces=' ',
ignore_cols_connectivity=None,ignore_cols_coordinates=None,ignore_cols_edges=None,
ignore_cols_faces=None,index_style='c'):
"""Read meshes when the element connectivity and nodal coordinates are written in separate files
input:
connectivity_file: [str] filename containing element connectivity
coordinates_file: [str] filename containing nodal coordinates
mesh_type: [str] type of mesh tri/tet/quad/hex
edges_file: [str] filename containing edges of the mesh (if not given gets computed)
faces_file: [str] filename containing faces of the mesh (if not given gets computed)
delimiter_connectivity: [str] delimiter for connectivity_file - default is white space/tab
delimiter_coordinates: [str] delimiter for coordinates_file - default is white space/tab
delimiter_edges: [str] delimiter for edges_file - default is white space/tab
delimiter_faces: [str] delimiter for faces_file - default is white space/tab
ignore_cols_connectivity: [int] no of columns to be ignored (from the start) in the connectivity_file
ignore_cols_coordinates: [int] no of columns to be ignored (from the start) in the coordinates_file
ignore_cols_edges: [int] no of columns to be ignored (from the start) in the connectivity_file
ignore_cols_faces: [int] no of columns to be ignored (from the start) in the coordinates_file
index_style: [str] either 'c' C-based (zero based) indexing or 'f' fortran-based
(one based) indexing for elements connectivity - default is 'c'
"""
index = 0
if index_style == 'c':
index = 1
from time import time; t1=time()
self.elements = np.loadtxt(connectivity_file,dtype=np.int64,delimiter=delimiter_connectivity) - index
# self.elements = np.fromfile(connectivity_file,dtype=np.int64,count=-1) - index
self.points = np.loadtxt(coordinates_file,dtype=np.float64,delimiter=delimiter_coordinates)
if ignore_cols_connectivity != None:
self.elements = self.elements[ignore_cols_connectivity:,:]
if ignore_cols_coordinates != None:
self.points = self.points[ignore_cols_coordinates:,:]
if (mesh_type == 'tri' or mesh_type == 'quad') and self.points.shape[1]>2:
self.points = self.points[:,:2]
self.element_type = mesh_type
self.nelem = self.elements.shape[0]
# self.edges = None
if edges_file is None:
if mesh_type == "tri":
self.GetBoundaryEdgesTri()
elif mesh_type == "tet":
self.GetBoundaryEdgesTet()
else:
self.edges = np.loadtxt(edges_file,dtype=np.int64,delimiter=delimiter_edges) - index
if ignore_cols_edges !=None:
self.edges = self.edges[ignore_cols_edges:,:]
if faces_file is None:
if mesh_type == "tet":
self.GetBoundaryFacesTet()
else:
self.faces = np.loadtxt(faces_file,dtype=np.int64,delimiter=delimiter_edges) - index
if ignore_cols_faces !=None:
self.faces = self.faces[ignore_cols_faces:,:]
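# Usage sketch for ReadSeparate with two hypothetical comma-separated files, one
# holding one-based element connectivity and one holding nodal coordinates:
#
#   mesh.ReadSeparate("elements.csv", "points.csv", "tet",
#                     delimiter_connectivity=",", delimiter_coordinates=",",
#                     index_style="f")    # 'f' because the connectivity is one-based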
def ReadGIDMesh(self,filename,mesh_type,polynomial_order = 0):
"""Read GID meshes"""
if self.elements is not None and self.points is not None:
self.__reset__()
self.element_type = mesh_type
ndim, self.nelem, nnode, nboundary = np.fromfile(filename,dtype=np.int64,count=4,sep=' ')
if ndim==2 and mesh_type=="tri":
content = np.fromfile(filename,dtype=np.float64,count=4+3*nnode+4*self.nelem,sep=' ')
self.points = content[4:4+3*nnode].reshape(nnode,3)[:,1:]
self.elements = content[4+3*nnode:4+3*nnode+4*self.nelem].reshape(self.nelem,4)[:,1:].astype(np.int64)
self.elements -= 1
self.GetBoundaryEdgesTri()
if ndim==3 and mesh_type=="tet":
content = np.fromfile(filename,dtype=np.float64,count=4+4*nnode+5*self.nelem+9*nboundary,sep=' ')
self.points = content[4:4+4*nnode].reshape(nnode,4)[:,1:]
self.elements = content[4+4*nnode:4+4*nnode+5*self.nelem].reshape(self.nelem,5)[:,1:].astype(np.int64)
self.elements -= 1
face_flags = content[4*nnode+5*self.nelem+4:].reshape(nboundary,9)[:,1:].astype(np.int64)
self.faces = np.ascontiguousarray(face_flags[:,1:4] - 1)
self.face_to_surface = np.ascontiguousarray(face_flags[:,7] - 1)
# self.boundary_face_to_element = np.ascontiguousarray(face_flags[:,0])
# self.GetBoundaryFacesTet()
self.GetBoundaryEdgesTet()
def ReadVTK(self, filename, element_type=None):
"""Read mesh from a vtu file"""
try:
import vtkInterface as vtki
except ImportError:
raise ImportError("vtkInterface is not installed. Please install it first using 'pip install vtkInterface'")
self.__reset__()
vmesh = vtki.UnstructuredGrid(filename)
flat_elements = np.copy(np.delete(vmesh.cells, vmesh.offset))
if not np.all(vmesh.celltypes == vmesh.celltypes[0]):
raise IOError("Cannot read VTK files with hybrid elements")
cellflag = vmesh.celltypes[0]
if cellflag == 5:
self.element_type = "tri"
divider = 3
elif cellflag == 9:
self.element_type = "quad"
divider = 4
elif cellflag == 10:
self.element_type = "tet"
divider = 4
elif cellflag == 12:
self.element_type = "hex"
divider = 8
elif cellflag == 3:
self.element_type = "line"
divider = 2
else:
raise IOError("VTK element type not understood")
if element_type is not None:
if self.element_type != element_type:
raise ValueError("VTK file does not contain {} elements".format(element_type))
self.elements = np.ascontiguousarray(flat_elements.reshape(int(flat_elements.shape[0]/divider),divider), dtype=np.uint64)
self.points = np.ascontiguousarray(vmesh.points, dtype=np.float64)
self.nelem = self.elements.shape[0]
self.nnode = self.points.shape[0]
if self.points.shape[1] == 3:
if np.allclose(self.points[:,2],0.):
self.points = np.ascontiguousarray(self.points[:,:2])
if self.element_type == "tri" or self.element_type == "quad":
self.GetEdges()
self.GetBoundaryEdges()
elif self.element_type == "tet" or self.element_type == "hex":
self.GetFaces()
self.GetBoundaryFaces()
self.GetBoundaryEdges()
return
def ReadGmsh(self, filename, element_type, p=1, read_surface_info=False):
"""Read gmsh (.msh) file"""
try:
fid = open(filename, "r")
except IOError:
print("File '%s' not found." % (filename))
sys.exit()
msh_version = None
# CHECK MSH FILE VERSION
if "MeshFormat" in fid.readline():
msh_version = int(np.floor(float(fid.readline().split(" ")[0])))
if 4 != msh_version and 2 != msh_version:
raise IOError("Only ASCII version 2 and 4 (>=4.1) .msh file formats are supported")
if 4 != msh_version and 2 != msh_version:
raise IOError("Only ASCII version 2 and 4 (>=4.1) .msh file formats are supported")
fid.close()
if self.elements is not None and self.points is not None:
self.__reset__()
self.filename = filename
bel = -1
if element_type == "line":
el = 1
elif element_type == "tri":
if p == 1:
el = 2
bel = 1
elif p == 2:
el = 9
bel = 8
elif element_type == "quad":
if p == 1:
el = 3
bel = 1
elif p == 2:
el = 10
bel = 8
elif element_type == "tet":
if p == 1:
el = 4
bel = 2
elif p == 2:
el = 11
bel = 9
elif element_type == "hex":
if p == 1:
el = 5
bel = 3
elif p == 2:
el = 12
bel = 10
else:
raise ValueError("Element type not understood")
# NEW FAST READER
var = 0 # for old gmsh versions - needs checks
node_blocks, elem_blocks, face_blocks = None, None, None
rem_nnode, rem_nelem, rem_faces = int(1e09), int(1e09), int(1e09)
face_counter = 0
for line_counter, line in enumerate(open(filename)):
item = line.rstrip()
plist = item.split()
if plist[0] == "Dimension":
self.ndim = plist[1]
elif plist[0] == "Vertices":
rem_nnode = line_counter+1
continue
elif plist[0] == "$Nodes":
rem_nnode = line_counter+1
continue
elif plist[0] == "Triangles":
rem_faces = line_counter+1
continue
elif plist[0] == "Tetrahedra":
rem_nelem = line_counter+1
continue
elif plist[0] == "$Elements":
rem_nelem = line_counter+1
var = 1
continue
if msh_version == 2:
if rem_nnode == line_counter:
self.nnode = int(plist[0])
if rem_faces == line_counter:
face_counter = int(plist[0])
if rem_nelem == line_counter:
self.nelem = int(plist[0])
break
else:
if rem_nnode == line_counter:
node_blocks, self.nnode = int(plist[0]), int(plist[1])
if rem_faces == line_counter:
face_blocks, face_counter = int(plist[0]), int(plist[1])
if rem_nelem == line_counter:
elem_blocks, self.nelem = int(plist[0]), int(plist[1])
break
points, elements, faces, face_to_surface = [],[], [], []
if msh_version == 2:
# RE-READ
ns = self.InferNumberOfNodesPerElement(p=p,element_type=element_type)
for line_counter, line in enumerate(open(filename)):
item = line.rstrip()
plist = item.split()
if var == 0:
if line_counter > rem_nnode and line_counter < self.nnode+rem_nnode+1:
points.append([float(i) for i in plist[:3]])
if line_counter > rem_nelem and line_counter < self.nelem+rem_nelem+1:
elements.append([int(i) for i in plist[:4]])
elif var == 1:
if line_counter > rem_nnode and line_counter < self.nnode+rem_nnode+1:
points.append([float(i) for i in plist[1:]])
if line_counter > rem_nelem and line_counter < self.nelem+rem_nelem+1:
if int(plist[1]) == el:
elements.append([int(i) for i in plist[-ns:]])
# READ SURFACE INFO - CERTAINLY ONLY IF SURFACE ELEMENT TYPE IS QUADS/TRIS
if read_surface_info:
if int(plist[1]) == bel:
faces.append([int(i) for i in plist[5:]])
face_to_surface.append(int(plist[4]))
elif msh_version == 4:
# RE-READ
fid = open(filename)
content = fid.readlines()
# READ NODES
nodes_content = content[rem_nnode+1:2*self.nnode+node_blocks+rem_nnode+1]
incrementer, line_number = 0, 0
# LOOP OVER BLOCKS
for i in range(node_blocks):
incrementer = int(nodes_content[line_number].rstrip().split()[3])
# LOOP OVER NODES OF EACH BLOCK
for j in range(line_number+1, line_number+2*incrementer+1):
plist = nodes_content[j].rstrip().split()
if len(plist) == 1:
continue
points.append([float(plist[k]) for k in range(0,len(plist))])
line_number += 2*incrementer + 1
# READ ELEMENTS
elems_content = content[rem_nelem+1:self.nelem+elem_blocks+rem_nelem+1]
incrementer, line_number = 0, 0
# LOOP OVER BLOCKS
for i in range(elem_blocks):
incrementer = int(elems_content[line_number].rstrip().split()[3])
if el == int(elems_content[line_number].rstrip().split()[2]):
# LOOP OVER ELEMENTS OF EACH BLOCK
for j in range(line_number+1, line_number+incrementer+1):
plist = elems_content[j].rstrip().split()
elements.append([int(plist[k]) for k in range(1,len(plist))])
line_number += incrementer + 1
if read_surface_info:
# READ FACES
incrementer, line_number = 0, 0
# LOOP OVER BLOCKS
for i in range(elem_blocks):
incrementer = int(elems_content[line_number].rstrip().split()[3])
surface_tag = int(elems_content[line_number].rstrip().split()[1])
if bel == int(elems_content[line_number].rstrip().split()[2]):
# LOOP OVER FACES OF EACH BLOCK
for j in range(line_number+1, line_number+incrementer+1):
plist = elems_content[j].rstrip().split()
faces.append([int(plist[k]) for k in range(1,len(plist))])
face_to_surface.append(surface_tag)
line_number += incrementer + 1
self.points = np.array(points,copy=True)
self.elements = np.array(elements,copy=True) - 1
# REORDER CONNECTIVITY
if p == 2:
# TRI6
if el == 9:
self.elements = self.elements[:,[0,1,2,3,5,4]]
# QUAD9
elif el == 10:
self.elements = self.elements[:,[0, 1, 2, 3, 4, 7, 8, 5, 6]]
# TET10
elif el == 11:
self.elements = self.elements[:,[0,1,2,3,4,6,5,7,9,8]]
# CORRECT
self.nelem = self.elements.shape[0]
self.nnode = self.points.shape[0]
if self.nelem == 0:
raise ValueError("msh file does not contain {} elements".format(element_type))
if read_surface_info:
self.faces = np.array(faces,copy=True) - 1
self.face_to_surface = np.array(face_to_surface, dtype=np.int64, copy=True).flatten()
self.face_to_surface -= 1
# CHECK IF FILLED
if isinstance(self.face_to_surface,list):
if not self.face_to_surface:
self.face_to_surface = None
elif isinstance(self.face_to_surface,np.ndarray):
if self.face_to_surface.shape[0]==0:
self.face_to_surface = None
if self.points.shape[1] == 3:
if np.allclose(self.points[:,2],0.):
self.points = np.ascontiguousarray(self.points[:,:2])
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Tests for linear_model extensions."""
import numpy as np
import pytest
import unittest
import warnings
from econml.sklearn_extensions.ensemble import SubsampledHonestForest
class TestSubsampledHonestForest(unittest.TestCase):
"""Test SubsampledHonestForest."""
def test_y1d(self):
np.random.seed(123)
n = 5000
d = 5
x_grid = np.linspace(-1, 1, 10)
X_test = np.hstack([x_grid.reshape(-1, 1), np.random.normal(size=(10, d - 1))])
for _ in range(3):
for criterion in ['mse', 'mae']:
X = np.random.normal(0, 1, size=(n, d))
y = X[:, 0] + np.random.normal(0, .1, size=(n,))
est = SubsampledHonestForest(n_estimators=100, max_depth=5, criterion=criterion,
min_samples_leaf=10, verbose=0)
est.fit(X, y)
point = est.predict(X_test)
lb, ub = est.predict_interval(X_test, alpha=0.01)
np.testing.assert_allclose(point, X_test[:, 0], rtol=0, atol=.2)
np.testing.assert_array_less(lb, X_test[:, 0] + .05)
np.testing.assert_array_less(X_test[:, 0], ub + .05)
def test_nonauto_subsample_fr(self):
np.random.seed(123)
n = 5000
d = 5
x_grid = np.linspace(-1, 1, 10)
X_test = np.hstack([x_grid.reshape(-1, 1), np.random.normal(size=(10, d - 1))])
X = np.random.normal(0, 1, size=(n, d))
y = X[:, 0] + np.random.normal(0, .1, size=(n,))
est = SubsampledHonestForest(n_estimators=100, subsample_fr=.8, max_depth=5, min_samples_leaf=10, verbose=0)
est.fit(X, y)
point = est.predict(X_test)
lb, ub = est.predict_interval(X_test, alpha=0.01)
np.testing.assert_allclose(point, X_test[:, 0], rtol=0, atol=.2)
np.testing.assert_array_less(lb, X_test[:, 0] + .05)
np.testing.assert_array_less(X_test[:, 0], ub + .05)
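# Minimal usage pattern exercised by the tests above (X, y and X_test as constructed
# in test_nonauto_subsample_fr; only calls already appearing in these tests are used):
#
#   est = SubsampledHonestForest(n_estimators=100, max_depth=5, min_samples_leaf=10)
#   est.fit(X, y)
#   point = est.predict(X_test)
#   lb, ub = est.predict_interval(X_test, alpha=0.01)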
def test_y2d(self):
np.random.seed(123)
n = 5000
d = 5
x_grid = np.linspace(-1, 1, 10)
X_test = np.hstack([x_grid.reshape(-1, 1), np.random.normal(size=(10, d - 1))])
for _ in range(3):
for criterion in ['mse', 'mae']:
X = np.random.normal(0, 1, size=(n, d))
y = X[:, [0, 0]] + np.random.normal(0, .1, size=(n, 2))
est = SubsampledHonestForest(n_estimators=100, max_depth=5, criterion=criterion,
min_samples_leaf=10, verbose=0)
est.fit(X, y)
point = est.predict(X_test)
lb, ub = est.predict_interval(X_test, alpha=0.01)
np.testing.assert_allclose(point, X_test[:, [0, 0]], rtol=0, atol=.2)
np.testing.assert_array_less(lb, X_test[:, [0, 0]] + .05)
np.testing.assert_array_less(X_test[:, [0, 0]], ub + .05)
def test_dishonest_y1d(self):
np.random.seed(123)
n = 5000
d = 1
x_grid = np.linspace(-1, 1, 10)
"""
The inference algorithm.
Introduces a new format of fit, isolates trials, and is ready for unequal trial lengths.
"""
import logging
import click
import numpy as np
from numpy import identity, einsum, trace
from scipy.linalg import solve, norm, svd, LinAlgError
from . import gp
from .evaluation import timer
from .math import trunc_exp
from .util import clip
logger = logging.getLogger(__name__)
def infer_single_trial(trial, params, config):
max_iter = config["Eniter"]
if max_iter < 1:
return
zdim = params["zdim"]
rank = params["rank"] # rank of prior covariance
likelihood = params["likelihood"]
# misc
dmu_bound = config["dmu_bound"]
tol = config["tol"]
method = config["method"]
poisson_channel = likelihood == "poisson"
gaussian_channel = likelihood == "gaussian"
# parameters
a = params["a"]
b = params["b"]
noise = params["noise"]
gauss_noise = noise[gaussian_channel]
Ir = identity(rank)
# boolean indexing creates copies
# pull indexing out of the loop for performance
y = trial["y"]
x = trial["x"]
mu = trial["mu"]
w = trial["w"]
v = trial["v"]
dmu = trial["dmu"]
prior = params["cholesky"][
y.shape[0]
] # TODO: adapt unequal lengths, move into trials
residual = np.empty_like(y, dtype=float)
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Module performs Unit Tests for the PointSet data objects.
It can not be considered part of the active code but of the regression test system
"""
#For future compatibility with Python 3
from __future__ import division, print_function, unicode_literals, absolute_import
import warnings
warnings.simplefilter('default',DeprecationWarning)
import xml.etree.ElementTree as ET
import sys, os, copy
import pickle as pk
import numpy as np
import xarray as xr
# find location of crow, message handler
frameworkDir = os.path.abspath(os.path.join(*([os.path.dirname(__file__)]+[os.pardir]*4+['framework'])))
sys.path.append(frameworkDir)
from utils.utils import find_crow
find_crow(frameworkDir)
import MessageHandler
import DataObjects
mh = MessageHandler.MessageHandler()
mh.initialize({'verbosity':'debug', 'callerLength':10, 'tagLength':10})
print('Module undergoing testing:')
print(DataObjects.PointSet)
print('')
def createElement(tag,attrib=None,text=None):
"""
Method to create a dummy xml element readable by the distribution classes
@ In, tag, string, the node tag
@ In, attrib, dict, optional, the attribute of the xml node
@ In, text, str, optional, the text content of the xml node
@ Out, element, ET.Element, the constructed xml element
"""
if attrib is None:
attrib = {}
if text is None:
text = ''
element = ET.Element(tag,attrib)
element.text = text
return element
results = {"pass":0,"fail":0}
def checkFloat(comment,value,expected,tol=1e-10,update=True):
"""
This method is aimed to compare two floats given a certain tolerance
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, tol, float, optional, the tolerance
@ Out, res, bool, True if same
"""
if np.isnan(value) and np.isnan(expected):
res = True
elif np.isnan(value) or np.isnan(expected):
res = False
else:
res = abs(value - expected) <= tol
if update:
if not res:
print("checking float",comment,'|',value,"!=",expected)
results["fail"] += 1
else:
results["pass"] += 1
return res
def checkTrue(comment,res,update=True):
"""
This method is a pass-through for consistency and updating
@ In, comment, string, a comment printed out if it fails
@ In, res, bool, the tested value
@ Out, res, bool, True if the test passed
"""
if update:
if res:
results["pass"] += 1
else:
print("checking bool",comment,'|',res,'is not True!')
results["fail"] += 1
return res
def checkSame(comment,value,expected,update=True):
"""
This method is aimed to compare two identical things
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ Out, res, bool, True if same
"""
res = value == expected
if update:
if res:
results["pass"] += 1
else:
print("checking string",comment,'|',value,"!=",expected)
results["fail"] += 1
return res
def checkArray(comment,first,second,dtype,tol=1e-10,update=True):
"""
This method is aimed to compare two arrays
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, tol, float, optional, the tolerance
@ Out, res, bool, True if same
"""
res = True
if len(first) != len(second):
res = False
print("checking answer",comment,'|','lengths do not match:',len(first),len(second))
else:
for i in range(len(first)):
if dtype == float:
pres = checkFloat('',first[i],second[i],tol,update=False)
elif dtype.__name__ in ('str','unicode'):
pres = checkSame('',first[i],second[i],update=False)
if not pres:
print('checking array',comment,'|','entry "{}" does not match: {} != {}'.format(i,first[i],second[i]))
res = False
if update:
if res:
results["pass"] += 1
else:
results["fail"] += 1
return res
def checkRlz(comment,first,second,tol=1e-10,update=True):
"""
This method is aimed to compare two realization
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, tol, float, optional, the tolerance
@ Out, res, bool, True if same
"""
res = True
if len(first) != len(second):
res = False
print("checking answer",comment,'|','lengths do not match:',len(first),len(second))
else:
for key,val in first.items():
if isinstance(val,float):
pres = checkFloat('',val,second[key],tol,update=False)
elif type(val).__name__ in ('str','unicode','str_','unicode_'):
pres = checkSame('',val,second[key][0],update=False)
elif isinstance(val,xr.DataArray):
if isinstance(val.item(0),(float,int)):
pres = (val - second[key]).sum()<1e-20 #necessary due to roundoff
else:
pres = val.equals(second[key])
else:
raise TypeError(type(val))
if not pres:
print('checking dict',comment,'|','entry "{}" does not match: {} != {}'.format(key,first[key],second[key]))
res = False
if update:
if res:
results["pass"] += 1
else:
results["fail"] += 1
return res
def checkNone(comment,entry,update=True):
"""
Tests if the entry identifies as None.
@ In, comment, str, comment to print if failed
@ In, entry, object, object to test
@ In, update, bool, optional, if True then updates results
@ Out, None
"""
res = entry is None
if update:
if res:
results["pass"] += 1
else:
print("checking answer",comment,'|','"{}" is not None!'.format(entry))
results["fail"] += 1
def checkFails(comment,errstr,function,update=True,args=None,kwargs=None):
"""
Tests if function fails as expected
@ In, comment, str, comment to print if failed
@ In, errstr, str, expected error string
@ In, function, method, method to run
@ In, update, bool, optional, if True then updates results
@ In, args, list, arguments to function
@ In, kwargs, dict, keywords arguments to function
@ Out, res, bool, result (True if passed)
"""
print('Error testing ...')
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
function(*args,**kwargs)
res = False
msg = 'Function call did not error!'
except Exception as e:
res = checkSame('',e.args[0],errstr,update=False)
if not res:
msg = 'Unexpected error message. \n Received: "{}"\n Expected: "{}"'.format(e.args[0],errstr)
if update:
if res:
results["pass"] += 1
print(' ... end Error testing (PASSED)')
else:
print("checking error",comment,'|',msg)
results["fail"] += 1
print(' ... end Error testing (FAILED)')
print('')
return res
def formatRealization(rlz):
"""
Converts types of each input.
@ In, rlz, dict, var:val
@ Out, rlz, dict, formatted
"""
for k,v in rlz.items():
rlz[k] = np.atleast_1d(v)
# -*- coding: utf-8 -*-
"""Tests that use cross-checks for generic methods
Should be easy to check consistency across models
Does not cover tsa
Initial cases copied from test_shrink_pickle
Created on Wed Oct 30 14:01:27 2013
Author: <NAME>
"""
from statsmodels.compat.pandas import assert_series_equal, assert_index_equal
from statsmodels.compat.platform import (PLATFORM_OSX, PLATFORM_LINUX32,
PLATFORM_WIN32)
from statsmodels.compat.scipy import SCIPY_GT_14
import numpy as np
import pandas as pd
import pytest
import statsmodels.api as sm
from statsmodels.tools.sm_exceptions import HessianInversionWarning
import statsmodels.tools._testing as smt
from statsmodels.formula.api import ols, glm
from numpy.testing import (assert_, assert_allclose, assert_equal,
assert_array_equal)
class CheckGenericMixin(object):
@classmethod
def setup_class(cls):
nobs = 500
np.random.seed(987689)
x = np.random.randn(nobs, 3)
x = sm.add_constant(x)
cls.exog = x
cls.xf = 0.25 * np.ones((2, 4))
cls.predict_kwds = {}
cls.transform_index = None
def test_ttest_tvalues(self):
# test that t_test has same results a params, bse, tvalues, ...
smt.check_ttest_tvalues(self.results)
res = self.results
mat = np.eye(len(res.params))
tt = res.t_test(mat[0])
string_confint = lambda alpha: "[%4.3F %4.3F]" % (
alpha / 2, 1- alpha / 2)
summ = tt.summary() # smoke test for #1323
assert_allclose(tt.pvalue, res.pvalues[0], rtol=5e-10)
assert_(string_confint(0.05) in str(summ))
# issue #3116 alpha not used in column headers
summ = tt.summary(alpha=0.1)
ss = "[0.05 0.95]" # different formatting
assert_(ss in str(summ))
summf = tt.summary_frame(alpha=0.1)
pvstring_use_t = 'P>|z|' if res.use_t is False else 'P>|t|'
tstring_use_t = 'z' if res.use_t is False else 't'
cols = ['coef', 'std err', tstring_use_t, pvstring_use_t,
'Conf. Int. Low', 'Conf. Int. Upp.']
assert_array_equal(summf.columns.values, cols)
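# The equivalence being checked, spelled out for the first parameter. Only tt.pvalue
# is used explicitly above; the attribute names .effect, .sd and .tvalue on the
# t_test result are assumed here for illustration:
#
#   tt = res.t_test(np.eye(len(res.params))[0])
#   # tt.pvalue ~ res.pvalues[0], tt.effect ~ res.params[0],
#   # tt.sd ~ res.bse[0], tt.tvalue ~ res.tvalues[0]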
def test_ftest_pvalues(self):
smt.check_ftest_pvalues(self.results)
def test_fitted(self):
smt.check_fitted(self.results)
def test_predict_types(self):
smt.check_predict_types(self.results)
def test_zero_constrained(self):
# not completely generic yet
if (isinstance(self.results.model, (sm.GEE))):
# GEE does not subclass LikelihoodModel
pytest.skip('GEE does not subclass LikelihoodModel')
use_start_params = not isinstance(self.results.model,
(sm.RLM, sm.OLS, sm.WLS))
self.use_start_params = use_start_params # attach for _get_constrained
keep_index = list(range(self.results.model.exog.shape[1]))
# index for params might include extra params
keep_index_p = list(range(self.results.params.shape[0]))
drop_index = [1]
for i in drop_index:
del keep_index[i]
del keep_index_p[i]
if use_start_params:
res1 = self.results.model._fit_zeros(keep_index, maxiter=500,
start_params=self.results.params)
else:
res1 = self.results.model._fit_zeros(keep_index, maxiter=500)
res2 = self._get_constrained(keep_index, keep_index_p)
assert_allclose(res1.params[keep_index_p], res2.params, rtol=1e-10,
atol=1e-10)
assert_equal(res1.params[drop_index], 0)
assert_allclose(res1.bse[keep_index_p], res2.bse, rtol=1e-10,
atol=1e-10)
assert_equal(res1.bse[drop_index], 0)
# OSX has many slight failures on this test
tol = 1e-8 if PLATFORM_OSX else 1e-10
tvals1 = res1.tvalues[keep_index_p]
assert_allclose(tvals1, res2.tvalues, rtol=tol, atol=tol)
# See gh5993
if PLATFORM_LINUX32 or SCIPY_GT_14:
pvals1 = res1.pvalues[keep_index_p]
else:
pvals1 = res1.pvalues[keep_index_p]
assert_allclose(pvals1, res2.pvalues, rtol=tol, atol=tol)
if hasattr(res1, 'resid'):
# discrete models, Logit do not have `resid` yet
# atol discussion at gh-5158
rtol = 1e-10
atol = 1e-12
if PLATFORM_OSX or PLATFORM_WIN32:
# GH 5628
rtol = 1e-8
atol = 1e-10
assert_allclose(res1.resid, res2.resid, rtol=rtol, atol=atol)
ex = self.results.model.exog.mean(0)
predicted1 = res1.predict(ex, **self.predict_kwds)
predicted2 = res2.predict(ex[keep_index], **self.predict_kwds)
assert_allclose(predicted1, predicted2, rtol=1e-10)
ex = self.results.model.exog[:5]
predicted1 = res1.predict(ex, **self.predict_kwds)
predicted2 = res2.predict(ex[:, keep_index], **self.predict_kwds)
assert_allclose(predicted1, predicted2, rtol=1e-10)
def _get_constrained(self, keep_index, keep_index_p):
# override in some test classes, no fit_kwds yet, e.g. cov_type
mod2 = self.results.model
mod_cls = mod2.__class__
init_kwds = mod2._get_init_kwds()
mod = mod_cls(mod2.endog, mod2.exog[:, keep_index], **init_kwds)
if self.use_start_params:
res = mod.fit(start_params=self.results.params[keep_index_p],
maxiter=500)
else:
res = mod.fit(maxiter=500)
return res
def test_zero_collinear(self):
# not completely generic yet
if isinstance(self.results.model, (sm.GEE)):
pytest.skip('Not completely generic yet')
use_start_params = not isinstance(self.results.model,
(sm.RLM, sm.OLS, sm.WLS, sm.GLM))
self.use_start_params = use_start_params # attach for _get_constrained
keep_index = list(range(self.results.model.exog.shape[1]))
# index for params might include extra params
keep_index_p = list(range(self.results.params.shape[0]))
drop_index = []
for i in drop_index:
del keep_index[i]
del keep_index_p[i]
keep_index_p = list(range(self.results.params.shape[0]))
# create collinear model
mod2 = self.results.model
mod_cls = mod2.__class__
init_kwds = mod2._get_init_kwds()
ex = np.column_stack((mod2.exog, mod2.exog))
mod = mod_cls(mod2.endog, ex, **init_kwds)
keep_index = list(range(self.results.model.exog.shape[1]))
keep_index_p = list(range(self.results.model.exog.shape[1]))
k_vars = ex.shape[1]
k_extra = 0
if hasattr(mod, 'k_extra') and mod.k_extra > 0:
keep_index_p += list(range(k_vars, k_vars + mod.k_extra))
k_extra = mod.k_extra
# TODO: Can we choose a test case without this issue?
# If not, should we be getting this warning for all
# model subclasses?
warn_cls = HessianInversionWarning if isinstance(mod, sm.GLM) else None
cov_types = ['nonrobust', 'HC0']
for cov_type in cov_types:
# Note: for RLM we only check default when cov_type is 'nonrobust'
# cov_type is otherwise ignored
if cov_type != 'nonrobust' and (isinstance(self.results.model,
sm.RLM)):
return
if use_start_params:
start_params = np.zeros(k_vars + k_extra)
method = self.results.mle_settings['optimizer']
# string in `method` is not mutable, so no need for copy
sp = self.results.mle_settings['start_params'].copy()
if self.transform_index is not None:
# work around internal transform_params, currently in NB
sp[self.transform_index] = np.exp(sp[self.transform_index])
start_params[keep_index_p] = sp
with pytest.warns(warn_cls):
res1 = mod._fit_collinear(cov_type=cov_type,
start_params=start_params,
method=method, disp=0)
if cov_type != 'nonrobust':
# reestimate original model to get robust cov
with pytest.warns(warn_cls):
res2 = self.results.model.fit(cov_type=cov_type,
start_params=sp,
method=method, disp=0)
else:
with pytest.warns(warn_cls):
# more special casing RLM
if (isinstance(self.results.model, (sm.RLM))):
res1 = mod._fit_collinear()
else:
res1 = mod._fit_collinear(cov_type=cov_type)
if cov_type != 'nonrobust':
# reestimate original model to get robust cov
res2 = self.results.model.fit(cov_type=cov_type)
if cov_type == 'nonrobust':
res2 = self.results
# check fit optimizer arguments, if mle_settings is available
if hasattr(res2, 'mle_settings'):
assert_equal(res1.results_constrained.mle_settings['optimizer'],
res2.mle_settings['optimizer'])
if 'start_params' in res2.mle_settings:
spc = res1.results_constrained.mle_settings['start_params']
assert_allclose(spc,
res2.mle_settings['start_params'],
rtol=1e-10, atol=1e-20)
assert_equal(res1.mle_settings['optimizer'],
res2.mle_settings['optimizer'])
assert_allclose(res1.mle_settings['start_params'],
res2.mle_settings['start_params'],
rtol=1e-10, atol=1e-20)
# Poisson has reduced precision in params, difficult optimization?
assert_allclose(res1.params[keep_index_p], res2.params, rtol=1e-6)
assert_allclose(res1.params[drop_index], 0, rtol=1e-10)
assert_allclose(res1.bse[keep_index_p], res2.bse, rtol=1e-8)
import itertools as it
from typing import List, Tuple
import jax
import jax.numpy as jnp
import numpy as np
import pytest as pt
from jdetr._typing import JaxArray
from jdetr.losses import SetCriterion
@pt.fixture(scope="module")
def some_boxes() -> Tuple[np.ndarray, np.ndarray]:
boxes = np.array(
[
[0.43910939, 0.79495835, 0.7267344, 0.9483542],
[0.08571875, 0.31514582, 0.6987031, 0.9939375],
[0.344125, 0.0, 0.63554686, 0.32766667],
[0.54446876, 0.80670834, 0.5626875, 0.8576458],
[0, 0, 1, 1],
]
)
labels = np.array([1, 2, 3, 4, 5])
import numpy as np
import pandas as pd
import warnings
from sklearn.linear_model import LinearRegression
import scipy.cluster.hierarchy as sch
import datetime
import random
class backtest_model:
"""
Given a user-defined portfolio construction strategy (a function that takes in stock-related data and returns portfolio weights) and
the data that the user wishes the strategy to be tested on, calculate several evaluation metrics of the portfolio, including
net returns, Sharpe ratio, certainty-equivalent returns, turnover, etc.
Various inputs can be modified to suit the needs of the strategy and backtesting scenarios, such as price-impact models,
transaction costs, etc.
Initiate the model with the strategy function, and clarify involved data types needed, whose sequence MUST be consistent
with that of the list of dataframes used inside strategy function
:param strategy: user-defined function that serves as portfolio construction strategy
:type strategy: function
:param involved_data_type: a list of strings that indicate the type of data {'price','return','ex_return'} used in the strategy, the order of the strings will be the order that data are passed to the strategy
:type involved_data_type: list
:param need_extra_data: indicate whether the strategy needs extra_data (data other than {'price','return','ex_return'}) to function. Note: 1. the datetime index of extra_data must match that of the provided data. 2. change-of-frequency functionality will be suspended if extra data is needed
:type need_extra_data: bool
:param trace_back: indicate whether the strategy needs to trace back to past portfolios to function. Note: please handle the boundary situation where the list of past portfolios is empty in the strategy function
:type trace_back: bool
:param name: name of the strategy to be tested
:type name: str
:param missing_val : indicate whether user strategy function can handle missing values in the data on its own. True means the function can deal with missing values. False means it cannot
:type missing_val: bool
"""
def __init__(self, strategy, involved_data_type, need_extra_data=False, trace_back=False, name='Unnamed', missing_val=False):
"""
Initiate the model with the strategy function, and clarify involved data types needed, whose sequence MUST be consistent
with that of the list of dataframes used inside strategy function
:param strategy: user-defined function that serves as portfolio construction strategy
:type strategy: function
:param involved_data_type: a list of strings that indicate the type of data {'price','return','ex_return'} used in the strategy, the order of the strings will be the order that data are passed to the strategy
:type involved_data_type: list
:param need_extra_data: indicate whether the strategy needs extra_data (data other than {'price','return','ex_return'}) to function. Note: 1. the datetime index of extra_data must match that of the provided data. 2. change-of-frequency functionality will be suspended if extra data is needed
:type need_extra_data: bool
:param trace_back: indicate whether the strategy needs to trace back to past portfolios to function. Note: please handle the boundary situation where the list of past portfolios is empty in the strategy function
:type trace_back: bool
:param name: name of the strategy to be tested
:type name: str
:param missing_val : indicate whether user strategy function can handle missing values in the data on its own. True means the function can deal with missing values. False means it cannot. A wrapper function would be applied to the strategy function to deal with missing data. It will only pass in columns with full data and assign to other assets weight 0 while keeping the relative position the same. Warning: 1. The wrapper will slow the running speed significantly. 2. The wrapper does not cover missing data in "extra_data"..
:type missing_val: bool
"""
def wrapper(function, list_df, extra_data=pd.DataFrame(), historical_portfolios=pd.DataFrame()):
length = list_df[0].shape[1]
for frame in list_df:
if length >= len(frame.columns[frame.isna().any() == False]):
length = len(frame.columns[frame.isna().any() == False])
position_nan = frame.isna().any().values
w = np.zeros(list_df[0].shape[1])
if need_extra_data:
if trace_back:
w[position_nan == False] = function([frame[frame.columns[position_nan == False]] for frame in list_df],extra_data, historical_portfolios)
else:
w[position_nan == False] = function([frame[frame.columns[position_nan == False]] for frame in list_df],extra_data)
else:
if trace_back:
w[position_nan == False] = function([frame[frame.columns[position_nan == False]] for frame in list_df],historical_portfolios)
else:
w[position_nan == False] = function([frame[frame.columns[position_nan == False]] for frame in list_df])
return w
if not missing_val:
if name not in ['naive allocation portfolio',
'inverse variance allocation portfolio',
'min. variance allocation portfolio',
'basic mean-variance allocation portfolio',
'Fama-French 3-factor model portfolio',
'hierarchical-risk-parity portfolio',
'Bayes_Stein_shrinkage portfolio']:
warnings.warn('The library will deal with missing data. Running speed will be significantly reduced!')
if need_extra_data:
if trace_back:
self.__strategy = lambda x,y,z: wrapper(strategy, x,extra_data=y,historical_portfolios=z)
else:
self.__strategy = lambda x,y: wrapper(strategy, x,extra_data=y)
else:
if trace_back:
self.__strategy = lambda x,z: wrapper(strategy, x,historical_portfolios=z)
else:
self.__strategy = lambda x: wrapper(strategy, x)
else:
self.__strategy = strategy
if type(involved_data_type) != list:
raise Exception('"involved_data_type" must be given in a list')
else:
self.__involved_data_type = involved_data_type
if type(need_extra_data) != bool:
raise Exception('"need_extra_data" must be a bool variable')
else:
self.__need_extra_data = need_extra_data
if type(trace_back) != bool:
raise Exception('"trace_back" must be a bool variable')
else:
self.__trace_back = trace_back
if type(name) != str:
raise Exception('"name" must be a string variable')
else:
self.name = name
self.__last_test_frequency = None
self.__last_test_portfolios = None
self.__price_impact = False
self.__sharpe = None
self.__ceq = None
self.__average_turnover = None
self.__total_turnover = None
self.__net_returns = None
self.__net_excess_returns = None
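# Construction sketch: a toy equal-weight strategy wrapped in backtest_model (the
# strategy, its name and the involved data type below are illustrative assumptions,
# not part of the library API):
#
#   def equal_weight(list_df):
#       n = list_df[0].shape[1]
#       return np.ones(n) / n              # weights sum to 1, as required
#
#   model = backtest_model(equal_weight, ['ex_return'], name='toy equal weight')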
# function to prepare data, including change of frequency, convert between price, return and ex_return
def __prepare_data(self, data, freq_data, data_type, rf, interval, window, freq_strategy,
volume=pd.DataFrame(), price_impact=False):
if not isinstance(data, pd.DataFrame):
raise Exception('Please provide correct format of test data!')
try:
data.index = pd.to_datetime(data.index)
except:
print(
'Invalid index provided in your test data, please make sure that index is in compatible datetime format')
volume.index = pd.to_datetime(volume.index)
data = data.copy()
if data_type == 'return':
if freq_data != freq_strategy:
warnings.warn(
'data_type==return with interval>1 or change of frequency, Expect large amount of computational error')
data['###rf'] = rf # add 'rf' to the dataframe to go through transformation together
data = (1 + data).apply(lambda x: np.cumprod(x))
data = data.resample(freq_strategy).ffill().fillna(method='ffill').pct_change(fill_method=None).dropna(axis=0, how='all')
normal_return_df = data.iloc[:,:-1]
risk_free_df=data.iloc[:,-1]
excess_return_df = normal_return_df.sub(risk_free_df.values, axis=0).dropna(axis=0, how='all')
return (normal_return_df, excess_return_df, risk_free_df,
pd.DataFrame(index=normal_return_df.index))
else:
normal_return_df = data
excess_return_df = normal_return_df.sub(rf.values, axis=0)
return (normal_return_df, excess_return_df, rf.loc[normal_return_df.index],
pd.DataFrame(index=normal_return_df.index))
elif data_type == 'ex_return':
if freq_data != freq_strategy:
warnings.warn(
'data_type==ex_return with interval>1 or change of frequency, Expect large amount of computational error')
data = data.add(rf, axis=0)
data['###rf'] = rf # add 'rf' to the dataframe to go through transformation together
data = (1 + data).apply(lambda x: np.cumprod(x))
data = data.resample(freq_strategy).ffill().fillna(method='ffill').pct_change(fill_method=None).dropna(axis=0, how='all')
normal_return_df = data.iloc[:, :-1]
risk_free_df = data.iloc[:, -1]
excess_return_df = normal_return_df.sub(risk_free_df.values, axis=0).dropna(axis=0, how='all')
return (normal_return_df, excess_return_df, risk_free_df,
pd.DataFrame(index=normal_return_df.index))
else:
excess_return_df = data
normal_return_df = excess_return_df.add(rf, axis=0)
return (normal_return_df, excess_return_df, rf.loc[normal_return_df.index],
pd.DataFrame(index=normal_return_df.index))
elif data_type == 'price':
#data['###rf'] = rf # add 'rf' to the dataframe to go through transformation together
rf_df=np.cumprod(1+rf)
if freq_data != freq_strategy:
data = data.resample(freq_strategy).ffill().fillna(method='ffill')
rf_df=rf_df.resample(freq_strategy).ffill().fillna(method='ffill')
if price_impact:
volume = volume.resample(freq_strategy).mean()
normal_return_df = data.pct_change(fill_method=None).dropna(axis=0, how='all')
risk_free_df=rf_df.pct_change(fill_method=None).dropna(axis=0,how='all').loc[normal_return_df.index]
excess_return_df = normal_return_df.sub(risk_free_df.values, axis=0)
if price_impact:
return (normal_return_df, excess_return_df, volume.loc[normal_return_df.index],
risk_free_df,
data.loc[normal_return_df.index])
else:
return (normal_return_df, excess_return_df, risk_free_df,
data.loc[normal_return_df.index])
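# The change-of-frequency trick above in isolation: compound the returns into a
# pseudo price path, resample, then difference back (hypothetical monthly data):
#
#   r = pd.Series([0.01, -0.02, 0.03, 0.02, -0.01, 0.015],
#                 index=pd.date_range('2020-01-31', periods=6, freq='M'))
#   prices = (1 + r).cumprod()
#   quarterly = prices.resample('Q').ffill().pct_change(fill_method=None).dropna()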
# rebalance function to be applied to each rolling window of length (window)
def __rebalance(self, ex_return_df, normal_return_df, price_df, window, extra_data=None):
historical_portfolios = []
map = {'price': price_df, 'ex_return': ex_return_df, 'return': normal_return_df}
if self.__need_extra_data:
if self.__trace_back:
for df in ex_return_df.rolling(window):
if df.shape[0] >= window:
historical_portfolios.append(
self.__strategy([map[i].loc[df.index] for i in self.__involved_data_type],
extra_data.loc[df.index],
historical_portfolios))
else:
for df in ex_return_df.rolling(window):
if df.shape[0] >= window:
historical_portfolios.append(
self.__strategy([map[i].loc[df.index] for i in self.__involved_data_type],
extra_data.loc[df.index]))
else:
if self.__trace_back:
for df in ex_return_df.rolling(window):
if df.shape[0] >= window:
historical_portfolios.append(
self.__strategy([map[i].loc[df.index] for i in self.__involved_data_type],
historical_portfolios))
else:
for df in ex_return_df.rolling(window):
if df.shape[0] >= window:
historical_portfolios.append(
self.__strategy([map[i].loc[df.index] for i in self.__involved_data_type]))
return historical_portfolios
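# How the rolling iteration above behaves (pandas >= 1.1): iterating a Rolling object
# yields one frame per row, growing until the full window length is reached, which is
# why the guard df.shape[0] >= window skips the leading partial windows.
#
#   df = pd.DataFrame({'a': range(5)})
#   [w.index.tolist() for w in df.rolling(3)]
#   # -> [[0], [0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4]]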
def __test_price_impact(self, data, freq_data, data_type, rf, interval, window, freq_strategy, ptc_buy,
ptc_sell, ftc, volume, c, initial_wealth, extra_data, price_impact_model='default',power=0.6):
# prepare data
normal_return_df, excess_return_df, volume, risk_free_rate, price_df = self.__prepare_data(data, freq_data,
data_type, rf,
interval, window,
freq_strategy,
volume,
price_impact=True)
T = excess_return_df.shape[0] # length of dataset
N = excess_return_df.shape[1] # number of assets
if window < N:
warnings.warn('window length smaller than the number of assets, may not get feasible portfolios')
if window >= T - 2: # 2 here can change later
raise Exception(
'Too few samples to test on will result in poor performance : reduce window or decrease interval or '
'increase length of data')
# apply rolling windows with __rebalance
portfolios = self.__rebalance(excess_return_df, normal_return_df, price_df, window, extra_data)
try:
assert sum(portfolios[0]) <= 1 + 0.000001
except:
raise Exception(
'Please make sure your strategy builds a portfolio whose sum of weights does not exceed 1!')
portfolios = pd.DataFrame(portfolios).iloc[::interval]
# save the portfolios for calling
self.__last_test_portfolios = portfolios.set_axis(excess_return_df.columns.values, axis='columns').set_axis(
excess_return_df.iloc[window - 1::interval].index.values, axis='index')
if interval > 1:
if price_df.empty:
df=normal_return_df.join(risk_free_rate)
df=(1+df.iloc[window-1:]).apply(lambda x:np.cumprod(x)).iloc[::interval].pct_change(fill_method=None).dropna(axis=0,how='all')
normal_return_df=df.iloc[:,:-1]
risk_free_rate=df.iloc[:,-1]
excess_return_df = normal_return_df.sub(risk_free_rate.values, axis=0)
price_df = price_df.iloc[window - 1::interval].iloc[1:]
else:
price_df = price_df.iloc[window - 1::interval]
normal_return_df=price_df.pct_change(fill_method=None).dropna(axis=0,how='all')
risk_free_rate=np.cumprod(1+risk_free_rate[window-1:]).iloc[::interval].pct_change(fill_method=None).dropna(axis=0,how='all')
excess_return_df=normal_return_df.sub(risk_free_rate.values, axis=0)
price_df=price_df.iloc[1:]
else:
excess_return_df = excess_return_df.iloc[window:]
normal_return_df = normal_return_df.iloc[window:]
risk_free_rate = risk_free_rate.iloc[window:]
price_df = price_df.iloc[window:]
# pre_balance portfolios that serves as denominators
pre_balance_portfolios = (1 + normal_return_df).mul(portfolios.iloc[:-1].values)
# turnover
# normalise portfolio weight before rebalancing at the start of each period
# note that turnover ratio is not affected by price-impact model
pre_balance_portfolios = pre_balance_portfolios.div(pre_balance_portfolios.sum(axis=1).values, axis=0)
diff = (portfolios.iloc[1:].sub(pre_balance_portfolios.values)).dropna(axis=0, how='all')
self.__total_turnover = abs(diff).sum(axis=1).sum()
self.__average_turnover = self.__total_turnover / (T - window)
# pre_balance portfolios that serves as nominators
pre_balance_portfolios_2 = (1 + normal_return_df.iloc[1:]).mul(portfolios.iloc[1:-1].values)
# factor in the initial_wealth for all 'diff','portfolios'
portfolios *= initial_wealth
pre_balance_portfolios *= initial_wealth
pre_balance_portfolios_2 *= initial_wealth
diff *= initial_wealth
# transform volume to average volume
volume = volume.rolling(window).mean().dropna(axis=0, how='all').fillna(method='ffill').loc[normal_return_df.index]
# evolution of money account
pre_balance_money = np.zeros(risk_free_rate.shape[0])
# Money account value after each period, before rebalancing
pi_models = {'default': {'buy': 1 + c * (diff[diff > 0].div((volume * price_df).values)) ** power,
'sell': 1 - c * (abs(diff[diff < 0]).div((volume * price_df).values)) ** power}}
pi_buy, pi_sell = pi_models[price_impact_model]['buy'], pi_models[price_impact_model]['sell']
# sell = ((abs(diff[diff < 0]).mul(1 - ptc_sell)) * (
# 1 - c * (abs(diff[diff < 0]).div((volume * price_df).values)) ** 0.6)).sum(axis=1)
# buy = ((diff[diff >= 0].mul(1 + ptc_buy)) * (
# 1 + c * (diff[diff >= 0].div((volume * price_df).values)) ** 0.6)).sum(axis=1)
sell = ((abs(diff[diff < 0]).mul(1 - ptc_sell)) * pi_sell).sum(axis=1)
buy = ((diff[diff > 0].mul(1 + ptc_buy)) * pi_buy).sum(axis=1)
fixed = diff[diff != 0].count(axis=1).mul(ftc)
after_balance_money = pre_balance_money + sell - buy - fixed
pre_balance_money_2 = after_balance_money[:-1].mul((1 + risk_free_rate.iloc[1:]).values)
# net_returns
self.__net_returns = (pre_balance_portfolios_2.sum(axis=1).add(pre_balance_money_2.values)).div(
pre_balance_portfolios.sum(axis=1).add(pre_balance_money).iloc[:-1].values) - 1
self.__net_excess_returns = self.__net_returns.sub(risk_free_rate.iloc[1:].values)
self.__sharpe = np.mean(self.__net_excess_returns) / np.std(self.__net_excess_returns, ddof=1)
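# Worked numeric example of the default price-impact multiplier used above (all
# numbers are hypothetical):
#
#   c, power = 1.0, 0.6
#   trade_value = 1_000.0             # dollars bought of one asset in a period
#   dollar_volume = 1_000_000.0       # rolling average volume * price for that asset
#   buy_multiplier = 1 + c * (trade_value / dollar_volume) ** power
#   # -> 1 + 0.001 ** 0.6 ~= 1.016, i.e. roughly a 1.6% markup on the purchase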
def __test_no_price_impact(self, data, freq_data, data_type, rf, interval, window, freq_strategy, ptc_buy,
ptc_sell, ftc, initial_wealth, extra_data):
# prepare data
normal_return_df, excess_return_df, risk_free_rate, price_df = self.__prepare_data(data, freq_data,
data_type, rf,
interval, window,
freq_strategy)
T = excess_return_df.shape[0] # length of dataset
N = excess_return_df.shape[1] # number of assets
if window < N:
warnings.warn('window length smaller than the number of assets, may not get feasible portfolios')
if window >= T - 2: # 3 here can change later
raise Exception(
'Too few samples to test on will result in poor performance : reduce window or decrease interval or '
'increase length of data')
# apply rolling windows with __rebalance
portfolios = self.__rebalance(excess_return_df, normal_return_df, price_df, window, extra_data)
try:
assert sum(portfolios[0]) <= 1 + 0.000001
except:
raise Exception(
'Please make sure your strategy builds a portfolio whose sum of weights does not exceed 1!')
portfolios = pd.DataFrame(portfolios).iloc[::interval]
# save the portfolios for calling
self.__last_test_portfolios = portfolios.set_axis(excess_return_df.columns.values, axis='columns').set_axis(
excess_return_df.iloc[window - 1::interval].index.values, axis='index')
if interval > 1:
if price_df.empty:
df = normal_return_df.join(risk_free_rate)
df = (1 + df.iloc[window - 1:]).apply(lambda x: np.cumprod(x)).iloc[::interval].pct_change(fill_method=None).dropna(
axis=0, how='all')
normal_return_df = df.iloc[:, :-1]
risk_free_rate = df.iloc[:, -1]
excess_return_df = normal_return_df.sub(risk_free_rate.values, axis=0)
price_df = price_df.iloc[window - 1::interval].iloc[1:]
else:
price_df = price_df.iloc[window - 1::interval]
normal_return_df = price_df.pct_change(fill_method=None).dropna(axis=0, how='all')
risk_free_rate=np.cumprod(1+risk_free_rate[window-1:]).iloc[::interval].pct_change(fill_method=None).dropna(axis=0,how='all')
excess_return_df = normal_return_df.sub(risk_free_rate.values, axis=0)
price_df = price_df.iloc[1:]
else:
excess_return_df = excess_return_df.iloc[window:]
normal_return_df = normal_return_df.iloc[window:]
risk_free_rate = risk_free_rate.iloc[window:]
price_df = price_df.iloc[window:]
        # pre_balance portfolios that serve as denominators
pre_balance_portfolios = (1 + normal_return_df).mul(portfolios.iloc[:-1].values)
# turnover
# normalise portfolio weight before rebalancing at the start of each period
# note that turnover ratio is not affected by price-impact model
pre_balance_portfolios = pre_balance_portfolios.div(pre_balance_portfolios.sum(axis=1).values, axis=0)
diff = (portfolios.iloc[1:].sub(pre_balance_portfolios.values)).dropna(axis=0, how='all')
self.__total_turnover = abs(diff).sum(axis=1).sum()
self.__average_turnover = self.__total_turnover / (T - window)
        # pre_balance portfolios that serve as numerators
pre_balance_portfolios_2 = (1 + normal_return_df.iloc[1:]).mul(portfolios.iloc[1:-1].values)
# if ftc != 0:
# # factor in the initial_wealth for all 'diff','portfolios'
# portfolios *= initial_wealth
# pre_balance_portfolios *= initial_wealth
# pre_balance_portfolios_2 *= initial_wealth
# diff *= initial_wealth
#
# # transaction cost impacts
# sell = (abs(diff[diff < 0]).mul(1 - ptc_sell)).sum(axis=1)
# buy = (diff[diff >= 0].mul(1 + ptc_buy)).sum(axis=1)
# fixed = diff[diff != 0].count(axis=1).mul(ftc)
# # evolution of money account
# pre_balance_money = np.zeros(risk_free_rate.shape[0])
# after_balance_money = pre_balance_money + sell - buy - fixed
# pre_balance_money_2 = after_balance_money[:-1].mul((1 + risk_free_rate.iloc[1:]).values)
#
# self.__net_returns = (pre_balance_portfolios_2.sum(axis=1).add(pre_balance_money_2.values)).div(
# pre_balance_portfolios.sum(axis=1).add(pre_balance_money).iloc[:-1].values) - 1
#
# self.__net_excess_returns = self.__net_returns.sub(risk_free_rate.iloc[1:].values)
# else:
# # transaction cost impacts
# sell = (abs(diff[diff < 0]).mul(1 - ptc_sell)).sum(axis=1)
# buy = (diff[diff >= 0].mul(1 + ptc_buy)).sum(axis=1)
# # evolution of money account
# pre_balance_money = np.zeros(risk_free_rate.shape[0])
# after_balance_money = pre_balance_money + sell - buy
# pre_balance_money_2 = after_balance_money[:-1].mul((1 + risk_free_rate.iloc[1:]).values)
#
# self.__net_returns = (pre_balance_portfolios_2.sum(axis=1).add(pre_balance_money_2.values)).div(
# pre_balance_portfolios.sum(axis=1).add(pre_balance_money).iloc[:-1].values) - 1
#
# self.__net_excess_returns = self.__net_returns.sub(risk_free_rate.iloc[1:].values)
portfolios *= initial_wealth
pre_balance_portfolios *= initial_wealth
pre_balance_portfolios_2 *= initial_wealth
diff *= initial_wealth
# transaction cost impacts
sell = (abs(diff[diff < 0]).mul(1 - ptc_sell)).sum(axis=1)
buy = (diff[diff >= 0].mul(1 + ptc_buy)).sum(axis=1)
fixed = diff[diff != 0].count(axis=1).mul(ftc)
# evolution of money account
pre_balance_money = np.zeros(risk_free_rate.shape[0])
after_balance_money = pre_balance_money + sell - buy - fixed
pre_balance_money_2 = after_balance_money[:-1].mul((1 + risk_free_rate.iloc[1:]).values)
self.__net_returns = (pre_balance_portfolios_2.sum(axis=1).add(pre_balance_money_2.values)).div(
pre_balance_portfolios.sum(axis=1).add(pre_balance_money).iloc[:-1].values) - 1
self.__net_excess_returns = self.__net_returns.sub(risk_free_rate.iloc[1:].values)
self.__sharpe = np.mean(self.__net_excess_returns) / np.std(self.__net_excess_returns, ddof=1)
def backtest(self, data, freq_data, volume=pd.DataFrame(), data_type='price', rf=pd.Series(dtype='float'),
interval=1, window=60,
freq_strategy='D',
price_impact=False, ptc_buy=0, ptc_sell=0, ftc=0, c=1, initial_wealth=1E6,
extra_data=pd.DataFrame(), price_impact_model='default',power=0.6):
"""
Start the backtesting process with the built model. The function itself will not return anything. To get the results,
please call respective functions.
:param data: historical data that the strategy to be tested on. Index must be datetime format compatible
:type data: pd.DataFrame
:param freq_data: The frequency of the data provided, choose between {'D','W','M'}, where 'D' for day,'W' for week and 'M' for month. 'data' must be taken in the smallest unit of respective frequency, e.g. the frequency 'M' means the data is taken at each month
:type freq_data: str
:param volume: trading volume of each asset during each period (array of size T*N), or average trading volume for each asset over all periods (N-d array). If passing in as pd.DataFrame, then its index must match that of the data.
:type volume: pd.DataFrame or list or np.ndarray or pd.Series
:param data_type: choose from {'price','return','ex_return'} where 'price' stands for price data of assets at each timestamp, 'return' stands for normal percentage return of each asset in each period, 'ex_return' stands for percentage return net of risk-free rate
:type data_type: str
:param rf: data for risk-free rate in each period. Note: if 'rf' is passed in as a dataframe or series, the index of 'rf' must match that of 'data'
:type rf: pd.Series or pd.DataFrame or int or float
:param interval: number of periods that users want their portfolios to be rebalanced, the unit is based on 'freq_strategy'. e.g. If 'freq_data' is 'D', while 'freq_strategy' is 'M', and 'interval' is 2, then the portfolio will be rebalanced every 2 months using the user-defined portfolio-construction strategy
:type interval: int
:param window: length of rolling windows of 'data' wanted to feed into 'strategy' function. e.g. 'window'=60 means each time during rebalancing, past 60 periods of 'data' will be passed into user-defined strategy function
:type window: int
:param freq_strategy: The frequency on which the user want to use 'strategy' to rebalance the portfolio, choose between {'D','W','M'}. If "freq_strategy" is different from "freq_data", the library will resample data on "freq_strategy". Note: 'freq_data' should be smaller than 'freq_strategy' with the sequence 'D' < 'W' < 'M'
:type freq_strategy: str
:param price_impact: indicate whether to use price-impact model or not
:type price_impact: bool
        :param ptc_buy: proportional transaction cost of buying each asset, measured in basis points. Can be a Series or array that provides one cost for each asset, or a single value that stands for a universal transaction cost. Note: cannot be a list, and labels must not be provided
:type ptc_buy: pd.Series or np.ndarray or int or float
        :param ptc_sell: proportional transaction cost of selling each asset, measured in basis points. Can be a Series or array that provides one cost for each asset, or a single value that stands for a universal transaction cost. Note: cannot be a list, and labels must not be provided
:type ptc_sell: pd.Series or np.ndarray or int or float
:param ftc: dollar value of fixed transaction cost of each transaction, measured in one unit of any currency.
:type ftc: int or float
:param c: market depth indicators. Can be a Series or array that provide one market depth for each asset, or a single variable that stands for universal market depth. Note: Do NOT provide labels
:type c: pd.Series or int or np.ndarray or float
:param initial_wealth: dollar value of initial wealth of testing when 'price-impact' is true or 'ftc'!=0
:type initial_wealth: int or float
:param extra_data: extra_data to be passed into 'strategy' only when 'need_extra_data'==True. Note: 1. the datetime index of extra_data must match that of the provided data. 2. change-of-frequency functionality will be suspended if extra data is needed
:type extra_data: pd.DataFrame
:param price_impact_model: choose the price impact model you want to use from {'default'} (testing feature, to be built on)
:type price_impact_model: str
:return: None
"""
random.seed(1)
if price_impact_model not in {'default'}:
raise Exception('Unknown type of "price_impact_model"!')
if type(initial_wealth) != int and type(initial_wealth) != float:
raise Exception('Wrong type of "initial_wealth" given!')
        if type(c) != float and type(c) != int and not isinstance(c, pd.Series) and not isinstance(c, np.ndarray):
raise Exception("Wrong type of 'c' given!")
if type(ftc) != int and type(ftc) != float:
raise Exception("Wrong type of 'ftc' given!")
if type(ptc_buy) != int and type(ptc_buy) != float and not isinstance(ptc_buy, pd.Series) and not isinstance(
ptc_buy,
np.ndarray):
raise Exception("Wrong type of 'ptc_buy' provided!")
else:
ptc_buy /= 10000
if type(ptc_sell) != int and type(ptc_sell) != float and not isinstance(ptc_sell, pd.Series) and not isinstance(
ptc_sell,
np.ndarray):
raise Exception("Wrong type of 'ptc_sell' provided!")
else:
ptc_sell /= 10000
if type(price_impact) != bool:
raise Exception("'price_impact' must be a boolean variable")
if freq_data not in {'D', 'W', 'M'}:
raise Exception("'freq_data' must be chosen from {'D','W','M'}")
if freq_strategy not in {'D', 'W', 'M'}:
raise Exception("'freq_strategy' must be chosen from {'D','W','M'}")
if freq_data == 'W' and freq_strategy == 'D':
raise Exception("'freq_data' should be smaller than 'freq_strategy' with the sequence 'D' < 'W' < 'M'")
if freq_data == 'M' and freq_strategy in {'D', 'W'}:
raise Exception("'freq_data' should be smaller than 'freq_strategy' with the sequence 'D' < 'W' < 'M'")
if type(window) != int:
raise Exception("'window' must be an 'int' variable")
if type(interval) != int:
raise Exception("'interval' must be an 'int' variable")
if initial_wealth == 1E6:
if price_impact == True or ftc != 0:
warnings.warn('Using default initial_wealth value @1E6!')
if self.__need_extra_data == True:
if isinstance(extra_data, pd.DataFrame) or isinstance(extra_data, pd.Series):
if extra_data.empty:
raise Exception('Please provide extra_data as dataframe')
try:
extra_data.index = pd.to_datetime(extra_data.index)
except:
print(
'Invalid index provided in your "extra_data", please make sure that index is in compatible datetime format')
else:
raise Exception(
'"extra_data" need to be a Series or DataFrame with datetime index corresponding to test data provided')
# if user-defined strategy need extra_data to operate, the library will NOT provide change of frequency functionality
if freq_strategy != freq_data:
raise Exception(
'If "extra_data" needed for your strategy, please make sure "freq_strategy" matches "freq_data"!')
if not extra_data.index.equals(data.index):
raise IndexError('Index of extra_data and index of data do not match!')
if (data_type == 'return' or data_type == 'ex_return') and ('price' in self.__involved_data_type):
raise Exception('"price" data type is involved in your strategy, please provide data with type "price"')
if isinstance(rf, pd.Series) or isinstance(rf, pd.DataFrame):
# if rf.empty and (('ex_return' in self.__involved_data_type) or ('return' in self.__involved_data_type)):
if rf.empty:
raise Exception(
'Please provide risk-free rate! (Set it to 0 if you do not want to consider it. Note that in this case, net_returns and net_excess_returns will be the same)')
if not rf.index.equals(data.index):
raise IndexError('Index of "rf" and index of "data" do not match!')
elif type(rf) == int or type(rf) == float:
rf = pd.Series([rf] * data.shape[0], index=data.index)
else:
raise Exception('Wrong format of "rf" is given.')
# if ftc != 0:
# if data_type != 'price':
# raise Exception('data_type must be "price" when using fixed transaction cost (ftc!=0)')
# divide into price_impact model and no_price_impact model
self.__price_impact = price_impact
frequency_map = {'D': 'Day', 'W': 'Week', 'M': 'Month'}
if price_impact == False:
self.__last_test_frequency = f'{interval} {frequency_map[freq_strategy]}'
self.__test_no_price_impact(data, freq_data, data_type, rf, interval, window, freq_strategy,
ptc_buy, ptc_sell, ftc, initial_wealth, extra_data)
else:
if isinstance(volume, pd.DataFrame):
if not volume.index.equals(data.index):
                    raise Exception('Index of "volume" and index of "data" do not match!')
elif isinstance(volume, pd.Series) or isinstance(volume, np.ndarray):
try:
                    volume = pd.DataFrame(np.asarray(volume).reshape(1, -1), columns=data.columns)
except:
print('Check your volume data!')
volume = pd.concat([volume] * data.shape[0]).set_index(data.index)
elif isinstance(volume, list):
try:
volume = pd.DataFrame([volume], columns=data.columns)
except:
print('Check your volume data!')
volume = pd.concat([volume] * data.shape[0]).set_index(data.index)
else:
raise Exception('Please provide volume in correct format!')
if data_type != 'price':
raise Exception('Must provide "price" type data for price-impact model')
elif volume.empty:
raise Exception(
'Must provide correct volume of each asset for price-impact model. For specific requirements '
'please refer to the description of the function')
else:
self.__last_test_frequency = f'{interval} {frequency_map[freq_strategy]}'
self.__test_price_impact(data, freq_data, data_type, rf, interval, window, freq_strategy,
ptc_buy, ptc_sell, ftc, volume, c, initial_wealth, extra_data,
price_impact_model,power)
return
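    # A minimal usage sketch (hedged; `prices` and `rf_series` are hypothetical names for
    # a price DataFrame indexed by dates and a risk-free Series aligned with it):
    #   model = backtest_model(my_strategy, ['ex_return'], name='my strategy')
    #   model.backtest(prices, freq_data='D', data_type='price', rf=rf_series,
    #                  interval=1, window=60, freq_strategy='D')
    #   model.get_sharpe(); model.general_performance()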
def get_net_excess_returns(self):
'''
Get the net excess returns (net of risk-free rate) and respective dates of the model tested.
'''
return self.__net_excess_returns
def get_net_returns(self):
'''
Get the net returns and respective dates of the model tested
'''
return self.__net_returns
def get_sharpe(self):
'''
Get the sharpe ratio of the model tested
'''
# self.__sharpe = np.mean(self.__net_excess_returns) / np.std(self.__net_excess_returns, ddof=1)
return self.__sharpe
def get_turnover(self, typ='average'):
'''
Get the average turnover rate of each period as well as total turnover rate over all periods of the model tested
:param typ: choose from {'average','total'}, which indicates average turnover and total turnover respectively
:type typ: str
'''
# print(f"average turnover is: {self.__average_turnover:.5%}")
# print(f"total turnover is: {self.__total_turnover:.5%}")
return self.__average_turnover if typ=='average' else self.__total_turnover
def get_ceq(self, x=1):
'''
Get certainty equivalent returns (ceq) of the model tested with the given risk aversion factor
:param x: risk aversion factor
:type x: float or int or pd.Series or np.ndarray
:return: certainty equivalent returns
'''
self.__ceq = np.mean(self.__net_excess_returns) - x / 2 * np.cov(self.__net_excess_returns, ddof=1)
return self.__ceq
def get_portfolios(self):
return self.__last_test_portfolios
def general_performance(self):
'''
Get a set of performance evaluation metrics of the model tested
'''
output = {}
output['strategy name'] = self.name
output['Price impact'] = 'ON' if self.__price_impact else 'OFF'
output['Start date of portfolio'] = self.__net_returns.index[0]
output['End date of portfolio'] = self.__net_returns.index[-1]
output['Frequency of rebalance'] = self.__last_test_frequency
output['Duration'] = f'{self.__net_returns.shape[0]} periods'
evolution = np.cumprod(1 + self.__net_returns)
        output['Final Portfolio Return (%)'] = f"{evolution.iloc[-1]:.4%}"
output['Peak Portfolio Return (%)'] = f"{evolution.max():.4%}"
output['Bottom Portfolio Return (%)'] = f"{evolution.min():.4%}"
        output['Historical Volatility (%)'] = f"{np.std(self.__net_returns, ddof=1):.4%}"
output['Sharpe Ratio'] = f"{self.__sharpe:.4f}"
std_down = np.std(self.__net_excess_returns[self.__net_excess_returns < 0], ddof=1)
output['Sortino Ratio'] = f"{np.mean(self.__net_excess_returns) / std_down:.4f}"
drawdown = (evolution.max() - evolution.min()) / evolution.max()
output['Calmar Ratio'] = f"{np.mean(self.__net_excess_returns) / drawdown:.4f}"
output['Max. Drawdown (%)'] = f"{drawdown:.4%}"
output['Max. Drawdown Duration'] = evolution.loc[evolution == evolution.max()].index[0] - evolution.loc[
evolution == evolution.min()].index[0]
output[
'% of positive-net-excess-return periods'] = f"{self.__net_excess_returns[self.__net_excess_returns > 0].count() / self.__net_excess_returns.count():.4%}"
output[
'% of positive-net-return periods'] = f"{self.__net_returns[self.__net_returns > 0].count() / self.__net_returns.count():.4%}"
output['Average turnover (%)'] = f"{self.__average_turnover:.4%}"
output['Total turnover (%)'] = f"{self.__total_turnover:.4%}"
output['95% VaR on net-excess returns'] = f"{np.quantile(self.__net_excess_returns, 0.05):.4%}"
output['95% VaR on net returns'] = f"{np.quantile(self.__net_returns, 0.05):.4%}"
return pd.Series(output)
class mperiods_backtest_model(backtest_model):
'''
    Subclass mperiods_backtest_model, which specifically handles multi-period strategies. No trace_back argument is needed
    because the library automatically enables tracing back to the last portfolio of the previous interval.
'''
def __init__(self, strategy, involved_data_type, need_extra_data=False, name='Unnamed'):
"""
Initiate the model with the strategy function, and clarify involved data types needed, whose sequence MUST be consistent
with that of the list of dataframes used inside strategy function
:param strategy: user-defined function that serves as portfolio construction strategy. Note: different from single-period strategies where functions only take list_df as input (other than extra_data and past_portfolios), multi-periods strategy functions also take current wealth x as input.
:type strategy: function
:param involved_data_type: a list of strings that indicate the type of data {'price','return','ex_return'} used in the strategy, the order of the strings will be the order that data are passed to the strategy. Note: in multi-periods models, the library only accepts a list of length 1 at the moment
:type involved_data_type: list
:param need_extra_data: indicate whether the strategy need extra_data (data other than {'price','return','ex_return'}) to function. Note: 1. the datetime index of extra_data must match that of the provided data. 2. change-of-frequency functionality will be suspended if extra data is needed
:type need_extra_data: bool
:param name: name of the strategy to be tested
:type name: str
"""
self.__strategy = strategy
if name not in ['multi-periods global minimum variance portfolio']:
warnings.warn('The library will deal with missing data. Running speed will be significantly reduced!')
if type(involved_data_type) != list:
raise Exception('"involved_data_type" must be given in a list')
else:
self.__involved_data_type = involved_data_type
if type(need_extra_data) != bool:
raise Exception('"need_extra_data" must be a bool variable')
else:
self.__need_extra_data = need_extra_data
if type(name) != str:
raise Exception('"name" must be a string variable')
else:
self.name = name
self.__last_test_frequency = None
self.__last_test_portfolios = None
self.__price_impact = False
self.__sharpe = None
self.__ceq = None
self.__average_turnover = None
self.__total_turnover = None
self.__net_returns = None
self.__net_excess_returns = None
# add in options to pass in extra_data and historical_portfolios later
def __each_interval(self,ex_return_df, normal_return_df, price_df, rf, window, interval, last_portfolio, ptc_buy=0,
ptc_sell=0, ftc=0, volume=pd.DataFrame(), c=1, initial_wealth=1E6, extra_data=None,
price_impact=False, price_impact_model='default'):
'''
everything should match that of the main function "backtest" except for "rf".
last_portfolio: the EVOLVED version of last portfolio available. Note that The very first portfolio would be all 0's.
We pass that to the first interval to calculate the frictions
'''
portfolios = [] # Not measured in weights but money values in each asset
turnover = 0
map = {'price': price_df, 'ex_return': ex_return_df, 'return': normal_return_df}
# length = list_df[0].shape[1]
# for frame in list_df:
# if length >= len(frame.columns[frame.isna().any() == False]):
# length = len(frame.columns[frame.isna().any() == False])
# position_nan = frame.isna().any().values
df = map[self.__involved_data_type[0]]
position_nan = df.isna().any().values
df = df[df.columns[position_nan == False]]
# arguments calculation will be done in each strategy function
if price_impact: # with price impact
if last_portfolio.sum() == 0: # boundary condition at the first portfolio
money_account = initial_wealth
else:
money_account = 0
for t in range(interval):
if t == 0: # at the start of each interval
x = initial_wealth
if self.__need_extra_data:
temp_u = self.__strategy([df.iloc[:window, :]], x, extra_data.iloc[:window, :])
else:
temp_u = self.__strategy([df.iloc[:window, :]], x)
money_account = money_account + x - temp_u.sum() # reset/re-initiate money account
u = np.zeros(df.shape[1])
u[position_nan == False] = temp_u
diff = u - last_portfolio
turnover += sum(abs(diff)) / x
portfolios.append(u)
else:
# first calculate the new current wealth x
evolved_u = (1 + normal_return_df.iloc[window + t - 1, :]).mul(portfolios[-1])
money_account = (1 + rf.iloc[window + t - 1]) * money_account
x = evolved_u.sum() + money_account
# use the new wealth to re-balance the portfolio
if self.__need_extra_data:
temp_u = self.__strategy([df.iloc[:window, :]], x, extra_data.iloc[:window, :])
else:
temp_u = self.__strategy([df.iloc[:window, :]], x)
money_account = x - temp_u.sum() # reset/re-initiate money account
u = np.zeros(df.shape[1])
u[position_nan == False] = temp_u
diff = u - evolved_u
turnover += sum(abs(diff)) / x
portfolios.append(u)
pi_models = {'default': {'buy': 1 + c * (
diff[diff >= 0] / ((volume.iloc[window - 1] * price_df.iloc[window - 1]).values)) ** 0.6,
'sell': 1 - c * (abs(diff[diff < 0]) / (
(volume.iloc[window - 1] * price_df.iloc[window - 1]).values)) ** 0.6}}
pi_buy, pi_sell = pi_models[price_impact_model]['buy'], pi_models[price_impact_model]['sell']
sell = ((abs(diff[diff < 0]) * (1 - ptc_sell)) * pi_sell).sum()
buy = ((diff[diff >= 0] * (1 + ptc_buy)) * pi_buy).sum()
fixed = len(diff[diff != 0]) * (ftc)
money_account = money_account + sell - buy - fixed
# money_account undergoes transformation of interests in next period, to be calculated in t+1
elif not price_impact:
if last_portfolio.sum() == 0: # boundary condition at the first portfolio
money_account = initial_wealth
else:
money_account = 0
for t in range(interval):
if t == 0: # at the start of each interval
x = initial_wealth
if self.__need_extra_data:
temp_u = self.__strategy([df.iloc[:window, :]], x, extra_data.iloc[:window, :])
else:
temp_u = self.__strategy([df.iloc[:window, :]], x)
money_account = money_account + x - temp_u.sum() # reset/re-initiate money account
u = np.zeros(df.shape[1])
u[position_nan == False] = temp_u
diff = u - last_portfolio
turnover += sum(abs(diff)) / x
portfolios.append(u)
else:
# first calculate the new current wealth x
evolved_u = (1 + normal_return_df.iloc[window + t - 1, :]).mul(portfolios[-1])
money_account = (1 + rf.iloc[window + t - 1]) * money_account
x = evolved_u.sum() + money_account
# use the new wealth to re-balance the portfolio
if self.__need_extra_data:
temp_u = self.__strategy([df.iloc[:window, :]], x, extra_data.iloc[:window, :])
else:
temp_u = self.__strategy([df.iloc[:window, :]], x)
money_account = x - temp_u.sum() # reset/re-initiate money account
u = np.zeros(df.shape[1])
u[position_nan == False] = temp_u
diff = u - evolved_u
turnover += sum(abs(diff)) / x
portfolios.append(u)
sell = ((abs(diff[diff < 0]) * (1 - ptc_sell))).sum()
buy = ((diff[diff >= 0] * (1 + ptc_buy))).sum()
fixed = len(diff[diff != 0]) * (ftc)
money_account = money_account + sell - buy - fixed
# at the last period of this interval, the portfolio will undergo market movements
evolved_u = (1 + normal_return_df.iloc[window + interval - 1, :]).mul(portfolios[-1])
money_account = (1 + rf.iloc[window + interval - 1]) * money_account
x = evolved_u.sum() + money_account # this will be the initial_wealth of next interval
# calculate the returns and net returns here so we won't repeat the calculation again
_rf = (1 + rf.iloc[window:window + interval]).cumprod().iloc[-1] - 1
_return = (x - initial_wealth) / initial_wealth
_net_return = _return - _rf
return (portfolios, x, evolved_u, _return, _net_return, turnover)
# return all portfolios including the last-period EVOLVED portfolio,
# and final wealth of current interval and returns and net returns
# rebalance function need to be changed slightly to fit the multi-period strategies
def __rebalance(self, ex_return_df, normal_return_df, price_df, rf, window, interval, ptc_buy=0,
ptc_sell=0, ftc=0, volume=pd.DataFrame(), c=1, initial_wealth=1E6, extra_data=None,
price_impact=False, price_impact_model='default'):
T, N=ex_return_df.shape[0], ex_return_df.shape[1]
historical_portfolios = []
map = {'price': price_df, 'ex_return': ex_return_df, 'return': normal_return_df}
if self.__need_extra_data:
last_portfolio=np.zeros(N)
x=initial_wealth
self.__total_turnover=0
self.__net_returns=[]
self.__net_excess_returns=[]
for index in range(0, T - window + 1, interval):
if price_impact:
portfolios, x, last_portfolio, _return, _net_return, turnover=self.__each_interval(
ex_return_df.iloc[index:index + window], normal_return_df.iloc[index:index + window],
price_df.iloc[index:index + window], rf.iloc[index:index + window], window, interval,
last_portfolio, ptc_buy, ptc_sell, ftc, volume.iloc[index:index + window], c, x,
extra_data.iloc[index:index + window], price_impact, price_impact_model)
else:
portfolios, x, last_portfolio, _return, _net_return, turnover = self.__each_interval(
ex_return_df.iloc[index:index + window], normal_return_df.iloc[index:index + window],
price_df.iloc[index:index + window], rf.iloc[index:index + window], window, interval,
last_portfolio, ptc_buy, ptc_sell, ftc, volume, c, x,
extra_data.iloc[index:index + window], price_impact, price_impact_model)
self.__total_turnover+=turnover
self.__net_returns.append(_return)
self.__net_excess_returns.append(_net_return)
historical_portfolios.extend(portfolios)
else:
last_portfolio = np.zeros(N)
x = initial_wealth
self.__total_turnover = 0
self.__net_returns = []
self.__net_excess_returns = []
for index in range(0, T - window + 1, interval):
if price_impact:
portfolios, x, last_portfolio, _return, _net_return, turnover = self.__each_interval(
ex_return_df.iloc[index:index + window], normal_return_df.iloc[index:index + window],
price_df.iloc[index:index + window], rf.iloc[index:index + window], window, interval,
last_portfolio, ptc_buy, ptc_sell, ftc, volume.iloc[index:index + window], c, x,
extra_data, price_impact, price_impact_model)
else:
portfolios, x, last_portfolio, _return, _net_return, turnover = self.__each_interval(
ex_return_df.iloc[index:index + window], normal_return_df.iloc[index:index + window],
price_df.iloc[index:index + window], rf.iloc[index:index + window], window, interval,
last_portfolio, ptc_buy, ptc_sell, ftc, volume, c, x,
extra_data, price_impact, price_impact_model)
self.__total_turnover += turnover
self.__net_returns.append(_return)
self.__net_excess_returns.append(_net_return)
historical_portfolios.extend(portfolios)
return historical_portfolios
def __test_price_impact(self, data, freq_data, data_type, rf, interval, window, freq_strategy, ptc_buy,
ptc_sell, ftc, volume, c, initial_wealth, extra_data, price_impact_model='default'):
# prepare data
normal_return_df, excess_return_df, volume, risk_free_rate, price_df = self.__prepare_data(data, freq_data,
data_type, rf,
interval, window,
freq_strategy,
volume,
price_impact=True)
T = excess_return_df.shape[0] # length of dataset
N = excess_return_df.shape[1] # number of assets
if window < N:
warnings.warn('window length smaller than the number of assets, may not get feasible portfolios')
if window >= T - 2: # 2 here can change later
raise Exception(
'Too few samples to test on will result in poor performance : reduce window or decrease interval or '
'increase length of data')
# apply __rebalance to get the portfolios
volume = volume.rolling(window).mean().dropna(axis=0, how='all').loc[normal_return_df.index]
portfolios = self.__rebalance(excess_return_df, normal_return_df, price_df, rf, window, interval, ptc_buy, ptc_sell,
ftc, volume, c, initial_wealth, extra_data, price_impact=True, price_impact_model= price_impact_model)
# Not valid anymore because portfolios are measured in money value instead of weights
# try:
# assert sum(portfolios[0]) <= 1 + 0.000001
# except:
# raise Exception(
# 'Please make sure your strategy builds a portfolios whose sum of weights does not exceed 1!')
# All historical portfolios are saved, including the re-balancing ones in the middle.
# portfolios = pd.DataFrame(portfolios).iloc[::interval]
# save the portfolios for calling
        self.__last_test_portfolios = pd.DataFrame(portfolios).set_axis(excess_return_df.columns.values, axis='columns').set_axis(
            excess_return_df.iloc[window - 1:].index.values, axis='index')
self.__average_turnover=self.__total_turnover/(len(portfolios))
self.__sharpe = np.mean(self.__net_excess_returns) / np.std(self.__net_excess_returns, ddof=1)
def __test_no_price_impact(self, data, freq_data, data_type, rf, interval, window, freq_strategy, ptc_buy,
ptc_sell, ftc, initial_wealth, extra_data):
# prepare data
normal_return_df, excess_return_df, risk_free_rate, price_df = self.__prepare_data(data, freq_data,
data_type, rf,
interval, window,
freq_strategy)
T = excess_return_df.shape[0] # length of dataset
N = excess_return_df.shape[1] # number of assets
if window < N:
warnings.warn('window length smaller than the number of assets, may not get feasible portfolios')
if window >= T - 2: # 3 here can change later
raise Exception(
'Too few samples to test on will result in poor performance : reduce window or decrease interval or '
'increase length of data')
# apply rolling windows with __rebalance
portfolios = self.__rebalance(excess_return_df, normal_return_df, price_df, rf, window, interval, ptc_buy, ptc_sell,
ftc, initial_wealth=initial_wealth, extra_data=extra_data, price_impact=False)
        self.__last_test_portfolios = pd.DataFrame(portfolios).set_axis(excess_return_df.columns.values, axis='columns').set_axis(
            excess_return_df.iloc[window - 1:].index.values, axis='index')
self.__average_turnover = self.__total_turnover / (len(portfolios))
self.__sharpe = np.mean(self.__net_excess_returns) / np.std(self.__net_excess_returns, ddof=1)
# built-in strategies in the library
# single-period strategies
def __naive_alloc(list_df):
df = list_df[0]
n = df.shape[1]
res = np.ones(n) / n
return res
naive_alloc = backtest_model(__naive_alloc, ['ex_return'], name='naive allocation portfolio')
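# Hedged usage sketch for the built-in strategies (`prices` is a hypothetical daily price
# DataFrame and the risk-free rate is set to a flat 2% per year):
#   naive_alloc.backtest(prices, freq_data='D', data_type='price',
#                        rf=pd.Series(0.02 / 252, index=prices.index), window=60)
#   naive_alloc.get_net_returns(); naive_alloc.get_portfolios()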
def __iv_alloc(list_df):
# Compute the inverse-variance portfolio
df = list_df[0]
cov = df.cov()
ivp = 1. / np.diag(cov)
ivp /= ivp.sum()
return ivp
iv_alloc = backtest_model(__iv_alloc, ['ex_return'], name='inverse variance allocation portfolio')
def __min_var(list_df):
df = list_df[0]
n = df.shape[1]
u = np.ones(n)
cov = df.cov()
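    # closed-form minimum-variance weights: w = inv(cov) @ 1 / (1' @ inv(cov) @ 1)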
in_cov = np.linalg.inv(cov)
w = np.dot(in_cov, u)
w /= w.sum()
return w
min_var = backtest_model(__min_var, ['ex_return'], name='min. variance allocation portfolio')
def __mean_variance(list_df):
df = list_df[0]
n = df.shape[1]
cov = df.cov()
in_cov = np.linalg.inv(cov)
u = df.mean(axis=0)
w = np.dot(in_cov, u)
w /= w.sum()
return w
basic_mean_variance = backtest_model(__mean_variance, ['ex_return'], name='basic mean-variance allocation portfolio')
def __FF3(list_df, extra_data): # with missing data handling
df = list_df[0]
position_nan = df.isna().any().values
w = np.zeros(df.shape[1])
X = extra_data
y = df[df.columns[position_nan == False]]
reg = LinearRegression(fit_intercept=True).fit(X, y)
beta = reg.coef_
var_epi = (y - reg.predict(X)).var(axis=0)
cov = np.dot(np.dot(beta, X.cov()), beta.T) + np.diag(var_epi)
in_cov = np.linalg.inv(cov)
temp_w = np.dot(in_cov, np.ones(y.shape[1]))
temp_w /= temp_w.sum()
w[position_nan == False] = temp_w
return w
FF_3_factor_model = backtest_model(__FF3, ['ex_return'], need_extra_data=True,
name='Fama-French 3-factor model portfolio',missing_val=True)
def __hrp_alloc(list_df):
# Compute the hierarchical-risk-parity portfolio
x = list_df[0]
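    # HRP proceeds in three stages: (1) hierarchical clustering of assets from a
    # correlation-based distance, (2) quasi-diagonalisation of the covariance matrix by
    # re-ordering assets along the cluster tree, (3) recursive bisection that splits the
    # allocation of each cluster between its two halves in inverse proportion to their variances.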
def getIVP(cov, **kargs):
# Compute the inverse-variance portfolio
ivp = 1. / np.diag(cov)
ivp /= ivp.sum()
return ivp
def getClusterVar(cov, cItems):
# Compute variance per cluster
cov_ = cov.loc[cItems, cItems] # matrix slice
w_ = getIVP(cov_).reshape(-1, 1)
cVar = np.dot(np.dot(w_.T, cov_), w_)[0, 0]
return cVar
def getQuasiDiag(link):
# Sort clustered items by distance
link = link.astype(int)
sortIx = pd.Series([link[-1, 0], link[-1, 1]])
numItems = link[-1, 3] # number of original items
while sortIx.max() >= numItems:
sortIx.index = range(0, sortIx.shape[0] * 2, 2) # make space
df0 = sortIx[sortIx >= numItems] # find clusters
i = df0.index
j = df0.values - numItems
sortIx[i] = link[j, 0] # item 1
df0 = pd.Series(link[j, 1], index=i + 1)
            sortIx = pd.concat([sortIx, df0])  # item 2
sortIx = sortIx.sort_index() # re-sort
sortIx.index = range(sortIx.shape[0]) # re-index
return sortIx.tolist()
def getRecBipart(cov, sortIx):
# Compute HRP alloc
w = pd.Series(1, index=sortIx)
cItems = [sortIx] # initialize all items in one cluster
while len(cItems) > 0:
cItems = [i[j:k] for i in cItems for j, k in ((0, int(len(i) / 2)), (int(len(i) / 2), int(len(i)))) if
len(i) > 1] # bisection
for i in range(0, len(cItems), 2): # parse in pairs
cItems0 = cItems[i] # cluster 1
cItems1 = cItems[i + 1] # cluster 2
cVar0 = getClusterVar(cov, cItems0)
cVar1 = getClusterVar(cov, cItems1)
alpha = 1 - cVar0 / (cVar0 + cVar1)
w[cItems0] *= alpha # weight 1
w[cItems1] *= 1 - alpha # weight 2
return w
def correlDist(corr):
# A distance matrix based on correlation, where 0<=d[i,j]<=1
# This is a proper distance metric
dist = ((1 - corr) / 2.) ** .5 # distance matrix
return dist
cov, corr = x.cov(), x.corr()
# clustering
dist = correlDist(corr)
link = sch.linkage(dist, 'single')
sortIx = getQuasiDiag(link)
sortIx = corr.index[sortIx].tolist() # recover labels
df0 = corr.loc[sortIx, sortIx] # reorder, quasi-diagonalized correlation matrix
# allocation
res = getRecBipart(cov, sortIx)
return res
hrp_alloc = backtest_model(__hrp_alloc, ['ex_return'], name='hierarchical-risk-parity portfolio')
def __Bayes_Stein(list_df): # ex_return
df = list_df[0]
m = 120
u_ = df.mean(axis=0)
n = df.shape[1]
cov_ = np.dot((df - u_).T, df - u_) / (m - n - 2)
u_min = np.mean(u_)
inv_cov = np.linalg.inv(cov_)
sig = (n + 2) / (m * np.dot(np.dot((u_ - u_min).T, inv_cov), u_ - u_min) + n + 2)
u_bs = (1 - sig) * u_ + sig * u_min
w = np.dot(inv_cov, u_bs)
w /= w.sum()
return w
def __Bayes_Stein_2(list_df): # ex_return
df = list_df[0]
m = 120
u_ = df.mean(axis=0)
n = df.shape[1]
cov_ = np.dot((df - u_).T, df - u_) / (m - n - 2)
min_w=__min_var(list_df)
u_min=np.dot(u_, min_w)
inv_cov = np.linalg.inv(cov_)
sig = (n + 2) / (m * np.dot(np.dot((u_ - u_min).T, inv_cov), u_ - u_min) + n + 2)
u_bs = (1 - sig) * u_ + sig * u_min
w = np.dot(inv_cov, u_bs)
w /= w.sum()
return w
Bayes_Stein_shrink = backtest_model(__Bayes_Stein, ['ex_return'], name='Bayes_Stein_shrinkage portfolio')
import scipy
def __quadratic(x,c):
'''
x: dataframe of returns, columns as assets, rows as time
c: exposure constraint
'''
# Compute the portfolio using quadratic programming approach, with given exposure constraint c
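    # The optimisation vector stacks the weights w with auxiliary variables a that bound |w|:
    # c1 forces sum(w) = 1, c3/c4 force -a <= w <= a, and c2 caps sum(a) <= c, so c acts as a
    # gross-exposure (leverage) constraint.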
n=x.shape[1]
init=np.hstack((np.ones(n)*1/n,np.zeros(n)))
cov=x.cov()
def function(w):
return np.dot(np.dot(w[:n].T,cov),w[:n])
c1=scipy.optimize.LinearConstraint(np.hstack((np.ones(n),np.zeros(n))),1,1) #equality constraint
c2=scipy.optimize.LinearConstraint(np.hstack((np.zeros(n),np.ones(n))),-np.inf,c)
eye=np.identity(n)
ma_1=np.hstack((eye,-1*eye))
c3=scipy.optimize.LinearConstraint(ma_1,np.full(n,-np.inf),np.zeros(n))
ma_2=np.hstack((eye,eye))
c4=scipy.optimize.LinearConstraint(ma_2,np.zeros(n),np.full(n,np.inf))
# c2=scipy.optimize.LinearConstraint(np.identity(n),np.ones(n)*1.0e-11,np.ones(n)) #inequality constraint
opt=scipy.optimize.minimize(function,init,constraints=(c1,c2,c3,c4),method='trust-constr',options={'gtol': 1e-8, 'disp': False})
res=opt.x
return res[:n]
def __no_short_sell(list_df): #ex_return
df=list_df[0]
return __quadratic(df,1)
no_short_sell=backtest_model(__no_short_sell,['ex_return'],name='no_short_sell portfolio')
# multi-periods strategies
def __global_min_variance(list_df, x):
df = list_df[0]
n = df.shape[1]
cov = df.cov()
in_cov = np.linalg.inv(cov)
beta = df.mean()
    u = np.dot(np.dot(in_cov, np.ones(n)) / np.dot(np.ones(n), np.dot(in_cov, np.ones(n))), x)
    return u
import numpy as np
class GaussianNBClassifier:
def __init__(self, eps=1e-6):
r"""
A naive Bayes classifier for real-valued data.
Notes
-----
The naive Bayes model assumes the features of each training example
:math:`\mathbf{x}` are mutually independent given the example label
:math:`y`:
.. math::
P(\mathbf{x}_i \mid y_i) = \prod_{j=1}^M P(x_{i,j} \mid y_i)
where :math:`M` is the rank of the `i`th example :math:`\mathbf{x}_i`
and :math:`y_i` is the label associated with the `i`th example.
Combining the conditional independence assumption with a simple
application of Bayes' theorem gives the naive Bayes classification
rule:
.. math::
\hat{y} &= \arg \max_y P(y \mid \mathbf{x}) \\
&= \arg \max_y P(y) P(\mathbf{x} \mid y) \\
&= \arg \max_y P(y) \prod_{j=1}^M P(x_j \mid y)
In the final expression, the prior class probability :math:`P(y)` can
be specified in advance or estimated empirically from the training
data.
In the Gaussian version of the naive Bayes model, the feature
likelihood is assumed to be normally distributed for each class:
.. math::
\mathbf{x}_i \mid y_i = c, \theta \sim \mathcal{N}(\mu_c, \Sigma_c)
where :math:`\theta` is the set of model parameters: :math:`\{\mu_1,
\Sigma_1, \ldots, \mu_K, \Sigma_K\}`, :math:`K` is the total number of
unique classes present in the data, and the parameters for the Gaussian
associated with class :math:`c`, :math:`\mu_c` and :math:`\Sigma_c`
(where :math:`1 \leq c \leq K`), are estimated via MLE from the set of
training examples with label :math:`c`.
Parameters
----------
eps : float
A value added to the variance to prevent numerical error. Default
is 1e-6.
Attributes
----------
parameters : dict
Dictionary of model parameters: "mean", the `(K, M)` array of
feature means under each class, "sigma", the `(K, M)` array of
feature variances under each class, and "prior", the `(K,)` array of
empirical prior probabilities for each class label.
hyperparameters : dict
Dictionary of model hyperparameters
labels : :py:class:`ndarray <numpy.ndarray>` of shape `(K,)`
An array containing the unique class labels for the training
examples.
"""
self.labels = None
self.hyperparameters = {"eps": eps}
self.parameters = {
"mean": None, # shape: (K, M)
"sigma": None, # shape: (K, M)
"prior": None, # shape: (K,)
}
def fit(self, X, y):
"""
Fit the model parameters via maximum likelihood.
Notes
-----
The model parameters are stored in the :py:attr:`parameters` attribute.
The following keys are present:
mean: :py:class:`ndarray <numpy.ndarray>` of shape `(K, M)`
Feature means for each of the `K` label classes
sigma: :py:class:`ndarray <numpy.ndarray>` of shape `(K, M)`
Feature variances for each of the `K` label classes
prior : :py:class:`ndarray <numpy.ndarray>` of shape `(K,)`
Prior probability of each of the `K` label classes, estimated
empirically from the training data
Parameters
----------
X : :py:class:`ndarray <numpy.ndarray>` of shape `(N, M)`
A dataset consisting of `N` examples, each of dimension `M`
y: :py:class:`ndarray <numpy.ndarray>` of shape `(N,)`
The class label for each of the `N` examples in `X`
Returns
-------
self: object
"""
P = self.parameters
H = self.hyperparameters
        self.labels = np.unique(y)

        K = len(self.labels)
        N, M = X.shape

        P["mean"] = np.zeros((K, M))
        P["sigma"] = np.zeros((K, M))
        P["prior"] = np.zeros((K,))

        for i, c in enumerate(self.labels):
            X_c = X[y == c, :]

            P["mean"][i, :] = np.mean(X_c, axis=0)
            P["sigma"][i, :] = np.var(X_c, axis=0) + H["eps"]
            P["prior"][i] = X_c.shape[0] / N
        return self
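    # Minimal usage sketch (X_train and y_train are hypothetical arrays of shape (N, M) and (N,)):
    #   nb = GaussianNBClassifier(eps=1e-6)
    #   nb.fit(X_train, y_train)
    #   nb.parameters["prior"]   # empirical class priors, shape (K,)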
from torch.utils.data.dataset import Dataset
import os, re, socket
import nibabel
import pandas as pd
import numpy as np
from typing import Callable, Any, List, Type, Sequence
from nilearn.image import resample_to_img
from itertools import compress
class OpenBHBChallenge(Dataset):
"""
OpenBHB Dataset written in a torchvision-like manner. It is memory-efficient, taking advantage of
memory-mapping implemented with NumPy. It comes with an official Train/Test split with 2 tests
(age/sex/site stratified and independent) and:
... 3 pre-processings:
- Quasi-Raw
- VBM
- FreeSurfer
... And 3 differents targets:
- Age (regression)
- Sex (classification)
- Site (classification)
... With meta-data:
- unique identifier across pre-processing and split (participant_id)
- TIV + Global CSF/WM/GM volumes + acq. settings
Attributes:
* samples: list of (path, target)
* shape, tuple: shape of the data
* infos: pd DataFrame: TIV + global CSF/WM/GM + acq. settings and magnetic field strength
* metadata: ROI names or VBM/quasi-raw template used
"""
def __init__(self, root: str, preproc: str='vbm', scheme: str='train_val_test', target: [str, List[str]]='age',
split: str='train', transforms: Callable[[np.ndarray], np.ndarray]=None,
target_transforms: Callable[[int, float], Any]=None, drop_na: bool=False, residualized:str=None):
"""
:param root: str, path to the root directory containing the different .npy and .csv files
:param preproc: str, must be either VBM ('vbm'), ROI-VBM ('vbm_roi'), Quasi-Raw ('quasi_raw'),
FSL Desikan ROI ('fsl_desikan_roi') or FSL Destrieux ROI ('fsl_destrieux_roi')
:param scheme: str, must be either Train/Test ('train_test') or Train/Val/Test ('train_val_test')
:param target: str or [str], either 'age', 'sex' or 'site'.
:param split: str, either 'train', 'val', 'test' (inter) or 'test_intra'
:param transforms (callable, optional): A function/transform that takes in
a 3D MRI image and returns a transformed version.
:param target_transforms (callable, optional): A function/transform that takes in
a target and returns a transformed version.
:param drop_na, If set drop samples with NaN values in data (target not checked)
:param residualized, if "linear" or "combat", loads the residualized data (only for training)
"""
if isinstance(target, str):
target = [target]
assert preproc in ['vbm', 'vbm_roi', 'quasi_raw', 'fsl_desikan_roi', 'fsl_destrieux_roi'], \
"Unknown preproc: %s"%preproc
assert scheme in ['train_test', 'train_val_test'], "Unknown scheme: %s"%scheme
assert set(target) <= {'age', 'sex', 'site'}, "Unknown target: %s"%target
assert split in ['train', 'val', 'test', 'test_intra', 'validation'], "Unknown split: %s"%split
if scheme == 'train_test' and split == 'val':
raise ValueError("No validation split for Train/Test scheme.")
if split == "test" and "site" in target:
raise ValueError("No site information for external test set.")
if residualized is not None:
assert split in ["train", "validation", "val"], "No residualized data for %s"%split
assert residualized in ["linear", "combat"], "Unknown residualization: %s"%residualized
self.root = root
self.preproc = preproc
self.split = split
self.scheme_name = scheme
self.target_name = target
self.transforms = transforms
self.target_transforms = target_transforms
self.residualize = residualized
if self.split == "val": self.split = "validation"
self.img_paths = {"vbm_roi": "sub-%i_preproc-cat12vbm_desc-gm_ROI.npy",
"vbm": "sub-%i_preproc-cat12vbm_desc-gm_T1w.npy",
"quasi_raw": "sub-%i_preproc-quasiraw_T1w.npy",
"fsl_desikan_roi": "sub-%i_preproc-freesurfer_desc-desikan_ROI.npy",
"fsl_destrieux_roi": "sub-%i_preproc-freesurfer_desc-destrieux_ROI.npy"}
if not self._check_integrity():
raise RuntimeError("Files not found. Check the the root directory %s"%root)
if scheme == "train_val_test" and self.split in ["train", "validation"]:
self.scheme = pd.read_csv(os.path.join(self.root, "challenge_train_val_splits.tsv"), sep="\t")
elif self.split == "test":
_scheme = pd.read_csv(os.path.join(self.root, "official_external_test.tsv"), sep="\t")
self.scheme = pd.DataFrame(dict(participant_id=_scheme.participant_id.values, split="test"))
else:
self.scheme = pd.read_csv(os.path.join(self.root, "official_challenge_splits.tsv"), sep="\t")
# 0) Get the labels to predict
if self.split != "test":
all_labels = pd.merge(pd.read_csv(os.path.join(self.root, "participants.tsv"), sep="\t"),
pd.read_csv(os.path.join(self.root, "official_site_class_labels.tsv"), sep="\t"),
how="inner", on="participant_id", validate="1:1")
else:
all_labels = pd.read_csv(os.path.join(self.root, "privateBHB", "participants.tsv"), sep="\t")
split_ = "test" if self.split == "test_intra" else self.split
all_labels = pd.merge(self.scheme[self.scheme.split.eq(split_)], all_labels,
how="inner", on="participant_id", validate="1:1")
assert len(all_labels) == self.scheme.split.eq(split_).sum()
## 1) Creates the list of samples (path, label)
select = ["participant_id"] + ["siteXacq" if t == "site" else t for t in target]
img_pth = self.img_paths[preproc]
# TODO: remove this quick fix
if preproc == "quasi_raw" and socket.gethostname() == "kraken":
img_pth = re.sub(".npy", "_resampled.npy", img_pth)
if residualized is not None:
img_pth = re.sub(".npy", "_%s_res.npy"%residualized, img_pth)
policy = "privateBHB" if self.split == "test" else ""
self.samples = [(os.path.join(self.root, policy, "data", img_pth%id), target)
for (id, *target) in all_labels[select].values]
if drop_na:
            mask = np.ones(len(self.samples), dtype=bool)
for i, (pth, _) in enumerate(self.samples):
s = self.load_sample(pth)
mask[i] = (not np.any(np.isnan(s)))
self.samples = list(compress(self.samples, mask))
all_labels = all_labels[mask]
selected_infos = ["tiv", "csfv", "gmv", "wmv", "magnetic_field_strength"]
# Get meta-data associated to specific preproc (ROI names, or template used)
self.metadata = self._extract_metadata()
self.infos = all_labels[selected_infos]
self.shape = (len(self.samples), *self[0][0].shape)
self._all_labels = all_labels.copy()
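    # Hedged usage sketch (the root path below is hypothetical):
    #   ds = OpenBHBChallenge("/path/to/openBHB", preproc="vbm", target="age", split="train")
    #   x, y = ds[0]                          # one (image, target) pair
    #   X, y = ds.get_data(dtype=np.float32)  # whole split in memory, can be very large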
def _check_integrity(self):
"""
Check the integrity of root dir (including the directories/files required). It does NOT check their content.
Should be formatted as:
/root
official_challenge_splits.tsv
official_site_class_labels.tsv
challenge_train_val_splits.tsv
official_external_test.tsv
participants.tsv
/data
sub-[id]_preproc-cat12vbm_desc-gm_ROI.npy
sub-[id]_preproc-cat12vbm_desc-gm_T1w.npy
sub-[id]_preproc-quasiraw_T1w.npy
sub-[id]_preproc-freesurfer_desc-desikan_ROI.npy
sub-[id]_preproc-freesurfer_desc-destrieux_ROI.npy
/resource
cat12vbm_labels.txt
freesurfer_atlas-desikan_labels.txt
freesurfer_atlas-destrieux_labels.txt
freesurfer_channels.txt
cat12vbm_space-MNI152_desc-gm_TPM.nii.gz
quasiraw_space-MNI152_desc-brain_T1w.nii.gz
"""
is_complete = os.path.isdir(self.root)
is_complete &= os.path.isfile(os.path.join(self.root, "official_challenge_splits.tsv"))
is_complete &= os.path.isfile(os.path.join(self.root, "official_site_class_labels.tsv"))
is_complete &= os.path.isfile(os.path.join(self.root, "challenge_train_val_splits.tsv"))
is_complete &= os.path.isfile(os.path.join(self.root, "official_external_test.tsv"))
is_complete &= os.path.isfile(os.path.join(self.root, "participants.tsv"))
is_complete &= os.path.isfile(os.path.join(self.root, "resource", "cat12vbm_labels.txt"))
is_complete &= os.path.isfile(os.path.join(self.root, "resource", "freesurfer_atlas-desikan_labels.txt"))
is_complete &= os.path.isfile(os.path.join(self.root, "resource", "freesurfer_atlas-destrieux_labels.txt"))
is_complete &= os.path.isfile(os.path.join(self.root, "resource", "freesurfer_channels.txt"))
is_complete &= os.path.isfile(os.path.join(self.root, "resource", "cat12vbm_space-MNI152_desc-gm_TPM.nii.gz"))
is_complete &= os.path.isfile(os.path.join(self.root, "resource", "quasiraw_space-MNI152_desc-brain_T1w.nii.gz"))
is_complete &= os.path.isfile(os.path.join(self.root, "privateBHB", "participants.tsv"))
if (not is_complete): return False
try:
for policy in ["", "privateBHB"]:
ids = pd.read_csv(os.path.join(self.root, policy, "participants.tsv"), sep="\t").participant_id
for id in ids:
f = self.img_paths[self.preproc] % id
is_complete &= os.path.isfile(os.path.join(self.root, policy, "data", f))
if (not is_complete):
print("Missing file: %s" % os.path.join(self.root, policy, "data", f))
return is_complete
except (FileNotFoundError, AttributeError):
return False
return is_complete
def _extract_metadata(self):
"""
:return: ROI names or VBM/Quasi-Raw templates
"""
if self.preproc == "vbm_roi":
meta = pd.read_csv(os.path.join(self.root, "resource", "cat12vbm_labels.txt"), names=["ROI"])
elif self.preproc == "vbm":
meta = nibabel.load(os.path.join(self.root, "resource", "cat12vbm_space-MNI152_desc-gm_TPM.nii.gz"))
elif self.preproc == "quasi_raw":
meta = nibabel.load(os.path.join(self.root, "resource", "quasiraw_space-MNI152_desc-brain_T1w.nii.gz"))
elif self.preproc == "fsl_desikan_roi":
meta = pd.read_csv(os.path.join(self.root, "resource", "freesurfer_atlas-desikan_labels.txt"), names=["ROI"])
elif self.preproc == "fsl_destrieux_roi":
meta = pd.read_csv(os.path.join(self.root, "resource", "freesurfer_atlas-destrieux_labels.txt"), names=["ROI"])
else:
raise ValueError("Unknown preproc: %s"%self.preproc)
return meta
def get_data(self, indices: Sequence[int]=None, mask: np.ndarray=None, dtype: Type=np.float32):
"""
Loads all (or selected ones) data in memory and returns a big numpy array X_data with y_data
The input/target transforms are ignored.
Warning: this can be VERY memory-consuming (~40GB if all data are loaded)
:param indices (Optional): list of indices to load
:param mask (Optional binary mask): binary mask to apply to the data. Each 3D volume is transformed into a
vector. Can be 3D mask or 4D (channel + img)
:param dtype (Optional): the final type of data returned (e.g np.float32)
:return (np.ndarray, np.ndarray), a tuple (X, y)
"""
(tf, target_tf) = (self.transforms, self.target_transforms)
self.transforms, self.target_transforms = None, None
targets = []
if mask is not None:
assert len(mask.shape) in [3, 4], "Mask must be 3D or 4D (current shape is {})".format(mask.shape)
if len(mask.shape) == 3:
# adds the channel dimension
mask = mask[np.newaxis, :]
if indices is None:
            nbytes = np.prod(self.shape) if mask is None else mask.sum() * len(self)
print("Dataset size to load (shape {}): {:.2f} GB".format(self.shape, nbytes*np.dtype(dtype).itemsize/
(1024*1024*1024)), flush=True)
if mask is None:
data = np.zeros(self.shape, dtype=dtype)
else:
data = np.zeros((len(self), mask.sum()), dtype=dtype)
for i in range(len(self)):
(sample, target) = self[i]
data[i] = sample[mask] if mask is not None else sample
targets.append(target)
self.transforms, self.target_transforms = (tf, target_tf)
return data, np.array(targets)
else:
            nbytes = np.prod(self.shape[1:]) * len(indices) if mask is None else mask.sum() * len(indices)
            print("Dataset size to load (shape {}): {:.2f} GB".format((len(indices),) + self.shape[1:],
                                                                      nbytes * np.dtype(dtype).itemsize /
                                                                      (1024 * 1024 * 1024)), flush=True)
            if mask is None:
                data = np.zeros((len(indices),) + self.shape[1:], dtype=dtype)
            else:
                data = np.zeros((len(indices), mask.sum()), dtype=dtype)
            for i, idx in enumerate(indices):
                (sample, target) = self[idx]
                data[i] = sample[mask] if mask is not None else sample
                targets.append(target)
            self.transforms, self.target_transforms = (tf, target_tf)
            return data, np.array(targets)
"""
Computational Cancer Analysis Library
Authors:
Huwate (Kwat) Yeerna (Medetgul-Ernar)
<EMAIL>
Computational Cancer Analysis Laboratory, UCSD Cancer Center
<NAME>
<EMAIL>
Computational Cancer Analysis Laboratory, UCSD Cancer Center
Modified by:
<NAME>
<EMAIL>
Mesirov Lab -- UCSD Medicine Department.
"""
# import rpy2.robjects as ro
from numpy import asarray, exp, finfo, isnan, log, sign, sqrt, sum, sort
from numpy.random import random_sample, seed
# from rpy2.robjects.numpy2ri import numpy2ri
# from rpy2.robjects.packages import importr
from scipy.stats import pearsonr
from scipy.stats import gaussian_kde
import numpy as np
from .. import RANDOM_SEED
from ..support.d2 import drop_nan_columns
EPS = finfo(float).eps
## Commented-out by EFJ on 2017-07-20 to remove the need to use R.
# ro.conversion.py2ri = numpy2ri
# mass = importr('MASS')
# bcv = mass.bcv
# kde2d = mass.kde2d
## Commented-out by EFJ on 2017-07-20 to remove the need to use R.
# def information_coefficient(x, y, n_grids=25,
# jitter=1E-10, random_seed=RANDOM_SEED):
# """
# Compute the information coefficient between x and y, which are
# continuous, categorical, or binary vectors.
# :param x: numpy array;
# :param y: numpy array;
# :param n_grids: int; number of grids for computing bandwidths
# :param jitter: number;
# :param random_seed: int or array-like;
# :return: float; Information coefficient
# """
#
# # Can't work with missing any value
# # not_nan_filter = ~isnan(x)
# # not_nan_filter &= ~isnan(y)
# # x = x[not_nan_filter]
# # y = y[not_nan_filter]
#
# # Assume that we are not working with NaNs
# x, y = drop_nan_columns([x, y]) # Commented out by EFJ on 2017-07-12
#
# # x = drop_nan_columns(x) # Added by EFJ on 2017-07-12
# # y = drop_nan_columns(y) # Added by EFJ on 2017-07-12
#
# # Need at least 3 values to compute bandwidth
# if len(x) < 3 or len(y) < 3:
# return 0
#
# x = asarray(x, dtype=float)
# y = asarray(y, dtype=float)
#
# # Add jitter
# seed(random_seed)
# x += random_sample(x.size) * jitter
# y += random_sample(y.size) * jitter
#
# # Compute bandwidths
# cor, p = pearsonr(x, y)
# bandwidth_x = asarray(bcv(x)[0]) * (1 + (-0.75) * abs(cor))
# bandwidth_y = asarray(bcv(y)[0]) * (1 + (-0.75) * abs(cor))
#
# # Compute P(x, y), P(x), P(y)
# fxy = asarray(
# kde2d(x, y, asarray([bandwidth_x, bandwidth_y]), n=asarray([n_grids]))[
# 2]) + EPS
#
# dx = (x.max() - x.min()) / (n_grids - 1)
# dy = (y.max() - y.min()) / (n_grids - 1)
# pxy = fxy / (fxy.sum() * dx * dy)
# px = pxy.sum(axis=1) * dy
# py = pxy.sum(axis=0) * dx
#
# # Compute mutual information;
# mi = (pxy * log(pxy / (asarray([px] * n_grids).T *
# asarray([py] * n_grids)))).sum() * dx * dy
#
# # # Get H(x, y), H(x), and H(y)
# # hxy = - (pxy * log(pxy)).sum() * dx * dy
# # hx = -(px * log(px)).sum() * dx
# # hy = -(py * log(py)).sum() * dy
# # mi = hx + hy - hxy
#
# # Compute information coefficient
# ic = sign(cor) * sqrt(1 - exp(-2 * mi))
#
# # TODO: debug when MI < 0 and |MI| ~ 0 resulting in IC = nan
# if isnan(ic):
# ic = 0
#
# return ic
def information_coefficient(x, y, n_grids=25,
jitter=1E-10, random_seed=RANDOM_SEED):
"""
Compute the information coefficient between x and y, which are
continuous, categorical, or binary vectors. This function uses only python libraries -- No R is needed.
:param x: numpy array;
:param y: numpy array;
:param n_grids: int; number of grids for computing bandwidths
:param jitter: number;
:param random_seed: int or array-like;
:return: float; Information coefficient
"""
# Can't work with missing any value
# not_nan_filter = ~isnan(x)
# not_nan_filter &= ~isnan(y)
# x = x[not_nan_filter]
# y = y[not_nan_filter]
x, y = drop_nan_columns([x, y])
# Need at least 3 values to compute bandwidth
if len(x) < 3 or len(y) < 3:
return 0
x = asarray(x, dtype=float)
y = asarray(y, dtype=float)
# Add jitter
seed(random_seed)
x += random_sample(x.size) * jitter
y += random_sample(y.size) * jitter
# Compute bandwidths
cor, p = pearsonr(x, y)
# bandwidth_x = asarray(bcv(x)[0]) * (1 + (-0.75) * abs(cor))
# bandwidth_y = asarray(bcv(y)[0]) * (1 + (-0.75) * abs(cor))
# Compute P(x, y), P(x), P(y)
# fxy = asarray(
# kde2d(x, y, asarray([bandwidth_x, bandwidth_y]), n=asarray([n_grids]))[
# 2]) + EPS
# Estimate fxy using scipy.stats.gaussian_kde
xmin = x.min()
xmax = x.max()
ymin = y.min()
ymax = y.max()
X, Y = np.mgrid[xmin:xmax:complex(0, n_grids), ymin:ymax:complex(0, n_grids)]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
kernel = gaussian_kde(values)
fxy = np.reshape(kernel(positions).T, X.shape) + EPS
dx = (x.max() - x.min()) / (n_grids - 1)
dy = (y.max() - y.min()) / (n_grids - 1)
pxy = fxy / (fxy.sum() * dx * dy)
px = pxy.sum(axis=1) * dy
py = pxy.sum(axis=0) * dx
# Compute mutual information;
mi = (pxy * log(pxy / (asarray([px] * n_grids).T *
asarray([py] * n_grids)))).sum() * dx * dy
# # Get H(x, y), H(x), and H(y)
# hxy = - (pxy * log(pxy)).sum() * dx * dy
# hx = -(px * log(px)).sum() * dx
# hy = -(py * log(py)).sum() * dy
# mi = hx + hy - hxy
# Compute information coefficient
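    # For a bivariate Gaussian, MI = -0.5 * log(1 - rho**2), so sqrt(1 - exp(-2 * MI))
    # recovers |rho|; the sign of the Pearson correlation restores the direction.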
ic = sign(cor) * sqrt(1 - exp(-2 * mi))
# TODO: debug when MI < 0 and |MI| ~ 0 resulting in IC = nan
    if isnan(ic):
        ic = 0

    return ic
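# Hedged usage sketch (synthetic data, names are illustrative only):
#   rng = np.random.RandomState(0)
#   a = rng.normal(size=200)
#   b = 0.5 * a + rng.normal(size=200)
#   ic = information_coefficient(a, b)   # lies in [-1, 1] and shares the sign of the correlation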
"""
Module varn calculates local densities of the 2D system and plots histogram
of these local densities.
Files are saved according to the active_particles.naming.varN standard.
Environment modes
-----------------
COMPUTE : bool
Compute local densities.
DEFAULT: False
CHECK : bool
Evaluate difference between the parametrised global packing fraction and
the measured averaged packing fraction.
DEFAULT: False
PLOT : bool
Plots histogram of local densities.
DEFAULT: False
PLOT_MODE : string
Histogram type.
_______________________________________________________________________
| Mode | Histogram |
|________|______________________________________________________________|
| 'mean' | Simple histogram of local densities from all computed times. |
|________|______________________________________________________________|
| 'time' | Histogram of local densities as function of time. |
|________|______________________________________________________________|
DEFAULT: mean
SHOW : bool
Show graphs.
DEFAULT: False
PEAK [(COMPUTE and SHOW) or PLOT mode] : bool
Highlight highest peak of the histogram.
DEFAULT: True
SAVE [(COMPUTE and SHOW) or PLOT mode] : bool
Save graphs.
DEFAULT: False
SUPTITLE [(COMPUTE and SHOW) or PLOT mode] : bool
Display suptitle.
DEFAULT: True
Environment parameters
----------------------
DATA_DIRECTORY : string
Data directory.
DEFAULT: current working directory
PARAMETERS_FILE : string
Simulation parameters file.
DEFAULT: DATA_DIRECTORY/active_particles.naming.parameters_file
WRAPPED_FILE : string
Wrapped trajectory file. (.gsd)
DEFAULT: DATA_DIRECTORY/active_particles.naming.wrapped_trajectory_file
INITIAL_FRAME : int
Frame to consider as initial.
NOTE: INITIAL_FRAME < 0 will be interpreted as the initial frame being
the middle frame of the simulation.
DEFAULT: -1
INTERVAL_MAXIMUM : int
Maximum number of frames at which we compute local densities.
DEFAULT: 1
BOX_SIZE : float
Length of the square boxes in which particles are counted to compute local
densities.
DEFAULT: active_particles.analysis.varn._box_size
N_CASES : int
Number of boxes in each direction of the grid of nodes at which local
densities are computed.
DEFAULT: smallest integer value greater than or equal to the square root of
the number of particles from the simulation parameters file.
N_BINS [PLOT or SHOW mode] : int
Number of bins for the histogram of local densities.
DEFAULT: active_particles.analysis.varn._Nbins
PHIMAX [PLOT or SHOW mode] : float
Maximum local density for the histogram of local densities.
DEFAULT: active_particles.analysis.varn._phimax
PPHILOCMIN [PLOT or SHOW and 'time' mode] : float
Minimum local density probability.
DEFAULT: active_particles.analysis.varn._pphilocmin
PPHILOCMAX [PLOT or SHOW and 'time' mode] : float
Maximum local density probability.
DEFAULT: active_particles.analysis.varn._pphilocmax
CONTOURS : int
Number of contour lines.
DEFAULT: active_particles.analysis.varn._contours
FONT_SIZE : int
Plot font size.
DEFAULT: active_particles.analysis.varn._font_size
Output
------
[COMPUTE MODE]
> Prints neighbours grid computation time and execution time.
> Saves computed local densities according to the active_particles.naming.varN
standard in DATA_DIRECTORY.
[SHOW or PLOT mode]
> Plots histogram of local densities.
[SAVE mode]
> Saves local densities histogram figure in DATA_DIRECTORY.
"""
import active_particles.naming as naming
from active_particles.init import get_env, slurm_output
from active_particles.dat import Gsd
from active_particles.maths import Histogram
from os import getcwd
from os import environ as envvar
from os.path import join as joinpath
import numpy as np
from math import ceil
import pickle
from collections import OrderedDict
from datetime import datetime
import matplotlib as mpl
if not(get_env('SHOW', default=False, vartype=bool)):
mpl.use('Agg') # avoids crash if launching without display
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
from mpl_toolkits.axes_grid1 import make_axes_locatable
# DEFAULT VARIABLES
_init_frame = -1 # default frame to consider as initial
_int_max = 1 # default maximum number of frames on which to calculate densities
_box_size = 10 # default length of the square box in which particles are counted
_Nbins = 10 # default number of bins for the histogram
_phimax = 1 # default maximum local density for histogram
_pphilocmin = 1e-4 # default minimum local density probability
_pphilocmax = 1e-1 # default maximum local density probability
_contours = 20 # default number of contour lines
_font_size = 10 # default plot font size
# FUNCTIONS AND CLASSES
def density(w_traj, frame, Ncases, box_size):
"""
Returns local densities in squares of length box_size around
Ncases x Ncases nodes, uniformly distributed in the 2D system, at frame
'frame'.
Parameters
----------
w_traj : active_particles.dat.Gsd
Wrapped trajectory object.
frame : int
Frame index.
Ncases : int
Number of nodes in one direction.
box_size : float
Length of the square box in which we calculate the local density.
Returns
-------
density_list : 1D Numpy array
Array of calculated local densities.
"""
L = w_traj.box_size() # system box size
dL = L/Ncases # distance between two consecutive nodes
max_node_dist = ceil(box_size/dL) # maximum distance in infinity norm in terms of nodes between particle and containing node
area_sum = np.zeros((Ncases, Ncases)) # sum of particles' area close to each node (centre of grid box) of the system
def node_position(node_index):
"""
Returns node position from node index.
Parameters
----------
node_index : 2-uple of int
Node index.
Returns
-------
r : (2,) Numpy array
Position of node.
"""
return dL*(1/2 + np.array(node_index)) - L/2
for position, area in zip(w_traj.position(frame),
(np.pi/4)*(w_traj.diameter(frame)**2)):
closest_node_index = np.array((position + L/2)//dL, dtype=int) # index of closest node
for dx in range(-max_node_dist, max_node_dist + 1):
for dy in range(-max_node_dist, max_node_dist + 1):
node_index = tuple(
(closest_node_index + np.array([dx, dy]))%Ncases)
if (np.abs(position - node_position(node_index))
< box_size/2).all(): # particle within box of node
area_sum[node_index] += area
return area_sum/(box_size**2)
def histogram(densities, Nbins, phimax):
"""
Returns histogram and bin values from densities array.
Parameters
----------
densities : array-like
Array of densities.
Nbins : int
Number of bins for histogram.
phimax : float
Maximum density for histogram.
NOTE: Minimum density is 0.
Returns
-------
bins : Numpy array
Bins of the histogram.
hist : Numpy array
Values of the histogram at bins.
"""
hist = Histogram(Nbins, 0, phimax)
hist.add_values(densities)
return hist.bins, hist.get_histogram()
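# Usage sketch for histogram() (illustrative values; relies on the
# active_particles.maths.Histogram behaviour assumed above):
#   densities = np.random.uniform(0, _phimax, 1000)
#   bins, hist = histogram(densities, _Nbins, _phimax)
#   # 'bins' and 'hist' can be passed directly to Plot.add_hist below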
class Plot:
"""
Plot mean histograms of densities.
"""
def __init__(self, suptitle=True):
"""
Set figure.
Parameters
----------
suptitle : bool
Display suptitle. (default: True)
"""
self.fig, self.ax = plt.subplots()
if suptitle: self.fig.suptitle(
r'$N=%.2e, \phi=%1.2f, \tilde{v}=%.2e, \tilde{\nu}_r=%.2e$'
% (parameters['N'], parameters['density'], parameters['vzero'],
parameters['dr']) + '\n' +
r'$S_{init}=%.2e, S_{max}=%.2e, N_{cases}=%.2e, l=%.2e$'
% (init_frame, int_max, Ncases, box_size))
self.ax.set_xlabel(r'$\phi_{loc}$')
self.ax.set_ylabel(r'$P(\phi_{loc})$')
def add_hist(self, bins, hist, peak=True):
"""
Add histogram of densities.
Parameters
----------
bins : array-like
Bins of the histogram.
hist : array-like
Values of the histogram at bins.
peak : bool
Highlight tallest peak of histogram. (default: True)
Returns
-------
line : matplotlib.lines.Line2D
Plotted histogram line.
"""
line, = self.ax.semilogy(bins, hist)
if peak:
philocmax, Pphilocmax = bins[np.argmax(hist)], np.max(hist)
self.ax.axhline(Pphilocmax, 0, 1,
linestyle='--', color=line.get_color())
self.ax.axvline(philocmax, 0, 1,
linestyle='--', color=line.get_color(),
label=r'$(\phi_{loc}^* = %1.2f, P(\phi_{loc}^*) = %.2e)$'
% (philocmax, Pphilocmax))
return line
class PlotTime:
"""
Plot histograms of densities as functions of time.
"""
def __init__(self, Nbins, phimax,
pphilocmin=_pphilocmin, pphilocmax=_pphilocmax, contours=_contours,
colormap=plt.cm.inferno, pad=20, suptitle=True):
"""
Set figure and histogram parameters.
Parameters
----------
Nbins : int
Number of bins for the histogram.
phimax : float
Maximum local density for histogram.
pphilocmin : float
Minimum local density probability.
(default: active_particles.analysis.varn._pphilocmin)
pphilocmax : float
Maximum local density probability.
(default: active_particles.analysis.varn._pphilocmax)
contours : int
Number of contour lines.
(default: active_particles.analysis.varn._contours)
colormap : matplotlib colormap
Histogram colormap. (default: matplotlib.pyplot.cm.inferno)
pad : float
Separation between label and colormap. (default: 20)
suptitle : bool
Display suptitle. (default: True)
"""
self.Nbins = Nbins
self.phimax = phimax
self.pphilocmin = np.log10(pphilocmin)
self.pphilocmax = np.log10(pphilocmax)
self.contours = contours
self.fig, self.ax = plt.subplots()
self.fig.subplots_adjust(top=0.98, bottom=0.10, left=0.10, right=0.88)
self.cmap = colormap
self.norm = colors.Normalize(
vmin=self.pphilocmin, vmax=self.pphilocmax)
self.colorbar = mpl.colorbar.ColorbarBase(
make_axes_locatable(self.ax).append_axes(
"right", size="5%", pad=0.05),
cmap=self.cmap, norm=self.norm, orientation='vertical')
if suptitle: self.fig.suptitle(
r'$N=%.2e, \phi=%1.2f, \tilde{v}=%.2e, \tilde{\nu}_r=%.2e$'
% (parameters['N'], parameters['density'], parameters['vzero'],
parameters['dr']) + '\n' +
r'$S_{max}=%.2e, N_{cases}=%.2e, l=%.2e$'
% (int_max, Ncases, box_size))
self.ax.set_xlabel(r'$t$')
self.ax.set_ylabel(r'$\phi_{loc}$')
self.colorbar.set_label(r'$\log P(\phi_{loc})$',
labelpad=pad, rotation=270)
def plot(self, times, densities, peak=True):
"""
Plot histogram.
Parameters
----------
times : array-like
Array of times at which densities have been calculated.
densities : array-like of array-like
Array of densities arrays at times times.
peak : bool
Highlight tallest peak of histogram. (default: True)
"""
self.times = times
self.histogram3D = [] # local densities histogram
self.philocmax = [] # most probable local densities at times
for time, density in zip(self.times, densities):
time_value = np.full(self.Nbins, fill_value=time)
bins, hist = histogram(density, self.Nbins, self.phimax) # histogram of local densities with corresponding bins
hist = np.log10(hist)
histogram3D_time = np.transpose([time_value, bins, hist])
import numpy as np
import pandas as pd
from numpy.testing import assert_, assert_equal, assert_allclose, assert_raises
from statsmodels.tsa.arima import specification, params
def test_init():
# Test initialization of the params
# Basic test, with 1 of each parameter
exog = pd.DataFrame([[0]], columns=['a'])
spec = specification.SARIMAXSpecification(
exog=exog, order=(1, 1, 1), seasonal_order=(1, 1, 1, 4))
p = params.SARIMAXParams(spec=spec)
# Test things copied over from spec
assert_equal(p.spec, spec)
assert_equal(p.exog_names, ['a'])
assert_equal(p.ar_names, ['ar.L1'])
assert_equal(p.ma_names, ['ma.L1'])
assert_equal(p.seasonal_ar_names, ['ar.S.L4'])
assert_equal(p.seasonal_ma_names, ['ma.S.L4'])
assert_equal(p.param_names, ['a', 'ar.L1', 'ma.L1', 'ar.S.L4', 'ma.S.L4',
'sigma2'])
assert_equal(p.k_exog_params, 1)
assert_equal(p.k_ar_params, 1)
assert_equal(p.k_ma_params, 1)
assert_equal(p.k_seasonal_ar_params, 1)
assert_equal(p.k_seasonal_ma_params, 1)
assert_equal(p.k_params, 6)
# Initial parameters should all be NaN
assert_equal(p.params, np.nan)
assert_equal(p.ar_params, [np.nan])
assert_equal(p.ma_params, [np.nan])
assert_equal(p.seasonal_ar_params, [np.nan])
assert_equal(p.seasonal_ma_params, [np.nan])
assert_equal(p.sigma2, np.nan)
assert_equal(p.ar_poly.coef, np.r_[1, np.nan])
assert_equal(p.ma_poly.coef, np.r_[1, np.nan])
assert_equal(p.seasonal_ar_poly.coef, np.r_[1, 0, 0, 0, np.nan])
assert_equal(p.seasonal_ma_poly.coef, np.r_[1, 0, 0, 0, np.nan])
assert_equal(p.reduced_ar_poly.coef, np.r_[1, [np.nan] * 5])
assert_equal(p.reduced_ma_poly.coef, np.r_[1, [np.nan] * 5])
# Test other properties, methods
assert_(not p.is_complete)
assert_(not p.is_valid)
assert_raises(ValueError, p.__getattribute__, 'is_stationary')
assert_raises(ValueError, p.__getattribute__, 'is_invertible')
desired = {
'exog_params': [np.nan],
'ar_params': [np.nan],
'ma_params': [np.nan],
'seasonal_ar_params': [np.nan],
'seasonal_ma_params': [np.nan],
'sigma2': np.nan}
assert_equal(p.to_dict(), desired)
desired = pd.Series([np.nan] * spec.k_params, index=spec.param_names)
assert_allclose(p.to_pandas(), desired)
# Test with different numbers of parameters for each
exog = pd.DataFrame([[0, 0]], columns=['a', 'b'])
spec = specification.SARIMAXSpecification(
exog=exog, order=(3, 1, 2), seasonal_order=(5, 1, 6, 4))
p = params.SARIMAXParams(spec=spec)
# No real need to test names here, since they are already tested above for
# the 1-param case, and tested more extensively in the tests for
# SARIMAXSpecification
assert_equal(p.k_exog_params, 2)
assert_equal(p.k_ar_params, 3)
assert_equal(p.k_ma_params, 2)
assert_equal(p.k_seasonal_ar_params, 5)
assert_equal(p.k_seasonal_ma_params, 6)
assert_equal(p.k_params, 2 + 3 + 2 + 5 + 6 + 1)
def test_set_params_single():
# Test setting parameters directly (i.e. we test setting the AR/MA
# parameters by setting the lag polynomials elsewhere)
# Here each type has only a single parameters
exog = pd.DataFrame([[0]], columns=['a'])
spec = specification.SARIMAXSpecification(
exog=exog, order=(1, 1, 1), seasonal_order=(1, 1, 1, 4))
p = params.SARIMAXParams(spec=spec)
def check(is_stationary='raise', is_invertible='raise'):
assert_(not p.is_complete)
assert_(not p.is_valid)
if is_stationary == 'raise':
assert_raises(ValueError, p.__getattribute__, 'is_stationary')
else:
assert_equal(p.is_stationary, is_stationary)
if is_invertible == 'raise':
assert_raises(ValueError, p.__getattribute__, 'is_invertible')
else:
assert_equal(p.is_invertible, is_invertible)
# Set params one at a time, as scalars
p.exog_params = -6.
check()
p.ar_params = -5.
check()
p.ma_params = -4.
check()
p.seasonal_ar_params = -3.
check(is_stationary=False)
p.seasonal_ma_params = -2.
check(is_stationary=False, is_invertible=False)
p.sigma2 = -1.
# Finally, we have a complete set.
assert_(p.is_complete)
# But still not valid
assert_(not p.is_valid)
assert_equal(p.params, [-6, -5, -4, -3, -2, -1])
assert_equal(p.exog_params, [-6])
assert_equal(p.ar_params, [-5])
assert_equal(p.ma_params, [-4])
assert_equal(p.seasonal_ar_params, [-3])
assert_equal(p.seasonal_ma_params, [-2])
assert_equal(p.sigma2, -1.)
# Lag polynomials
assert_equal(p.ar_poly.coef, np.r_[1, 5])
assert_equal(p.ma_poly.coef, np.r_[1, -4])
assert_equal(p.seasonal_ar_poly.coef, np.r_[1, 0, 0, 0, 3])
assert_equal(p.seasonal_ma_poly.coef, np.r_[1, 0, 0, 0, -2])
# (1 - a L) (1 - b L^4) = (1 - a L - b L^4 + a b L^5)
assert_equal(p.reduced_ar_poly.coef, np.r_[1, 5, 0, 0, 3, 15])
# (1 + a L) (1 + b L^4) = (1 + a L + b L^4 + a b L^5)
assert_equal(p.reduced_ma_poly.coef, np.r_[1, -4, 0, 0, -2, 8])
# Override again, one at a time, now using lists
p.exog_params = [1.]
p.ar_params = [2.]
p.ma_params = [3.]
p.seasonal_ar_params = [4.]
p.seasonal_ma_params = [5.]
p.sigma2 = [6.]
p.params = [1, 2, 3, 4, 5, 6]
assert_equal(p.params, [1, 2, 3, 4, 5, 6])
assert_equal(p.exog_params, [1])
assert_equal(p.ar_params, [2])
assert_equal(p.ma_params, [3])
assert_equal(p.seasonal_ar_params, [4])
assert_equal(p.seasonal_ma_params, [5])
assert_equal(p.sigma2, 6.)
# Override again, one at a time, now using arrays
p.exog_params = np.array(6.)
p.ar_params = np.array(5.)
p.ma_params = np.array(4.)
p.seasonal_ar_params = np.array(3.)
p.seasonal_ma_params = np.array(2.)
p.sigma2 = np.array(1.)
assert_equal(p.params, [6, 5, 4, 3, 2, 1])
assert_equal(p.exog_params, [6])
assert_equal(p.ar_params, [5])
assert_equal(p.ma_params, [4])
assert_equal(p.seasonal_ar_params, [3])
assert_equal(p.seasonal_ma_params, [2])
assert_equal(p.sigma2, 1.)
# Override again, now setting params all at once
p.params = [1, 2, 3, 4, 5, 6]
assert_equal(p.params, [1, 2, 3, 4, 5, 6])
assert_equal(p.exog_params, [1])
assert_equal(p.ar_params, [2])
assert_equal(p.ma_params, [3])
assert_equal(p.seasonal_ar_params, [4])
assert_equal(p.seasonal_ma_params, [5])
assert_equal(p.sigma2, 6.)
# Lag polynomials
assert_equal(p.ar_poly.coef, np.r_[1, -2])
import streamlit as st
from traderl import data, nn
import pandas as pd
import gc
from traderl import agent
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import clear_output
class DQN(agent.DQN):
def evolute(self, h, h_):
pips, profits, total_pips, total_profits, total_pip, total_profit, buy, sell = self.trade(h, h_)
acc = np.mean(pips > 0)
total_win = np.sum(pips[pips > 0])
total_lose = np.sum(pips[pips < 0])
rr = total_win / abs(total_lose)
ev = (np.mean(pips[pips > 0]) * acc + np.mean(pips[pips < 0]) * (1 - acc))
# main imports
import numpy as np
import sys
# image transform imports
from PIL import Image
from skimage import color
from sklearn.decomposition import FastICA
from sklearn.decomposition import IncrementalPCA
from sklearn.decomposition import TruncatedSVD
from numpy.linalg import svd as lin_svd
from scipy.signal import medfilt2d, wiener, cwt
import pywt
import cv2
from ipfml.processing import transform, compression, segmentation
from ipfml.filters import convolution, kernels
from ipfml import utils
# modules and config imports
sys.path.insert(0, '') # trick to enable import of main folder module
import custom_config as cfg
from modules.utils import data as dt
def get_image_features(data_type, block):
"""
Method which returns the data type expected
"""
if data_type == 'lab':
block_file_path = '/tmp/lab_img.png'
block.save(block_file_path)
data = transform.get_LAB_L_SVD_s(Image.open(block_file_path))
if data_type == 'mscn':
img_mscn_revisited = transform.rgb_to_mscn(block)
# save tmp as img
img_output = Image.fromarray(img_mscn_revisited.astype('uint8'), 'L')
mscn_revisited_file_path = '/tmp/mscn_revisited_img.png'
img_output.save(mscn_revisited_file_path)
img_block = Image.open(mscn_revisited_file_path)
# extract from temp image
data = compression.get_SVD_s(img_block)
"""if data_type == 'mscn':
img_gray = np.array(color.rgb2gray(np.asarray(block))*255, 'uint8')
img_mscn = transform.calculate_mscn_coefficients(img_gray, 7)
img_mscn_norm = transform.normalize_2D_arr(img_mscn)
img_mscn_gray = np.array(img_mscn_norm*255, 'uint8')
data = compression.get_SVD_s(img_mscn_gray)
"""
if data_type == 'low_bits_6':
low_bits_6 = transform.rgb_to_LAB_L_low_bits(block, 6)
data = compression.get_SVD_s(low_bits_6)
if data_type == 'low_bits_5':
low_bits_5 = transform.rgb_to_LAB_L_low_bits(block, 5)
data = compression.get_SVD_s(low_bits_5)
if data_type == 'low_bits_4':
low_bits_4 = transform.rgb_to_LAB_L_low_bits(block, 4)
data = compression.get_SVD_s(low_bits_4)
if data_type == 'low_bits_3':
low_bits_3 = transform.rgb_to_LAB_L_low_bits(block, 3)
data = compression.get_SVD_s(low_bits_3)
if data_type == 'low_bits_2':
low_bits_2 = transform.rgb_to_LAB_L_low_bits(block, 2)
data = compression.get_SVD_s(low_bits_2)
if data_type == 'low_bits_4_shifted_2':
data = compression.get_SVD_s(transform.rgb_to_LAB_L_bits(block, (3, 6)))
if data_type == 'sub_blocks_stats':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 4), int(height / 4)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
# get information we want from svd
data.append(np.mean(l_svd_data))
data.append(np.median(l_svd_data))
data.append(np.percentile(l_svd_data, 25))
data.append(np.percentile(l_svd_data, 75))
data.append(np.var(l_svd_data))
area_under_curve = utils.integral_area_trapz(l_svd_data, dx=100)
data.append(area_under_curve)
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'sub_blocks_stats_reduced':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 4), int(height / 4)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
# get information we want from svd
data.append(np.mean(l_svd_data))
data.append(np.median(l_svd_data))
data.append(np.percentile(l_svd_data, 25))
data.append(np.percentile(l_svd_data, 75))
data.append(np.var(l_svd_data))
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'sub_blocks_area':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 8), int(height / 8)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
data.append(area_under_curve)
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'sub_blocks_area_normed':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 8), int(height / 8)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
l_svd_data = utils.normalize_arr(l_svd_data)
area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
data.append(area_under_curve)
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'mscn_var_4':
data = _get_mscn_variance(block, (100, 100))
if data_type == 'mscn_var_16':
data = _get_mscn_variance(block, (50, 50))
if data_type == 'mscn_var_64':
data = _get_mscn_variance(block, (25, 25))
if data_type == 'mscn_var_16_max':
data = _get_mscn_variance(block, (50, 50))
data = np.asarray(data)
size = int(len(data) / 4)
indices = data.argsort()[-size:][::-1]
data = data[indices]
if data_type == 'mscn_var_64_max':
data = _get_mscn_variance(block, (25, 25))
data = np.asarray(data)
size = int(len(data) / 4)
indices = data.argsort()[-size:][::-1]
data = data[indices]
if data_type == 'ica_diff':
current_image = transform.get_LAB_L(block)
ica = FastICA(n_components=50)
ica.fit(current_image)
image_ica = ica.fit_transform(current_image)
image_restored = ica.inverse_transform(image_ica)
final_image = utils.normalize_2D_arr(image_restored)
final_image = np.array(final_image * 255, 'uint8')
sv_values = utils.normalize_arr(compression.get_SVD_s(current_image))
ica_sv_values = utils.normalize_arr(compression.get_SVD_s(final_image))
data = abs(np.array(sv_values) - np.array(ica_sv_values))
if data_type == 'svd_trunc_diff':
current_image = transform.get_LAB_L(block)
svd = TruncatedSVD(n_components=30, n_iter=100, random_state=42)
transformed_image = svd.fit_transform(current_image)
restored_image = svd.inverse_transform(transformed_image)
reduced_image = (current_image - restored_image)
U, s, V = compression.get_SVD(reduced_image)
data = s
if data_type == 'ipca_diff':
current_image = transform.get_LAB_L(block)
transformer = IncrementalPCA(n_components=20, batch_size=25)
transformed_image = transformer.fit_transform(current_image)
restored_image = transformer.inverse_transform(transformed_image)
reduced_image = (current_image - restored_image)
U, s, V = compression.get_SVD(reduced_image)
data = s
if data_type == 'svd_reconstruct':
reconstructed_interval = (90, 200)
begin, end = reconstructed_interval
lab_img = transform.get_LAB_L(block)
lab_img = np.array(lab_img, 'uint8')
U, s, V = lin_svd(lab_img, full_matrices=True)
smat = np.zeros((end-begin, end-begin), dtype=complex)
smat[:, :] = np.diag(s[begin:end])
output_img = np.dot(U[:, begin:end], np.dot(smat, V[begin:end, :]))
output_img = np.array(output_img, 'uint8')
data = compression.get_SVD_s(output_img)
if 'sv_std_filters' in data_type:
# convert into lab by default to apply filters
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
# Apply list of filter on arr
images.append(medfilt2d(arr, [3, 3]))
images.append(medfilt2d(arr, [5, 5]))
images.append(wiener(arr, [3, 3]))
images.append(wiener(arr, [5, 5]))
# By default computation of current block image
s_arr = compression.get_SVD_s(arr)
sv_vector = [s_arr]
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_array = np.array(sv_vector)
_, length = sv_array.shape
sv_std = []
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
data = s_arr[indices]
# with the use of wavelet
if 'wave_sv_std_filters' in data_type:
# convert into lab by default to apply filters
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
# Apply list of filter on arr
images.append(medfilt2d(arr, [3, 3]))
# By default computation of current block image
s_arr = compression.get_SVD_s(arr)
sv_vector = [s_arr]
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_array = np.array(sv_vector)
_, length = sv_array.shape
sv_std = []
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
data = s_arr[indices]
# with the use of wavelet
if 'sv_std_filters_full' in data_type:
# convert into lab by default to apply filters
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
# Apply list of filter on arr
kernel = np.ones((3,3),np.float32)/9
images.append(cv2.filter2D(arr,-1,kernel))
kernel = np.ones((5,5),np.float32)/25
images.append(cv2.filter2D(arr,-1,kernel))
images.append(cv2.GaussianBlur(arr, (3, 3), 0.5))
images.append(cv2.GaussianBlur(arr, (3, 3), 1))
images.append(cv2.GaussianBlur(arr, (3, 3), 1.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 0.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 1))
images.append(cv2.GaussianBlur(arr, (5, 5), 1.5))
images.append(medfilt2d(arr, [3, 3]))
images.append(medfilt2d(arr, [5, 5]))
images.append(wiener(arr, [3, 3]))
images.append(wiener(arr, [5, 5]))
wave = w2d(arr, 'db1', 2)
images.append(np.array(wave, 'float64'))
# By default computation of current block image
s_arr = compression.get_SVD_s(arr)
sv_vector = [s_arr]
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_array = np.array(sv_vector)
_, length = sv_array.shape
sv_std = []
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
data = s_arr[indices]
if 'sv_entropy_std_filters' in data_type:
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
kernel = np.ones((3,3),np.float32)/9
images.append(cv2.filter2D(arr,-1,kernel))
kernel = np.ones((5,5),np.float32)/25
images.append(cv2.filter2D(arr,-1,kernel))
images.append(cv2.GaussianBlur(arr, (3, 3), 0.5))
images.append(cv2.GaussianBlur(arr, (3, 3), 1))
images.append(cv2.GaussianBlur(arr, (3, 3), 1.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 0.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 1))
images.append(cv2.GaussianBlur(arr, (5, 5), 1.5))
images.append(medfilt2d(arr, [3, 3]))
images.append(medfilt2d(arr, [5, 5]))
images.append(wiener(arr, [3, 3]))
images.append(wiener(arr, [5, 5]))
wave = w2d(arr, 'db1', 2)
images.append(np.array(wave, 'float64'))
sv_vector = []
sv_entropy_list = []
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_entropy = [utils.get_entropy_contribution_of_i(s, id_sv) for id_sv, sv in enumerate(s)]
sv_entropy_list.append(sv_entropy)
sv_std = []
sv_array = np.array(sv_vector)
_, length = sv_array.shape
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
s_arr = compression.get_SVD_s(arr)
data = s_arr[indices]
if 'convolutional_kernels' in data_type:
sub_zones = segmentation.divide_in_blocks(block, (20, 20))
data = []
diff_std_list_3 = []
diff_std_list_5 = []
diff_mean_list_3 = []
diff_mean_list_5 = []
plane_std_list_3 = []
plane_std_list_5 = []
plane_mean_list_3 = []
plane_mean_list_5 = []
plane_max_std_list_3 = []
plane_max_std_list_5 = []
plane_max_mean_list_3 = []
plane_max_mean_list_5 = []
for sub_zone in sub_zones:
l_img = transform.get_LAB_L(sub_zone)
normed_l_img = utils.normalize_2D_arr(l_img)
# bilateral with window of size (3, 3)
normed_diff = convolution.convolution2D(normed_l_img, kernels.min_bilateral_diff, (3, 3))
std_diff = np.std(normed_diff)
#%%
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
rc('text.latex', preamble=r'''\usepackage{amsmath}
\usepackage{physics}
\usepackage{siunitx}
''')
THR = .5
WIDTH = 0
# def weight(q):
# if WIDTH>0:
# offset = 1/2 - THR / WIDTH
# return (np.piecewise(q,
# condlist=[
# q < THR - WIDTH / 2,
# q > THR - WIDTH / 2 and q < THR + WIDTH / 2 ,
# q > THR + WIDTH / 2,
# ],
# funclist=[
# 0,
# lambda x: x / WIDTH + offset,
# 1
# ]
# ))
# else:
# return (np.piecewise(q,
# condlist=[q < THR, q >= THR],
# funclist=[0, 1]
# ))
def f1(q):
return (.46224 * (q / (1 + q))**(1 / 3))
def f2(q):
return (.38 + .2 * np.log10(q))
def f(q):
if q < 0.5:
return (f1(q))
else:
return(f2(q))
f = np.vectorize(f, signature='()->()')
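# Worked continuity check at the switch point q = 0.5:
# f1(0.5) = 0.46224 * (1/3)**(1/3) ≈ 0.320 and
# f2(0.5) = 0.38 + 0.2 * log10(0.5) ≈ 0.320,
# so the two branches of f agree (to within ~1e-3) where they meet.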
#%%
qs = np.linspace(0, 8, num=1000)
f_q = f(qs)
# plt.plot(qs, f(qs))
# plt.xlabel('$q = M_2 / M_1$')
# plt.ylabel('$R_{\\text{{lobe}}} / a$')
# plt.savefig('roche-lobe-radius.pdf', format = 'pdf')
#%%
def a(q):
return((1+q)**4 / q**2)
a_q = a(qs)
plt.plot(qs, np.abs(np.gradient(f_q, qs) / f_q), label='$\\abs{\\Delta \\log f}$')
plt.plot(qs, np.abs(np.gradient(a_q, qs) / a_q), label='$\\abs{\\Delta \\log a}$')
import numpy as np
from numpy import linalg as npl
from meshpy import triangle
from mesh_sphere_packing import logger, ONE_THIRD, GROWTH_LIMIT
from mesh_sphere_packing.area_constraints import AreaConstraints
# TODO : change nomenclature. Segment is used in geometry to refer to an
# : edge connecting two points. Here segment is used to refer to part
# : of a sphere surface. This is confusing...
class PSLG(object):
"""Stores geometry and topology of a Planar Straigh Line Graph."""
def __init__(self, points, edges, holes):
"""Constructs PSLG object.
:param points numpy.ndarray: array of PSLG vertex coordinates.
:param adges numpy.ndarray: array of PSLG edges (vertex topology).
:param holes numpy.ndarray: array of coordinates of holes in the PSLG.
"""
self.points = points
self.edges = edges
self.holes = holes
class BoundaryPLC(object):
"""Stores geometry and topology of a Piecewise Linear Complex forming a domain
boundary.
"""
def __init__(self, points, tris, holes):
"""Constructs BoundaryPLC object.
:param points numpy.ndarray: array of PLC vertex coordinates.
:param adges numpy.ndarray: array of PLC tris (vertex topology).
:param holes numpy.ndarray: array of coordinates of holes in the PLC.
"""
self.points = points
self.tris = tris
self.holes = holes
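# Construction sketch (illustrative data, not from the package): both classes
# above only store their arguments, so a PSLG for a unit square with no holes
# can be assembled directly from NumPy arrays.
#   square_points = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
#   square_edges = np.array([[0, 1], [1, 2], [2, 3], [3, 0]])
#   square_pslg = PSLG(square_points, square_edges, np.empty((0, 2)))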
def build_boundary_PSLGs(domain, sphere_pieces, ds):
"""Constructs PSLGs for domain boundaries. Each boundary is represented by a
Planar Straight Line Graph consisting of set of vertices and edges corresponding
to the union of all intersection loops which lie on the boundary and all boundary
perimeter vertices and edges.
:param domain Domain: spatial domain for mesh.
:param sphere_pieces list: list of SpherePiece objects.
:param ds float: characteristic segment length.
:return: list of PSLG objects for the lower bounds along each coordinate axis.
:rtype: list.
"""
# TODO : Break up this function a bit.
def compile_points_edges(sphere_pieces):
"""Produces consolidated arrays containing all SpherePiece vertices and
edges.
:param sphere_pieces list: list of SpherePiece objects.
:return: tuple of arrays of vertex coordinates and topology.
:rtype: tuple.
"""
def build_edge_list(tris, points):
v_adj = np.zeros(2*[points.shape[0]], dtype=np.int32)
v_adj[tris[:,0], tris[:,1]] = v_adj[tris[:,1], tris[:,0]] = 1
v_adj[tris[:,1], tris[:,2]] = v_adj[tris[:,2], tris[:,1]] = 1
v_adj[tris[:,2], tris[:,0]] = v_adj[tris[:,0], tris[:,2]] = 1
return np.array(np.where(np.triu(v_adj) == 1), dtype=np.int32).T
vcount = 0
all_points = []
all_edges = []
for points, tris in [(p.points, p.tris) for p in sphere_pieces]:
edges = build_edge_list(tris, points)
edges += vcount
vcount += len(points)
all_points.append(points)
all_edges.append(edges)
return np.vstack(all_points), np.vstack(all_edges)
def refined_perimeter(perim, axis, ds):
"""Adds additional vertices to subdivide perimeter edge segments.
:param perim numpy.ndarray: array of vertices intersecting perimeter.
:param axis int: ordinal value of axis 0:x, 1:y, 2:z.
:param ds float: characteristic segment length.
:return: array of vertices intersecting refined perimeter.
:rtype: numpy.ndarray.
"""
def filter_colocated_points(perim, axis):
delta = np.diff(perim[:,axis])
keep_idx = np.hstack(([0], np.where(~np.isclose(delta,0.))[0] + 1))
return perim[keep_idx]
perim = filter_colocated_points(perim, axis)
refined_points = [perim[0]]
for e in [[i, i+1] for i in range(perim.shape[0]-1)]:
e_len = perim[e[1], axis] - perim[e[0], axis]
ne = int(np.ceil(e_len / ds))
if ne > 1:
dse = e_len / ne
add_points = np.zeros((ne,3))
add_points[:,axis] = dse * np.arange(1,ne+1)
refined_points.append(perim[e[0]] + add_points)
return np.vstack(refined_points)
def add_holes(sphere_pieces):
"""Add hole points to boundary PSLGs.
:param sphere_pieces list: list of SpherePiece objects.
:return: array of hole point vertices.
:rtype: numpy.ndarray.
"""
# TODO : this is a placeholder function. Ultimately holes need to
# : be created at the point when a sphere is split into pieces.
holes = [[] for _ in range(3)]
for i in range(3):
j, k = (i+1)%3, (i+2)%3
for points, tris in [(p.points, p.tris) for p in sphere_pieces]:
points_ax = points[np.isclose(points[:,i], 0.)]
if points_ax.shape[0]:
holes[i].append([
0.5 * (points_ax[:,j].max() + points_ax[:,j].min()),
0.5 * (points_ax[:,k].max() + points_ax[:,k].min())
])
holes[i] = np.vstack(holes[i]) if len(holes[i])\
else np.empty((0,2), dtype=np.float64)
return holes
def reindex_edges(points, points_ax, edges_ax):
"""Reindexes edges along a given axis.
:param points numpy.ndarray: all point coordinates.
:param points_ax numpy.ndarray: indices of points intersecting boundary.
:param edges_ax numpy.ndarray: edges interecting boundary.
:return: tuple of arrays of point coordinates and reindexed edges.
:rtype: tuple.
"""
points_segment = points[points_ax]
reindex = {old: new for new, old in enumerate(np.where(points_ax)[0])}
for i, (v0, v1) in enumerate(edges_ax):
edges_ax[i] = np.array([reindex[v0], reindex[v1]])
return points_segment, edges_ax
def build_perim_edge_list(points_pieces, perim_refined):
"""Construct list of perimeter edges for boundary.
:param points_pieces numpy.ndarray: sphere points intersecting boundary.
:param perim_refined numpy.ndarray: refined perimeter points.
:return: array of perimeter edge topology for boundary.
:rtype: numpy.ndarray.
"""
# Need to adjust edge indices for perimeter segments
v_count = len(points_pieces)
perim_edges = 4 * [None]
for j in range(4):
v_count_perim = len(perim_refined[j])
perim_vidx = np.empty(v_count_perim, dtype=np.int32)
mask = np.full(v_count_perim, True)
v_count_new = 0
for i, p in enumerate(perim_refined[j]):
vidx = np.where(np.isclose(npl.norm(points_pieces - p, axis=1), 0.))[0]
if len(vidx):
mask[i] = False
perim_vidx[i] = vidx[0]
else:
perim_vidx[i] = v_count + v_count_new
v_count_new += 1
perim_edges[j] = np.array([
[perim_vidx[k], perim_vidx[k+1]] for k in range(v_count_perim-1)
])
perim_refined[j] = perim_refined[j][mask]
v_count += v_count_new
return perim_edges
def add_point_plane_intersections(hole_pieces, axis, domain):
"""Adds points for sphere which just "touch" the boundary at a single point.
:param hole_pieces list: list of SpherePiece objects.
:param axis int: ordinal value of axis 0:x, 1:y, 2:z.
:param domain Domain: spatial domain for mesh.
:return: array of points touching boundary (may be empty).
:rtype: numpy.ndarray.
"""
added_points = []
for hole_piece in hole_pieces:
if np.isclose(hole_piece.sphere.min[axis], 0.):
close = np.where(np.isclose(hole_piece.points[:,axis], 0.))[0]
for idx in close:
added_points.append(hole_piece.points[idx])
elif np.isclose(hole_piece.sphere.max[axis], domain.L[axis]):
# -*- coding: utf-8 -*-
"""
v9s model
* Input: v5_im
Author: Kohei <<EMAIL>>
"""
from logging import getLogger, Formatter, StreamHandler, INFO, FileHandler
from pathlib import Path
import subprocess
import argparse
import math
import glob
import sys
import json
import re
import warnings
import scipy
import tqdm
import click
import tables as tb
import pandas as pd
import numpy as np
from keras.models import Model
from keras.engine.topology import merge as merge_l
from keras.layers import (
Input, Convolution2D, MaxPooling2D, UpSampling2D,
Reshape, core, Dropout,
Activation, BatchNormalization)
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, History
from keras import backend as K
import skimage.transform
import skimage.morphology
import rasterio.features
import shapely.wkt
import shapely.ops
import shapely.geometry
MODEL_NAME = 'v9s'
ORIGINAL_SIZE = 650
INPUT_SIZE = 256
LOGFORMAT = '%(asctime)s %(levelname)s %(message)s'
BASE_DIR = "/data/train"
WORKING_DIR = "/data/working"
IMAGE_DIR = "/data/working/images/{}".format('v5')
MODEL_DIR = "/data/working/models/{}".format(MODEL_NAME)
FN_SOLUTION_CSV = "/data/output/{}.csv".format(MODEL_NAME)
# Parameters
MIN_POLYGON_AREA = 30
# Input files
FMT_TRAIN_SUMMARY_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("summaryData/{prefix:s}_Train_Building_Solutions.csv"))
FMT_TRAIN_RGB_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TEST_RGB_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Test_public/") /
Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TRAIN_MSPEC_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
FMT_TEST_MSPEC_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Test_public/") /
Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
# Preprocessing result
FMT_BANDCUT_TH_PATH = IMAGE_DIR + "/bandcut{}.csv"
FMT_MUL_BANDCUT_TH_PATH = IMAGE_DIR + "/mul_bandcut{}.csv"
# Image list, Image container and mask container
FMT_VALTRAIN_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_valtrain_ImageId.csv"
FMT_VALTEST_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_valtest_ImageId.csv"
FMT_VALTRAIN_IM_STORE = IMAGE_DIR + "/valtrain_{}_im.h5"
FMT_VALTEST_IM_STORE = IMAGE_DIR + "/valtest_{}_im.h5"
FMT_VALTRAIN_MASK_STORE = IMAGE_DIR + "/valtrain_{}_mask.h5"
FMT_VALTEST_MASK_STORE = IMAGE_DIR + "/valtest_{}_mask.h5"
FMT_VALTRAIN_MUL_STORE = IMAGE_DIR + "/valtrain_{}_mul.h5"
FMT_VALTEST_MUL_STORE = IMAGE_DIR + "/valtest_{}_mul.h5"
FMT_TRAIN_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_train_ImageId.csv"
FMT_TEST_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_test_ImageId.csv"
FMT_TRAIN_IM_STORE = IMAGE_DIR + "/train_{}_im.h5"
FMT_TEST_IM_STORE = IMAGE_DIR + "/test_{}_im.h5"
FMT_TRAIN_MASK_STORE = IMAGE_DIR + "/train_{}_mask.h5"
FMT_TRAIN_MUL_STORE = IMAGE_DIR + "/train_{}_mul.h5"
FMT_TEST_MUL_STORE = IMAGE_DIR + "/test_{}_mul.h5"
FMT_IMMEAN = IMAGE_DIR + "/{}_immean.h5"
FMT_MULMEAN = IMAGE_DIR + "/{}_mulmean.h5"
# Model files
FMT_VALMODEL_PATH = MODEL_DIR + "/{}_val_weights.h5"
FMT_FULLMODEL_PATH = MODEL_DIR + "/{}_full_weights.h5"
FMT_VALMODEL_HIST = MODEL_DIR + "/{}_val_hist.csv"
FMT_VALMODEL_EVALHIST = MODEL_DIR + "/{}_val_evalhist.csv"
FMT_VALMODEL_EVALTHHIST = MODEL_DIR + "/{}_val_evalhist_th.csv"
# Prediction & polygon result
FMT_TESTPRED_PATH = MODEL_DIR + "/{}_pred.h5"
FMT_VALTESTPRED_PATH = MODEL_DIR + "/{}_eval_pred.h5"
FMT_VALTESTPOLY_PATH = MODEL_DIR + "/{}_eval_poly.csv"
FMT_VALTESTTRUTH_PATH = MODEL_DIR + "/{}_eval_poly_truth.csv"
FMT_VALTESTPOLY_OVALL_PATH = MODEL_DIR + "/eval_poly.csv"
FMT_VALTESTTRUTH_OVALL_PATH = MODEL_DIR + "/eval_poly_truth.csv"
FMT_TESTPOLY_PATH = MODEL_DIR + "/{}_poly.csv"
# Model related files (others)
FMT_VALMODEL_LAST_PATH = MODEL_DIR + "/{}_val_weights_last.h5"
FMT_FULLMODEL_LAST_PATH = MODEL_DIR + "/{}_full_weights_last.h5"
# Logger
warnings.simplefilter("ignore", UserWarning)
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(Formatter(LOGFORMAT))
fh_handler = FileHandler(".{}.log".format(MODEL_NAME))
fh_handler.setFormatter(Formatter(LOGFORMAT))
logger = getLogger('spacenet2')
logger.setLevel(INFO)
if __name__ == '__main__':
logger.addHandler(handler)
logger.addHandler(fh_handler)
# Fix seed for reproducibility
np.random.seed(1145141919)
def directory_name_to_area_id(datapath):
"""
Directory name to AOI number
Usage:
>>> directory_name_to_area_id("/data/test/AOI_2_Vegas")
2
"""
dir_name = Path(datapath).name
if dir_name.startswith('AOI_2_Vegas'):
return 2
elif dir_name.startswith('AOI_3_Paris'):
return 3
elif dir_name.startswith('AOI_4_Shanghai'):
return 4
elif dir_name.startswith('AOI_5_Khartoum'):
return 5
else:
raise RuntimeError("Unsupported city id is given.")
def _remove_interiors(line):
if "), (" in line:
line_prefix = line.split('), (')[0]
line_terminate = line.split('))",')[-1]
line = (
line_prefix +
'))",' +
line_terminate
)
return line
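# Example (sketch, made-up row): a solution line whose polygon contains an
# interior ring, e.g.
#   'img1,"POLYGON ((0 0, 1 0, 1 1, 0 0), (0.2 0.2, 0.3 0.2, 0.2 0.3, 0.2 0.2))",1'
# is reduced to its exterior ring only:
#   'img1,"POLYGON ((0 0, 1 0, 1 1, 0 0))",1'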
def __load_band_cut_th(band_fn, bandsz=3):
df = pd.read_csv(band_fn, index_col='area_id')
all_band_cut_th = {area_id: {} for area_id in range(2, 6)}
for area_id, row in df.iterrows():
for chan_i in range(bandsz):
all_band_cut_th[area_id][chan_i] = dict(
min=row['chan{}_min'.format(chan_i)],
max=row['chan{}_max'.format(chan_i)],
)
return all_band_cut_th
def _calc_fscore_per_aoi(area_id):
prefix = area_id_to_prefix(area_id)
truth_file = FMT_VALTESTTRUTH_PATH.format(prefix)
poly_file = FMT_VALTESTPOLY_PATH.format(prefix)
cmd = [
'java',
'-jar',
'/root/visualizer-2.0/visualizer.jar',
'-truth',
truth_file,
'-solution',
poly_file,
'-no-gui',
'-band-triplets',
'/root/visualizer-2.0/data/band-triplets.txt',
'-image-dir',
'pass',
]
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_data, stderr_data = proc.communicate()
lines = [line for line in stdout_data.decode('utf8').split('\n')[-10:]]
"""
Overall F-score : 0.85029
AOI_2_Vegas:
TP : 27827
FP : 4999
FN : 4800
Precision: 0.847712
Recall : 0.852883
F-score : 0.85029
"""
if stdout_data.decode('utf8').strip().endswith("Overall F-score : 0"):
overall_fscore = 0
tp = 0
fp = 0
fn = 0
precision = 0
recall = 0
fscore = 0
elif len(lines) > 0 and lines[0].startswith("Overall F-score : "):
assert lines[0].startswith("Overall F-score : ")
assert lines[2].startswith("AOI_")
assert lines[3].strip().startswith("TP")
assert lines[4].strip().startswith("FP")
assert lines[5].strip().startswith("FN")
assert lines[6].strip().startswith("Precision")
assert lines[7].strip().startswith("Recall")
assert lines[8].strip().startswith("F-score")
overall_fscore = float(re.findall("([\d\.]+)", lines[0])[0])
tp = int(re.findall("(\d+)", lines[3])[0])
fp = int(re.findall("(\d+)", lines[4])[0])
fn = int(re.findall("(\d+)", lines[5])[0])
precision = float(re.findall("([\d\.]+)", lines[6])[0])
recall = float(re.findall("([\d\.]+)", lines[7])[0])
fscore = float(re.findall("([\d\.]+)", lines[8])[0])
else:
logger.warn("Unexpected data >>> " + stdout_data.decode('utf8'))
raise RuntimeError("Unsupported format")
return {
'overall_fscore': overall_fscore,
'tp': tp,
'fp': fp,
'fn': fn,
'precision': precision,
'recall': recall,
'fscore': fscore,
}
def prefix_to_area_id(prefix):
area_dict = {
'AOI_2_Vegas': 2,
'AOI_3_Paris': 3,
'AOI_4_Shanghai': 4,
'AOI_5_Khartoum': 5,
}
return area_dict[prefix]
def area_id_to_prefix(area_id):
area_dict = {
2: 'AOI_2_Vegas',
3: 'AOI_3_Paris',
4: 'AOI_4_Shanghai',
5: 'AOI_5_Khartoum',
}
return area_dict[area_id]
# ---------------------------------------------------------
# main
def _get_model_parameter(area_id):
prefix = area_id_to_prefix(area_id)
fn_hist = FMT_VALMODEL_EVALTHHIST.format(prefix)
best_row = pd.read_csv(fn_hist).sort_values(
by='fscore',
ascending=False,
).iloc[0]
param = dict(
fn_epoch=int(best_row['zero_base_epoch']),
min_poly_area=int(best_row['min_area_th']),
)
return param
def get_resized_raster_3chan_image(image_id, band_cut_th=None):
fn = train_image_id_to_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
for chan_i in range(3):
min_val = band_cut_th[chan_i]['min']
max_val = band_cut_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
values = np.swapaxes(values, 0, 2)
values = np.swapaxes(values, 0, 1)
values = skimage.transform.resize(values, (INPUT_SIZE, INPUT_SIZE))
return values
def get_resized_raster_3chan_image_test(image_id, band_cut_th=None):
fn = test_image_id_to_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
for chan_i in range(3):
min_val = band_cut_th[chan_i]['min']
max_val = band_cut_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
values = np.swapaxes(values, 0, 2)
values = np.swapaxes(values, 0, 1)
values = skimage.transform.resize(values, (INPUT_SIZE, INPUT_SIZE))
return values
def image_mask_resized_from_summary(df, image_id):
im_mask = np.zeros((650, 650))
for idx, row in df[df.ImageId == image_id].iterrows():
shape_obj = shapely.wkt.loads(row.PolygonWKT_Pix)
if shape_obj.exterior is not None:
coords = list(shape_obj.exterior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 1
interiors = shape_obj.interiors
for interior in interiors:
coords = list(interior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 0
im_mask = skimage.transform.resize(im_mask, (INPUT_SIZE, INPUT_SIZE))
im_mask = (im_mask > 0.5).astype(np.uint8)
return im_mask
def train_test_image_prep(area_id):
prefix = area_id_to_prefix(area_id)
df_train = pd.read_csv(
FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_test = pd.read_csv(
FMT_TEST_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
band_cut_th = __load_band_cut_th(
FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
df_summary = _load_train_summary_data(area_id)
fn = FMT_TRAIN_IM_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im = get_resized_raster_3chan_image(image_id, band_cut_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_TEST_IM_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
im = get_resized_raster_3chan_image_test(image_id, band_cut_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_TRAIN_MASK_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im_mask = image_mask_resized_from_summary(df_summary, image_id)
atom = tb.Atom.from_dtype(im_mask.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im_mask.shape,
filters=filters)
ds[:] = im_mask
def valtrain_test_image_prep(area_id):
prefix = area_id_to_prefix(area_id)
logger.info("valtrain_test_image_prep for {}".format(prefix))
df_train = pd.read_csv(
FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_test = pd.read_csv(
FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
band_cut_th = __load_band_cut_th(
FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
df_summary = _load_train_summary_data(area_id)
fn = FMT_VALTRAIN_IM_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im = get_resized_raster_3chan_image(image_id, band_cut_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_VALTEST_IM_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
im = get_resized_raster_3chan_image(image_id, band_cut_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_VALTRAIN_MASK_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im_mask = image_mask_resized_from_summary(df_summary, image_id)
atom = tb.Atom.from_dtype(im_mask.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im_mask.shape,
filters=filters)
ds[:] = im_mask
fn = FMT_VALTEST_MASK_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
im_mask = image_mask_resized_from_summary(df_summary, image_id)
atom = tb.Atom.from_dtype(im_mask.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im_mask.shape,
filters=filters)
ds[:] = im_mask
def train_test_mul_image_prep(area_id):
prefix = area_id_to_prefix(area_id)
df_train = pd.read_csv(
FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_test = pd.read_csv(
FMT_TEST_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
band_rgb_th = __load_band_cut_th(
FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
band_mul_th = __load_band_cut_th(
FMT_MUL_BANDCUT_TH_PATH.format(prefix), bandsz=8)[area_id]
df_summary = _load_train_summary_data(area_id)
fn = FMT_TRAIN_MUL_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im = get_resized_raster_8chan_image(
image_id, band_rgb_th, band_mul_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_TEST_MUL_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
im = get_resized_raster_8chan_image_test(
image_id, band_rgb_th, band_mul_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
def valtrain_test_mul_image_prep(area_id):
prefix = area_id_to_prefix(area_id)
logger.info("valtrain_test_image_prep for {}".format(prefix))
df_train = pd.read_csv(
FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_test = pd.read_csv(
FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
band_rgb_th = __load_band_cut_th(
FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
band_mul_th = __load_band_cut_th(
FMT_MUL_BANDCUT_TH_PATH.format(prefix), bandsz=8)[area_id]
df_summary = _load_train_summary_data(area_id)
fn = FMT_VALTRAIN_MUL_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im = get_resized_raster_8chan_image(
image_id, band_rgb_th, band_mul_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_VALTEST_MUL_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
im = get_resized_raster_8chan_image(
image_id, band_rgb_th, band_mul_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
def _load_train_summary_data(area_id):
prefix = area_id_to_prefix(area_id)
fn = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
df = pd.read_csv(fn)
return df
def split_val_train_test(area_id):
prefix = area_id_to_prefix(area_id)
df = _load_train_summary_data(area_id)
df_agg = df.groupby('ImageId').agg('first')
image_id_list = df_agg.index.tolist()
np.random.shuffle(image_id_list)
sz_valtrain = int(len(image_id_list) * 0.7)
sz_valtest = len(image_id_list) - sz_valtrain
pd.DataFrame({'ImageId': image_id_list[:sz_valtrain]}).to_csv(
FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
index=False)
pd.DataFrame({'ImageId': image_id_list[sz_valtrain:]}).to_csv(
FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
index=False)
def train_image_id_to_mspec_path(image_id):
prefix = image_id_to_prefix(image_id)
fn = FMT_TRAIN_MSPEC_IMAGE_PATH.format(
prefix=prefix,
image_id=image_id)
return fn
def test_image_id_to_mspec_path(image_id):
prefix = image_id_to_prefix(image_id)
fn = FMT_TEST_MSPEC_IMAGE_PATH.format(
prefix=prefix,
image_id=image_id)
return fn
def train_image_id_to_path(image_id):
prefix = image_id_to_prefix(image_id)
fn = FMT_TRAIN_RGB_IMAGE_PATH.format(
prefix=prefix,
image_id=image_id)
return fn
def test_image_id_to_path(image_id):
prefix = image_id_to_prefix(image_id)
fn = FMT_TEST_RGB_IMAGE_PATH.format(
prefix=prefix,
image_id=image_id)
return fn
def image_id_to_prefix(image_id):
prefix = image_id.split('img')[0][:-1]
return prefix
def calc_multiband_cut_threshold(area_id):
rows = []
band_cut_th = __calc_multiband_cut_threshold(area_id)
prefix = area_id_to_prefix(area_id)
row = dict(prefix=area_id_to_prefix(area_id))
row['area_id'] = area_id
for chan_i in band_cut_th.keys():
row['chan{}_max'.format(chan_i)] = band_cut_th[chan_i]['max']
row['chan{}_min'.format(chan_i)] = band_cut_th[chan_i]['min']
rows.append(row)
pd.DataFrame(rows).to_csv(FMT_BANDCUT_TH_PATH.format(prefix), index=False)
def __calc_multiband_cut_threshold(area_id):
prefix = area_id_to_prefix(area_id)
band_values = {k: [] for k in range(3)}
band_cut_th = {k: dict(max=0, min=0) for k in range(3)}
image_id_list = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(
prefix=prefix)).ImageId.tolist()
for image_id in tqdm.tqdm(image_id_list[:500]):
image_fn = train_image_id_to_path(image_id)
with rasterio.open(image_fn, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(3):
values_ = values[i_chan].ravel().tolist()
values_ = np.array(
[v for v in values_ if v != 0]
) # Remove zero-valued (sensor nodata) pixels
band_values[i_chan].append(values_)
image_id_list = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(
prefix=prefix)).ImageId.tolist()
for image_id in tqdm.tqdm(image_id_list[:500]):
image_fn = train_image_id_to_path(image_id)
with rasterio.open(image_fn, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(3):
values_ = values[i_chan].ravel().tolist()
values_ = np.array(
[v for v in values_ if v != 0]
) # Remove zero-valued (sensor nodata) pixels
band_values[i_chan].append(values_)
for i_chan in range(3):
band_values[i_chan] = np.concatenate(
band_values[i_chan]).ravel()
band_cut_th[i_chan]['max'] = scipy.percentile(
band_values[i_chan], 98)
band_cut_th[i_chan]['min'] = scipy.percentile(
band_values[i_chan], 2)
return band_cut_th
def calc_mul_multiband_cut_threshold(area_id):
rows = []
band_cut_th = __calc_mul_multiband_cut_threshold(area_id)
prefix = area_id_to_prefix(area_id)
row = dict(prefix=area_id_to_prefix(area_id))
row['area_id'] = area_id
for chan_i in band_cut_th.keys():
row['chan{}_max'.format(chan_i)] = band_cut_th[chan_i]['max']
row['chan{}_min'.format(chan_i)] = band_cut_th[chan_i]['min']
rows.append(row)
pd.DataFrame(rows).to_csv(
FMT_MUL_BANDCUT_TH_PATH.format(prefix),
index=False)
def __calc_mul_multiband_cut_threshold(area_id):
prefix = area_id_to_prefix(area_id)
band_values = {k: [] for k in range(8)}
band_cut_th = {k: dict(max=0, min=0) for k in range(8)}
image_id_list = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(
prefix=prefix)).ImageId.tolist()
for image_id in tqdm.tqdm(image_id_list[:500]):
image_fn = train_image_id_to_mspec_path(image_id)
with rasterio.open(image_fn, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(8):
values_ = values[i_chan].ravel().tolist()
values_ = np.array(
[v for v in values_ if v != 0]
                ) # Remove censored mask
band_values[i_chan].append(values_)
image_id_list = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(
prefix=prefix)).ImageId.tolist()
for image_id in tqdm.tqdm(image_id_list[:500]):
image_fn = train_image_id_to_mspec_path(image_id)
with rasterio.open(image_fn, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(8):
values_ = values[i_chan].ravel().tolist()
values_ = np.array(
[v for v in values_ if v != 0]
                ) # Remove censored mask
band_values[i_chan].append(values_)
for i_chan in range(8):
band_values[i_chan] = np.concatenate(
band_values[i_chan]).ravel()
band_cut_th[i_chan]['max'] = scipy.percentile(
band_values[i_chan], 98)
band_cut_th[i_chan]['min'] = scipy.percentile(
band_values[i_chan], 2)
return band_cut_th
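# --- Illustrative sketch (not part of the original pipeline) ------------------
# The helpers above collect per-channel pixel values and derive 2%/98% percentile
# cut thresholds. The hypothetical function below shows, on synthetic data, how
# such thresholds are typically applied to clip and rescale one band to [0, 1].
def _demo_band_cut_normalization():
    band = np.random.RandomState(0).gamma(2.0, 200.0, size=(64, 64))  # fake raster band
    lo, hi = np.percentile(band, 2), np.percentile(band, 98)  # same cut levels as above
    clipped = np.clip(band, lo, hi)
    return (clipped - lo) / (hi - lo)  # values rescaled to [0, 1]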
def get_unet():
conv_params = dict(activation='relu', border_mode='same')
merge_params = dict(mode='concat', concat_axis=1)
inputs = Input((8, 256, 256))
conv1 = Convolution2D(32, 3, 3, **conv_params)(inputs)
conv1 = Convolution2D(32, 3, 3, **conv_params)(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(64, 3, 3, **conv_params)(pool1)
conv2 = Convolution2D(64, 3, 3, **conv_params)(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, **conv_params)(pool2)
conv3 = Convolution2D(128, 3, 3, **conv_params)(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(256, 3, 3, **conv_params)(pool3)
conv4 = Convolution2D(256, 3, 3, **conv_params)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Convolution2D(512, 3, 3, **conv_params)(pool4)
conv5 = Convolution2D(512, 3, 3, **conv_params)(conv5)
up6 = merge_l([UpSampling2D(size=(2, 2))(conv5), conv4], **merge_params)
conv6 = Convolution2D(256, 3, 3, **conv_params)(up6)
conv6 = Convolution2D(256, 3, 3, **conv_params)(conv6)
up7 = merge_l([UpSampling2D(size=(2, 2))(conv6), conv3], **merge_params)
conv7 = Convolution2D(128, 3, 3, **conv_params)(up7)
conv7 = Convolution2D(128, 3, 3, **conv_params)(conv7)
up8 = merge_l([UpSampling2D(size=(2, 2))(conv7), conv2], **merge_params)
conv8 = Convolution2D(64, 3, 3, **conv_params)(up8)
conv8 = Convolution2D(64, 3, 3, **conv_params)(conv8)
up9 = merge_l([UpSampling2D(size=(2, 2))(conv8), conv1], **merge_params)
conv9 = Convolution2D(32, 3, 3, **conv_params)(up9)
conv9 = Convolution2D(32, 3, 3, **conv_params)(conv9)
conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
adam = Adam()
model = Model(input=inputs, output=conv10)
model.compile(optimizer=adam,
loss='binary_crossentropy',
metrics=['accuracy', jaccard_coef, jaccard_coef_int])
return model
def jaccard_coef(y_true, y_pred):
smooth = 1e-12
intersection = K.sum(y_true * y_pred, axis=[0, -1, -2])
sum_ = K.sum(y_true + y_pred, axis=[0, -1, -2])
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(jac)
def jaccard_coef_int(y_true, y_pred):
smooth = 1e-12
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
intersection = K.sum(y_true * y_pred_pos, axis=[0, -1, -2])
sum_ = K.sum(y_true + y_pred_pos, axis=[0, -1, -2])
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(jac)
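# --- Illustrative sketch (not part of the original pipeline) ------------------
# jaccard_coef above is a smoothed intersection-over-union on Keras tensors. The
# hypothetical NumPy mirror below applies the same formula to a toy pair of masks
# as a sanity check of the metric.
def _demo_jaccard_numpy(smooth=1e-12):
    y_true = np.zeros((1, 1, 4, 4))
    y_true[..., :2, :2] = 1.0  # 2x2 square of positives
    y_pred = np.zeros((1, 1, 4, 4))
    y_pred[..., :2, 1:3] = 1.0  # same square shifted right by one pixel
    intersection = np.sum(y_true * y_pred, axis=(0, -1, -2))
    union = np.sum(y_true + y_pred, axis=(0, -1, -2)) - intersection
    return float(np.mean((intersection + smooth) / (union + smooth)))  # 2 / 6 = 1/3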
def generate_test_batch(area_id,
batch_size=64,
immean=None,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
df_test = pd.read_csv(FMT_TEST_IMAGELIST_PATH.format(prefix=prefix))
fn_im = FMT_TEST_MUL_STORE.format(prefix)
image_id_list = df_test.ImageId.tolist()
if enable_tqdm:
pbar = tqdm.tqdm(total=len(image_id_list))
while 1:
total_sz = len(image_id_list)
n_batch = int(math.floor(total_sz / batch_size) + 1)
with tb.open_file(fn_im, 'r') as f_im:
for i_batch in range(n_batch):
target_image_ids = image_id_list[
i_batch*batch_size:(i_batch+1)*batch_size
]
if len(target_image_ids) == 0:
continue
X_test = []
y_test = []
for image_id in target_image_ids:
im = np.array(f_im.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_test.append(im)
mask = np.zeros((INPUT_SIZE, INPUT_SIZE)).astype(np.uint8)
y_test.append(mask)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_test = y_test.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
if immean is not None:
X_test = X_test - immean
if enable_tqdm:
pbar.update(y_test.shape[0])
yield (X_test, y_test)
if enable_tqdm:
pbar.close()
def get_resized_raster_8chan_image_test(image_id, band_rgb_th, band_mul_th):
"""
RGB + multispectral (total: 8 channels)
"""
im = []
fn = test_image_id_to_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
for chan_i in range(3):
min_val = band_rgb_th[chan_i]['min']
max_val = band_rgb_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
im.append(skimage.transform.resize(
values[chan_i],
(INPUT_SIZE, INPUT_SIZE)))
fn = test_image_id_to_mspec_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
usechannels = [1, 2, 5, 6, 7]
for chan_i in usechannels:
min_val = band_mul_th[chan_i]['min']
max_val = band_mul_th[chan_i]['max']
            values[chan_i] = np.clip(values[chan_i], min_val, max_val)
"""
Created on June 6, 2016
@author: <NAME> (<EMAIL>)
Updated Nov 21, 2017 by <NAME> (github.com/Spenca)
"""
import csv
import os, sys, io
import re
import pandas as pd
import numpy as np
import requests
import yaml
from string import Template
from collections import OrderedDict
from datetime import date, datetime, timedelta
#===============
# Django imports
#---------------
from django.db.models import Count, Q, F
from django.http import HttpResponse
from sisyphus.models import DlpAnalysisInformation, Project
from tenx.models import TenxPool
from .models import Sample, SublibraryInformation, ChipRegion, ChipRegionMetadata, MetadataField, DoubletInformation
from dlp.models import (DlpLane, DlpSequencing, DlpLibrary)
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.core.exceptions import ValidationError
#============================
# Pipeline Status
#----------------------------
def get_sequence_date(analysis, library=None):
try:
# Is it for Library?
if library:
sequencing_set = analysis.dlpsequencing_set.all()
# Does Analysis have lanes, if then retrieve latest lane_requested_date from sequencings related to lanes
elif analysis.lanes.all():
sequencing_set = set(l.sequencing for l in analysis.lanes.all())
# Else, Does Analysis have sequencings, retrieve latest lane_requested_date from sequencings directly attached analysis
elif analysis.sequencings.all():
sequencing_set = analysis.sequencings.all()
# Else, Does Analysis's Library have sequencings
else:
sequencing_set = analysis.library.dlpsequencing_set.all()
return max([sequencing.lane_requested_date for sequencing in sequencing_set])
except:
return None
def analysis_info_dict(analysis):
lanes = analysis.lanes.count()
goal = sum(s.number_of_lanes_requested for s in analysis.sequencings.all())
submission_date = get_sequence_date(analysis)
return {
"jira": analysis.analysis_jira_ticket,
"lanes": "{}/{}".format(lanes, goal),
"version": analysis.version.version,
"run_status": analysis.analysis_run.run_status,
"aligner": "bwa-aln" if analysis.aligner is "A" else "bwa-mem",
"submission": str(submission_date) if submission_date else None,
"last_updated": str(analysis.analysis_run.last_updated.date()) if analysis.analysis_run.last_updated else None
}
def fetch_montage():
r = requests.get('https://52.235.35.201/_cat/indices', verify=False, auth=("guest", "sh<PASSWORD>!Montage")).text
return [j.replace("sc", "SC") for j in re.findall('sc-\d{4}', r)]
def analysis_to_row(analysis, basic_dict=None, incomplete=None):
if not basic_dict:
basic_dict = {"name": analysis.library.sample.sample_id, "library": analysis.library.pool_id}
return {**basic_dict, **analysis_info_dict(analysis)}
# | Validate whether a given analysis is IMPORTED or not
# | Input: Analysis
# | Output: Boolean
# {True if imported}
def validate_imported(analysis):
    # Retrieve all lanes attached to Analysis and create a set of sequencings based on it
related_sequencings = set(l.sequencing for l in analysis.lanes.all())
# Check if count(lanes attached to analysis) is smaller or equal to count(lanes attached to related_sequencings)
return analysis.lanes.count() <= sum(s.dlplane_set.count() for s in related_sequencings)
# | (INCOMPLETE) Fetch Row Information related to incomplete Analyses
# | Input: None
# | Output: List of Objects to populate Status Page Rows
def fetch_rows_from_incomplete_analyses():
object_list = []
analyses = DlpAnalysisInformation.objects.exclude(
analysis_run__run_status__in=['complete', 'align_complete', 'hmmcopy_complete'])
for a in analyses.all():
object_list.append(analysis_to_row(a, incomplete=True))
return object_list
# | (PROJECTS) Fetch Row Information related to given a set of dlp libraries
# | Input: Set of Dlp Libraries
# | Output: List of Objects to populate Status Page Rows
def fetch_rows_from_libraries(libraries, wetlab=None, no_analysis=None):
object_list = []
for library in libraries:
basic_dict = {"name": library.sample.sample_id, "library": library.pool_id}
# For each libraries retrieve all attached analyses
analyses = library.dlpanalysisinformation_set.all()
if analyses and not no_analysis:
for analysis in analyses:
#Hide completed analysis if wetlab
if not wetlab:
object_list.append((analysis_to_row(analysis, basic_dict)))
# If Library does not have any analysis, fill in NA information
else:
# if Wetlab display Sequencing lane info instead of Analysis lane info
if wetlab or no_analysis:
sequencings = library.dlpsequencing_set.all()
if sequencings:
goal = sum(l.number_of_lanes_requested for l in sequencings)
lane = sum(l.dlplane_set.count() for l in sequencings)
basic_dict = {**basic_dict, "lanes": "{}/{}".format(lane, goal) if sequencings else None}
object_list.append({**basic_dict, "submission": str(get_sequence_date(library, True))})
return object_list
# | Fetch Row Information related to given a set of sequencings
# | Input: Set of Sequencings
# | Output: List of Objects to populate Status Page Rows
def fetch_rows_from_sequencings(sequencings, wetlab=False):
object_list = []
for sequencing in sequencings:
object_list += fetch_rows_from_libraries([sequencing.library], wetlab=wetlab)
return object_list
# | (NO ANALYSIS) Fetch Row Information related to libraries with no analyses but correct lane numbers
# | Input: None
# | Output: List of Objects to populate Status Page Rows
def fetch_rows_from_no_analysis_libraries():
libraries = DlpLibrary.objects\
.annotate(lane_count=Count('dlpsequencing__dlplane'),lane_goal=Count('dlpsequencing__number_of_lanes_requested'))\
.filter(Q(dlpanalysisinformation=None)&Q(lane_count=F('lane_goal'))).all()
return fetch_rows_from_libraries(libraries, no_analysis=True)
# | (WETLAB) Fetch Row Information from sequencings with certain conditions:
# | 1. (OR) Mismatching lane count
# | 2. (AND) Lane requested within 2 months
# | 3. Additionally, hide completed analyses
# | 4. Recently COMPLETED
# | Input: None
# | Output: List of Objects to populate Status Page Rows
def fetch_rows_for_wetlab():
threshold = datetime.now() - timedelta(days=60)
# Unimported
sequencings = DlpSequencing.objects\
.annotate(lane_count=Count('dlplane'))\
.filter((Q(lane_count=0)|Q(lane_count__lt=F('number_of_lanes_requested')))&Q(lane_requested_date__gte=threshold))
# Recently Finished or Updated
threshold = datetime.now() - timedelta(days=14)
analyses = DlpAnalysisInformation.objects\
.filter(Q(analysis_run__run_status__in=['complete','align_complete','hmmcopy_complete'])&Q(analysis_run__last_updated__gte=threshold))
analyses_list = [{
**{
"name": a.library.sample.sample_id,
"library": a.library.pool_id
},
**analysis_info_dict(a)
} for a in analyses]
return fetch_rows_from_sequencings(sequencings, wetlab=True) + analyses_list
# | List of Status Page Row Objects
# |
# | WETLAB:
# | Populate row from all sequencings with lane !== goal && recently submitted (2 months)
# |
# | NO ANALYSIS:
# | Populate row from all libraries with sum(sequencing's requested_lane_number) == sum(sequencing's lane count),
# | but no Analysis attached.
# |
# | INCOMPLETE:
# | Populate row from all analyses with run_status not set as either one of ['complete','align_complete','hmmcopy_complete']
# |
# | PROJECTS:
# | Populate rows from set of DlpLibraries of selected Project
def fetch_row_objects(type, key=None):
type = type.strip()
if type == "PROJECTS":
return fetch_rows_from_libraries(Project.objects.get(name=key).dlplibrary_set.all())
elif type == "INCOMPLETE":
return fetch_rows_from_incomplete_analyses()
elif type == "NO ANALYSIS":
return fetch_rows_from_no_analysis_libraries()
elif type == "WETLAB":
return fetch_rows_for_wetlab()
else:
return
#==================================================
# Upload, parse and populate Sublibrary Information
#--------------------------------------------------
def read_excel_sheets(filename, sheetnames):
""" Read the excel sheet.
"""
try:
data = pd.read_excel(filename, sheet_name=None)
except IOError:
raise ValueError('unable to find file', filename)
for sheetname in sheetnames:
if sheetname not in data:
raise ValueError('unable to read sheet(s)', sheetname)
yield data[sheetname]
def check_smartchip_row(index, smartchip_row):
row_sum = sum(smartchip_row)
single_matrix = np.identity(3)
doublet_matrix = np.identity(3) * 2
# Row does not have cells
if smartchip_row == [0, 0, 0]:
cell = None
# TODO: Clean up code; use identity matrices
# Row is singlet
elif row_sum == 1:
for row in range(len(smartchip_row)):
if np.array_equal(smartchip_row, single_matrix[row]):
cell = [row, 0]
# Row is doublet and is strictly live/dead/other
elif row_sum == 2 and len(np.where(np.array(smartchip_row) == 0)[0]) == 2:
for row in range(len(smartchip_row)):
if np.array_equal(smartchip_row, doublet_matrix[row]):
cell = [row, 1]
# Row is doublet but mixed
elif row_sum == 2 and len(np.where(np.array(smartchip_row) == 0)[0]) != 2:
cell = [2, 1]
# Greater than doublet row and row is multiple of unit vector
elif row_sum > 2 and row_sum in smartchip_row:
        non_zero_index = np.where(smartchip_row != 0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 14 19:50:56 2020
@author: hiroyasu
"""
import cvxpy as cp
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import control
import SCPmulti as scp
import pickle
import TrainRNN as trnn
import torch
import pandas as pd
DT = scp.DT
TSPAN = scp.TSPAN
M = scp.M
II = scp.II
L = scp.L
bb = scp.bb
FMIN = scp.FMIN
FMAX = scp.FMAX
RungeNum = scp.RungeNum
AA = scp.AA
Robs = scp.Robs
Rsafe = scp.Rsafe
XOBSs = scp.XOBSs
XXd0 = np.load('data/params/desired_n/Xhis.npy')
UUd0 = np.load('data/params/desired_n/Uhis.npy')
dratio = 0.2
d_over = np.sqrt(3)*dratio
X0 = scp.X0
Xf = scp.Xf
n_input = trnn.n_input
n_hidden = trnn.n_hidden
n_output = trnn.n_output
n_layers = trnn.n_layers
class Spacecraft:
def __init__(self,XXd0,UUd0,tspan=TSPAN,dt=DT,runge_num=RungeNum,m=M,I=II,l=L,b=bb,A=AA,fmin=FMIN,fmax=FMAX):
self.XXd = XXd0
self.UUd = UUd0
self.tspan = tspan
self.dt = dt
self.runge_num = runge_num
self.h = dt/runge_num
self.m = m
self.I = I
self.l = l
self.b = b
self.A = A
self.fmin = fmin
self.fmax = fmax
self.net = trnn.RNN(n_input,n_hidden,n_output,n_layers)
self.net.load_state_dict(torch.load('data/trained_nets/RNNLorenz.pt'))
self.net.eval()
self.YC = np.load('data/trained_nets/Y_params.npy')
self.ns = 6
self.RNN = 1
def GetP(self,X):
Xnet = self.Np2Var(X)
if self.RNN == 1:
cP = self.net(Xnet.view(1,1,-1))
cP = cP.data.numpy()*self.YC
P = self.cP2P(cP[0,0,:])
else:
cP = self.net(Xnet)
cP = cP.data.numpy()
P = self.cP2P(cP)
return P
def GetK(self,X):
P = self.GetP(X)
B = self.GetB(X)
K = B.T@P
return K
def Np2Var(self,X):
X = X.astype(np.float32)
X = torch.from_numpy(X)
return X
def cP2P(self,cP):
cPnp = 0
for i in range(self.ns):
lb = i*(i+1)/2
lb = int(lb)
ub = (i+1)*(i+2)/2
ub = int(ub)
Di = cP[lb:ub]
Di = np.diag(Di,self.ns-(i+1))
cPnp += Di
P = (cPnp.T)@cPnp
return P
def GetPdot(self,X,Ud,Xdp1):
dX = Xdp1-X
P = self.GetP(X)
Pdot = 0
dXdt = self.dynamics(0,X,Ud)
for i in range(self.ns):
dx = np.zeros(self.ns)
dx[i] = dX[i]
Pdot += (self.GetP(X+dx)-P)/np.linalg.norm(dx)*dXdt[i]
return Pdot
def dynamics(self,t,states,inputs):
A = self.A
B = self.GetB(states)
Xv = np.transpose(np.array([states]))
Uv = np.transpose(np.array([inputs]))
dXdt = A.dot(Xv)+B.dot(Uv)
dXdt = dXdt[:,0]
return dXdt
def GetB(self,states):
m = self.m
I = self.I
l = self.l
b = self.b
th = states[2]
T = np.array([[np.cos(th)/m,np.sin(th)/m,0],[-np.sin(th)/m,np.cos(th)/m,0],[0,0,1/2./I]])
H = np.array([[-1,-1,0,0,1,1,0,0],[0,0,-1,-1,0,0,1,1],[-l,l,-b,b,-l,l,-b,b]])
B = np.vstack((np.zeros((3,8)),T@H))
return B
def rk4(self,t,X,U):
h = self.h
k1 = self.dynamics(t,X,U)
k2 = self.dynamics(t+h/2.,X+k1*h/2.,U)
k3 = self.dynamics(t+h/2.,X+k2*h/2.,U)
k4 = self.dynamics(t+h,X+k3*h,U)
return t+h,X+h*(k1+2.*k2+2.*k3+k4)/6.
def one_step_sim(self,t,X,U):
runge_num = self.runge_num
for num in range(0, runge_num):
t,X = self.rk4(t,X,U)
return t,X
def GetCCM(self,alp):
dt = self.dt
epsilon = 0.
XX = self.XXd
N = XX.shape[0]-1
I = np.identity(6)
WW = {}
for i in range(N+1):
WW[i] = cp.Variable((6,6),PSD=True)
nu = cp.Variable(nonneg=True)
chi = cp.Variable(nonneg=True)
constraints = [chi*I-WW[0] >> epsilon*I,WW[0]-I >> epsilon*I]
for k in range(N):
Xk = XX[k,:]
Ax = self.A
Bx = self.GetB(Xk)
Wk = WW[k]
Wkp1 = WW[k+1]
            constraints += [-2*alp*Wk-(-(Wkp1-Wk)/dt+Ax@Wk+Wk@Ax.T-2*nu*Bx@Bx.T) >> epsilon*I]
constraints += [chi*I-Wkp1 >> epsilon*I,Wkp1-I >> epsilon*I]
prob = cp.Problem(cp.Minimize(chi),constraints)
prob.solve(solver=cp.MOSEK)
cvx_status = prob.status
print(cvx_status)
WWout = {}
MMout = {}
for i in range(N+1):
WWout[i] = WW[i].value/nu.value
MMout[i] = np.linalg.inv(WWout[i])
chi = chi.value
nu = nu.value
cvx_optval = chi/alp
return cvx_status,cvx_optval,WWout,MMout,chi,nu
def CLFQP(self,X,Xd,Xdp1,M,Ud,alp):
dt = self.dt
U = cp.Variable((8,1))
Ud = np.array([Ud]).T
p = cp.Variable((1,1))
fmin = self.fmin
fmax = self.fmax
A = self.A
Bx = self.GetB(X)
Bxd = self.GetB(Xd)
evec = np.array([X-Xd]).T
Mdot = self.GetPdot(X,Ud,Xdp1)
constraints = [evec.T@(Mdot+M@A+A.T@M)@evec+2*evec.T@M@Bx@U-2*evec.T@M@Bxd@Ud <= -2*alp*evec.T@M@evec+p]
for i in range(8):
constraints += [U[i,0] <= fmax, U[i,0] >= fmin]
prob = cp.Problem(cp.Minimize(cp.sum_squares(U-Ud)+p**2),constraints)
prob.solve()
cvx_status = prob.status
U = U.value
U = np.ravel(U)
return U
def FinalTrajectory(self,MM,alp,XXdRCT,UUdRCT):
dt = self.dt
XXd = self.XXd
UUd = self.UUd
N = UUd.shape[0]
X0 = XXd[0,:]
B = self.GetB(X0)
t = 0
t1 = 0
t2 = 0
X1 = X0
X2 = X0
X3 = X0
Xd = XXd[0,:]
XdRCT = XXdRCT[0,:]
this = np.zeros(N+1)
X1his = np.zeros((N+1,X0.size))
X2his = np.zeros((N+1,X0.size))
X3his = np.zeros((N+1,X0.size))
this[0] = t
X1his[0,:] = X1
U1his = np.zeros((N,B.shape[1]))
X2his[0,:] = X2
U2his = np.zeros((N,B.shape[1]))
X3his[0,:] = X3
U3his = np.zeros((N,B.shape[1]))
U1hisN = np.zeros(N)
U2hisN = np.zeros(N)
U3hisN = np.zeros(N)
U1hisND = np.zeros(N)
U2hisND = np.zeros(N)
U3hisND = np.zeros(N)
dnMs = np.zeros(N)
dnUs = np.zeros(N)
dnXs = np.zeros(N)
for i in range(N):
M = MM[i]
A = self.A
Bx3 = self.GetB(X3)
Q = 2.4*np.identity(6)
R = 1*np.identity(8)
K,P,E = control.lqr(A,Bx3,Q,R)
P1 = self.GetP(X1)
U3 = UUd[i,:]-Bx3.T@P@(X3-Xd)
for j in range(8):
if U3[j] >= self.fmax:
U3[j] = self.fmax
elif U3[j] <= self.fmin:
U3[j] = self.fmin
#U1 = self.CLFQP(X1,Xd,XXd[i+1,:],P,UUd[i,:],alp)
U1 = self.CLFQP(X1,XdRCT,XXdRCT[i+1,:],P1,UUdRCT[i,:],alp)
U2 = self.CLFQP(X2,XdRCT,XXdRCT[i+1,:],M,UUdRCT[i,:],alp)
t,X1 = self.one_step_sim(t,X1,U1)
t1,X2 = self.one_step_sim(t1,X2,U2)
t2,X3 = self.one_step_sim(t2,X3,U3)
Xd = XXd[i+1,:]
XdRCT = XXdRCT[i+1,:]
d1 = np.random.choice(np.array([-1,1]),1)[0]
d2 = np.random.choice(np.array([-1,1]),1)[0]
d3 = np.random.choice(np.array([-1,1]),1)[0]
d1 = np.array([0,0,0,d1,d2,d3])*2
#d1 = 0
#d1 = np.hstack((np.zeros(3),(np.random.rand(3)*2-1)))*dratio*20
#d1 = (np.random.rand(6)*2-1)*0.1
X1 = X1+d1*dt
X2 = X2+d1*dt
X3 = X3+d1*dt
this[i+1] = t
X1his[i+1,:] = X1
U1his[i,:] = U1
X2his[i+1,:] = X2
U2his[i,:] = U2
X3his[i+1,:] = X3
U3his[i,:] = U3
U1hisN[i] = np.linalg.norm(U1)
U2hisN[i] = np.linalg.norm(U2)
U3hisN[i] = np.linalg.norm(U3)
U1hisND[i] = np.linalg.norm(U1-UUd[i,:])
U2hisND[i] = np.linalg.norm(U2-UUdRCT[i,:])
U3hisND[i] = np.linalg.norm(U3-UUdRCT[i,:])
dnMs[i] = np.linalg.norm(M-P1,ord=2)
dnUs[i] = np.linalg.norm(U1-U2)
dnXs[i] = np.linalg.norm(X1-X2)
return this,X1his,U1his,X2his,U2his,X3his,U3his,U1hisN,U2hisN,U3hisN,U1hisND,U2hisND,U3hisND,dnMs,dnUs,dnXs
def Rot2d(th):
R = np.array([[np.cos(th),-np.sin(th)],[np.sin(th),np.cos(th)]])
return R
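# --- Illustrative sketch (not part of the original script) --------------------
# Rot2d above builds the standard 2-D rotation matrix; for example a 90-degree
# rotation maps the x unit vector (approximately) onto the y unit vector.
def _demo_rot2d():
    return Rot2d(np.pi/2)@np.array([1.0, 0.0])  # approximately [0, 1]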
def GetTubePlot(XXdRCT,alp,chi,d_over,th):
Rtube = d_over*np.sqrt(chi)/alp
xxdRCT = XXdRCT[:,0:2]
dxxdRCT = np.diff(xxdRCT,axis=0)
for i in range(dxxdRCT.shape[0]):
dxxdRCT[i,:] = Rot2d(th)@(dxxdRCT[i,:]/np.linalg.norm(dxxdRCT[i,:]))
return xxdRCT[0:dxxdRCT.shape[0],:]+dxxdRCT*Rtube
def SaveDict(filename,var):
output = open(filename,'wb')
pickle.dump(var,output)
output.close()
pass
def LoadDict(filename):
pkl_file = open(filename,'rb')
varout = pickle.load(pkl_file)
pkl_file.close()
return varout
if __name__ == "__main__":
sc = Spacecraft(XXd0,UUd0)
alp = np.load('data/params/alpha_mmicro/alp.npy')
XXdRCT = np.load('data/params/desiredRCT/XXdRCT.npy')
UUdRCT = np.load('data/params/desiredRCT/UUdRCT.npy')
np.random.seed(seed=32)
cvx_status,cvx_optval,WW,MM,chi,nu = sc.GetCCM(alp)
this,X1his,U1his,X2his,U2his,X3his,U3his,U1hisN,U2hisN,U3hisN,U1hisND,U2hisND,U3hisND,dnMs,dnUs,dnXs = sc.FinalTrajectory(MM,alp,XXdRCT,UUdRCT)
xxTube1 = GetTubePlot(XXdRCT,alp,chi,d_over,np.pi/2)
xxTube2 = GetTubePlot(XXdRCT,alp,chi,d_over,-np.pi/2)
Nplot = xxTube1.shape[0]
plt.figure()
plt.plot(X1his[:,0],X1his[:,1])
plt.plot(X2his[:,0],X2his[:,1])
plt.plot(X3his[:,0],X3his[:,1])
#plt.plot(xxTube1[:,0],xxTube1[:,1])
#plt.plot(xxTube2[:,0],xxTube2[:,1])
plt.plot(XXdRCT[:,0],XXdRCT[:,1],'--k')
plt.fill_between(xxTube1[:,0],xxTube1[:,1],np.zeros(Nplot),facecolor='black',alpha=0.2)
plt.fill_between(xxTube2[:,0],xxTube2[:,1],np.zeros(Nplot),facecolor='white')
    plt.fill_between(np.linspace(19.347,22,100),16.9*np.ones(100))
__author__ = "<NAME>"
import numpy as np
import json
import dill as pickle
import warnings
import inspect
from .model_parameters import ModelParameterArray, ModelParameter
from .spectral_shapes import SED
from .data_loader import ObsData
from .utils import get_info
from .plot_sedfit import PlotSED
from .cosmo_tools import Cosmo
__all__=['Model','MultiplicativeModel']
class Model(object):
def __init__(self,name='no-name',nu_size=200,model_type='base_model',scale='lin-lin',cosmo=None,nu_min=None,nu_max=None):
self.model_type=model_type
self.name=name
self.SED = SED(name=self.name)
self.parameters = ModelParameterArray()
self._scale=scale
self.nu_size=nu_size
self.nu_min=nu_min
self.nu_max=nu_max
self.flux_plot_lim = 1E-30
if cosmo is None:
self.cosmo=Cosmo()
self._set_version(v=None)
@property
def version(self):
return self._version
def _set_version(self, v=None):
if v is None:
self._version = get_info()['version']
else:
self._version = v
def _prepare_nu_model(self,nu,loglog):
if nu is None:
x1 = np.log10(self.nu_min)
x2 = np.log10(self.nu_max)
lin_nu = np.logspace(x1, x2, self.nu_size)
log_nu = np.log10(lin_nu)
else:
if np.shape(nu) == ():
nu = np.array([nu])
if loglog is True:
lin_nu = np.power(10., nu)
log_nu = nu
else:
                log_nu = np.log10(nu)
"""Neighborhood SPIN Module."""
import numpy as np
from .utils import check_distance_matrix, spin_energy
class NeighborhoodSPIN():
"""Neighborhood SPIN clustering method.
Parameters
----------
    initial_sigma : float, optional (default=2**10)
Initial sigma value. This parameter controls the weight matrix
dispersion.
update_factor : float, optional (default=0.5)
The number that will update the sigma value at each iteration. Sigma
will be updated by sigma = sigma * update_factor.
max_iter : int, optional (default=100)
The maximum number of iterations of each round of sorting.
verbose : boolean, optional (default=False)
Flag indicating to show logs and information during the SPIN process.
Attributes
----------
distances_ : array, shape (n, n)
The original distances matrix provided.
permutation_ : array, shape (n, n)
Permutation matrix that can be applied to the original distances matrix
to get to the ordered distances matrix.
ordered_distances_ : array, shape (n, n)
        Distances matrix reordered by the permutation matrix. Before `run` is
        called, this is the original distances matrix.
References
----------
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
Sortiug points into neighborhoods (SPIN): data analysis and
visualization by ordering distance matrices, Bioinformatics, Volume 21,
Issue 10, , Pages 2301–2308,
https://doi.org/10.1093/bioinformatics/bti329
"""
    def __init__(self, initial_sigma=2**10, update_factor=0.5, max_iter=100,
verbose=False):
        self.initial_sigma = initial_sigma
self.update_factor = update_factor
self.max_iter = max_iter
self.verbose = verbose
def run(self, X):
"""Execute the Neighborhood sorting.
Parameters
----------
X : array, shape (n, n)
Returns
-------
self : NeighborhoodSPIN
The object itself containing the ordered distances matrix.
"""
check_distance_matrix(X)
self.size_ = X.shape[0]
self.distances_ = X
self.permutation_ = np.identity(self.size_)
self.ordered_distances_ = self.permutation_.dot(self.distances_) \
.dot(self.permutation_.T)
        sigma = self.initial_sigma
while sigma > 1:
weight_matrix = initial_weight_matrix(self.size_, sigma)
permutation = neighborhood(self.ordered_distances_,
weight_matrix,
self.max_iter,
self.verbose)
self.ordered_distances_ = permutation.dot(self.ordered_distances_)\
.dot(permutation.T)
self.permutation_ = permutation.dot(self.permutation_)
sigma = sigma * self.update_factor
return self
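# --- Illustrative usage sketch (hypothetical data, not from the original module) ---
# A minimal way to drive the class above: build a symmetric distance matrix from
# shuffled 1-D points and let the sorter recover an ordering. The variable names
# and parameter values here are arbitrary.
def _demo_neighborhood_spin():
    rng = np.random.RandomState(0)
    points = rng.permutation(np.linspace(0.0, 1.0, 30))  # shuffled 1-D data
    distances = np.abs(points[:, None] - points[None, :])  # symmetric, zero diagonal
    spin = NeighborhoodSPIN(2 ** 5).run(distances)
    return spin.ordered_distances_, spin.permutation_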
def neighborhood(distances, weight_matrix, max_iter=100, verbose=False):
"""Neighborhood SPIN algorithm.
Parameters
----------
distances : np.array, shape [n, n]
Distance symmetric square matrix.
weight_matrix : np.array, shape [n, n]
A initial weight matrix to update permutaions matrix.
max_iter : int, default=100
Maximum number of iterations.
verbose : bool
Verbosity flag, if it is true print useful information about the
process.
Returns
-------
permutation : np.array, shape [n, n]
Permutation matrix with the same dimensions of the distance matrix.
"""
permutation = np.identity(distances.shape[0])
mismatch_matrix = distances.dot(weight_matrix)
trace = np.trace(permutation.dot(mismatch_matrix))
for i in range(max_iter):
(new_permutation,
new_mismatch) = single_neighborhood_sort(distances, weight_matrix)
new_trace = np.trace(new_permutation.dot(new_mismatch))
if new_trace == trace:
break
weight_matrix = new_permutation.T.dot(weight_matrix)
trace = new_trace
return new_permutation
def single_neighborhood_sort(distances, weight_matrix):
"""Single stage on the neighborhood sorting process.
Parameters
----------
distances : array, shape (n, n)
The distances matrix to be sorted.
weight_matrix : array, shape (n, n)
        The weight matrix to take into account in the sorting. The distribution
        of the matrix values controls the scale of the sorting operations.
"""
size = len(distances)
mismatch = distances.dot(weight_matrix)
min_index = np.argmin(mismatch, axis=1)
min_values = mismatch[np.arange(size), min_index]
max_value = max(min_values)
sort_score = (min_index + 1.
- 0.1 * np.sign((size / 2. - min_index + 1.)) *
min_values / max_value)
    sorted_ind = np.argsort(sort_score)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 7/6/20 4:58 PM
# @Author : anonymous
# @File : compareResults.py
import numpy as np
import os
import re
from qiskit.visualization import plot_histogram
import matplotlib.pyplot as plt
def ks_score(r1, r2):
r = r1-r2
max_ks = max(max(r), 0)
min_ks = min(min(r), 0)
return (abs(max_ks)+abs(min_ks))/r1.sum()
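# --- Illustrative sketch (hypothetical counts, not from the original script) ---
# ks_score above measures the spread of the signed differences between two count
# vectors, normalised by the first vector's total. A toy check:
def _demo_ks_score():
    r1 = np.array([50, 30, 20, 0])  # e.g. counts from one backend
    r2 = np.array([40, 35, 15, 10])  # counts from another backend
    return ks_score(r1, r2)  # (|10| + |-10|) / 100 = 0.2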
def trans_str(qubit_number:int, number:int)->str:
results = bin(number)
results = results[2:len(results)]
return results.zfill(qubit_number)
def trans(data:str,qubit_number:int,flag1:int): #flag1: to check if the order is upside down
result = re.split(',|}',data)
final_data = []
for i in range(0,pow(2,qubit_number)):
if flag1==0:
pattern = re.compile(trans_str(qubit_number,i)+"':")
else:
pattern = re.compile(''.join(reversed(trans_str(qubit_number,i)))+"':")
flag = 0
for results in result:
s = re.search(pattern,results)
if s is not None:
final_data.append(float(results[s.span()[1]:]))
flag = 1
if flag==0:
final_data.append(0)
return final_data
def read_results(filename: str, qubit_number:int):
pattern_qiskit = re.compile("Qiskt")
pattern_pyquilc = re.compile("Pyquil_Class")
flag1 = 0
with open(filename, 'r') as f:
print("Now read:"+filename)
if (re.search(pattern_pyquilc,filename) is not None) or (re.search(pattern_qiskit,filename) is not None):
flag1 = 1
line = f.readline()
end_file = line
while end_file:
end_file=f.readline()
line = line+end_file
data = trans(line, qubit_number,flag1)
return data
def compare(path:str, thershold:float, qubit_number:int):
print("qubit_number:",qubit_number)
data = []
name = []
right_file = re.compile("startQiskit_QC") # only consider Qiskit noisy
#right_file = re.compile("startQiskit")
#right_file = re.compile("start)
files = os.listdir(path)
for file in files:
if (not os.path.isdir(file)) & (right_file.search(file) is not None):
data.append(read_results(path+"/"+file, qubit_number))
name.append(file)
candidates = [] # save right answer candidates
answer = -1
for results in data:
flag = -1
for i in range(0,len(candidates)):
if ks_score(candidates[i]/(candidates[i].sum()),np.asarray(results)/(np.asarray(results).sum()))<thershold:
flag = i
                candidates[i] = candidates[i] + np.asarray(results)
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append("..")
sys.path.append("../..")
from datasets import models
sys.modules['Data.Datarr.models'] = models
import os
from datasets.imdb import imdb
import numpy as np
import pickle
import uuid
from model.config import cfg
import json
class datarr(imdb):
def __init__(self, name, nof_ent_classes, nof_rel_classes, image_set):
imdb.__init__(self, name)
self._image_set = image_set
self._devkit_path = self._get_default_path()
self.set_paths()
self._classes = ["invalid", "subject", "object", "none"]
self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
self._image_ext = '.jpg'
self._data = self._load_data()
self._image_index = [i for i in self._data]
# Default to roidb handler
self._roidb_handler = self.gt_roidb
self._salt = str(uuid.uuid4())
self._comp_id = 'comp4'
self.nof_ent_classes = nof_ent_classes
self.nof_rel_classes = nof_rel_classes
# PASCAL specific config options
self.config = {'cleanup': True,
'use_salt': True,
'use_diff': False,
'matlab_eval': False,
'rpn_file': None}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def set_paths(self):
pass
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, index)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _get_widths(self):
return [self.im_metadata[m]["width"] for m in self.im_metadata]
def _get_default_path(self):
"""
Return the default path where vg is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, self.name)
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
gt_roidb = [self._load_annotation(index)
for index in self.image_index]
return gt_roidb
def rpn_roidb(self):
roidb = self.gt_roidb()
return roidb
def _load_rpn_roidb(self, gt_roidb):
filename = self.config['rpn_file']
print('loading {}'.format(filename))
assert os.path.exists(filename), \
'rpn data not found at: {}'.format(filename)
with open(filename, 'rb') as f:
box_list = pickle.load(f)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_annotation(self, index):
"""
Load image and bounding boxes info
"""
image = index
metadata = self.im_metadata[image]
anot = self.annotations[image]
width = metadata['width']
height = metadata['height']
seen_objs = {}
for _, relation in enumerate(anot):
sub = relation["subject"]
if not str(sub) in seen_objs:
seen_objs[str(sub)] = sub
obj = relation["object"]
if not str(obj) in seen_objs:
seen_objs[str(obj)] = obj
num_objs = len(seen_objs)
boxes = np.zeros((num_objs, 4), dtype=np.float32)
partial_entity_class = np.zeros((num_objs, self.nof_ent_classes), dtype=np.int32)
partial_relation_class = np.zeros((num_objs, num_objs, self.nof_rel_classes), dtype=np.int32)
gt_classes = np.zeros((0, num_objs, 1), dtype=np.int32)
overlaps = np.zeros((0, num_objs, self.num_classes), dtype=np.int64)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
queries = np.zeros((0, 2 * self.nof_ent_classes + self.nof_rel_classes), dtype=np.float32)
# Load object bounding boxes into a data frame.
one_hot_obj = np.eye(self.nof_ent_classes)
for ix, obj in enumerate(seen_objs):
boxes[ix] = np.array(seen_objs[obj]["bbox"])[[2, 0, 3, 1]]
partial_entity_class[ix] = one_hot_obj[seen_objs[obj]["category"]]
seg_areas[ix] = (boxes[ix][2] - boxes[ix][0]) * (boxes[ix][3] - boxes[ix][1])
indices = np.where(boxes[:, 2].astype(int) == boxes[:, 0].astype(int))
boxes[indices, 2] += 1
indices = np.where(boxes[:, 3].astype(int) == boxes[:, 1].astype(int))
boxes[indices, 3] += 1
assert (boxes[:, 2] > boxes[:, 0]).all()
assert (boxes[:, 3] > boxes[:, 1]).all()
        # load gt classes
seen_rel = {}
one_hot_rel = np.eye(self.nof_rel_classes)
for _, relation in enumerate(anot):
sub = relation["subject"]
obj = relation["object"]
sub_index = list(seen_objs).index(str(sub))
obj_index = list(seen_objs).index(str(obj))
partial_relation_class[sub_index, obj_index, relation["predicate"]] = 1
for _, relation in enumerate(anot):
sub = relation["subject"]
obj = relation["object"]
sub_index = list(seen_objs).index(str(sub))
obj_index = list(seen_objs).index(str(obj))
if sub_index == obj_index:
continue
sub_class = sub["category"]
obj_class = obj["category"]
rel_class = relation["predicate"]
rel_str = str(sub_class) + "_" + str(rel_class) + "_" + str(obj_class)
found = False
if not rel_str in seen_rel:
query_gt_classes = np.zeros((1, num_objs, 1), dtype=np.int32)
                query_overlaps = np.zeros((1, num_objs, self.num_classes), dtype=np.int64)
import os, sys
import inspect
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import numpy as np
import pytest
import librosa
import soundpy as sp
test_dir = 'test_audio/'
test_audiofile = '{}audio2channels.wav'.format(test_dir)
test_traffic = '{}traffic.wav'.format(test_dir)
test_python = '{}python.wav'.format(test_dir)
test_horn = '{}car_horn.wav'.format(test_dir)
samples_48000, sr_48000 = librosa.load(test_audiofile, sr=48000)
samples_44100, sr_44100 = librosa.load(test_audiofile, sr=44100)
samples_22050, sr_22050 = librosa.load(test_audiofile, sr=22050)
samples_16000, sr_16000 = librosa.load(test_audiofile, sr=16000)
samples_8000, sr_8000 = librosa.load(test_audiofile, sr=8000)
def test_shape_samps_channels_mono():
input_data = np.array([1,2,3,4,5])
output_data = sp.dsp.shape_samps_channels(input_data)
assert np.array_equal(input_data, output_data)
def test_shape_samps_channels_stereo_correct():
input_data = np.array([1,2,3,4,5,6,7,8,9,10]).reshape(5,2)
output_data = sp.dsp.shape_samps_channels(input_data)
assert np.array_equal(input_data, output_data)
def test_shape_samps_channels_stereo_incorrect():
input_data = np.array([1,2,3,4,5,6,7,8,9,10]).reshape(2,5)
output_data = sp.dsp.shape_samps_channels(input_data)
assert np.array_equal(input_data.T, output_data)
def test_calc_phase():
np.random.seed(seed=0)
rand_fft = np.random.random(2) + np.random.random(2) * 1j
phase = sp.dsp.calc_phase(rand_fft)
value1 = np.array([0.67324134+0.73942281j, 0.79544405+0.60602703j])
assert np.allclose(value1, phase)
def test_calc_phase_framelength10_default():
frame_length = 10
time = np.arange(0, 10, 0.1)
signal = np.sin(time)[:frame_length]
    fft_vals = np.fft.fft(signal)
## 1. Count the level of gauges
## 2. Sort the gauges by level
## 3. Automatic bias correction from upstreams to downstreams at monthly scale
## 4. Take each gauge delta Q added to downstreams
## 5. Bias correction for the ungauged river at monthly scale
## 6. Bias scale mapping at daily scle
## Input: fast_connectivity1.csv; Qout_61_18_daily.nc, Qmon.nc, Qbc_61_18_daily.nc, Qbc_month.nc
## gauge_id.csv; Q_obs.csv.
## Output: revised Qbc_month.nc and Qbc_61_18_daily.nc
## written by <NAME> 2022-05-05
import os
import pandas as pd
import xarray as xr
import numpy as np
from UDSBC.Q_pre import Qmonth
from UDSBC.Rivers import fast_connectivity,revise,upstream,downstream
from UDSBC.util import filter_nan
from UDSBC.BC import EQM,SDM
############### input_output_file ###############
river_shp = './input/liao.shp'
basin_id = './input/basin_id.csv'
rapid_Qout = './input/Qout.nc'
gauge_file = './input/gauge_id.csv'
obs_file = './input/Q_obs.csv'
river_connect = './output/fast_connectivity.csv'
river_connect1 = './output/fast_connectivity1.csv'
Q_daily_file = './output/Qout_61_18_daily.nc'
Q_month_file = './output/Qmon.nc'
bc_daily_file = './output/Qbc_61_18_daily.nc'
bc_monthly_file = './output/Qbc_month.nc'
############### creat_Qfile ###############
Qmonth.creat_Q(basin_id,rapid_Qout,Q_daily_file,Q_month_file)
sysComm1 = "cdo monmean ./output/Qout_61_18_daily.nc ./output/Qmon.nc"
os.system(sysComm1)
############### creat_Qbcfile ###############
sysComm2 = "cp ./output/Qout_61_18_daily.nc ./output/Qbc_61_18_daily.nc"
sysComm3 = "cp ./output/Qmon.nc ./output/Qbc_month.nc"
os.system(sysComm2)
os.system(sysComm3)
############### River_connet ###############
fast_connectivity.connectivity(river_shp,river_connect)
revise.river_revise(river_connect,river_connect1)
############### BC producer ##############
Qout_m = xr.open_dataset(Q_month_file).load()['qout'] ##original
Rivers = xr.open_dataset(Q_month_file).load()['rivers']
dates = xr.open_dataset(Q_month_file).load()['time']
Qbc_m = xr.open_dataset(Q_month_file).load()['qout'] ##need to BC
print('**** Read gauges file *****')
gauges = pd.read_csv(gauge_file)
#### count the gauges level ###
print('**** Count the gauges level *****')
gauge_level = np.empty(len(gauges['gaugeid']))
for i in range(len(gauge_level)):
uprivers = upstream.find_upstream(gauges['gaugeid'][i],river_connect1)
gauge_level[i] = len(list(set(uprivers).intersection(set(gauges['gaugeid']))))
print('**** Sort the gauges(ascending) *****')
levels = pd.DataFrame({'level':gauge_level})
gauges[['level']] = levels[['level']]
sort_gauges = gauges.sort_values(by = 'level') ## level_ascending
sort_gauges.reset_index(drop=True, inplace=True)
gaugeid = sort_gauges['gaugeid']
######### Read the obs file #########
print('**** Read the obs file *****')
Qobs = pd.read_csv(obs_file)
obs = np.empty([480,len(gaugeid)])
for i in range(len(gaugeid)):
obs[:,i] = Qobs[sort_gauges['station'][i]]
######### Begin to BC at monthly scale #########
print('**** Begin to BC at monthly scale *****')
bc_rivers = []
Qsum = np.zeros(696)
delta_sum = np.zeros(696)
gauge_bc = np.zeros(696)
for i in range(len(gaugeid)):
print('**** Finished '+str(int(i/len(gaugeid)*100))+'% *****')
## gauge eqm
k = np.where(Rivers == gaugeid[i])[0][0]
obs_cal = obs[:,i]
sim_cal = Qout_m[0:480,k]
[s,o] = filter_nan(sim_cal, obs_cal)
alldata = np.array(Qout_m[:,k])
alldata1 = np.array(Qout_m[:,k])
[s,o] = filter_nan(sim_cal, obs_cal)
gauge_bc = SDM.gamma_qm(s,o,alldata)
if any(gauge_bc<0):
gauge_bc[gauge_bc<0] = alldata1[gauge_bc<0]
delta_gauge = gauge_bc-np.array(Qout_m[:,k])
#mean = mean+delta_gauge/Qout_m[:,k])
Qsum = Qsum+np.array(Qout_m[:,k])
delta_sum = delta_sum+delta_gauge
Qbc_m[:,k] = gauge_bc
bc_rivers.append(gaugeid[i])
## downstream revise
downstreams = downstream.find_downstream(gaugeid[i],river_connect1)
for j in range(len(downstreams)):
m = np.where(Rivers == downstreams[j])[0][0]
        Qout_m[:,m] = Qout_m[:,m]+np.multiply(Qout_m[:,m],(np.sum(delta_gauge)))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 3 17:14:53 2019
@author: liuhongbing
"""
import pandas as pd
import numpy as np
from scipy import stats
from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix, roc_curve, auc
import tensorflow as tf
# Load the dataset
def read_data(file_path):
column_names = ['user-id', 'activity', 'timestamp', 'x-axis', 'y-axis', 'z-axis']
data = pd.read_csv(file_path, header=None, names=column_names)
    data['z-axis'] = data['z-axis'].apply(lambda x : str(x).split(";")[0])
data['z-axis'] = data['z-axis'].astype('float32')
return data
# Normalize the data (z-score)
def feature_normalize(dataset):
mu = np.mean(dataset, axis=0)
print('mu:',mu)
sigma = np.std(dataset, axis=0)
print('sigma:',sigma)
return (dataset - mu) / sigma
# Create time windows: 90 × 50 ms, i.e. 4.5 s, advancing 45 records each step (half-overlapping windows)
def windows(data, size):
start = 0
while start < data.count():
yield start, start + size
start += (size / 2)
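# --- Illustrative sketch (hypothetical series, not part of the original script) ---
# The generator above yields half-overlapping (start, start + size) index pairs;
# the starts become floats because of `size / 2`, which is why segment_signal
# below casts them to int.
def _demo_windows():
    ts = pd.Series(range(10))
    return list(windows(ts, 4))  # [(0, 4), (2.0, 6.0), (4.0, 8.0), (6.0, 10.0), (8.0, 12.0)]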
# Build the input data: each segment contains 90 consecutive records of the x, y, z axes.
# `stats.mode` picks the most frequent activity among those 90 records and uses it as the
# segment label. This is debatable; one could instead build each segment exclusively from
# records of a single activity.
def segment_signal(data, window_size=90):
segments = np.empty((0, window_size, 3))
labels = np.empty((0))
print (len(data['timestamp']))
count = 0
for (start, end) in windows(data['timestamp'], window_size):
print (count)
start = int(start)
end = int(end)
count += 1
x = data["x-axis"][start:end]
y = data["y-axis"][start:end]
z = data["z-axis"][start:end]
        if (len(data['timestamp'][start:end]) == window_size):
segments = np.vstack([segments, np.dstack([x, y, z])])
labels = np.append(labels, stats.mode(data["activity"][start:end])[0][0])
return segments, labels
# Initialize network weights
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
# 初始化神经网络参数
def bias_variable(shape):
initial = tf.constant(0.0, shape=shape)
return tf.Variable(initial)
# Perform the depthwise convolution
def depthwise_conv2d(x, W):
return tf.nn.depthwise_conv2d(x, W, [1, 1, 1, 1], padding='VALID')
# Apply a 1-D convolution to each input channel and pass the result through ReLU
def apply_depthwise_conv(x, kernel_size, num_channels, depth):
weights = weight_variable([1, kernel_size, num_channels, depth])
biases = bias_variable([depth * num_channels])
return tf.nn.relu(tf.add(depthwise_conv2d(x, weights), biases))
# Apply 1-D max pooling to the convolution output
def apply_max_pool(x, kernel_size, stride_size):
return tf.nn.max_pool(x, ksize=[1, 1, kernel_size, 1],
strides=[1, 1, stride_size, 1], padding='VALID')
root = "/Users/liuhongbing/Documents/tensorflow/data/WISDM_ar_v1.1/"
dataset2 = read_data(root +'WISDM_ar_v1.1_raw.txt')
dataset2.fillna(0, inplace=True)
dataset = dataset2[:200000]
dataset['x-axis'] = feature_normalize(dataset['x-axis'])
dataset['y-axis'] = feature_normalize(dataset['y-axis'])
dataset['z-axis'] = feature_normalize(dataset['z-axis'])
segments, labels = segment_signal(dataset)
labels = np.asarray(pd.get_dummies(labels), dtype = np.int8)
# Build the model input
## [batch_size, height, width, channels]
reshaped_segments = segments.reshape(len(segments), 1, 90, 3)
# Split the prepared input data into training and test sets, following a 70/30 rule.
train_test_split = np.random.rand(len(reshaped_segments)) < 0.70
train_x = reshaped_segments[train_test_split]
train_y = labels[train_test_split]
test_x = reshaped_segments[~train_test_split]
test_y = labels[~train_test_split]
# Define the input dimensions and the number of labels
input_height = 1
input_width = 90
num_labels = 4 # 6
num_channels = 3
batch_size = 10
kernel_size = 60
depth = 60
# Number of hidden-layer neurons
num_hidden = 1000
learning_rate = 0.0001
# Number of iterations (epochs) used to reduce the cost
training_epochs = 8
total_batchs = reshaped_segments.shape[0] // batch_size
# The neural network is built with TensorFlow below.
X = tf.placeholder(tf.float32, shape=[None,input_height,input_width,num_channels])
Y = tf.placeholder(tf.float32, shape=[None,num_labels])
c = apply_depthwise_conv(X,kernel_size,num_channels,depth)
p = apply_max_pool(c,20,2)
c = apply_depthwise_conv(p,6,depth*num_channels,depth//10)
shape = c.get_shape().as_list()
c_flat = tf.reshape(c, [-1, shape[1] * shape[2] * shape[3]])
f_weights_l1 = weight_variable([shape[1] * shape[2] * depth * num_channels * (depth//10), num_hidden])
f_biases_l1 = bias_variable([num_hidden])
f = tf.nn.tanh(tf.add(tf.matmul(c_flat, f_weights_l1),f_biases_l1))
out_weights = weight_variable([num_hidden, num_labels])
out_biases = bias_variable([num_labels])
y_ = tf.nn.softmax(tf.matmul(f, out_weights) + out_biases)
loss = -tf.reduce_sum(Y * tf.log(y_))
optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(loss)
correct_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
cost_history = np.empty(shape=[1], dtype=float)
# Start training
with tf.Session() as session:
tf.initialize_all_variables().run()
    # Iterate over epochs
for epoch in range(training_epochs):
for b in range(total_batchs):
offset = (b * batch_size) % (train_y.shape[0] - batch_size)
batch_x = train_x[offset:(offset + batch_size), :, :, :]
batch_y = train_y[offset:(offset + batch_size), :]
_, c = session.run([optimizer, loss], feed_dict={X: batch_x, Y: batch_y})
cost_history = np.append(cost_history, c)
print("Epoch {}: Training Loss = {}, Training Accuracy = {}".format(
epoch, c, session.run(accuracy, feed_dict={X: train_x, Y: train_y})))
y_p = tf.argmax(y_, 1)
y_true = np.argmax(test_y, 1)
final_acc, y_pred = session.run([accuracy, y_p], feed_dict={X: test_x, Y: test_y})
print("Testing Accuracy: {}".format(final_acc))
    temp_y_true = np.unique(y_true)
import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
############################## CAMERA CALIBRATION ##############################
# prepare object points
nx = 9
ny = 6
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((ny*nx,3), np.float32)
objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob("camera_cal/calibration*.jpg")
# Step through the list and search for chessboard corners
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
# Draw and display the corners
cv2.drawChessboardCorners(img, (nx,ny), corners, ret)
plt.imshow(img)
# Test undistortion on an image
img = cv2.imread('camera_cal/calibration1.jpg')
img_size = (img.shape[1], img.shape[0])
# Do camera calibration given object points and image points
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)
dst = cv2.undistort(img, mtx, dist, None, mtx)
cv2.imwrite('calibration_cal/calibration1_undist.jpg',dst)
# Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
dist_pickle = {}
dist_pickle["mtx"] = mtx
dist_pickle["dist"] = dist
pickle.dump( dist_pickle, open( "camera_cal/wide_dist_pickle.p", "wb" ) )
#dst = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
# Visualize undistortion
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16,4))
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(dst)
ax2.set_title('Undistorted Image', fontsize=30)
############################## PIPELINE ##############################
## 1. Distortion correction
# Load undistorted road image
Img = cv2.imread('examples/signs_vehicles_xygrad.png')
# Perform distortion correction using the result from camera calibration above
ImgC = cv2.undistort(Img, mtx, dist, None, mtx)
# Visualize undistortion
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16,4))
ax1.imshow(cv2.cvtColor(Img,cv2.COLOR_BGR2RGB))
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(cv2.cvtColor(ImgC,cv2.COLOR_BGR2RGB))
ax2.set_title('Undistorted Image', fontsize=30)
## 2. Binary image
def abs_sobel_thresh(image,orient='x',sobel_kernel=3,thresh=(0, 255)):
# Grayscale
gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
# Gaussian blur
gray = cv2.GaussianBlur(gray,(9,9),0)
# Apply cv2.Sobel()
if orient == 'x':
sobel = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=sobel_kernel)
elif orient == 'y':
sobel = cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=sobel_kernel)
else:
print("Error: orient must be either x or y.")
# Take the absolute value of the output from cv2.Sobel()
abs_sobel = np.absolute(sobel)
# Scale the result to an 8-bit range (0-255)
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
# Apply lower and upper thresholds
grad_binary = np.zeros_like(scaled_sobel)
grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
# Create binary_output
return grad_binary
def mag_threshold(image, sobel_kernel=3, mag_thresh=(0, 255)):
# Grayscale
gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
# Gaussian blur
gray = cv2.GaussianBlur(gray,(9,9),0)
# Take both Sobel x and y gradients
sobelx = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=sobel_kernel)
# Calculate the gradient magnitude
gradmag = np.sqrt(sobelx**2 + sobely**2)
# Rescale to 8 bit
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
# Create a binary image of ones where threshold is met, zeros otherwise
mag_binary = np.zeros_like(gradmag)
mag_binary[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
return mag_binary
def dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):
# Grayscale
gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
# Gaussian blur
gray = cv2.GaussianBlur(gray,(9,9),0)
# Calculate the x and y gradients
sobelx = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=sobel_kernel)
# Take the absolute value of the gradient direction, apply a threshold, and create a binary image result
absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
dir_binary = np.zeros_like(absgraddir)
dir_binary[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
return dir_binary
def color_thresh(image,thr_r=(200, 255),thr_s=(170, 255)):
# Gaussian blur
image = cv2.GaussianBlur(image,(9,9),0)
# Separate the R channel to extract white lanes
r_channel = image[:,:,0]
# Convert to HLS color space and separate the S channel
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
s_channel = hls[:,:,2]
# Threshold color channel
color_binary = np.zeros_like(s_channel)
color_binary[((s_channel >= thr_s[0]) & (s_channel <= thr_s[1]))|((r_channel >= thr_r[0]) & (r_channel <= thr_r[1]))] = 1
return color_binary
# Choose a Sobel kernel size
ksize = 15 # Choose a larger odd number to smooth gradient measurements
# Apply each of the thresholding functions
gradx = abs_sobel_thresh(ImgC,orient='x',sobel_kernel=ksize,thresh=(20,100))
grady = abs_sobel_thresh(ImgC,orient='y',sobel_kernel=ksize,thresh=(5,100))
mag_binary = mag_threshold(ImgC,sobel_kernel=ksize,mag_thresh=(30,100))
dir_binary = dir_threshold(ImgC,sobel_kernel=ksize,thresh=(0.7,1.3))
color_binary = color_thresh(ImgC,thr_s=(200, 255),thr_r=(170, 255))
combined_binary = np.zeros_like(dir_binary)
combined_binary[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) | (color_binary == 1) ] = 1
# Visualize binary image
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16,4))
ax1.imshow(cv2.cvtColor(ImgC,cv2.COLOR_BGR2RGB))
ax1.set_title('Undistorted Image', fontsize=30)
ax2.imshow(combined_binary,cmap="gray")
ax2.set_title('Binary Image', fontsize=30)
## 3. Perspective transform
def warper(img,src,dst):
# Compute and apply perpective transform
img_size = (img.shape[1],img.shape[0])
M = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_NEAREST) # keep same size as input image
return warped
def region_of_interest(img, vertices):
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
# Manually get coordinates of 4 src corners
# image_sample = cv2.imread('examples/example_output.jpg')
# plt.imshow(image_sample)
src = np.float32(
[[263, 715],
[584, 458],
[700, 458],
[1156, 715]])
dst = np.float32(
[[320,720],
[320,0],
[960,0],
[960,720]])
vertices = np.array([[(70,720),(550, 450), (700, 450), (1210,720)]], dtype=np.int32)
masked_image = region_of_interest(combined_binary, vertices)
warped = warper(masked_image,src,dst)
Minv = cv2.getPerspectiveTransform(dst, src)
# Visualize perspective transform
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16,4))
ax1.imshow(cv2.cvtColor(ImgC,cv2.COLOR_BGR2RGB))
ax1.plot((vertices[0][0][0],vertices[0][1][0],vertices[0][2][0],vertices[0][3][0],vertices[0][0][0]),(vertices[0][0][1],vertices[0][1][1],vertices[0][2][1],vertices[0][3][1],vertices[0][0][1]),color='green',linewidth=1)
ax1.set_title('Undistorted Image',fontsize=30)
ax2.imshow(warped,cmap="gray")
ax2.set_title('Perspective Transform',fontsize=30)
## 4. Fitting lanes with polynomial
Img = cv2.imread('test_images/test2.jpg')
ImgC = cv2.undistort(Img, mtx, dist, None, mtx)
ksize = 15
gradx = abs_sobel_thresh(ImgC,orient='x',sobel_kernel=ksize,thresh=(20,100))
grady = abs_sobel_thresh(ImgC,orient='y',sobel_kernel=ksize,thresh=(5,100))
mag_binary = mag_threshold(ImgC,sobel_kernel=ksize,mag_thresh=(30,100))
dir_binary = dir_threshold(ImgC,sobel_kernel=ksize,thresh=(0,1.3))
color_binary = color_thresh(ImgC,thr_s=(200, 255),thr_r=(170, 255))
combined_binary = np.zeros_like(dir_binary)
combined_binary[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) | (color_binary == 1) ] = 1
masked_image = region_of_interest(combined_binary,vertices)
binary_warped = warper(masked_image,src,dst)
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
# Find the peak of the left and right halves of the histogram
midpoint = np.int(histogram.shape[0]/2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Choose the number of sliding windows
nwindows = 9
# Set height of windows
window_height = np.int(binary_warped.shape[0]/nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
        leftx_current = int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
        rightx_current = int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
# Visualize binary warped image
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16,4))
ax1.imshow(cv2.cvtColor(ImgC,cv2.COLOR_BGR2RGB))
ax1.set_title('Undistorted Image', fontsize=30)
ax2.imshow(binary_warped, cmap="gray")
ax2.plot(left_fitx, ploty, color='red',linewidth=6.0)
ax2.plot(right_fitx, ploty, color='blue',linewidth=6.0)
ax2.set_xlim(0, 1280)
ax2.set_ylim(720, 0)
ax2.set_title('Fitted lanes', fontsize=30)
## 5. Calculate the radius of curvature
y_eval = np.max(ploty)
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2)
right_fit_cr = np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2)
# Calculate the new radii of curvature
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
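# Note on the formula: for a second-order fit x = A*y**2 + B*y + C, the radius of
# curvature is R = (1 + (2*A*y + B)**2)**1.5 / |2*A|; here it is evaluated at the
# bottom of the image (y_eval) using the metre-scaled fit coefficients above.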
# Now our radius of curvature is in meters
print('Left lane curve radius: ', left_curverad, 'm')
print('Right lane curve radius: ', right_curverad, 'm')
curvature = (left_curverad + right_curverad) / 2
min_curverad = min(left_curverad, right_curverad)
# Calculate offset from the center
y_level = 719 # at the bottom of the image = nearest to the camera
img_center = img_size[0]/2
left_lanex = left_fit[0]*y_level**2 + left_fit[1]*y_level + left_fit[2]
right_lanex = right_fit[0]*y_level**2 + right_fit[1]*y_level + right_fit[2]
lane_center = (left_lanex + right_lanex)/2
offset = (lane_center-img_center)*xm_per_pix
print('Offset: ', offset, 'm (positive means the vehicle is left of the lane center)')
## 6. Masking lane area
def show_parameters(img,curvature,offset,min_curverad):
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, 'Radius of Curvature = %d(m)' % curvature, (50, 50), font, 1.5, (255, 255, 255), 2)
left_or_right = "left" if offset > 0 else "right"
cv2.putText(img, 'Vehicle is %.2fm %s of center' % (np.abs(offset), left_or_right), (50, 100), font, 1.5,
(255, 255, 255), 2)
cv2.putText(img, 'Minimum Radius of Curvature = %d(m)' % min_curverad, (50, 150), font, 1.5, (255, 255, 255), 2)
# Create an image to draw the lines on
warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = | np.hstack((pts_left, pts_right)) | numpy.hstack |
import csv
import numpy as np
import pickle
import random
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
import scipy.io as sio
from dg import DataGenerator
from tensorflow.python.platform import flags
from keras.layers.normalization import BatchNormalization
from keras.layers import Dense, Dropout, Activation
FLAGS = flags.FLAGS
## Dataset/method options
datasource = 'ball_file'#, 'sinusoid or omniglot or miniimagenet')
# oracle means task id is input (only suitable for sinusoid)
## Training options
train_iterations = 10000#, 'number of metatraining iterations.') # 15k for omniglot, 50k for sinusoid
update_lr = 1e-3#, 'step size alpha for inner gradient update.') # 0.1 for omniglot
mini_batch_size = 250
flags.DEFINE_string('norm', 'None', 'batch_norm, layer_norm, or None')
## Model options
stop_grad = False#, 'if True, do not use second derivatives in meta-optimization (for speed)'
## Logging, saving, and testing options
logdir = 'sl_logs'#, 'directory for summaries and checkpoints.'
resume = True#, 'resume training if there is a model available'
train = True#, 'True to train, False to test.'
test_iter = 10000#, 'iteration to load model (-1 for latest model)'
test_set = False#, 'Set to true to test on the the test set, False for the validation set.'
dim_hidden = [40]
def get_training_input_label():
mat_contents = sio.loadmat('training_rects_onetask.mat')
input_data = np.transpose(mat_contents['In'])
output_data = np.transpose(mat_contents['Out'])
meta_data = np.transpose(mat_contents['Meta'])
return input_data[:-100], output_data[:-100]
def get_testing_input_label():
mat_contents = sio.loadmat('training_rects_onetask.mat')
input_data = np.transpose(mat_contents['In'])
output_data = np.transpose(mat_contents['Out'])
meta_data = | np.transpose(mat_contents['Meta']) | numpy.transpose |
"""Utility functions for ICA-AROMA."""
import os
import os.path as op
import shutil
import nibabel as nib
import numpy as np
from nilearn import image, masking
def runICA(fsl_dir, in_file, out_dir, mel_dir_in, mask, dim, TR):
"""Run MELODIC and merge the thresholded ICs into a single 4D nifti file.
Parameters
----------
fsl_dir : str
Full path of the bin-directory of FSL
in_file : str
Full path to the fMRI data file (nii.gz) on which MELODIC
should be run
out_dir : str
Full path of the output directory
mel_dir_in : str or None
Full path of the MELODIC directory in case it has been run
before, otherwise None.
mask : str
Full path of the mask to be applied during MELODIC
dim : int
Dimensionality of ICA
TR : float
TR (in seconds) of the fMRI data
Output
------
melodic.ica/: MELODIC directory
melodic_IC_thr.nii.gz: Merged file containing the mixture modeling
thresholded Z-statistical maps located in
melodic.ica/stats/
"""
# Define the 'new' MELODIC directory and predefine some associated files
mel_dir = op.join(out_dir, 'melodic.ica')
mel_IC = op.join(mel_dir, 'melodic_IC.nii.gz')
mel_IC_mix = op.join(mel_dir, 'melodic_mix')
mel_IC_thr = op.join(out_dir, 'melodic_IC_thr.nii.gz')
# When a MELODIC directory is specified,
# check whether all needed files are present.
# Otherwise... run MELODIC again
if (mel_dir_in and op.isfile(op.join(mel_dir_in, 'melodic_IC.nii.gz'))
and op.isfile(op.join(mel_dir_in, 'melodic_FTmix'))
and op.isfile(op.join(mel_dir_in, 'melodic_mix'))):
print(' - The existing/specified MELODIC directory will be used.')
# If a 'stats' directory is present (contains thresholded spatial maps)
# create a symbolic link to the MELODIC directory.
# Otherwise create specific links and
# run mixture modeling to obtain thresholded maps.
if op.isdir(op.join(mel_dir_in, 'stats')):
os.symlink(mel_dir_in, mel_dir)
else:
print(" - The MELODIC directory does not contain the required "
"'stats' folder. Mixture modeling on the Z-statistical "
"maps will be run.")
# Create symbolic links to the items in the specified melodic
# directory
os.makedirs(mel_dir)
for item in os.listdir(mel_dir_in):
os.symlink(op.join(mel_dir_in, item),
op.join(mel_dir, item))
# Run mixture modeling
            melodic_command = ("{0} --in={1} --ICs={1} --mix={2} --outdir={3} "
                               "--Ostats --mmthresh=0.5").format(
op.join(fsl_dir, 'melodic'),
mel_IC,
mel_IC_mix,
mel_dir,
)
os.system(melodic_command)
else:
# If a melodic directory was specified, display that it did not
# contain all files needed for ICA-AROMA (or that the directory
# does not exist at all)
if mel_dir_in:
if not op.isdir(mel_dir_in):
print(' - The specified MELODIC directory does not exist. '
'MELODIC will be run separately.')
else:
print(' - The specified MELODIC directory does not contain '
'the required files to run ICA-AROMA. MELODIC will be '
'run separately.')
# Run MELODIC
melodic_command = ("{0} --in={1} --outdir={2} --mask={3} --dim={4} "
"--Ostats --nobet --mmthresh=0.5 --report "
"--tr={5}").format(
op.join(fsl_dir, 'melodic'),
in_file,
mel_dir,
mask,
dim,
TR
)
os.system(melodic_command)
# Get number of components
mel_IC_img = nib.load(mel_IC)
nr_ICs = mel_IC_img.shape[3]
# Merge mixture modeled thresholded spatial maps. Note! In case that
# mixture modeling did not converge, the file will contain two spatial
# maps. The latter being the results from a simple null hypothesis test.
# In that case, this map will have to be used (first one will be empty).
zstat_imgs = []
for i in range(1, nr_ICs + 1):
# Define thresholded zstat-map file
z_temp = op.join(mel_dir, "stats", "thresh_zstat{0}.nii.gz".format(i))
# Get number of volumes in component's thresholded image
z_temp_img = nib.load(z_temp)
if z_temp_img.ndim == 4:
len_IC = z_temp_img.shape[3]
# Extract last spatial map within the thresh_zstat file
zstat_img = image.index_img(z_temp_img, len_IC - 1)
else:
zstat_img = z_temp_img
zstat_imgs.append(zstat_img)
# Merge to 4D
zstat_4d_img = image.concat_imgs(zstat_imgs)
# Apply the mask to the merged image (in case a melodic-directory was
# predefined and run with a different mask)
zstat_4d_img = image.math_img(
"stat * mask[:, :, :, None]", stat=zstat_4d_img, mask=mask
)
zstat_4d_img.to_filename(mel_IC_thr)
def register2MNI(fsl_dir, in_file, out_file, affmat, warp):
"""Register an image (or time-series of images) to MNI152 T1 2mm.
If no affmat is defined, it only warps (i.e. it assumes that the data has
been registered to the structural scan associated with the warp-file
already). If no warp is defined either, it only resamples the data to 2mm
isotropic if needed (i.e. it assumes that the data has been registered to
a MNI152 template). In case only an affmat file is defined, it assumes that
the data has to be linearly registered to MNI152 (i.e. the user has a
reason not to use non-linear registration on the data).
Parameters
----------
fsl_dir : str
Full path of the bin-directory of FSL
in_file : str
Full path to the data file (nii.gz) which has to be registerd to
MNI152 T1 2mm
out_file : str
Full path of the output file
affmat : str
Full path of the mat file describing the linear registration (if data
is still in native space)
warp : str
Full path of the warp file describing the non-linear registration (if
data has not been registered to MNI152 space yet)
Output
------
melodic_IC_mm_MNI2mm.nii.gz : merged file containing the mixture modeling
thresholded Z-statistical maps registered to
MNI152 2mm
"""
# Define the MNI152 T1 2mm template
fslnobin = fsl_dir.rsplit('/', 2)[0]
ref = op.join(fslnobin, 'data', 'standard', 'MNI152_T1_2mm_brain.nii.gz')
# If the no affmat- or warp-file has been specified, assume that the data
# is already in MNI152 space. In that case only check if resampling to
# 2mm is needed
if not affmat and not warp:
in_img = nib.load(in_file)
# Get 3D voxel size
pixdim1, pixdim2, pixdim3 = in_img.header.get_zooms()[:3]
# If voxel size is not 2mm isotropic, resample the data, otherwise
# copy the file
if (pixdim1 != 2) or (pixdim2 != 2) or (pixdim3 != 2):
os.system(' '.join([op.join(fsl_dir, 'flirt'),
' -ref ' + ref,
' -in ' + in_file,
' -out ' + out_file,
' -applyisoxfm 2 -interp trilinear']))
else:
            shutil.copyfile(in_file, out_file)
# If only a warp-file has been specified, assume that the data has already
# been registered to the structural scan. In that case apply the warping
# without a affmat
elif not affmat and warp:
# Apply warp
os.system(' '.join([op.join(fsl_dir, 'applywarp'),
'--ref=' + ref,
'--in=' + in_file,
'--out=' + out_file,
'--warp=' + warp,
'--interp=trilinear']))
# If only a affmat-file has been specified perform affine registration to
# MNI
elif affmat and not warp:
os.system(' '.join([op.join(fsl_dir, 'flirt'),
'-ref ' + ref,
'-in ' + in_file,
'-out ' + out_file,
'-applyxfm -init ' + affmat,
'-interp trilinear']))
# If both a affmat- and warp-file have been defined, apply the warping
# accordingly
else:
os.system(' '.join([op.join(fsl_dir, 'applywarp'),
'--ref=' + ref,
'--in=' + in_file,
'--out=' + out_file,
'--warp=' + warp,
'--premat=' + affmat,
'--interp=trilinear']))
def cross_correlation(a, b):
"""Perform cross-correlations between columns of two matrices.
Parameters
----------
    a : (X x M) array_like
        First array to cross-correlate (X observations in rows, M variables in columns)
    b : (X x N) array_like
        Second array to cross-correlate (same number of rows as a)
Returns
-------
correlations : (M x N) array_like
Cross-correlations of columns of a against columns of b.
"""
assert a.ndim == b.ndim == 2
_, ncols_a = a.shape
# nb variables in columns rather than rows hence transpose
# extract just the cross terms between cols in a and cols in b
return np.corrcoef(a.T, b.T)[:ncols_a, ncols_a:]
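# Illustrative usage of cross_correlation (hypothetical shapes, not part of the original module):
#   mix = np.random.rand(200, 5)       # e.g. a MELODIC mixing matrix: 200 timepoints x 5 ICs
#   rp = np.random.rand(200, 6)        # e.g. expanded realignment parameters: 200 timepoints x 6 regressors
#   cross_correlation(mix, rp).shape   # -> (5, 6), columns of mix against columns of rp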
def classification(out_dir, max_RP_corr, edge_fract, HFC, csf_fract):
"""Classify components as motion or non-motion based on four features.
The four features used for classification are: maximum RP correlation,
high-frequency content, edge-fraction, and CSF-fraction.
Parameters
----------
out_dir : str
Full path of the output directory
max_RP_corr : (C,) array_like
Array of the 'maximum RP correlation' feature scores of the components
edge_fract : (C,) array_like
Array of the 'edge fraction' feature scores of the components
HFC : (C,) array_like
Array of the 'high-frequency content' feature scores of the components
csf_fract : (C,) array_like
Array of the 'CSF fraction' feature scores of the components
Returns
-------
motion_ICs : array_like
Array containing the indices of the components identified as motion
components
Output
------
classified_motion_ICs.txt : A text file containing the indices of the
components identified as motion components
"""
# Classify the ICs as motion or non-motion
# Define criteria needed for classification (thresholds and
# hyperplane-parameters)
thr_csf = 0.10
thr_HFC = 0.35
hyp = np.array([-19.9751070082159, 9.95127547670627, 24.8333160239175])
# Project edge & max_RP_corr feature scores to new 1D space
x = np.array([max_RP_corr, edge_fract])
proj = hyp[0] + np.dot(x.T, hyp[1:])
# Classify the ICs
motion_ICs = np.squeeze(
np.array(
np.where(
(proj > 0)
+ (csf_fract > thr_csf)
+ (HFC > thr_HFC)
)
)
)
# Put the feature scores in a text file
np.savetxt(op.join(out_dir, 'feature_scores.txt'),
np.vstack((max_RP_corr, edge_fract, HFC, csf_fract)).T)
# Put the indices of motion-classified ICs in a text file
with open(op.join(out_dir, 'classified_motion_ICs.txt'), 'w') as fo:
if motion_ICs.size > 1:
fo.write(','.join(['{:.0f}'.format(num) for num in
(motion_ICs + 1)]))
elif motion_ICs.size == 1:
fo.write('{:.0f}'.format(motion_ICs + 1))
# Create a summary overview of the classification
with open(op.join(out_dir, 'classification_overview.txt'), 'w') as fo:
fo.write('\t'.join(['IC',
'Motion/noise',
'maximum RP correlation',
'Edge-fraction',
'High-frequency content',
'CSF-fraction']))
fo.write('\n')
for i in range(0, len(csf_fract)):
if (proj[i] > 0) or (csf_fract[i] > thr_csf) or (HFC[i] > thr_HFC):
classif = "True"
else:
classif = "False"
fo.write('\t'.join(['{:d}'.format(i + 1),
classif,
'{:.2f}'.format(max_RP_corr[i]),
'{:.2f}'.format(edge_fract[i]),
'{:.2f}'.format(HFC[i]),
'{:.2f}'.format(csf_fract[i])]))
fo.write('\n')
return motion_ICs
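# Illustrative sketch of classification() with hypothetical feature scores for two components:
#   max_RP_corr = np.array([0.85, 0.10])
#   edge_fract = np.array([0.60, 0.05])
#   HFC = np.array([0.50, 0.10])
#   csf_fract = np.array([0.02, 0.01])
#   classification('/tmp/aroma_out', max_RP_corr, edge_fract, HFC, csf_fract)
#   # -> component 0 falls on the noise side of the hyperplane and exceeds the HFC
#   #    threshold, so only index 0 is returned as a motion IC; component 1 is kept.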
def denoising(fsl_dir, in_file, out_dir, mixing, den_type, den_idx):
"""Remove noise components from fMRI data.
Parameters
----------
fsl_dir : str
Full path of the bin-directory of FSL
in_file : str
Full path to the data file (nii.gz) which has to be denoised
out_dir : str
Full path of the output directory
mixing : str
Full path of the melodic_mix text file
den_type : {"aggr", "nonaggr", "both"}
Type of requested denoising ('aggr': aggressive, 'nonaggr':
non-aggressive, 'both': both aggressive and non-aggressive
den_idx : array_like
Index of the components that should be regressed out
Output
------
denoised_func_data_<den_type>.nii.gz : The denoised fMRI data
"""
# Check if denoising is needed (i.e. are there motion components?)
motion_components_found = den_idx.size > 0
nonaggr_denoised_file = op.join(out_dir, "denoised_func_data_nonaggr.nii.gz")
aggr_denoised_file = op.join(out_dir, "denoised_func_data_aggr.nii.gz")
if motion_components_found:
mixing = np.loadtxt(mixing)
motion_components = mixing[:, den_idx]
# Create a fake mask to make it easier to reshape the full data to 2D
img = nib.load(in_file)
full_mask = nib.Nifti1Image(np.ones(img.shape[:3], int), img.affine)
data = masking.apply_mask(img, full_mask) # T x S
# Non-aggressive denoising of the data using fsl_regfilt
# (partial regression), if requested
if den_type in ("nonaggr", "both"):
# Fit GLM to all components
betas = np.linalg.lstsq(mixing, data, rcond=None)[0]
# Denoise the data using the betas from just the bad components.
pred_data = | np.dot(motion_components, betas[den_idx, :]) | numpy.dot |
'''
Extract physical features of airfoils or wing sections.
'''
import copy
import os
import numpy as np
from scipy.interpolate import interp1d
class PhysicalSec():
'''
Extracting flow features of a section (features on/near the wall)
'''
_i = 0 # index of the mesh point
_X = 0.0 # location of the feature location
_value = 0.0
#* Dictionary of flow features (identify the index and location)
xf_dict = {
'Cu': ['upper crest', _i, _X], # crest point on upper surface
'Cl': ['lower crest', _i, _X], # crest point on lower surface
'tu': ['upper highest', _i, _X], # highest point on upper surface
'tl': ['lower highest', _i, _X], # lowest point on lower surface
'tm': ['max thickness', _i, _X], # maximum thickness position
'L': ['upper LE', _i, _X], # suction peak near leading edge on upper surface
'T': ['upper TE', _i, _X], # trailing edge upper surface (98% chord length)
        'H': ['upper surface max Ma', _i, _X], # position of upper surface maximum Mach number
'S': ['separation start', _i, _X], # separation start position
'R': ['reattachment', _i, _X], # reattachment position
'Q': ['lower LE', _i, _X], # suction peak near leading edge on lower surface
'M': ['lower surface max Ma', _i, _X], # position of lower surface maximum Mach number
'mUy': ['min(du/dy)', _i, _X], # position of min(du/dy)
'F': ['shock foot', _i, _X], # shock foot position
'1': ['shock front', _i, _X], # shock wave front position
'3': ['shock hind', _i, _X], # position of just downstream the shock
'D': ['dent on plateau', _i, _X], # largest dent on the suction plateau
'U': ['local sonic', _i, _X], # local sonic position
'B': ['1st dent after L', _i, _X], # first dent after suction peak [X_L, X_L+0.1]
# # Note: for weak shock waves, may not reach Mw=1
# # define position of U as Mw minimal extreme point after shock foot
'A': ['maximum Mw after shock', _i, _X], # maximum wall Mach number after shock wave (or equal to 3)
'N': ['new flat boundary', _i, _X], # starting position of new flat boundary
# # most of the time, A == N
'Hi': ['maximum Hi', _i, _X], # position of maximum Hi
'Hc': ['maximum Hc', _i, _X], # position of maximum Hc
'L1U': ['length 1~U', _value], # XU-X1
'L13': ['length 1~3', _value], # X3-X1
'LSR': ['length S~R', _value], # XR-XS
'lSW': ['single shock', _value], # single shock wave flag
'DCp': ['shock strength', _value], # Cp change through shock wave
'Err': ['suc Cp area', _value], # Cp integral of suction plateau fluctuation
'FSp': ['fluctuation suc-plat', _value], # Mw fluctuation of suction plateau
'DMp': ['Mw dent on plateau', _value], # dMw of Mw dent on suction plateau
'CLU': ['upper CL', _value], # CL of upper surface
'CLL': ['lower CL', _value], # CL of lower surface
'CdU': ['upper Cd', _value], # Cdp of upper surface
'CdL': ['lower Cd', _value], # Cdp of lower surface
'CLw': ['windward CL', _value], # CL of windward surfaces (before crest point)
'Cdw': ['windward Cdp', _value], # Cdp of windward surfaces (before crest point)
'CLl': ['leeward CL', _value], # CL of leeward surfaces (behind crest point)
'Cdl': ['leeward Cdp', _value], # Cdp of leeward surfaces (behind crest point)
'kaf': ['slope aft', _value] # average Mw slope of the aft upper surface (3/N~T)
}
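    # Example of reading entries back (see getValue): position-type features store
    # [description, index, X-location], value-type features store [description, value], e.g.
    #   self.xf_dict['1'][2]   -> X-location of the shock-wave front
    #   self.xf_dict['DCp'][1] -> Cp jump across the shock wave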
def __init__(self, Minf, AoA, Re):
'''
### Inputs:
```text
Minf: Free stream Mach number
AoA: Angle of attack (deg)
Re: Reynolds number per meter
```
'''
self.Minf = Minf
self.AoA = AoA
self.Re = Re
self.xf_dict = copy.deepcopy(PhysicalSec.xf_dict)
def setdata(self, x, y, Cp, Tw, Hi, Hc, dudy):
'''
Set the data of this foil or section.
Data: ndarray, start from lower surface trailing edge
'''
self.x = copy.deepcopy(x)
self.y = copy.deepcopy(y)
self.Cp = copy.deepcopy(Cp)
self.Mw = self.Cp2Mw()
self.Tw = copy.deepcopy(Tw)
self.Hi = copy.deepcopy(Hi)
self.Hc = copy.deepcopy(Hc)
self.dudy = copy.deepcopy(dudy)
iLE = np.argmin(self.x)
self.x -= self.x[iLE]
self.y -= self.y[iLE]
self.x[0] = 1.0
self.x[-1] = 1.0
fmw = interp1d(self.x[iLE:], self.Mw[iLE:], kind='cubic')
fhu = interp1d(self.x[iLE:], self.Hc[iLE:], kind='cubic')
gu = interp1d(self.x[iLE:], self.y [iLE:], kind='cubic')
x_ = np.append(self.x[iLE:0:-1], self.x[0])
y_ = np.append(self.y[iLE:0:-1], self.y[0])
gl = interp1d(x_, y_, kind='cubic')
self.xx = np.arange(0.0, 1.0, 0.001)
self.yu = gu(self.xx)
self.yl = gl(self.xx)
self.mu = fmw(self.xx)
self.hu = fhu(self.xx)
self.iLE = iLE
def set_Mw(self, x, Mw):
'''
Set the Mw distribution of this foil or section.
Data: ndarray, start from lower surface trailing edge
'''
self.x = copy.deepcopy(x)
self.Mw = copy.deepcopy(Mw)
iLE = np.argmin(self.x)
self.iLE = iLE
fmw = interp1d(self.x[iLE:], self.Mw[iLE:], kind='cubic')
self.xx = np.arange(0.0, 1.0, 0.001)
self.mu = fmw(self.xx)
@property
def n_point(self):
'''
Number of points in this section
'''
return self.x.shape[0]
@staticmethod
def IsentropicCp(Ma, Minf: float, g=1.4):
'''
Isentropic flow: Calculate Cp by Mach
### Inputs:
```text
Ma: float, or ndarray
Minf: free stream Mach number
g: γ=1.4, ratio of the specific heats
```
'''
X = (2.0+(g-1.0)*Minf**2)/(2.0+(g-1.0)*Ma**2)
X = X**(g/(g-1.0))
Cp = 2.0/g/Minf**2*(X-1.0)
return Cp
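    # Quick sanity check (approximate, gamma = 1.4): IsentropicCp(1.0, 0.73) gives the
    # critical pressure coefficient Cp* ≈ -0.66, i.e. the Cp at which the local flow
    # just reaches Mach 1 for a free-stream Mach number of 0.73.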
@staticmethod
def toMw(Cp: np.array, Minf: float, n_ref=100, M_max=2.0):
'''
Converting Cp to wall Mach number
'''
Ma_ref = np.linspace(0.0, M_max, n_ref)
Cp_ref = PhysicalSec.IsentropicCp(Ma_ref, Minf)
f = interp1d(Cp_ref, Ma_ref, kind='cubic')
Cp_ = Cp.copy()
Cp_ = np.clip(Cp_, Cp_ref[-1], Cp_ref[0])
return f(Cp_)
def Cp2Mw(self, n_ref=100, M_max=2.0):
'''
Converting Cp to wall Mach number
'''
Mw = PhysicalSec.toMw(self.Cp, self.Minf, n_ref=n_ref, M_max=M_max)
return Mw
@staticmethod
def ShapeFactor(sS, VtS, Tw: float, iUe: int, neglect_error=False):
'''
        Calculate shape factor Hi & Hc by mesh points on a line perpendicular to the wall.
### Inputs:
```text
sS: ndarray [nMax], distance of mesh points to wall
VtS: ndarray [nMax], velocity component of mesh points (parallel to the wall)
Tw: wall temperature (K)
iUe: index of mesh point locating the outer velocity Ue
neglect_error: if True, set shape factor to 0 when error occurs
```
### Return:
```text
Hi: incompressible shape factor
Hc: compressible shape factor
```
### Note:
```text
        XR => reference point on the wall; the nMax data points are examined along the wall-normal direction nR starting from XR
        sS => distance from each data point to the wall
        VtS => velocity component of each data point in the wall-tangential direction
se: distance of boundary layer outer boundary to wall
ds: 𝛿*, displacement thickness
tt: θ, momentum loss thickness
Ue: outer layer velocity component (parallel to the wall)
        Ue: tests show that taking the maximum of VtS directly as Ue is the most reasonable choice; averaging over a range, or using a fixed grid index, works poorly
```
'''
nMax= sS.shape[0]
Ue = VtS[iUe]
se = sS[iUe]
ds = 0.0
tt = 0.0
if iUe>=nMax or iUe<=int(0.2*nMax):
if neglect_error:
return 0.0, 0.0
else:
print()
print('Vts: velocity component of mesh points')
print(VtS)
print()
raise Exception('Error [ShapeFactor]: iUe %d not reasonable (nMax=%d)'%(iUe, nMax))
for i in range(iUe-1):
a1 = Ue-VtS[i]
a2 = Ue-VtS[i+1]
ds += 0.5*(a1+a2)*(sS[i+1]-sS[i])
for i in range(iUe-1):
a1 = VtS[i ]*(Ue-VtS[i ])
a2 = VtS[i+1]*(Ue-VtS[i+1])
tt += 0.5*(a1+a2)*(sS[i+1]-sS[i])
Hi = ds/tt*Ue
Hc = Tw*Hi+Tw-1
return Hi, Hc
@staticmethod
def getHi(X, Y, U, V, T, j0: int, j1: int, nHi: int, neglect_error=False):
'''
Calculate shape factor Hi & Hc from field data
### Inputs:
```text
Field data: ndarray (nj,nk), X, Y, U, V, T
j0: j index of the lower surface TE
j1: j index of the upper surface TE
nHi: maximum number of mesh points in k direction for boundary layer
neglect_error: if True, set shape factor to 0 when error occurs
```
### Return:
```text
Hi, Hc: ndarray (j1-j0)
info: tuple of ndarray (Tw, dudy)
```
### Note:
```text
Tw: wall temperature
dudy: du/dy
iUe: index of mesh point locating the outer velocity Ue
XR: reference position on the wall
```
### Filed data (j,k) index
```text
j: 1 - nj from far field of lower surface TE to far field of upper surface TE
j: j0 - j1 from lower surface TE to upper surface TE
        k: 1 - nk from surface to far field (assuming perpendicular to the wall)
```
'''
iLE = int(0.5*(j0+j1))
nj = X.shape[0]
nk = X.shape[1]
nn = j1-j0
Hi = np.zeros(nn)
Hc = np.zeros(nn)
Tw = np.zeros(nn)
dudy = np.zeros(nn)
#* Locate boundary layer edge index iUe & calculate du/dy
sS = np.zeros([nn,nHi])
VtS = np.zeros([nn,nHi])
iUe = np.zeros(nn, dtype=int)
for j in range(nn):
jj = j0+j
XR = np.array([X[jj,0], Y[jj,0]])
tR = np.array([X[jj+1,0]-X[jj-1,0], Y[jj+1,0]-Y[jj-1,0]])
tR = tR/np.linalg.norm(tR)
if tR[0]<0.0:
tR = -tR
for i in range(nHi-1):
XS = np.array([X[jj,i+1], Y[jj,i+1]])
VS = np.array([U[jj,i+1], V[jj,i+1]])
sS [j,i+1] = np.linalg.norm(XR-XS)
VtS[j,i+1] = np.dot(tR,VS)
iUe[j] = np.argmax(np.abs(VtS[j,:]))
dudy[j] = VtS[j,1]/sS[j,1]
Tw[j] = T[jj,0]
#* Smooth iUe at shock wave foot
nspan = 4
for j in range(nn-2*nspan):
jj = j+nspan
r1 = 0.5*(iUe[jj-nspan]+iUe[jj+nspan])
r2 = abs(iUe[jj+nspan]-iUe[jj-nspan])
r3 = abs(iUe[jj]-iUe[jj-nspan]) + abs(iUe[jj]-iUe[jj+nspan])
if r3>r2:
iUe[jj] = int(r1)
#* Calculate Hi & Hc
for j in range(nn):
Hi[j], Hc[j] = PhysicalSec.ShapeFactor(sS[j,:], VtS[j,:],
Tw[j], iUe[j], neglect_error=neglect_error)
#* Limit leading edge Hi
r1 = 1.0
r2 = 1.0
r3 = 1.0
r4 = 1.0
for j in range(nn):
jj = j0+j
if (X[jj,0]-0.05)*(X[jj+1,0]-0.05)<=0.0 and jj<iLE:
r1 = Hi[j]
r3 = Hc[j]
if (X[jj,0]-0.05)*(X[jj+1,0]-0.05)<=0.0 and jj>=iLE:
r2 = Hi[j]
r4 = Hc[j]
for j in range(nn):
jj = j0+j
if X[jj,0]<0.05 and jj<iLE:
Hi[j] = r1
Hc[j] = r3
if X[jj,0]<0.05 and jj>=iLE:
Hi[j] = r2
Hc[j] = r4
return Hi, Hc, (Tw, dudy)
def getValue(self, feature: str, key='key') -> float:
'''
Get value of given feature.
### Inputs:
```text
feature: key of feature dictionary
key: 'i', 'X', 'Cp', 'Mw', 'Tw', 'Hi', 'Hc', 'dudy'
```
'''
if not feature in PhysicalSec.xf_dict.keys():
print(' Warning: feature [%s] not valid'%(feature))
return 0.0
aa = self.xf_dict[feature]
if len(aa)==2:
return aa[1]
if key == 'i':
return aa[1]
if key == 'X':
return aa[2]
if key == 'Cp':
yy = self.Cp
elif key == 'Mw':
yy = self.Mw
elif key == 'Tw':
yy = self.Tw
elif key == 'Hi':
yy = self.Hi
elif key == 'Hc':
yy = self.Hc
elif key == 'dudy':
yy = self.dudy
else:
raise Exception(' key %s not valid'%(key))
ii = aa[1]
xx = aa[2]
if xx <= 1e-6:
return 0.0
if ii >= self.iLE:
i0 = max(self.iLE, ii-4)
i1 = i0 + 7
else:
i1 = min(self.iLE, ii+4)
i0 = i1 - 7
X = self.x[i0:i1]
Y = yy[i0:i1]
f = interp1d(X, Y, kind='cubic')
return f(xx)
#TODO: locate the position of flow features
def locate_basic(self, dMwcri_L=1.0):
'''
Locate the index and position of basic flow features.
### Get value of: L, T, Q, M
'''
X = self.x
M = self.Mw
nn = X.shape[0]
iLE = self.iLE
#TODO: Basic features
#* L => suction peak near leading edge on upper surface
# 1: maximum extreme point
# 2: dMw/dx = 1
i_L = 0
for i in range(int(0.25*nn)):
ii = i + iLE
if X[ii] > 0.2:
break
if M[ii-1]<=M[ii] and M[ii]>=M[ii+1]:
i_L = ii
break
if i_L == 0:
dMw2 = 0.0
for i in range(int(0.25*nn)):
ii = i + iLE+1
dMw1 = dMw2
dMw2 = (M[ii+1]-M[ii])/(X[ii+1]-X[ii])
if dMw1>=dMwcri_L and dMw2<dMwcri_L:
i_L = ii
break
self.xf_dict['L'][1] = i_L
self.xf_dict['L'][2] = X[i_L]
#* T => trailing edge upper surface (98% chord length)
for i in range(int(0.2*nn)):
ii = nn-i-1
if X[ii]<=0.98 and X[ii+1]>0.98:
self.xf_dict['T'][1] = ii
self.xf_dict['T'][2] = 0.98
break
#* H => position of upper surface maximum Mach number
i_H = 0
max1 = -1.0
for i in np.arange(iLE, nn-2, 1):
if M[i-1]<=M[i] and M[i+1]<=M[i] and M[i]>max1:
max1 = M[i]
i_H = i
self.xf_dict['H'][1] = i_H
self.xf_dict['H'][2] = X[i_H]
#* Q => suction peak near leading edge on lower surface
for i in range(int(0.2*nn)):
ii = iLE - i
if M[ii-1]<=M[ii] and M[ii]>=M[ii+1]:
self.xf_dict['Q'][1] = ii
self.xf_dict['Q'][2] = X[ii]
break
#* M => position of lower surface maximum Mach number
i_M = 0
max1 = -1.0
for i in np.arange(1, iLE, 1):
if M[i-1]<=M[i] and M[i+1]<=M[i] and M[i]>max1:
max1 = M[i]
i_M = i
self.xf_dict['M'][1] = i_M
self.xf_dict['M'][2] = X[i_M]
def locate_sep(self):
'''
Locate the index and position of flow features about du/dy.
### Get value of: S, R, mUy
'''
X = self.x
dudy = self.dudy
nn = X.shape[0]
iLE = self.iLE
#* S => separation start position
#* R => reattachment position
#* mUy => position of min(du/dy)
min_Uy = 1e6
i_S = 0
for i in range(int(0.5*nn)):
ii = iLE + i
if X[ii]<0.02:
continue
if X[ii]>0.98:
break
if dudy[ii]>=0.0 and dudy[ii+1]<0.0 and i_S==0:
i_S = ii
self.xf_dict['S'][1] = ii
self.xf_dict['S'][2] = (0.0-dudy[ii])*(X[ii+1]-X[ii])/(dudy[ii+1]-dudy[ii])+X[ii]
if dudy[ii]<=0.0 and dudy[ii+1]>0.0 and i_S!=0:
self.xf_dict['R'][1] = ii
self.xf_dict['R'][2] = (0.0-dudy[ii])*(X[ii+1]-X[ii])/(dudy[ii+1]-dudy[ii])+X[ii]
if dudy[ii]<min_Uy and dudy[ii-1]>=dudy[ii] and dudy[ii+1]>=dudy[ii]:
min_Uy = dudy[ii]
self.xf_dict['mUy'][1] = ii
self.xf_dict['mUy'][2] = X[ii]
def locate_geo(self):
'''
Locate the index and position of geometry related flow features.\n
### Get value of: Cu, Cl, tu, tl, tm
'''
X = self.x
xx = self.xx
yu = self.yu
yl = self.yl
iLE = self.iLE
n0 = xx.shape[0]
#* tm => maximum thickness
#* tu => highest point on upper surface
#* tl => lowest point on lower surface
x_max = xx[np.argmax(yu-yl)]
x_mu = xx[np.argmax(yu)]
x_ml = xx[np.argmin(yl)]
self.xf_dict['tm'][1] = np.argmin(np.abs(X[iLE:]-x_max)) + iLE
self.xf_dict['tm'][2] = x_max
self.xf_dict['tu'][1] = np.argmin(np.abs(X[iLE:]-x_mu )) + iLE
self.xf_dict['tu'][2] = x_mu
self.xf_dict['tl'][1] = np.argmin(np.abs(X[:iLE]-x_ml ))
self.xf_dict['tl'][2] = x_ml
#* Cu => crest point on upper surface
aa = self.AoA/180.0*np.pi
x0 = np.array([0.0, 0.0])
x1 = np.array([np.cos(aa), np.sin(aa)])
ds = np.zeros(n0)
for i in range(n0):
xt = np.array([xx[i], yu[i]])
if xx[i] > 0.9:
continue
ds[i], _ = ratio_vec(x0, x1, xt)
ii = np.argmax(ds)
self.xf_dict['Cu'][1] = np.argmin(np.abs(X[iLE:]-xx[ii])) + iLE
self.xf_dict['Cu'][2] = xx[ii]
#* Cl => crest point on lower surface
ds = np.zeros(n0)
for i in range(n0):
if xx[i] > 0.9:
continue
xt = np.array([xx[i], yl[i]])
ds[i], _ = ratio_vec(x0, x1, xt)
ii = np.argmax(ds)
self.xf_dict['Cl'][1] = np.argmin(np.abs(X[:iLE]-xx[ii]))
self.xf_dict['Cl'][2] = xx[ii]
def locate_shock(self, dMwcri_1=-1.0, info=False):
'''
Locate the index and position of shock wave related flow features.
### Get value of: 1, 3, F, U, D, A, B
### Inputs:
```text
dMwcri_1: critical value locating shock wave front
```
'''
X = self.x # [n]
xx = self.xx # [1000]
mu = self.mu # [1000]
nn = xx.shape[0]
iLE = self.iLE
dMw = np.zeros(nn)
for i in range(nn-1):
if xx[i]<=0.02:
continue
if xx[i]>=0.98:
continue
dMw[i] = (mu[i+1]-mu[i])/(xx[i+1]-xx[i])
dMw[i] = min(dMw[i], 2)
d2Mw = np.zeros(nn)
for i in range(nn-1):
if xx[i]<0.02 or xx[i]>0.95:
continue
#d2Mw[i] = (dMw[i+2]+dMw[i+1]-dMw[i]-dMw[i-1])/2/(xx[i+1]-xx[i-1])
#d2Mw[i] = (dMw[i+1]-dMw[i-1])/(xx[i+1]-xx[i-1])
d2Mw[i] = (0.5*dMw[i+7]+0.5*dMw[i+4]+2*dMw[i+1]-
2*dMw[i]-0.5*dMw[i-3]-0.5*dMw[i-6])/4.5/(xx[i+1]-xx[i-1])
#* Check shock and shock properties
flag, i_F, i_1, i_U, i_3 = PhysicalSec.check_singleshock(xx, mu, dMw, d2Mw, dMwcri_1, info=info)
self.xf_dict['lSW'][1] = flag
if not flag==1:
return 0
#* F => shock foot position
self.xf_dict['F'][1] = np.argmin(np.abs(X[iLE:]-xx[i_F])) + iLE
self.xf_dict['F'][2] = xx[i_F]
#* 1 => shock wave front position
self.xf_dict['1'][1] = np.argmin(np.abs(X[iLE:]-xx[i_1])) + iLE
self.xf_dict['1'][2] = xx[i_1]
#* 3 => position of just downstream the shock
self.xf_dict['3'][1] = np.argmin(np.abs(X[iLE:]-xx[i_3])) + iLE
self.xf_dict['3'][2] = xx[i_3]
#* U => local sonic position
self.xf_dict['U'][1] = np.argmin(np.abs(X[iLE:]-xx[i_U])) + iLE
self.xf_dict['U'][2] = xx[i_U]
#* D => dent on the suction plateau
# maximum (linear Mw - actual Mw) between L and 1
x_1 = self.xf_dict['1'][2]
x_L = max(self.xf_dict['L'][2], 0.05)
m_1 = self.getValue('1','Mw')
m_L = self.getValue('L','Mw')
lL1 = x_1-x_L
i_D = 0
min_D = 0.0
for i in np.arange(2, i_1-1, 1):
if xx[i]<x_L:
continue
tt = (xx[i]-x_L)/lL1
ss = (1-tt)*m_L + tt*m_1
dM = ss - mu[i]
if dM > min_D:
i_D = i
min_D = dM
if i_D==0:
self.xf_dict['D'][1] = self.xf_dict['L'][1]
self.xf_dict['D'][2] = self.xf_dict['L'][2]
else:
self.xf_dict['D'][1] = np.argmin(np.abs(X[iLE:]-xx[i_D])) + iLE
self.xf_dict['D'][2] = xx[i_D]
#* B => first dent after suction peak [X_L, X_L+0.1]
# minimum Mw between L and L+0.1
x_L = self.xf_dict['L'][2]
i_B = 0
for i in np.arange(2, i_1-1, 1):
if xx[i]<x_L or xx[i]>x_L+0.1:
continue
if mu[i-1]>=mu[i] and mu[i]<=mu[i+1] and i_B==0:
i_B = i
if i_B == 0:
self.xf_dict['B'][1] = self.xf_dict['L'][1]
self.xf_dict['B'][2] = self.xf_dict['L'][2]
else:
self.xf_dict['B'][1] = np.argmin(np.abs(X[iLE:]-xx[i_B])) + iLE
self.xf_dict['B'][2] = xx[i_B]
#* A => maximum Mw after shock
# Find the maximum position of Mw in range [x_3, 0.9]
i_A = 0
max_A = 0.0
for i in np.arange(i_3, nn-1, 1):
if xx[i]>0.9:
break
if mu[i]>max_A:
i_A = i
max_A = mu[i]
elif mu[i]>=mu[i_3]*0.8 and mu[i]>mu[i-1] and mu[i]>mu[i+1]:
i_A = i
x_A = xx[i_A]
self.xf_dict['A'][1] = np.argmin(np.abs(X[iLE:]-x_A)) + iLE
self.xf_dict['A'][2] = x_A
return i_1
def locate_BL(self, i_1):
'''
Locate the index and position of boundary layer related flow features. \n
i-1: index of shock wave front position in self.xx
### Get value of: N, Hi, Hc
'''
X = self.x
xx = self.xx
hu = self.hu
nn = xx.shape[0]
iLE = self.iLE
#* Hi, Hc => position of maximum Hi, Hc after shock wave front
# For cases when shock wave is weak, and Hc just keeps growing, set 0
i_H = 0
max1 = 0.0
for i in np.arange(i_1, nn-2, 1):
if xx[i] > 0.95:
break
if hu[i-1]<=hu[i] and hu[i+1]<=hu[i] and hu[i]>max1:
max1 = hu[i]
i_H = i
x_H = xx[i_H]
self.xf_dict['Hc'][1] = np.argmin(np.abs(X[iLE:]-x_H)) + iLE
self.xf_dict['Hc'][2] = x_H
self.xf_dict['Hi'][1] = self.xf_dict['Hc'][1]
self.xf_dict['Hi'][2] = x_H
#* N => starting position of new flat boundary
# i.e., position of minimum Hc after shock wave front
# For cases when shock wave is weak, and Hc just keeps growing, set 0
i_N = 0
min1 = 1000.0
for i in np.arange(i_1, nn-1, 1):
if hu[i-1]>=hu[i] and hu[i+1]<=hu[i] and hu[i]<min1:
min1 = hu[i]
i_N = i
x_N = xx[i_N]
self.xf_dict['N'][1] = np.argmin(np.abs(X[iLE:]-x_N)) + iLE
self.xf_dict['N'][2] = x_N
@staticmethod
def shock_property(xu, mu, dMw, d2Mw, dMwcri_1):
'''
>>> i_F, i_1, i_U, i_3 = shock_property(xu, mu, dMw, d2Mw, dMwcri_1)
### Return:
```text
Index of xu for: F, 1, U, 3
```
'''
nn = xu.shape[0]
#* F => shock foot position
i_F = np.argmin(dMw)
x_F = xu[i_F]
#* 1 => shock wave front position
# Find the kink position of dMw in range [x_F-0.2, x_F], defined as dMw = -1
i_1 = 0
i_cri = 0
i_md2 = 0
for i in np.arange(i_F, 1, -1):
# 1. Within the range of [x_F-0.2, x_F]
if xu[i]<x_F-0.2:
break
# 2. Locate dMw = dMwcri_1 (tend to go too much upstream)
if dMw[i]>=dMwcri_1 and dMw[i+1]<dMwcri_1 and i_cri==0:
i_cri = i
# 3. Locate min d2Mw/dx2 (tend to go too much downstream)
if d2Mw[i]<=d2Mw[i-1] and d2Mw[i]>d2Mw[i+1] and i_md2==0:
i_md2 = i
if i_md2-i_cri > 2*(i_F-i_md2):
i_1 = i_md2
elif 2*(i_md2-i_cri) < i_F-i_md2:
i_1 = i_cri
else:
i_1 = int(0.5*(i_cri+i_md2))
'''
print(i_cri, i_md2, i_F, xu[i_cri], xu[i_md2], dMw[i_md2], dMw[i_F])
import matplotlib.pyplot as plt
plt.plot(xu, mu, 'b')
plt.plot(xu, d2Mw/1000, 'r')
plt.plot([xu[i_cri], xu[i_md2]], [mu[i_cri], mu[i_md2]], 'bo')
plt.plot([xu[i_1]], [mu[i_1]], 'ro')
plt.show()
'''
#* 3 => position of just downstream the shock
# Find the first flat position of Mw in range [x_F, x_F+0.2], defined as dMw = 0 or -1
i_3 = 0
i_cri = 0
i_md2 = 0
i_flat = 0
for i in np.arange(i_F, nn-1, 1):
# 1. Within the range of [x_F, x_F+0.2]
if xu[i]>x_F+0.2:
break
# 2. Locate dMw = dMwcri_1 (tend to go too much downstream)
if dMw[i]<=dMwcri_1 and dMw[i+1]>dMwcri_1 and i_cri==0:
i_cri = i
# 3. Locate min d2Mw/dx2 (tend to go too much upstream)
if d2Mw[i]<=d2Mw[i-1] and d2Mw[i]>d2Mw[i+1] and i_md2==0:
i_md2 = i
# 4. Locate the first flat position of Mw
if dMw[i]<=0.0 and dMw[i+1]>0.0:
i_flat = i
if i_flat!=0 and i_flat-i_F < 2*(i_cri-i_F):
i_3 = i_flat
elif i_cri-i_md2 > 2*(i_md2-i_F):
i_3 = i_md2
elif 2*(i_cri-i_md2) < i_md2-i_F:
i_3 = i_cri
else:
i_3 = int(0.5*(i_cri+i_md2))
'''
print('F %3d %.2f'%(i_F, xu[i_F]))
print('d2Mw %3d %.2f'%(i_md2, xu[i_md2]))
print('cri %3d %.2f'%(i_cri, xu[i_cri]))
print('dMw=0 %3d %.2f'%(i_flat,xu[i_flat]))
print('3 %3d %.2f'%(i_3, xu[i_3]))
print()
'''
#* U => local sonic position
i_U = 0
for i in np.arange(i_1, i_3, 1):
if mu[i]>=1.0 and mu[i+1]<1.0:
i_U = i
break
#* Neglect small Mw bump near leading edge
if xu[i_1]<0.1 and mu[i_1]<1.10:
i_1=0; i_U=0; i_3=0
return i_F, i_1, i_U, i_3
@staticmethod
def check_singleshock(xu, mu, dMw, d2Mw, dMwcri_1, info=False):
'''
Check whether is single shock wave or not
>>> flag, i_F, i_1, i_U, i_3 = check_singleshock(xu, mu, dMw, d2Mw, dMwcri_1)
### Inputs:
```text
xu: ndarray, x location
mu: ndarray, wall Mach number of upper surface
dMw: ndarray, slope of wall Mach number
dMwcri_1: critical value locating shock wave front
```
### flag:
```text
1: single shock wave
0: shockless
-1: multiple shock waves
```
'''
#* Get 1st shock
i_F, i_1, i_U, i_3 = PhysicalSec.shock_property(xu, mu, dMw, d2Mw, dMwcri_1)
d_F = dMw[i_F]
#* Check if shockless
# Check if Mw jump exists and M1>1.0
if d_F>dMwcri_1 or mu[i_1]<1.0 or i_1==0:
if info:
print(' Shockless: XF=%.2f MF=%.2f dM/dX=%.2f'%(xu[i_F], mu[i_F], d_F))
return 0, 0, 0, 0, 0
#* Check if 2nd shock wave exists
# Remove first shock
dm = dMw.copy()
d2m = d2Mw.copy()
nn = xu.shape[0]
for i in np.arange(i_F, nn, 1, dtype=int):
if dm[i]<=0.0:
dm[i]=0.0
d2m[i]=0.0
else:
break
for i in np.arange(i_F, 0, -1, dtype=int):
if dm[i]<=0.0:
dm[i]=0.0
d2m[i]=0.0
else:
break
# Locate second shock
dMwcri_F = max(dMwcri_1, 0.5*d_F)
_iF, _i1, _iU, _i3 = PhysicalSec.shock_property(xu, mu, dm, d2m, dMwcri_1)
if dm[_iF]<dMwcri_F and _i1!=0 and _i3!=0:
# Locate sharp change of Mw
if mu[_i1]>1.0 and mu[_i3]<1.05:
# Check supersonic wave front and 'subsonic' wave hind
if info:
print(' Second shock: X1=%.2f M1=%.2f M2=%.2f'%(xu[_i1], mu[_i1], mu[_i3]))
return -1, 0, 0, 0, 0
return 1, i_F, i_1, i_U, i_3
def aux_features(self):
'''
Calculate auxiliary features based on basic, geo, and shock features.
### Get value of: Length, lSW, DCp, Err, DMp, FSp, kaf,
### CLU, CLL, CLw, Cdw, CLl, Cdl
'''
X = self.x
Y = self.y
x1 = self.xf_dict['1'][2]
n0 = len(X)
self.xf_dict['L1U'][1] = self.xf_dict['U'][2] - x1
self.xf_dict['L13'][1] = self.xf_dict['3'][2] - x1
self.xf_dict['LSR'][1] = self.xf_dict['R'][2] - self.xf_dict['S'][2]
self.xf_dict['DCp'][1] = self.getValue('3','Cp') - self.getValue('1','Cp')
cosA = | np.cos(self.AoA/180.0*np.pi) | numpy.cos |
# Imports
from PIL import Image
import torch
import numpy as np
from torchvision import datasets, transforms, models
from torch import nn, optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
import seaborn as sns
import json
def load_category_json(file_path):
"""
Load the mapping between categories and names
"""
with open(file_path, 'r') as f:
cat_to_name = json.load(f)
return cat_to_name
def load_data(path = './flowers'):
"""
Load the data in, and create relevant data loaders for use during training and testing
"""
train_dir = path + '/train'
valid_dir = path + '/valid'
test_dir = path + '/test'
# Transforms for training includes random rotations, flips and normalisations
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# Transforms for validation
validation_transforms = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# Transforms for testing
test_transforms = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# datasets =
train_data = datasets.ImageFolder(train_dir, transform = train_transforms)
valid_data = datasets.ImageFolder(valid_dir, transform = validation_transforms)
test_data = datasets.ImageFolder(test_dir, transform = test_transforms)
# TODO: Using the image datasets and the trainforms, define the dataloaders
# dataloaders =
trainloader = torch.utils.data.DataLoader(train_data, batch_size = 64, shuffle = True)
validloader = torch.utils.data.DataLoader(valid_data, batch_size = 32, shuffle = True)
testloader = torch.utils.data.DataLoader(test_data, batch_size = 32, shuffle = True)
return trainloader, validloader, testloader, train_data
def classifier(model, hidden_layer_size, architecture):
"""
Define our trainable classifier network to work on top of the pretrained model
"""
for param in model.parameters():
param.requires_grad = False
start_size = {"vgg16":25088,"densenet121":1024}
class Classifier(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(start_size[architecture], hidden_layer_size)
self.fc2 = nn.Linear(hidden_layer_size, 102)
self.dropout = nn.Dropout(p=0.5)
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.dropout(F.relu(self.fc1(x)))
x = F.log_softmax(self.fc2(x), dim=1)
return x
return Classifier()
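# Illustrative usage (hypothetical hidden layer size, not part of the original script):
#   model = models.vgg16(pretrained=True)
#   model.classifier = classifier(model, hidden_layer_size=512, architecture='vgg16')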
def save_checkpoint(model, save_path, architecture, hidden_layer_size, train_data):
"""
Save the checkpoint for a model at the given path
"""
model.class_to_idx = train_data.class_to_idx
# Transfer the cpu for saving
model.to('cpu')
checkpoint = {'architecture':architecture,
'state_dict': model.state_dict(),
'class_to_idx': train_data.class_to_idx,
'hidden_layer_size':hidden_layer_size}
torch.save(checkpoint, save_path)
def load_checkpoint(path):
"""
Load a saved model checkpoint and return the model
"""
checkpoint = torch.load(path)
architecture = checkpoint['architecture']
hidden_layer_size = checkpoint['hidden_layer_size']
# Declare the model prior to the exec function as it isn't capable of doing so
# model = 0
# Recreate the model using the checkpoint saves
# exec('model = models.{}(pretrained = True)'.format(architecture))
if architecture == 'densenet121':
model = models.densenet121(pretrained = True)
else:
model = models.vgg16(pretrained = True)
# Make sure that the model is the right shape and size
model.classifier = classifier(model, hidden_layer_size, architecture)
# Load the categories dictionary and model state
model.class_to_idx = checkpoint['class_to_idx']
model.load_state_dict(checkpoint['state_dict'])
return model
def train_network(epochs, trainloader, validloader, optimiser, device, criterion, model):
"""
Train a model using the given data loaders, optimiser and loss function
"""
steps = 0
print_every = 32
print('Training start\n')
model.to(device)
# Loop through each epoch for training
for epoch in range(epochs):
running_loss = 0
# Go through the training batches
for inputs_1, labels_1 in trainloader:
# Update the steps progress
steps += 1
# Move input and label tensors to the default device so they are available
inputs_1, labels_1 = inputs_1.to(device), labels_1.to(device)
optimiser.zero_grad()
outputs = model.forward(inputs_1)
loss = criterion(outputs, labels_1)
# Update the gradients
loss.backward()
optimiser.step()
running_loss += loss.item()
# Only print every n steps
if steps % print_every == 0:
# Turn off the dropout for the validation phase so that all inputs are used
model.eval()
validation_loss = 0
accuracy = 0
with torch.no_grad():
for inputs_2, labels_2 in validloader:
optimiser.zero_grad()
# Move the validation examples to the relevant device
inputs_2, labels_2 = inputs_2.to(device), labels_2.to(device)
model.to(device)
# Get the outputs from the model
outputs_2 = model.forward(inputs_2)
# Calculate the loss
batch_loss = criterion(outputs_2, labels_2)
validation_loss += batch_loss.item()
# Find the probabilities
ps = torch.exp(outputs_2)
# Get the top result
top_p, top_class = ps.topk(1, dim=1)
# Calculate accuracy
equals = top_class == labels_2.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
print(f"Epoch: {epoch+1}/{epochs} - "
f"Train loss: {running_loss/print_every:.4f} - "
f"Validation loss: {validation_loss/len(validloader):.4f} - "
f"Validation accuracy: {accuracy/len(validloader):.4f}")
# Set the model back to training mode with dropout included for training segment
model.train()
# Reset running_loss
running_loss = 0
# Note the end of the network training
print('\nTraining end')
def process_image(image_path):
"""
Scales, crops, and normalizes a PIL image for a PyTorch model
Returns - Numpy array
"""
image = Image.open(image_path)
image_processing = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
image_tensor = image_processing(image)
# Must return a numpy array so that it can be transposed for plotting
return image_tensor.numpy()
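# Illustrative usage (hypothetical image path):
#   img = process_image('flowers/test/1/image_06743.jpg')   # -> numpy array of shape (3, 224, 224)
#   imshow(img)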
def imshow(image, ax=None, title=None):
"""
Print the given image
"""
if ax is None:
fig, ax = plt.subplots()
# PyTorch tensors assume the color channel is the first dimension
# but matplotlib assumes is the third dimension
image = image.transpose((1, 2, 0))
# Undo preprocessing
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
# Image needs to be clipped between 0 and 1 or it looks like noise when displayed
image = | np.clip(image, 0, 1) | numpy.clip |
import numpy as np
import math
import sys, copy
sys.path.insert(0,'../Robots')
import robot_toy_example as robot_model
import uvs as uvss
import time
robot = robot_model.toy_blocks_robot()
estimate_jacobian_random_motion_range = [2, 5]
step_normalize_range = [2, 3]
uvs = uvss.UVS(robot, 0.5, 0.1, 2, step_normalize_range,estimate_jacobian_random_motion_range)
trajectory = []
def set_intermediate_target(dx, dy):
current_joints = robot.current_joints()
robot.set_intermediate_target_fake(current_joints[0] + dx, current_joints[1] + dy)
def estimate_jacobian(trials=3):
current_joints = robot.current_joints()
delta_joints = []
delta_errors = []
prev_error = robot.current_joints()
for i in range(trials):
input("Press Enter to continue...")
r_motion = uvs.estimate_jacobian_random_motion(i)
r_error = robot.current_joints()
delta_error = r_error - prev_error
if uvs.estimate_jacobian_motion_quality(delta_error):
delta_joints.append(r_motion)
delta_errors.append(delta_error)
# move back to current joints
input("Press Enter to continue...")
robot.move_to(current_joints)
print('back to origin')
delta_joints = | np.asarray(delta_joints) | numpy.asarray |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 4 11:01:16 2015
@author: hehu
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.svm import SVC, LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from scipy.linalg import eig
def gaussian(x, mu, sig):
return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
def visualize(X, y, clf):
fig, ax = plt.subplots(figsize=[6,6])
plt.axis('equal')
# create a mesh to plot in
#x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
#y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
x_min, x_max = -9, 3
y_min, y_max = -7, 5
if clf is not None:
h = .01 # step size in the mesh
xx, yy = np.meshgrid(np.arange(x_min-1, x_max+1, h),
np.arange(y_min-1, y_max+1, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap='bwr', alpha=0.5) #plt.cm.Paired cmap='bwr',
ymin, ymax = ax.get_ylim()
xmin, xmax = ax.get_xlim()
if clf.kernel == "linear":
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-10, 5, 500)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy + a * margin
yy_up = yy - a * margin
ax.plot(xx, yy, 'k-')
ax.plot(xx, yy_down, 'k--')
ax.plot(xx, yy_up, 'k--')
for svIdx in range(clf.support_vectors_.shape[0]):
sv = [clf.support_vectors_[svIdx, 0], clf.support_vectors_[svIdx, 1]]
ax.annotate("Support Vectors",
sv,
xytext=(-6, 3),
size=13,
bbox=dict(boxstyle="round4", fc="w", ec = "g"),
arrowprops=dict(arrowstyle="simple",
connectionstyle="arc3,rad=0.2",
shrinkA = 0,
shrinkB = 8,
fc = "g",
ec = "g"),
horizontalalignment='center',
verticalalignment='middle')
# Plot margin
x0 = -0.5
y0 = a * x0 - (clf.intercept_[0]) / w[1]
distances = np.hypot(x0 - xx, y0 - yy_down)
minIdx = np.argmin(distances)
x1 = xx[minIdx]
y1 = yy_down[minIdx]
ax.annotate("",
xy=(x0, y0), xycoords='data',
xytext=(x1, y1), textcoords='data',
arrowprops=dict(arrowstyle="<->",
connectionstyle="arc3"),
)
distances = np.hypot(x0 - xx, y0 - yy_up)
minIdx = np.argmin(distances)
x2 = xx[minIdx]
y2 = yy_up[minIdx]
ax.annotate("",
xy=(x0, y0), xycoords='data',
xytext=(x2, y2), textcoords='data',
arrowprops=dict(arrowstyle="<->",
connectionstyle="arc3"),
)
ax.annotate("Margin",
(0.5*(x0+x1), 0.5*(y0+y1)),
xytext=(1.5, -6.7),
size=13,
bbox=dict(boxstyle="round4", fc="w", ec = "g"),
arrowprops=dict(arrowstyle="simple",
connectionstyle="arc3,rad=-0.2",
shrinkA = 0,
shrinkB = 8,
fc = "g",
ec = "g"),
horizontalalignment='center',
verticalalignment='middle')
ax.annotate("Margin",
(0.5*(x0+x2), 0.5*(y0+y2)),
xytext=(1.5, -6.7),
size=13,
bbox=dict(boxstyle="round4", fc="w", ec = "g"),
arrowprops=dict(arrowstyle="simple",
connectionstyle="arc3,rad=-0.2",
shrinkA = 0,
shrinkB = 8,
fc = "g",
ec = "g"),
horizontalalignment='center',
verticalalignment='middle')
ax.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
#ax.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
ax.set_ylim(y_min, y_max)
ax.set_xlim(x_min, x_max)
X1 = X[y==1, :]
X2 = X[y==0, :]
ax.plot(X1[:, 0], X1[:, 1], 'ro', zorder = 1, alpha = 0.6)
ax.plot(X2[:, 0], X2[:, 1], 'bx', zorder = 1)
def generate_data(N):
X1 = np.random.randn(2,N)
X2 = np.random.randn(2,N)
M1 = 0.7*np.array([[1.5151, -0.1129], [0.1399, 0.6287]])
M2 = 0.7*np.array([[0.8602, 1.2461], [-0.0737, -1.5240]])
T1 = np.array([-1, 1]).reshape((2,1))
T2 = np.array([-2, -5]).reshape((2,1))
X1 = np.dot(M1, X1) + np.tile(T1, [1,N])
X2 = np.dot(M2, X2) + np.tile(T2, [1,N])
X1 = X1[::-1,:]
X2 = X2[::-1,:]
return X1, X2
if __name__ == "__main__":
plt.close("all")
# Generate random training data
N = 200
np.random.seed(2014)
X1, X2 = generate_data(N)
X = | np.concatenate((X1.T, X2.T)) | numpy.concatenate |
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import math
import argparse
import itertools
import csv
from scipy.stats import linregress
from scipy.optimize import minimize
read_num_seq_lineage_global = None
read_num_min_seq_lineage_global = None
read_depth_seq_global = None
t_seq_global = None
kappa_global = None
x_mean_global = None
def fun_estimate_parameters(x, read_num_seq, t_seq, kappa=2.5, fitness_type='m'):
# ------------------------------------------------------------------------------------------------------------------
# A SUB-FUNCTION CALLED BY MAIN FUNCTION main() TO CALCULATE THE LOG LIKELIHOOD VALUE OF EACH GENOTYPE GIVEN ITS
# FITNESS, THE ESTIMATED READ NUMBER PER GENOTYPE PER SEQUENCING TIME-POINT, AND THE ESTIMATED MEAN FITNESS PER
# SEQUENCING TIME-POINT
#
# INPUTS
# --x: fitness of each genotype, [x1, x2, ...]
# --read_num_seq: read number per genotype at each sequencing time-point
# --t_seq: sequenced time-points in number of generations, [0, t1, t2, ...]
# --kappa: a noise parameter that characterizes the total noise introduced by growth, cell transfer, DNA extraction,
# PCR, and sequencing (To measure kappa empirically, see the reference: [Levy SF, et al. Quantitative
# Evolutionary Dynamics Using High-resolution Lineage Tracking. Nature, 519: 181–186 (2015)].)
# (default: 2.5)
# --fitness_type: type of fitness: Wrightian fitness (w), or Malthusian fitness (m)' (default: m)
#
# OUTPUTS
# --estimate_parameters_output: log likelihood value of each genotype,
# estimated reads number per genotype per sequencing time-point,
# estimated mean fitness per sequencing time-point, [x_mean(t0),x_mean(t1),...]
# ------------------------------------------------------------------------------------------------------------------
read_num_seq = read_num_seq.astype(float)
read_num_seq[read_num_seq == 0] = 1e-1
read_depth_seq = np.sum(read_num_seq, axis=0)
lineages_num, seq_num = read_num_seq.shape
read_num_min_seq = np.zeros((lineages_num, seq_num))
read_num_min_seq[:, 0] = read_num_seq[:, 0]
for i in range(1, seq_num):
read_num_min_seq[:, i] = read_num_min_seq[:, i - 1] / 2 ** (t_seq[i] - t_seq[i - 1])
x[x <= -1] = -1 + 1e-7
x_mean = np.zeros(seq_num)
read_num_seq_est = np.zeros((lineages_num, seq_num))
read_num_seq_est[:, 0] = read_num_seq[:, 0]
likelihood_log_seq = np.zeros((lineages_num, seq_num))
if fitness_type == 'w':
for i in range(1, seq_num):
            x_mean[i] = np.maximum(np.dot(x, read_num_seq[:, i]) / read_depth_seq[i], 0)
read_num_est_tempt = np.exp((t_seq[i] - t_seq[i - 1]) * (np.log(1 + x) + 1)
- (t_seq[i] - t_seq[i - 1]) / (x_mean[i] - x_mean[i - 1])
* ((x_mean[i] + 1) * np.log(x_mean[i] + 1)
- (x_mean[i - 1] + 1) * np.log(x_mean[i - 1] + 1)))
read_num_est_tempt = read_num_est_tempt * read_num_seq[:, i - 1] / read_depth_seq[i - 1] * read_depth_seq[i]
read_num_seq_est[:, i] = np.max([read_num_est_tempt, read_num_min_seq[:, i]], axis=0)
x_mean[i] = np.dot(x, read_num_seq_est[:, i]) / np.sum(read_num_seq_est[:, i])
elif fitness_type == 'm':
for i in range(1, seq_num):
            x_mean[i] = np.maximum(np.dot(x, read_num_seq[:, i]) / read_depth_seq[i], 0)
read_num_est_tempt = np.exp((t_seq[i] - t_seq[i - 1]) * x
- (t_seq[i] - t_seq[i - 1]) * (x_mean[i] + x_mean[i - 1]) / 2)
read_num_est_tempt = read_num_est_tempt * read_num_seq[:, i - 1] / read_depth_seq[i - 1] * read_depth_seq[i]
read_num_seq_est[:, i] = np.max([read_num_est_tempt, read_num_min_seq[:, i]], axis=0)
x_mean[i] = np.dot(x, read_num_seq_est[:, i]) / np.sum(read_num_seq_est[:, i])
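    # Piecewise likelihood below: lineages with >= 20 reads at the previous
    # time-point use the kappa-scaled noise model; otherwise a Poisson model
    # is used, with Stirling's approximation when the current read count is
    # >= 10 and the exact factorial when it is < 10.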
pos1_r, pos1_c = np.where(read_num_seq[:, :-1] >= 20)
likelihood_log_seq[pos1_r, pos1_c + 1] = (0.25 * np.log(read_num_seq_est[pos1_r, pos1_c + 1])
- 0.5 * np.log(4 * np.pi * kappa)
- 0.75 * np.log(read_num_seq_est[pos1_r, pos1_c + 1])
- (np.sqrt(read_num_seq[pos1_r, pos1_c + 1])
- np.sqrt(read_num_seq_est[pos1_r, pos1_c + 1])) ** 2 / kappa)
pos_r, pos_c = np.where(read_num_seq[:, :-1] < 20)
pos_p1 = np.where(read_num_seq[pos_r, pos_c + 1] >= 10)[0]
pos_p2 = np.where(read_num_seq[pos_r, pos_c + 1] < 10)[0]
pos2_r = pos_r[pos_p1]
pos2_c = pos_c[pos_p1]
pos3_r = pos_r[pos_p2]
pos3_c = pos_c[pos_p2]
likelihood_log_seq[pos2_r, pos2_c + 1] = (np.multiply(read_num_seq[pos2_r, pos2_c + 1],
np.log(read_num_seq_est[pos2_r, pos2_c + 1]))
- read_num_seq_est[pos2_r, pos2_c + 1]
- np.multiply(read_num_seq[pos2_r, pos2_c + 1],
np.log(read_num_seq[pos2_r, pos2_c + 1]))
+ read_num_seq[pos2_r, pos2_c + 1]
- 0.5 * np.log(2 * np.pi * read_num_seq[pos2_r, pos2_c + 1]))
factorial_tempt = [float(math.factorial(i)) for i in read_num_seq[pos3_r, pos3_c + 1].astype(int)]
likelihood_log_seq[pos3_r, pos3_c + 1] = (np.multiply(read_num_seq[pos3_r, pos3_c + 1],
np.log(read_num_seq_est[pos3_r, pos3_c + 1]))
- read_num_seq_est[pos3_r, pos3_c + 1]
- np.log(factorial_tempt))
likelihood_log = np.sum(likelihood_log_seq, axis=1)
estimate_parameters_output = {'Likelihood_Log': likelihood_log,
'Estimated_Read_Number': read_num_seq_est,
'Estimated_Mean_Fitness': x_mean}
return estimate_parameters_output
def fun_likelihood_lineage_w(x):
# ------------------------------------------------------------------------------------------------------------------
# A SUB-FUNCTION CALLED BY MAIN FUNCTION main() TO CALCULATE THE SUM OF THE NEGATIVE LOG LIKELIHOOD VALUE OF ALL
# GENOTYPES GIVEN THE WRIGHTIAN FITNESS OF EACH GENOTYPE
#
# INPUTS
# --x: fitness of a genotype
#
# OUTPUTS
# --likelihood_log_lineage: the negative log likelihood value of the genotype
# ------------------------------------------------------------------------------------------------------------------
global read_num_seq_lineage_global
global read_num_min_seq_lineage_global
global read_depth_seq_global
global t_seq_global
global kappa_global
global x_mean_global
if x <= -1:
x = -1 + 1e-7
seq_num = read_num_seq_lineage_global.shape[0]
read_num_seq_lineage_est = np.zeros(seq_num)
read_num_seq_lineage_est[0] = read_num_seq_lineage_global[0]
likelihood_log_seq_lineage = np.zeros(seq_num)
for i in range(1, seq_num):
read_num_lineage_est_tempt = np.exp((t_seq_global[i] - t_seq_global[i - 1]) * (np.log(1 + x) + 1)
- (t_seq_global[i] - t_seq_global[i - 1]) / (
x_mean_global[i] - x_mean_global[i - 1])
* ((x_mean_global[i] + 1) * np.log(x_mean_global[i] + 1)
- (x_mean_global[i - 1] + 1) * np.log(x_mean_global[i - 1] + 1)))
read_num_lineage_est_tempt = (read_num_lineage_est_tempt * read_num_seq_lineage_global[i - 1]
/ read_depth_seq_global[i - 1] * read_depth_seq_global[i])
read_num_seq_lineage_est[i] = np.max([read_num_lineage_est_tempt.item(), read_num_min_seq_lineage_global[i]])
pos1 = np.where(read_num_seq_lineage_global[:-1] >= 20)[0]
likelihood_log_seq_lineage[pos1 + 1] = (0.25 * np.log(read_num_seq_lineage_est[pos1 + 1])
- 0.5 * np.log(4 * np.pi * kappa_global)
- 0.75 * np.log(read_num_seq_lineage_est[pos1 + 1])
- (np.sqrt(read_num_seq_lineage_global[pos1 + 1])
- np.sqrt(read_num_seq_lineage_est[pos1 + 1])) ** 2 / kappa_global)
pos = np.where(read_num_seq_lineage_global[:-1] < 20)[0]
pos_p1 = np.where(read_num_seq_lineage_global[pos + 1] >= 10)[0]
pos_p2 = np.where(read_num_seq_lineage_global[pos + 1] < 10)[0]
pos2 = pos[pos_p1]
pos3 = pos[pos_p2]
likelihood_log_seq_lineage[pos2 + 1] = (np.multiply(read_num_seq_lineage_global[pos2 + 1],
np.log(read_num_seq_lineage_est[pos2 + 1]))
- read_num_seq_lineage_est[pos2 + 1]
- np.multiply(read_num_seq_lineage_global[pos2 + 1],
np.log(read_num_seq_lineage_global[pos2 + 1]))
+ read_num_seq_lineage_global[pos2 + 1]
- 0.5 * np.log(2 * np.pi * read_num_seq_lineage_global[pos2 + 1]))
factorial_tempt = [float(math.factorial(i)) for i in read_num_seq_lineage_global[pos3 + 1].astype(int)]
likelihood_log_seq_lineage[pos3 + 1] = (np.multiply(read_num_seq_lineage_global[pos3 + 1],
np.log(read_num_seq_lineage_est[pos3 + 1]))
- read_num_seq_lineage_est[pos3 + 1]
- np.log(factorial_tempt))
likelihood_log_lineage = np.sum(likelihood_log_seq_lineage)
return -likelihood_log_lineage
def fun_likelihood_lineage_m(x):
# ------------------------------------------------------------------------------------------------------------------
# A SUB-FUNCTION CALLED BY MAIN FUNCTION main() TO CALCULATE THE SUM OF THE NEGATIVE LOG LIKELIHOOD VALUE OF ALL
# GENOTYPES GIVEN THE MALTHUSIAN FITNESS OF EACH GENOTYPE
#
# INPUTS
# --x: fitness of a genotype
#
# OUTPUTS
# --likelihood_log_lineage: the negative log likelihood value of the genotype
# ------------------------------------------------------------------------------------------------------------------
global read_num_seq_lineage_global
global read_num_min_seq_lineage_global
global read_depth_seq_global
global t_seq_global
global kappa_global
global x_mean_global
if x <= -1:
x = -1 + 1e-7
seq_num = read_num_seq_lineage_global.shape[0]
read_num_seq_lineage_est = np.zeros(seq_num)
read_num_seq_lineage_est[0] = read_num_seq_lineage_global[0]
likelihood_log_seq_lineage = np.zeros(seq_num)
for i in range(1, seq_num):
read_num_lineage_est_tempt = np.exp((t_seq_global[i] - t_seq_global[i - 1]) * x
- (t_seq_global[i] - t_seq_global[i - 1]) *
(x_mean_global[i] + x_mean_global[i - 1]) / 2)
read_num_lineage_est_tempt = (read_num_lineage_est_tempt * read_num_seq_lineage_global[i - 1]
/ read_depth_seq_global[i - 1] * read_depth_seq_global[i])
read_num_seq_lineage_est[i] = np.max([read_num_lineage_est_tempt.item(), read_num_min_seq_lineage_global[i]])
pos1 = np.where(read_num_seq_lineage_global[:-1] >= 20)[0]
likelihood_log_seq_lineage[pos1 + 1] = (0.25 * np.log(read_num_seq_lineage_est[pos1 + 1])
- 0.5 * np.log(4 * np.pi * kappa_global)
- 0.75 * np.log(read_num_seq_lineage_est[pos1 + 1])
- (np.sqrt(read_num_seq_lineage_global[pos1 + 1])
- np.sqrt(read_num_seq_lineage_est[pos1 + 1])) ** 2 / kappa_global)
pos = np.where(read_num_seq_lineage_global[:-1] < 20)[0]
pos_p1 = np.where(read_num_seq_lineage_global[pos + 1] >= 10)[0]
pos_p2 = np.where(read_num_seq_lineage_global[pos + 1] < 10)[0]
pos2 = pos[pos_p1]
pos3 = pos[pos_p2]
likelihood_log_seq_lineage[pos2 + 1] = (np.multiply(read_num_seq_lineage_global[pos2 + 1],
np.log(read_num_seq_lineage_est[pos2 + 1]))
- read_num_seq_lineage_est[pos2 + 1]
- np.multiply(read_num_seq_lineage_global[pos2 + 1],
np.log(read_num_seq_lineage_global[pos2 + 1]))
+ read_num_seq_lineage_global[pos2 + 1]
- 0.5 * np.log(2 * np.pi * read_num_seq_lineage_global[pos2 + 1]))
factorial_tempt = [float(math.factorial(i)) for i in read_num_seq_lineage_global[pos3 + 1].astype(int)]
likelihood_log_seq_lineage[pos3 + 1] = (np.multiply(read_num_seq_lineage_global[pos3 + 1],
np.log(read_num_seq_lineage_est[pos3 + 1]))
- read_num_seq_lineage_est[pos3 + 1]
- np.log(factorial_tempt))
likelihood_log_lineage = np.sum(likelihood_log_seq_lineage)
return -likelihood_log_lineage
def main():
# ------------------------------------------------------------------------------------------------------------------
# ESTIMATE FITNESS OF EACH GENOTYPE IN A COMPETITIVE POOLED GROWTH EXPERIMENT
#
# OPTIONS
# --input: a .csv file, with each column being the read number per genotype at each sequenced time-point
# --t_seq: sequenced time-points in number of generations (format: 0 t1 t2 ...)
# --max_iter_num: maximum number of iterations in the optimization (Small numbers can reduce running time
# and decrease accuracy.) (default: 10)
# --kappa: a noise parameter that characterizes the total noise introduced by growth, cell transfer,
# DNA extraction, PCR, and sequencing (To measure kappa empirically, see the reference:
# [<NAME>, et al. Quantitative Evolutionary Dynamics Using High-resolution Lineage Tracking.
# Nature, 519: 181–186 (2015)].) (default: 2.5)
# --regression_num: number of points used in the initial linear-regression-based fitness estimate (default: 2)
# --fitness_type: type of fitness: Wrightian fitness (w), or Malthusian fitness (m)' (default: m)
# --output_filename: prefix of output .csv files (default: output)
#
# OUTPUTS
# output_filename_FitSeq_Result.csv: 1st column: estimated fitness of each genotype, [x1, x2, ...],
# 2nd column: log likelihood value of each genotype, [f1, f2, ...],
# 3rd column: estimated mean fitness per sequenced time-point
# [x_mean(0), x_mean(t1), ...],
    #                                      4th column+: estimated read number per genotype per sequencing time-point,
# with each time-point being a column
# ------------------------------------------------------------------------------------------------------------------
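    # Example invocation (file names and time-points here are hypothetical,
    # shown only for illustration):
    #   python fitseq.py -i read_num_seq.csv -t 0 8 16 24 32 -o my_experiment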
global read_num_seq_lineage_global
global read_num_min_seq_lineage_global
global read_depth_seq_global
global t_seq_global
global kappa_global
global x_mean_global
parser = argparse.ArgumentParser(description='Estimate fitness of each genotype in a competitive pooled growth '
'experiment', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', type=str, help='a .csv file: with each column being the read number per '
'genotype at each sequenced time-point')
parser.add_argument('-t', '--t_seq', nargs='*', type=float, help='sequenced time-points in number of generations')
parser.add_argument('-m', '--max_iter_num', type=int, default=10,
help='maximum number of iterations in the optimization')
parser.add_argument('-k', '--kappa', type=float, default=2.5,
help='a noise parameter that characterizes the total noise introduced by growth, '
'cell transfer, DNA extraction, PCR, and sequencing (To measure kappa empirically, '
'see the reference: [<NAME>, et al. Quantitative Evolutionary Dynamics Using '
'High-resolution Lineage Tracking. Nature, 519: 181–186 (2015)].)')
parser.add_argument('-g', '--regression_num', type=int, default=2,
help='number of points used in the initial linear-regression-based fitness estimate')
parser.add_argument('-f', '--fitness_type', type=str, default='m',
help='type of fitness: Wrightian fitness (w), or Malthusian fitness (m)')
parser.add_argument('-o', '--output_filename', type=str, default='output', help='prefix of output .csv files')
args = parser.parse_args()
read_num_seq = np.array(pd.read_csv(args.input, header=None), dtype=float)
    t_seq = np.array(args.t_seq, dtype=float)
# coding:utf-8
'''
created on 2018/9/27
@author:Dxq
'''
from PIL import Image
import os
import scipy.io as scio
import numpy as np
import cv2
import functools
import time
import urllib.request
from common.utils import get_baseInfo_tx
skin_triangles = scio.loadmat("resource/mat/triangle_matrix_skin_nose.mat")['triangle']
triangles = scio.loadmat("resource/mat/triangle_matrix.mat")['triangle']
def time_cal(func):
@functools.wraps(func)
def wrapper(*args, **kw):
t1 = time.time()
        r = func(*args, **kw)  # run the wrapped function first and store its return value instead of printing directly
        if time.time() - t1 > 0.001:
            print('Function %s took %f seconds to execute' % (func.__name__, time.time() - t1))
return r
return wrapper
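# Hedged usage sketch of the decorator above; this helper is a made-up
# illustration only and is not called anywhere else in this module.
@time_cal
def _example_timed_call():
    time.sleep(0.002)  # long enough for time_cal to print the elapsed time
    return True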
def get_landmark_dict(file_path, status='local'):
landmark_dict = get_baseInfo_tx(file_path, status)
# if landmark_dict['roll'] != 0:
# Image.open(file_path).rotate(-landmark_dict['roll']).save(file_path)
# landmark_dict = get_baseInfo_tx(file_path)
return landmark_dict
def get_temp_landmark_dict(file_path):
mat_file = file_path.split(".")[0] + '.mat'
if os.path.exists(mat_file):
landmark_dict = scio.loadmat(mat_file)
else:
landmark_dict = get_baseInfo_tx(file_path)
scio.savemat(mat_file, landmark_dict)
return landmark_dict
def check_right_eye(points):
fixed_points = points.copy()
if points[0][0] < points[1][0]:
fixed_points[0] = points[4]
fixed_points[1] = points[3]
fixed_points[2] = points[2]
fixed_points[3] = points[1]
fixed_points[4] = points[0]
fixed_points[5] = points[7]
fixed_points[6] = points[6]
fixed_points[7] = points[5]
return fixed_points
def check_left_eye(points):
fixed_points = points.copy()
if points[0][0] > points[1][0]:
fixed_points[0] = points[4]
fixed_points[1] = points[5]
fixed_points[2] = points[6]
fixed_points[3] = points[7]
fixed_points[4] = points[0]
fixed_points[5] = points[1]
fixed_points[6] = points[2]
fixed_points[7] = points[3]
return fixed_points
def check_face_profile(points):
# fixed_points = points[16:37]
# v_x = 2 * points[10][0]
#
# left_p = [[v_x - p[0], p[1]] for p in fixed_points[11:][::-1]]
# right_p = [[v_x - p[0], p[1]] for p in fixed_points[:10][::-1]]
# merge_p = np.vstack((left_p, fixed_points[10]))
# merge_p = np.vstack((merge_p, right_p))
# fixed_points = (fixed_points + merge_p) / 2
#
# m1 = get_similarity_matrix(fixed_points, merge_p,True)
# fixed_points2 = landmark_trans_by_m(points, m1)
# print(m1)
return points
def get_points(landmark_dict):
'''
:param landmark_dict:
    :return: left eye 0-7, left eyebrow 8-15, face profile 16-36, nose 37-49, mouth 50-71, right eyebrow 72-79, right eye 80-87, 88-89 left/right eyeball centers
'''
def _get_eye_center(points):
eye_center = [(points[0] + points[4])[0] // 2, (points[2] + points[6])[1] // 2]
return eye_center
p0 = np.vstack([check_left_eye(landmark_dict['left_eye']), landmark_dict['left_eyebrow']])
p1 = np.vstack([p0, landmark_dict['face_profile']])
p2 = np.vstack([p1, landmark_dict['nose']])
p3 = np.vstack([p2, landmark_dict['mouth']])
p4 = np.vstack([p3, landmark_dict['right_eyebrow']])
p5 = np.vstack([p4, check_right_eye(landmark_dict['right_eye'])])
p6 = np.vstack([p5, [_get_eye_center(landmark_dict['left_eye']), _get_eye_center(landmark_dict['right_eye'])]])
p6 = check_face_profile(p6)
return p6, [tuple(p) for p in p6]
def get_similarity_matrix(orange_points, tree_points, fullAffine=False):
'''
dst->src 的变换矩阵
:param dst_points: 目标特征点
:param src_points: 底图特征点
:return: matrix
'''
m = cv2.estimateRigidTransform(np.array(orange_points), np.array(tree_points), fullAffine)
if m is None:
        print('estimateRigidTransform failed; falling back to getAffineTransform')
m = cv2.getAffineTransform(np.float32(orange_points[:3]), np.float32(tree_points[:3]))
return m
def save_img(img_array, save_name, ifsave):
if ifsave:
cv2.imwrite(save_name, img_array)
def landmark_trans_by_m(points, m):
p1 = np.transpose(points, [1, 0])
p2 = np.pad(p1, ((0, 1), (0, 0)), 'constant', constant_values=(1, 1))
p3 = np.matmul(m, p2)
p4 = np.transpose(p3, [1, 0])
return p4
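# Hedged sketch (not used elsewhere): apply a pure-translation affine matrix to
# two toy points to show the (N, 2) -> (N, 2) contract of landmark_trans_by_m.
def _example_landmark_translation():
    pts = np.array([[0.0, 0.0], [10.0, 5.0]])
    m = np.array([[1.0, 0.0, 3.0],
                  [0.0, 1.0, -2.0]])  # shift x by +3 and y by -2
    return landmark_trans_by_m(pts, m)  # -> [[3., -2.], [13., 3.]]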
def get_measure_triangle():
return [list(t.astype(np.int32)) for t in triangles]
def get_measure_triangle_skin():
return [list(t.astype(np.int32)) for t in skin_triangles]
def affine_transform(src, src_tri, dst_tri, size):
warp_mat = cv2.getAffineTransform(np.float32(src_tri), np.float32(dst_tri))
# warp_mat = cv2.estimateRigidTransform(np.array(src_tri), np.array(dst_tri), True)
dst = cv2.warpAffine(src, warp_mat, (size[0], size[1]),
None,
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REFLECT_101)
return dst
def morph_triangle(src, dst, img, face_mask, t_src, t_dst, t, base_alpha, step=0):
    # t_src, t_dst, t are, respectively, the triangle vertex coordinates of
    # the source, destination, and blended feature points
r1 = cv2.boundingRect(np.float32([t_src]))
r2 = cv2.boundingRect(np.float32([t_dst]))
r = cv2.boundingRect(np.float32([t]))
    # bounding rectangle of each triangle, in the format (xmin, ymin, width, height)
t1_rect = []
t2_rect = []
t_rect = []
for i in range(0, 3):
t_rect.append(((t[i][0] - r[0]), (t[i][1] - r[1])))
t1_rect.append(((t_src[i][0] - r1[0]), (t_src[i][1] - r1[1])))
t2_rect.append(((t_dst[i][0] - r2[0]), (t_dst[i][1] - r2[1])))
    # convert the coordinates to be relative to the top-left corner of the bounding rectangle
mask = np.zeros((r[3], r[2], 3), dtype=np.float32)
    # rectangular region that contains the subdivision triangle
cv2.fillConvexPoly(mask, np.int32(t_rect), (1., 1., 1.))
    # fill the subdivision triangle
img1_rect = src[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
img2_rect = dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]]
size = (r[2], r[3])
warp_img_src = affine_transform(img1_rect, t1_rect, t_rect, size)
warp_img_dst = affine_transform(img2_rect, t2_rect, t_rect, size)
# alpha = 0.5 if step > 49 else alpha
if step < 16:
        # print('eyes')
alpha = min(1.25 * base_alpha, 1.0)
elif step < 28:
        # print('nose')
alpha = min(1.0 * base_alpha, 1.0)
elif step < 40:
        # print('eyebrows')
alpha = min(1.13 * base_alpha, 1.0)
elif step < 50:
        # print('eyebrows')
alpha = min(1.25 * base_alpha, 1.0)
else:
alpha = min(1.0 * base_alpha, 1.0)
img_rect = (1.0 - alpha) * warp_img_src + alpha * warp_img_dst
img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] = img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] * (1 - mask) + img_rect * mask
face_mask[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] = face_mask[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] * (
1 - mask[:, :, 0]) + 255 * mask[:, :, 0]
return img, face_mask
def affine_triangle(src, dst, t_src, t_dst):
r1 = cv2.boundingRect(np.float32([t_src]))
r2 = cv2.boundingRect(np.float32([t_dst]))
t1_rect = []
t2_rect = []
t2_rect_int = []
for i in range(0, 3):
t1_rect.append((t_src[i][0] - r1[0], t_src[i][1] - r1[1]))
t2_rect.append((t_dst[i][0] - r2[0], t_dst[i][1] - r2[1]))
t2_rect_int.append((t_dst[i][0] - r2[0], t_dst[i][1] - r2[1]))
mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)
cv2.fillConvexPoly(mask, np.int32(t2_rect_int), (1.0, 1.0, 1.0))
img1_rect = src[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
size = (r2[2], r2[3])
img2_rect = affine_transform(img1_rect, t1_rect, t2_rect, size)
img2_rect = img2_rect * mask
# (1620, 280, 3)
# (800, 0, 820, 1620)
dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] * (
(1.0, 1.0, 1.0) - mask)
dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] + img2_rect
def morph_img(tree_img, tree_points, orange_img, orange_points, alpha):
def _get_morph_points(_tree_points, _orange_points, alphas):
'''
:param src_points:
:param dst_points:
        :param alphas: eye_alpha, face_alpha, other_alpha are the fractions contributed by dst for the eyes, face, and remaining points, respectively
:return:
'''
eye_alpha, face_alpha, other_alpha = alphas
_morph_points = (1 - other_alpha) * _tree_points + other_alpha * _orange_points
other_alpha2 = .5
_mask_points = (1 - other_alpha2) * _tree_points + other_alpha2 * _orange_points
eye_points = (1 - eye_alpha) * _tree_points + eye_alpha * _orange_points
face_points = (1 - face_alpha) * _tree_points + face_alpha * _orange_points
m1 = get_similarity_matrix(_morph_points[0:8] - _morph_points[88], eye_points[0:8] - eye_points[88])
_morph_points[0:8] = landmark_trans_by_m(_morph_points[0:8] - _morph_points[88], m1) + _morph_points[88]
m2 = get_similarity_matrix(_morph_points[80:88] - _morph_points[89], eye_points[80:88] - eye_points[89])
_morph_points[80:88] = landmark_trans_by_m(_morph_points[80:88] - _morph_points[89], m2) + _morph_points[89]
m3 = get_similarity_matrix(_morph_points[16:37] - _morph_points[26], face_points[16:37] - face_points[26])
_morph_points[16:37] = landmark_trans_by_m(_morph_points[16:37] - _morph_points[26], m3) + _morph_points[26]
return _mask_points, _morph_points,
tree_img = tree_img.astype(np.float32)
orange_img = orange_img.astype(np.float32)
res_img = np.zeros(tree_img.shape, dtype=tree_img.dtype)
_face_mask = np.zeros(orange_img.shape[:2], dtype=np.uint8)
mask_points, morph_points_ = _get_morph_points(tree_points, orange_points, alpha[:3])
# morph_points = dst_points
    # src_point format: [(x, y), (x, y), ...]
    # from the 88 landmark points, get the point indices of each of the 149 triangles in the triangulation
dt = get_measure_triangle()[47:]
for i in range(0, len(dt)):
t1 = []
t2 = []
t = []
for j in range(0, 3):
t1.append(tree_points[dt[i][j]])
t2.append(orange_points[dt[i][j]])
t.append(mask_points[dt[i][j]])
_, face_maskk = morph_triangle(tree_img, orange_img, res_img, _face_mask, t1, t2, t, alpha[3], i)
return res_img, morph_points_, face_maskk
def tran_src(tree_img, tree_points, orange_points):
"""
    Apply per-triangle affine transforms to warp the template image's face contour onto the target image's face contour
:param src_img:
:param src_points:
:param dst_points:
:param face_area:
:return:
"""
h, w, c = tree_img.shape
h -= 1
w -= 1
mask_area = cv2.boundingRect(np.float32([orange_points]))
start_x = max(.9 * mask_area[0], 1)
start_y = max(.9 * mask_area[1], 1)
end_x = min(start_x + 1.2 * mask_area[2], w - 10)
end_y = min(start_y + 1.2 * mask_area[3], h - 10)
sum_x = start_x + end_x
sum_y = start_y + end_y
bound_area = np.int32([
[start_x, start_y], [end_x, start_y], [end_x, end_y], [start_x, end_y],
[0, 0], [w, 0], [w, h], [0, h],
[0.5 * sum_x, start_y], [end_x, 0.5 * sum_y], [0.5 * sum_x, end_y], [start_x, 0.5 * sum_y]
])
tree_list = np.vstack([tree_points, bound_area])
orange_list = np.vstack([orange_points, bound_area])
res_img = np.zeros(tree_img.shape, dtype=tree_img.dtype)
dt = get_measure_triangle()
for i in range(0, len(dt)):
t_src = []
t_dst = []
for j in range(0, 3):
t_src.append(tree_list[dt[i][j]])
t_dst.append(orange_list[dt[i][j]])
affine_triangle(tree_img, res_img, t_src, t_dst)
return res_img
def merge_img(orange_img, tree_img, face_mask, orange_points, mat_rate=.88):
r = cv2.boundingRect(np.float32([orange_points]))
center = (r[0] + int(r[2] / 2), r[1] + int(int(r[3] / 2)))
mat = cv2.getRotationMatrix2D(center, 0, mat_rate)
face_mask = cv2.warpAffine(face_mask, mat, (face_mask.shape[1], face_mask.shape[0]))
# face_mask = cv2.blur(face_mask, (3, 3))
# face_mask = cv2.GaussianBlur(face_mask, (27, 27), 1)
# kernel = np.ones((60, 60), np.uint8)
    # face_mask = cv2.dilate(face_mask, kernel)  # dilation
    # face_mask = cv2.erode(face_mask, kernel)  # erosion
# face_mask = cv2.medianBlur(face_mask, 19)
res = cv2.seamlessClone(np.uint8(orange_img), np.uint8(tree_img), face_mask, center, 1)
return res
def toushi_img(orange_img, orange_points, tree_points, yaw=0):
if abs(yaw) <= 5:
rate = 0.1
else:
rate = min(abs(yaw), 12) / 12
_tree = rate * tree_points + (1 - rate) * orange_points
pts1 = np.float32([orange_points[17], orange_points[18], orange_points[34], orange_points[35]])
pts2 = np.float32([_tree[17], _tree[18], _tree[34], _tree[35]])
M = cv2.getPerspectiveTransform(pts1, pts2)
p2 = np.pad(orange_points, ((0, 0), (0, 1)), 'constant', constant_values=(1, 1))
new_data1 = np.matmul(p2, M.T)
new_data1 = new_data1 / np.repeat(new_data1[:, 2:3], 3, axis=1)
new_orange_points = new_data1[:, :2]
new_orange_img = cv2.warpPerspective(orange_img, M, (2 * orange_img.shape[1], 2 * orange_img.shape[0]))
return new_orange_img, new_orange_points
def resize_img(img_array, fusion_face_wid):
img_array = img_array[..., [2, 1, 0, 3]]
img = Image.fromarray(np.uint8(img_array), "RGBA")
wid, hei = img.size
std_face_wid = 257
fixed_loc = [500, 500]
# rate = std_face_wid / fusion_face_wid
    # could be optimized with a more reasonable comparison metric
rate = max(0.93, std_face_wid / fusion_face_wid)
img = img.resize([int(rate * wid), int(rate * hei)])
wid2, hei2 = img.size
diff_x = abs(int((rate - 1) * fixed_loc[0]))
diff_y = abs(int((rate - 1) * fixed_loc[1]))
if wid2 <= wid:
rr = ((diff_y, wid - wid2 - diff_y), (diff_x, wid - wid2 - diff_x), (0, 0))
image = np.pad(np.array(img), rr, mode='constant', constant_values=(0, 0))
img = Image.fromarray(np.uint8(image))
else:
img = img.crop([diff_x, diff_y, diff_x + wid, diff_y + hei])
return img
def get_data_analysis(skin_ori):
skin_ori_flatten = skin_ori.reshape([-1, 1])
skin_ori_index = np.flatnonzero(skin_ori_flatten != 0)
skin_ori_value = skin_ori_flatten[skin_ori_index]
skin_ori_value_max = np.max(skin_ori_value)
skin_ori_value_std = np.std(skin_ori_value)
skin_ori_value_min = np.min(skin_ori_value)
skin_ori_value_mean = np.mean(skin_ori_value)
return skin_ori_value_mean, skin_ori_value_std, skin_ori_value_max, skin_ori_value_min
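# Hedged toy example of get_data_analysis (values made up): zeros are treated
# as background and ignored, so the statistics cover the values {2, 4, 6} only.
def _example_data_analysis():
    toy = np.array([[0, 2], [4, 6]], dtype=np.float32)
    return get_data_analysis(toy)  # -> (4.0, ~1.63, 6.0, 2.0)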
def make_mask(face_mask, t):
    # t holds the triangle vertex coordinates of the feature points
r = cv2.boundingRect(np.float32([t]))
    # bounding rectangle of the triangle, in the format (xmin, ymin, width, height)
t_rect = []
for i in range(0, 3):
t_rect.append(((t[i][0] - r[0]), (t[i][1] - r[1])))
    # convert the coordinates to be relative to the top-left corner of the bounding rectangle
mask = np.zeros((r[3], r[2]), dtype=np.float32)
    # rectangular region that contains the subdivision triangle
cv2.fillConvexPoly(mask, np.int32(t_rect), 1)
    # fill the subdivision triangle
face_mask[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] = face_mask[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] * (
1 - mask) + 1 * mask
return face_mask
def smooth_light(orange_img, arr_point_tree):
    # skin-tone region
dt = get_measure_triangle_skin()[47:]
face_mask2 = np.zeros(orange_img.shape[:2], dtype=np.uint8)
for i in range(0, len(dt)):
t = []
for j in range(0, 3):
t.append(arr_point_tree[dt[i][j]])
face_mask = make_mask(face_mask2, t)
face_mask = np.array(face_mask, np.float32)
orange_img_hsv = cv2.cvtColor(orange_img, cv2.COLOR_BGR2HSV)
    s = np.array(orange_img_hsv[:, :, 1], np.float32)
################################################################################
##
## 2018/05/02
##
## Author: <NAME>, IB²
## Version: 1.0
## Python: 3.6
##
## This implements a random forest in order to predict digenic effect of both
## DIDA combinations (1) and dual diagnosis combinations (2). It aims to
## differentiate between true digenic class, composite and dual diagnosis.
##
## (1) https://academic.oup.com/nar/article/45/15/e140/3894171
## (2) https://www.nejm.org/doi/full/10.1056/NEJMoa1516767
##
## It performs stratified cross-validations and averages results over a given
## amount of repeats. dida_dualdiag.csv is an instance of valid CSV file.
##
################################################################################
import sys
import time
import pandas as pd
from math import sqrt
from numpy import array, concatenate, dot, diag, mean, std
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import roc_curve, roc_auc_score, auc, matthews_corrcoef
def main(f_name, n_trees, n_epochs, threshold, selector):
"""
Loads csv, launches cross-validation, displays scores
f_name: str, path to reach the .csv file to evaluate predictor
n_trees: int, amount of trees in forest
n_epochs: int, amount of cross-validation to perform
thresholds: see getScores
selector: str, boolean vector representing features to take into account
"""
features = [
'CADD1', 'CADD2', 'RecA', 'EssA',
'CADD3', 'CADD4', 'RecB', 'EssB',
'Path'
]
    assert len(selector) == len(features), "Features selector must match the number of features."
to_keep = [f for i, f in enumerate(features) if selector[i] == '1']
# Csv gathering, it needs to be ordered.
df_data = pd.read_csv(f_name)
X = array(df_data[to_keep])
# TD: true digenic, CO: composite, UK: unknown
# OV: OVerlapping dual diagnosis, DI: Distinct dual diagnosis
y = array(
df_data['DE'].replace(
'TD', 2
).replace(
'CO', 1
).replace(
'UK', -1
).replace(
'OV', 0
).replace(
'DI', 0
).replace(
'DD', 0
)
)
gene_pairs = array(df_data['Pair'])
X, y, gene_pairs = X[y != -1], y[y != -1], gene_pairs[y != -1]
y = array([ [i == y_i for i in range(3)] for y_i in y])
print('Training on subspace {', ', '.join( to_keep ), '}.' )
def getScores(pred, real, thresholds=[1, 1, 1]):
"""
Returns evaluation metrics to evaluate one cross-validation:
For each class, Sensitivity and Specificity. Order:
sen_dd, sen_co, sen_td, spe_dd, spe_co, spe_td
pred: Predicted probabilities. For each sample, vector is such as
[pred_dd, pred_co, pred_td]
real: real label. A label is a 3-long boolean vector.
DD: [1, 0, 0] - CO: [0, 1, 0] - TD: [0, 0, 1]
thresholds: weightings to compensate lack of data in certain class.
"""
if len(pred) != len(real):
raise Exception("ERROR: input vectors have differente len!")
results = {
'sen': [ { 'count': 0, 'recognized': 0 } for _ in range(3) ],
'spe': [ { 'count': 0, 'true': 0 } for _ in range(3) ],
}
for i, r_tab in enumerate(real):
r = max(range(3), key=lambda k: r_tab[k])
p = max(range(3), key=lambda k: pred[i][k]*thresholds[k])
results['sen'][r]['count'] += 1
results['spe'][p]['count'] += 1
if p == r:
results['sen'][p]['recognized'] += 1
results['spe'][p]['true'] += 1
return map(
lambda x: round(x*100)/100,
[r['recognized'] / r['count'] for r in results['sen']] + [r['true'] / r['count'] for r in results['spe']]
)
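# Hedged toy example of getScores (labels and probabilities are made up): one
# sample of each class, each predicted correctly, so all six scores are 1.0.
def _example_get_scores():
    real = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]  # DD, CO, TD
    pred = [[0.8, 0.1, 0.1], [0.1, 0.7, 0.2], [0.2, 0.2, 0.6]]
    return list(getScores(pred, real))  # -> [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]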
def LOGO_crossValidation(X, y, groups, n_trees=100, n_epochs=50, thresholds=[1,1,1]):
"""
Stratified cross-validation.
X: Design matrix
y: label vector
groups: Gene pair vector to define training groups
n_trees: Amount of trees in random forest
n_epochs: number of cross validations to perform
thresholds: see getScores
"""
logo = LeaveOneGroupOut()
clf = RandomForestClassifier(
n_estimators=n_trees,
max_depth=10,
criterion='gini',
min_samples_split=2,
min_samples_leaf=2,
bootstrap=True,
n_jobs=1
)
# Vector to compute final scores
sum_dd_se, sum_co_se, sum_td_se = [], [], []
sum_dd_sp, sum_co_sp, sum_td_sp = [], [], []
for i in range(n_epochs):
start_time = time.time()
values_t, values_p = [], []
print("#"*10, "Trial %i" % i, "#"*10)
# We leave one group out
for train_index, test_index in logo.split(X, y, groups):
X_fit, y_fit, X_train, y_train = (
X[train_index], y[train_index],
X[test_index], y[test_index]
)
clf = clf.fit(X_fit, y_fit)
y_predicted = clf.predict_proba(X_train)
# y_predicted is not shaped correctly. Reshape it to fit
# getScores expectations.
y_formatted = [ [0, 0, 0] for _ in range(len(y_predicted[0])) ]
for de in (0, 1, 2):
for i, proba in enumerate(y_predicted[de][:,1]):
y_formatted[i][de] = proba
# Predictions are concatenated into a prediction vector
values_t, values_p = values_t + [yi for yi in y_train], values_p + [yi for yi in y_formatted]
sen_dd, sen_co, sen_td, spe_dd, spe_co, spe_td = getScores(values_p, values_t, thresholds)
sum_dd_se.append(sen_dd)
sum_co_se.append(sen_co)
sum_td_se.append(sen_td)
sum_dd_sp.append(spe_dd)
sum_co_sp.append(spe_co)
sum_td_sp.append(spe_td)
print('Duration:', round( (time.time() - start_time) * 100) / 100, 's')
print('sen | dd - co - td / spe | dd - co - td')
print('sen | ' + '-'.join(map(str, (sen_dd, sen_co, sen_td))) + ' / spe | ' + '-'.join(map(str, (spe_dd, spe_co, spe_td))))
print('Sen DD: %f, std: %f' % (mean(sum_dd_se), std(sum_dd_se)) )
print('Sen CO: %f, std: %f' % (mean(sum_co_se), std(sum_co_se)) )
print('Sen TD: %f, std: %f' % (mean(sum_td_se), std(sum_td_se)) )
print('Spe DD: %f, std: %f' % (mean(sum_dd_sp), std(sum_dd_sp)) )
print('Spe CO: %f, std: %f' % (mean(sum_co_sp), std(sum_co_sp)) )
    print('Spe TD: %f, std: %f' % (mean(sum_td_sp), std(sum_td_sp)) )
import numpy as np
import open3d as o3d
from PIL import Image, ImageDraw
class BoundingBox3D:
"""Class that defines an axially-oriented bounding box."""
next_id = 1
def __init__(self,
center,
front,
up,
left,
size,
label_class,
confidence,
meta=None,
show_class=False,
show_confidence=False,
show_meta=None,
identifier=None,
arrow_length=1.0):
"""Creates a bounding box.
Front, up, left define the axis of the box and must be normalized and
mutually orthogonal.
Args:
center: (x, y, z) that defines the center of the box.
front: normalized (i, j, k) that defines the front direction of the
box.
up: normalized (i, j, k) that defines the up direction of the box.
left: normalized (i, j, k) that defines the left direction of the
box.
size: (width, height, depth) that defines the size of the box, as
measured from edge to edge.
label_class: integer specifying the classification label. If an LUT
is specified in create_lines() this will be used to determine
the color of the box.
confidence: confidence level of the box.
meta: a user-defined string (optional).
show_class: displays the class label in text near the box
(optional).
show_confidence: displays the confidence value in text near the box
(optional).
show_meta: displays the meta string in text near the box (optional).
identifier: a unique integer that defines the id for the box
(optional, will be generated if not provided).
arrow_length: the length of the arrow in the front_direct. Set to
zero to disable the arrow (optional).
"""
assert (len(center) == 3)
assert (len(front) == 3)
assert (len(up) == 3)
assert (len(left) == 3)
assert (len(size) == 3)
        self.center = np.array(center, dtype="float32")
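        # Hedged construction sketch (kept as a comment since the rest of this
        # constructor is not reproduced here): an axis-aligned unit box at the
        # origin could be created as
        #   BoundingBox3D(center=[0, 0, 0], front=[0, 0, 1], up=[0, 1, 0],
        #                 left=[1, 0, 0], size=[1, 1, 1], label_class=0,
        #                 confidence=1.0)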
#!/usr/bin/env python
from __future__ import print_function, division
import math
import glob
import aplpy
import numpy as np
import itertools
import multiprocessing as mp
from astropy.wcs.utils import proj_plane_pixel_scales
from astropy import wcs
import astropy.units as u
from astropy.stats import sigma_clipped_stats
from astropy.nddata.utils import Cutout2D
from astropy.io import fits
from astropy.visualization import ZScaleInterval
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.collections import PatchCollection
from scipy.optimize import curve_fit
from photutils import aperture_photometry, CircularAperture
from photutils import Background2D, MedianBackground, make_source_mask
from qso_toolbox import utils as ut
from qso_toolbox import catalog_tools as ct
from qso_toolbox import photometry_tools as pt
def show_rectangles(fig, xw, yw, width, height, angle=0, layer=False,
zorder=None, coords_frame='world', **kwargs):
"""
Overlay rectangles on the current plot.
ATTENTION! THIS IS A MODIFIED VERSION OF THE ORIGINAL APLPY ROUTINE THAT
CORRECTLY ROTATES THE RECTANGLE AROUND ITS CENTER POSITION.
see https://github.com/aplpy/aplpy/pull/327
Parameters
----------
xw : list or `~numpy.ndarray`
The x positions of the centers of the rectangles (in world coordinates)
yw : list or `~numpy.ndarray`
The y positions of the centers of the rectangles (in world coordinates)
width : int or float or list or `~numpy.ndarray`
The width of the rectangle (in world coordinates)
height : int or float or list or `~numpy.ndarray`
The height of the rectangle (in world coordinates)
angle : int or float or list or `~numpy.ndarray`, optional
rotation in degrees (anti-clockwise). Default
angle is 0.0.
layer : str, optional
The name of the rectangle layer. This is useful for giving
custom names to layers (instead of rectangle_set_n) and for
replacing existing layers.
coords_frame : 'pixel' or 'world'
The reference frame in which the coordinates are defined. This is
used to interpret the values of ``xw``, ``yw``, ``width``, and
``height``.
kwargs
Additional keyword arguments (such as facecolor, edgecolor, alpha,
or linewidth) are passed to Matplotlib
:class:`~matplotlib.collections.PatchCollection` class, and can be
used to control the appearance of the rectangles.
"""
xw, yw, width, height, angle = aplpy.core.uniformize_1d(xw, yw, width,
height, angle)
if 'facecolor' not in kwargs:
kwargs.setdefault('facecolor', 'none')
if layer:
fig.remove_layer(layer, raise_exception=False)
if coords_frame not in ['pixel', 'world']:
raise ValueError("coords_frame should be set to 'pixel' or 'world'")
# While we could plot the shape using the get_transform('world') mode
# from WCSAxes, the issue is that the rotation angle is also measured in
# world coordinates so will not be what the user is expecting. So we
# allow the user to specify the reference frame for the coordinates and
# for the rotation.
if coords_frame == 'pixel':
x, y = xw, yw
w = width
h = height
a = angle
transform = fig.ax.transData
else:
x, y = fig.world2pixel(xw, yw)
pix_scale = aplpy.core.proj_plane_pixel_scales(fig._wcs)
sx, sy = pix_scale[fig.x], pix_scale[fig.y]
w = width / sx
h = height / sy
a = angle
transform = fig.ax.transData
# x = x - w / 2.
# y = y - h / 2.
#
# patches = []
# for i in range(len(x)):
# patches.append(Rectangle((x[i], y[i]), width=w[i], height=h[i],
# angle=a[i]))
xp = x - w / 2.
yp = y - h / 2.
radeg = np.pi / 180
xr = (xp - x) * np.cos((angle) * radeg) - (yp - y) * np.sin(
(angle) * radeg) + x
yr = (xp - x) * np.sin((angle) * radeg) + (yp - y) * np.cos(
(angle) * radeg) + y
patches = []
for i in range(len(xr)):
patches.append(
Rectangle((xr[i], yr[i]), width=w[i], height=h[i], angle=a[i]))
# Due to bugs in matplotlib, we need to pass the patch properties
# directly to the PatchCollection rather than use match_original.
p = PatchCollection(patches, transform=transform, **kwargs)
if zorder is not None:
p.zorder = zorder
c = fig.ax.add_collection(p)
if layer:
rectangle_set_name = layer
else:
fig._rectangle_counter += 1
rectangle_set_name = 'rectangle_set_' + str(fig._rectangle_counter)
fig._layers[rectangle_set_name] = c
return fig
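# Hedged usage sketch: the coordinates, sizes, and angle below are arbitrary
# placeholders, only meant to show how show_rectangles is called on an existing
# aplpy.FITSFigure (mirroring the call made in _plot_slit further down).
def _example_show_rectangle(fig):
    return show_rectangles(fig, 150.0, 2.0, 10. / 3600., 20. / 3600.,
                           angle=30., edgecolor='red', lw=1.5,
                           coords_frame='world')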
# ------------------------------------------------------------------------------
# Plotting functions for image_cutouts
# ------------------------------------------------------------------------------
def open_image(filename, ra, dec, fov, image_folder_path, verbosity=0):
"""Opens an image defined by the filename with a fov of at least the
    specified size (in arcseconds).
:param filename:
:param ra:
:param dec:
:param fov:
:param image_folder_path:
:param verbosity:
:return:
"""
filenames_available = glob.glob(filename)
file_found = False
open_file_fov = None
file_path = None
if len(filenames_available) > 0:
for filename in filenames_available:
try:
file_fov = int(filename.split("_")[3].split(".")[0][3:])
except:
file_fov = 9999999
if fov <= file_fov:
data, hdr = fits.getdata(filename, header=True)
file_found = True
                file_path = filename
open_file_fov = file_fov
if file_found:
if verbosity > 0:
print("Opened {} with a fov of {} "
"arcseconds".format(file_path, open_file_fov))
return data, hdr, file_path
else:
if verbosity > 0:
print("File {} in folder {} not found. Target with RA {}"
" and Decl {}".format(filename, image_folder_path,
ra, dec))
return None, None, None
def make_mult_png_fig(ra, dec, surveys, bands,
fovs, apertures, square_sizes, image_folder_path, mag_list=None,
magerr_list=None, sn_list=None,
forced_mag_list=None, forced_magerr_list=None,
forced_sn_list=None, n_col=3,
n_sigma=3, color_map_name='viridis',
add_info_label=None, add_info_value=None, verbosity=0):
"""Create a figure to plot cutouts for one source in all specified surveys
and bands.
:param ra: float
Right Ascension of the target
:param dec: float
Declination of the target
:param surveys: list of strings
List of survey names, length has to be equal to bands and fovs
:param bands: list of strings
List of band names, length has to be equal to surveys and fovs
:param fovs: list of floats
        Field of view in arcseconds of image cutouts, length has to be equal to
surveys, bands and apertures.
:param apertures: list of floats
List of apertures in arcseconds for forced photometry calculated,
length has to be equal to surveys, bands and fovs
:param square_sizes: list of floats
        List of side lengths in arcseconds of the square region plotted around each source (error region)
:param image_folder_path: string
        Path to the directory where all the images are stored
:param mag_list: list of floats
List of magnitudes for each survey/band
:param magerr_list: list of floats
List of magnitude errors for each survey/band
:param sn_list: list of floats
List of S/N for each survey/band
:param forced_mag_list: list of floats
List of forced magnitudes for each survey/band
:param forced_magerr_list: list of floats
List of forced magnitude errors for each survey/band
:param forced_sn_list: list of floats
List of forced S/N for each survey/band
:param n_col: int
Number of columns
:param n_sigma: int
Number of sigmas for the sigma-clipping routine that creates the
boundaries for the color map.
:param color_map_name: string
Name of the color map
:param add_info_value : string
Value for additional information added to the title of the figure
:param add_info_label : string
Label for additional information added to the title of the figure
:param verbosity:
Verbosity > 0 will print verbose statements during the execution
:return: matplotlib.figure
Figure with the plot.
"""
n_images = len(surveys)
n_row = int(math.ceil(n_images / n_col))
fig = plt.figure(figsize=(5*n_col, 5*n_row))
fig = _make_mult_png_axes(fig, n_row, n_col, ra, dec, surveys, bands,
fovs, apertures, square_sizes, image_folder_path, mag_list,
magerr_list, sn_list,
forced_mag_list, forced_magerr_list,
forced_sn_list, n_sigma, color_map_name, verbosity)
coord_name = ut.coord_to_name(np.array([ra]),
np.array([dec]),
epoch="J")
if add_info_label is None or add_info_value is None:
fig.suptitle(coord_name[0])
else:
fig.suptitle(coord_name[0]+' '+add_info_label+'='+add_info_value)
return fig
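# Hedged usage sketch (all values are placeholders): this assumes DES DR1 r- and
# z-band cutout FITS files have already been downloaded into image_folder_path
# following the naming convention expected by open_image above.
def _example_mult_png_fig():
    fig = make_mult_png_fig(ra=150.0, dec=2.0,
                            surveys=['desdr1', 'desdr1'],
                            bands=['r', 'z'],
                            fovs=[60, 60],
                            apertures=[2.0, 2.0],
                            square_sizes=[10, 10],
                            image_folder_path='./cutouts',
                            n_col=2, verbosity=1)
    fig.savefig('example_cutouts.png')
    return fig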
def _make_mult_png_axes(fig, n_row, n_col, ra, dec, surveys, bands,
fovs, apertures, square_sizes, image_folder_path, mag_list=None,
magerr_list=None, sn_list=None,
forced_mag_list=None, forced_magerr_list=None,
forced_sn_list=None,
n_sigma=3, color_map_name='viridis', verbosity=0):
""" Create axes components to plot one source in all specified surveys
and bands.
:param fig: matplotlib.figure
Figure
:param n_row: int
Number of rows
:param n_col: int
Number of columns
:param ra: float
Right Ascension of the target
:param dec: float
Declination of the target
:param surveys: list of strings
List of survey names, length has to be equal to bands and fovs
:param bands: list of strings
List of band names, length has to be equal to surveys and fovs
:param fovs: list of floats
        Field of view in arcseconds of image cutouts, length has to be equal to
surveys, bands and apertures.
:param apertures: list of floats
List of apertures in arcseconds for forced photometry calculated,
length has to be equal to surveys, bands and fovs
:param square_sizes: list of floats
        List of side lengths in arcseconds of the square region plotted around each source (error region)
:param image_folder_path: string
        Path to the directory where all the images are stored
:param mag_list: list of floats
List of magnitudes for each survey/band
:param magerr_list: list of floats
List of magnitude errors for each survey/band
:param sn_list: list of floats
List of S/N for each survey/band
:param forced_mag_list: list of floats
List of forced magnitudes for each survey/band
:param forced_magerr_list: list of floats
List of forced magnitude errors for each survey/band
:param forced_sn_list: list of floats
List of forced S/N for each survey/band
:param n_col: int
Number of columns
:param n_sigma: int
Number of sigmas for the sigma-clipping routine that creates the
boundaries for the color map.
:param color_map_name: string
Name of the color map
:param verbosity:
Verbosity > 0 will print verbose statements during the execution
:return: matplotlib.figure
Figure with the plot.
"""
for idx, survey in enumerate(surveys):
band = bands[idx]
fov = fovs[idx]
aperture = apertures[idx]
size = square_sizes[idx]
if mag_list is not None:
catmag = mag_list[idx]
else:
catmag = None
if magerr_list is not None:
caterr = magerr_list[idx]
else:
caterr = None
if sn_list is not None:
catsn = sn_list[idx]
else:
catsn = None
if forced_mag_list is not None:
forced_mag = forced_mag_list[idx]
else:
forced_mag = None
if forced_magerr_list is not None:
forced_magerr = forced_magerr_list[idx]
else:
forced_magerr = None
if forced_sn_list is not None:
forced_sn = forced_sn_list[idx]
else:
forced_sn = None
# Get the correct filename, accept larger fovs
coord_name = ut.coord_to_name(np.array([ra]), np.array([dec]),
epoch="J")
filename = image_folder_path + '/' + coord_name[0] + "_" + survey + "_" + \
band + "*fov*.fits"
data, hdr, file_path = open_image(filename, ra, dec, fov,
image_folder_path,
verbosity)
if data is not None and hdr is not None:
file_found = True
else:
file_found = False
# Old plotting routine to modify, currently it only plots images for
# surveys and bands that it could open, no auto download implemented
if file_found:
wcs_img = wcs.WCS(hdr)
pixcrd = wcs_img.wcs_world2pix(ra, dec, 0)
        positions = (float(pixcrd[0]), float(pixcrd[1]))
overlap = True
if verbosity >= 4:
print("[DIAGNOSTIC] Image file shape {}".format(data.shape))
try:
img_stamp = Cutout2D(data, positions, size=fov * u.arcsec,
wcs=wcs_img)
if verbosity >= 4:
print("[DIAGNOSTIC] Cutout2D file shape {}".format(
img_stamp.shape))
except:
print("Source not in image")
overlap = False
img_stamp = None
if img_stamp is not None:
if overlap:
img_stamp = img_stamp.data
hdu = fits.ImageHDU(data=img_stamp, header=hdr)
axs = aplpy.FITSFigure(hdu, figure=fig,
subplot=(n_row, n_col, idx + 1),
north=True)
# Check if input color map name is a color map, else use viridis
try:
cm = plt.get_cmap(color_map_name)
except ValueError:
print('Color map argument is not a color map. Setting '
'default: viridis')
cm = plt.get_cmap('viridis')
color_map_name = 'viridis'
# Sigma-clipping of the color scale
mean = np.mean(img_stamp[~np.isnan(img_stamp)])
std = np.std(img_stamp[~np.isnan(img_stamp)])
upp_lim = mean + n_sigma * std
low_lim = mean - n_sigma * std
axs.show_colorscale(vmin=low_lim, vmax=upp_lim,
cmap=color_map_name)
# Plot circular aperture (forced photometry flux)
(yy, xx) = img_stamp.shape
circx = (xx * 0.5) # + 1
circy = (yy * 0.5) # + 1
aper_pix = aperture_inpixels(aperture, hdr)
circle = plt.Circle((circx, circy), aper_pix, color='r', fill=False,
lw=1.5)
fig.gca().add_artist(circle)
# Plot rectangular aperture (error region)
rect_inpixels = aperture_inpixels(size, hdr)
square = plt.Rectangle((circx - rect_inpixels * 0.5,
circy - rect_inpixels * 0.5),
rect_inpixels, rect_inpixels,
color='r', fill=False, lw=1.5)
fig.gca().add_artist(square)
# Create forced photometry label
if (forced_mag is not None):
if (forced_sn is not None) & (forced_magerr is not None):
forcedlabel = r'${0:s} = {1:.2f} \pm {2:.2f} (SN=' \
r'{3:.1f})$'.format(band + "_{forced}",
forced_mag,
forced_magerr,
forced_sn)
elif forced_magerr is not None:
forcedlabel = r'${0:s} = {1:.2f} \pm {2:.2f}$'.format(
band + "_{forced}", forced_mag, forced_magerr)
else:
forcedlabel = r'${0:s} = {1:.2f}$'.format(
band + "_{forced}", forced_mag)
fig.gca().text(0.03, 0.16, forcedlabel, color='black',
weight='bold', fontsize='large',
bbox=dict(facecolor='white', alpha=0.6),
transform=fig.gca().transAxes)
# Create catalog magnitude label
if catmag is not None:
if (catsn is not None) & (caterr is not None):
maglabel = r'${0:s} = {1:.2f} \pm {2:.2f} (SN=' \
r'{3:.2f})$'.format(
band + "_{cat}", catmag, caterr, catsn)
elif caterr is not None:
maglabel = r'${0:s} = {1:.2f} \pm {2:.2f}$'.format(
band + "_{cat}", catmag, caterr)
else:
maglabel = r'${0:s} = {1:.2f}$'.format(
band + "_{cat}", catmag)
fig.gca().text(0.03, 0.04, maglabel, color='black',
weight='bold',
fontsize='large',
bbox=dict(facecolor='white', alpha=0.6),
transform=fig.gca().transAxes)
fig.gca().set_title(survey + " " + band)
return fig
# ------------------------------------------------------------------------------
# Finding Chart plotting routine
# ------------------------------------------------------------------------------
def make_finding_charts(table, ra_column_name, dec_column_name,
target_column_name, survey, band,
aperture, fov, image_folder_path,
offset_table=None,
offset_id = 0,
offset_focus = False,
offset_ra_column_name=None,
offset_dec_column_name=None,
pos_angle_column_name=None,
offset_mag_column_name=None,
offset_id_column_name=None,
# offset_finding_chart=True,
label_position='bottom',
slit_width=None,
slit_length=None,
format ='pdf',
auto_download=False, verbosity=0):
"""Create and save finding charts plots for all targets in the input table.
:param table: pandas.core.frame.DataFrame
Dataframe with targets to plot finding charts for
:param ra_column_name: string
Right ascension column name
:param dec_column_name: string
Declination column name
:param target_column_name: string
Name of the target identifier column
:param survey: string
Survey name
:param band: string
Passband name
:param aperture: float
Aperture to plot in arcseconds
:param fov: float
Field of view in arcseconds
:param image_folder_path: string
Path to where the image will be stored
:param offset_table: pandas.core.frame.DataFrame
Pandas dataframe with offset star information for all targets
:param offset_id: int
Integer indicating the primary offset from the offset table
:param offset_focus: boolean
Boolean to indicate whether offset star will be in the center or not
:param offset_ra_column_name: string
Offset star dataframe right ascension column name
:param offset_dec_column_name: string
Offset star dataframe declination column name
:param pos_angle_column_name: string
Offset star dataframe position angle column name
:param offset_mag_column_name: string
Offset star dataframe magnitude column name
:param offset_id_column_name: string
Offset star dataframe identifier column name
:param label_position: string
String that defines the label position for the offset stars.
Possible label positions are ["left", "right", "top", "bottom",
"topleft"]
:param slit_width: float
Slit width in arcseconds.
:param slit_length: float
Slit length in arcseconds
:param format: string
A string indicating in which format the finding charts are save.
Possible formats: 'pdf', 'png'
:param auto_download: boolean
Boolean to indicate whether images should be automatically downloaded.
:param verbosity:
Verbosity > 0 will print verbose statements during the execution.
"""
surveys = [survey]
bands = [band]
fovs = [fov]
print(offset_table)
print(table)
for idx in table.index:
ra = table.loc[idx, ra_column_name]
dec = table.loc[idx, dec_column_name]
target_name = table.loc[idx, target_column_name]
if offset_table is not None:
offset_target = offset_table.query('target_name=="{}"'.format(
target_name))
# Set position angle
if len(offset_target) > 0:
if pos_angle_column_name is not None:
position_angle = offset_target.loc[offset_target.index[0],
pos_angle_column_name]
else:
target_coords = SkyCoord(ra=ra, dec=dec,
unit=(u.deg, u.deg),
frame='icrs')
offset_coords = SkyCoord(ra=offset_target.loc[:,
offset_ra_column_name].values,
dec=offset_target.loc[:,
offset_dec_column_name].values,
unit=(u.deg, u.deg),
frame='icrs')
# Calculate position angles(East of North)
pos_angles = offset_coords.position_angle(target_coords).to(
u.deg)
# Take position angle to offset_id star in list
position_angle = pos_angles[offset_id].to(u.deg).value
else:
position_angle = 0
offset_target = None
else:
offset_target = None
position_angle = 0
if offset_target is not None:
offset_target.reset_index(inplace=True, drop=True)
if auto_download:
if offset_focus:
ct.get_photometry(offset_target.loc[[0]],
offset_ra_column_name,
offset_dec_column_name,
surveys,
bands,
image_folder_path,
fovs,
# n_jobs=1,
verbosity=verbosity)
else:
ct.get_photometry(table.loc[[idx]],
ra_column_name,
dec_column_name,
surveys,
bands,
image_folder_path,
fovs,
# n_jobs=1,
verbosity=verbosity)
fig = make_finding_chart(ra, dec, survey, band, aperture, fov,
image_folder_path,
offset_df=offset_target,
offset_id=offset_id,
offset_focus=offset_focus,
offset_ra_column_name=offset_ra_column_name,
offset_dec_column_name=offset_dec_column_name,
offset_mag_column_name=offset_mag_column_name,
offset_id_column_name=offset_id_column_name,
label_position=label_position,
slit_width=slit_width,
slit_length=slit_length,
position_angle=position_angle,
verbosity=verbosity)
if format == 'pdf':
fig.save('fc_{}.pdf'.format(target_name), transparent=False)
if format == 'png':
fig.save('fc_{}.png'.format(target_name), transparent=False)
print('{} created'.format('fc_{}'.format(target_name)))
def make_finding_chart(ra, dec, survey, band, aperture, fov,
image_folder_path,
offset_df=None,
offset_id=0,
offset_focus=False,
offset_ra_column_name=None,
offset_dec_column_name=None,
offset_mag_column_name=None,
offset_id_column_name=None,
label_position='bottom',
slit_width=None, slit_length=None,
position_angle=None, verbosity=0):
"""Make the finding chart figure and return it.
This is an internal function, but can be used to create one finding chart.
:param ra: float
Right ascension of the target in decimal degrees
:param dec: float
Declination of the target in decimal degrees
:param survey: string
Survey name
:param band: string
Passband name
:param aperture: float
Size of the plotted aperture in arcseconds
:param fov: float
Field of view in arcseconds
:param image_folder_path: string
Path to where the image will be stored
:param offset_df: pandas.core.frame.DataFrame
Pandas dataframe with offset star information
:param offset_id: int
Integer indicating the primary offset from the offset table
:param offset_focus: boolean
Boolean to indicate whether offset star will be in the center or not
:param offset_ra_column_name: string
Offset star dataframe right ascension column name
:param offset_dec_column_name: string
Offset star dataframe declination column name
:param offset_mag_column_name: string
Offset star dataframe magnitude column name
:param offset_id_column_name: string
Offset star dataframe identifier column name
:param label_position: string
String that defines the label position for the offset stars.
Possible label positions are ["left", "right", "top", "bottom",
"topleft"]
:param slit_width: float
Slit width in arcseconds.
:param slit_length: float
Slit length in arcseconds
:param position_angle:
Position angle for the observation.
:param verbosity:
Verbosity > 0 will print verbose statements during the execution.
:return: matplotlib.figure
Return the matplotlib figure of the finding chart.
"""
if offset_focus:
im_ra = offset_df.loc[offset_id, offset_ra_column_name]
im_dec = offset_df.loc[offset_id, offset_dec_column_name]
else:
im_ra = ra
im_dec = dec
coord_name = ut.coord_to_name(np.array([im_ra]), np.array([im_dec]),
epoch="J")
filename = image_folder_path + '/' + coord_name[0] + "_" + survey + "_" + \
band + "*.fits"
data, hdr, file_path = open_image(filename, im_ra, im_dec,
fov,
image_folder_path,
verbosity=verbosity)
# Reproject data if position angle is specified
if position_angle != 0:
hdr['CRPIX1'] = int(hdr['NAXIS1'] / 2.)
hdr['CRPIX2'] = int(hdr['NAXIS2'] / 2.)
hdr['CRVAL1'] = im_ra
hdr['CRVAL2'] = im_dec
new_hdr = hdr.copy()
pa_rad = np.deg2rad(position_angle)
# TODO: Note that the rotation definition here reflects one axis
# TODO: to make sure that it is a rotated version of north up east left
# TODO: both 001 components have a negative sign!
new_hdr['PC001001'] = -np.cos(pa_rad)
new_hdr['PC001002'] = np.sin(pa_rad)
new_hdr['PC002001'] = np.sin(pa_rad)
new_hdr['PC002002'] = np.cos(pa_rad)
from reproject import reproject_interp
data, footprint = reproject_interp((data, hdr),
new_hdr,
shape_out=[hdr['NAXIS1'],
hdr['NAXIS2']])
hdr = new_hdr
if data is not None:
# Plotting routine from here on.
hdu = fits.PrimaryHDU(data, hdr)
# De-rotate image along the position angle
fig = aplpy.FITSFigure(hdu)
if fov is not None:
fig.recenter(im_ra, im_dec, radius=fov / 3600. * 0.5)
try:
zscale = ZScaleInterval()
z1, z2 = zscale.get_limits(data)
fig.show_grayscale(vmin=z1, vmax=z2)
except Exception as e:
print('Exception encountered: {}'.format(str(e)))
fig.show_grayscale(pmin=10, pmax=99)
fig.add_scalebar(fov/4/3600., '{:.1f} arcmin'.format(fov/4/60.),
color='black',
font='serif',
linewidth=4)
if slit_length is not None and slit_width is not None:
if position_angle == 0:
_plot_slit(fig, im_ra, im_dec, slit_length, slit_width,
position_angle)
else:
_plot_slit(fig, im_ra, im_dec, slit_length, slit_width,
0)
if offset_df is not None and offset_ra_column_name is not None and \
offset_dec_column_name is not None and offset_mag_column_name is \
not None and offset_id_column_name is not None:
print("[INFO] Generating offsets for {}".format(filename))
_plot_offset_stars(fig, ra, dec, offset_df, fov,
offset_id,
offset_ra_column_name,
offset_dec_column_name,
offset_mag_column_name,
offset_id_column_name,
label_position=label_position)
_plot_info_box(fig, ra, dec, offset_df, offset_ra_column_name,
offset_dec_column_name, offset_mag_column_name)
fig.show_circles(xw=ra, yw=dec, radius=aperture / 3600., edgecolor='red',
alpha=1, lw=3)
fig.axis_labels.set_xtext('Right Ascension')
fig.axis_labels.set_ytext('Declination')
c = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree))
title = 'RA= {0} ; DEC = {1}'.format(
c.ra.to_string(precision=3, sep=":", unit=u.hour),
c.dec.to_string(precision=3, sep=":", unit=u.degree, alwayssign=True))
plt.title(title)
fig.add_grid()
fig.grid.show()
fig.set_theme('publication')
return fig
def _plot_slit(fig, ra, dec, slit_length, slit_width, position_angle):
# slit_label = 'PA=${0:.2f}$deg\n \n'.format(position_angle)
# slit_label += 'width={0:.1f}"; length={1:.1f}"'.format(
# slit_width, slit_length)
fig = show_rectangles(fig, ra, dec, slit_width / 3600., slit_length / 3600.,
edgecolor='w', lw=1.0, angle=position_angle,
coords_frame='world')
# if position_angle > 0 and position_angle < 180:
# angle_offset = 180
# dec_offset = 0
# else:
# angle_offset = 0
# dec_offset = 0
# fig.add_label(ra, dec + dec_offset, slit_label,
# rotation=position_angle + angle_offset + 90,
# size='large', color='w')
position_dict = {"left": [8, 0], "right": [-8, 0], "top": [0, 5],
"bottom": [0, -5], "topleft": [8, 5]}
def _plot_offset_stars(fig, ra, dec, offset_df, fov, offset_id,
ra_column_name,
dec_column_name,
mag_column_name,
id_column_name,
label_position="left"):
# Check if star is in image
radius = fov / 25. / 3600.
ra_pos, dec_pos = position_dict[label_position]
fig.show_circles(xw=offset_df.loc[offset_id, ra_column_name],
yw=offset_df.loc[offset_id, dec_column_name],
radius=radius * 0.5,
edgecolor='blue',
lw=3)
fig.show_rectangles(offset_df.drop(offset_id)[ra_column_name],
offset_df.drop(offset_id)[dec_column_name],
radius, radius, edgecolor='blue', lw=1)
abc_dict = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E'}
for num, idx in enumerate(offset_df.index):
ra_off = offset_df.loc[idx, ra_column_name]
dec_off = offset_df.loc[idx, dec_column_name]
target_coords = SkyCoord(ra=ra, dec=dec,
unit=(u.deg, u.deg),
frame='icrs')
offset_coords = SkyCoord(ra=ra_off,
dec=dec_off, unit=(u.deg, u.deg),
frame='icrs')
separation = offset_coords.separation(target_coords).to(u.arcsecond)
label = '{}'.format(abc_dict[num])
if separation.value <= fov/2.:
if idx == offset_id:
fig.add_label(ra_off + ra_pos * 5 / 3600. / 3.,
dec_off + dec_pos * 5 / 3600. / 3., label,
color='blue', size='x-large',
verticalalignment='center', family='serif')
else:
fig.add_label(ra_off + ra_pos * radius/5., dec_off + dec_pos *
radius/5., label,
color='blue', size='large',
verticalalignment='center', family='serif')
def _plot_info_box(fig, ra, dec, offset_df, ra_column_name, dec_column_name,
mag_column_name,):
target_info = 'Target: RA={:.4f}, DEC={:.4f}'.format(ra, dec)
info_list = [target_info]
abc_dict = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4:'E'}
for num, idx in enumerate(offset_df.index):
ra_off = offset_df.loc[idx, ra_column_name]
dec_off = offset_df.loc[idx, dec_column_name]
target_coords = SkyCoord(ra=ra, dec=dec,
unit=(u.deg, u.deg),
frame='icrs')
offset_coords = SkyCoord(ra=ra_off,
dec=dec_off, unit=(u.deg, u.deg),
frame='icrs')
# Calculate position angles and separations (East of North)
pos_angles = offset_coords.position_angle(target_coords).to(u.deg)
separations = offset_coords.separation(target_coords).to(u.arcsecond)
dra, ddec = offset_coords.spherical_offsets_to(target_coords)
mag = offset_df.loc[idx, mag_column_name]
info = '{}:\t RA={:.4f}, DEC={:.4f}, {}={:.2f}, PosAngle={' \
':.2f}'.format(abc_dict[num],
ra_off,
dec_off, mag_column_name,
mag, pos_angles)
info_off = 'Sep={:.2f}, Dra={:.2f}, ' \
'Ddec={:.2f}'.format(separations, dra.to(
'arcsecond'), ddec.to('arcsecond'))
info_list.append(info)
info_list.append(info_off)
ax = plt.gca()
boxdict = dict(facecolor='white', alpha=0.5, edgecolor='none')
ax.text(.02, 0.02, "\n".join(info_list), transform=ax.transAxes,
fontsize='small',
bbox=boxdict)
# ------------------------------------------------------------------------------
# Determine forced photometry for sources in cutouts.
# ------------------------------------------------------------------------------
def get_forced_photometry(table, ra_col_name, dec_col_name, surveys,
bands, apertures, fovs, image_folder_path,
auto_download=True,
verbosity=0):
"""Calculate forced photometry for all objects in the table Data Frame.
    In the current version of this routine forced photometry calculations
    for the following surveys and bands are available:
    survey: 'desdr1'
    bands: 'grizy'
    survey: 'unwise-allwise', 'unwise-neo1', 'unwise-neo2', 'unwise-neo3',
    'unwise-neo4', 'unwise-neo5', 'unwise-neo6'
    bands: 'w1w2w3w4'
This function takes a table object (astropy table, astropy fitstable or
    DataFrame) with specified RA and Dec. It either looks for the image
    cutouts associated with each survey/band/fov entry or automatically
    downloads them, if specified. If the image cutouts are found, forced
    photometry is calculated within the specified aperture.
A note on confusing terminology in the function:
img_name : Name of the image to be opened
[Epoch Identifier][RA in HHMMSS.SS][DEC in DDMMSS.SS]_
[SURVEY]_[PASSBAND]_fov[FIELD OF VIEW].fits
filename : Path to the image without field of view. This variable is used
to find all images of the source with different field of views
[Image folder path]/[Epoch Identifier][RA in HHMMSS.SS]
[DEC in DDMMSS.SS]_[SURVEY]_[PASSBAND]_*.fits
file_path : Path to the image to be opened
[Image folder path]/[Epoch Identifier][RA in HHMMSS.SS]
[DEC in DDMMSS.SS]_[SURVEY]_[PASSBAND]_fov[FIELD OF VIEW].fits
For each survey/band the following columns are added to the input table:
forced_[survey]_mag_[band]
Forced photometry magnitude for the object in the given survey/band.
The magnitudes are all in the AB system
forced_[survey]_flux_[band]
Forced photometry flux for the object in the given survey/band
forced_[survey]_sn_[band]
Forced photometry S/N for the object in the given survey/band
forced_[survey]_magerr_[band]
Forced photometry magnitude error for the object in the given
survey/band
    forced_[survey]_[band]_comment
A comment with regard to the forced photometry calculation for each
object in the given survey/band.
If the forced photometry calculation is successful the comment will
give the used apertures: 'ap_[aperture in arcseconds]'
    If the forced photometry calculation is unsuccessful the comment will
reflect the problem:
'image_too_small': cutout image is too small to calculate the forced
photometry (minimum pixel size 50)
'image_not_available': cutout image could not be found and/or downloaded
'crashed': bad things happened! (Check try-except clause in
calculate_forced_aperture_photometry)
Lists of equal length need to be supplied to surveys, bands, apertures and
fovs.
:param table: table object
Input data table with at least RA and Decl. columns
:param ra_col_name: string
Exact string for the RA column in the table
:param dec_col_name: string
Exact string for the Decl. column in the table
:param surveys: list of strings
List of survey names, length has to be equal to bands, apertures and
fovs
:param bands: list of strings
List of band names, length has to be equal to surveys, apertures and
fovs
    :param apertures: list of floats
        List of apertures in arcseconds in which the forced photometry is
        calculated, length has to be equal to surveys, bands and fovs
    :param fovs: list of floats
        Field of view in arcseconds of image cutouts, length has to be
        equal to surveys, bands and apertures
:param image_folder_path: string
Path to the directory where all the images will be stored
:param auto_download: Boolean
        Switch to enable/disable auto-downloading the cutout images
:param verbosity:
Verbosity > 0 will print verbose statements during the execution
:return: DataFrame
Returns a DataFrame with the added columns for the forced photometry
calculation.
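
    Example (illustrative only; the input table, its column names and the
    folder path below are placeholders, not part of this module):

    >>> df = pd.DataFrame({'ra': [190.1], 'dec': [11.2]})
    >>> df = get_forced_photometry(df, 'ra', 'dec',
    ...                            surveys=['desdr1', 'unwise-neo4'],
    ...                            bands=['z', 'w1'],
    ...                            apertures=[2.0, 5.0],
    ...                            fovs=[120, 120],
    ...                            image_folder_path='cutouts')

    This would add, for example, the columns 'forced_desdr1_mag_z' and
    'forced_unwise-neo4_mag_w1' to the returned table.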
"""
# Check if table is pandas DataFrame otherwise convert to one
table, format = ct.check_if_table_is_pandas_dataframe(table)
# Add a column to the table specifying the object name used
# for the image name
table.loc[:, 'temp_object_name'] = ut.coord_to_name(table.loc[:,
ra_col_name].values,
table.loc[
:, dec_col_name].values,
epoch="J")
for jdx, survey in enumerate(surveys):
band = bands[jdx]
aperture = apertures[jdx]
fov = fovs[jdx]
for idx in table.index:
ra = table.loc[idx, ra_col_name]
dec = table.loc[idx, dec_col_name]
filename = image_folder_path + '/' + \
table.loc[idx, 'temp_object_name'] + "_" \
+ survey + "_" + band + "*.fits"
data, hdr, file_path = open_image(filename, ra, dec, fov,
image_folder_path, verbosity)
if data is not None:
img_name = file_path.split('/')[-1]
if data is None and auto_download is True:
if survey in ["desdr1", "desdr2"]:
url = ct.get_des_deepest_image_url(ra,
dec,
data_release=survey[-3:],
fov=fov,
band=band,
verbosity=verbosity)
elif survey.split("-")[0] == "unwise" and band in ["w1",
"w2",
"w3",
"w4"]:
# Hack to create npix from fov approximately
npix = int(round(fov / 60. / 4. * 100))
data_release = survey.split("-")[1]
wband = band[1]
url = ct.get_unwise_image_url(ra, dec, npix, wband,
data_release)
else:
raise ValueError(
"Survey and band name not recognized: {} {}. "
"\n "
"Possible survey names include: desdr1, "
"unwise-allwise, unwise-neo1, unwise-neo2, "
"unwise-neo3, unwise-neo4, unwise-neo5,"
"unwise-neo6".format(survey, band))
if url is not None:
img_name = table.loc[idx,'temp_object_name'] + "_" + \
survey + \
"_" + band + "_fov" + '{:d}'.format(fov)
ct.download_image(url, image_name=img_name,
image_folder_path=image_folder_path,
verbosity=verbosity)
file_path = image_folder_path + '/' + img_name + '.fits'
data, hdr = fits.getdata(file_path, header=True)
file_size_sufficient = False
if data is not None:
# Check if file is sufficient
file_size_sufficient = check_image_size(img_name,
file_path,
verbosity)
if data is not None and file_size_sufficient is True:
mag, flux, sn, err, comment = \
calculate_forced_aperture_photometry(file_path,
ra, dec, survey, band,
aperture,
verbosity=verbosity)
table.loc[idx, 'forced_{}_mag_{}'.format(survey, band)] = mag
table.loc[idx, 'forced_{}_flux_{}'.format(survey, band)] = flux
table.loc[idx, 'forced_{}_sn_{}'.format(survey, band)] = sn
table.loc[idx, 'forced_{}_magerr_{}'.format(survey, band)] = \
err
table.loc[idx, 'forced_{}_{}_comment'.format(survey, band)] =\
comment
if data is not None and file_size_sufficient is not True:
                table.loc[idx, 'forced_{}_{}_comment'.format(survey, band)] = \
                    'image_too_small'
if data is None:
                table.loc[idx, 'forced_{}_{}_comment'.format(survey, band)] = \
                    'image_not_available'
table.drop(columns='temp_object_name', inplace=True)
table = ct.convert_table_to_format(table, format)
return table
def get_forced_photometry_mp(table, ra_col_name, dec_col_name, surveys,
bands, apertures, fovs, image_folder_path, n_jobs=5,
auto_download=True,
verbosity=0):
"""Calculate forced photometry in multiprocessing mode.
    This function works analogously to get_forced_photometry but allows the
    use of multiple processors (python multiprocessing module).
:param table: table object
Input data table with at least RA and Decl. columns
:param ra_col_name: string
Exact string for the RA column in the table
:param dec_col_name: string
Exact string for the Decl. column in the table
:param surveys: list of strings
List of survey names, length has to be equal to bands, apertures and
fovs
:param bands: list of strings
List of band names, length has to be equal to surveys, apertures and
fovs
    :param apertures: list of floats
        List of apertures in arcseconds in which the forced photometry is
        calculated, length has to be equal to surveys, bands and fovs
    :param fovs: list of floats
        Field of view in arcseconds of image cutouts, length has to be
        equal to surveys, bands and apertures
:param image_folder_path: string
Path to the directory where all the images will be stored
:param n_jobs:
Number of cores to be used
:param auto_download: Boolean
        Switch to enable/disable auto-downloading the cutout images
:param verbosity:
Verbosity > 0 will print verbose statements during the execution
:return: DataFrame
Returns a DataFrame with the added columns for the forced photometry
calculation.
"""
# Check if table is pandas DataFrame otherwise convert to one
table, format = ct.check_if_table_is_pandas_dataframe(table)
# Add a column to the table specifying the object name used
# for the image name
table['temp_object_name'] = ut.coord_to_name(table[ra_col_name].values,
table[dec_col_name].values,
epoch="J")
for jdx, survey in enumerate(surveys):
band = bands[jdx]
aperture = apertures[jdx]
fov = fovs[jdx]
# Create list with image names
ra = table[ra_col_name].values
dec = table[dec_col_name].values
index = table.index
# # Create image names without the fov ending.
# temp = table.temp_object_name
mp_args = list(zip(index,
ra,
dec,
itertools.repeat(survey),
itertools.repeat(band),
itertools.repeat(aperture),
itertools.repeat(fov),
itertools.repeat(image_folder_path),
table.temp_object_name,
itertools.repeat(auto_download),
itertools.repeat(verbosity)))
# Start multiprocessing pool
with mp.Pool(n_jobs) as pool:
results = pool.starmap(_mp_get_forced_photometry, mp_args)
for result in results:
idx, mag, flux, sn, err, comment = result
table.loc[idx, 'forced_{}_mag_{}'.format(survey, band)] = mag
table.loc[idx, 'forced_{}_flux_{}'.format(survey, band)] = flux
table.loc[idx, 'forced_{}_sn_{}'.format(survey, band)] = sn
table.loc[idx, 'forced_{}_magerr_{}'.format(survey, band)] = \
err
table.loc[idx, 'forced_{}_{}_comment'.format(survey, band)] = \
comment
    table.drop(columns='temp_object_name', inplace=True)
table = ct.convert_table_to_format(table, format)
return table
def _mp_get_forced_photometry(index, ra, dec, survey,
band, aperture, fov, image_folder_path,
temp_object_name,
auto_download=True,
verbosity=0):
"""Calculate forced photometry for one object at a time.
    :param index:
        Index of the table row; it is returned unchanged so that the result
        can be matched back to the input table.
:param ra: float
Right Ascension of the target
:param dec: float
Declination of the target
:param survey: string
Survey name
:param band: string
Passband name
:param aperture: float
        Aperture in which to calculate forced photometry, in arcseconds
:param fov: float
Field of view in arcseconds
:param image_folder_path: string
Path to where the image will be stored
    :param temp_object_name:
        The object name part of the image to be opened for the forced
        photometry calculation (excluding the fov):
        [Epoch Identifier][RA in HHMMSS.SS][DEC in DDMMSS.SS]_
        [SURVEY]_[PASSBAND]
:param auto_download: Boolean
        Switch to enable/disable auto-downloading the cutout images
:param verbosity:
Verbosity > 0 will print verbose statements during the execution
:return: tuple(int, float, float, float, float, string)
Returns a tuple with the forced photometry values:
index, mag, flux, sn, err, comment
"""
# Create image filename to check against files in cutout folder
filename = image_folder_path + '/' + temp_object_name + "_" + survey + "_" \
+ band + "*.fits"
data, hdr, file_path = open_image(filename, ra, dec, fov,
image_folder_path, verbosity)
if data is not None:
img_name = file_path.split('/')[-1]
if data is None and auto_download is True:
if survey in ["desdr1", "desdr2"]:
url = ct.get_des_deepest_image_url(ra,
dec,
data_release=survey[-3:],
fov=fov,
band=band,
verbosity=verbosity)
elif survey.split("-")[0] == "unwise" and band in ["w1",
"w2",
"w3",
"w4"]:
# Hack to create npix from fov approximately
npix = int(round(fov / 60. / 4. * 100))
data_release = survey.split("-")[1]
wband = band[1]
url = ct.get_unwise_image_url(ra, dec, npix, wband,
data_release)
else:
raise ValueError(
"Survey and band name not recognized: {} {}. "
"\n "
"Possible survey names include: desdr1, "
"unwise-allwise, unwise-neo1, unwise-neo2, "
"unwise-neo3, unwise-neo4, unwise-neo5,"
"unwise-neo6".format(survey, band))
if url is not None:
img_name = temp_object_name + "_" + survey + \
"_" + band + "_fov" + '{:d}'.format(fov)
ct.download_image(url, image_name=img_name,
image_folder_path=image_folder_path,
verbosity=verbosity)
file_path = image_folder_path + '/' + img_name + '.fits'
data, hdr = fits.getdata(file_path, header=True)
file_size_sufficient = False
if data is not None:
# Check if file is sufficient
file_size_sufficient = check_image_size(img_name,
file_path,
verbosity)
if data is not None and file_size_sufficient is True:
mag, flux, sn, err, comment = \
calculate_forced_aperture_photometry(file_path,
ra, dec, survey, band,
aperture,
verbosity=verbosity)
return index, mag, flux, sn, err, comment
if data is not None and file_size_sufficient is not True:
        comment = 'image_too_small'
return index, np.nan, np.nan, np.nan, np.nan, comment
if data is None:
        comment = 'image_not_available'
return index, np.nan, np.nan, np.nan, np.nan, comment
def calculate_forced_aperture_photometry(filepath, ra, dec, survey,
band, aperture,
verbosity=0):
"""Calculates the forced photometry for a Ra/Dec position on a given
image file specified by filepath.
:param filepath: string
Path to the image on which to calculate the forced photometry.
:param ra: float
Right ascension of the source for which forced photometry should be
calculated.
:param dec: float
Declination of the source for which forced photometry should be
calculated.
    :param survey: string
        Survey keyword; The magnitude calculation depends on the survey
        photometry and hence this keyword sets the flux to magnitude
        conversion accordingly.
    :param band: string
        Passband keyword; used together with the survey keyword to select
        the appropriate Vega to AB magnitude conversion.
    :param aperture: float
        Aperture in arcseconds over which the forced photometry is
        calculated.
:param verbosity:
Verbosity > 0 will print verbose statements during the execution
:return: tuple(float, float, float, float, string)
Returns a tuple with the forced photometry values:
mag, flux, sn, err, comment
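
    Example (illustrative; the file path below is a placeholder):

    >>> mag, flux, sn, err, comment = calculate_forced_aperture_photometry(
    ...     'cutouts/J124455.23+111215.60_desdr1_z_fov120.fits',
    ...     ra=191.23, dec=11.20, survey='desdr1', band='z', aperture=2.0)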
"""
# Open the fits image
data, header = fits.getdata(filepath, header=True)
# Convert radius from arcseconds to pixel
pixelscale = get_pixelscale(header)
aperture_pixel = aperture / pixelscale # pixels
# Transform coordinates of target position to pixel scale
wcs_img = wcs.WCS(header)
pixel_coordinate = wcs_img.wcs_world2pix(ra, dec, 1)
# QUICKFIX to stop aperture photometry from crashing
# try:
# Get photometry
positions = (pixel_coordinate[0], pixel_coordinate[1])
apertures = CircularAperture(positions, r=aperture_pixel)
f = aperture_photometry(data, apertures)
flux = np.ma.masked_invalid(f['aperture_sum'])
# Get the noise
rmsimg, mean_noise, empty_flux = get_noiseaper(data, aperture_pixel)
sn = flux[0] / rmsimg
comment = 'ap_{}'.format(aperture)
if verbosity > 0:
print("flux: ", flux[0], "sn: ", sn)
if sn < 0:
flux[0] = rmsimg
err = -1
mags = flux_to_magnitude(flux, survey)[0]
else:
mags = flux_to_magnitude(flux, survey)[0]
err = mag_err(1. / sn, verbose=False)
if verbosity > 0:
print("mag: ", mags)
if mags is np.ma.masked:
mags = -999
comment = 'masked'
if sn is np.ma.masked:
sn = np.nan
if err is np.ma.masked:
err = np.nan
if flux[0] is np.ma.masked:
flux = np.nan
else:
flux = flux[0]
survey_band = survey+'_'+band
mags = pt.vega_to_ab(mags, survey_band)
return mags, flux, sn, err, comment
# except ValueError:
# return -999, np.nan, np.nan, np.nan, 'crashed'
# ------------------------------------------------------------------------------
# Image utility functions for forced photometry
# (mostly from Eduardo and not modified)
# ------------------------------------------------------------------------------
def check_image_size(image_name, file_path, verbosity):
"""
:param image_name:
:param file_path:
:param verbosity:
:return:
"""
shape = fits.getdata(file_path).shape
min_axis = np.min(shape)
if min_axis < 50 and verbosity > 0:
print("Minimum image dimension : {} (pixels)".format(min_axis))
print("Too few pixels in one axis (<50). Skipping {}".format(
image_name))
if min_axis < 50:
return False
else:
return True
def aperture_inpixels(aperture, hdr):
'''
receives aperture in arcsec. Returns aperture in pixels
'''
pixelscale = get_pixelscale(hdr)
aperture /= pixelscale #pixels
return aperture
def get_pixelscale(hdr):
'''
Get pixelscale from header and return in it in arcsec/pixel
'''
wcs_img = wcs.WCS(hdr)
scale = np.mean(proj_plane_pixel_scales(wcs_img)) * 3600
return scale
def mag_err(noise_flux_ratio, verbose=True):
'''
Calculates the magnitude error from the input noise_flux_ratio
which is basically the inverse of the Signal-to-Noise ratio
'''
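    # Worked example (illustrative): for S/N = 5 the noise-to-flux ratio is
    # 0.2, so err = (2.5 / ln(10)) * 0.2 ~ 1.0857 * 0.2 ~ 0.22 mag.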
err = (2.5 / np.log(10)) * noise_flux_ratio
if verbose:
print(err)
return err
def get_noiseaper(data, radius):
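    """Estimate the aperture-flux noise level of an image.

    Detected sources are masked, the flux is measured in N randomly placed
    apertures of the given radius (positions falling on masked sources are
    discarded), and a Gaussian is fit to the distribution of these (mostly
    empty) aperture fluxes.

    :return: tuple
        (sigma of the empty-aperture flux distribution, mean of that
        distribution, array of the empty-aperture fluxes)
    """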
# print("estimating noise in aperture: ", radius)
sources_mask = make_source_mask(data, nsigma=2.5, npixels=3,
dilate_size=15, filter_fwhm=4.5)
N = 5100
ny, nx = data.shape
    # np.int is deprecated/removed in recent numpy versions; use the builtin int
    x1 = int(nx * 0.09)
    x2 = int(nx * 0.91)
    y1 = int(ny * 0.09)
    y2 = int(ny * 0.91)
xx = np.random.uniform(x1, x2, N)
yy = np.random.uniform(y1, y2, N)
mask = sources_mask[np.int_(yy), np.int_(xx)]
xx = xx[~mask]
yy = yy[~mask]
positions = list(zip(xx, yy))
apertures = CircularAperture(positions, r=radius)
f = aperture_photometry(data, apertures, mask=sources_mask)
f = np.ma.masked_invalid(f['aperture_sum'])
m1 = np.isfinite(f) #& (f!=0)
empty_fluxes = f[m1]
emptyapmeanflux, emptyapsigma = gaussian_fit_to_histogram(empty_fluxes)
return emptyapsigma, emptyapmeanflux, empty_fluxes
def gaussian_fit_to_histogram(dataset):
""" fit a gaussian function to the histogram of the given dataset
:param dataset: a series of measurements that is presumed to be normally
distributed, probably around a mean that is close to zero.
:return: mean, mu and width, sigma of the gaussian model fit.
Taken from
https://github.com/djones1040/PythonPhot/blob/master/PythonPhot/photfunctions.py
"""
def gauss(x, mu, sigma):
return np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))
if np.ndim(dataset) == 2:
musigma = np.array([gaussian_fit_to_histogram(dataset[:, i])
                            for i in range(np.shape(dataset)[1])])
"""Import modules."""
import numpy as np
def vectorNorm(data, axis=None, out=None):
"""Calculate norm of a vector."""
data = np.array(data, dtype=np.float64, copy=True)
if out is None:
if data.ndim == 1:
return np.sqrt(np.dot(data, data))
data *= data
out = np.atleast_1d(np.sum(data, axis=axis))
np.sqrt(out, out)
return out
else:
data *= data
np.sum(data, axis=axis, out=out)
np.sqrt(out, out)
def rotationFromQuaternion(q):
"""Convert quaternion to euler-axis-angle (vrml).
Args:
q (np.array of 4 float): quaternion (w,x,y,z)
Returns:
np.array of 4 float: euler axis-angle representation (x, y, z, angle)
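
    Example (illustrative): the identity quaternion (1, 0, 0, 0) yields
    (0, 1, 0, 0), i.e. a zero angle about a default (y) axis.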
"""
q = np.array(q)
v = np.array([0.0, 0.0, 0.0, 0.0]) # axis (x,y,z) + angle
v[3] = 2.0 * np.arccos(q[0]) # angle
if v[3] < 0.0001:
# if v[3] close to zero then direction of axis not important
v[:3] = [0.0, 1.0, 0.0]
else:
# normalise axes
n = np.sqrt(np.sum(q[1:]**2))
v[:3] = q[1:] / n
return v
def convertRPYtoQuaternions(rpy, cylinder=False):
"""Convert RPY to quaternions.
Args:
rpy (np.array of 3 float): roll-pitch-yaw angle.
cylinder (bool): If True, it is a cylinder.
"""
if cylinder:
rpy[0] += np.pi/2.
cy, sy = np.cos(rpy[2]/2.), np.sin(rpy[2]/2.)
cp, sp = np.cos(rpy[1]/2.), np.sin(rpy[1]/2.)
    cr, sr = np.cos(rpy[0]/2.), np.sin(rpy[0]/2.)
"""
Code to study the result of sequence experiments, where a randomly chosen cell is repeateadly activated.
The main function to post-process simulation results is:
compute_sequence_details_batch
"""
import numpy as np
import pandas as pd
import scipy.stats as st
from pathlib import Path
from tqdm.auto import tqdm as pbar
from tctx.util import spike_trains as spt
import tctx.util.parallel
from tctx.analysis import simbatch as sb
from tctx.analysis.simbatch import CAT
MS_TO_S = 0.001
DEFAULT_FRM_THRESHOLD_PROB = 1. / 10 ** 7
DEFAULT_EFFECT_LENGTH_MS = 300
########################################################################################################################
# METRICS
def _compute_frm(ewins, cells, spikes):
"""
compute, for every cell in the experiment, their firing rate change in Hz
An entry per cell is guaranteed.
:return: a DF that looks like
               effect   pre   frm
        gid
        0         0.1  0.10  0.00
        1         0.0  0.10 -0.10
        ...       ...   ...   ...
        99998     2.0  2.60 -0.60
        99999     3.1  2.75  0.35
"""
counts = ewins.count_spikes(spikes)
counts = counts.reindex(cells.index)
counts.fillna(0, inplace=True)
counts.index = cells.index
counts.index.name = 'gid'
total_time_s = ewins.get_length_by_cat() * MS_TO_S
fr = counts / total_time_s
fr.columns.name = ''
fr.name = 'hz'
fr['frm'] = fr['effect'] - fr['baseline']
return fr
def _compute_delay_std(delays, min_spike_count):
"""
Compute std of temporal delays of spikes classified in windows after an event.
We use this as a metric for lack of precision.
We could instead use the reciprocal of the variance (which is called "precision")
but the scale then becomes problematic to visualize: maximum precision is infinite.
Note that the std doesn't make sense for cells that spiked only once and is not
representative for those that spiked very few times. That is why we first filter
for cells with a minimum number of spikes. Those cells will have std "nan"
:param delays: DF containing 'gid' and 'delay'
:param min_spike_count: minimum acceptable number of spikes
:return: pd.Series
"""
spike_count = delays.groupby('gid')['delay_in_window'].count()
mask = spike_count >= min_spike_count
stds = delays.groupby('gid')['delay_in_window'].std()
return pd.Series(stds[mask]).rename('delstd')
def _compute_spike_delays(sim_params, spikes, induced_spike_times):
"""
    Compute the delay to the closest preceding induced spike, even for the
    spikes that happened in a "baseline" window.
:return: a pd.Series with the delay per spike
"""
ind_wins = spt.ExclusiveWindows.build_between(induced_spike_times, sim_params.tstart, sim_params.tend)
spikes_delay_from_induced = ind_wins.classify_spikes(spikes).delay
return spikes_delay_from_induced
########################################################################################################################
# PROTOCOL
def get_trial_idx_from_win_idx(spikes, col='win_idx'):
"""
We number 100 trials 0-99. Spikes outside of trials will get -1 or 100 (before or late).
This relies on "win_idx" being present, which is computed in sequence analysis
(windows being consecutive trial & baseline periods)
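
    For example, with the alternating baseline/effect window numbering used
    here, win_idx 1 and 2 map to trial 0, win_idx 3 and 4 map to trial 1,
    and win_idx 0 (before the first trial) maps to -1.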
"""
    return np.ceil(spikes[col].dropna() / 2).astype(int) - 1
def define_experiment_windows(induced_times, start=None, stop=None, win=(0, +200)):
"""
:param induced_times: np.array or series that represents the start of each trial
:param start: (ms) beginning of experiment
:param stop: (ms) end of experiment
:param win: time pair that defines where we look for the effect relative to each induced spike
:return: spt.ExclusiveWindows with two categories: 'baseline' and 'effect'.
These may NOT cover the entire experiment if induced_times are too close,
leaving gaps where spikes will be ignored. This is important for experimental data.
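
    For example (illustrative), with induced_times=[1000., 2000.], start=0,
    stop=3000 and win=(0, 200), the 'effect' windows are [1000, 1200] and
    [2000, 2200] and the remaining time is classified as 'baseline'.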
"""
induced_windows_raw = spt.make_windows(induced_times, win)
# anything else is "baseline"
baseline_wins = spt.invert_windows(induced_windows_raw, start=start, stop=stop)
assert spt.are_windows_exclusive(baseline_wins)
# our induced_windows_raw may overlap and contain multiple induced spikes (if they are closer than "win")
# discard those so we have "clean windows"
# Note that we do this AFTER computing the baseline windows to avoid having induced spikes there
# This means that our new "effect_wins" and "baseline_wins" may NOT cover the entire experiment
effect_wins = spt.filter_windows_exclusive_ref(induced_windows_raw)
assert spt.are_windows_exclusive(effect_wins)
baseline_wins['cat'] = 'baseline'
effect_wins['cat'] = 'effect'
all_wins = pd.concat([baseline_wins, effect_wins], axis=0)
all_wins.sort_values(['start', 'stop', 'ref'], inplace=True)
all_wins.reset_index(drop=True, inplace=True)
all_wins.index.name = 'win_idx'
assert spt.are_windows_exclusive(all_wins)
all_wins = spt.ExclusiveWindows(all_wins, by='cat')
return all_wins
########################################################################################################################
# PROCESS
def _collect_induced_spikes(spikes, input_targeted_times, trial_length_ms, targeted_gid):
"""
The targeted cell may fire multiple times due to recurrent excitatory connections.
It may also fail to fire or fire with a random delay due to excitatory inhibitory connections.
This tags spikes as "induced" only if they are the first per trial window and within a few milliseconds
:return: a boolean series matching the spikes index
"""
inter_induction_wins = spt.make_windows(input_targeted_times, (0, trial_length_ms))
inter_induction_wins = spt.ExclusiveWindows(inter_induction_wins)
targeted_spikes = spikes[spikes.gid == targeted_gid]
targeted_spikes = inter_induction_wins.classify_spikes(targeted_spikes)
targeted_spikes = targeted_spikes[targeted_spikes['delay'] < 10.]
induced_spk_idcs = targeted_spikes.groupby('win_idx')['delay'].idxmin().values
is_induced = pd.Series(np.zeros(len(spikes), dtype=np.bool_), index=spikes.index)
is_induced.loc[induced_spk_idcs] = True
return is_induced
def compute_sequence_details(
sim_params, cells, spikes,
effect_length_ms=DEFAULT_EFFECT_LENGTH_MS,
delstd_min_spike_count=5,
trial_times_col='input_targeted_times',
):
"""
Compute multiple metrics for cells and for spikes, return as two DF
"""
is_induced_spike = _collect_induced_spikes(
spikes,
sim_params[trial_times_col],
sim_params['trial_length_ms'],
sim_params['targeted_gid'])
induced_spikes = spikes[is_induced_spike]
exp_wins = define_experiment_windows(
induced_spikes.time,
sim_params.tstart, sim_params.tend,
win=(0, effect_length_ms)
)
frm = _compute_frm(exp_wins, cells, spikes)
frm = frm.rename(columns=dict(baseline='fr_baseline', effect='fr_effect'))
delays = exp_wins.classify_spikes(spikes).rename(columns=dict(delay='delay_in_window'))
delays['delay_from_induced'] = _compute_spike_delays(sim_params, spikes, induced_spikes.time)
delstd = _compute_delay_std(delays[delays.cat == 'effect'], min_spike_count=delstd_min_spike_count)
detailed_cells = pd.concat([cells, frm], sort=True, axis=1)
detailed_cells['delstd'] = delstd
detailed_cells['spike_count'] = spikes.groupby('gid')['time'].count()
detailed_cells['spike_count'].fillna(0, inplace=True)
# normalize delstd relative to the standard deviation of uniformly distributed delays
# random_delstd = np.sqrt((effect_length_ms - 0) ** 2 / 12)
# detailed_cells['delstd_norm'] = delstd / random_delstd
# normalize frm relative to 1 spike per trial over 0 on non-trial
plus_one_frm = 1. / (effect_length_ms * MS_TO_S)
detailed_cells['frm_norm'] = detailed_cells['frm'] / plus_one_frm
detailed_cells['is_targeted'] = detailed_cells.index == sim_params.targeted_gid
detailed_spikes = pd.merge(spikes, delays.drop('gid', axis=1), left_index=True, right_index=True, how='left')
detailed_spikes['is_induced'] = is_induced_spike
detailed_spikes = pd.merge(
detailed_spikes,
detailed_cells[['ei_type']],
left_on='gid', right_index=True, how='left',
)
detailed_spikes['trial_idx'] = get_trial_idx_from_win_idx(detailed_spikes, col='win_idx')
return exp_wins, detailed_cells, detailed_spikes
def _collect_foll(all_detailed_cells, targeted_gids: pd.Series):
"""
Collect all follower gids for each simulation, differentiating by ei_type
:returns: df like:
                         e_foll_gids          i_foll_gids  e_foll_count  i_foll_count
        sim_gid
        0        (2118, 3486, 591...  (96852, 99575, 9...            42             3
        1        (553, 2118, 2240...  (93252, 93621, 9...            68            12
        2        (553, 2118, 2240...  (93359, 93621, 9...           125            21
        3        (5917, 24730, 48...                   ()             5             0
        4        (1162, 2240, 348...  (93213, 93621, 9...            80            21
        ...                      ...                  ...           ...           ...
        11032    (4379, 41169, 46...  (94603, 98130, 9...             4             3
        11033    (4379, 41169, 46...             (99221,)             4             1
        11034    (1882, 4589, 571...  (93164, 95562, 9...            62             6
        11035    (20517, 23404, 2...  (94550, 98253, 9...             7             3
        11036    (410, 3127, 5958...             (98615,)            18             1
"""
all_foll_gids = {}
for sim_gid, cells in pbar(all_detailed_cells.items(), total=len(all_detailed_cells), desc='sim'):
targeted_gid = targeted_gids.loc[sim_gid]
sb.CAT.add_cats_cells(cells)
cells = cells.drop(targeted_gid)
foll_ei_types = cells.loc[cells['frm_cat'] == 'foll', 'ei_type']
all_foll_gids[sim_gid] = {
f'{ei_type}_foll_gids': tuple(gids)
for ei_type, gids in foll_ei_types.groupby(foll_ei_types).groups.items()}
all_foll_gids = pd.DataFrame.from_dict(all_foll_gids, orient='index')
# fillna doesn't like taking empty tuples
for col, values in all_foll_gids.items():
all_foll_gids.loc[all_foll_gids[col].isna(), col] = tuple()
all_foll_gids = all_foll_gids.rename_axis(index='sim_gid')
foll_counts = all_foll_gids.applymap(len)
    foll_counts.columns = [f'{col[0]}_foll_count' for col in foll_counts]
all_foll_gids = pd.concat([all_foll_gids, foll_counts], axis=1)
return all_foll_gids
def compute_sequence_details_batch(
batch,
batch_folder: str,
effect_length_ms=DEFAULT_EFFECT_LENGTH_MS,
delstd_min_spike_count=5,
trial_times_col='forced_times',
threshold_prob=DEFAULT_FRM_THRESHOLD_PROB,
max_workers=None,
exec_mode=None,
):
"""
Compute the same as compute_sequence_details but for multiple experiments.
Results are stored under the given folder and added to the batch registry.
Batch should contain cells_raw and spikes_raw, which can be automatically added for new sims like:
batch.register_raw()
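
    Illustrative call (the folder path is a placeholder):

        batch = compute_sequence_details_batch(batch, 'sims/seq_batch_01')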
:return: a copy of the batch with references to the stored exp_wins, cells, spikes
"""
batch_folder = Path(batch_folder)
sim_gids = batch.reg.index
res = tctx.util.parallel.independent_tasks(
compute_sequence_details,
[
(
batch.reg.loc[sim_gid],
batch.stores['cells_raw'][sim_gid],
batch.stores['spikes_raw'][sim_gid],
effect_length_ms,
delstd_min_spike_count,
trial_times_col,
)
for sim_gid in pbar(sim_gids, desc='load')
],
max_workers=max_workers,
mode=exec_mode,
)
all_exp_wins = {}
all_detailed_cells = {}
all_detailed_spikes = {}
for i, r in pbar(res.items(), desc='remove cats'):
all_exp_wins[sim_gids[i]] = r[0]
CAT.remove_cats_cells(r[1])
all_detailed_cells[sim_gids[i]] = r[1]
CAT.remove_cats_spikes(r[2])
all_detailed_spikes[sim_gids[i]] = r[2]
all_cmf = compute_frm_norm_cmf_multisim(
sim_gids, all_exp_wins, all_detailed_spikes, all_detailed_cells, effect_length_ms)
# this modifies each dataframe of 'all_detailed_cells' inplace
classify_cells_by_frm_null_dist_multisim(sim_gids, all_cmf, all_detailed_cells, threshold_prob=threshold_prob)
all_foll_gids = _collect_foll(all_detailed_cells, batch.reg['targeted_gid'])
for col, values in all_foll_gids.items():
batch.reg[col] = values
batch.register_and_save(batch_folder, 'cells', all_detailed_cells)
batch.register_and_save(batch_folder, 'spikes', all_detailed_spikes)
batch.register_and_save(batch_folder, 'ewins', {sim_gid: ewin.windows for sim_gid, ewin in all_exp_wins.items()})
return batch
########################################################################################################################
# NULL DISTRIBUTION
def _sample_from_null_frm_dist(mean_spike_count, total_baseline_time, total_effect_time, sample_size=10 ** 6):
"""
Our null distribution is that which says that the firing rate of the cell is NOT affected
by the injected spike.
In that case, the firing RATE in the "baseline" and "effect" windows is the same.
However, the actual COUNT of spikes in those windows will be different because of stochasticity
and even the mean may differ if the windows have different lengths.
Generate 2 independent poisson counts, convert them to rates and substract them.
Notice this is similar to a Skellam distribution except that, because we conver to rates, we are
scaling both poissons before subtracting. This makes the values not integer
(although still discrete due to the integer-based underlying poisson) and non-contiguous.
A poisson RV scaled is no longer poisson.
"""
total_time = total_baseline_time + total_effect_time
samples = (
st.poisson(mean_spike_count * total_effect_time / total_time).rvs(sample_size) / total_effect_time
-
st.poisson(mean_spike_count * total_baseline_time / total_time).rvs(sample_size) / total_baseline_time
)
# convert 1/ms to 1/s (Hz)
samples = samples / MS_TO_S
return samples
def _estimate_frm_norm_cmf(
baseline_total: float,
effect_total: float,
mean_spike_count: float,
plus_one_frm: float,
cmf_repeat_count=50, sample_size=10 ** 5
):
cmf_bins = np.linspace(-1, 2, 1001)
multiple_cmfs = []
for i in range(cmf_repeat_count):
null_samples = _sample_from_null_frm_dist(
mean_spike_count,
baseline_total, effect_total,
sample_size,
)
null_samples = null_samples / plus_one_frm
h = np.histogram(null_samples, cmf_bins)[0]
multiple_cmfs.append(
np.cumsum(h / np.sum(h))
)
    cmf = np.mean(multiple_cmfs, axis=0)
#! /usr/bin/python
"""
Metropolis-Hastings Monte Carlo integration library
"""
import numpy as np
from math import sqrt
from random import normalvariate, uniform
def MCIntegrate(variables, integrand, probability, numsamples, parameters={}, burnin=1000) :
"""Perform a Metropolis-Hastings Monte Carlo integral for a 5D vector integrand"""
# Initialize data collection
samples = -burnin
numsamples = int(numsamples)
results = np.zeros([numsamples, 5])
accepted = 0
# Initialize values for each variable
values = [0.0] * len(variables)
lastprob = init_values(variables, values, probability, parameters)
# Start integrating
while samples < numsamples :
# Jump to a new point
values, lastprob, jumped = update_values(variables, values, probability, parameters, lastprob)
# Are we done burning in?
if samples >= 0 :
# Count the number of iterations to compute the acceptance ratio
if jumped : accepted += 1
# Compute the integrand
intval = integrand(values, parameters)
# Add it to the results
results[samples] = intval
# Increment samples
samples += 1
# Compute integrals for the four different results (different types of stationary points)
vals = [0] * 5
vals[0], vals[1], vals[2], vals[3], vals[4] = np.transpose(results)
integrals = np.zeros(5)
errors = np.zeros(5)
variances = np.zeros(5)
for i in range(5) :
integrals[i] = np.sum(vals[i]) / numsamples
delta = vals[i] - integrals[i]
        variances[i] = np.sum(delta * delta)