prompt | completion | api
---|---|---
stringlengths 19–879k | stringlengths 3–53.8k | stringlengths 8–59
# -*- coding: utf-8 -*-
"""
2D interpolation. In this program the optimal fraction doses are computed based on a maximal OAR dose while maximizing tumor BED.
single_fraction allows to compute single fraction doses, while whole_plan computes the doses for a whole treatment plan (when all sparing factors are known).
whole_plan_print prints the doses in a well-arranged manner.
"""
import numpy as np
from scipy.stats import truncnorm
import scipy as sc
from scipy.stats import invgamma
from scipy.stats import t
def data_fit(data):
"""
This function fits the alpha and beta value for the conjugate prior
Parameters
----------
data : array
an n x k matrix with n the number of patients and k the number of sparing factors per patient.
Returns
-------
list
alpha and beta hyperparameter.
"""
variances = data.var(axis=1)
alpha, loc, beta = invgamma.fit(variances, floc=0)
return [alpha, beta]
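# Illustrative usage sketch (added; not part of the original module). The synthetic
# 50 x 6 sparing-factor matrix below is an assumption purely for demonstration.
def _example_data_fit():
    rng = np.random.default_rng(0)
    synthetic_data = rng.normal(loc=1.0, scale=0.08, size=(50, 6))  # 50 patients, 6 sparing factors each
    alpha_hat, beta_hat = data_fit(synthetic_data)
    return alpha_hat, beta_hat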
def get_truncated_normal(mean=0, sd=1, low=0.01, upp=10):
"""
produces a truncated normal distribution
Parameters
----------
mean : float, optional
The default is 0.
sd : float, optional
The default is 1.
low : float, optional
The default is 0.01.
upp : float, optional
The default is 10.
Returns
-------
scipy.stats._distn_infrastructure.rv_frozen
distribution function.
"""
return truncnorm((low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)
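# Added usage sketch (illustrative): the returned object is a frozen scipy distribution,
# so the usual .rvs/.pdf/.cdf methods are available, e.g.
#   dist = get_truncated_normal(mean=1.0, sd=0.1, low=0.01, upp=10)
#   samples = dist.rvs(1000)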
def std_calc(measured_data, alpha, beta):  # this isn't used at this point, but could be applied. In general it gives lower std values.
"""
calculates the most likely standard deviation for a list of k sparing factors and an inverse-gamma conjugate prior
measured_data: list/array with k sparing factors
Parameters
----------
measured_data : list/array
list/array with k sparing factors
alpha : float
shape of inverse-gamma distribution
beta : float
scale of inverse-gamma distribution
Returns
-------
std : float
most likely std based on the measured data and inverse-gamma prior
"""
n = len(measured_data)
var_values = np.arange(0.00001,0.4,0.00001)
likelihood_values = np.zeros(len(var_values))
for index,value in enumerate(var_values):
likelihood_values[index] = value**(-alpha-1)/value**(n/2)*np.exp(-beta/value)
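# Added sketch (not part of the original code, which is truncated above): with a normal
# likelihood and an inverse-gamma IG(alpha, beta) prior on the variance, the posterior is
# conjugate, IG(alpha + n/2, beta + 0.5*sum((x - mean)**2)), so the grid search could be
# replaced by the closed-form posterior mode. Using the sample mean as the location is an
# assumption of this sketch.
def _posterior_mode_std(measured_data, alpha, beta):
    data = np.asarray(measured_data, dtype=float)
    n = len(data)
    alpha_post = alpha + n / 2.0
    beta_post = beta + 0.5 * np.sum((data - data.mean()) ** 2)
    return np.sqrt(beta_post / (alpha_post + 1.0))  # mode of IG(alpha_post, beta_post)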
from railrl.data_management.simple_replay_pool import SimpleReplayPool
from railrl.predictors.dynamics_model import FullyConnectedEncoder, InverseModel, ForwardModel
import tensorflow as tf
import time
import numpy as np
from sandbox.rocky.tf.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer
from railrl.misc.pyhelper_fns.vis_utils import MyAnimationMulti
def planner_info(arm_loss, box_loss, forward_models_outputs):
return {'arm_loss':arm_loss, 'box_loss':box_loss, \
'forward_models_outputs': forward_models_outputs}
def gather_cols(params, indices, name=None):
"""Gather columns of a 2D tensor.
Args:
params: A 2D tensor.
indices: A 1D tensor. Must be one of the following types: ``int32``, ``int64``.
name: A name for the operation (optional).
Returns:
A 2D Tensor. Has the same type as ``params``.
"""
with tf.op_scope([params, indices], name, "gather_cols") as scope:
# Check input
params = tf.convert_to_tensor(params, name="params")
indices = tf.convert_to_tensor(indices, name="indices")
try:
params.get_shape().assert_has_rank(2)
except ValueError:
raise ValueError('\'params\' must be 2D.')
try:
indices.get_shape().assert_has_rank(1)
except ValueError:
raise ValueError('\'indices\' must be 1D.')
# Define op
p_shape = tf.shape(params)
p_flat = tf.reshape(params, [-1])
i_flat = tf.reshape(tf.reshape(tf.range(0, p_shape[0]) * p_shape[1],
[-1, 1]) + indices, [-1])
return tf.reshape(tf.gather(p_flat, i_flat),
[p_shape[0], -1])
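# Added note (not in the original): on TensorFlow >= 1.3 the same column gather can be
# written directly as tf.gather(params, indices, axis=1); the helper above is an
# older-API workaround. A minimal usage sketch, assuming a TF1-style session `sess`:
#   cols = sess.run(gather_cols(tf.constant([[1., 2., 3.], [4., 5., 6.]]), [0, 2]))
#   # -> [[1., 3.], [4., 6.]]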
"""
Planner takes two states (S_init and S_goal) and outputs an action.
Fine-tuning is out of the scope of Planner.
"""
class Planner(object):
def __init__(
self,
dynamic_model,
encoder,
sess
):
self.encoder = encoder
self.dynamic_model = dynamic_model
self.sess = sess
##initialize the model.....
def get_action(self, S_init, S_goal):
return None
"""
Inverse-model planner should be easy, just return the action.
"""
class InverseModelPlanner(object):
def __init__(
self,
dynamic_model,
env,
encoder,
sess = None,
):
if sess is None:
sess = tf.get_default_session()
self.sess = sess
#re-construct the dynamic model
self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
encoder1 = encoder.get_weight_tied_copy(observation_input=self.S_init_ph)
encoder2 = encoder.get_weight_tied_copy(observation_input=self.S_goal_ph)
self.inverse_model = dynamic_model.get_weight_tied_copy(feature_input1=encoder1.output,
feature_input2=encoder2.output)
def get_action(self, S_init, S_goal):
action = self.sess.run(self.inverse_model.output, feed_dict = \
{self.S_init_ph:S_init, self.S_goal_ph: S_goal})
return action
"""
ForwardModel planner: optimize the action according to the objective
min_{a} ||S_next - S_goal||^2
"""
class CEMPlanner_arm_coord():
def __init__(
self,
dynamic_model,
encoder,
env,
sess = None,
max_length = 15,
sample_batch_size = 2000,
top_k = 200,
action_penalty=False,
accumulated_loss = False):
self.sample_batch_size = sample_batch_size
self.top_k = top_k
self.env = env
if sess is None:
sess = tf.get_default_session()
self.sess = sess
self.max_length = max_length
self.action_ph = tf.placeholder(tf.float32, [max_length, None, 4])
self.forward_model_list = []
# build the recurrent model w.r.t. the max length
self.S_init_ph = tf.placeholder(tf.float32, [None, 24])
self.S_goal_ph = tf.placeholder(tf.float32, [None, 24])
#only two feature encoders
self.encoder1 = encoder.get_weight_tied_copy(observation_input=self.S_init_ph)
self.encoder2 = encoder.get_weight_tied_copy(observation_input=self.S_goal_ph)
forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
action_input=self.action_ph[0])
self.forward_model_list.append(forward_model)
self.forward_model_output_list = [forward_model.output] #for debug purpose only
for i in range(1,max_length):
forward_model = dynamic_model.get_weight_tied_copy(feature_input = forward_model.output,\
action_input = self.action_ph[i])
self.forward_model_list.append(forward_model)
self.forward_model_output_list.append(forward_model.output)
## objective
def transfer_box_global_tf(obs):
arm2box = gather_cols(obs, [4,5])/10.0
return gather_cols(obs, [21,22]) + arm2box
self.objective_list = []
self.arm_loss_list = []
self.box_loss_list = []
self.objective_topk_index_list = []
current_objective = 0
#objective
for forward_model in self.forward_model_list:
if accumulated_loss:
current_objective += tf.reduce_sum(tf.square(transfer_box_global_tf(forward_model.output)-\
transfer_box_global_tf(self.encoder2.output)), axis = 1)
else:
current_objective = tf.reduce_sum(tf.square(transfer_box_global_tf(forward_model.output)-\
transfer_box_global_tf(self.encoder2.output)), axis = 1)
self.objective_list.append(current_objective)
self.arm_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][:4] - self.encoder2.output[0][:4])))
self.box_loss_list.append(tf.reduce_sum(tf.square(transfer_box_global_tf(forward_model.output)-\
transfer_box_global_tf(self.encoder2.output)))*100)
if action_penalty:
for i in range(len(self.objective_list)):
self.objective_list[i] += tf.reduce_sum(tf.square(self.action_ph),axis = [0,2])*0.5
def get_action(self, S_init, S_goal, steps = 1, plot_loss = False, debug = False, stop_variance = 0.2, stop_itr = 3, init_batch_size = 50000):
assert(steps <= self.max_length)
# fit a multivariate Gaussian
mean_list = None
cov_matrix = None
batch_S_init = np.dot(np.ones([init_batch_size, 1]), S_init.reshape(1,-1))
batch_S_goal = np.dot(np.ones([init_batch_size, 1]), S_goal.reshape(1,-1))
#CEM
actions = np.random.rand(self.max_length, init_batch_size, 4)
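# Added standalone sketch (illustrative; not the class's actual get_action, which is
# truncated above): the cross-entropy method iteratively samples action sequences,
# keeps the top_k lowest-cost ones, and refits the sampling distribution. The objective
# f(actions) -> cost stands in for the forward-model box/arm loss.
def _cem_sketch(f, horizon, action_dim, iters=5, batch=1000, top_k=100, rng=None):
    rng = rng or np.random.default_rng(0)
    mean = np.zeros((horizon, action_dim))
    std = np.ones((horizon, action_dim))
    for _ in range(iters):
        samples = rng.normal(mean, std, size=(batch, horizon, action_dim))
        costs = np.array([f(s) for s in samples])
        elite = samples[np.argsort(costs)[:top_k]]  # lowest-cost action sequences
        mean, std = elite.mean(axis=0), elite.std(axis=0) + 1e-6
    return mean  # the first action of the mean plan would be executed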
# -*- coding: utf-8 -*-
""" Module to implement acoustic shadowing along with spherical spreading
of sound
Created on Mon Jun 17 16:08:50 2019
@author: tbeleyur
"""
import time
import sys
sys.path.append('..//bridson//')
sys.path.append('..//')
import numpy as np
import pandas as pd
import scipy.spatial as spatial
import statsmodels.api as sm
def soundprop_w_acoustic_shadowing(start_point, end_point, all_other_points,
**kwargs):
'''Calculate the received level of a sound emitted at the start point
and reaching the end point, after potentially passing through other points on
the way.
Each point sound passes through creates a drop in the intensity because of
acoustic shadowing.
Parameters
----------
start_point : 1 x 2 array like
end_point : 1 x 2 array like
all_other_points : Nbats-2 x 2 array like
xy coordinates of all points between start and end point
in a rectangular area of given width
Keyword Arguments
----------------
implement_shadowing : Boolean. If True then shadowing calculations are done, else only simple spherical spreading.
rectangle_width : float >0. width of the rectangle
emitted_source_level : dictionary with keys 'dBSPL' and 'ref_distance',
indicating source level in dB SPL re 20muPa and reference distance in metres.
R : float >0. straight line distance between source and receiver. This is used
in case there are no obstacles between them.
acoustic_shadowing_model : statsmodel object that allows calculation of how much
shadowing will be observed.
min_spacing : float> 0.
Returns
-------
received_level : float.
received level of the sound
'''
if kwargs.get('implement_shadowing'):
all_points_between = get_points_in_between(start_point, end_point,
all_other_points, **kwargs)
received_level = calc_RL(kwargs['R'],kwargs['emitted_source_level']['dBSPL'],
kwargs['emitted_source_level']['ref_distance'])
num_obstacles = all_points_between.shape[0]
if num_obstacles >= 1:
acoustic_shadowing = calculate_acoustic_shadowing(num_obstacles,
**kwargs)
received_level += acoustic_shadowing
else:
received_level = calc_RL(kwargs['R'],kwargs['emitted_source_level']['dBSPL'],
kwargs['emitted_source_level']['ref_distance'])
return(received_level)
def get_distances_between_points(xy_between, start, end):
'''
'''
all_xy = np.row_stack((start, xy_between, end))
distance_matrix = spatial.distance_matrix(all_xy, all_xy)
distance_to_source = np.argsort(distance_matrix[:,0])
points_sorted = all_xy[distance_to_source,:]
distances_sorted = spatial.distance_matrix(points_sorted, points_sorted)
num_distances = all_xy.shape[0] - 1
point_2_point_distances = np.zeros(num_distances)
for i, (point0, point1) in enumerate(zip(range(all_xy.shape[0]-1),
range(1,all_xy.shape[0]))):
point_2_point_distances[i] = distances_sorted[point0, point1]
return(point_2_point_distances)
def calculate_acoustic_shadowing(num_obstacles,
**kwargs):
'''Calculates the reduction in received level of a call due to acoustic shadowing.
The received level of the call with shadowing is calculated with an iterative
application of the bistatic sonar equation. The TS used here is the bistatic
target strength at emitter-receiver angular separations of 180 degrees.
Parameters
----------
num_obstacles : int >1 .
Number of obstacles between receiver and emitter.
Keyword Arguments
-----------------
acoustic_shadowing_model : statsmodel object
A statistical model that allows calculation of
the amount of acoustic shadowing in dB.
For predictions the model accepts a
pd.DataFrame which has the following
columns (this might depend on the exact model
loaded too!)
obstacles
spacing
min_spacing : float>0.
Separation between bats/obstacles
see the_cocktail_party_nightmare
Returns
-------
shadowing_reduction : float.
Reduction of received level due to shadowing in dB.
'''
no_obstacle = pd.DataFrame(data={'obstacles':[0],
'spacing':[kwargs['min_spacing']],
})
with_obstacles = pd.DataFrame(data={'obstacles':[num_obstacles],
'spacing':[kwargs['min_spacing']],
})
#convert_to_categorical(no_obstacle, 'spacing')
#convert_to_categorical(with_obstacles, 'spacing')
level_w_obstacles = kwargs['acoustic_shadowing_model'].predict(with_obstacles)
level_wo_obstacles = kwargs['acoustic_shadowing_model'].predict(no_obstacle)
shadowing_reduction = float(level_w_obstacles - level_wo_obstacles)
return(shadowing_reduction)
def convert_to_categorical(df, column):
'''
'''
df[column] = pd.Categorical(df[column])
return(df)
def calc_RL(distance, SL, ref_dist, **kwargs):
'''calculates received level only because of spherical spreading.
Parameters
-----------
distance : float>0. receiver distance from source in metres.
SL : float. source level in dB SPL re 20 muPa at the reference distance.
ref_dist : float >0. distance at which source level was measured in metres.
Typically 1metre by convention.
Keyword Arguments
-----------------
atmospheric_attenuation : float <= 0.
Atmospheric attenuation in dB/m.
This has to be a negative number.
Defaults to no atmospheric attenuation (0 dB/m).
Returns
-------
RL : received level in dB SPL re 20muPa.
'''
RL = SL - 20*np.log10(float(distance/ref_dist))
RL += kwargs.get('atmospheric_attenuation', 0)*distance
return(RL)
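# Added worked example (not in the original): with SL = 100 dB SPL at ref_dist = 1 m and a
# receiver at distance = 10 m, spherical spreading alone gives RL = 100 - 20*log10(10) = 80 dB;
# an atmospheric_attenuation of -0.5 dB/m would subtract a further 5 dB, giving 75 dB SPL.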
def get_points_in_between(start_point, end_point, all_other_points,
**kwargs):
'''
Parameters
----------
start_point : 1x2 array like
xy coordinates of starting point
end_point : 1x2 array like
xy coordinates of end points
all_other_points : N x 2 array like
xy coordinates of all other points
Keyword Arguments
-----------------
rectangle_width : float >0.
The width of the rectangle between the start and end point.
Returns
-------
points_between : Mpoints_between x 2 np.array where Mpoints can be >= 0.
'''
rectangle_limits, rotation_matrix = make_rectangle_between_2_points(start_point,
end_point,**kwargs)
points_between = get_points_in_rectangle(rectangle_limits, start_point,
all_other_points, rotation_matrix)
return(points_between)
def get_points_in_between_thecircleversion(start_point, end_point,
all_other_points,**kwargs):
'''Second attempt at a faster version of the
previous get_points_in_between function.
It is fast *and* dirty: it doesn't quite apply when many bats are packed tightly
together, but as long as the 'rectangle_width' is decently large it
should be okay.
'''
# get line equation from A to B
diff_x_y = end_point-start_point
vertical, m = calculate_slope(diff_x_y)
numpoints = 100 # choose a default density for now
points_along_line = np.zeros((numpoints, 2))
import numpy
import six.moves
import cellprofiler_core.image
import cellprofiler_core.measurement
from cellprofiler_core.constants.measurement import (
M_LOCATION_CENTER_X,
M_LOCATION_CENTER_Y,
FF_COUNT,
FF_PARENT,
COLTYPE_FLOAT,
M_NUMBER_OBJECT_NUMBER,
COLTYPE_INTEGER,
FF_CHILDREN_COUNT,
)
import cellprofiler.modules.splitormergeobjects
import cellprofiler_core.object
import cellprofiler_core.pipeline
import cellprofiler_core.workspace
import tests.modules
INPUT_OBJECTS_NAME = "inputobjects"
OUTPUT_OBJECTS_NAME = "outputobjects"
IMAGE_NAME = "image"
OUTLINE_NAME = "outlines"
def test_load_v5():
file = tests.modules.get_test_resources_directory("splitormergeobjects/v5.pipeline")
with open(file, "r") as fd:
data = fd.read()
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
pipeline.add_listener(callback)
pipeline.loadtxt(six.moves.StringIO(data))
module = pipeline.modules()[0]
assert module.objects_name.value == "IdentifyPrimaryObjects"
assert module.output_objects_name.value == "SplitOrMergeObjects"
assert module.relabel_option.value == "Merge"
assert module.distance_threshold.value == 0
assert not module.wants_image.value
assert module.image_name.value == "None"
assert module.minimum_intensity_fraction.value == 0.9
assert module.where_algorithm.value == "Closest point"
assert module.merge_option.value == "Distance"
assert module.parent_object.value == "None"
assert module.merging_method.value == "Disconnected"
def test_load_v4():
file = tests.modules.get_test_resources_directory("splitormergeobjects/v4.pipeline")
with open(file, "r") as fd:
data = fd.read()
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
pipeline.add_listener(callback)
pipeline.loadtxt(six.moves.StringIO(data))
assert len(pipeline.modules()) == 2
module = pipeline.modules()[0]
assert isinstance(
module, cellprofiler.modules.splitormergeobjects.SplitOrMergeObjects
)
assert module.objects_name == "blobs"
assert module.output_objects_name == "RelabeledBlobs"
assert (
module.relabel_option == cellprofiler.modules.splitormergeobjects.OPTION_MERGE
)
assert module.distance_threshold == 2
assert not module.wants_image
assert module.image_name == "Guide"
assert module.minimum_intensity_fraction == 0.8
assert (
module.where_algorithm
== cellprofiler.modules.splitormergeobjects.CA_CLOSEST_POINT
)
assert module.merge_option == cellprofiler.modules.splitormergeobjects.UNIFY_PARENT
assert module.parent_object == "Nuclei"
assert (
module.merging_method == cellprofiler.modules.splitormergeobjects.UM_CONVEX_HULL
)
module = pipeline.modules()[1]
assert (
module.relabel_option == cellprofiler.modules.splitormergeobjects.OPTION_SPLIT
)
assert module.wants_image
assert (
module.where_algorithm == cellprofiler.modules.splitormergeobjects.CA_CENTROIDS
)
assert (
module.merge_option == cellprofiler.modules.splitormergeobjects.UNIFY_DISTANCE
)
assert (
module.merging_method
== cellprofiler.modules.splitormergeobjects.UM_DISCONNECTED
)
def rruunn(
input_labels,
relabel_option,
merge_option=cellprofiler.modules.splitormergeobjects.UNIFY_DISTANCE,
unify_method=cellprofiler.modules.splitormergeobjects.UM_DISCONNECTED,
distance_threshold=5,
minimum_intensity_fraction=0.9,
where_algorithm=cellprofiler.modules.splitormergeobjects.CA_CLOSEST_POINT,
image=None,
parent_object="Parent_object",
parents_of=None,
):
"""Run the SplitOrMergeObjects module
returns the labels matrix and the workspace.
"""
module = cellprofiler.modules.splitormergeobjects.SplitOrMergeObjects()
module.set_module_num(1)
module.objects_name.value = INPUT_OBJECTS_NAME
module.output_objects_name.value = OUTPUT_OBJECTS_NAME
module.relabel_option.value = relabel_option
module.merge_option.value = merge_option
module.merging_method.value = unify_method
module.parent_object.value = parent_object
module.distance_threshold.value = distance_threshold
module.minimum_intensity_fraction.value = minimum_intensity_fraction
module.wants_image.value = image is not None
module.where_algorithm.value = where_algorithm
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.RunException)
pipeline.add_listener(callback)
pipeline.add_module(module)
image_set_list = cellprofiler_core.image.ImageSetList()
image_set = image_set_list.get_image_set(0)
if image is not None:
img = cellprofiler_core.image.Image(image)
image_set.add(IMAGE_NAME, img)
module.image_name.value = IMAGE_NAME
object_set = cellprofiler_core.object.ObjectSet()
o = cellprofiler_core.object.Objects()
o.segmented = input_labels
object_set.add_objects(o, INPUT_OBJECTS_NAME)
workspace = cellprofiler_core.workspace.Workspace(
pipeline,
module,
image_set,
object_set,
cellprofiler_core.measurement.Measurements(),
image_set_list,
)
if parents_of is not None:
m = workspace.measurements
ftr = FF_PARENT % parent_object
m[INPUT_OBJECTS_NAME, ftr] = parents_of
module.run(workspace)
output_objects = workspace.object_set.get_objects(OUTPUT_OBJECTS_NAME)
return output_objects.segmented, workspace
def test_split_zero():
labels, workspace = rruunn(
numpy.zeros((10, 20), int),
cellprofiler.modules.splitormergeobjects.OPTION_SPLIT,
)
assert numpy.all(labels == 0)
assert labels.shape[0] == 10
assert labels.shape[1] == 20
assert isinstance(workspace, cellprofiler_core.workspace.Workspace)
m = workspace.measurements
assert isinstance(m,cellprofiler_core.measurement.Measurements)
count = m.get_current_image_measurement(FF_COUNT % OUTPUT_OBJECTS_NAME)
assert count == 0
for feature_name in (
M_LOCATION_CENTER_X,
M_LOCATION_CENTER_Y,
):
values = m.get_current_measurement(OUTPUT_OBJECTS_NAME, feature_name)
assert len(values) == 0
module = workspace.module
assert isinstance(
module, cellprofiler.modules.splitormergeobjects.SplitOrMergeObjects
)
columns = module.get_measurement_columns(workspace.pipeline)
assert len(columns) == 6
for object_name, feature_name, coltype in (
(OUTPUT_OBJECTS_NAME, M_LOCATION_CENTER_X, COLTYPE_FLOAT,),
(OUTPUT_OBJECTS_NAME, M_LOCATION_CENTER_Y, COLTYPE_FLOAT,),
(OUTPUT_OBJECTS_NAME, M_NUMBER_OBJECT_NUMBER, COLTYPE_INTEGER,),
(INPUT_OBJECTS_NAME, FF_CHILDREN_COUNT % OUTPUT_OBJECTS_NAME, COLTYPE_INTEGER,),
(OUTPUT_OBJECTS_NAME, FF_PARENT % INPUT_OBJECTS_NAME, COLTYPE_INTEGER,),
("Image", FF_COUNT % OUTPUT_OBJECTS_NAME, COLTYPE_INTEGER,),
):
assert any(
[
object_name == c[0] and feature_name == c[1] and coltype == c[2]
for c in columns
]
)
categories = module.get_categories(workspace.pipeline, "Image")
assert len(categories) == 1
assert categories[0] == "Count"
categories = module.get_categories(workspace.pipeline, OUTPUT_OBJECTS_NAME)
assert len(categories) == 3
assert any(["Location" in categories])
assert any(["Parent" in categories])
assert any(["Number" in categories])
categories = module.get_categories(workspace.pipeline, INPUT_OBJECTS_NAME)
assert len(categories) == 1
assert categories[0] == "Children"
f = module.get_measurements(workspace.pipeline, "Image", "Count")
assert len(f) == 1
assert f[0] == OUTPUT_OBJECTS_NAME
f = module.get_measurements(workspace.pipeline, OUTPUT_OBJECTS_NAME, "Location")
assert len(f) == 2
assert all([any([x == y for y in f]) for x in ("Center_X", "Center_Y")])
f = module.get_measurements(workspace.pipeline, OUTPUT_OBJECTS_NAME, "Parent")
assert len(f) == 1
assert f[0] == INPUT_OBJECTS_NAME
f = module.get_measurements(workspace.pipeline, OUTPUT_OBJECTS_NAME, "Number")
assert len(f) == 1
assert f[0] == "Object_Number"
f = module.get_measurements(workspace.pipeline, INPUT_OBJECTS_NAME, "Children")
assert len(f) == 1
assert f[0] == "%s_Count" % OUTPUT_OBJECTS_NAME
def test_split_one():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels_out, workspace = rruunn(
labels, cellprofiler.modules.splitormergeobjects.OPTION_SPLIT
)
assert numpy.all(labels == labels_out)
assert isinstance(workspace, cellprofiler_core.workspace.Workspace)
m = workspace.measurements
assert isinstance(m,cellprofiler_core.measurement.Measurements)
count = m.get_current_image_measurement(FF_COUNT % OUTPUT_OBJECTS_NAME)
assert count == 1
for feature_name, value in (
(M_LOCATION_CENTER_X, 5),
(M_LOCATION_CENTER_Y, 3),
(FF_PARENT % INPUT_OBJECTS_NAME, 1),
):
values = m.get_current_measurement(OUTPUT_OBJECTS_NAME, feature_name)
assert len(values) == 1
assert round(abs(values[0] - value), 7) == 0
values = m.get_current_measurement(
INPUT_OBJECTS_NAME, FF_CHILDREN_COUNT % OUTPUT_OBJECTS_NAME,
)
assert len(values) == 1
assert values[0] == 1
def test_split_one_into_two():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 1
labels_out, workspace = rruunn(
labels, cellprofiler.modules.splitormergeobjects.OPTION_SPLIT
)
index = numpy.array([labels_out[3, 5], labels_out[3, 15]])
assert index[0] != index[1]
assert all([x in index for x in (1, 2)])
expected = numpy.zeros((10, 20), int)
expected[2:5, 3:8] = index[0]
expected[2:5, 13:18] = index[1]
assert numpy.all(labels_out == expected)
m = workspace.measurements
values = m.get_current_measurement(
OUTPUT_OBJECTS_NAME, FF_PARENT % INPUT_OBJECTS_NAME,
)
assert len(values) == 2
assert numpy.all(values == 1)
values = m.get_current_measurement(
INPUT_OBJECTS_NAME, FF_CHILDREN_COUNT % OUTPUT_OBJECTS_NAME,
)
assert len(values) == 1
assert values[0] == 2
def test_unify_zero():
labels, workspace = rruunn(
numpy.zeros((10, 20), int),
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
)
assert numpy.all(labels == 0)
assert labels.shape[0] == 10
assert labels.shape[1] == 20
def test_unify_one():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels_out, workspace = rruunn(
labels, cellprofiler.modules.splitormergeobjects.OPTION_MERGE
)
assert numpy.all(labels == labels_out)
def test_unify_two_to_one():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 2
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
distance_threshold=6,
)
assert numpy.all(labels_out[labels != 0] == 1)
assert numpy.all(labels_out[labels == 0] == 0)
def test_unify_two_stays_two():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 2
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
distance_threshold=4,
)
assert numpy.all(labels_out == labels)
def test_unify_image_centroids():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 2
image = numpy.ones((10, 20)) * (labels > 0) * 0.5
image[3, 8:13] = 0.41
image[3, 5] = 0.6
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
distance_threshold=6,
image=image,
minimum_intensity_fraction=0.8,
where_algorithm=cellprofiler.modules.splitormergeobjects.CA_CENTROIDS,
)
assert numpy.all(labels_out[labels != 0] == 1)
assert numpy.all(labels_out[labels == 0] == 0)
def test_dont_unify_image_centroids():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 2
image = numpy.ones((10, 20)) * labels * 0.5
image[3, 8:12] = 0.41
image[3, 5] = 0.6
image[3, 15] = 0.6
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
distance_threshold=6,
image=image,
minimum_intensity_fraction=0.8,
where_algorithm=cellprofiler.modules.splitormergeobjects.CA_CENTROIDS,
)
assert numpy.all(labels_out == labels)
def test_unify_image_closest_point():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 2
image = numpy.ones((10, 20)) * (labels > 0) * 0.6
image[2, 8:13] = 0.41
image[2, 7] = 0.5
image[2, 13] = 0.5
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
distance_threshold=6,
image=image,
minimum_intensity_fraction=0.8,
where_algorithm=cellprofiler.modules.splitormergeobjects.CA_CLOSEST_POINT,
)
assert numpy.all(labels_out[labels != 0] == 1)
assert numpy.all(labels_out[labels == 0] == 0)
def test_dont_unify_image_closest_point():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 2
image = numpy.ones((10, 20)) * labels * 0.6
image[3, 8:12] = 0.41
image[2, 7] = 0.5
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
distance_threshold=6,
image=image,
minimum_intensity_fraction=0.8,
where_algorithm=cellprofiler.modules.splitormergeobjects.CA_CLOSEST_POINT,
)
assert numpy.all(labels_out == labels)
def test_unify_per_parent():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 2
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
merge_option=cellprofiler.modules.splitormergeobjects.UNIFY_PARENT,
parent_object="Parent_object",
parents_of=numpy.array([1, 1]),
)
assert numpy.all(labels_out[labels != 0] == 1)
def test_unify_convex_hull():
labels = numpy.zeros((10, 20), int)
labels[2:5, 3:8] = 1
labels[2:5, 13:18] = 2
expected = numpy.zeros(labels.shape, int)
expected[2:5, 3:18] = 1
labels_out, workspace = rruunn(
labels,
cellprofiler.modules.splitormergeobjects.OPTION_MERGE,
merge_option=cellprofiler.modules.splitormergeobjects.UNIFY_PARENT,
unify_method=cellprofiler.modules.splitormergeobjects.UM_CONVEX_HULL,
parent_object="Parent_object",
parents_of=numpy.array([1, 1])
from __future__ import print_function
from six import iteritems
import netCDF4
import os
import uuid
from .. import utils
import numpy as np
import collections
from scipy import interpolate
from scipy.signal import decimate
class QncException(Exception):
pass
def to_str(s):
if not isinstance(s,str) and isinstance(s,bytes):
s=s.decode() # in case py3 and s is bytes
return s
def sanitize_name(s):
"""
make s suitable for use as a variable or dimension
name
"""
return to_str(s).replace(' ','_').replace('/','_')
def as_tuple(x):
if isinstance(x,tuple):
return x
elif isinstance(x,list):
return tuple(x)
else:
return (x,)
def anon_dim_name(size,**kws):
"""
Name given to on-demand dimensions.
kws: unused, but might include the type?
"""
return 'd%d'%size
class QuickVar(object): # wraps netcdf variable
# predefine QuickVar attributes for the sake of __setattr__
_nc=None
_v=None
_transpose=None
# _converter=None
def __init__(self,nc,v,transpose=None):
self.__dict__['_nc']=nc
self.__dict__['_v']=v
# self.__dict__['_converter']=converter
if transpose is None:
transpose=range(len(v.dimensions))
self.__dict__['_transpose']=transpose
def __getitem__(self,k):
""" the options are similar to indexing a record array,
but strings refer to dimensions
"""
# if k is a string or tuple of strings, transpose
# and return a new quick var.
# if k is a dict, no transposition, but map the values to
# slices
# otherwise delegate to var.
# this makes list arguments and tuple arguments
# appear the same, but in the spirit of following
# numpy semantics, those should not be the same.
if isinstance(k,dict):
dims=self.dims
slices=[ k.get(d,slice(None))
for d in self.dims ]
k=tuple(slices)
if not isinstance(k,tuple):
k=(k,)
k=tuple(to_str(ki) for ki in k)
if isinstance(k[0],str):
return self._transpose_by_names(k)
else:
myv=self._v
# first, make k explicitly cover all dimensions
#try:
k=list(k) # make a list so we can modify it
#except TypeError: # k just a single slice or index
# k=[k]
for ki,kk in enumerate(k):
if kk is Ellipsis:
expand_slc=slice(ki,ki+1)
expansion=[slice(None)]*(myv.ndim-(len(k)-1))
k[expand_slc] = expansion
break
else:
while len(k)< myv.ndim:
k.append( slice(None) )
# k is still a set of slices on the transposed data
untranspose=[ self._transpose.index(i)
for i in range(myv.ndim) ]
k_untransposed=[k[j] for j in untranspose]
# retrieve the data, possibly having netcdf subset it
# important to tuplify - different semantics than
# a list
pulled=self._rich_take(myv,tuple(k_untransposed))
# if none of the slices are a singleton, then we'd just
# apply our transpose and be done.
# .. say the initial dims were [time,cell,z]
# and self._transpose=[1,0,2] # cell,time,z
# and k=[0,slice(None),slice(None)]
# which means cell=0,all time, all z
# then k_transposed=[slice(None),0,slice(None)]
# pulled has dimensions of time,z
# retranspose=[0,1]
# compared to transpose which was [1,0,2]
# so of transpose,
retranspose=[i for i in self._transpose
if (isinstance(k_untransposed[i],slice) or
isinstance(k_untransposed[i],collections.Iterable))]
# and renumber via cheesy np trick
if len(retranspose):
retranspose=np.argsort(np.argsort(retranspose))
return pulled.transpose(retranspose)
else:
return pulled
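# Added usage sketch (illustrative; the variable and dimension names are hypothetical):
#   nc.u['time', 'cell']          -> QuickVar viewed with dimensions reordered to (time, cell)
#   nc.u[{'time': slice(0, 10)}]  -> first ten time steps, all other dimensions untouched
#   nc.u[0, :, :]                 -> plain numpy array for the first index of the leading dim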
def _rich_take(self,myv,idxs):
# allow more relaxed semantics, in particular, grabbing an
# out of order or repeated set of indices
new_idxs=[] # introduce some mangling
post_idxs=[] # then use this to undo mangling
for idx in idxs:
post=slice(None)
if isinstance(idx, collections.Iterable):
idx=np.asarray(idx)
if ( idx.ndim!=1 or
(len(idx)>1 and np.any(np.diff(idx)<=0)) ):
post=idx # have numpy do it.
idx=slice(None)
new_idxs.append(idx)
if not np.isscalar(idx):
post_idxs.append(post)
new_idxs=tuple(new_idxs)
post_idxs=tuple(post_idxs)
result=myv[new_idxs]
if len(post_idxs):
result=result[post_idxs]
return result
def __setitem__(self,k,v):
"""
limited support here - just nc.varname[slices]=value
TODO: allow k to be a bitmask index.
TODO: allow k to be dimensions, to transpose on the fly
TODO: allow self to already have a transpose
"""
myv=self._v
# for now, no support for assigning into a transposed array
if np.all( self._transpose == range(len(myv.dimensions)) ):
# when k is a bitmask, this is super slow.
if myv.size<1e6: # do it in python, in memory.
value=myv[:]
value[k]=v
myv[:]=value
else:
myv[k]=v
else:
raise QncException("Tranpose is set - not ready for that")
def __getattr__(self,attr):
return getattr(self._v,attr)
def __setattr__(self,attr,value):
# ony pre-existing attributes are set on this object
# all others are passed on to the real variable.
if attr in self.__dict__:
self.__dict__[attr]=value
else:
return setattr(self._v,attr,value)
def _transpose_by_names(self,k):
if isinstance(k,str):
k=(k,)
new_transpose=[self._v.dimensions.index(kk) for kk in k]
return QuickVar(self._nc,self._v,new_transpose)
def __len__(self):
return len(self._v)
@property
def dims(self):
return self.dimensions
@property
def dimensions(self):
return self._v.dimensions
def as_datenum(self):
import nc_utils
return nc_utils.cf_time_to_datenum(self._v)
class QDataset(netCDF4.Dataset):
# 'varlen' will try to use variable length arrays to store strings
# 'fixed' will add a strNN dimension, and strings are padded out to that size
# 'varlen' may not be supported on as wide a range of library versions.
# neither is well-tested at this point
def __init__(self,*args,**kws):
# seems that it has to be done this way, otherwise the setattr/getattr
# overrides don't see it.
super(QDataset,self).__init__(*args,**kws)
self._set_string_mode('varlen') # 'fixed'
self.__dict__['_auto_dates']=True
def _set_string_mode(self,mode):
self.__dict__['_string_mode']=mode
def __getattr__(self,attr):
# only called when attribute is not found in dict
if attr in self.variables:
return QuickVar(self,self.variables[attr])
raise AttributeError(attr)
class VarProxy(object):
""" represents a variable about to be defined, but
waiting for dimension names and data, via setattr.
"""
def __init__(self,dataset,varname):
self.dataset=dataset
self.varname=to_str(varname)
def __setitem__(self,dims,data):
""" syntax for creating dimensions and variables at the
same time.
nc['var_name']['dim1','dim2','dim3']=np.array(...)
"""
return self.create(dims,data)
def create(self,dims,data,**kwargs):
if self.varname in self.dataset.variables:
print( "Trying to setitem on varname=%s"%(self.varname) )
print( "Existing dataset state:" )
self.dataset._dump()
raise QncException("Can't create variable twice: %s"%self.varname)
attrs=kwargs.get('attrs',{})
data=np.asarray(data)
# create dimensions on-demand:
dims=as_tuple(dims)
dims=tuple(to_str(di) for di in dims)
if len(dims) and dims[-1]==Ellipsis:
dims=dims[:-1] #drop the ellipsis
# add new dimensions named for their size, according to the
# trailing dimensions of data
extras=[anon_dim_name(size=n) for n in data.shape[len(dims):]]
if extras:
dims=dims + tuple(extras)
#print "New dimensions: "
#print dims
# A college try at dereferencing objects
if data.dtype==object:
if np.all( [isinstance(v,str) or isinstance(v,unicode) for v in data.ravel()] ):
data=data.astype('S')
# and handle strings by also adding an extra generic dimension
dtype_str=data.dtype.str
# print "Dtype: ",dtype_str
if dtype_str.startswith('|S') or dtype_str.startswith('S'):
# print "It's a string!"
slen=data.dtype.itemsize
if slen>1 and self.dataset._string_mode=='fixed':
dims=dims + ("str%d"%slen,)
new_shape=data.shape + (slen,)
new_data=np.fromstring(data.tostring(),'S1').reshape(new_shape)
data=new_data
# print "New data dtype: ",data.dtype
# get smart about np datetime64 values
if self.dataset._auto_dates and (data.dtype.type == np.datetime64):
# CF conventions can't deal with nanoseconds...
# this will lose sub-second values - could try going with floats
# instead??
data=data.astype('M8[s]').astype(np.int64)
# Assumes UTC
attrs['units']='seconds since 1970-01-01 00:00:00'
data_shape=list(data.shape)
var_dtype=data.dtype
for dim_idx,dim_name in enumerate(dims):
self.dataset.add_dimension(dim_name,data.shape[dim_idx])
variable=self.dataset.createVariable(self.varname,
var_dtype,
dims,**kwargs)
variable[:]=data
for k,v in iteritems(attrs):
setattr(variable,k,v)
def add_dimension(self,dim_name,length):
"""
create dimension if it doesn't exist, otherwise check that
the length requested matches what does exist.
"""
if dim_name in self.dimensions:
assert ( self.dimensions[dim_name].isunlimited() or
(len(self.dimensions[dim_name]) == length) )
else:
self.createDimension(dim_name,length)
def __getitem__(self,k):
if k in self.variables:
return QuickVar(self,self.variables[k])
else:
return self.VarProxy(dataset=self,varname=k)
def __setitem__(self,k,val):
# shorthand for dimensionless variable
self[k][()]=val
def __contains__(self,k):
return k in self.variables
def alias(self,**kwargs):
""" had been just copying the variables. But why not just
update self.variables? This works even if not writing
to the file.
"""
for k,v in iteritems(kwargs):
if 0: # deep copy:
self[k][self.variables[v].dimensions] = self.variables[v][:]
for attr_name in self.variables[v].ncattrs():
setattr(self.variables[k],attr_name,
getattr(self.variables[v],attr_name))
else:
self.variables[k]=self.variables[v]
def interpolate_dimension(self,int_dim,int_var,new_coordinate,
max_gap=None,gap_fields=None,
int_mode='nearest'):
"""
return a new dataset as a copy of this one, but with
the given dimension interpolated according to varname=values
typically this would be done to a list of datasets, after which
they could be appended.
it can also be used to collapse a 'dependent' coordinate into
an independent coordinate - e.g. if depth bins are a function of
time, this can be used to interpolate onto a constant depth axis,
which will also remove the time dimension from that depth variable.
max_gap: jumps in the source variable greater than max_gap are filled
with nan (or -99 if int valued). For now this is only supported when
int_dim has just one dimension
gap_fields: None, or a list of variable names to be masked based on gaps.
int_mode:
'nearest' - grab the integer value from the nearest sample
may add 'linear' in the future, which would cast to float
"""
result=empty()
int_ncvar=self.variables[int_var]
if len(int_ncvar.dimensions)>1:
print( "Will collapse %s"%int_var)
if max_gap:
raise QncException("max_gap not implemented for multi-dimensional variable")
else:
if max_gap:
gapped=np.zeros(new_coordinate.shape,'b1')
deltas=np.diff(int_ncvar[:])
gap_idx=np.nonzero(deltas>max_gap)[0]
for idx in gap_idx:
gap_left=int_ncvar[idx]
gap_right=int_ncvar[idx+1]
print( "Gap is %f - %f (delta %f)"%(gap_left,gap_right,gap_right-gap_left) )
to_mask=slice(*np.searchsorted( new_coordinate, [gap_left,gap_right] ))
gapped[to_mask]=True
for varname in self.variables.keys():
dim_names=self.variables[varname].dimensions
if int_dim in dim_names:
merge_idx=dim_names.index(int_dim)
else:
merge_idx=None
if varname==int_var:
# takes care of matching the coordinate variable.
# but there will be trouble if there are multiple
# coordinate variables and you try to concatenate
# use int_dim here instead of dim_names, because we might
# be collapsing the interpolant variable.
result[varname][int_dim]=new_coordinate
elif merge_idx is not None:
# it's an array-valued variable, and includes
# the dimension over which we want to interpolate
int_all_dims=self.variables[int_var].dimensions
src_var=self.variables[varname]
src_val=src_var[:]
# masked values don't work so well with the
# interpolation:
if isinstance(src_val,np.ma.core.MaskedArray):
print( "Filling masked src data" )
if 'int' in src_val.dtype.name:
src_val=src_val.filled(-1)
else:
src_val=src_val.filled(np.nan)
if len(int_all_dims)==1:
if ('int' in src_val.dtype.name) and (int_mode=='nearest'):
def interper(coord):
idxs=utils.nearest(self.variables[int_var][:],coord)
slices=[slice(None)]*src_val.ndim
slices[merge_idx]=idxs
return src_val[slices]
else:
interper=interpolate.interp1d(self.variables[int_var][:],
src_val,
axis=merge_idx,
bounds_error=False,
assume_sorted=False)
new_val=interper(new_coordinate)
if max_gap and ( (gap_fields is None) or (varname in gap_fields)):
if 'int' in new_val.dtype.name:
new_val[gapped] = -99
else:
new_val[gapped] = np.nan
result[varname][dim_names]=new_val
else:
# here's the tricky part when it comes to collapsing
# dimensions
# self.variables[int_var] - this is multidimensional
# basically we want to iterate over elements in
# iterate over all other dimensions of the int_var
int_values=self.variables[int_var][:]
int_dim_idx=self.variables[int_var].dimensions.index(int_dim)
# preallocate the result, since we have to fill it in bit-by-bit
dest_shape=list(src_var.shape)
dest_shape[merge_idx] = len(new_coordinate)
dest=np.zeros(dest_shape,dtype=src_val.dtype)
# start with a stupid, slow way:
# Assume that there is one extra dimension and it's the first one.
# int_ncvar has multiple dimensions, i.e. depth ~ bin,time
# so there is an assumption here that some variable to be interpolated
# like u=u(bin,time)
# could there be something that depended on bin, but not time?
# there aren't any in the existing adcp data
for extra in range(int_ncvar.shape[0]):
interper=interpolate.interp1d(int_ncvar[extra,:],
src_val[extra,:],
axis=merge_idx-1, # account for extra
bounds_error=False,
assume_sorted=False)
dest[extra,:]=interper(new_coordinate)
result[varname][dim_names]=dest
else: # non-dimensioned attributes here
result[varname][dim_names]=self.variables[varname][:]
self.copy_ncattrs_to(result)
return result
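# Added usage sketch (hypothetical names): resample every variable carrying a 'time'
# dimension onto a regular axis, masking gaps longer than an hour:
#   new_time = np.arange(t_start, t_end, 600.)
#   nc_regular = nc.interpolate_dimension('time', 'time', new_time, max_gap=3600.)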
def copy(self,skip=[],fn=None,**create_args):
""" make a deep copy of self, into a writable, diskless QDataset
if fn is given, target is a netCDF file on disk.
"""
if fn is not None:
new=empty(fn,**create_args)
else:
new=empty()
for dimname in self.dimensions.keys():
if dimname not in skip:
new.createDimension(dimname,len(self.dimensions[dimname]))
for varname in self.variables.keys():
if varname not in skip:
ncvar=self.variables[varname]
new[varname][ncvar.dimensions] = ncvar[:]
self.copy_ncattrs_to(new)
return new
def copy_ncattrs_to(self,new):
for varname in self.variables.keys():
myvar=self.variables[varname]
if varname not in new.variables:
continue
newvar = new.variables[varname]
for attr in myvar.ncattrs():
if attr != '_FillValue':
# _FillValue can only be set at var creation time
# This approach was failing on valid_range, 2017-03-17
# setattr(newvar,attr,getattr(myvar,attr))
newvar.setncattr(attr,getattr(myvar,attr))
def select(self,**kwargs):
new=empty()
for varname in self.variables.keys():
dim_names=self.variables[varname].dimensions
if len(dim_names)==0:
newvar=new.createVariable(varname,self.variables[varname].dtype,())
newvar[...]=self.variables[varname][...]
else:
slices=[slice(None)]*len(dim_names)
for slc_dim,slc_sel in iteritems(kwargs):
if slc_dim in dim_names:
slc_idx=dim_names.index(slc_dim)
slices[slc_idx]=slc_sel
else:
print( "slice dimension %s not in dim_names %s"%(slc_dim,dim_names) )
new[varname][dim_names]=self.variables[varname][slices]
self.copy_ncattrs_to(new)
return new
def within(self,**kwargs):
selects={}
for slc_varname,slc_range in iteritems(kwargs):
slc_var=self.variables[slc_varname]
assert( len(slc_var.dimensions)==1 )
selects[slc_var.dimensions[0]]=utils.within(slc_var[:],slc_range,as_slice=True)
return self.select(**selects)
def _dump(self):
print( self._desc() )
def _desc(self):
""" returns pretty printed description of dataset similar to
output of ncdump
"""
lines=[ "%s %s {"%( self.file_format.lower(), "unknown_filename" ) ]
lines.append( self._desc_dims() )
lines.append( self._desc_vars() )
lines.append( "}" )
return "\n".join(lines)
def _desc_dims(self):
lines=["dimensions:"]
for k,v in iteritems(self.dimensions):
lines.append(" %s = %s ;"%(k,len(v) ))
return "\n".join(lines)
def _desc_vars(self,max_attr_len=20,max_n_attrs=7):
lines=["variables:"]
for k,v in iteritems(self.variables):
try:
typ=v.dtype.name
except AttributeError:
typ=str(v.dtype)
lines.append( " %s %s(%s) ;"%( typ, k, ",".join( v.dimensions )) )
for attr_name in v.ncattrs()[:max_n_attrs]:
a_val=getattr(v,attr_name)
if isinstance(a_val,str) and len(a_val)>max_attr_len:
a_val = a_val[:max_attr_len] + "... [%d more bytes]"%(len(a_val)-max_attr_len)
a_val = '"' + a_val + '"'
lines.append(' %s:%s = %s'%(k,attr_name,a_val))
if len(v.ncattrs()) > max_n_attrs > 0:
lines.append(' ... %d more'%(len(v.ncattrs())-max_n_attrs))
return "\n".join(lines)
def empty(fn=None,overwrite=False,**kwargs):
if fn is None:
return QDataset(uuid.uuid1().hex,'w',diskless=True,**kwargs)
else:
if os.path.exists(fn):
if overwrite:
os.unlink(fn)
else:
raise QncException('File %s already exists'%fn)
return QDataset(fn,'w',**kwargs)
def concatenate(ncs,cat_dim,skip=[],new_dim=None):
""" ncs is an ordered list of QDataset objects
If a single QDataset is given, it will be copied at the metadata
level
new_dim: if given, then fields not having cat_dim, but differing
between datasets, will be concatenated along new_dim.
for convenience, elements of ncs which are None are silently dropped
"""
ncs=[nc for nc in ncs if nc is not None]
N=len(ncs)
if N==1:
return ncs[0].copy()
if N==0:
return empty()
result=empty()
for varname in ncs[0].variables.keys():
if varname in skip:
continue
dim_names=ncs[0].variables[varname].dimensions
if cat_dim in dim_names:
cat_idx=dim_names.index(cat_dim)
else:
cat_idx=None
parts=[nc.variables[varname][:] for nc in ncs]
if cat_idx is not None:
result[varname][dim_names]=np.concatenate(parts,axis=cat_idx)
else:
constant=True
for n in range(1,N):
if np.any(parts[0]!=parts[n]):
constant=False
break
if not constant:
if new_dim is None:
raise QncException("Non-concatenated variable %s "\
"does not match %s != %s"%(varname,
parts[0],
parts[n]))
else:
print( "Variable values of %s will go into new dimension %s"%(varname,
new_dim) )
result[varname][ [new_dim]+list(dim_names) ]=np.array(parts)
else:
result[varname][dim_names]=parts[0]
# attrs are copied from first element
ncs[0].copy_ncattrs_to(result)
return result
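# Added usage sketch (hypothetical filenames): append several files along 'time', sending
# run-specific scalars that differ between files to a new 'run' dimension:
#   ncs = [QDataset(fn) for fn in ('run_a.nc', 'run_b.nc', 'run_c.nc')]
#   merged = concatenate(ncs, 'time', new_dim='run')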
# Functional manipulations of QDataset:
def downsample(ds,dim,stride,lowpass=True):
""" Lowpass variables along the given dimension, and resample
at the given stride.
lowpass=False => decimate, no lowpass
lowpass=<float> => lowpass window size is lowpass*stride
"""
lowpass=float(lowpass)
winsize=int(lowpass*stride)
new=empty()
for var_name in ds.variables:
ncvar=ds.variables[var_name]
val=ncvar[:]
if dim in ncvar.dimensions:
dim_idx=ncvar.dimensions.index(dim)
# should be possible to use the faster scipy way,
# but it is having issues with setting nan's in the
# output - maybe something is getting truncated or
# reshaped??
if True: # lowpass!=1: # older, slower way:
if lowpass:
import lp_filter
val_lp=lp_filter.lowpass_fir(val,winsize,axis=dim_idx,nan_weight_threshold=0.5)
else:
val_lp=val
slcs=[slice(None)]*len(ncvar.dimensions)
slcs[dim_idx]=slice(None,None,stride)
val=val_lp[slcs]
else: # scipy.signal way:
kws=dict(q=stride,ftype='fir',axis=dim_idx)
val_valid=decimate(np.isfinite(val).astype('f4'),**kws)
val=decimate(val,**kws)
val[val_valid<0.1] = np.nan
new[var_name+'_msk'][ncvar.dimensions]=val_valid # DBG
new[var_name][ncvar.dimensions] = val
ds.copy_ncattrs_to(new)
return new
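# Added usage sketch: halve the resolution along 'time' with a FIR lowpass whose window
# spans lowpass*stride = 6 samples:
#   nc_coarse = downsample(nc, 'time', stride=2, lowpass=3)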
def mat_to_nc(mat,dim_map={},autosqueeze=True):
nc=empty()
length_to_dim={}
if autosqueeze:
sq=np.squeeze
else:
sq=lambda x: x
for k,v in iteritems( dim_map ):
if isinstance(v,str):
v=[v]
if k in mat:
x=sq(mat[k])
if x.ndim != len(v):
raise QncException("dim_map: field %s - %s doesn't match with %s"%(k,v,x.shape))
for dim_name,size in zip(v,x.shape):
length_to_dim[size]=dim_name
for k,v in iteritems(mat):
if k.startswith('_'):
print( "Skipping %s"%k)
continue
v=sq(v)
if not isinstance(v,np.ndarray):
print( "Don't know how to deal with variable of type %s"%str(type(v)))
continue
if v.ndim==0:
setattr(nc,k,v.item())
continue
if k in dim_map:
dims=dim_map[k]
else:
dims=[]
for size in v.shape:
if size not in length_to_dim:
length_to_dim[size]='d%d'%size
dims.append(length_to_dim[size])
# special handling for some datatypes:
if v.dtype == np.dtype('O'):
# sometimes this is just ragged array
# for some reason mat files can have a 3-dimensional variable reported
# as an array of 2-d arrays.
# TODO
print( "%s is not a simple array. Skipping"%k )
continue
nc[k][dims]=v
return nc
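# Added usage sketch (hypothetical .mat contents): name the dimensions of a known variable
# so other variables with matching lengths inherit them:
#   mat = scipy.io.loadmat('adcp.mat')
#   nc = mat_to_nc(mat, dim_map={'u': ['bin', 'time']})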
def linear_to_orthogonal_nc(nc_src,lin_dim,ortho_dims,nc_dst=None):
"""
copy a dataset, changing a linear dimension into a pair of orthogonal
dimensions
"""
if isinstance(nc_src,str):
nc=qnc.QDataset(nc_src)
else:
nc=nc_src
if nc_dst is None:
nc2=qnc.empty()
elif isinstance(nc_dst,str):
nc2=qnc.empty(fn=nc_dst)
else:
nc2=nc_dst
ortho_coords=[np.unique(nc.variables[d][:]) for d in ortho_dims]
ortho_shape =[len(oc) for oc in ortho_coords]
map1=np.searchsorted(ortho_coords[0],nc.variables[ortho_dims[0]][:])
map2=np.searchsorted(ortho_coords[1],nc.variables[ortho_dims[1]][:])
for d in ortho_dims:
vals=np.unique(nc.variables[d][:])
nc2[d][d]=vals
nc2.variables
# nc2.set_coords(ortho_dims)
for v in nc.variables:
print("Processing %s"%v)
if v in ortho_dims:
continue
create_kwargs={}
if '_FillValue' in nc.variables[v].ncattrs():
create_kwargs['fill_value']=nc.variables[v]._FillValue
if lin_dim in nc.variables[v].dimensions:
old_val=nc.variables[v][:]
new_dims=[] ; new_shape=[] ; new_slices=[]
for d in nc.variables[v].dimensions:
if d==lin_dim:
new_dims+=ortho_dims
new_shape+=ortho_shape
new_slices+=[map1,map2]
else:
new_dims.append(d)
new_shape.append(len(nc.variables[d]))
new_slices+=[slice(None)]
new_val=np.zeros(new_shape,old_val.dtype)
new_val[:] = np.nan
new_val[tuple(new_slices)] = old_val
# dims=[nc2[d] for d in new_dims]
nc2[v].create(new_dims,new_val,**create_kwargs)
else:
#print "COPY",v,nc[v].dims
data=nc.variables[v][:]
if data.dtype==object:
data=data.astype('S')
nc2[v].create(nc.variables[v].dimensions,data,**create_kwargs)
for att in nc.variables[v].ncattrs():
if att == '_FillValue': # can't be added after the fact
continue
setattr(nc2.variables[v],att,getattr(nc.variables[v],att))
if isinstance(nc_src,str):
nc.close()
return nc2
def ortho_to_transect_nc(src_nc,src_x_var,src_y_var,transect_xy,dst_nc=None):
""" Extract a transect to a new dataset
"""
if isinstance(src_nc,str):
src_nc=qnc.QDataset(src_nc)
close_src=True
else:
close_src=False
if dst_nc is None:
dst_nc=qnc.empty()
elif isinstance(dst_nc,str):
dst_nc=qnc.empty(fn=dst_nc)
else:
pass
src_xy=np.array( [src_nc.variables[src_x_var][:],src_nc.variables[src_y_var][:]] ).T
elti_sel=[]
for xy in transect_xy:
dists=utils.dist(xy,src_xy)
elti_sel.append(np.argmin(dists))
elti_sel= | np.array(elti_sel) | numpy.array |
import numpy as np
import h5py
import matplotlib.pyplot as plt
import pyfftw
from numpy.fft import fftshift
# --------------------------------------------------------------------------------------------------------------------
# Loading data:
# --------------------------------------------------------------------------------------------------------------------
filename = input('Enter filename of data to open: ')
data_file = h5py.File('../../data/{}.hdf5'.format(filename), 'r')
# Loading grid array data:
x, y = data_file['grid/x'], data_file['grid/y']
X, Y = np.meshgrid(x[:], y[:])
Nx, Ny = x[:].size, y[:].size
dx, dy = x[1] - x[0], y[1] - y[0]
dkx = 2 * np.pi / (Nx * dx)
dky = 2 * np.pi / (Ny * dy) # K-space spacing
kxx = np.arange(-Nx // 2, Nx // 2) * dkx
kyy = np.arange(-Ny // 2, Ny // 2) * dky
Kx, Ky = np.meshgrid(kxx, kyy)
# Kx, Ky = np.fft.fftshift(Kx), np.fft.fftshift(Ky)
K = np.sqrt(fftshift(Kx ** 2)
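# Added note (not in the original): an equivalent wavenumber grid that is already in FFT
# order can be built with np.fft.fftfreq, which avoids the explicit shift:
#   kxx = 2 * np.pi * np.fft.fftfreq(Nx, d=dx)
#   kyy = 2 * np.pi * np.fft.fftfreq(Ny, d=dy)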
import numpy as np
import matplotlib.pyplot as plt
import time
import skcuda.linalg as linalg
import aspire.em_classavg.data_utils as data_utils
from aspire.em_classavg.image_denoising.image_denoising.ConverterModel.Converter import Converter
class EM:
def __init__(self, images, trunc_param=10, beta=0.5, ang_jump=1,
max_shift=5, shift_jump=1, n_scales=10, is_remove_outliers=True, outliers_precent_removal=5):
self.trunc_param = trunc_param
self.beta = beta
self.ang_jump = ang_jump
self.is_remove_outliers = is_remove_outliers
self.outliers_precent_removal = outliers_precent_removal
self.em_params = dict()
self.em_params['n_scales'] = n_scales
self.em_params['max_shift'] = max_shift
self.em_params['shift_jump'] = shift_jump
self.em_params['thetas'] = np.arange(1, 361, self.ang_jump)
self.em_params['shifts'] = np.arange(-1 * self.em_params['max_shift'],
self.em_params['max_shift'] + 1, self.em_params['shift_jump'])
self.im_size = np.shape(images)[-1]
if np.ndim(images) == 3:
self.n_images = len(images)
else:
self.n_images = 1
images, self.mean_bg_ims, self.sd_bg_ims = data_utils.normalize_background(images)
snr_est = EM.est_snr(images)
est_scale = np.sqrt(snr_est * np.mean(self.sd_bg_ims) ** 2)
self.em_params['scales'] = np.linspace(0.8 * est_scale, 1.2 * est_scale, self.em_params['n_scales'])
self.converter = Converter(self.im_size, self.trunc_param, self.beta)
self.converter.init_direct('full')
self.c_ims = self.converter.direct_forward(images)
self.const_terms = self.pre_compute_const_terms()
self.phases = np.exp(-1j * 2 * np.pi / 360 *
np.outer(self.em_params['thetas'], self.converter.get_angular_frequency()))
# the expansion coefficients of each image for each possible rotation
self.c_ims_rot = self.c_ims[:, np.newaxis, :] * self.phases[np.newaxis, :]
def e_step(self, c_avg):
print('e-step')
n_scales = len(self.em_params['scales'])
n_rots = len(self.em_params['thetas'])
n_shifts_2d = len(self.em_params['shifts'])**2
n_shifts_1d = len(self.em_params['shifts'])
posteriors = np.zeros((self.n_images, n_shifts_2d, n_scales, n_rots))
# posteriors = np.zeros((self.n_images, n_scales, n_rots, n_shifts_2d))
# compute the terms that do not depend on the shifts
ann_const = (np.linalg.norm(c_avg) * np.outer(1 / self.sd_bg_ims, self.em_params['scales']))**2
cross_cnn_ann = np.outer(self.mean_bg_ims / (self.sd_bg_ims**2), self.em_params['scales']) * \
2 * np.real(np.vdot(c_avg, self.const_terms['c_all_ones_im']))
ann_const_cross_cnn_anns = ann_const + cross_cnn_ann
const_elms = ann_const_cross_cnn_anns + (self.const_terms['anni'] + self.const_terms['cnn'])[:, np.newaxis]
for shift_x in self.em_params['shifts']:
for shift_y in self.em_params['shifts']:
if shift_y < shift_x:
continue
A_shift = self.calc_A_shift(shift_x, shift_y)
tmp1_shift = np.conj(self.const_terms['c_all_ones_im']).dot(A_shift)
tmp2_shift = np.conj(c_avg).dot(A_shift)
A_inv_shift = np.conj(np.transpose(A_shift))
tmp1_inv_shift = np.conj(self.const_terms['c_all_ones_im']).dot(A_inv_shift)
tmp2_inv_shift = np.conj(c_avg).dot(A_inv_shift)
shifts = (np.array([[shift_y, -shift_y], [shift_x, -shift_x]]) + self.em_params['max_shift']) / \
self.em_params['shift_jump']
inds = np.ravel_multi_index(shifts.astype(int), (n_shifts_1d, n_shifts_1d))
for i in np.arange(self.n_images):
# calculate the two cross terms
cross_anni_cnn = self.mean_bg_ims[i] / self.sd_bg_ims[i] * \
2 * np.real(tmp1_shift.dot(np.transpose(self.c_ims_rot[i])))
cross_anni_ann = self.em_params['scales'][:, np.newaxis] / self.sd_bg_ims[i] * \
2 * np.real(tmp2_shift.dot(np.transpose(self.c_ims_rot[i])))
# write down the log likelihood
posteriors[i, inds[0]] = cross_anni_ann - (const_elms[i][:, np.newaxis] + cross_anni_cnn)
if shift_y != shift_x:
cross_anni_cnn_minus = self.mean_bg_ims[i] / self.sd_bg_ims[i] * \
2 * np.real(tmp1_inv_shift.dot(np.transpose(self.c_ims_rot[i])))
cross_anni_ann_minus = self.em_params['scales'][:, np.newaxis] / self.sd_bg_ims[i] * \
2 * np.real(tmp2_inv_shift.dot(np.transpose(self.c_ims_rot[i])))
# write down the log likelihood
# TODO: avoid ellipsis by shifting the shift index to the beginning
posteriors[i, inds[1]] = cross_anni_ann_minus - \
(const_elms[i][:, np.newaxis] + cross_anni_cnn_minus)
log_lik_per_image = np.zeros(self.n_images)
for i in np.arange(self.n_images):
omega_i = posteriors[i]
max_omega = np.max(omega_i)
omega_i = np.exp(omega_i - max_omega)
log_lik_per_image[i] = max_omega + np.log(np.sum(omega_i))
posteriors[i] = omega_i / np.sum(omega_i)
return posteriors, log_lik_per_image
def m_step(self, posteriors):
print('m-step')
n_images = self.n_images
n_shifts_1d = len(self.em_params['shifts'])
n_prolates = self.converter.get_num_prolates()
W_shifts_marg = np.zeros((n_images, n_prolates)).astype('complex')
c_avg = np.zeros(n_prolates).astype('complex')
for shift_x in self.em_params['shifts']:
for shift_y in self.em_params['shifts']:
if shift_y < shift_x:
continue
shifts = (np.array([[shift_y, -shift_y], [shift_x, -shift_x]]) + self.em_params['max_shift']) / \
self.em_params['shift_jump']
inds = np.ravel_multi_index(shifts.astype(int), (n_shifts_1d, n_shifts_1d))
A_shift = self.calc_A_shift(shift_x, shift_y)
A_inv_shift = np.conj(np.transpose(A_shift))
non_neg_freqs = self.converter.get_non_neg_freq_inds()
A_shift = A_shift[non_neg_freqs]
A_inv_shift = A_inv_shift[non_neg_freqs]
W = np.zeros((n_images, self.converter.get_num_prolates())).astype('complex')
for i in np.arange(n_images):
W[i] = np.sum(np.dot(posteriors[i, inds[0]], self.phases), axis=0)
c_avg[non_neg_freqs] += np.sum(A_shift.dot(np.transpose(W * self.c_ims)), axis=1)
W_shifts_marg += W
if shift_y != shift_x:
W_minus = np.zeros((n_images, self.converter.get_num_prolates())).astype('complex')
for i in np.arange(n_images):
W_minus[i] = np.sum(np.dot(posteriors[i, inds[1]], self.phases), axis=0)
c_avg[non_neg_freqs] += np.sum(A_inv_shift.dot(np.transpose(W_minus * self.c_ims)), axis=1)
W_shifts_marg += W_minus
# update the coeffs with respect to the additive term
c_avg[non_neg_freqs] += np.sum(np.transpose(W_shifts_marg * self.const_terms['c_additive_term']), axis=1)[non_neg_freqs]
c_avg[self.converter.get_neg_freq_inds()] = np.conj(c_avg[self.converter.get_pos_freq_inds()])
c = posteriors * self.em_params['scales'][:, np.newaxis] / \
self.sd_bg_ims[:, np.newaxis, np.newaxis, np.newaxis]
c = np.sum(c)
c_avg = c_avg/c
return c_avg
def calc_A_shift(self, shift_x, shift_y):
psis = self.converter.get_prolates_as_images()
n_psis = len(psis)
if shift_x == 0 and shift_y == 0:
return np.eye(n_psis)
A_shift = np.zeros((n_psis, n_psis)).astype('complex')
non_neg_freqs = self.converter.get_non_neg_freq_inds()
psis_non_neg_shifted = np.roll(np.roll(psis[non_neg_freqs], shift_y, axis=1), shift_x, axis=2)
# mask the shifted psis
psis_non_neg_shifted = self.converter.mask_points_inside_the_circle(psis_non_neg_shifted)
# we need the conjugation by design
A_shift[:, non_neg_freqs] = np.tensordot(np.conj(psis), psis_non_neg_shifted, axes=([1, 2], [1, 2]))
zero_freq_inds = self.converter.get_zero_freq_inds()
pos_freq_inds = self.converter.get_pos_freq_inds()
neg_freq_inds = self.converter.get_neg_freq_inds()
A_shift[zero_freq_inds, neg_freq_inds] = np.conj(A_shift[zero_freq_inds, pos_freq_inds])
A_shift[pos_freq_inds, neg_freq_inds] = np.conj(A_shift[neg_freq_inds, pos_freq_inds])
A_shift[neg_freq_inds, neg_freq_inds] = np.conj(A_shift[pos_freq_inds, pos_freq_inds])
return A_shift
def pre_compute_const_terms(self):
const_terms = dict()
im_size = self.im_size
# we need the all-ones image for the additive term that arises from the normalization
const_terms['c_all_ones_im'] = self.converter.direct_forward(np.ones((im_size, im_size)))
const_terms['anni'] = np.linalg.norm(self.c_ims, axis=1)**2
const_terms['cnn'] = (self.mean_bg_ims / self.sd_bg_ims * np.linalg.norm(const_terms['c_all_ones_im']))**2
const_terms['c_additive_term'] = np.outer(self.mean_bg_ims / self.sd_bg_ims, const_terms['c_all_ones_im'])
return const_terms
def compute_opt_latent_vals(self, posteriors):
n_images = len(posteriors)
n_shifts_1d = len(self.em_params['shifts'])
opt_latent = dict()
opt_latent['rots'] = np.zeros(n_images)
opt_latent['shifts_x'] = np.zeros(n_images)
opt_latent['shifts_y'] = np.zeros(n_images)
opt_latent['scales'] = np.zeros(n_images)
for i in np.arange(n_images):
om_i = posteriors[i]
# posteriors[i] has axes (shift, scale, rot) -- see e_step -- so marginalize accordingly
opt_shift_ind = np.argmax(np.sum(np.sum(om_i, axis=2), axis=1))
opt_scale_ind = np.argmax(np.sum(np.sum(om_i, axis=2), axis=0))
opt_rot_ind = np.argmax(np.sum(np.sum(om_i, axis=1), axis=0))
opt_latent['scales'][i] = self.em_params['scales'][opt_scale_ind]
opt_latent['rots'][i] = self.em_params['thetas'][opt_rot_ind]
yy, xx = np.unravel_index(opt_shift_ind, (n_shifts_1d, n_shifts_1d))
opt_latent['shifts_x'][i] = self.em_params['shifts'][xx]
opt_latent['shifts_y'][i] = self.em_params['shifts'][yy]
return opt_latent
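# Illustrative sketch (added note, hypothetical shapes): each om_i above has shape
# (n_shifts_2d, n_scales, n_rots), so summing out the other axes and taking argmax picks the
# most likely value of each latent variable independently, e.g.
#   om_i = np.zeros((4, 2, 3)); om_i[3, 1, 2] = 1.0
#   np.argmax(np.sum(np.sum(om_i, axis=2), axis=1)) -> 3   (shift index)
#   np.argmax(np.sum(np.sum(om_i, axis=2), axis=0)) -> 1   (scale index)
#   np.argmax(np.sum(np.sum(om_i, axis=1), axis=0)) -> 2   (rotation index)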
@staticmethod
def est_snr(images):
snr = data_utils.estimate_snr(images)[0]
if snr <= 0:
snr = 10 ** -4
return snr
@staticmethod
def plot_images(init_avg_image, im_avg_est_prev, im_avg_est):
# plt.figure(1)
plt.subplot(131)
plt.imshow(init_avg_image, cmap='gray')
plt.subplot(132)
plt.imshow(np.real(im_avg_est_prev), cmap='gray')
plt.subplot(133)
plt.imshow(np.real(im_avg_est), cmap='gray')
plt.show()
def main():
linalg.init() # TODO: where to init this?
images = data_utils.mat_to_npy('images')
images = np.transpose(images, axes=(2, 0, 1)) # move to python convention
is_use_matlab_params = True
if is_use_matlab_params:
trunc_param = data_utils.mat_to_npy_vec('T')[0]
beta = data_utils.mat_to_npy_vec('beta')[0]
ang_jump = data_utils.mat_to_npy_vec('ang_jump')[0]
max_shift = data_utils.mat_to_npy_vec('max_shift')[0] # max_shift
shift_jump = data_utils.mat_to_npy_vec('shift_jump')[0] # shift_jump
n_scales = data_utils.mat_to_npy_vec('n_scales')[0]
is_remove_outliers = data_utils.mat_to_npy_vec('is_remove_outliers')[0]
outliers_precent_removal = data_utils.mat_to_npy_vec('outliers_precent_removal')[0]
em = EM(images, trunc_param, beta, ang_jump, max_shift, shift_jump,
n_scales, is_remove_outliers, outliers_precent_removal)
else:
em = EM(images,max_shift=0)
init_avg_image = data_utils.mat_to_npy('init_avg_image')
init_avg_image = data_utils.mask_decorator(init_avg_image, is_stack=True)
c_avg = em.converter.direct_forward(init_avg_image)
n_iters = 3 # data_utils.mat_to_npy_vec('nIters')[0]
print("#images=%d\t#iterations=%d\tangualr-jump=%d,\tmax shift=%d,\tshift-jump=%d,\t#scales=%d" %
(len(images), n_iters, em.ang_jump, em.em_params['max_shift'],em.em_params['shift_jump'], em.em_params['n_scales']))
im_avg_est_prev = init_avg_image
log_lik = dict()
for round in range(2):
round_str = str(round)
log_lik[round_str] = np.zeros((n_iters, em.n_images))
import numpy as np
import os
class Perceptron(object):
"""docstring for Perceptron. Creates a single perceptron with multiple inputs and a bias.
Attributes:
inputs: The number of inputs given to the perceptron. Does not include the bias.
bias: The bias for each perceptron. Defaults to 1.0. """
def __init__(self, inputs, bias=1.0):
"""Create a perceptron with a given number of inputs and a bias."""
self.weights = (np.random.rand(inputs + 1) * 2) - 1
self.bias = bias
# Are we really adding a bias to the weights?
def activate(self, x):
"""Take the inputs and bias to produce the output of the Perceptron."""
sum = np.dot(np.append(x,self.bias),self.weights)
return self.sigmoid(sum)
def create_weights(self, init_weights):
""""Use this function to assign known weights to the perceptron."""
self.weights = np.array(init_weights)
def sigmoid(self, x):
"""Evaluate the perceptron function for an input, x."""
return 1 / (1 + np.exp(-x))
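# Illustrative usage sketch (added for clarity; the weights are hypothetical, not from the original):
#   p = Perceptron(inputs=2)
#   p.create_weights([10.0, 10.0, -15.0])   # w1, w2 and the weight applied to the bias input
#   p.activate([1.0, 1.0])                  # ~0.993 -> behaves like a logical AND
#   p.activate([0.0, 1.0])                  # ~0.007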
class Multilayer_Perceptron(object):
"""docstring for Multilayer_Perceptron. Creates a single perceptron with multiple inputs and a bias.
Attributes:
layers: A python list detailing the number of elements in each layer.
bias: The bias term. The same bias is used for all perceptrons in the network.
eta: The learning rate of the system. """
def __init__(self, layers, bias=1.0, eta=0.5):
self.layers = np.array(layers, dtype=object) # Length is the number of layers, number is the perceptrons per layer
self.bias = bias
self.eta = eta
self.network = [] # The list of neurons
self.values = [] # The list of outputs
self.d = [] # The list of the error terms (lowercase delta)
for i in range(len(self.layers)):
self.values.append([]) # Add a blank location for each layer
self.d.append([]) # Add a blank location for each layer
self.network.append([]) # Add a blank location for each layer
self.values[i] = [0.0 for j in range(self.layers[i])] # Create 0 values for each perceptron
self.d[i] = [0.0 for j in range(self.layers[i])] # Create 0 values for each perceptron
if i > 0: # the first layer is the input layer so it does not have any perceptrons
for j in range(self.layers[i]):
# Create an object of the perceptron class for every position in the network
self.network[i].append(Perceptron(inputs = self.layers[i-1], bias = self.bias))
# Make an array of the data.
self.network = np.array([np.array(x) for x in self.network], dtype=object)
self.values = np.array([np.array(x) for x in self.values], dtype=object)
self.d = np.array([np.array(x) for x in self.d], dtype=object)
def setWeights(self, init_weights):
"""Set the weights of all perceptrons.
init_weights is a list of lists that holds the weights of all but the input layer."""
for i in range(len(init_weights)):
for j in range(len(init_weights[i])):
# The i+1 is used to not affect the initial input layer.
self.network[i+1][j].create_weights(init_weights[i][j])
def printWeights(self):
"""Print the weights given to each perceptron."""
print()
for i in range(1,len(self.network)):
for j in range(self.layers[i]):
# Print out the weights of each perceptron
print("Layer: %d Neuron: %d - " % (i+1, j), self.network[i][j].weights)
print()
def saveWeights(self, file):
with open(file, 'w') as save_weight_file:
for i in range(1,len(self.network)):
for j in range(self.layers[i]):
for k in self.network[i][j].weights:
save_weight_file.writelines('%s\n' % k)
# save_weight_file.write('\n')
def readWeights(self, file):
weights_array = []
done = 0
if os.stat(file).st_size == 0:
raise ValueError("No Weights Detected")
with open(file, 'r') as read_weight_file:
data = read_weight_file.readlines()
for i in range(1,len(self.network)): # 1 to 2
weights_array.append([]) # Creates an array for each
for j in range(self.layers[i]): # 1 to 10
weights_array[i-1].append([])
k = data[:self.layers[i-1]+1]
for line in k:
weights_array[i-1][j].append(float(line))  # float() tolerates the trailing newline; [:-2] would also drop the last digit
data = data[len(k):]
return weights_array
def run(self, x):
"""Feed a sample x into the MultiLayer Perceptron.
x is a list of the inputs to the network."""
# Make an array of the data
x = np.array(x, dtype=object)
# Set the first layer of values to be the inputs, x.
self.values[0] = x
for i in range(1, len(self.network)):
for j in range(self.layers[i]):
# Assign the value to be equal to the output of the perceptron activation function
self.values[i][j] = self.network[i][j].activate(self.values[i-1])
# Return the output values of the network
return self.values[-1]
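# Illustrative usage sketch (added for clarity; layer sizes and inputs are hypothetical):
#   mlp = Multilayer_Perceptron(layers=[2, 2, 1])   # 2 inputs, one hidden layer of 2, 1 output
#   out = mlp.run([0.5, 0.8])                       # forward pass with the random initial weights
#   mlp.printWeights()                              # inspect the randomly initialized weights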
def backpropagation(self, x, y):
"""Run an (x, y) pair through the backpropagation algorithm"""
x = np.array(x, dtype=object)
import os
from functools import partial
from typing import Callable, Dict, List, Optional, Tuple, Union
import attr
import numpy as np
import pandas as pd
import tabmat as tm
from dask_ml.preprocessing import DummyEncoder
from git_root import git_root
from joblib import Memory
from scipy.sparse import csc_matrix
from .data import (
generate_housing_dataset,
generate_intermediate_insurance_dataset,
generate_narrow_insurance_dataset,
generate_real_insurance_dataset,
generate_wide_insurance_dataset,
)
from .util import cache_location, exposure_and_offset_to_weights, get_tweedie_p
joblib_memory = Memory(cache_location, verbose=0)
@attr.s
class Problem:
"""Store metadata about which problem we should run."""
data_loader = attr.ib(type=Callable)
distribution = attr.ib(type=str)
regularization_strength = attr.ib(type=float)
l1_ratio = attr.ib(type=float)
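# Illustrative sketch (added note; the loader choice and values below are hypothetical, not
# taken from this benchmark suite):
#   problem = Problem(
#       data_loader=partial(load_data, generate_narrow_insurance_dataset),
#       distribution="poisson",
#       regularization_strength=0.001,
#       l1_ratio=0.5,
#   )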
@joblib_memory.cache
def load_data(
loader_func: Callable[
[Optional[int], Optional[float], Optional[str]],
Tuple[pd.DataFrame, np.ndarray, np.ndarray],
],
num_rows: int = None,
storage: str = "dense",
single_precision: bool = False,
noise: float = None,
distribution: str = "poisson",
data_setup: str = "weights",
) -> Dict[str, np.ndarray]:
"""
Load the data.
A note about weights and exposures: Due to the way we have set up this problem, by
rescaling the target variable, it is appropriate to pass what is modeled as an
'exposure' as a weight. Everywhere else, exposures will be referred to as weights.
"""
# TODO: add a weights_and_offset option
# Step 1) Load the data.
if data_setup not in ["weights", "offset", "no-weights"]:
raise NotImplementedError
X_in, y, exposure = loader_func(num_rows, noise, distribution)
# Step 2) Convert to needed precision level.
if single_precision:
X_in = X_in.astype(np.float32)
y = y.astype(np.float32)
if exposure is not None:
exposure = exposure.astype(np.float32)
# Step 3) One hot encode columns if we are not using CategoricalMatrix
def transform_col(i: int, dtype) -> Union[pd.DataFrame, tm.CategoricalMatrix]:
if dtype.name == "category":
if storage == "cat":
return tm.CategoricalMatrix(X_in.iloc[:, i])
return DummyEncoder().fit_transform(X_in.iloc[:, [i]])
return X_in.iloc[:, [i]]
mat_parts = [transform_col(i, dtype) for i, dtype in enumerate(X_in.dtypes)]
# TODO: add a threshold for the number of categories needed to make a categorical
# matrix
# Step 4) Convert the matrix to the appropriate storage type.
if storage == "auto":
dtype = np.float32 if single_precision else np.float64
X = tm.from_pandas(X_in, dtype, sparse_threshold=0.1, cat_threshold=3)
elif storage == "cat":
cat_indices_in_expanded_arr: List[np.ndarray] = []
dense_indices_in_expanded_arr: List[int] = []
i = 0
for elt in mat_parts:
assert elt.ndim == 2
if isinstance(elt, tm.CategoricalMatrix):
ncol = elt.shape[1]
cat_indices_in_expanded_arr.append(np.arange(i, i + ncol))
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for KroneckerFactoredLattice Layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tempfile
from absl import logging
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow_lattice.python import kronecker_factored_lattice_layer as kfll
from tensorflow_lattice.python import test_utils
class KroneckerFactoredLatticeTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(KroneckerFactoredLatticeTest, self).setUp()
self.disable_all = False
self.disable_ensembles = False
self.loss_eps = 0.0001
self.small_eps = 1e-6
self.seed = 42
def _ResetAllBackends(self):
keras.backend.clear_session()
tf.compat.v1.reset_default_graph()
def _ScatterXUniformly(self, num_points, lattice_sizes, input_dims):
"""Deterministically generates num_point random points within lattice."""
np.random.seed(41)
x = []
for _ in range(num_points):
point = [
np.random.random() * (lattice_sizes - 1.0) for _ in range(input_dims)
]
x.append(np.asarray(point))
if input_dims == 1:
x.sort()
return x
def _ScatterXUniformlyExtendedRange(self, num_points, lattice_sizes,
input_dims):
"""Extends every dimension by 1.0 on both sides and generates points."""
np.random.seed(41)
#!/usr/bin/env python3
import argparse
import sys
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import softmax
import sklearn.datasets
import sklearn.metrics
import sklearn.model_selection
from sklearn.metrics import log_loss
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=10, type=int, help="Batch size")
parser.add_argument(
"--classes", default=10, type=int, help="Number of classes to use"
)
parser.add_argument(
"--hidden_layer", default=20, type=int, help="Hidden layer size"
)
parser.add_argument(
"--iterations", default=50, type=int, help="Number of iterations over the data"
)
parser.add_argument(
"--learning_rate", default=0.01, type=float, help="Learning rate"
)
parser.add_argument("--seed", default=42, type=int, help="Random seed")
parser.add_argument("--test_size", default=797, type=int, help="Test set size")
args = parser.parse_args()
# Set random seed
np.random.seed(args.seed)
# Use the digits dataset
data, target = sklearn.datasets.load_digits(n_class=args.classes, return_X_y=True)
# Append a constant feature with value 1 to the end of every input data
data = np.pad(data, ((0, 0), (0, 1)), constant_values=1)
# Split the data randomly to train and test using `sklearn.model_selection.train_test_split`,
# with `test_size=args.test_size` and `random_state=args.seed`.
train_data, test_data, train_target, test_target = sklearn.model_selection.train_test_split(
data, target, stratify=target, test_size=args.test_size, random_state=args.seed
)
# Generate initial model weights
weights = [
np.random.uniform(
size=[train_data.shape[1], args.hidden_layer], low=-0.1, high=0.1
),
np.random.uniform(size=[args.hidden_layer, args.classes], low=-0.1, high=0.1),
]
relu = lambda x: np.maximum(x, 0)
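# Illustrative sketch (added note): with the weights above, a single forward pass for a batch
# would look roughly like the following (the training loop itself is not shown here):
#   hidden = relu(train_data[:args.batch_size] @ weights[0])     # (batch, hidden_layer)
#   probs = softmax(hidden @ weights[1], axis=-1)                # (batch, classes)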
# standard libraries
from datetime import datetime
import threading, collections, queue, os, os.path, json
import time, logging
# third-party libraries
import editdistance as ed
import matplotlib.pyplot as plt
import numpy as np
import pyaudio
from scipy import signal
import torch
import wave
# project libraries
import speech
from speech.loader import log_spectrogram_from_data, log_spectrogram_from_file
from speech.models.ctc_decoder import decode as ctc_decode
from speech.models.ctc_model import CTC
from speech.utils.compat import normalize
from speech.utils.convert import to_numpy
from speech.utils.io import get_names, load_config, load_state_dict, read_pickle
from speech.utils.stream_utils import make_full_window
from speech.utils.wave import wav_duration, array_from_wave
set_linewidth=160
np.set_printoptions(linewidth=set_linewidth)
torch.set_printoptions(linewidth=set_linewidth)
log_filename = "logs_probs-hiddencell_2020-05-20.log"
# log levels: CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET
log_level = "WARNING"
logging.basicConfig(filename=None, filemode='w', level=log_level)
log_sample_len = 50 # number of data samples outputed to the log
def main(ARGS):
print('Initializing model...')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_path, preproc_path, config_path = get_names(ARGS.model_dir,
tag=ARGS.tag,
get_config=True,
model_name=ARGS.model_name)
print("model_path: ", model_path)
print("preproc_path: ", preproc_path)
print("config_path: ", config_path)
# load and update preproc
preproc = read_pickle(preproc_path)
preproc.update()
# if feature_norm is True, the streaming vs list_chunk and full_audio won't agree
# you can manually turn it off to make them agree, but then the predictions aren't very good.
# preproc.use_feature_normalize = False
# load and assign config
config = load_config(config_path)
model_cfg = config['model']
# create model
model = CTC(preproc.input_dim,
preproc.vocab_size,
model_cfg)
# load the state-dict
state_dict = load_state_dict(model_path, device=device)
model.load_state_dict(state_dict)
# setting model to eval model
model.eval()
#initial states for LSTM layers
hidden_size = model_cfg['encoder']['rnn']['dim']
hidden_in = torch.zeros((5, 1, hidden_size), dtype=torch.float32)
cell_in = torch.zeros((5, 1, hidden_size), dtype=torch.float32)
lstm_states = (hidden_in, cell_in)
PARAMS = {
"chunk_size": 46, # number of log_spec timesteps fed into the model
"half_context": 15, # half-size of the convolutional layers
"feature_window": 512, # number of audio frames into log_spec
"feature_step": 256, # number of audio frames in log_spec step
"feature_size": 257, # frequency dimension of log_spec
"initial_padding": 15, # padding of feature_buffer
"final_padding": 13, # final padding of feature_buffer
'fill_chunk_padding': 1, #TODO hard-coded value that is calculated as fill_chunk_padding
"blank_idx": model.blank
}
# stride of chunks across the log_spec output/ model input
PARAMS['stride'] = PARAMS['chunk_size'] - 2 * PARAMS['half_context']
logging.warning(f"PARAMS dict: {PARAMS}")
stream_probs, stream_preds, st_model_inputs = stream_infer(model, preproc, lstm_states, PARAMS, ARGS)
lc_probs, lc_preds, lc_model_inputs = list_chunk_infer_full_chunks(model, preproc, lstm_states, PARAMS, ARGS)
fa_probs, fa_preds, fa_model_inputs = full_audio_infer(model, preproc, lstm_states, PARAMS, ARGS)
print(f"Stream MODEL INPUTS shape: {st_model_inputs.shape}")
print(f"List chunk MODEL INPUTS shape: {lc_model_inputs.shape}")
print(f"Full audio MODEL INPUTS shape: {fa_model_inputs.shape}")
# saving the inputs to debugging in ipython
#np.save("./test_data/lc_input_2020-09-29_test.npy", lc_model_inputs)
#np.save("./test_data/st_input_2020-09-29_test.npy", st_model_inputs)
logging.warning(f"stream probs shape: {stream_probs.shape}")
logging.warning(f"list chunk probs shape: {lc_probs.shape}")
logging.warning(f"full audio probs shape: {fa_probs.shape}")
# checks to see that the inputs to each implementation are the same.
np.testing.assert_allclose(fa_model_inputs, lc_model_inputs, rtol=1e-03, atol=1e-05)
np.testing.assert_allclose(st_model_inputs, lc_model_inputs, rtol=1e-03, atol=1e-05)
np.testing.assert_allclose(st_model_inputs, fa_model_inputs, rtol=1e-03, atol=1e-05)
np.testing.assert_allclose(stream_probs, lc_probs, rtol=1e-03, atol=1e-05)
np.testing.assert_allclose(stream_probs, fa_probs, rtol=1e-03, atol=1e-05)
np.testing.assert_allclose(lc_probs, fa_probs, rtol=1e-03, atol=1e-05)
assert ed.eval(stream_preds, lc_preds)==0, "stream and list-chunk predictions are not the same"
assert ed.eval(stream_preds, fa_preds)==0, "stream and full-audio predictions are not the same"
assert ed.eval(lc_preds, fa_preds)==0, "list-chunk and full-audio predictions are not the same"
logging.warning(f"all probabilities and predictions are the same")
def stream_infer(model, preproc, lstm_states, PARAMS:dict, ARGS)->tuple:
"""
Performs streaming inference of an input wav file (if provided in ARGS) or from
the microphone. Inference is performed by the model, while the preproc preprocessing
object performs normalization.
"""
begin_time = time.time()
# Start audio with VAD
audio = Audio(device=ARGS.device, input_rate=ARGS.rate, file=ARGS.file)
frames = audio.frame_generator()
print("Listening (ctrl-C to exit)...")
logging.warning(f"--- starting stream_infer ---")
hidden_in, cell_in = lstm_states
wav_data = bytearray()
stride_counter = 0 # used to stride the feature_buffer
# audio buffer contains audio signal that is few into the log_spec
audio_buffer_size = 2 # two 16 ms steps in the features window
audio_ring_buffer = collections.deque(maxlen=audio_buffer_size)
# feature buffer contains log_spec output and is fed into the model
features_buffer_size = PARAMS['chunk_size']
features_ring_buffer = collections.deque(maxlen=features_buffer_size)
#saved_model_input = np.empty((1, PARAMS['chunk_size'], PARAMS['feature_size']))
# add `half_context` zero frames as padding to the feature buffer
## zero_frame is a single feature timestep with dims (1, feature_size)
zero_frame = np.zeros((1, PARAMS['feature_size']), dtype=np.float32)
for _ in range(PARAMS['half_context']):
features_ring_buffer.append(zero_frame)
predictions = list()
probs_list = list()
# TODO(dustin) why is the "* 2" at the end of frames_per_block?
frames_per_block = round( audio.RATE_PROCESS/ audio.BLOCKS_PER_SECOND * 2)
time_attributes = [
"audio_buffer",
"numpy_buffer",
"features",
"normalize",
"features_buffer",
"numpy_conversion",
"model_infer",
"output_assign",
"decoder_time",
"total_time"
]
# -------time evaluation variables-----------
audio_buffer_time, audio_buffer_count = 0.0, 0
numpy_buffer_time, numpy_buffer_count = 0.0, 0
features_time, features_count = 0.0, 0
normalize_time, normalize_count = 0.0, 0
features_buffer_time, features_buffer_count = 0.0, 0
numpy_conv_time, numpy_conv_count = 0.0, 0
model_infer_time, model_infer_count = 0.0, 0
output_assign_time, output_assign_count = 0.0, 0
decoder_time, decoder_count = 0.0, 0
total_time, total_count = 0.0, 0
# -------------------------------------------
# ------------ logging ----------------------
logging.warning(ARGS)
logging.warning(model)
logging.warning(preproc)
logging.warning(f"audio_ring_buffer size: {audio_buffer_size}")
logging.warning(f"feature_ring_buffer size: {features_buffer_size}")
# -------------------------------------------
try:
total_time_start = time.time()
for count, frame in enumerate(frames):
logging.debug(f"----------iteration {count}------------")
# exit the loop if there are no more full input frames
if len(frame) < frames_per_block:
logging.warning(f"final sample length {len(frame)}")
final_sample = frame
break
# ------------ logging ---------------
logging.info(f"sample length: {len(frame)}")
logging.info(f"audio_buffer length: {len(audio_ring_buffer)}")
#logging.debug(f"iter {count}: first {log_sample_len} raw audio buffer values added to audio_ring_buffer: {frame[:log_sample_len]}")
# ------------ logging ---------------
# fill up the audio_ring_buffer and then feed into the model
if len(audio_ring_buffer) < audio_buffer_size-1:
# note: appending new frame to right of the buffer
audio_buffer_time_start = time.time()
audio_ring_buffer.append(frame)
audio_buffer_time += time.time() - audio_buffer_time_start
audio_buffer_count += 1
else:
#audio_buffer_time_start = time.time()
audio_ring_buffer.append(frame)
#numpy_buffer_time_start = time.time()
#buffer_list = list(audio_ring_buffer)
# convert the audio buffer to numpy array
# a single audio frame has dims: (512,) which is reduced to (256,) in the numpy buffer
# The dimension of numpy buffer is reduced by half because integers in the audio_ring_buffer
# are encoded as two hexadecimal entries, which are reduced to a single integer in the numpy buffer
# two numpy buffers are then concatenated making the final `numpy_buffer` have dims: (512,)
numpy_buffer = np.concatenate(
(np.frombuffer(audio_ring_buffer[0], np.int16),
np.frombuffer(audio_ring_buffer[1], np.int16) )
)
#features_time_start = time.time()
# calculate the features with dim: (1, 257)
features_step = log_spectrogram_from_data(numpy_buffer, samp_rate=16000)
# normalize_time_start = time.time()
# normalize the features
norm_features = normalize(preproc, features_step)
# ------------ logging ---------------
logging.info(f"audio integers shape: {numpy_buffer.shape}")
#logging.debug(f"iter {count}: first {log_sample_len} input audio samples {numpy_buffer.shape}: \n {numpy_buffer[:log_sample_len]}")
logging.info(f"features_step shape: {features_step.shape}")
#logging.debug(f"iter {count}: log_spec frame (all 257 values) {features_step.shape}:\n {features_step}")
logging.info(f"features_buffer length: {len(features_ring_buffer)}")
#logging.debug(f"iter {count}: normalized log_spec (all 257 values) {norm_features.shape}:\n {norm_features[0,:log_sample_len]}")
logging.info(f"stride modulus: {stride_counter % PARAMS['stride']}")
# ------------ logging ---------------
# fill up the feature_buffer and then feed into the model
if len(features_ring_buffer) < features_buffer_size-1:
#features_buffer_time_start = time.time()
features_ring_buffer.append(norm_features)
else:
# if stride_counter is an even multiple of the stride value run inference
# on the buffer. Otherwise, append values to the buffer.
if stride_counter % PARAMS['stride'] != 0:
features_ring_buffer.append(norm_features)
stride_counter += 1
# run inference on the full feature_buffer
else:
stride_counter += 1
#features_buffer_time_start = time.time()
features_ring_buffer.append(norm_features)
#numpy_conv_time_start = time.time()
# conv_context dim: (31, 257)
conv_context = np.concatenate(list(features_ring_buffer), axis=0)
# addding batch dimension: (1, 31, 257)
conv_context = np.expand_dims(conv_context, axis=0)
# saved_model_input saves the inputs to the model
if stride_counter == 1:
print(f"~~~~~~~ stride counter: {stride_counter} ~~~~~~~~~")
saved_model_input = conv_context
else:
saved_model_input = np.concatenate((saved_model_input, conv_context), axis=0)
#model_infer_time_start = time.time()
if stride_counter == 1:
logging.debug(f"iter {count}: first {log_sample_len} of input: {conv_context.shape}\n {conv_context[0, 0, :log_sample_len]}")
logging.debug(f"iter {count}: first {log_sample_len} of hidden_in first layer: {hidden_in.shape}\n {hidden_in[0, :, :log_sample_len]}")
logging.debug(f"iter {count}: first {log_sample_len} of cell_in first layer: {cell_in.shape}\n {cell_in[0, :, :log_sample_len]}")
model_out = model(torch.from_numpy(conv_context), (hidden_in, cell_in))
#output_assign_time_start = time.time()
probs, (hidden_out, cell_out) = model_out
if stride_counter == 1:
logging.debug(f"iter {count}: first {log_sample_len} of prob output {probs.shape}:\n {probs[0, 0, :log_sample_len]}")
logging.debug(f"iter {count}: first {log_sample_len} of hidden_out first layer {hidden_out.shape}:\n {hidden_out[0, :, :log_sample_len]}")
logging.debug(f"iter {count}: first {log_sample_len} of cell_out first layer {cell_out.shape}:\n {cell_out[0, :, :log_sample_len]}")
# probs dim: (1, 1, 40)
probs = to_numpy(probs)
probs_list.append(probs)
hidden_in, cell_in = hidden_out, cell_out
# ------------ logging ---------------
logging.info(f"conv_context shape: {conv_context.shape}")
logging.info(f"probs shape: {probs.shape}")
logging.info(f"probs_list len: {len(probs_list)}")
#logging.info(f"probs value: {probs}")
# ------------ logging ---------------
# decoding every 20 time-steps
#if count%20 ==0 and count!=0:
#decoder_time_start = time.time()
#
probs_steps = np.concatenate(probs_list, axis=1)[0]
tokenized_labels = max_decode(probs_steps, blank=PARAMS['blank_idx'])
# int_labels, likelihood = ctc_decode(probs[0], beam_size=50, blank=PARAMS['blank_idx'])
predictions = preproc.decode(tokenized_labels)
# ------------ logging ---------------
logging.warning(f"predictions: {predictions}")
# ------------ logging ---------------
total_count += 1
if ARGS.savewav: wav_data.extend(frame)
except KeyboardInterrupt:
pass
finally:
# IN THE FINALLY BLOCK
# if frames is empty
if not next(frames):
logging.info(f"---------- processing final sample in audio buffer ------------")
zero_byte = b'\x00'
num_missing_bytes = PARAMS['feature_step']*2 - len(final_sample)
final_sample += zero_byte * num_missing_bytes
audio_ring_buffer.append(final_sample)
buffer_list = list(audio_ring_buffer)
numpy_buffer = np.concatenate(
(np.frombuffer(buffer_list[0], np.int16),
np.frombuffer(buffer_list[1], np.int16)))
features_step = log_spectrogram_from_data(numpy_buffer, samp_rate=16000)
norm_features = normalize(preproc, features_step)
# --------logging ------------
# logging.warning(f"final sample length 2: {len(final_sample)}")
logging.warning(f"numpy_buffer shape: {len(numpy_buffer)}")
# logging.warning(f"audio_buffer 1 length: {len(buffer_list[0])}")
# logging.warning(f"audio_buffer 2 length: {len(buffer_list[1])}")
#logging.debug(f"iter {count}: first {log_sample_len} input audio samples {numpy_buffer.shape}: \n {numpy_buffer[:log_sample_len]}")
logging.warning(f"features_step shape: {features_step.shape}")
#logging.debug(f"iter {count}: log_spec frame (all 257 values) {features_step.shape}:\n {features_step}")
#logging.debug(f"iter {count}: normalized log_spec (all 257 values) {norm_features.shape}:\n {norm_features[0,:log_sample_len]}")
logging.warning(f"features_buffer length: {len(features_ring_buffer)}")
logging.warning(f"stride modulus: {stride_counter % PARAMS['stride']}")
# --------logging ------------
if stride_counter % PARAMS['stride'] !=0:
features_ring_buffer.append(norm_features)
stride_counter += 1
else:
features_ring_buffer.append(norm_features)
stride_counter += 1
conv_context = np.concatenate(list(features_ring_buffer), axis=0)
# addding batch dimension: (1, 31, 257)
conv_context = np.expand_dims(conv_context, axis=0)
# saved_model_input saves the inputs to the model for comparison with list_chunk and full_audio inputs
saved_model_input = np.concatenate((saved_model_input, conv_context), axis=0)
model_out = model(torch.from_numpy(conv_context), (hidden_in, cell_in))
probs, (hidden_out, cell_out) = model_out
logging.debug(f"iter {count}: first {log_sample_len} of prob output {probs.shape}:\n {probs[0, 0, :log_sample_len]}")
logging.debug(f"iter {count}: first {log_sample_len} of hidden_out first layer {hidden_out.shape}:\n {hidden_out[0, :, :log_sample_len]}")
logging.debug(f"iter {count}: first {log_sample_len} of cell_out first layer {cell_out.shape}:\n {cell_out[0, :, :log_sample_len]}")
probs = to_numpy(probs)
probs_list.append(probs)
padding_iterations = PARAMS["final_padding"] + PARAMS['fill_chunk_padding'] + PARAMS['stride']
for count, frame in enumerate(range(padding_iterations)):
logging.debug(f"---------- adding zeros at the end of audio sample ------------")
# -------------logging ----------------
logging.info(f"stride modulus: {stride_counter % PARAMS['stride']}")
# -------------logging ----------------
if stride_counter % PARAMS['stride'] !=0:
# zero_frame is (1, 257) numpy array of zeros
features_ring_buffer.append(zero_frame)
stride_counter += 1
else:
stride_counter += 1
features_buffer_time_start = time.time()
features_ring_buffer.append(zero_frame)
features_buffer_time += time.time() - features_buffer_time_start
features_buffer_count += 1
numpy_conv_time_start = time.time()
# conv_context dim: (31, 257)
conv_context = np.concatenate(list(features_ring_buffer), axis=0)
# addding batch dimension: (1, 31, 257)
conv_context = np.expand_dims(conv_context, axis=0)
numpy_conv_time += time.time() - numpy_conv_time_start
numpy_conv_count += 1
# saved_model_input saves the inputs to the model for comparison with list_chunk and full_audio inputs
saved_model_input = np.concatenate((saved_model_input, conv_context), axis=0)
model_infer_time_start = time.time()
model_out = model(torch.from_numpy(conv_context), (hidden_in, cell_in))
model_infer_time += time.time() - model_infer_time_start
model_infer_count += 1
output_assign_time_start = time.time()
probs, (hidden_out, cell_out) = model_out
# probs dim: (1, 1, 40)
probs = to_numpy(probs)
probs_list.append(probs)
hidden_in, cell_in = hidden_out, cell_out
output_assign_time += time.time() - output_assign_time_start
output_assign_count += 1
# ------------ logging ---------------
logging.info(f"conv_context shape: {conv_context.shape}")
logging.info(f"probs shape: {probs.shape}")
logging.info(f"probs_list len: {len(probs_list)}")
#logging.info(f"probs value: {probs}")
# ------------ logging ---------------
# decoding every 20 time-steps
if count%20 ==0:
decoder_time_start = time.time()
probs_steps = np.concatenate(probs_list, axis=1)
int_labels = max_decode(probs_steps[0], blank=PARAMS['blank_idx'])
# int_labels, likelihood = ctc_decode(probs[0], beam_size=50, blank=PARAMS['blank_idx'])
predictions = preproc.decode(int_labels)
decoder_time += time.time() - decoder_time_start
decoder_count += 1
# ------------ logging ---------------
logging.warning(f"predictions: {predictions}")
# ------------ logging ---------------
total_count += 1
if ARGS.savewav: wav_data.extend(frame)
# process the final frames
#logging.warning(f"length of final_frames: {len(final_sample)}")
decoder_time_start = time.time()
probs_steps = np.concatenate(probs_list, axis=1)
int_labels = max_decode(probs_steps[0], blank=PARAMS['blank_idx'])
# int_labels, likelihood = ctc_decode(probs[0], beam_size=50, blank=PARAMS['blank_idx'])
predictions = preproc.decode(int_labels)
decoder_time += time.time() - decoder_time_start
decoder_count += 1
logging.warning(f"final predictions: {predictions}")
audio.destroy()
total_time = time.time() - total_time_start
acc = 3
duration = wav_duration(ARGS.file)
logging.warning(f"-------------- streaming_infer --------------")
logging.warning(f"audio_buffer time (s), count: {round(audio_buffer_time, acc)}, {audio_buffer_count}")
logging.warning(f"numpy_buffer time (s), count: {round(numpy_buffer_time, acc)}, {numpy_buffer_count}")
logging.warning(f"features_operation time (s), count: {round(features_time, acc)}, {features_count}")
logging.warning(f"normalize time (s), count: {round(normalize_time, acc)}, {normalize_count}")
logging.warning(f"features_buffer time (s), count: {round(features_buffer_time, acc)}, {features_buffer_count}")
logging.warning(f"numpy_conv time (s), count: {round(numpy_conv_time, acc)}, {numpy_conv_count}")
logging.warning(f"model_infer time (s), count: {round(model_infer_time, acc)}, {model_infer_count}")
logging.warning(f"output_assign time (s), count: {round(output_assign_time, acc)}, {output_assign_count}")
logging.warning(f"decoder time (s), count: {round(decoder_time, acc)}, {decoder_count}")
logging.warning(f"total time (s), count: {round(total_time, acc)}, {total_count}")
logging.warning(f"Multiples faster than realtime : {round(duration/total_time, acc)}x")
if ARGS.savewav:
audio.write_wav(os.path.join(ARGS.savewav, datetime.now().strftime("savewav_%Y-%m-%d_%H-%M-%S_%f.wav")), wav_data)
all_audio = np.frombuffer(wav_data, np.int16)
plt.plot(all_audio)
plt.show()
probs = np.concatenate(probs_list, axis=1)
saved_model_input = remove_input_duplicates(saved_model_input, PARAMS['stride'])
return probs, predictions, saved_model_input
def remove_input_duplicates(model_inputs:np.ndarray, stride:int)->np.ndarray:
"""this function removes the duplicates from the input.
Args:
model_inputs (np.ndarray): feature inputs to the model with dims (#_inputs, chunk_size, feature_size)
stride (int): number of feature inputs to stride over before feeding to the model
"""
# iterating over the numpy array will return arrays for size (chunk_size, feature_size) as `inp`
for i, inp in enumerate(model_inputs):
# take the entirety of the initial input
if i == 0:
dedup_inputs = inp
else:
# for all other inputs, only use the last `stride` number of inputs
# concatenate this last segment along the `chunk_size` dimension
dedup_inputs = np.concatenate((dedup_inputs, inp[-stride:, :]), axis=0)
assert dedup_inputs.shape[1] == 257, "second dedup_inputs dimension is not 257"
return dedup_inputs
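# Illustrative sketch (added note, hypothetical shapes): with chunk_size=46, stride=16 and
# feature_size=257, three saved inputs of shape (3, 46, 257) deduplicate to a single
# (46 + 2 * 16, 257) = (78, 257) array, i.e. the full first chunk plus `stride` new frames per call.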
def process_pad_audio(audio_file, preproc, PARAMS):
"""
"""
audio_data, samp_rate = array_from_wave(audio_file)
# pads the audio data so that the data will be evenly divisble by the feature_step
audio_data = make_full_window(audio_data, PARAMS['feature_window'], PARAMS['feature_step'])
features_time = time.time()
features = log_spectrogram_from_data(audio_data, samp_rate)
features_time = time.time() - features_time
normalize_time = time.time()
norm_features = normalize(preproc, features)
normalize_time = time.time() - normalize_time
convert_pad_time = time.time()
# adds the batch dimension (1, time, 257)
norm_features = np.expand_dims(norm_features, axis=0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 18 13:12:28 2020
@author: henry
"""
import os
import Analysis
import numpy as np
import PyGnuplot as gp
from joblib import Parallel, delayed
from scipy.stats import linregress
def plotStringSingleFile(size, dat_name, formatting, title):
plot_string = 'plot'
for i in range(size):
plot_string = plot_string\
+ ' "%s" u 1:%d %s %s,' % (dat_name, i+2, formatting, title[i])
return plot_string
def plotStringMultiFile(size, dat_name, formatting, title):
plot_string = 'plot'
for i in range(size):
plot_string = plot_string\
+ ' "%s" %s %s,' % (dat_name[i], formatting, title[i])
return plot_string
def plotStringMultiFileWithFit(size, dat_name, formatting, title, grad,
grad_title):
plot_string = 'plot'
for i in range(size):
plot_string = plot_string\
+ ' "%s" %s lc %d %s,' % (dat_name[i], formatting, i, title[i])
plot_string = plot_string\
+ ' %e*x lc %d title "%s",' % (grad[i], i, grad_title[i])
return plot_string
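# Illustrative sketch (added note, hypothetical file names): for example,
#   plotStringSingleFile(2, "msd.dat", "with points", ["notitle", "notitle"])
# returns the gnuplot command
#   'plot "msd.dat" u 1:2 with points notitle, "msd.dat" u 1:3 with points notitle,'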
# %%
class Graphing:
def __init__(self, bacteria, graph_dir, plot_dir, threads):
self.bacteria = bacteria
self.graph_dir = graph_dir
self.plot_dir = plot_dir
self.threads = threads
for entry in os.scandir(plot_dir):
os.remove(entry.path)
def BacteriaPaths(self):
gp.c("reset")
gp.c("set ticslevel 0")
gp.c("set view equal xyz")
gp.c("set terminal pngcairo enhanced size 1600,1200 font 'ariel, 14'")
for key in self.bacteria.bacterium.keys():
output = os.path.join(self.graph_dir, key+'_path.png')
gp.c('set output "'+output+'"')
plot_string = 'splot'
for bact in self.bacteria.bacterium[key].keys():
temp_out = self.bacteria.bacterium[
key][bact].total_displacement
graph_out = np.swapaxes(temp_out, 0, 1)
dat_name = os.path.join(self.plot_dir,
str(key)+str(bact)+'path.dat')
gp.s(graph_out, dat_name)
plot_string = plot_string + ' "' + dat_name\
+ '" u 1:2:3 with lines,'
gp.c(plot_string)
def BacteriaHeading(self):
gp.c("reset")
gp.c("set ticslevel 0")
gp.c("set view equal xyz")
gp.c("set terminal pngcairo enhanced size 1600,1200 font 'ariel, 14'")
for key in self.bacteria.bacterium.keys():
output = os.path.join(self.graph_dir, key+'_heading.png')
gp.c('set output "'+output+'"')
plot_string = 'splot'
for bact in self.bacteria.bacterium[key].keys():
temp_out = self.bacteria.bacterium[key][bact].vectors_cartesian
graph_out = np.swapaxes(temp_out, 0, 1)
dat_name = os.path.join(self.plot_dir,
str(key)+str(bact)+'heading.dat')
gp.s(graph_out, dat_name)
plot_string = plot_string + ' "' + dat_name\
+ '" u 1:2:3 with lines,'
gp.c(plot_string)
def DiffusionConstants(self):
with Parallel(n_jobs=self.threads) as parallel:
self.LDValues = {}
for key in self.bacteria.bacterium.keys():
self.LDValues[key] = Analysis.LDValues(self.bacteria.config[key])
# %% Linear - LogLog fullscale
gp.c('reset')
gp.c('set logscale xy 10')
gp.c('set xlabel "{/Symbol t} (s)"')
gp.c('set ylabel "MSD (m^2)"')
gp.c('set key top left')
gp.c("set terminal pngcairo enhanced"
+ " size 1600,1200 font 'ariel, 14'")
amalg_dat_name = []
amalg_titles = []
for key in self.bacteria.bacterium.keys():
key_title = self.bacteria.config[key].name
print('Started: %s \t Linear Analysis' % (key_title))
output = os.path.join(self.graph_dir,
'%s_linear.png' % (key))
gp.c('set output "%s"' % (output))
g_title = 'Analysis of Linear Mean Squared Displacement - %s'\
% (key_title)
gp.c('set title "%s"' % (g_title))
tau = Analysis.TauCalc(self.bacteria.config[key])
gp.c('set xrange [%f:%f]' % (tau.min()*0.75, tau.max()*1.25))
results_array = parallel(delayed(
Analysis.Linear)(self.bacteria.bacterium[key][bact],
self.bacteria.config[key])
for bact in self.bacteria.bacterium[key].keys())
size = len(results_array)
dat_name = os.path.join(self.plot_dir,
'%s_msd_lin.dat' % (key))
graph_out = np.vstack((tau, results_array))
gp.s(graph_out, dat_name)
title = ['notitle' for i in range(size)]
plot_string = plotStringSingleFile(size, dat_name,
'with points', title)
gp.c(plot_string)
output = os.path.join(self.graph_dir,
'%s_linear_mean.png' % (key))
gp.c('set output "%s"' % (output))
mean_results = np.mean(results_array, axis=0)
std_dev = np.std(results_array, axis=0)
std_error = std_dev/np.sqrt(size)
current_results = np.vstack(
(tau, mean_results, std_error))
dat_name = os.path.join(self.plot_dir,
'%s_msd_lin_mean.dat' % (key))
gp.s(current_results, dat_name)
plot_string = 'plot "%s" u 1:2:3 with yerrorbars' % (dat_name)
plot_string = plot_string + ' title "Mean Linear MSD"'
gp.c(plot_string)
amalg_dat_name.append(dat_name)
amalg_titles.append('title "%s"' % (key_title))
print('Completed %s \t Linear Analysis' % (key))
amalg_formatting = 'u 1:2:3 with yerrorlines'
amalg_plot_string = plotStringMultiFile(len(amalg_dat_name),
amalg_dat_name,
amalg_formatting,
amalg_titles)
output = os.path.join(self.graph_dir, 'linear_mean_amalg.png')
gp.c('set output "%s"' % (output))
g_title = 'Analysis of Linear Mean Squared Displacement'
gp.c('set title "%s"' % (g_title))
gp.c('set xrange [*:*]')
gp.c(amalg_plot_string)
# %% Linear - High Range (>1 sec)
gp.c('reset')
gp.c('set xlabel "{/Symbol t} (s)"')
gp.c('set ylabel "MSD (m^2)"')
gp.c('set key top left')
gp.c("set terminal pngcairo enhanced"
+ " size 1600,1200 font 'ariel, 14'")
amalg_dat_name = []
amalg_titles = []
amalg_grad = []
amalg_grad_titles = []
for key in self.bacteria.bacterium.keys():
line_colour = 1
key_title = self.bacteria.config[key].name
print('Started: %s \t Linear Analysis High Range'
% (key_title))
output = os.path.join(self.graph_dir,
'%s_linear_hr.png' % (key))
gp.c('set output "%s"' % (output))
g_title = 'Analysis of Linear Mean Squared Displacement %s'\
% (key_title)
gp.c('set title "%s"' % (g_title))
tau = Analysis.TauCalcHR(self.bacteria.config[key])
gp.c('set xrange [%f:%f]' % (tau.min()-5, tau.max()+5))
results_array = parallel(delayed(
Analysis.LinearHighRange)
(self.bacteria.bacterium[key][bact],
self.bacteria.config[key])
for bact in self.bacteria.bacterium[key].keys())
size = len(results_array)
dat_name = os.path.join(self.plot_dir,
'%s_msd_lin_hr.dat' % (key))
graph_out = np.vstack((tau, results_array))
gp.s(graph_out, dat_name)
title = ['notitle' for i in range(size)]
plot_string = plotStringSingleFile(size, dat_name,
'with points', title)
gp.c(plot_string)
output = os.path.join(self.graph_dir,
'%s_linear_mean_hr.png' % (key))
gp.c('set output "%s"' % (output))
mean_results = np.mean(results_array, axis=0)
std_dev = np.std(results_array, axis=0)
std_error = std_dev/np.sqrt(size)
# %% import library
import pathlib
import sys
from glob import glob
import numpy as np
import pandas as pd
# import residual_node2vec as rv
import utils_link_pred
from scipy import sparse
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
# Helper Functions
def get_params(filename):
params = pathlib.Path(filename).stem.split("_")
retval = {"filename": filename}
for p in params:
if "=" not in p:
continue
kv = p.split("=")
retval[kv[0]] = kv[1]
return retval
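# Illustrative sketch (added note, hypothetical file name):
#   get_params("embeddings/net=karate_dim=64.emb")
#   -> {"filename": "embeddings/net=karate_dim=64.emb", "net": "karate", "dim": "64"}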
# Loading
if "snakemake" in sys.modules:
net_files = snakemake.input["net_files"]
emb_files = snakemake.input["emb_files"]
edge_files = snakemake.input["edge_files"]
output_file = snakemake.output["output_file"]
else:
net_files = [f for f in glob("../data/link-prediction/networks/net=*")]
emb_files = [f for f in glob("../data/link-prediction/embeddings/*")]
edge_files = [
f for f in glob("../data/link-prediction/networks/test_edgelist_*.csv")
]
output_file = "../data/link-prediction/results/auc_score.csv"
# %%
# Loading
#
emb_file_table = pd.DataFrame([get_params(r) for r in emb_files])
net_file_table = pd.DataFrame([get_params(r) for r in net_files])
edge_file_table = pd.DataFrame([get_params(r) for r in edge_files])
# %%
# Merging
#
emb_file_table = emb_file_table.rename(columns={"filename": "emb_file"})
edge_file_table = edge_file_table.rename(columns={"filename": "edge_file"})
net_file_table = net_file_table.rename(columns={"filename": "net_file"})
cols = list(set(emb_file_table.columns).intersection(set(edge_file_table.columns)))
file_table = pd.merge(emb_file_table, edge_file_table, on=cols)
cols = list(set(file_table.columns).intersection(set(net_file_table.columns)))
file_table = pd.merge(file_table, net_file_table, on=cols)
# %%
# Calculate the AUC
#
def calc_modeled_prob(emb, net, src, trg, model_name, membership, offset):
dotsim = np.sum(emb[src, :] * emb[trg, :], axis=1)
# Copyright (C) 2016-2018 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import xdl
import unittest
import numpy as np
from xdl.python.lib.datatype import *
idx = np.array([2,1,3,0,1,4,0,3,5,2], dtype=np.int32)
values = np.array([1,2,3,4,5,6,7,8,9,10],dtype=np.float)
segs = np.array([3,4,6,6,10],dtype=np.int32)
grps = np.array([],dtype=np.int32)
embeds = np.array([[0.1],[0.2],[0.3],[0.4],[0.5],[0.6]],dtype=np.float)
grads = np.array([[0.1,0.2,0.3],[0.4,0.5,0.6],[0.7,0.8,0.9],
[1.0,1.1,1.2],[1.3,1.4,1.5]],dtype=np.float)
length = 3
class TestTileGrad(unittest.TestCase):
def test_cpu_tile_empty_value(self):
empty_values = np.array([], dtype=np.float)
res = xdl.tile_grad(embeds, idx, empty_values, segs, grps,
grads, length=length, reverse=False)
res = xdl.execute(res)
res_grad = np.array([[1.7],[0.9],[0.1],[1.7],[0.8],[1.5]],dtype=np.float)
self.assertTrue(np.allclose(res, res_grad))
def test_cpu_tile_empty_value_reverse(self):
empty_values = np.array([], dtype=np.float)
res = xdl.tile_grad(embeds, idx, empty_values, segs, grps,
grads, length=length, reverse=True)
res = xdl.execute(res)
res_grad = np.array([[0.4],[1.0],[1.6],[1.6],[0.7],[1.4]],dtype=np.float)
# coding: utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Dataset for 3D object detection on SUN RGB-D (with support of vote supervision).
A sunrgbd oriented bounding box is parameterized by (cx,cy,cz), (l,w,h) -- (dx,dy,dz) in upright depth coord
(Z is up, Y is forward, X is right ward), heading angle (from +X rotating to -Y) and semantic class
Point clouds are in **upright_depth coordinate (X right, Y forward, Z upward)**
Return heading class, heading residual, size class and size residual for 3D bounding boxes.
Oriented bounding box is parameterized by (cx,cy,cz), (l,w,h), heading_angle and semantic class label.
(cx,cy,cz) is in upright depth coordinate
(l,w,h) are the box edge lengths along the three axes
The heading angle is a rotation rad from +X rotating towards -Y. (+X is 0, -Y is pi/2)
Author: <NAME>
Date: 2019
"""
import os
import sys
import numpy as np
from torch.utils.data import Dataset
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import pc_util
import sunrgbd_utils
from sunrgbd_utils import extract_pc_in_box3d
from model_util_sunrgbd import SunrgbdDatasetConfig
DC = SunrgbdDatasetConfig() # dataset specific config
MAX_NUM_OBJ = 64 # maximum number of objects allowed per scene
MEAN_COLOR_RGB = np.array([0.5,0.5,0.5]) # sunrgbd color is in 0~1
DIST_THRESH = 0.1#0.2
VAR_THRESH = 5e-3
CENTER_THRESH = 0.1
LOWER_THRESH = 1e-6
NUM_POINT = 50
NUM_POINT_LINE = 10
LINE_THRESH = 0.1#0.2
MIND_THRESH = 0.1
NUM_POINT_SEM_THRESHOLD = 1
def check_upright(para_points):
return (para_points[0][-1] == para_points[1][-1]) and (para_points[1][-1] == para_points[2][-1]) and (para_points[2][-1] == para_points[3][-1])
def check_z(plane_equ, para_points):
return np.sum(para_points[:,2] + plane_equ[-1]) / 4.0 < LOWER_THRESH
def clockwise2counter(angle):
'''
@Args:
angle: clockwise from x axis, from 0 to 2*pi,
@Returns:
theta: counter clockwise, -pi / 2 ~ pi / 2, +x~+y: (0, pi/2), +x~-y: (0, -pi/2)
'''
return -((angle + np.pi / 2) % np.pi) + np.pi / 2;
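# Worked example (added note): clockwise2counter(0) == 0 and
# clockwise2counter(np.pi / 4) == -np.pi / 4, i.e. a clockwise angle of pi/4 from +x maps to
# the equivalent counter-clockwise angle in (-pi/2, pi/2].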
def point2line_dist(points, a, b):
'''
@Args:
points: (N, 3)
a / b: (3,)
@Returns:
distance: (N,)
'''
x = b - a
t = np.dot(points - a, x) / np.dot(x, x)
c = a + t[:, None] * np.tile(x, (t.shape[0], 1))
return np.linalg.norm(points - c, axis=1)
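# Worked example (added note, hypothetical points): the distance from (0, 1, 0) to the line
# through a = (0, 0, 0) and b = (1, 0, 0) is 1.0:
#   point2line_dist(np.array([[0.0, 1.0, 0.0]]), np.zeros(3), np.array([1.0, 0.0, 0.0]))
#   -> array([1.])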
def get_linesel(points, corners, direction):
''' corners:
[[xmin, ymin, zmin], [xmin, ymin, zmax], [xmin, ymax, zmin], [xmin, ymax, zmax],
[xmax, ymin, zmin], [xmax, ymin, zmax], [xmax, ymax, zmin], [xmax, ymax, zmax]]
'''
if direction == 'lower':
sel1 = point2line_dist(points, corners[0], corners[2]) < LINE_THRESH
sel2 = point2line_dist(points, corners[4], corners[6]) < LINE_THRESH
sel3 = point2line_dist(points, corners[0], corners[4]) < LINE_THRESH
sel4 = point2line_dist(points, corners[2], corners[6]) < LINE_THRESH
return sel1, sel2, sel3, sel4
elif direction == 'upper':
sel1 = point2line_dist(points, corners[1], corners[3]) < LINE_THRESH
sel2 = point2line_dist(points, corners[5], corners[7]) < LINE_THRESH
sel3 = point2line_dist(points, corners[1], corners[5]) < LINE_THRESH
sel4 = point2line_dist(points, corners[3], corners[7]) < LINE_THRESH
return sel1, sel2, sel3, sel4
elif direction == 'left':
sel1 = point2line_dist(points, corners[0], corners[1]) < LINE_THRESH
sel2 = point2line_dist(points, corners[2], corners[3]) < LINE_THRESH
return sel1, sel2
elif direction == 'right':
sel1 = point2line_dist(points, corners[4], corners[5]) < LINE_THRESH
sel2 = point2line_dist(points, corners[6], corners[7]) < LINE_THRESH
return sel1, sel2
else:
raise AssertionError('direction must be lower / upper / left / right')
def get_linesel2(points, ymin, ymax, zmin, zmax, axis=0):
#sel3 = sweep(points, axis, ymax, 2, zmin, zmax)
#sel4 = sweep(points, axis, ymax, 2, zmin, zmax)
sel3 = np.abs(points[:,axis] - ymin) < LINE_THRESH
sel4 = np.abs(points[:,axis] - ymax) < LINE_THRESH
return sel3, sel4
''' ATTENTION: SUNRGBD, size_label is only half the actual size
'''
def params2bbox(center, size, angle):
''' from bbox_center, angle and size to bbox
@Args:
center: (3,)
size: (3,)
angle: -pi ~ pi, +x~+y: (0, pi/2), +x~-y: (0, -pi/2)
@Returns:
bbox: 8 x 3, order:
[[xmin, ymin, zmin], [xmin, ymin, zmax], [xmin, ymax, zmin], [xmin, ymax, zmax],
[xmax, ymin, zmin], [xmax, ymin, zmax], [xmax, ymax, zmin], [xmax, ymax, zmax]]
'''
xsize = size[0]
ysize = size[1]
zsize = size[2]
vx = np.array([np.cos(angle), np.sin(angle), 0])
vy = np.array([-np.sin(angle), np.cos(angle), 0])
vx = vx * np.abs(xsize) / 2
vy = vy * np.abs(ysize) / 2
vz = np.array([0, 0, np.abs(zsize) / 2])
bbox = np.array([\
center - vx - vy - vz, center - vx - vy + vz,
center - vx + vy - vz, center - vx + vy + vz,
center + vx - vy - vz, center + vx - vy + vz,
center + vx + vy - vz, center + vx + vy + vz])
return bbox
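# Worked example (added note, hypothetical values):
#   params2bbox(np.zeros(3), np.array([2., 2., 2.]), 0.0)
# returns the 8 corners of the axis-aligned cube spanning [-1, 1] in x, y and z, in the
# corner order documented above.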
class SunrgbdDetectionVotesDataset(Dataset):
def __init__(self, data_path=None, split_set='train', num_points=20000,
use_color=False, use_height=False, use_v1=False,
augment=False, scan_idx_list=None):
assert(num_points<=50000)
self.use_v1 = use_v1
if use_v1:
self.data_path = os.path.join(data_path, 'sunrgbd_pc_bbox_votes_50k_v1_' + split_set)
# self.data_path = os.path.join('/scratch/cluster/yanght/Dataset/sunrgbd/sunrgbd_pc_bbox_votes_50k_v1_' + split_set)
else:
AssertionError("v2 data is not prepared")
self.raw_data_path = os.path.join(ROOT_DIR, 'sunrgbd/sunrgbd_trainval')
self.scan_names = sorted(list(set([os.path.basename(x)[0:6] \
for x in os.listdir(self.data_path)])))
if scan_idx_list is not None:
self.scan_names = [self.scan_names[i] for i in scan_idx_list]
self.num_points = num_points
self.augment = augment
self.use_color = use_color
self.use_height = use_height
def __len__(self):
return len(self.scan_names)
def __getitem__(self, idx):
"""
Returns a dict with following keys:
point_clouds: (N,3+C)
center_label: (MAX_NUM_OBJ,3) for GT box center XYZ
heading_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_HEADING_BIN-1
heading_residual_label: (MAX_NUM_OBJ,)
size_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_SIZE_CLUSTER
size_residual_label: (MAX_NUM_OBJ,3)
sem_cls_label: (MAX_NUM_OBJ,) semantic class index
box_label_mask: (MAX_NUM_OBJ) as 0/1 with 1 indicating a unique box
vote_label: (N,9) with votes XYZ (3 votes: X1Y1Z1, X2Y2Z2, X3Y3Z3)
if there is only one vote than X1==X2==X3 etc.
vote_label_mask: (N,) with 0/1 with 1 indicating the point
is in one of the object's OBB.
scan_idx: int scan index in scan_names list
max_gt_bboxes: unused
"""
scan_name = self.scan_names[idx]
point_color_sem = np.load(os.path.join(self.data_path, scan_name)+'_pc.npz')['pc'] # Nx6
bboxes = np.load(os.path.join(self.data_path, scan_name)+'_bbox.npy') # K,8
point_votes = np.load(os.path.join(self.data_path, scan_name)+'_votes.npz')['point_votes'] # Nx10
semantics37 = point_color_sem[:, 6]
semantics10 = np.array([DC.class37_2_class10[k] for k in semantics37])
semantics10_multi = [DC.class37_2_class10_multi[k] for k in semantics37]
if not self.use_color:
point_cloud = point_color_sem[:, 0:3]
else:
point_cloud = point_color_sem[:,0:6]
point_cloud[:,3:6] = (point_color_sem[:,3:6]-MEAN_COLOR_RGB)
if self.use_height:
floor_height = np.percentile(point_cloud[:,2],0.99)
height = point_cloud[:,2] - floor_height
point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)],1) # (N,4) or (N,7)
# ------------------------------- DATA AUGMENTATION ------------------------------
if self.augment:
if np.random.random() > 0.5:
# Flipping along the YZ plane
point_cloud[:,0] = -1 * point_cloud[:,0]
bboxes[:,0] = -1 * bboxes[:,0]
bboxes[:,6] = np.pi - bboxes[:,6]
point_votes[:,[1,4,7]] = -1 * point_votes[:,[1,4,7]]
# Rotation along up-axis/Z-axis
rot_angle = (np.random.random()*np.pi/3) - np.pi/6 # -30 ~ +30 degree
rot_mat = sunrgbd_utils.rotz(rot_angle)
point_votes_end = np.zeros_like(point_votes)
point_votes_end[:,1:4] = np.dot(point_cloud[:,0:3] + point_votes[:,1:4], np.transpose(rot_mat))
point_votes_end[:,4:7] = np.dot(point_cloud[:,0:3] + point_votes[:,4:7], np.transpose(rot_mat))
point_votes_end[:,7:10] = np.dot(point_cloud[:,0:3] + point_votes[:,7:10], np.transpose(rot_mat))
point_cloud[:,0:3] = np.dot(point_cloud[:,0:3], np.transpose(rot_mat))
bboxes[:,0:3] = np.dot(bboxes[:,0:3], np.transpose(rot_mat))
bboxes[:,6] -= rot_angle
point_votes[:,1:4] = point_votes_end[:,1:4] - point_cloud[:,0:3]
point_votes[:,4:7] = point_votes_end[:,4:7] - point_cloud[:,0:3]
point_votes[:,7:10] = point_votes_end[:,7:10] - point_cloud[:,0:3]
# Augment RGB color
if self.use_color:
rgb_color = point_cloud[:,3:6] + MEAN_COLOR_RGB
rgb_color *= (1+0.4*np.random.random(3)-0.2) # brightness change for each channel
rgb_color += (0.1*np.random.random(3)-0.05) # color shift for each channel
rgb_color += np.expand_dims((0.05*np.random.random(point_cloud.shape[0])-0.025), -1) # jittering on each pixel
rgb_color = np.clip(rgb_color, 0, 1)
# randomly drop out 30% of the points' colors
rgb_color *= np.expand_dims(np.random.random(point_cloud.shape[0])>0.3,-1)
point_cloud[:,3:6] = rgb_color - MEAN_COLOR_RGB
# Augment point cloud scale: 0.85x-1.15x
scale_ratio = np.random.random()*0.3+0.85
scale_ratio = np.expand_dims(np.tile(scale_ratio,3),0)
point_cloud[:,0:3] *= scale_ratio
bboxes[:,0:3] *= scale_ratio
bboxes[:,3:6] *= scale_ratio
point_votes[:,1:4] *= scale_ratio
point_votes[:,4:7] *= scale_ratio
point_votes[:,7:10] *= scale_ratio
if self.use_height:
point_cloud[:,-1] *= scale_ratio[0,0]
# ------------------------------- LABELS ------------------------------
box3d_centers = np.zeros((MAX_NUM_OBJ, 3))
box3d_sizes = np.zeros((MAX_NUM_OBJ, 3))
angle_classes = np.zeros((MAX_NUM_OBJ,))
angle_residuals = np.zeros((MAX_NUM_OBJ,))
size_classes = np.zeros((MAX_NUM_OBJ,))
size_residuals = np.zeros((MAX_NUM_OBJ, 3))
label_mask = np.zeros((MAX_NUM_OBJ))
label_mask[0:bboxes.shape[0]] = 1
max_bboxes = np.zeros((MAX_NUM_OBJ, 8))
max_bboxes[0:bboxes.shape[0],:] = bboxes
# new items
box3d_angles = np.zeros((MAX_NUM_OBJ,))
point_boundary_mask_z = np.zeros(self.num_points)
point_boundary_mask_xy = np.zeros(self.num_points)
point_boundary_offset_z = np.zeros([self.num_points, 3])
point_boundary_offset_xy = np.zeros([self.num_points, 3])
point_boundary_sem_z = np.zeros([self.num_points, 3+2+1])
point_boundary_sem_xy = np.zeros([self.num_points, 3+1+1])
point_line_mask = np.zeros(self.num_points)
point_line_offset = np.zeros([self.num_points, 3])
point_line_sem = np.zeros([self.num_points, 3+1])
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
semantic_class = bbox[7]
box3d_center = bbox[0:3]
angle_class, angle_residual = DC.angle2class(bbox[6])
# NOTE: The mean size stored in size2class is of full length of box edges,
            # while in sunrgbd_data.py data dumping we dumped *half* length l,w,h, so we have to multiply it by 2 here
box3d_size = bbox[3:6]*2
size_class, size_residual = DC.size2class(box3d_size, DC.class2type[semantic_class])
box3d_centers[i,:] = box3d_center
angle_classes[i] = angle_class
angle_residuals[i] = angle_residual
size_classes[i] = size_class
size_residuals[i] = size_residual
box3d_sizes[i,:] = box3d_size
box3d_angles[i] = bbox[6]
target_bboxes_mask = label_mask
target_bboxes = np.zeros((MAX_NUM_OBJ, 6))
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
corners_3d = sunrgbd_utils.my_compute_box_3d(bbox[0:3], bbox[3:6], bbox[6])
# compute axis aligned box
xmin = np.min(corners_3d[:,0])
ymin = np.min(corners_3d[:,1])
zmin = np.min(corners_3d[:,2])
xmax = np.max(corners_3d[:,0])
ymax = np.max(corners_3d[:,1])
zmax = np.max(corners_3d[:,2])
target_bbox = np.array([(xmin+xmax)/2, (ymin+ymax)/2, (zmin+zmax)/2, xmax-xmin, ymax-ymin, zmax-zmin])
target_bboxes[i,:] = target_bbox
point_cloud, choices = pc_util.random_sampling(point_cloud, self.num_points, return_choices=True)
semantics37 = semantics37[choices]
semantics10 = semantics10[choices]
semantics10_multi = [semantics10_multi[i] for i in choices]
point_votes_mask = point_votes[choices,0]
point_votes = point_votes[choices,1:]
# box angle is -pi to pi
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
corners = params2bbox(bbox[:3], 2 * bbox[3:6], clockwise2counter(bbox[6]))
# corners_votenet = sunrgbd_utils.my_compute_box_3d(bbox[:3], bbox[3:6], bbox[6])
try:
x_all_cls, ind_all_cls = extract_pc_in_box3d(point_cloud, corners)
except:
continue
ind_all_cls = np.where(ind_all_cls)[0] # T/F to index
# find point with same semantic as bbox, note semantics is 37 cls in sunrgbd
# ind = ind_all_cls[np.where(semantics10[ind_all_cls] == bbox[7])[0]]
ind = []
for j in ind_all_cls:
if bbox[7] in semantics10_multi[j]:
ind.append(j)
ind = np.array(ind)
if ind.shape[0] < NUM_POINT_SEM_THRESHOLD:
pass
else:
x = point_cloud[ind, :3]
###Get bb planes and boundary points
plane_lower_temp = np.array([0,0,1,-corners[6,-1]])
para_points = np.array([corners[1], corners[3], corners[5], corners[7]])
newd = np.sum(para_points * plane_lower_temp[:3], 1)
if check_upright(para_points) and plane_lower_temp[0]+plane_lower_temp[1] < LOWER_THRESH:
plane_lower = np.array([0,0,1,plane_lower_temp[-1]])
plane_upper = np.array([0,0,1,-np.mean(newd)])
else:
import pdb;pdb.set_trace()
print ("error with upright")
if check_z(plane_upper, para_points) == False:
import pdb;pdb.set_trace()
### Get the boundary points here
#alldist = np.abs(np.sum(point_cloud[:,:3]*plane_lower[:3], 1) + plane_lower[-1])
alldist = np.abs(np.sum(x*plane_lower[:3], 1) + plane_lower[-1])
mind = np.min(alldist)
#[count, val] = np.histogram(alldist, bins=20)
#mind = val[np.argmax(count)]
sel = np.abs(alldist - mind) < DIST_THRESH
#sel = (np.abs(alldist - mind) < DIST_THRESH) & (point_cloud[:,0] >= xmin) & (point_cloud[:,0] <= xmax) & (point_cloud[:,1] >= ymin) & (point_cloud[:,1] <= ymax)
## Get lower four lines
line_sel1, line_sel2, line_sel3, line_sel4 = get_linesel(x[sel], corners, 'lower')
if np.sum(line_sel1) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel1]] = 1.0
linecenter = (corners[0] + corners[2]) / 2.0
point_line_offset[ind[sel][line_sel1]] = linecenter - x[sel][line_sel1]
point_line_sem[ind[sel][line_sel1]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(line_sel2) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel2]] = 1.0
linecenter = (corners[4] + corners[6]) / 2.0
point_line_offset[ind[sel][line_sel2]] = linecenter - x[sel][line_sel2]
point_line_sem[ind[sel][line_sel2]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(line_sel3) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel3]] = 1.0
linecenter = (corners[0] + corners[4]) / 2.0
point_line_offset[ind[sel][line_sel3]] = linecenter - x[sel][line_sel3]
point_line_sem[ind[sel][line_sel3]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(line_sel4) > NUM_POINT_LINE:
point_line_mask[ind[sel][line_sel4]] = 1.0
linecenter = (corners[2] + corners[6]) / 2.0
point_line_offset[ind[sel][line_sel4]] = linecenter - x[sel][line_sel4]
point_line_sem[ind[sel][line_sel4]] = np.array([linecenter[0], linecenter[1], linecenter[2], bbox[7]])
if np.sum(sel) > NUM_POINT and np.var(alldist[sel]) < VAR_THRESH:
# center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0, np.mean(x[sel][:,2])])
center = (corners[0] + corners[6]) / 2.0
center[2] = np.mean(x[sel][:,2])
sel_global = ind[sel]
point_boundary_mask_z[sel_global] = 1.0
                    point_boundary_sem_z[sel_global] = np.array([center[0], center[1], center[2], np.linalg.norm(corners[4] - corners[0]), np.linalg.norm(corners[2] - corners[0]), bbox[7]])
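# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file):
# how this dataset is typically consumed. It assumes the remainder of
# __getitem__ (truncated above) assembles and returns the ret_dict described
# in the docstring, and '/path/to/sunrgbd' is a placeholder data root.
def _example_sunrgbd_loader(data_root='/path/to/sunrgbd'):
    from torch.utils.data import DataLoader
    dset = SunrgbdDetectionVotesDataset(
        data_path=data_root, split_set='train',
        num_points=20000, use_height=True, augment=True)
    loader = DataLoader(dset, batch_size=8, shuffle=True, num_workers=4)
    for batch in loader:
        print(batch['point_clouds'].shape)  # e.g. (8, 20000, 4) with use_height=True
        break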
#!/usr/bin/python
# todo -- simplify the total energy read in, the kinetic energy read-in, temperature read-in
#=========================================================
#IMPORTS
#=========================================================
from __future__ import print_function
import sys
import numpy
import pymbar # for MBAR analysis
from pymbar import timeseries # for timeseries analysis
import os
import os.path
import optparse
from optparse import OptionParser
#===================================================================================================
# INPUT PARAMETERS
#===================================================================================================
parser = OptionParser()
parser.add_option("-d", "--directory", dest="simulation", default = "energydata",
help="the directory of the energies we care about")
parser.add_option("-b", "--nbootstraps", dest="nBoots",type = int, default=0,
help="Number of bootstrap samples taken")
parser.add_option("-s", "--spacing", dest="NumIntermediates",type = "int", default = 200,
help="Number of intermediate simulations used to calculate finite differences (default 200)")
parser.add_option("-f", "--finitedifftype", dest="dertype", default="temperature",
help="the type of finite difference energy, choice is \"temperature\" or \"beta\" [default = %default]")
parser.add_option("-r", "--randomseed", dest="rseed", type=int, default=None,
help="random seed for bootstraping [default = %default]")
(options, args) = parser.parse_args()
simulation = options.simulation
nBoots = options.nBoots
NumIntermediates = options.NumIntermediates
dertype = options.dertype
rseed = options.rseed
#========================================================
# CONSTANTS
#========================================================
kB = 0.008314462 #Boltzmann constant (Gas constant) in kJ/(mol*K)
TE_COL_NUM = 11 #The column number of the total energy in ener_box#.output
NumTemps = 16 # Last TEMP # + 1 (start counting at 1)
NumIterations = 1000 # The number of energies to be taken and analyzed, starting from the last
# Extra data will be ignored
if (dertype == 'temperature'): # if the temperatures are equally spaced
types = ['var','dT','ddT']
elif (dertype == 'beta'): # if the inverse temperatures are equally spaced.
types = ['var','dbeta','ddbeta']
else:
print('type of finite difference not recognized must be \'beta\' or \'temperature\'')
quit()
ntypes = len(types)
numpy.random.seed(rseed) # seed the random numbers
###########################################################
# For Cv vs T
# _____
# Cv / \ <-- what we expect the graph to look like
# ____________/ \____________
# T
############################################################
#=========================================================
# SUBROUTINES
#=========================================================
def read_total_energies(pathname,colnum):
"""Reads in the TEMP#/ener_box#.output file and parses it, returning an array of energies
ARGUMENTS
filename (string) - the path to the folder of the simulation
colnum (integer) column the energy is found in
"""
print("--Reading total energies from %s/..." % pathname)
# Initialize Return variables
E_kn = numpy.zeros([NumTemps, NumIterations], numpy.float64)
#Read files
for k in range(NumTemps):
#Construct each TEMP#/ener_box#.output name and read in the file
filename = os.path.join(pathname,'TEMP' + str(k), 'ener_box'+ str(k) + '.output')
infile = open(filename, 'r')
lines = infile.readlines()
infile.close()
numLines = len(lines)
#Initialize arrays for E
E_from_file = numpy.zeros(NumIterations, numpy.float64)
#Parse lines in each file
for n in range(NumIterations):
            m = numLines - 2 - n # count down (subtract 1 for zero-based indexing and 1 to skip the double-counted last line)
elements = lines[m].split()
E_from_file[n] = float(elements[colnum])
#Add in the E's for each timestep (n) at this temperature (k)
E_kn[k] = E_from_file;
return E_kn
def read_simulation_temps(pathname,NumTemps):
"""Reads in the various temperatures from each TEMP#/simul.output file by knowing
beforehand the total number of temperatures (parameter at top)
"""
print("--Reading temperatures from %s/..." % pathname)
# Initialize return variable
temps_from_file = numpy.zeros(NumTemps, numpy.float64)
for k in range(NumTemps):
infile = open(os.path.join(pathname,'TEMP'+ str(k), 'simul'+str(k)+'.output'), 'r')
lines = infile.readlines()
infile.close()
for line in lines:
if (line[0:11] == 'Temperature'):
vals = line.split(':')
break
temps_from_file[k] = float(vals[1])
return temps_from_file
def PrintResults(string,E,dE,Cv,dCv,types):
print(string)
print("Temperature dA <E> +/- d<E> ", end=' ')
for t in types:
print(" Cv +/- dCv (%s)" % (t), end=' ')
print("")
print("------------------------------------------------------------------------------------------------------")
for k in range(originalK,K):
print("%8.3f %8.3f %9.3f +/- %5.3f" % (Temp_k[k],mbar.f_k[k]/beta_k[k],E[k],dE[k]), end=' ')
for i in range(len(types)):
if Cv[k,i,0] < -100000.0:
print(" N/A ", end=' ')
else:
print(" %7.4f +/- %6.4f" % (Cv[k,i,0],dCv[k,i]), end=' ')
print("")
#========================================================================
# MAIN
#========================================================================
#------------------------------------------------------------------------
# Read Data From File
#------------------------------------------------------------------------
print("")
print("Preparing data:")
T_from_file = read_simulation_temps(simulation,NumTemps)
E_from_file = read_total_energies(simulation,TE_COL_NUM)
K = len(T_from_file)
N_k = numpy.zeros(K,numpy.int32)
g = numpy.zeros(K,numpy.float64)
for k in range(K): # subsample the energies
g[k] = timeseries.statisticalInefficiency(E_from_file[k])
indices = numpy.array(timeseries.subsampleCorrelatedData(E_from_file[k],g=g[k])) # indices of uncorrelated samples
N_k[k] = len(indices) # number of uncorrelated samples
E_from_file[k,0:N_k[k]] = E_from_file[k,indices]
#------------------------------------------------------------------------
# Insert Intermediate T's and corresponding blank U's and E's
#------------------------------------------------------------------------
Temp_k = T_from_file
minT = T_from_file[0]
maxT = T_from_file[len(T_from_file) - 1]
#beta = 1/(kB*T)
#T = 1/(kB*beta)
if dertype == 'temperature':
minv = minT
maxv = maxT
elif dertype == 'beta': # actually going in the opposite direction as beta for logistical reasons
minv = 1/(kB*minT)
maxv = 1/(kB*maxT)
delta = (maxv-minv)/(NumIntermediates-1)
originalK = len(Temp_k)
print("--Adding intermediate temperatures...")
val_k = []
currentv = minv
if dertype == 'temperature':
# Loop, inserting equally spaced T's at which we are interested in the properties
while (currentv <= maxv):
val_k = numpy.append(val_k, currentv)
currentv = currentv + delta
Temp_k = numpy.concatenate((Temp_k,numpy.array(val_k)))
elif dertype == 'beta':
# Loop, inserting equally spaced T's at which we are interested in the properties
while (currentv >= maxv):
val_k = numpy.append(val_k, currentv)
currentv = currentv + delta
Temp_k = numpy.concatenate((Temp_k,(1/(kB*numpy.array(val_k)))))
# Update number of states
K = len(Temp_k)
# Loop, inserting E's into blank matrix (leaving blanks only where new Ts are inserted)
Nall_k = numpy.zeros([K], numpy.int32) # Number of samples (n) for each state (k) = number of iterations/energies
E_kn = numpy.zeros([K, NumIterations], numpy.float64)
for k in range(originalK):
E_kn[k,0:N_k[k]] = E_from_file[k,0:N_k[k]]
Nall_k[k] = N_k[k]
#------------------------------------------------------------------------
# Compute inverse temperatures
#------------------------------------------------------------------------
beta_k = 1 / (kB * Temp_k)
#------------------------------------------------------------------------
# Compute reduced potential energies
#------------------------------------------------------------------------
print("--Computing reduced energies...")
u_kln = numpy.zeros([K,K,NumIterations], numpy.float64) # u_kln is reduced pot. ener. of segment n of temp k evaluated at temp l
E_kn_samp = numpy.zeros([K,NumIterations], numpy.float64) # E_kn_samp holds the (possibly bootstrap-resampled) energies of segment n at temp k
nBoots_work = nBoots + 1 # we add +1 to the bootstrap number, as the zeroth bootstrap sample is the original
allCv_expect = numpy.zeros([K,ntypes,nBoots_work], numpy.float64)
dCv_expect = numpy.zeros([K,ntypes],numpy.float64)
allE_expect = numpy.zeros([K,nBoots_work], numpy.float64)
allE2_expect = numpy.zeros([K,nBoots_work], numpy.float64)
dE_expect = numpy.zeros([K],numpy.float64)
for n in range(nBoots_work):
if (n > 0):
print("Bootstrap: %d/%d" % (n,nBoots))
for k in range(K):
# resample the results:
if Nall_k[k] > 0:
if (n == 0): # don't randomize the first one
booti = numpy.array(range(N_k[k]))
else:
booti=numpy.random.randint(Nall_k[k],size=Nall_k[k])
E_kn_samp[k,0:Nall_k[k]] = E_kn[k,booti]
for k in range(K):
for l in range(K):
u_kln[k,l,0:Nall_k[k]] = beta_k[l] * E_kn_samp[k,0:Nall_k[k]]
#------------------------------------------------------------------------
# Initialize MBAR
#------------------------------------------------------------------------
# Initialize MBAR with Newton-Raphson
if (n==0): # only print this information the first time
print("")
print("Initializing MBAR:")
print("--K = number of Temperatures with data = %d" % (originalK))
print("--L = number of total Temperatures = %d" % (K))
print("--N = number of Energies per Temperature = %d" % (numpy.max(Nall_k)))
if (n==0):
initial_f_k = None # start from zero
else:
initial_f_k = mbar.f_k # start from the previous final free energies to speed convergence
mbar = pymbar.MBAR(u_kln, Nall_k, verbose=False, relative_tolerance=1e-12, initial_f_k=initial_f_k)
#------------------------------------------------------------------------
# Compute Expectations for E_kt and E2_kt as E_expect and E2_expect
#------------------------------------------------------------------------
print("")
print("Computing Expectations for E...")
E_kln = u_kln # not a copy, we are going to write over it, but we don't need it any more.
for k in range(K):
E_kln[:,k,:]*=beta_k[k]**(-1) # get the 'unreduced' potential -- we can't take differences of reduced potentials because the beta is different; math is much more confusing with derivatives of the reduced potentials.
results = mbar.computeExpectations(E_kln, state_dependent = True, return_dict=True)
E_expect = results['mu']
dE_expect = results['sigma']
allE_expect[:,n] = E_expect[:]
# expectations for the differences, which we need for numerical derivatives
results = mbar.computeExpectations(E_kln,output='differences', state_dependent = True, return_dict=True)
DeltaE_expect = results['mu']
dDeltaE_expect = results['sigma']
print("Computing Expectations for E^2...")
results = mbar.computeExpectations(E_kln**2, state_dependent = True, return_dict=True)
E2_expect = results['mu']
dE2_expect = results['sigma']
allE2_expect[:,n] = E2_expect[:]
results = mbar.getFreeEnergyDifferences(return_dict=True)
df_ij = results['Delta_f']
ddf_ij = results['dDelta_f']
#------------------------------------------------------------------------
# Compute Cv for NVT simulations as <E^2> - <E>^2 / (RT^2)
#------------------------------------------------------------------------
if (n==0):
print("")
print("Computing Heat Capacity as ( <E^2> - <E>^2 ) / ( R*T^2 ) and as d<E>/dT")
# Problem is that we don't have a good uncertainty estimate for the variance.
# Try a silly trick: but it doesn't work super well.
    # An estimator of the variance of the standard estimator of the variance is
# var(sigma^2) = (sigma^4)*[2/(n-1)+kurt/n]. If we assume the kurtosis is low
# (which it will be for sufficiently many samples), then we can say that
# d(sigma^2) = sigma^2 sqrt[2/(n-1)].
# However, dE_expect**2 is already an estimator of sigma^2/(n-1)
    # Cv = sigma^2/kT^2, so d(Cv) = d(sigma^2)/kT^2 = sigma^2*sqrt(2/(n-1))/kT^2
# we just need an estimate of n-1, but we can try to get that by var(dE)/dE_expect**2
# it's within 50% or so, but that's not good enough.
allCv_expect[:,0,n] = (E2_expect - (E_expect*E_expect)) / ( kB * Temp_k**2)
####################################
# C_v by fluctuation formula
####################################
#Cv = (A - B^2) / (kT^2)
# d2(Cv) = [1/(kT^2)]^2 [(dCv/dA)^2*d2A + 2*dCv*(dCv/dA)*(dCv/dB)*dAdB + (dCv/dB)^2*d2B]
# = [1/(kT^2)]^2 [d2A - 4*B*dAdB + 4*B^2*d2B]
    # But this formula is not working for uncertainties!
if (n==0):
N_eff = (E2_expect - (E_expect*E_expect))/dE_expect**2 # sigma^2 / (sigma^2/n) = effective number of samples
dCv_expect[:,0] = allCv_expect[:,0,n]*numpy.sqrt(2/N_eff)
    # only loop over the inserted temperatures that will be plotted, not the original sampled ones
for i in range(originalK, K):
        # Now, calculate heat capacity by T-differences
im = i-1
ip = i+1
if (i==originalK):
im = originalK
if (i==K-1):
ip = i
####################################
# C_v by first derivative of energy
####################################
if (dertype == 'temperature'): # temperature derivative
# C_v = d<E>/dT
allCv_expect[i,1,n] = (DeltaE_expect[im,ip])/(Temp_k[ip]-Temp_k[im])
if (n==0):
dCv_expect[i,1] = (dDeltaE_expect[im,ip])/(Temp_k[ip]-Temp_k[im])
elif (dertype == 'beta'): # beta derivative
            # Cv = d<E>/dT = (dbeta/dT) d<E>/dbeta = -kB*T^(-2) d<E>/dbeta = -kB*beta^2 d<E>/dbeta
allCv_expect[i,1,n] = kB * beta_k[i]**2 * (DeltaE_expect[ip,im])/(beta_k[ip]-beta_k[im])
if (n==0):
dCv_expect[i,1] = -kB * beta_k[i]**2 *(dDeltaE_expect[ip,im])/(beta_k[ip]-beta_k[im])
####################################
# C_v by second derivative of free energy
####################################
if (dertype == 'temperature'):
# C_v = d<E>/dT = d/dT k_B T^2 df/dT = 2*T*df/dT + T^2*d^2f/dT^2
if (i==originalK) or (i==K-1):
# We can't calculate this, set a number that will be printed as NAN
allCv_expect[i,2,n] = -10000000.0
else:
allCv_expect[i,2,n] = kB*Temp_k[i]*(2*df_ij[ip,im]/(Temp_k[ip]-Temp_k[im]) +
Temp_k[i]*(df_ij[ip,i]-df_ij[i,im])/
((Temp_k[ip]-Temp_k[im])/(ip-im))**2)
if (n==0):
# Previous work to calculate the uncertainty commented out, should be cleaned up eventually
# all_Cv_expect[i,2,n] = kB*Temp_k[i]*(2*df_ij[ip,i]+df_ij[i,im]/(Temp_k[ip]-Temp_k[im]) + Temp_k[i]*(df_ij[ip,i]-df_ij[i,im])/(Temp_k[ip]-Temp_k[i])**2)
#all_Cv_expect[i,2,n] = kB*([2*Temp_k[i]/(Temp_k[ip]-Temp_k[im]) + Temp_k[i]**2/(Temp_k[ip]-Temp_k[i])**2]*df_ij[ip,i] + [2*Temp_k[i]/(Temp_k[ip]-Temp_k[im]) - Temp_k[i]**2/(Temp_k[ip]-Temp_k[i])**2]) df_ij[i,im]
#all_Cv_expect[i,2,n] = kB*(A df_ij[ip,i] + B df_ij[i,im]
A = 2*Temp_k[i]/(Temp_k[ip]-Temp_k[im]) + 4*Temp_k[i]**2/(Temp_k[ip]-Temp_k[im])**2
B = 2*Temp_k[i]/(Temp_k[ip]-Temp_k[im]) + 4*Temp_k[i]**2/(Temp_k[ip]-Temp_k[im])**2
#dCv_expect[i,2,n] = kB* [(A ddf_ij[ip,i])**2 + (B sdf_ij[i,im])**2 + 2*A*B*cov(df_ij[ip,i],df_ij[i,im])
# This isn't it either: need to figure out that last term.
dCv_expect[i,2] = kB*((A*ddf_ij[ip,i])**2 + (B*ddf_ij[i,im])**2)
# Would need to add function computing covariance of DDG, (A-B)-(C-D)
elif (dertype == 'beta'):
# if beta is evenly spaced, rather than t, we can do 2nd derivative in beta
# C_v = d<E>/dT = d/dT (df/dbeta) = dbeta/dT d/dbeta (df/dbeta) = -k_b beta^2 df^2/d^2beta
if (i==originalK) or (i==K-1):
#Flag as N/A -- we don't try to compute at the endpoints for now
allCv_expect[i,2,n] = -10000000.0
else:
allCv_expect[i,2,n] = kB * beta_k[i]**2 *(df_ij[ip,i]-df_ij[i,im])/((beta_k[ip]-beta_k[im])/(ip-im))**2
if (n==0):
dCv_expect[i,2] = kB*(beta_k[i])**2 * (ddf_ij[ip,i]-ddf_ij[i,im])/((beta_k[ip]-beta_k[im])/(ip-im))**2
# also wrong, need to be fixed.
if (n==0):
print('WARNING: only the first derivative (dT) analytic error estimates can currently be trusted.')
print('They are the only ones reasonably close to bootstrap, within 10-15% at all T.')
print('')
PrintResults("Analytic Error Estimates",E_expect,dE_expect,allCv_expect,dCv_expect,types)
if nBoots > 0:
Cv_boot = numpy.zeros([K,ntypes],float)
dCv_boot = numpy.zeros([K,ntypes],float)
    dE_boot = numpy.zeros([K])
#coding=utf-8
'''
infer module
'''
import sys
caffe_path = '../caffe/python/'
#caffe_path = '/root/caffe/python/'
sys.path.insert(0, caffe_path)
import caffe
caffe.set_device(0)
caffe.set_mode_gpu()
from caffe.proto import caffe_pb2
from google.protobuf import text_format
import numpy as np
#import cv2
'''
prepare caffemodel proto labelmap etc.
'''
root_googlenet = '../model/'
deploy_googlenet = root_googlenet + 'deploy-googlenet.prototxt'
#labels_filename = root_googlenet + 'labels.txt'
caffe_model_googlenet = root_googlenet + 'googlenet.caffemodel'
googlenet = caffe.Net(deploy_googlenet, caffe_model_googlenet, caffe.TEST)
# labels = np.loadtxt(labels_filename, str, delimiter='\t')
root_alexnet = root_googlenet
#deploy_alexnet = root_alexnet + 'deploy-alex.prototxt'
labels_filename = root_alexnet + 'labels.txt'
#caffe_model_alexnet = root_alexnet + 'snapshot_iter_992.caffemodel'
#alexnet = caffe.Net(deploy_alexnet, caffe_model_alexnet, caffe.TEST)
'''
define the inference function (currently GoogLeNet only; AlexNet/SENet loading is commented out above)
the outputs are the probability (score) and the class label, respectively
'''
def infer_img(googlenet, url):
transformer = caffe.io.Transformer({'data': googlenet.blobs['data'].data.shape})
transformer.set_transpose('data', (2, 0, 1))
transformer.set_raw_scale('data', 255)
transformer.set_channel_swap('data', (2,1,0))
labels = np.loadtxt(labels_filename, str, delimiter='\t')
# googlenet.blobs['data'].data[...] = transformer.preprocess('data', tmp)
# googlenet.forward()
# prob_googlenet = googlenet.blobs['softmax'].data[0].flatten()
# order_googlenet = prob_googlenet.argsort()[-1]
# score_googlenet = np.max(prob_googlenet)
# labels_googlenet = labels[order_googlenet]
image = caffe.io.load_image(url)
googlenet.blobs['data'].data[...] = transformer.preprocess('data', image)
googlenet.forward()
prob_googlenet = googlenet.blobs['softmax'].data[0].flatten()
order_googlenet = prob_googlenet.argsort()[-1]
    score_googlenet = np.max(prob_googlenet)
    labels_googlenet = labels[order_googlenet]
    return score_googlenet, labels_googlenet
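'''
hedged usage sketch (added for illustration): calling infer_img on a single image.
the image path below is a placeholder assumption; infer_img as completed above
returns the top-1 GoogLeNet (score, label) pair.
'''
if __name__ == '__main__':
    test_url = '../data/test.jpg' # hypothetical test image path
    score, label = infer_img(googlenet, test_url)
    print('predicted label: %s, score: %.4f' % (label, score))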
import argparse
from absl import flags, app
from absl.flags import FLAGS
import cv2
import os
import shutil
import numpy as np
import tensorflow as tf
import core.utils as utils
from ciou import nmsDiou
from core.config import cfg
from core.yolov4 import YOLOv4, YOLOv3, YOLOv3_tiny, decode
from os.path import join
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
#--weights "prova" \ -m "iou"
flags.DEFINE_string('annotation_path', cfg.TEST.ANNOT_PATH, 'annotation path')
flags.DEFINE_string('write_image_path', "./data/detection/", 'write image path')
flags.DEFINE_string('intersectionMethod', "iou", 'box intersection method: iou or diou')
flags.DEFINE_string('weights', "iou", 'weights path')
flags.DEFINE_string('size', '608', 'input image size')
flags.DEFINE_string('model', 'yolov4', 'yolov4, yolov3 or yolov3-tiny')
flags.DEFINE_boolean('tiny', False, 'yolo or yolo-tiny')
flags.DEFINE_string('framework', 'tf', '(tf, tflite')
#flags.DEFINE_float('thresh', 0.0, 'write image path')
one_class=cfg.TEST.Oneclass
print_image=cfg.TEST.PrintImage
"""
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--weights', default="none",type=str,help="weights path")
parser.add_argument('-m','--intersectionMethod', default="none",type=str, help="diou or iou")
args, unknown = parser.parse_known_args()
"""
def main(_argv):
#cfg.TEST.SCORE_THRESHOLD=FLAGS.thresh
print(cfg.TEST.SCORE_THRESHOLD.__str__())
INPUT_SIZE = int(FLAGS.size)
#cfg.TEST.IntersectionMethod=args.method
if FLAGS.tiny:
STRIDES = np.array(cfg.YOLO.STRIDES_TINY)
ANCHORS = utils.get_anchors(cfg.YOLO.ANCHORS_TINY, FLAGS.tiny)
else:
STRIDES = np.array(cfg.YOLO.STRIDES)
ANCHORS = utils.get_anchors(cfg.YOLO.ANCHORS, FLAGS.tiny)
NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
CLASSES = utils.read_class_names(cfg.YOLO.CLASSES)
predicted_dir_path = './mAP/predicted'
ground_truth_dir_path = './mAP/ground-truth'
images_dir_path = './mAP/images'
if os.path.exists(predicted_dir_path): shutil.rmtree(predicted_dir_path)
if os.path.exists(ground_truth_dir_path): shutil.rmtree(ground_truth_dir_path)
#if os.path.exists(cfg.TEST.DECTECTED_IMAGE_PATH): shutil.rmtree(cfg.TEST.DECTECTED_IMAGE_PATH)
#if os.path.exists(cfg.TEST.GT_IMAGE_PATH): shutil.rmtree(cfg.TEST.GT_IMAGE_PATH)
if os.path.exists(images_dir_path): shutil.rmtree(images_dir_path)
os.makedirs(predicted_dir_path)
os.makedirs(ground_truth_dir_path)
#cfg.TEST.DECTECTED_IMAGE_PATH=cfg.TEST.DECTECTED_IMAGE_PATH + cfg.TEST.SCORE_THRESHOLD.__str__()
#cfg.TEST.GT_IMAGE_PATH=cfg.TEST.GT_IMAGE_PATH + cfg.TEST.SCORE_THRESHOLD.__str__()
if not os.path.exists(cfg.TEST.DECTECTED_IMAGE_PATH):
os.mkdir(cfg.TEST.DECTECTED_IMAGE_PATH)
if not os.path.exists(cfg.TEST.GT_IMAGE_PATH):
os.mkdir(cfg.TEST.GT_IMAGE_PATH)
os.mkdir(images_dir_path)
# Build Model
if FLAGS.framework == 'tf':
input_layer = tf.keras.layers.Input([int(INPUT_SIZE), int(INPUT_SIZE), 3])
if FLAGS.tiny:
feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
bbox_tensors = []
for i, fm in enumerate(feature_maps):
bbox_tensor = decode(fm, NUM_CLASS, i)
bbox_tensors.append(bbox_tensor)
model = tf.keras.Model(input_layer, bbox_tensors)
utils.load_weights_tiny(model, FLAGS.weights)
else:
if FLAGS.model == 'yolov3':
feature_maps = YOLOv3(input_layer, NUM_CLASS)
bbox_tensors = []
for i, fm in enumerate(feature_maps):
bbox_tensor = decode(fm, NUM_CLASS, i)
bbox_tensors.append(bbox_tensor)
model = tf.keras.Model(input_layer, bbox_tensors)
utils.load_weights_v3(model, FLAGS.weights)
elif FLAGS.model == 'yolov4':
feature_maps = YOLOv4(input_layer, NUM_CLASS)
bbox_tensors = []
for i, fm in enumerate(feature_maps):
bbox_tensor = decode(fm, NUM_CLASS, i)
bbox_tensors.append(bbox_tensor)
model = tf.keras.Model(input_layer, bbox_tensors)
utils.load_weights(model, FLAGS.weights)
else:
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path=FLAGS.weights)
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print(input_details)
print(output_details)
num_lines = sum(1 for line in open(FLAGS.annotation_path))
with open(cfg.TEST.ANNOT_PATH, 'r') as annotation_file:
for num, line in enumerate(annotation_file):
#annotation = line.strip().split()
annotation=get_bb_list(line)
image_path = line.replace("\n","")
image_name = image_path.split('/')[-1]
shutil.copy(image_path, join(images_dir_path,image_name))
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
bbox_data_gt = np.array([list(map(int, box.split(','))) for box in annotation])
if len(bbox_data_gt) == 0:
bboxes_gt = []
classes_gt = []
else:
bboxes_gt, classes_gt = bbox_data_gt[:, :4], bbox_data_gt[:, 4]
ground_truth_path = os.path.join(ground_truth_dir_path, str(image_name.split(".")[-2]) + '.txt')
#print('=> ground truth of %s:' % image_name)
num_bbox_gt = len(bboxes_gt)
with open(ground_truth_path, 'w') as f:
for i in range(num_bbox_gt):
class_name = CLASSES[classes_gt[i]]
xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i]))
bbox_mess = ' '.join([class_name, xmin, ymin, xmax, ymax]) + '\n'
f.write(bbox_mess)
#print('\t' + str(bbox_mess).strip())
#print('=> predict result of %s:' % image_name)
predict_result_path = os.path.join(predicted_dir_path, str(image_name.split(".")[-2]) + '.txt')
# Predict Process
image_size = image.shape[:2]
image_data = utils.image_preporcess(np.copy(image), [INPUT_SIZE, INPUT_SIZE])
image_data = image_data[np.newaxis, ...].astype(np.float32)
if FLAGS.framework == "tf":
pred_bbox = model.predict(image_data)
else:
interpreter.set_tensor(input_details[0]['index'], image_data)
interpreter.invoke()
pred_bbox = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
if FLAGS.model == 'yolov3':
pred_bbox = utils.postprocess_bbbox(pred_bbox, ANCHORS, STRIDES)
elif FLAGS.model == 'yolov4':
XYSCALE = cfg.YOLO.XYSCALE
pred_bbox = utils.postprocess_bbbox(pred_bbox, ANCHORS, STRIDES, XYSCALE=XYSCALE)
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = utils.postprocess_boxes(pred_bbox, image_size, INPUT_SIZE, cfg.TEST.SCORE_THRESHOLD)
if cfg.TEST.IntersectionMethod=="diou":
bboxes = nmsDiou(bboxes, cfg.TEST.DIOU_NMS_THRESHOLD, method=cfg.TEST.IntersectionMethod)
else:
bboxes = utils.nms(bboxes, cfg.TEST.IOU_NMS_THRESHOLD, method="nms")
if cfg.TEST.DECTECTED_IMAGE_PATH is not None:
                image2 = utils.draw_bbox(np.copy(image), bboxes)
import random
from os import path as osp
import glob
import numpy as np
import dataops.common as util
from dataops.common import fix_img_channels, get_image_paths, read_img, np2tensor
from dataops.debug import *
from dataops.imresize import resize as imresize # resize # imresize_np
from dataops.augmennt.augmennt.common import wrap_cv2_function, wrap_pil_function, _cv2_interpolation2str
from torch.utils.data.dataset import Dataset # TODO TMP, move NoisePatches to a separate dataloader
try:
from PIL import Image
pil_available = True
except ImportError:
pil_available = False
# pass
try:
import cv2
cv2_available = True
except ImportError:
cv2_available = False
def set_transforms(loader_type=None):
if not hasattr(set_transforms, 'loader_type') or set_transforms.loader_type != loader_type:
global transforms
if loader_type == 'pil' and pil_available:
import torchvision.transforms as transforms
elif cv2_available:
import dataops.augmennt.augmennt as transforms
else:
            raise Exception("No suitable image loader available. Need either PIL or OpenCV.")
set_transforms.loader_type = loader_type
transforms = None
set_transforms()
custom_ktypes = {
794:'blackman5', 793:'blackman4', 792:'blackman3',
791:'blackman2', 790:'sinc5', 789:'sinc4', 788:'sinc3',
787:'sinc2', 786:'gaussian', 785:'hamming', 784:'hanning',
783:'catrom', 782:'bell', 781:'lanczos5', 780:'lanczos4',
779:'hermite', 778:'mitchell', 777:'cubic', 776:'lanczos3',
775:'lanczos2', 774:'box', 773:'linear',}
def adj_scale_config(scale=None, resize_type:int=None,
res_config:dict=None):
or_scale = False
if res_config.get('ada_scale'):
or_scale = scale
if resize_type == 999:
ds_algo = [777, 773, cv2.INTER_AREA]
resize_type = get_weighted_choice(ds_algo)[0]
scale = -(-scale//2)
or_scale = False
elif resize_type in [0, 774, 997]:
scale = -(-scale//2)
or_scale = False
if resize_type == 998:
# down_up
scale = random.uniform(-(-scale//2), scale)
du_algos = res_config['down_up_types']
du_min = res_config['down_up_min']
resize_type = get_weighted_choice(du_algos)[0]
a_scale = random.uniform(du_min, scale)
scale = scale / a_scale
elif resize_type in [0, 774, 997]:
# nearest
scale = random.choice([-(-scale//2), scale])
elif resize_type != 999:
prob = res_config.get('resize_prob')
# sc_dir = None
# if prob:
# sc_dir = get_weighted_choice(prob)[0]
sc_dir = get_weighted_choice(prob)[0] if prob else 'down'
if sc_dir:
sc_range = None
if sc_dir == 'up':
sc_range = res_config.get('resize_range_up')
elif sc_dir == 'down':
# sc_range = res_config.get('resize_range_down')
sc_range = res_config.get(
'resize_range_down', [1/scale, 2/scale])
else:
scale = 1
if sc_range:
sc_fact = random.uniform(*sc_range)
scale = 1 / sc_fact
if or_scale and scale > 1:
# scale /= or_scale
scale = max(1, scale/or_scale)
return scale, resize_type
class Scale_class:
def __init__(self, size=None, scale=None,
algo=None, ds_kernel=None, resize_type=None,
img_type=None, res_config=None):
if res_config: # and scale !=1:
algo = None
scale, resize_type = adj_scale_config(
scale, resize_type, res_config)
if ((isinstance(algo, list) and 998 in algo) or
(isinstance(algo, int) and algo == 998)) and not res_config:
algo = [777, 773, cv2.INTER_AREA]
self.scale = scale
self.size = size
self.resize_fn, self.resize_type = get_resize(
size=size, scale=scale, ds_algo=algo, ds_kernel=ds_kernel,
resize_type=resize_type, img_type=img_type)
def get_resize_type(self):
return self.resize_type
def __call__(self, img):
return self.resize_fn(img)
def get_scale(self):
return self.scale
def get_size(self):
return self.size
def __repr__(self):
return (self.__class__.__name__ +
f'(type={self.resize_type}, '
f'scale={self.scale}, '
f'size={self.size})')
def Scale(img=None, scale=None, algo=None,
ds_kernel=None, resize_type=None, img_type=None):
""" Simple temporary interface to maintain existing functionality
using the new Scale_class. Will be deprecated in the future.
"""
sc = Scale_class(scale=scale, algo=algo, ds_kernel=ds_kernel,
resize_type=resize_type, img_type=img_type)
return sc(img), sc.get_resize_type()
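# Hedged usage sketch (illustrative, not part of the original module): driving
# Scale_class directly to produce an LR patch from an HR patch. The random HR
# array and the 4x scale are assumptions; 777 maps to the 'cubic' entry in
# custom_ktypes above, so this goes through the Matlab-like MLResize path.
def _example_scale_class_usage():
    hr = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # fake HR patch
    resizer = Scale_class(scale=4, algo=777, img_type='cv2')
    lr = resizer(hr)  # ~ (16, 16, 3)
    return lr, resizer.get_resize_type()  # resize_type == 777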
class MLResize:
"""Abstraction interface for resizing images to the given scale
using the transforms backend or the Matlab-like imresize algorithms.
(warning: the latter is ~an order of magnitude slower than OpenCV).
Args:
scale: Desired amount to scale the image. (>1 is downscale)
size: To use if setting a specific size to resize to.
antialiasing: Whether to apply an antialiasing (only on 'ml').
interpolation: Desired interpolation. Default is
"cubic" interpolation, other options are: "lanczos2",
"lanczos3", "box", "linear", "mitchell", etc.
kind: use the 'transforms' backend or 'ml' matlab-like imresize.
"""
def __init__(self, scale, size=None, antialiasing:bool=True,
interpolation:str='cubic', kind:str='ml'):
self.scale = scale
self.out_shape = size # (h, w)
self.interpolation = interpolation
self.antialiasing = antialiasing
self.kind = kind
def __call__(self, img:np.ndarray) -> np.ndarray:
"""
Args:
img: Image to be scaled.
Returns:
Rescaled image.
"""
if self.scale == 1:
return img
if self.out_shape:
ow, oh = image_size(img)
if ow == self.out_shape[1] and oh == self.out_shape[0]:
return img
if len(self.out_shape) < 3:
self.out_shape = self.out_shape + (image_channels(img),)
if self.kind == 'transforms':
if self.out_shape:
return resize(
np.copy(img),
w=self.out_shape[1], h=self.out_shape[0],
method=self.interpolation)
return scale_(
np.copy(img), self.scale, method=self.interpolation)
scale = None if self.out_shape else 1/self.scale
# return imresize_np(
# np.copy(img), scale=scale, antialiasing=self.antialiasing, interpolation=self.interpolation)
return imresize(
np.copy(img), scale, out_shape=self.out_shape,
antialiasing=self.antialiasing, interpolation=self.interpolation)
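# Hedged sketch (illustrative): MLResize on its own with one of the Matlab-like
# kernels. The zero image is an assumption; with out_shape unset, scale=2 means
# a 2x downscale, matching the class docstring convention (>1 is downscale).
def _example_mlresize_usage():
    img = np.zeros((32, 48, 3), dtype=np.float32)
    halver = MLResize(scale=2, interpolation='lanczos3', antialiasing=True)
    return halver(img)  # ~ (16, 24, 3)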
def get_resize(size=None, scale=None, ds_algo=None,
ds_kernel=None, resize_type=None, img_type=None):
resize_fn = None
if not resize_type:
if not ds_algo:
# scaling interpolation options
# ds_algo = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC,
# cv2.INTER_AREA, cv2.INTER_LANCZOS4, cv2.INTER_LINEAR_EXACT]
ds_algo = [777, 773, cv2.INTER_AREA]
if isinstance(ds_algo, int):
resize_type = ds_algo
else:
# resize_type = random.choice(ds_algo)
resize_type = get_weighted_choice(ds_algo)[0]
# else(!isinstance(resize_types, list)):
# Error, unexpected type
if img_type == 'pil':
# override
# TODO: pil images will only use default method, not 'algo' yet
resize_type = -1
if resize_type in set(custom_ktypes.keys()):
# use custom scaling methods
resize_fn = MLResize(
size=size, scale=scale,
interpolation=custom_ktypes[resize_type])
elif resize_type == 997:
# use nearest_aligned downscale
resize_fn = transforms.AlignedDownsample(p=1, scale=scale)
elif resize_type == 999:
# use realistic downscale kernels
if ds_kernel:
resize_fn = ds_kernel
else:
# use the provided OpenCV2 or default PIL methods
if img_type == 'pil':
interpolation = get_default_imethod('pil')
else:
interpolation=_cv2_interpolation2str.get(resize_type, 'BICUBIC')
resize_fn = MLResize(
size=size, scale=scale,
interpolation=interpolation, kind='transforms')
return resize_fn, resize_type
def get_blur(blur_types, blur_config):
blur = None
blur_type = get_weighted_choice(blur_types)[0]
if blur_type:
if blur_type == 'average':
blur = transforms.RandomAverageBlur(**blur_config)
elif blur_type == 'box':
blur = transforms.RandomBoxBlur(**blur_config)
elif blur_type == 'gaussian':
blur = transforms.RandomGaussianBlur(**blur_config)
elif blur_type == 'median':
blur = transforms.RandomMedianBlur(**blur_config)
elif blur_type == 'bilateral':
blur = transforms.RandomBilateralBlur(**blur_config)
elif blur_type == 'motion':
blur = transforms.RandomMotionBlur(**blur_config)
elif blur_type == 'complexmotion':
blur = transforms.RandomComplexMotionBlur(**blur_config)
elif blur_type == 'iso':
blur = transforms.RandomAnIsoBlur(**blur_config)
elif blur_type == 'aniso':
blur = transforms.RandomAnIsoBlur(**blur_config)
elif blur_type == 'sinc':
blur = transforms.RandomSincBlur(**blur_config)
# elif blur_type == 'clean':
return blur
def get_noise(noise_types: list, noise_patches=None, noise_config=None):
noise = None
if noise_types:
noise_type = get_weighted_choice(noise_types)[0]
if 'dither' in noise_type:
if ('fs' in noise_type and 'bw' not in noise_type) or noise_type == 'dither':
noise = transforms.FSDitherNoise(**noise_config)
elif 'bayer' in noise_type and 'bw' not in noise_type:
noise = transforms.BayerDitherNoise(**noise_config)
elif 'fs_bw' in noise_type:
noise = transforms.FSBWDitherNoise(**noise_config)
elif 'avg_bw' in noise_type:
noise = transforms.AverageBWDitherNoise(**noise_config)
elif 'bayer_bw' in noise_type:
noise = transforms.BayerBWDitherNoise(**noise_config)
elif 'bin_bw' in noise_type:
noise = transforms.BinBWDitherNoise(**noise_config)
elif 'rnd_bw' in noise_type:
noise = transforms.RandomBWDitherNoise(**noise_config)
elif noise_type in ('simplequantize', 'simple_quantize'):
noise = transforms.SimpleQuantize(**noise_config)
elif noise_type in ('quantize', 'som_quantize'):
noise = transforms.RandomQuantizeSOM(**noise_config)
elif noise_type == 'km_quantize':
noise = transforms.RandomQuantize(**noise_config)
elif noise_type == 'gaussian':
noise = transforms.RandomGaussianNoise(**noise_config)
elif noise_type.lower() == 'jpeg':
noise = transforms.RandomCompression(
**noise_config, compression_type='.jpeg')
elif noise_type.lower() == 'webp':
noise = transforms.RandomCompression(
**noise_config, compression_type='.webp')
elif noise_type == 'poisson':
noise = transforms.RandomPoissonNoise(**noise_config)
elif noise_type == 's&p':
noise = transforms.RandomSPNoise(**noise_config)
elif noise_type == 'speckle':
noise = transforms.RandomSpeckleNoise(**noise_config)
elif noise_type == 'maxrgb':
noise = transforms.FilterMaxRGB(**noise_config)
# elif noise_type == 'canny':
# noise = transforms.FilterCanny(**noise_config)
elif noise_type == 'patches' and noise_patches:
noise = RandomNoisePatches(noise_patches, **noise_config)
elif noise_type == 'clahe':
noise = transforms.CLAHE(**noise_config)
elif noise_type == 'camera':
noise = transforms.RandomCameraNoise(**noise_config)
elif noise_type == 'superpixels':
noise = transforms.Superpixels(**noise_config)
# elif noise_type == 'clean':
return noise
def get_pad(img, size: int, fill = 0, padding_mode: str ='constant'):
w, h = image_size(img)
if fill == 'random':
fill_list = []
for _ in range(len(img.shape)):
fill_list.append(random.randint(0, 255))
fill = tuple(fill_list)
top = (size - h) // 2 if h < size else 0
bottom = top + h % 2 if h < size else 0
left = (size - w) // 2 if w < size else 0
right = left + w % 2 if w < size else 0
pad = transforms.Pad(padding=(top, bottom, left, right), padding_mode=padding_mode, fill=fill) #reflect
return pad, fill
class NoisePatches(Dataset):
"""
Load the patches previously extracted from real images
to apply noise to the LR images.
Ref:
https://openaccess.thecvf.com/content_cvpr_2018/papers/Chen_Image_Blind_Denoising_CVPR_2018_paper.pdf
https://openaccess.thecvf.com/content_ICCV_2019/papers/Zhou_Kernel_Modeling_Super-Resolution_on_Real_Low-Resolution_Images_ICCV_2019_paper.pdf
"""
def __init__(self, dataset=None, size=32, permute=True, grayscale=False):
super(NoisePatches, self).__init__()
assert osp.exists(dataset)
self.grayscale = grayscale
self.noise_imgs = sorted(glob.glob(dataset + '*.png'))
if permute:
np.random.shuffle(self.noise_imgs)
self.pre_process = transforms.Compose([transforms.RandomCrop(size),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomVerticalFlip(p=0.5),
])
def __getitem__(self, index, out_nc=3):
noise = self.pre_process(util.read_img(None, self.noise_imgs[index], out_nc))
# describe_numpy(noise, all=True)
# tmp_vis(noise, False)
norm_noise = (noise - np.mean(noise, axis=(0, 1), keepdims=True, dtype=np.float32))
# tmp_vis(np.mean(noise, axis=(0, 1), keepdims=True), False)
# describe_numpy(np.mean(noise, axis=(0, 1), keepdims=True), all=True)
# describe_numpy(norm_noise, all=True)
# TODO: test adding noise to single channel images
if self.grayscale:
norm_noise = util.bgr2ycbcr(norm_noise, only_y=True)
return norm_noise # .astype('uint8')
def __len__(self):
return len(self.noise_imgs)
class RandomNoisePatches():
def __init__(self, noise_patches, noise_amp:float=1.0, p:float=1.0):
self.noise_patches = noise_patches
self.noise_amp = noise_amp
self.p = p
def __call__(self, img):
if random.random() > self.p:
return img
# add noise from patches
noise = self.noise_patches[np.random.randint(0, len(self.noise_patches))]
# tmp_vis(noise, False)
# img = torch.clamp(img + noise, 0, 1)
# describe_numpy(img, all=True)
h, w = img.shape[0:2]
n_h, n_w = noise.shape[0:2]
if n_h < h or n_w < w:
# pad noise patch to image size if smaller
i = random.randint(0, h - n_h)
j = random.randint(0, w - n_w)
# top, bottom, left, right borders
noise = transforms.Pad(
padding=(i, h-(i+n_h), j, w-(j+n_w)), padding_mode='reflect')(noise)
elif n_h > h or n_w > w:
# crop noise patch to image size if larger
noise = transforms.RandomCrop(size=(w,h))(noise)
img = np.clip((img.astype('float32') + self.noise_amp*noise), 0, 255).astype('uint8')
# describe_numpy(img, all=True)
## tmp_vis(img, False)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
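# Hedged usage sketch (illustrative): pairing NoisePatches with
# RandomNoisePatches to inject real-noise patches into an LR image. The patch
# directory is an assumption and must contain the extracted '*.png' noise
# patches described in the NoisePatches docstring.
def _example_noise_patches_usage(patch_dir='../noise_patches/normal/'):
    patches = NoisePatches(dataset=patch_dir, size=32)
    add_noise = RandomNoisePatches(patches, noise_amp=1.0, p=1.0)
    lr = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)  # fake LR patch
    return add_noise(lr)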
############# transformations
# TODO: adapt for video dataloaders, apply same transform to multiple frames
def get_params(opt, size):
w, h = size
new_h = h
new_w = w
load_size = opt.get('load_size')
if isinstance(load_size, list):
load_size = random.choice(load_size)
crop_size = opt.get('crop_size')
center_crop_size = opt.get('center_crop_size')
preprocess_mode = opt.get('preprocess', 'none')
if 'resize_and_crop' in preprocess_mode:
# assert load_size, "load_size not defined"
new_h = new_w = load_size
elif 'scale_width_and_crop' in preprocess_mode:
# assert load_size, "load_size not defined"
new_w = load_size
new_h = load_size * h // w
elif 'scale_height_and_crop' in preprocess_mode:
# assert load_size, "load_size not defined"
new_w = load_size * w // h
new_h = load_size
elif 'scale_shortside_and_crop' in preprocess_mode:
# assert load_size, "load_size not defined"
ss, ls = min(w, h), max(w, h) # shortside and longside
width_is_shorter = w == ss
ls = int(load_size * ls / ss)
new_w, new_h = (ss, ls) if width_is_shorter else (ls, ss)
elif 'center_crop' in preprocess_mode:
# assert center_crop_size, "center_crop_size not defined"
new_w = center_crop_size
new_h = center_crop_size
# elif 'fixed' in preprocess_mode:
# aspect_ratio = opt.get('aspect_ratio')
# assert aspect_ratio, "aspect_ratio not defined"
x = random.randint(0, np.maximum(0, new_w - crop_size))
y = random.randint(0, np.maximum(0, new_h - crop_size))
flip = random.random() > 0.5
rot = random.random() > 0.5
vflip = random.random() > 0.5
hrrot = random.random() > 0.5
angle = int(random.uniform(-90, 90))
return {'load_size': load_size,
'crop_pos': (x, y),
'flip': flip,
'rot': rot,
'vflip': vflip,
'hrrot': hrrot,
'angle': angle,}
# TODO: could use the hasattr to set a value for all future calls
# TODO: need something similar to use the other PIL interpolation methods
def get_default_imethod(img_type='cv2'):
if img_type == 'pil':
return Image.BICUBIC
else:
return "BICUBIC"
def get_transform(opt, params=None, grayscale=False, method=None,
preprocess_mode=None):
"""
Base paired transformations: crop, scale, flip, rotate.
There are different modes to load images by specifying 'preprocess_mode' along with
'load_size', 'crop_size' and 'center_crop_size'. Can use options such as:
- 'resize': resizes the images into square images of side length 'load_size'.
- 'crop': randomly crops images to 'crop_size'.
- 'resize_and_crop': resizes the images into square images of side length 'load_size'
and randomly crops to 'crop_size'.
- scale_shortside_and_crop: scales the image to have a short side of length 'load_size'
and crops to 'crop_size' x 'crop_size' square.
- center_crop: can be used to do an initial center crop of the images of size
'center_crop_size' x 'center_crop_size' before other pre-processing steps.
.... more TBD
Rotations:
Horizontal flips and rotations (0, 90, 180, 270 degrees).
Note: Vertical flip and transpose are used for rotation implementation.
"""
transform_list = []
load_size = params['load_size'] if params else opt.get('load_size')
crop_size = opt.get('crop_size')
center_crop_size = opt.get('center_crop_size')
default_none = opt.get('default_none', 'power2')
img_type = opt.get('img_loader', 'cv2')
if not method:
        # TODO: important: if the method does not match the image type, the error is not
# helpful to debug, get the image type from opt dict and assert it's not None
method = get_default_imethod(img_type)
preprocess_mode = opt.get('preprocess') if preprocess_mode is None else preprocess_mode
preprocess_mode = 'none' if not preprocess_mode else preprocess_mode
# preprocess
if 'center_crop' in preprocess_mode:
transform_list.append(transforms.CenterCrop(center_crop_size))
if grayscale:
transform_list.append(transforms.Grayscale(1))
# TODO:
# elif params and params('color'):
# # other colorspace changes, deal with CV2 and PIL
if 'resize' in preprocess_mode:
if isinstance(load_size, list):
transform_list.append(
transforms.RandomChoice([
transforms.Resize([osize, osize], method) for osize in load_size
]))
elif isinstance(load_size, int):
osize = [load_size, load_size]
transform_list.append(transforms.Resize(osize, method))
elif 'scale_width' in preprocess_mode:
transform_list.append(transforms.Lambda(
lambda img: scale_width(img, load_size, crop_size, method)))
elif 'scale_height' in preprocess_mode:
transform_list.append(transforms.Lambda(
lambda img: scale_height(img, load_size, crop_size, method)))
elif 'scale_shortside' in preprocess_mode:
transform_list.append(transforms.Lambda(
lambda img: scale_shortside(img, load_size, method)))
# if 'crop' in preprocess_mode and preprocess_mode != 'center_crop':
if preprocess_mode == 'crop' or 'and_crop' in preprocess_mode:
if params is None:
transform_list.append(transforms.RandomCrop(crop_size))
else:
transform_list.append(transforms.Lambda(
lambda img: crop(img, params['crop_pos'],
size=crop_size, img_type=img_type)))
if preprocess_mode == 'fixed':
w = crop_size
h = round(crop_size / opt.get('aspect_ratio'))
transform_list.append(transforms.Lambda(
lambda img: resize(img, w, h, method)))
if preprocess_mode == 'none':
# no preprocessing, fix dimensions if needed
if default_none == 'power2':
# only make sure image has dims of power 2
base = 4 # 32
transform_list.append(transforms.Lambda(
lambda img: make_power_2(img, base=base, method=method)))
elif default_none == 'modcrop':
# only modcrop size according to scale
transform_list.append(transforms.Lambda(
lambda img: modcrop(
img, scale=opt.get('scale'), img_type=img_type)))
elif default_none == 'padbase':
# only pad dims to base
base = 4 # 32
transform_list.append(transforms.Lambda(
lambda img: padbase(img, base=base, img_type=img_type)))
# paired augmentations
if opt.get('use_flip'):
if params is None:
transform_list.append(transforms.RandomHorizontalFlip())
elif params['flip']:
transform_list.append(transforms.Lambda(
lambda img: flip(img, params['flip'], img_type=img_type)))
# hrrot and regular rotation are mutually exclusive
if opt.get('use_hrrot') and params.get('hrrot'):
if params['angle']:
if preprocess_mode == 'crop' or 'and_crop' in preprocess_mode:
cs = crop_size
else:
cs = None
transform_list.append(transforms.Lambda(
lambda img: rotateHR(
img, crop_size=cs, rescale=1/4,
angle=params['angle'], img_type=img_type,
method=method)))
elif opt.get('use_rot'):
if params is None:
if random.random() < 0.5:
transform_list.append(
transforms.RandomRotation(degrees=(90,90)))
elif params['rot']:
transform_list.append(transforms.Lambda(
lambda img: rotate90(
img, params['rot'], params['vflip'], img_type=img_type)))
return transforms.Compose(transform_list)
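# Hedged usage sketch (illustrative): the usual pairing of get_params and
# get_transform on a single image. The opt dict is a minimal assumption (real
# configs carry many more keys), and the rotation helpers referenced above
# (rotate90/rotateHR) are assumed to be defined further down in the module.
def _example_get_transform_usage(img):
    opt = {'preprocess': 'resize_and_crop', 'load_size': 160, 'crop_size': 128,
           'use_flip': True, 'use_rot': True, 'img_loader': 'cv2'}
    params = get_params(opt, image_size(img))
    tform = get_transform(opt, params)
    return tform(img)  # resized to 160x160, randomly cropped to 128x128, maybe flipped/rotated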
def resize(img, w, h, method=None):
if not method:
method = get_default_imethod(image_type(img))
return transforms.Resize((h,w), interpolation=method)(img)
def scale_(img, scale, mul=False, method=None):
"""
Returns a rescaled image by a specific factor given in parameter.
Works with :py:class:`~PIL.Image.Image` or
:py:class:`~np.ndarray` objects.
:param scale: The scale factor, as a float.
:param mul: If true, a scale factor greater than 1 expands
the image, between 0 and 1 contracts the image, else it's
inverted.
:param method: An optional resampling filter. Same values
possible as in the PIL.Image.resize function or CV2
equivalents.
:returns: the scaled image.
"""
if scale <= 0:
raise ValueError("the scale factor must be greater than 0")
if not method:
method = get_default_imethod(image_type(img))
ow, oh = image_size(img)
if mul:
h = int(round(scale * oh))
w = int(round(scale * ow))
else:
h = int(-(-oh//scale))
w = int(-(-ow//scale))
if h == oh and w == ow:
return img
return resize(img, w, h, method)
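# Illustrative note (assumed (100, 200, 3) input): with the default mul=False
# the scale factor acts as a divisor, while mul=True multiplies the size.
def _example_scale_mul_semantics():
    img = np.zeros((100, 200, 3), dtype=np.uint8)
    contracted = scale_(img, 2)            # ceil(100/2) x ceil(200/2) -> (50, 100, 3)
    expanded = scale_(img, 2, mul=True)    # round(2*100) x round(2*200) -> (200, 400, 3)
    return contracted.shape, expanded.shape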
def make_power_2(img, base, method=None):
ow, oh = image_size(img)
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if h == oh and w == ow:
return img
if not method:
method = get_default_imethod(image_type(img))
print_size_warning(ow, oh, w, h, base)
return resize(img, w, h, method)
def modcrop(img, scale, img_type=None):
"""Modulo crop images, removing the remainder of
dividing each dimension by a scale factor.
Args:
img (ndarray or Image.Image): Input image.
scale (int): Scale factor.
img_type (str): 'pil' or 'cv2'
Returns:
Mod cropped image.
"""
if not img_type:
img_type = image_type(img)
ow, oh = image_size(img)
# get the remainder in each dim
h, w = oh % scale, ow % scale
if h == oh and w == ow:
return img
print_size_warning(ow, oh, ow-w, oh-h, scale)
if img_type == 'pil':
return img.crop((0, 0, ow-w, oh-h))
else:
return img[0:oh-h, 0:ow-w, ...]
def padbase(img, base, img_type=None):
if not img_type:
img_type = image_type(img)
ow, oh = image_size(img)
ph = ((oh - 1) // base + 1) * base
pw = ((ow - 1) // base + 1) * base
if ph == oh and pw == ow:
return img
print_size_warning(ow, oh, pw, ph, base)
if img_type == 'pil':
# Note: with PIL if crop sizes > sizes, it adds black padding
return img.crop((0, 0, pw, ph))
else:
# TODO: test if correct-> # padding = (0, pw - ow, 0, ph - oh)
return transforms.Pad(padding=(0, ph-oh, 0, pw-ow))(img)
def scale_width(img, target_size, crop_size, method=None):
ow, oh = image_size(img)
if ow == target_size and oh >= crop_size:
return img
if not method:
method = get_default_imethod(image_type(img))
w = target_size
h = int(max(target_size * oh / ow, crop_size))
return resize(img, w, h, method)
def scale_height(img, target_size, crop_size, method=None):
ow, oh = image_size(img)
if oh == target_size and ow >= crop_size:
return img
if not method:
method = get_default_imethod(image_type(img))
h = target_size
w = int(max(target_size * ow / oh, crop_size))
return resize(img, w, h, method)
def scale_shortside(img, target_width, method=None):
ow, oh = image_size(img)
ss, ls = min(ow, oh), max(ow, oh) # shortside and longside
width_is_shorter = ow == ss
if (ss == target_width):
return img
if not method:
method = get_default_imethod(image_type(img))
ls = int(target_width * ls / ss)
nw, nh = (ss, ls) if width_is_shorter else (ls, ss)
return resize(img, nw, nh, method)
def crop(img, pos, size, img_type=None):
if not img_type:
img_type = image_type(img)
ow, oh = image_size(img)
x1, y1 = pos
tw = th = size
if (ow > tw or oh > th):
if img_type == 'pil':
return img.crop((x1, y1, x1 + tw, y1 + th))
else:
return img[y1:y1 + th, x1:x1 + tw, ...]
return img
def flip(img, flip, img_type=None):
if not img_type:
img_type = image_type(img)
if flip:
if img_type == 'pil':
return img.transpose(Image.FLIP_LEFT_RIGHT)
else:
            return np.flip(img, axis=1)
    return img
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.sparse as sps
import time
RM_train = pd.read_csv('./input/data_train.csv')
R_test = pd.read_csv('./input/data_target_users_test.csv')
URM = pd.read_csv('./input/data_train.csv')
ICM = pd.read_csv('./input/data_ICM_title_abstract.csv')
##### URM
URM_tuples = [tuple(x) for x in URM.to_numpy()]
userList, itemList, ratingList = zip(*URM_tuples)
userList = list(userList)
userList = np.array(userList, dtype=np.int64)
itemList = list(itemList)
itemList = np.array(itemList, dtype=np.int64)
ratingList = list(ratingList) # not needed
ratingList = np.array(ratingList, dtype=np.float64) # not needed
URM_all = sps.coo_matrix((ratingList, (userList, itemList)))
URM_all = URM_all.tocsr()
#### ICM
ICM_tuples = [tuple(x) for x in ICM.to_numpy()]
itemList_icm, featureList_icm, scoreList_icm = zip(*ICM_tuples)
itemList_icm = list(itemList_icm)
itemList_icm = np.array(itemList_icm, dtype=np.int64)
featureList_icm = list(featureList_icm)
featureList_icm = np.array(featureList_icm, dtype=np.int64)
scoreList_icm = list(scoreList_icm)
scoreList_icm = np.array(scoreList_icm, dtype=np.float64)
ICM_all = sps.coo_matrix((scoreList_icm, (itemList_icm, featureList_icm)))
#### Test
userTestList = [x for x in R_test.to_numpy()]
userTestList = zip(*userTestList)
userTestList = [list(a) for a in userTestList][0]
#### make validation and test
from Base.Evaluation.Evaluator import EvaluatorHoldout
from Data_manager.split_functions.split_train_validation_random_holdout import \
split_train_in_two_percentage_global_sample
URM_train, URM_test = split_train_in_two_percentage_global_sample(URM_all, train_percentage=0.80)
URM_train, URM_validation = split_train_in_two_percentage_global_sample(URM_train, train_percentage=0.80)
evaluator_validation = EvaluatorHoldout(URM_validation, cutoff_list=[10])
evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[10])
### ralph3 0.032
# from GraphBased import P3alphaRecommender
# recommender = P3alphaRecommender.P3alphaRecommender(URM_train)
# recommender.fit(normalize_similarity=True, topK=250)
# print(evaluator_test.evaluateRecommender(recommender)[0][10]['MAP'])
### PURE SVD 0.023
# from MatrixFactorization import PureSVDRecommender
# recommender = PureSVDRecommender.PureSVDRecommender(URM_train)
# recommender.fit(num_factors=400)
### SLIM ELASTIC NET not yet
# from SLIM_ElasticNet.SLIMElasticNetRecommender import MultiThreadSLIM_ElasticNet
# recommender = MultiThreadSLIM_ElasticNet(URM_train.tocsr())
# recommender.fit(topK=400)
### RP3beta 0.0329
# from GraphBased.RP3betaRecommender import RP3betaRecommender
# recommender = RP3betaRecommender(URM_train)
# recommender.fit(beta=-0.1,alpha=1.,topK=200)
### SLIM BPR 0.0375
# from SLIM_BPR.Cython.SLIM_BPR_Cython import SLIM_BPR_Cython
# recommender = SLIM_BPR_Cython(URM_train, recompile_cython=False)
# recommender.fit(epochs=50, batch_size=100, sgd_mode='sgd', learning_rate=1e-2, positive_threshold_BPR=1)
# recommender.get_S_incremental_and_set_W()
### CBF KNN
### Usinng TF IDF
# from KNN.ItemKNNCBFRecommender import ItemKNNCBFRecommender
# recommender = ItemKNNCBFRecommender(URM_train, ICM_idf)
#
# recommender.fit(shrink=10, topK=800)
# from SLIM_ElasticNet.SLIMElasticNetRecommender import SLIMElasticNetRecommender
# recommender = SLIMElasticNetRecommender(URM_train)
##############
# from MatrixFactorization.IALSRecommender import IALSRecommender
# recommender = IALSRecommender(URM_train)
### Hybrid
##top pop
item_popularity = np.ediff1d(URM_all.tocsc().indptr)
popular_items = np.argsort(item_popularity)
popular_items = np.flip(popular_items, axis=0)
popular_items = popular_items[0:10]
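# Sketch (my addition): a small helper that filters a user's already-seen items out of the
# top-pop list, using only the CSR structure of the URM (no framework API assumed).
def toppop_for_user(user_id, urm_csr, toppop):
    seen = urm_csr.indices[urm_csr.indptr[user_id]:urm_csr.indptr[user_id + 1]]
    return [item for item in toppop if item not in seen]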
# ## TF_DF
ICM_all = ICM_all.tocsr()
num_tot_items = ICM_all.shape[0]
# let's count how many items have a certain feature (non-zeros per column of the ICM);
# the +1 avoids division by zero for features that no item has
items_per_feature = np.ediff1d(ICM_all.tocsc().indptr) + 1
# print(items_per_feature)
IDF = np.array(np.log(num_tot_items / items_per_feature))
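# Sketch (my addition, not in the original script): apply the IDF weights column-wise to the
# ICM, producing the ICM_idf matrix referenced in the commented-out ItemKNNCBFRecommender
# block above. Assumes IDF holds one weight per feature.
ICM_idf = sps.csc_matrix(ICM_all, copy=True)
col_nnz = np.diff(ICM_idf.indptr)                        # non-zeros per feature column
ICM_idf.data = ICM_idf.data * np.repeat(IDF, col_nnz)    # scale each column by its IDF
ICM_idf = ICM_idf.tocsr()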
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore import nn
from mindspore.ops.operations import _quant_ops as Q
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU', device_id=0)
class Net(nn.Cell):
def __init__(self, num_bits=8, symmetric=False, narrow_range=False, channel_axis=1):
super(Net, self).__init__()
self.op = Q.FakeQuantPerChannel(num_bits=num_bits,
symmetric=symmetric,
narrow_range=narrow_range,
channel_axis=channel_axis)
def construct(self, x, minq, maxq):
return self.op(x, minq, maxq)
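def fake_quant_perchannel_np(x, min_val, max_val, num_bits=8, narrow_range=False, channel_axis=0):
    """NumPy reference sketch (my addition, not MindSpore code) used to reason about the
    expected values in the tests below: nudge the [min, max] range per channel so that zero
    is exactly representable, then quantize/dequantize. Rounding is half-up, which matches
    the non-negative zero-points used in these tests."""
    qmin = 1 if narrow_range else 0
    qmax = (1 << num_bits) - 1
    shape = [1] * x.ndim
    shape[channel_axis] = -1
    mn = min_val.reshape(shape)
    mx = max_val.reshape(shape)
    scale = (mx - mn) / (qmax - qmin)
    safe_scale = np.where(scale == 0, 1.0, scale)        # degenerate range handled at the end
    zero_point = np.clip(np.floor(qmin - mn / safe_scale + 0.5), qmin, qmax)
    nudged_min = (qmin - zero_point) * safe_scale
    nudged_max = (qmax - zero_point) * safe_scale
    clipped = np.clip(x, nudged_min, nudged_max)
    quantized = np.floor((clipped - nudged_min) / safe_scale + 0.5)
    out = quantized * safe_scale + nudged_min
    return np.where(mx == mn, np.zeros_like(x), out)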
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel1():
# WithVarsPerChannel_ZeroMinAndMax
x = np.array([0.0, 0.0, 0.0, 0.0]).astype(np.float32)
min_val = np.array([0.0, 0.0, 0.0, 0.0]).astype(np.float32)
max_val = np.array([0.0, 0.0, 0.0, 0.0]).astype(np.float32)
expect = np.array([0.0, 0.0, 0.0, 0.0]).astype(np.float32)
net = Net(num_bits=8, narrow_range=False, channel_axis=0)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel2():
# WithVarsPerChannelDim1NudgedDown_RegularRange
# scale 1/4, zp 0.4, nudge 0. nudged ranges [0.0, 63.75]
x = np.array([-0.1, 0.0, 63.75, 63.8]).astype(np.float32)
min_val = np.array([-0.1, -0.1, -0.1, -0.1]).astype(np.float32)
max_val = np.array([63.65, 63.65, 63.65, 63.65]).astype(np.float32)
expect = np.array([0.0, 0.0, 63.75, 63.75]).astype(np.float32)
net = Net(num_bits=8, narrow_range=False, channel_axis=0)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel3():
# WithVarsPerChannelDim1NudgedDown_NarrowRange
# scale 1/4, zp 1.4, nudge 1. nudged ranges[0.0, 63.5]
x = np.array([-0.1, 0.0, 63.5, 63.6]).astype(np.float32)
min_val = np.array([-0.1, -0.1, -0.1, -0.1]).astype(np.float32)
max_val = np.array([63.4, 63.4, 63.4, 63.4]).astype(np.float32)
expect = np.array([0.0, 0.0, 63.5, 63.5]).astype(np.float32)
net = Net(num_bits=8, narrow_range=True, channel_axis=0)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel4():
# WithVarsPerChannelDim1NudgedUp_RegularRange
# [-0.125, 63.625]
# scale 1/4, zp: 0.5, nudge 1. nudged range [-0.25, 63.5]
x = np.array([-0.26, -0.25, -0.24, 63.6]).astype(np.float32)
expect = np.array([-0.25, -0.25, -0.25, 63.5]).astype(np.float32)
min_val = np.array([-0.125, -0.125, -0.125, -0.125]).astype(np.float32)
max_val = np.array([63.625, 63.625, 63.625, 63.625]).astype(np.float32)
net = Net(num_bits=8, narrow_range=False, channel_axis=0)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel5():
# WithVarsPerChannelDim1NudgedUp_NarrowRange
# scale 1/4, zp: 1.5, nudge 2. nudged range [-0.25, 63.25]
x = np.array([-0.26, -0.25, -0.24, 63.3]).astype(np.float32)
expect = np.array([-0.25, -0.25, -0.25, 63.25]).astype(np.float32)
min_val = np.array([-0.125, -0.125, -0.125, -0.125]).astype(np.float32)
max_val = np.array([63.375, 63.375, 63.375, 63.375]).astype(np.float32)
net = Net(num_bits=8, narrow_range=True, channel_axis=0)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel6():
# WithVarsPerChannelDim2NudgedDown_RegularRange
# scale 1/4, zp: 0.4, nudge 0. nudged range [0.0, 63.75]
x = np.array([-0.1, 0.0, 0.1, 0.25, 63.75, 63.80]
).reshape(2, 3).astype(np.float32)
expect = np.array([-0.0, 0.0, 0.0, 0.25, 63.75, 63.75]).astype(np.float32)
min_val = np.array([-0.1, -0.1, -0.1]).reshape(3).astype(np.float32)
max_val = np.array([63.65, 63.65, 63.65]).reshape(3).astype(np.float32)
net = Net(num_bits=8, narrow_range=False, channel_axis=1)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel7():
# WithVarsPerChannelDim2NudgedDown_NarrowRange
# scale 1/4, zp: 1.4, nudge 1. nudged range [0.0, 63.5]
x = np.array([-0.1, 0.0, 0.1, 0.25, 63.5, 63.6]
).reshape(2, 3).astype(np.float32)
expect = np.array([0.0, 0.0, 0.0, 0.25, 63.5, 63.5]).astype(np.float32)
min_val = np.array([-0.1, -0.1, -0.1]).reshape(3).astype(np.float32)
max_val = np.array([63.4, 63.4, 63.4]).reshape(3).astype(np.float32)
net = Net(num_bits=8, narrow_range=True, channel_axis=1)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel8():
# WithVarsPerChannelDim2NudgedUp_RegularRange
# scale 1/4, zp: 0.5, nudge 1. nudged range [-0.25, 63.5]
x = np.array([-0.26, -0.25, -0.24, 0.0, 63.5, 63.6]
).reshape(2, 3).astype(np.float32)
expect = np.array([-0.25, -0.25, -0.25, 0.0, 63.5, 63.5]
).astype(np.float32)
min_val = np.array([-0.125, -0.125, -0.125]).reshape(3).astype(np.float32)
max_val = np.array([63.625, 63.625, 63.625]).reshape(3).astype(np.float32)
net = Net(num_bits=8, narrow_range=False, channel_axis=1)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel9():
# WithVarsPerChannelDim2NudgedUp_NarrowRange
# scale 1/4, zp: 1.5, nudge 2. nudged range [-0.25, 63.25]
x = np.array([-0.26, -0.25, -0.24, 0.0, 63.25, 63.3]
).reshape(2, 3).astype(np.float32)
expect = np.array(
[-0.25, -0.25, -0.25, 0.0, 63.25, 63.25]).astype(np.float32)
min_val = np.array([-0.125, -0.125, -0.125]).reshape(3).astype(np.float32)
max_val = np.array([63.375, 63.375, 63.375]).reshape(3).astype(np.float32)
net = Net(num_bits=8, narrow_range=True, channel_axis=1)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel10():
# WithVarsPerChannelDim4NudgedDown_RegularRange
# scale 1/4, zp: 0.4, nudge 0. nudged range [0.0, 63.75]
x = np.array([-0.1, 0.0, 0.1, 0.25, 0.5, 0.75,
1.0, 1.25, 1.5, 1.75, 2.0, 2.25,
63.0, 63.25, 63.5, 63.7, 63.75, 63.8,
63.9, 100.0, 100.0, 100.0, 100.0, 1000.0]).reshape((1, 4, 2, 3)).astype(np.float32)
expect = np.array([0.0, 0.0, 0.0, 0.25, 0.5, 0.75,
1.0, 1.25, 1.5, 1.75, 2.0, 2.25,
63.0, 63.25, 63.5, 63.75, 63.75, 63.75,
63.75, 63.75, 63.75, 63.75, 63.75, 63.75]).astype(np.float32)
min_val = np.array([-0.1, -0.1, -0.1, -0.1]).reshape(4).astype(np.float32)
max_val = np.array([63.65, 63.65, 63.65, 63.65]
).reshape(4).astype(np.float32)
net = Net(num_bits=8, narrow_range=False, channel_axis=1)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel11():
# WithVarsPerChannelDim4NudgedDown_NarrowRange
# scale 1/4, zp: 1.4, nudge 1. nudged range [0.0, 63.5]
x = np.array([-0.1, 0.0, 0.1, 0.25, 0.5, 0.75,
1.0, 1.25, 1.5, 1.75, 2.0, 2.25,
63.0, 63.25, 63.3, 63.4, 63.5, 63.6,
63.7, 100.0, 100.0, 100.0, 100.0, 1000.0]).reshape((1, 4, 2, 3)).astype(np.float32)
expect = np.array([0.0, 0.0, 0.0, 0.25, 0.5, 0.75,
1.0, 1.25, 1.5, 1.75, 2.0, 2.25,
63.0, 63.25, 63.25, 63.5, 63.5, 63.5,
63.5, 63.5, 63.5, 63.5, 63.5, 63.5]).astype(np.float32)
min_val = np.array([-0.1, -0.1, -0.1, -0.1]).reshape(4).astype(np.float32)
max_val = np.array([63.4, 63.4, 63.4, 63.4]).reshape(4).astype(np.float32)
net = Net(num_bits=8, narrow_range=True, channel_axis=1)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel12():
# WithVarsPerChannelDim4NudgedUp_RegularRange
# scale 1/4, zp: 0.5, nudge 1. nudged range [-0.25, 63.5]
x = np.array([-0.3, -0.25, -0.2, 0.0, 0.25, 0.5,
0.75, 1.0, 1.25, 1.5, 1.75, 2.0,
63.0, 63.25, 63.4, 63.5, 63.6, 63.7,
100.0, 100.0, 100.0, 100.0, 100.0, 1000.0]).reshape((1, 4, 2, 3)).astype(np.float32)
expect = np.array([-0.25, -0.25, -0.25, 0.0, 0.25, 0.5,
0.75, 1.0, 1.25, 1.5, 1.75, 2.0,
63.0, 63.25, 63.5, 63.5, 63.5, 63.5,
63.5, 63.5, 63.5, 63.5, 63.5, 63.5]).astype(np.float32)
min_val = np.array([-0.125, -0.125, -0.125, -0.125]
).reshape(4).astype(np.float32)
max_val = np.array([63.625, 63.625, 63.625, 63.625]
).reshape(4).astype(np.float32)
net = Net(num_bits=8, narrow_range=False, channel_axis=1)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel13():
# WithVarsPerChannelDim4NudgedUp_NarrowRange
# scale 1/4, zp: 1.5, nudge 2. nudged range [-0.25, 63.25]
x = np.array([-0.3, -0.25, -0.2, 0.0, 0.25, 0.5,
0.75, 1.0, 1.25, 1.5, 1.75, 2.0,
63.0, 63.2, 63.25, 63.3, 63.4, 63.5,
100.0, 100.0, 100.0, 100.0, 100.0, 1000.0]).reshape((1, 4, 2, 3)).astype(np.float32)
expect = np.array([-0.25, -0.25, -0.25, 0.0, 0.25, 0.5,
0.75, 1.0, 1.25, 1.5, 1.75, 2.0,
63.0, 63.25, 63.25, 63.25, 63.25, 63.25,
63.25, 63.25, 63.25, 63.25, 63.25, 63.25]).astype(np.float32)
min_val = np.array([-0.125, -0.125, -0.125, -0.125]
).reshape(4).astype(np.float32)
max_val = np.array([63.375, 63.375, 63.375, 63.375]
).reshape(4).astype(np.float32)
net = Net(num_bits=8, narrow_range=True, channel_axis=1)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel14():
# WithVarsPerChannelDim1NudgedDown_4Bits_RegularRange
# scale 1/2, zp: 0.2, nudge 0. nudged range [0.0, 7.5]
x = np.array([-0.1, 0.0, 7.5, 7.6]).reshape(4).astype(np.float32)
expect = np.array([0.0, 0.0, 7.5, 7.5]).astype(np.float32)
min_val = np.array([-0.1, -0.1, -0.1, -0.1]).reshape(4).astype(np.float32)
max_val = np.array([7.4, 7.4, 7.4, 7.4]).reshape(4).astype(np.float32)
net = Net(num_bits=4, narrow_range=False, channel_axis=0)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel15():
# WithVarsPerChannelDim1NudgedDown_4Bits_NarrowRange
# scale 1/2, zp: 1.2, nudge 1. nudged range [0.0, 7.0]
x = np.array([-0.1, 0.0, 7.0, 7.1]).reshape(4).astype(np.float32)
expect = np.array([0.0, 0.0, 7.0, 7.0]).astype(np.float32)
min_val = np.array([-0.1, -0.1, -0.1, -0.1]).reshape(4).astype(np.float32)
max_val = np.array([6.9, 6.9, 6.9, 6.9]).reshape(4).astype(np.float32)
net = Net(num_bits=4, narrow_range=True, channel_axis=0)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel16():
# WithVarsPerChannelDim1NudgedUp_4Bits_RegularRange
# scale 1/2, zp: 0.8, nudge 1. nudged range [-0.5, 7.0]
x = np.array([-0.6, -0.5, 7.0, 7.1]).reshape(4).astype(np.float32)
expect = np.array([-0.5, -0.5, 7.0, 7.0]).astype(np.float32)
min_val = np.array([-0.4, -0.4, -0.4, -0.4]).reshape(4).astype(np.float32)
max_val = np.array([7.1, 7.1, 7.1, 7.1]).reshape(4).astype(np.float32)
net = Net(num_bits=4, narrow_range=False, channel_axis=0)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel17():
# WithVarsPerChannelDim1NudgedUp_4Bits_NarrowRange
# scale 1/2, zp: 1.8, nudge 2. nudged range [-0.5, 6.5]
x = np.array([-0.6, -0.5, 6.5, 6.6]).reshape(4).astype(np.float32)
expect = np.array([-0.5, -0.5, 6.5, 6.5]).astype(np.float32)
min_val = np.array([-0.4, -0.4, -0.4, -0.4]).reshape(4).astype(np.float32)
max_val = np.array([6.6, 6.6, 6.6, 6.6]).reshape(4).astype(np.float32)
net = Net(num_bits=4, narrow_range=True, channel_axis=0)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel18():
# WithVarsPerChannelDim2NudgedDown_4Bits_RegularRange
# scale 1/2, zp: 0.2, nudge 0. nudged range [0.0, 7.5]
x = np.array([-0.1, 0.0, 0.1, 0.5, 7.5, 7.6]
).reshape(2, 3).astype(np.float32)
expect = np.array([0.0, 0.0, 0.0, 0.5, 7.5, 7.5]).astype(np.float32)
min_val = np.array([-0.1, -0.1, -0.1]).reshape(3).astype(np.float32)
max_val = np.array([7.4, 7.4, 7.4]).reshape(3).astype(np.float32)
net = Net(num_bits=4, narrow_range=False, channel_axis=1)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel19():
# WithVarsPerChannelDim2NudgedDown_4Bits_NarrowRange
# scale 1/2, zp: 1.2, nudge 1. nudged range [0.0, 7.0]
x = np.array([-0.1, 0.0, 0.1, 0.5, 7.0, 7.1]
).reshape(2, 3).astype(np.float32)
expect = np.array([0.0, 0.0, 0.0, 0.5, 7.0, 7.0]).astype(np.float32)
min_val = np.array([-0.1, -0.1, -0.1]).reshape(3).astype(np.float32)
max_val = np.array([6.9, 6.9, 6.9]).reshape(3).astype(np.float32)
net = Net(num_bits=4, narrow_range=True, channel_axis=1)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel20():
# WithVarsPerChannelDim2NudgedUp_4Bits_RegularRange
# scale 1/2, zp: 0.8, nudge 1. nudged range [-0.5, 7.0]
x = np.array([-0.51, -0.5, -0.24, 0.0, 7.0, 7.1]
).reshape(2, 3).astype(np.float32)
expect = np.array([-0.5, -0.5, 0.0, 0.0, 7.0, 7.0]).astype(np.float32)
min_val = np.array([-0.4, -0.4, -0.4]).reshape(3).astype(np.float32)
max_val = np.array([7.1, 7.1, 7.1]).reshape(3).astype(np.float32)
net = Net(num_bits=4, narrow_range=False, channel_axis=1)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel21():
# WithVarsPerChannelDim2NudgedUp_4Bits_NarrowRange
# scale 1/2, zp: 1.8, nudge 2. nudged range [-0.5, 6.5]
x = np.array([-0.6, -0.5, -0.24, 0.0, 6.5, 6.6]
).reshape(2, 3).astype(np.float32)
expect = np.array([-0.5, -0.5, 0.0, 0.0, 6.5, 6.5]).astype(np.float32)
min_val = np.array([-0.4, -0.4, -0.4]).reshape(3).astype(np.float32)
max_val = np.array([6.6, 6.6, 6.6]).reshape(3).astype(np.float32)
net = Net(num_bits=4, narrow_range=True, channel_axis=1)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel22():
# WithVarsPerChannelDim4NudgedDown_4Bits_RegularRange
# scale 1/2, zp: 0.2, nudge 0. nudged range [0.0, 7.5]
x = np.array([-0.1, 0.0, 0.1, 0.5, 1.0, 1.5,
1.5, 2.0, 2.5, 3.0, 3.5, 4.0,
6.0, 6.5, 7.0, 7.4, 7.5, 7.7,
7.8, 100.0, 100.0, 100.0, 100.0, 1000.0]).reshape((1, 4, 2, 3)).astype(np.float32)
expect = np.array([0.0, 0.0, 0.0, 0.5, 1.0, 1.5,
1.5, 2.0, 2.5, 3.0, 3.5, 4.0,
6.0, 6.5, 7.0, 7.5, 7.5, 7.5,
7.5, 7.5, 7.5, 7.5, 7.5, 7.5]).astype(np.float32)
min_val = np.array([-0.1, -0.1, -0.1, -0.1]).reshape(4).astype(np.float32)
max_val = np.array([7.4, 7.4, 7.4, 7.4]).reshape(4).astype(np.float32)
net = Net(num_bits=4, narrow_range=False, channel_axis=1)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel23():
# WithVarsPerChannelDim4NudgedDown_4Bits_NarrowRange
# scale 1/2, zp: 1.2, nudge 1. nudged range [0.0, 7.0]
x = np.array([-0.1, 0.0, 0.1, 0.5, 1.0, 1.5,
1.5, 2.0, 2.5, 3.0, 3.5, 4.0,
6.0, 6.5, 6.8, 6.9, 7.0, 7.1,
7.2, 100.0, 100.0, 100.0, 100.0, 1000.0]).reshape((1, 4, 2, 3)).astype(np.float32)
expect = np.array([0.0, 0.0, 0.0, 0.5, 1.0, 1.5,
1.5, 2.0, 2.5, 3.0, 3.5, 4.0,
6.0, 6.5, 7.0, 7.0, 7.0, 7.0,
7.0, 7.0, 7.0, 7.0, 7.0, 7.0]).astype(np.float32)
min_val = np.array([-0.1, -0.1, -0.1, -0.1]).reshape(4).astype(np.float32)
max_val = np.array([6.9, 6.9, 6.9, 6.9]).reshape(4).astype(np.float32)
net = Net(num_bits=4, narrow_range=True, channel_axis=1)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_perchannel24():
# WithVarsPerChannelDim4NudgedUp_4Bits_RegularRange
# scale 1/2, zp: 0.8, nudge 1. nudged range [-0.5, 7.0]
x = np.array([-0.6, -0.5, -0.4, 0.0, 0.5, 1.0,
1.5, 2.0, 2.5, 3.0, 3.5, 4.0,
6.0, 6.5, 6.9, 7.0, 7.1, 7.7,
100.0, 100.0, 100.0, 100.0, 100.0, 1000.0]).reshape((1, 4, 2, 3)).astype(np.float32)
expect = np.array([-0.5, -0.5, -0.5, 0.0, 0.5, 1.0,
1.5, 2.0, 2.5, 3.0, 3.5, 4.0,
6.0, 6.5, 7.0, 7.0, 7.0, 7.0,
7.0, 7.0, 7.0, 7.0, 7.0, 7.0]).astype(np.float32)
min_val = np.array([-0.4, -0.4, -0.4, -0.4]).reshape(4).astype(np.float32)
max_val = np.array([7.1, 7.1, 7.1, 7.1]).reshape(4).astype(np.float32)
net = Net(num_bits=4, narrow_range=False, channel_axis=1)
output = net(Tensor(x), Tensor(min_val), Tensor(max_val))
error = np.ones(shape=expect.shape) * 1.0e-5
diff = output.asnumpy().flatten() - expect
print("output: ", output)
print("expect: ", expect)
assert np.all(np.abs(diff) < error)
import numpy as np
import pandas as pd
from tqdm import tqdm
import numpy.ma as ma
from scipy.special import gammaln
from pykalman import KalmanFilter
from pynowcasting.pycsminwel import csminwel
class BVARGLP(object):
def __init__(self, data, lags, hz=8, vc=10e6, stationary_prior=None, crit=1e-16,
hyperpriors=True, mnpsi=True, mnalpha=False, sur=True, noc=True,
fcast=False, mcmc=False, ndraws=20000, ndrawsdiscard=None, mcmcconst=1,
mcmcfcast=True, mcmcstorecoef=True, verbose=False):
"""
This class implements the Bayesian VAR from Giannone, Lenza and Primiceri (2012), hence the name GLP. The main
idea of the models is to use multiple priors, each with their own hyperprior, in order to generate a shrinkage
behaviour.
This class only accepts data with a quarterly frequency and no missing data.
@param hyperpriors: False = no priors on hyperparameters
True = reference priors on hyperparameters (default)
[NOTE: hyperpriors on psi calibrated for data expressed in
4 x logs, such as 4 x log(GDP). Thus if interest rate is in
percentage, divide by 100]
@param vc: prior variance in the MN prior for the coefficients multiplying
the constant term (Default: vc=10e6)
@param stationary_prior: names of the variables that enter the VAR in first
differences and for which one might want to set the prior mean
on the coefficient on the first own lag in the MN prior and the
prior mean of the sum-of-coefficients prior to 0 (instead of
the typical 1)
@param mnpsi: False = diagonal elements of the scale matrix of the IW prior on
the covariance of the residuals NOT treated as
hyperparameters (set to the residual variance of an AR(1))
True = diagonal elements of the scale matrix of the IW prior on
the covariance of the residuals treated as hyperparameters (default)
@param mnalpha: False = Lag-decaying parameter of the MN prior set to 2 and
NOT treated as hyperparameter (default)
True = Lag-decaying parameter of the MN prior treated as
hyperparameter
@param sur: False = single-unit-root prior is OFF
True = single-unit-root prior is ON and its std is treated as an
hyperparameter (default)
@param noc: False = no-cointegration (sum-of coefficients) prior is OFF
True = no-cointegration (sum-of coefficients) is ON and its std is
treated as an hyperparameter (default)
@param fcast: False = does not generate forecasts at the posterior mode
True = generates forecasts at the posterior mode (default)
@param hz: number of quarters for which it generates forecasts (default: hz=8)
@param mcmc: False = does not run the MCMC (default)
True = runs the MCMC after the maximization
@param ndraws: number of draws in the MCMC (default: Ndraws=20000)
@param ndrawsdiscard: number of draws initially discarded to allow convergence
in the MCMC (default=Ndraws/2)
@param mcmcconst: scaling constant for the MCMC (should be calibrated to achieve
an acceptance rate of approx 25%) (default: MCMCconst=1)
@param mcmcfcast: False = does not generate forecasts when running the MCMC
True = generates forecasts while running the MCMC
(for each draw of the hyperparameters the code takes a
draw of the VAR coefficients and shocks, and generates
forecasts at horizons hz) (default).
@param mcmcstorecoef: False = does not store the MCMC draws of the VAR
coefficients and residual covariance matrix
True = stores the MCMC draws of the VAR coefficients and
residual covariance matrix (default)
@param verbose: Prints relevant information during the estimation.
@param crit: value for convergence criteria
"""
assert data.index.inferred_freq == 'Q', "input 'data' must be quarterly and recognized by pandas."
self.data = data
self.lags = lags
self.hyperpriors = hyperpriors
self.vc = vc
self.stationary_prior = stationary_prior
if stationary_prior is None:
self.pos = None
else:
self.pos = [self.data.columns.get_loc(var) for var in stationary_prior]
self.mnalpha = mnalpha
self.mnpsi = mnpsi
self.sur = sur
self.noc = noc
self.fcast = fcast
self.hz = hz
self.mcmc = mcmc
self.ndraws = ndraws
self.ndrwasdiscard = int(ndraws/2) if ndrawsdiscard is None else ndrawsdiscard
self.mcmccosnt = mcmcconst
self.mcmcfcast = mcmcfcast
self.mcmcstorecoef = mcmcstorecoef
self.verbose = verbose
self.crit = crit
self.TT = data.shape[0] # Time-series sample size without lags
self.n = data.shape[1] # Number of variables in the VAR
self.k = self.n * self.lags + 1 # Number of coefficients on each equation
self._set_priors()
self._regressor_matrix_ols()
self._minimization()
if self.fcast:
self._forecasts()
if self.mcmc:
self._mcmc()
def _set_priors(self):
# Sets up the default choices for the priors of the BVAR of Giannone, Lenza and Primiceri (2012)
if self.hyperpriors:
# hyperprior mode
mode_lambda = 0.2
mode_miu = 1
mode_theta = 1
# hyperprior sds
sd_lambda = 0.4
sd_miu = 1
sd_theta = 1
# scale and shape of the IG on psi/(d-n-1)
scalePSI = 0.02 ** 2
priorcoef = pd.DataFrame(index=['lambda', 'miu', 'theta', 'alpha', 'beta'],
columns=['r_k', 'r_theta', 'PSI'])
priorcoef.loc['lambda', 'r_k'], priorcoef.loc['lambda', 'r_theta'] = \
self._gamma_coef(mode_lambda, sd_lambda)
priorcoef.loc['miu', 'r_k'], priorcoef.loc['miu', 'r_theta'] = self._gamma_coef(mode_miu, sd_miu)
priorcoef.loc['theta', 'r_k'], priorcoef.loc['theta', 'r_theta'] = self._gamma_coef(mode_theta, sd_theta)
priorcoef.loc['alpha', 'PSI'] = scalePSI
priorcoef.loc['beta', 'PSI'] = scalePSI
self.priorcoef = priorcoef
else:
self.priorcoef = None
def _regressor_matrix_ols(self):
# Constructs the matrix of regressors and the AR(1) residual variances (SS)
n = self.n
lags = self.lags
data = self.data
x = np.zeros((self.TT, self.k))
x[:, 0] = 1
for i in range(1, self.lags + 1):
x[:, 1 + (i - 1) * n: i * n + 1] = data.shift(i).values
self.y0 = data.iloc[:lags, :].mean().values
self.x = x[lags:, :]
self.y = data.values[lags:, :]
self.T = self.y.shape[0] # Sample size after lags
# OLS for AR(1) residual variance of each equation
SS = np.zeros(self.n)
for i in range(self.n):
y_reg = self.y[1:, i]
x_reg = np.hstack((np.ones((self.T - 1, 1)), self.y[:-1, i].reshape((-1, 1))))
ar1 = OLS1(y_reg, x_reg)
SS[i] = ar1.sig2hatols
self.SS = SS
def _minimization(self):
# Starting values for the minimization
self.lambda0 = 0.2 # std of MN prior
self.theta0 = 1 # std of SUR prior
self.miu0 = 1 # std NOC prior
self.alpha0 = 2 # lag-decaying parameter of the MN prior
self.psi0 = self.SS
# Bounds for the minimization step
self.lambda_min = 0.0001
self.lambda_max = 5
self.alpha_min = 0.1
self.alpha_max = 5
self.theta_min = 0.0001
self.theta_max = 50
self.miu_min = 0.0001
self.miu_max = 50
self.psi_min = self.SS / 100
self.psi_max = self.SS * 100
# Transforming inputs to unbounded and builds the initial guess
x0 = np.array([-np.log((self.lambda_max - self.lambda0) / (self.lambda0 - self.lambda_min))])
if self.mnpsi:
inpsi = -np.log((self.psi_max - self.psi0) / (self.psi0 - self.psi_min))
x0 = np.concatenate((x0, inpsi))
if self.sur:
intheta = np.array([-np.log((self.theta_max - self.theta0) / (self.theta0 - self.theta_min))])
x0 = np.concatenate((x0, intheta))
if self.noc:
inmiu = np.array([-np.log((self.miu_max - self.miu0) / (self.miu0 - self.miu_min))])
x0 = np.concatenate((x0, inmiu))
if self.mnalpha:
inalpha = np.array([-np.log((self.alpha_max - self.alpha0) / (self.alpha0 - self.alpha_min))])
x0 = np.concatenate((x0, inalpha))
# initial guess for the inverse Hessian
H0 = 10 * np.eye(len(x0))
# Minimization of the negative of the posterior of the hyperparameters
def myfun(xxx):
logML, _, _ = self._logmlvar_formin(xxx)
return -logML
# Optimization
fh, xh, gh, h, itct, fcount, retcodeh = csminwel(fcn=myfun,
x0=x0,
h0=H0,
grad=None,
crit=self.crit,
nit=1000,
verbose=self.verbose)
self.itct = itct
self.xh = xh
self.h = h
self.log_post, self.betahat, self.sigmahat = self._logmlvar_formin(xh)
self.lamb = self.lambda_min + (self.lambda_max - self.lambda_min) / (1 + np.exp(-xh[0]))
self.theta = self.theta_max
self.miu = self.miu_max
if self.mnpsi:
# diagonal elements of the scale matrix of the IW prior on the residual variance
self.psi = self.psi_min + (self.psi_max - self.psi_min) / (1 + np.exp(-xh[1:self.n + 1]))
if self.sur:
# std of sur prior at the peak
self.theta = self.theta_min + (self.theta_max - self.theta_min) / (1 + np.exp(-xh[self.n + 1]))
if self.noc:
# std of noc prior at the peak
self.miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-xh[self.n + 2]))
else: # self.sur == 0
if self.noc:
# std of noc prior at the peak
self.miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-xh[self.n + 1]))
else: # self.mnpsi == 0
self.psi = self.SS
if self.sur:
# std of sur prior at the peak
self.theta = self.theta_min + (self.theta_max - self.theta_min) / (1 + np.exp(-xh[1]))
if self.noc:
# std of noc prior at the peak
self.miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-xh[2]))
else:
if self.noc:
# std of noc prior at the peak
self.miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-xh[1]))
if not self.mnalpha:
self.alpha = 2
else:
# Lag-decaying parameter of the MN prior
self.alpha = self.alpha_min + (self.alpha_max - self.alpha_min) / (1 + np.exp(-xh[-1]))
def _forecasts(self):
# Forecasts ate the posterior mode
Y = np.vstack([self.y, np.zeros((self.hz, self.n))])
for tau in range(self.hz):
indexes = list(range(self.T + tau - 1, self.T + tau - self.lags - 1, -1))
xT = np.vstack([1, Y[indexes].T.reshape((self.k - 1, 1), order="F")]).T
Y[self.T + tau, :] = xT @ self.betahat
self.forecast = Y[-self.hz:, :]
def _mcmc(self):
# Jacobian of the transformation of the hyperparameters that has been
# used for the constrained maximization
JJ = np.exp(self.xh) / ((1 + np.exp(self.xh)) ** 2)
JJ[0] = (self.lambda_max - self.lambda_min) * JJ[0]
if self.mnpsi:
JJ[1: self.n + 1] = (self.psi_max - self.psi_min) * JJ[1: self.n + 1]
if self.sur:
JJ[self.n + 1] = (self.theta_max - self.theta_min) * JJ[self.n + 1]
if self.noc:
JJ[self.n + 2] = (self.miu_max - self.miu_min) * JJ[self.n + 2]
else:
if self.noc:
JJ[self.n + 1] = (self.miu_max - self.miu_min) * JJ[self.n + 1]
else:
if self.sur:
JJ[1] = (self.theta_max - self.theta_min) * JJ[1]
if self.noc:
JJ[2] = (self.miu_max - self.miu_min) * JJ[2]
else:
if self.noc:
JJ[1] = (self.miu_max - self.miu_min) * JJ[1]
if self.mnalpha:
JJ[-1] = (self.alpha_max - self.alpha_min) * JJ[-1]
JJ = np.diag(JJ)
HH = JJ @ self.h @ JJ
# Regularization to assure that HH is positive-definite
eigval, eigvec = np.linalg.eig(HH)
HH = eigvec @ np.diag(np.abs(eigval)) @ eigvec.T
# recovering the posterior mode
postmode = np.array([self.lamb])
if self.mnpsi:
modepsi = np.array(self.psi)
postmode = np.concatenate((postmode, modepsi))
if self.sur:
modetheta = np.array([self.theta])
postmode = np.concatenate((postmode, modetheta))
if self.noc:
modemiu = np.array([self.miu])
postmode = np.concatenate((postmode, modemiu))
if self.mnalpha:
modealpha = np.array([self.alpha])
postmode = np.concatenate((postmode, modealpha))
# starting value of the Metropolis algorithm
P = np.zeros((self.ndraws, self.xh.shape[0]))
logMLold = -10e15
while logMLold == -10e15:
P[0, :] = np.random.multivariate_normal(mean=postmode,
cov=(self.mcmccosnt ** 2) * HH)
logMLold, betadrawold, sigmadrawold = self._logmlvar_formcmc(P[0])
# matrix to store the draws of the VAR coefficients if MCMCstorecoeff is on
if self.mcmcstorecoef:
mcmc_beta = np.zeros((self.k, self.n, self.ndraws - self.ndrwasdiscard))
mcmc_sigma = np.zeros((self.n, self.n, self.ndraws - self.ndrwasdiscard))
else:
mcmc_beta = None
mcmc_sigma = None
# matrix to store the forecasts if MCMCfcast is on
if self.mcmcfcast:
mcmc_Dforecast = np.zeros((self.hz, self.n, self.ndraws - self.ndrwasdiscard))
else:
mcmc_Dforecast = None
# Metropolis iterations
count = 0
for i in tqdm(range(1, self.ndraws), 'MCMC Iterations', disable=not self.verbose):
# draw candidate value
P[i, :] = np.random.multivariate_normal(mean=P[i - 1, :],
cov=(self.mcmccosnt ** 2) * HH)
logMLnew, betadrawnew, sigmadrawnew = self._logmlvar_formcmc(P[i, :])
if logMLnew > logMLold: # if there is an improvement, accept it
logMLold = logMLnew
count = count + 1
else: # If there is no improvement, there is a chance to accept the draw
if np.random.rand() < np.exp(logMLnew - logMLold): # If accetpted
logMLold = logMLnew
count = count + 1
else: # If not accepted, overwrite the draw with the last value
P[i, :] = P[i - 1, :]
# if MCMCfcast is on, take a new draw of the VAR coefficients with
# the old hyperparameters if have rejected the new ones
if self.mcmcfcast or self.mcmcstorecoef:
_, betadrawnew, sigmadrawnew = self._logmlvar_formcmc(P[i, :])
# stores draws of VAR coefficients if MCMCstorecoeff is on
if (i >= self.ndrwasdiscard) and self.mcmcstorecoef:
mcmc_beta[:, :, i - self.ndrwasdiscard] = betadrawnew
mcmc_sigma[:, :, i - self.ndrwasdiscard] = sigmadrawnew
# produce and store the forecasts if MCMCfcast is on
if (i >= self.ndrwasdiscard) and self.mcmcfcast:
Y = np.vstack([self.y, np.zeros((self.hz, self.n))])
for tau in range(self.hz):
indexes = list(range(self.T + tau - 1, self.T + tau - self.lags - 1, -1))
xT = np.vstack([1, Y[indexes].T.reshape((self.k - 1, 1), order="F")]).T
Y[self.T + tau, :] = xT @ betadrawnew + np.random.multivariate_normal(mean=np.zeros(self.n),
cov=sigmadrawnew)
mcmc_Dforecast[:, :, i - self.ndrwasdiscard] = Y[-self.hz:, :]
# store the draws of the hyperparameters
mcmc_lambda = P[self.ndrwasdiscard:, 0]  # Standard Minnesota prior
mcmc_psi = None
mcmc_theta = None
mcmc_miu = None
if self.mnpsi:
# diagonal elements of the scale matrix of the IW prior on the residual variance
mcmc_psi = P[self.ndrwasdiscard:, 1:self.n + 1]
if self.sur:
# std of sur prior
mcmc_theta = P[self.ndrwasdiscard:, self.n + 1]
if self.noc:
# std of noc prior
mcmc_miu = P[self.ndrwasdiscard:, self.n + 2]
else: # self.sur == 0
if self.noc:
# std of noc prior
mcmc_miu = P[self.ndrwasdiscard:, self.n + 1]
else: # self.mnpsi == 0
if self.sur:
# std of sur prior
mcmc_theta = P[self.ndrwasdiscard:, 1]
if self.noc:
# std of noc prior
mcmc_miu = P[self.ndrwasdiscard:, 2]
else: # self.sur == 0
if self.noc:
# std of noc prior
mcmc_miu = P[self.ndrwasdiscard:, 1]
if self.mnalpha:
# Lag-decaying parameter of the MN prior
mcmc_alpha = P[self.ndrwasdiscard:, -1]
self.mcmc_alpha = mcmc_alpha
mcmc_accrate = np.mean((mcmc_lambda[1:] != mcmc_lambda[:-1]))
# Save the chains as attributes
self.mcmc_beta = mcmc_beta
self.mcmc_sigma = mcmc_sigma
self.mcmc_dforecast = mcmc_Dforecast
self.mcmc_lambda = mcmc_lambda
self.mcmc_psi = mcmc_psi
self.mcmc_theta = mcmc_theta
self.mcmc_miu = mcmc_miu
self.mcmc_accrate = mcmc_accrate
def _logmlvar_formin(self, par):
"""
This function computes the log-posterior (or the logML if hyperpriors=0),
the posterior mode of the coefficients and the covariance matrix of the
residuals of the BVAR of Giannone, Lenza and Primiceri (2012)
"""
# The following avoids the warning "referenced before assignment"
theta = None
miu = None
# hyperparameters
lambda_ = self.lambda_min + (self.lambda_max - self.lambda_min) / (1 + np.exp(-par[0]))
d = self.n + 2
if not self.mnpsi:
psi = self.SS * (d - self.n - 1)
if self.sur:
theta = self.theta_min + (self.theta_max - self.theta_min) / (1 + np.exp(-par[1]))
if self.noc:
miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-par[2]))
else:
if self.noc:
miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-par[1]))
else:
psi = self.psi_min + (self.psi_max - self.psi_min) / (1 + np.exp(-par[1:self.n + 1]))
if self.sur:
theta = self.theta_min + (self.theta_max - self.theta_min) / (1 + np.exp(-par[self.n + 1]))
if self.noc:
miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-par[self.n + 2]))
else:
if self.noc:
miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-par[self.n + 1]))
if not self.mnalpha:
alpha = 2
else: # self.mnalpha == 1
alpha = self.alpha_min + (self.alpha_max - self.alpha_min) / (1 + np.exp(-par[-1]))
# Setting up the priors
omega = np.zeros(self.k)
omega[0] = self.vc
for i in range(1, self.lags + 1):
omega[1 + (i - 1) * self.n: 1 + i * self.n] = \
(d - self.n - 1) * (lambda_ ** 2) * (1 / (i ** alpha)) / psi
# Prior scale matrix for the covariance of the shocks
PSI = np.diag(psi)
# dummy observations if sur and / or noc = 1
Td = 0
xdsur = np.array([]).reshape((0, self.k))
ydsur = np.array([]).reshape((0, self.n))
xdnoc = np.array([]).reshape((0, self.k))
ydnoc = np.array([]).reshape((0, self.n))
y = self.y.copy()
x = self.x.copy()
T = self.T
if self.sur:
xdsur = (1 / theta) * np.tile(self.y0, (1, self.lags))
xdsur = np.hstack((np.array([[1 / theta]]), xdsur))
ydsur = (1 / theta) * self.y0
y = np.vstack((y, ydsur))
x = np.vstack((x, xdsur))
Td = Td + 1
if self.noc:
ydnoc = (1 / miu) * np.diag(self.y0)
# Set to zero the prior mean on the first own lag for variables selected in the vector pos
if self.pos is not None:
ydnoc[self.pos, self.pos] = 0
xdnoc = (1 / miu) * np.tile(np.diag(self.y0), (1, self.lags))
xdnoc = np.hstack((np.zeros((self.n, 1)), xdnoc))
y = np.vstack((y, ydnoc))
x = np.vstack((x, xdnoc))
Td = Td + self.n
T = T + Td
# ===== OUTPUT ===== #
# Minnesota prior mean
b = np.zeros((self.k, self.n))
diagb = np.ones(self.n)
# Set to zero the prior mean on the first own lag for variables selected in the vector pos
if self.pos is not None:
diagb[self.pos] = 0
b[1:self.n + 1, :] = np.diag(diagb)
# posterior mode of the VAR coefficients
matA = x.T @ x + np.diag(1 / omega)
matB = x.T @ y + np.diag(1 / omega) @ b
betahat = np.linalg.solve(matA, matB)  # np.linalg.solve is more efficient than explicitly inverting the matrix
# VAR residuals
epshat = y - x @ betahat
# Posterior mode of the covariance matrix
sigmahat = (epshat.T @ epshat + PSI + (betahat - b).T @ np.diag(1 / omega) @ (betahat - b))
sigmahat = sigmahat / (T + d + self.n + 1)
# logML
aaa = np.diag(np.sqrt(omega)) @ x.T @ x @ np.diag(np.sqrt(omega))
bbb = np.diag(1 / np.sqrt(psi)) @ (epshat.T @ epshat + (betahat - b).T @ np.diag(1/omega) @
(betahat-b)) @ np.diag(1 / np.sqrt(psi))
eigaaa = np.linalg.eig(aaa)[0].real
eigaaa[eigaaa < 1e-12] = 0
eigaaa = eigaaa + 1
eigbbb = np.linalg.eig(bbb)[0].real
eigbbb[eigbbb < 1e-12] = 0
eigbbb = eigbbb + 1
logML = - self.n * T * np.log(np.pi) / 2
logML = logML + sum(gammaln((T + d - np.arange(self.n)) / 2) - gammaln((d - np.arange(self.n)) / 2))
logML = logML - T * sum(np.log(psi)) / 2
logML = logML - self.n * sum(np.log(eigaaa)) / 2
logML = logML - (T + d) * sum(np.log(eigbbb)) / 2
if self.sur or self.noc:
yd = np.vstack((ydsur, ydnoc))
xd = np.vstack((xdsur, xdnoc))
# prior mode of the VAR coefficients
betahatd = b
# VAR residuals at the prior mode
epshatd = yd - xd @ betahatd
aaa = np.diag(np.sqrt(omega)) @ xd.T @ xd @ np.diag(np.sqrt(omega))
bbb = np.diag(1 / np.sqrt(psi)) @ (epshatd.T @ epshatd + (betahatd - b).T @ np.diag(1 / omega) @
(betahatd - b)) @ np.diag(1 / np.sqrt(psi))
eigaaa = np.linalg.eig(aaa)[0].real
eigaaa[eigaaa < 1e-12] = 0
eigaaa = eigaaa + 1
eigbbb = np.linalg.eig(bbb)[0].real
eigbbb[eigbbb < 1e-12] = 0
eigbbb = eigbbb + 1
# normalizing constant
norm = - self.n * Td * np.log(np.pi) / 2
norm = norm + sum(gammaln((Td + d - np.arange(self.n)) / 2) - gammaln((d - np.arange(self.n)) / 2))
norm = norm - Td * sum(np.log(psi)) / 2
norm = norm - self.n * sum(np.log(eigaaa)) / 2
norm = norm - (T + d) * sum(np.log(eigbbb)) / 2
logML = logML - norm
if self.hyperpriors:
logML = logML + self._log_gammma_pdf(x=lambda_,
k=self.priorcoef.loc['lambda', 'r_k'],
theta=self.priorcoef.loc['lambda', 'r_theta'])
if self.sur:
logML = logML + self._log_gammma_pdf(x=theta,
k=self.priorcoef.loc['theta', 'r_k'],
theta=self.priorcoef.loc['theta', 'r_theta'])
if self.noc:
logML = logML + self._log_gammma_pdf(x=miu,
k=self.priorcoef.loc['miu', 'r_k'],
theta=self.priorcoef.loc['miu', 'r_theta'])
if self.mnpsi:
toadd = self._log_invgammma_to_pdf(x=psi / (d - self.n - 1),
alpha=self.priorcoef.loc['alpha', 'PSI'],
beta=self.priorcoef.loc['beta', 'PSI'])
logML = logML + sum(toadd)
return logML, betahat, sigmahat
def _logmlvar_formcmc(self, par):
"""
This function computes the log-posterior (or the logML if hyperpriors=0),
and draws from the posterior distribution of the coefficients and of the
covariance matrix of the residuals of the BVAR of Giannone, Lenza and
Primiceri (2012)
"""
# hyperparameters
lambda_ = par[0]
d = self.n + 2
theta = self.theta_min
miu = self.miu_min
if not self.mnpsi:
psi = self.SS * (d - self.n - 1)
if self.sur:
theta = par[1]
if self.noc:
miu = par[2]
else: # if self.sur == 0
if self.noc:
miu = par[1]
else:
psi = par[1:self.n + 1]
if self.sur:
theta = par[self.n + 1]
if self.noc:
miu = par[self.n + 2]
else:
if self.noc:
miu = par[self.n + 1]
if not self.mnalpha:
alpha = 2
else:
alpha = par[-1]
# Check if parameters are outside of parameter space and, if so, return a very low value of the posterior
cond_lower_bound = np.any([lambda_ < self.lambda_min,
np.any(psi < self.psi_min),
theta < self.theta_min,
miu < self.miu_min,
alpha < self.alpha_min])
cond_upper_bound = np.any([lambda_ > self.lambda_max,
np.any(psi > self.psi_max),
theta > self.theta_max,
miu > self.miu_max])
if cond_lower_bound or cond_upper_bound:
logML = -10e15
betadraw = None
drawSIGMA = None
return logML, betadraw, drawSIGMA
else:
# Priors
omega = np.zeros(self.k)
omega[0] = self.vc
for i in range(1, self.lags + 1):
omega[1 + (i - 1) * self.n: 1 + i * self.n] = \
((d - self.n - 1) * (lambda_ ** 2) * (1 / (i ** alpha))) / psi
# Prior scale matrix for the covariance of the shocks
PSI = np.diag(psi)
Td = 0
xdsur = np.array([]).reshape((0, self.k))
ydsur = np.array([]).reshape((0, self.n))
xdnoc = np.array([]).reshape((0, self.k))
ydnoc = np.array([]).reshape((0, self.n))
# dummy observations if sur and / or noc = 1
y = self.y.copy()
x = self.x.copy()
T = self.T
if self.sur:
xdsur = (1 / theta) * np.tile(self.y0, (1, self.lags))
xdsur = np.hstack((np.array([[1 / theta]]), xdsur))
ydsur = (1 / theta) * self.y0
y = np.vstack((y, ydsur))
x = np.vstack((x, xdsur))
Td = Td + 1
if self.noc:
ydnoc = (1 / miu) * np.diag(self.y0)
# Set to zero the prior mean on the first own lag for variables selected in the vector pos
if self.pos is not None:
    ydnoc[self.pos, self.pos] = 0
xdnoc = (1 / miu) * np.tile(np.diag(self.y0), (1, self.lags))
xdnoc = np.hstack((np.zeros((self.n, 1)), xdnoc))
y = np.vstack((y, ydnoc))
x = np.vstack((x, xdnoc))
Td = Td + self.n
# ===== Output =====
# Minnesota prior mean
b = np.zeros((self.k, self.n))
diagb = np.ones(self.n)
# Set to zero the prior mean on the first own lag for variables selected in the vector pos
if self.pos is not None:
    diagb[self.pos] = 0
b[1:self.n + 1, :] = np.diag(diagb)
# self.b = b
# posterior mode of the VAR coefficients
matA = x.T @ x + np.diag(1 / omega)
matB = x.T @ y + np.diag(1 / omega) @ b
betahat = np.linalg.solve(matA, matB)  # np.linalg.solve is more efficient than explicitly inverting the matrix
# VAR residuals
epshat = y - x @ betahat
# logMl
T = T + Td
aaa = np.diag(np.sqrt(omega)) @ x.T @ x @ np.diag(np.sqrt(omega))
bbb = np.diag(1 / np.sqrt(psi)) @ (epshat.T @ epshat + (betahat - b).T @ np.diag(1 / omega) @
(betahat - b)) @ np.diag(1 / np.sqrt(psi))
eigaaa = np.linalg.eig(aaa)[0].real
eigaaa[eigaaa < 1e-12] = 0
eigaaa = eigaaa + 1
eigbbb = np.linalg.eig(bbb)[0].real
eigbbb[eigbbb < 1e-12] = 0
eigbbb = eigbbb + 1
logML = - self.n * T * np.log(np.pi) / 2
logML = logML + sum(gammaln((T + d - np.arange(self.n)) / 2) - gammaln((d - np.arange(self.n)) / 2))
logML = logML - T * sum(np.log(psi)) / 2
logML = logML - self.n * sum(np.log(eigaaa)) / 2
logML = logML - (T + d) * sum(np.log(eigbbb)) / 2
# More terms for logML in case of more priors
if self.sur or self.noc:
yd = np.vstack((ydsur, ydnoc))
xd = np.vstack((xdsur, xdnoc))
# prior mode of the VAR coefficients
betahatd = b
# VAR residuals at the prior mode
epshatd = yd - xd @ betahatd
aaa = np.diag(np.sqrt(omega)) @ xd.T @ xd @ np.diag(np.sqrt(omega))
bbb = np.diag(1 / np.sqrt(psi)) @ (epshatd.T @ epshatd + (betahatd - b).T @ np.diag(1 / omega) @
(betahatd - b)) @ np.diag(1 / np.sqrt(psi))
eigaaa = np.linalg.eig(aaa)[0].real
eigaaa[eigaaa < 1e-12] = 0
eigaaa = eigaaa + 1
eigbbb = np.linalg.eig(bbb)[0].real
eigbbb[eigbbb < 1e-12] = 0
eigbbb = eigbbb + 1
# normalizing constant
norm = - self.n * Td * np.log(np.pi) / 2
norm = norm + sum(gammaln((Td + d - np.arange(self.n)) / 2) - gammaln((d - np.arange(self.n)) / 2))
norm = norm - Td * sum(np.log(psi)) / 2
norm = norm - self.n * sum(np.log(eigaaa)) / 2
norm = norm - (T + d) * sum(np.log(eigbbb)) / 2
logML = logML - norm
if self.hyperpriors:
logML = logML + self._log_gammma_pdf(x=lambda_,
k=self.priorcoef.loc['lambda', 'r_k'],
theta=self.priorcoef.loc['lambda', 'r_theta'])
if self.sur:
logML = logML + self._log_gammma_pdf(x=theta,
k=self.priorcoef.loc['theta', 'r_k'],
theta=self.priorcoef.loc['theta', 'r_theta'])
if self.noc:
logML = logML + self._log_gammma_pdf(x=miu,
k=self.priorcoef.loc['miu', 'r_k'],
theta=self.priorcoef.loc['miu', 'r_theta'])
if self.mnpsi:
toadd = self._log_invgammma_to_pdf(x=psi / (d - self.n - 1),
alpha=self.priorcoef.loc['alpha', 'PSI'],
beta=self.priorcoef.loc['beta', 'PSI'])
logML = logML + sum(toadd)
# takes a draw from the posterior of SIGMA and beta, if draw is on
draw = self.mcmcfcast or self.mcmcstorecoef
if not draw:
betadraw = None
drawSIGMA = None
else:
S = PSI + epshat.T @ epshat + (betahat - b).T @ np.diag(1 / omega) @ (betahat - b)
E, V = np.linalg.eig(S)
Sinv = V @ np.diag(1 / np.abs(E)) @ V.T
eta = np.random.multivariate_normal(mean=np.zeros(self.n),
cov=Sinv,
size=T+d)
drawSIGMA = np.linalg.solve(eta.T @ eta, np.eye(self.n))
cholSIGMA = self._cholred((drawSIGMA + drawSIGMA.T) / 2)
cholZZinv = self._cholred(np.linalg.solve(x.T @ x + np.diag(1 / omega), np.eye(self.k)))
betadraw = betahat + cholZZinv.T @ np.random.normal(size=betahat.shape) @ cholSIGMA
return logML, betadraw, drawSIGMA
@staticmethod
def _gamma_coef(mode, sd):
k = (2 + mode ** 2 / sd ** 2 + np.sqrt((4 + mode ** 2 / sd ** 2) * mode ** 2 / sd ** 2)) / 2
theta = np.sqrt(sd ** 2 / k)
return k, theta
@staticmethod
def _log_gammma_pdf(x, k, theta):
r = (k - 1) * np.log(x) - x / theta - k * np.log(theta) - gammaln(k)
return r
@staticmethod
def _log_invgammma_to_pdf(x, alpha, beta):
r = alpha * np.log(beta) - (alpha + 1) * np.log(x) - beta * (1 / x) - gammaln(alpha)
return r
@staticmethod
def _cholred(s):
d, v = np.linalg.eig((s + s.T) / 2)
d = d.real
scale = np.diag(s).mean() * 1e-12
J = d > scale
C = np.zeros(s.shape)
C[J, :] = (v[:, J] @ np.diag(d[J] ** 0.5)).T
return C
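# Quick self-check sketch (my addition): _gamma_coef maps a (mode, sd) pair to the (k, theta)
# parameters of a Gamma distribution, whose mode is (k - 1) * theta and whose standard
# deviation is sqrt(k) * theta, so the round trip should recover the requested values.
def _check_gamma_coef(mode=0.2, sd=0.4):
    k, theta = BVARGLP._gamma_coef(mode, sd)
    assert abs((k - 1) * theta - mode) < 1e-8
    assert abs(np.sqrt(k) * theta - sd) < 1e-8
    return k, theta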
class OLS1(object):
"""
This is a simple OLS regression with a leaner, simpler layout
"""
def __init__(self, y, x):
self.x = x
self.y = y
nobsy = y.shape[0]
nobs, nvar = x.shape
assert nobsy == nobs, 'x and y must have the same number of rows'
self.nobs = nobs
self.nvar = nvar
self.XX = x.T @ x
self.invXX = np.linalg.inv(self.XX)
self.bhatols = self.invXX @ (x.T @ y)
self.yhatols = x @ self.bhatols
self.resols = y - self.yhatols
self.sig2hatols = (self.resols.T @ self.resols) / (nobs - nvar)
self.sigbhatols = self.sig2hatols * self.invXX
self.r2 = np.var(self.yhatols) / np.var(y)
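# Minimal usage sketch (my addition) of the OLS1 helper: fit an AR(1) with intercept on a
# simulated series; sig2hatols is the residual-variance estimate that feeds the psi prior above.
def _ols1_example(T=200, rho=0.8, seed=0):
    rng = np.random.default_rng(seed)
    y = np.zeros(T)
    for t in range(1, T):
        y[t] = rho * y[t - 1] + rng.standard_normal()
    x = np.hstack((np.ones((T - 1, 1)), y[:-1].reshape(-1, 1)))
    ar1 = OLS1(y[1:], x)
    return ar1.bhatols, ar1.sig2hatols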
class CRBVAR(object):
def __init__(self, data, lags, hz=24, vc=10e6, stationary_prior=None, crit=1e-16,
hyperpriors=True, mnpsi=True, mnalpha=False, sur=True, noc=True,
fcast=False, mcmc=False, ndraws=20000, ndrawsdiscard=None, mcmcconst=1,
mcmcfcast=True, mcmcstorecoef=True, verbose=False, resample_method='full'):
"""
This class implements the "Cube-Root" Bayesian VAR from Cimadomo, Giannone, Lenza, Monti and Sokol (2020).
The main idea of the model is to use the BVARGLP class to estimate a quarterly VAR and "monthlize" it into
a state-space model capable of dealing with missing data and mixed-frequency data.
This class only accepts data with at least one monthly time series. Quarterly variables are allowed but
must be in the same pandas.DataFrame with a monthly index.
@param hyperpriors: False = no priors on hyperparameters
True = reference priors on hyperparameters (default)
[NOTE: hyperpriors on psi calibrated for data expressed in
4 x logs, such as 4 x log(GDP). Thus if interest rate is in
percentage, divide by 100]
@param vc: prior variance in the MN prior for the coefficients multiplying
the constant term (Default: vc=10e6)
@param stationary_prior: names of the variables that enter the VAR in first
differences and for which one might want to set the prior mean
on the coefficient on the first own lag in the MN prior and the
prior mean of the sum-of-coefficients prior to 0 (instead of
the typical 1)
@param mnpsi: False = diagonal elements of the scale matrix of the IW prior on
the covariance of the residuals NOT treated as
hyperparameters (set to the residual variance of an AR(1))
True = diagonal elements of the scale matrix of the IW prior on
the covariance of the residuals treated as hyperparameters (default)
@param mnalpha: False = Lag-decaying parameter of the MN prior set to 2 and
NOT treated as hyperparameter (default)
True = Lag-decaying parameter of the MN prior treated as
hyperparameter
@param sur: False = single-unit-root prior is OFF
True = single-unit-root prior is ON and its std is treated as an
hyperparameter (default)
@param noc: False = no-cointegration (sum-of coefficients) prior is OFF
True = no-cointegration (sum-of coefficients) is ON and its std is
treated as an hyperparameter (default)
@param fcast: False = does not generate forecasts at the posterior mode
True = generates forecasts at the posterior mode (default)
@param hz: number of quarters for which it generates forecasts (default: hz=8)
@param mcmc: False = does not run the MCMC (default)
True = runs the MCMC after the maximization
@param ndraws: number of draws in the MCMC (default: Ndraws=20000)
@param ndrawsdiscard: number of draws initially discarded to allow convergence
in the MCMC (default=Ndraws/2)
@param mcmcconst: scaling constant for the MCMC (should be calibrated to achieve
an acceptance rate of approx 25%) (default: MCMCconst=1)
@param mcmcfcast: False = does not generate forecasts when running the MCMC
True = generates forecasts while running the MCMC
(for each draw of the hyperparameters the code takes a
draw of the VAR coefficients and shocks, and generates
forecasts at horizons hz) (default).
@param mcmcstorecoef: False = does not store the MCMC draws of the VAR
coefficients and residual covariance matrix
True = stores the MCMC draws of the VAR coefficients and
residual covariance matrix (default)
@param verbose: Prints relevant information during the estimation.
@param crit: precision for convergence criteria.
@param resample_method: 'full' only includes quarters that have all of its data available.
'last' uses the last observation available for each quarter.
"""
assert data.index.inferred_freq == 'M', "input 'data' must be monthly and recognized by pandas."
self.data = data
if resample_method == 'full':
self.data_quarterly = self._get_quarterly_df()
elif resample_method == 'last':
self.data_quarterly = data.resample('Q').last().dropna()
else:
raise NotImplementedError('resample method not implemented')
self.lags = lags
self.hyperpriors = hyperpriors
self.vc = vc
self.stationary_prior = stationary_prior
self.mnalpha = mnalpha
self.mnpsi = mnpsi
self.sur = sur
self.noc = noc
self.fcast = fcast
self.hz = hz
self.mcmc = mcmc
self.ndraws = ndraws
self.ndrwasdiscard = int(ndraws/2) if ndrawsdiscard is None else ndrawsdiscard
self.mcmccosnt = mcmcconst
self.mcmcfcast = mcmcfcast
self.mcmcstorecoef = mcmcstorecoef
self.verbose = verbose
self.crit = crit
self.bvar_quarterly = BVARGLP(data=self.data_quarterly,
lags=lags,
hyperpriors=hyperpriors,
vc=vc,
stationary_prior=stationary_prior,
mnpsi=mnpsi,
mnalpha=mnalpha,
sur=sur,
noc=noc,
fcast=fcast,
hz=hz,
mcmc=mcmc,
ndraws=ndraws,
ndrawsdiscard=ndrawsdiscard,
mcmcconst=mcmcconst,
mcmcfcast=mcmcfcast,
mcmcstorecoef=mcmcstorecoef,
verbose=verbose,
crit=crit)
betahat = self.bvar_quarterly.betahat
sigmahat = self.bvar_quarterly.sigmahat
k, n = betahat.shape
_, _, _, aa, _, qq, c2, c1, CC, _, _, _ = self._build_monthly_ss(betahat, sigmahat)
qqKF = np.zeros((n * lags, n * lags))
qqKF[:n, :n] = qq.real
# Next line is just a weird reshaping of the starting state
initX = np.flip(self.data_quarterly.iloc[:lags].values, axis=0).T.reshape(-1, 1, order='F').reshape(-1)
        initV = np.eye(initX.shape[0])
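        # Hedged usage sketch (added; the wrapper class name `MixedFreqBVAR` and the input
        # file are illustrative assumptions, only the constructor arguments documented above
        # come from this module):
        #   monthly_df = pd.read_csv('macro_monthly.csv', index_col=0, parse_dates=True)
        #   model = MixedFreqBVAR(data=monthly_df, lags=5, mcmc=True, ndraws=20000,
        #                         resample_method='full')
        #   # the quarterly posterior-mode estimates then live in model.bvar_quarterly
        #   # (betahat, sigmahat), as used in the state-space construction above.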
from __future__ import print_function
import tensorflow as tf # Deep Learning library
import numpy as np # Handle matrices
import random # Handling random number generation
import time
import skimage as skimage
from skimage import transform, color, exposure
from skimage.transform import rotate
from skimage.viewer import ImageViewer
import sys
tf.compat.v1.disable_eager_execution()
sys.path.append("game/")
from Memory import Memory
from SumTree import SumTree
import wrapped_flappy_bird as gamer
from collections import deque# Ordered collection with ends
import matplotlib.pyplot as plt # Display graphs
import warnings # This ignores all the warning messages that are normally printed during training because of skimage
warnings.filterwarnings('ignore')
'''
1. Convert image to grayscale
2. Resize image to 80x80
3. Stack last 4 frames to produce an 80x80x4 input array for network
'''
def preprocess_frame(frame):
# Crop the screen (remove part that contains no information)
# [Up: Down, Left: right]
frame = skimage.color.rgb2gray(frame)
frame = skimage.transform.resize(frame,(80,80))
frame = skimage.exposure.rescale_intensity(frame,out_range=(0,255))
frame = frame / 255.0
return frame # 80x80x1 frame
stack_size = 4 # We stack 4 frames
stacked_frames = deque([np.zeros((80,80), dtype=np.int) for i in range(stack_size)], maxlen=4)
def stack_frames(stacked_frames, state, is_new_episode):
frame = preprocess_frame(state)
if is_new_episode:
stacked_frames = deque([np.zeros((80,80), dtype=np.int) for i in range(stack_size)], maxlen=4)
stacked_frames.append(frame)
stacked_frames.append(frame)
stacked_frames.append(frame)
stacked_frames.append(frame)
stacked_state = np.stack(stacked_frames, axis=2)
else:
stacked_frames.append(frame)
stacked_state = np.stack(stacked_frames, axis=2)
return stacked_state, stacked_frames
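# Hedged sanity check (added, not part of the original script): the preprocessing notes above
# promise an 80x80x4 state tensor; this helper builds one from a dummy RGB frame. The 288x512
# screen size is an assumption about the Flappy Bird frame, not taken from this file.
def _demo_stacked_state_shape():
    dummy_frame = np.random.randint(0, 255, size=(288, 512, 3)).astype(np.uint8)
    frames = deque([np.zeros((80, 80), dtype=int) for _ in range(stack_size)], maxlen=4)
    state, frames = stack_frames(frames, dummy_frame, True)
    assert state.shape == (80, 80, 4)
    return state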
#MODEL HYPERPARAMETERS
state_size = [80,80,4] #(width, height, channels)
action_size = 2 #2 possible actions
learning_rate = 0.00025
#TRAINING HYPERPARAMETERS
total_episodes = 5000
max_steps = 5000
batch_size = 64
#FIXED Q TARGET HYPERPARAMETERS
max_tau = 10000
#Exploration parameters for epsilon greedy strategy
explore_start = 1.0
explore_stop = 0.01
decay_rate = 0.00005 #exponential decay rate for exploration prob
#Q Learning hyperparameters
gamma = 0.95 #discounting rate
#MEMORY HYPERPARAMETERS
pretrain_length = 100000  # number of experiences stored in the Memory when it is initialized for the first time
memory_size = 100000
#ADJUSTABLE
training = False
episode_render = False
FRAME_PER_ACTION = 1
class DDDQNetwork:
def __init__(self, state_size, action_size, learning_rate, name):
self.state_size = state_size
self.action_size = action_size
self.learning_rate = learning_rate
self.name = name
with tf.compat.v1.variable_scope(self.name):
#create placeholder
self.inputs_ = tf.compat.v1.placeholder(tf.float32, [None, *state_size], name="inputs_")
self.ISWeights_ = tf.compat.v1.placeholder(tf.float32, [None, 1], name="ISWeights_")
self.actions_ = tf.compat.v1.placeholder(tf.float32, [None, action_size], name="actions_")
#target_Q is the R(s,a) + ymax Qhat(s', a')
self.target_Q = tf.compat.v1.placeholder(tf.float32, [None], name='target')
#Input is 80*80*4
self.conv1 = tf.compat.v1.layers.conv2d(inputs = self.inputs_,
filters = 32,
kernel_size = [8,8],
strides = [4,4],
padding = "VALID",
kernel_initializer = tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"),
name = "conv1")
self.conv1_out = tf.nn.elu(self.conv1, name="conv1_out")
## --> [20, 20, 32]
self.conv2 = tf.compat.v1.layers.conv2d(inputs = self.conv1_out,
filters = 64,
kernel_size = [4,4],
strides = [2,2],
padding = "VALID",
kernel_initializer = tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"),
name = "conv2")
self.conv2_out = tf.nn.elu(self.conv2, name="conv2_out")
self.conv3 = tf.compat.v1.layers.conv2d(inputs = self.conv2_out,
filters = 64,
kernel_size = [3,3],
strides = [1,1],
padding = "VALID",
kernel_initializer = tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"),
name = "conv3")
self.conv3_out = tf.nn.elu(self.conv3, name="conv3_out")
self.flatten = tf.compat.v1.layers.flatten(self.conv3_out)
## Here we separate into two streams
#The one that calculate V(s)
self.value_fc = tf.compat.v1.layers.dense(inputs = self.flatten,
units = 256,
activation = tf.nn.elu,
kernel_initializer = tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"),
name = "value_fc")
self.value = tf.compat.v1.layers.dense(inputs = self.value_fc,
kernel_initializer = tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"),
units = 1,
activation = None,
name = "value")
#The one that calculate A(s,a)
self.advantage_fc = tf.compat.v1.layers.dense(inputs = self.flatten,
units = 256,
activation = tf.nn.elu,
kernel_initializer = tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"),
name = "advantage_fc")
self.advantage = tf.compat.v1.layers.dense(inputs = self.advantage_fc,
kernel_initializer = tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"),
units = self.action_size,
activation = None,
name = "advantage")
            #Aggregating layer
#Q(s,a) = V(s) +(A(s,a) - 1/|A| * sum A(s,a'))
self.output = self.value + tf.subtract(self.advantage, tf.reduce_mean(input_tensor=self.advantage, axis=1, keepdims=True))
#Q is our predicted Q value
self.Q = tf.reduce_sum(input_tensor=tf.multiply(self.output, self.actions_), axis=1)
#The loss is modified because of PER
self.absolute_errors = tf.abs(self.target_Q - self.Q)
self.loss = tf.reduce_mean(input_tensor=self.ISWeights_ * tf.math.squared_difference(self.target_Q, self.Q))
self.optimizer = tf.compat.v1.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss)
# Reset the graph
tf.compat.v1.reset_default_graph()
#Instantiate the DQNetwork
DQNetwork = DDDQNetwork(state_size, action_size, learning_rate, name="DQNetwork")
#Instantiate the target network
TargetNetwork = DDDQNetwork(state_size, action_size, learning_rate, name="TargetNetwork")
#deal with the empty memory problem by pre-populating our memory with random actions
memory = Memory(memory_size)
# Render the environment
game_state = gamer.GameState()
nothing = [1, 0]
up = [0, 1]
possible_actions = [nothing, up]
if training == True:
for i in range(pretrain_length):
# If it's the first step
if i == 0: # First we need a state
do_nothing = [1, 0]
state, reward, done = game_state.frame_step(do_nothing)
state, stacked_frames = stack_frames(stacked_frames, state, True)
# Random action
index = random.randrange(10);
if (index<9):
action = [1,0]
else:
action = [0,1]
# Get the rewards
next_state, reward, done = game_state.frame_step(action)
# If we're dead
if done:
# We finished the episode
next_state = np.zeros(state.shape)
# Add experience to memory
#experience = np.hstack((state, [action, reward], next_state, done))
experience = state, action, reward, next_state, done
memory.store(experience)
# Start a new episode
game_state = gamer.GameState()
# First we need a state
do_nothing = [1, 0]
state, reward, done = game_state.frame_step(do_nothing)
state, stacked_frames = stack_frames(stacked_frames, state, True)
else:
# Get the next state
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
# Add experience to memory
experience = state, action, reward, next_state, done
memory.store(experience)
# Our state is now the next_state
state = next_state
#Setup TensorBoard Writer
writer = tf.compat.v1.summary.FileWriter("/tensorboard/dddqn/1")
##Losses
tf.compat.v1.summary.scalar("Loss", DQNetwork.loss)
write_op = tf.compat.v1.summary.merge_all()
'''
Train the Agent
'''
def predict_action(explore_start, explore_stop, decay_rate, decay_step, state, actions):
#EPSILON GREEDY STRATEGY
exp_exp_tradeoff = np.random.rand()
#improved EG
explore_probability = explore_stop + (explore_start - explore_stop) * np.exp(-decay_rate*decay_step)
if (explore_probability > exp_exp_tradeoff):
#print("----------Random Action----------")
index = random.randrange(10);
if (index<9):
action = [1,0]
else:
action = [0,1]
#print(action)
else:
Qs = sess.run(DQNetwork.output, feed_dict = {DQNetwork.inputs_: state.reshape((1, *state.shape))})
choice = np.argmax(Qs)
action = possible_actions[int(choice)]
return action, explore_probability
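# Hedged illustration (added): the improved epsilon-greedy schedule above decays exponentially
# from explore_start towards explore_stop. With the hyperparameters set earlier
# (explore_start=1.0, explore_stop=0.01, decay_rate=0.00005):
#   decay_step = 0       -> epsilon = 1.00
#   decay_step = 20000   -> epsilon ~ 0.01 + 0.99*exp(-1) ~ 0.37
#   decay_step = 100000  -> epsilon ~ 0.01 + 0.99*exp(-5) ~ 0.017
def _decayed_epsilon(decay_step):
    return explore_stop + (explore_start - explore_stop) * np.exp(-decay_rate * decay_step)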
#This function helps us to copy one set of variables to another
def update_target_graph():
# Get the parameters of our DQNNetwork
from_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, "DQNetwork")
# Get the parameters of our Target_network
to_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, "TargetNetwork")
op_holder = []
# Update our target_network parameters with DQNNetwork parameters
for from_var,to_var in zip(from_vars,to_vars):
op_holder.append(to_var.assign(from_var))
return op_holder
# Saver will help us to save our model
saver = tf.compat.v1.train.Saver()
if training == True:
with tf.compat.v1.Session() as sess:
# Initialize the variables
sess.run(tf.compat.v1.global_variables_initializer())
decay_step = 0
tau = 0
# Update the parameters of our TargetNetwork with DQN_weights
update_target = update_target_graph()
sess.run(update_target)
for episode in range(total_episodes):
step = 0
episode_rewards = []
# Make a new episode and observe the first state
game_state = gamer.GameState()
do_nothing = [1, 0]
state, reward, done = game_state.frame_step(do_nothing)
# Remember that stack frame function also call our preprocess function.
state, stacked_frames = stack_frames(stacked_frames, state, True)
while step < max_steps:
step += 1
tau += 1
decay_step +=1
action, explore_probability = predict_action(explore_start, explore_stop, decay_rate, decay_step, state, possible_actions)
# Do the action
next_state, reward, done = game_state.frame_step(action)
# Add the reward to total reward
episode_rewards.append(reward)
# If the game is finished
if done:
# the episode ends so no next state
next_state = np.zeros((120,140), dtype=np.int)
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
# Set step = max_steps to end the episode
step = max_steps
# Get the total reward of the episode
total_reward = np.sum(episode_rewards)
print('Episode: {}'.format(episode),
'Total reward: {}'.format(total_reward),
'Training loss: {:.4f}'.format(loss),
'Explore P: {:.4f}'.format(explore_probability))
# Add experience to memory
experience = state, action, reward, next_state, done
memory.store(experience)
else:
# Stack the frame of the next_state
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
# Add experience to memory
experience = state, action, reward, next_state, done
memory.store(experience)
# st+1 is now our current state
state = next_state
### LEARNING PART
# Obtain random mini-batch from memory
tree_idx, batch, ISWeights_mb = memory.sample(batch_size)
states_mb = np.array([each[0][0] for each in batch], ndmin=3)
                actions_mb = np.array([each[0][1] for each in batch])
#from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as pf
import matplotlib.cm as cm
import warnings
import multiprocess as mtp
from numpy import linalg as LA
from scipy import signal as scp
import scipy.ndimage.filters as med
from SLIT import Lens
from SLIT import tools
warnings.simplefilter("ignore")
##SLIT: Sparse Lens Inversion Technique
def SLIT(Y, Fkappa, kmax, niter, size, PSF, PSFconj, S0 = [0], levels = [0], scheme = 'FB',
mask = [0], lvl = 0, weightS = 1, noise = 'gaussian', tau = 0, verbosity = 0, nweights = 1):
##DESCRIPTION:
## Function that estimates the source light profile from an image of a lensed source given the mass density profile.
##
##INPUTS:
## -img: a 2-D image of a lensed source given as n1xn2 numpy array.
## -Fkappa: an array giving the mapping between lens and source. This array is calculated from the lens mass density
## using tools from SLIT.Lens
    ##         -kmax: the detection threshold in units of noise levels. We usually set this value to 5 to get a 5 sigma
## detection threshold.
## -niter: maximal number of iterations of the algorithm.
    ##         -size: resolution factor between lens and source grids such that the size of the output source
    ##                 will be n1*size x n2*size
## -PSF: the point spread function of the observation provided as a 2D array.
## -PSFconj: The conjugate of the PSF. Usually computed via np.real(np.fft.ifft2(np.conjugate(np.fft.fft2(PSF0[:-1,:-1]))))
    ##                    but the user has to make sure that the conjugate is well centered.
##
##OPTIONS:
## -levels: an array that contains the noise levels at each band of the wavelet decomposition of the source.
## If not provided, the routine will compute the levels and save them in a fits file 'Noise_levels.fits'
## so that they can be used at a later time. This option allows to save time when running the same
## experiment several times.
    ##         -mask: an array of zeros and ones with size ns1xns2. The zeros will stand for masked data.
##
##OUTPUTS:
## -S: the source light profile.
## -FS: the lensed version of the estimated source light profile
##
##EXAMPLE:
## S,FS = SLIT(img, Fkappa, 5, 100, 1, PSF, PSFconj)
n1,n2 = np.shape(Y)
PSFconj = PSF.T
#Size of the source
ns1,ns2 = int(n1*size), int(n2*size)
#Number of starlet scales in source plane
if lvl ==0:
lvl = np.int(np.log2(ns2))
else:
lvl = np.min([lvl,np.int(np.log2(ns2))])
lvlg = np.int(np.log2(n2))
#Masking if required
if np.sum(mask) == 0:
mask = np.ones((n1,n2))
#Noise in image plane
if noise == 'gaussian':
        print('noise statistic is gaussian')
sigma0 = tools.MAD(Y)
print('sigma: ', sigma0)
if noise == 'poisson':
print('noise statistic is poisson')
sigma0 = tools.MAD_poisson(Y,tau)
if (noise == 'G+P') or (noise == 'P+G'):
        print('noise statistic is poisson and gaussian mixture')
sigma0 = np.sqrt(tools.MAD_poisson(Y,tau, lvlg)**2+tools.MAD(Y)**2)
plt.imshow(sigma0); plt.colorbar(); plt.show()
#Mapping of an all-at-one image to source plane
lensed = lens_one(Fkappa, n1,n2, size)
#estimation of the frame of the image in source plane
supp = np.zeros((lvl,lensed.shape[0],lensed.shape[1]))
supp[:, lensed/lensed==1] = 1
#Useful functions
def Finv_apply(I):
return Lens.image_to_source(I, size, Fkappa, lensed = lensed)
def Lens_op2(I):
return Lens.image_to_source(I, size, Fkappa, lensed = lensed, square = 1)
def F_apply(Si):
return Lens.source_to_image(Si, n1, n2,Fkappa)
def PSF_apply(i):
return scp.fftconvolve(i,PSF,mode = 'same')
def PSFT_apply(ii):
return scp.fftconvolve(ii,PSFconj,mode = 'same')
def transform(x):
coeffs, _ = tools.wave_transform(x, lvl, newwave=1)
return coeffs
def inverse(x):
return tools.iuwt(x, newwave=1)
#Forward operator
def F_op(X):
return PSF_apply(F_apply(X))
#Inverse operator
def I_op(X):
return Finv_apply(PSFT_apply(X))
#Forward operator
def FW_op(X):
return PSF_apply(F_apply(inverse(X)))
#Inverse operator
def IW_op(X):
return transform(Finv_apply(PSFT_apply(X)))
#Regularisation (Backward term)
def reg0(X):
return tools.Hard(X, levels, (ks), supp=supp)
def reg00(X):
return tools.Hard_Threshold(X, transform, inverse,levels, (ks), M = [0], supp=supp)
def reg1(X):
return tools.Soft(X, levels*weightS, kmax, supp=supp, Kill = 0)
def reg_plus(X):
Xreg = np.copy(X)
Xreg[X<0] = 0
return Xreg
def reg_supp(X):
X[X < 0] = 0
return X*supp
def reg_filter(X):
return tools.mr_filter(X,levels,ks,10,transform, inverse, I_op(sigma0*np.ones((n1,n2))), lvl = lvl, supp = supp)
#Noise simulations to estimate noise levels in source plane
if np.sum(levels)==0:
print('Calculating noise levels')
#levels = simulate_noise(n1,n2, sigma0, size, I_op, transform, lvl)
levels = level_source(n1,n2,sigma0,size,PSFconj, Lens_op2, lensed, lvl)
#Saves levels
hdus = pf.PrimaryHDU(levels)
lists = pf.HDUList([hdus])
lists.writeto('Noise_levels.fits', clobber=True)
def mk_levels(sigma):
return level_source(n1,n2,sigma0,size,PSFconj, Lens_op2, lensed, lvl)
##Compute spectral norms
opwave_norm = spectralNorm(n1,n2,20,1e-10,IW_op,FW_op)
op_norm = spectralNorm(ns1, ns2, 20, 1e-10, F_op, I_op)
wave_norm = spectralNorm(ns1,ns2,20,1e-10,transform,inverse)
if scheme == 'Vu':
mu = 1.
tau = 1./(mu*wave_norm**2+0.5*op_norm)
if verbosity == 1:
print(mu,tau)
else:
mu = .5/(opwave_norm)
if verbosity == 1:
print(mu)
#Initialisation
niter0 = np.copy(niter)
Res1= []
for jr in range(nweights):
if jr!= nweights-1:
niter = niter0/2
else:
niter = niter0
trans = (transform(I_op(Y))/levels)*supp
#trans[:,lensed==0] = 0
trans[levels==0] =0
ks0 = np.max(trans)*0.9
print(ks0)
ks=np.copy(ks0)
steps = (ks0-kmax)/(niter-10.)
karg = np.log(kmax/ks0)/(niter-10.)
i = 0
ts = 1
csi = 0
M = [0]
Res1= []
Res2 = []
if np.sum(S0) == 0:
S = np.random.randn(ns1, ns2) * np.median(sigma0)*0
else:
S = S0
Snew = S
alpha =transform(S)
alphaY = transform(I_op(Y))
alphanew = np.copy(alpha)
points = 0
while i < niter:
if scheme == 'FB':
print('FB ', i)
ks = ks0*np.exp(i*karg)
ks = np.max([ks, kmax])
S = np.copy(Snew)
Snew = tools.Forward_Backward(Y, S, F_op, I_op, transform, inverse, mu, reg1, pos = 1)
S[S<0] = 0
FS = F_op(Snew)*mask
if (noise == 'G+P') or (noise == 'P+G') and (i<10):
sigma = (tools.MAD(Y)+np.sqrt(FS/tau))
levels = mk_levels(sigma)
elif scheme == 'FISTA':
print('FISTA ', i)
#S = np.copy(Snew)
alphanew = np.copy(alpha)
alpha, csi, ts = tools.FISTA(Y, alphanew, F_op, I_op, mu, ts, csi, reg1, transform, inverse, mask = mask)
#Snew = inverse(alpha)
#FS = F_op(Snew)
elif scheme == 'Vu':
print('Vu ', i)
S = np.copy(Snew)
Snew,alpha = tools.Vu_Primal_dual(Y, S, alpha, mu, tau, F_op, I_op, transform, inverse, reg1, reg_plus)
# FS = F_op(Snew)
# plt.imshow(S)
# plt.show()
SDR = tools.SDR(alpha, alphanew)
# Res = tools.Res(Y,FS,sigma0)
#Convergence condition
# Res1.append(Res)
Res2.append(SDR)
# ks = ks-steps
if i>5:
add = Criteria(i, SDR, Res2)
if add == 0:
points = np.max([0,points-1])
else:
points+=add
if points >= 5:
print('BREAK: algorithm converged at iteration: ', i)
break
i = i+1
if i == niter:
print('BREAK: Maximum number of iterations reached.')
# alpha = transform(S)
weightS = 2./(1.+np.exp(-10.*(levels*kmax-alpha)))
# plt.show()
Snew = inverse(alpha)
FS = F_op(Snew)
#Final reconstruction of the source
if np.size(np.shape(sigma0))>2:
sigma0[sigma0==0]=np.mean(sigma0)
if verbosity == 1:
plt.imshow((Y-FS)/(sigma0)); plt.colorbar(); plt.show()
# plt.plot(Res1, 'b'); plt.show()
plt.plot(Res2, 'r');
plt.show()
if noise == 'poisson':
plt.subplot(211)
plt.title('S')
plt.imshow(S); plt.colorbar()
plt.show()
return Snew, FS
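# Hedged note (added): the docstring of SLIT above suggests building the PSF conjugate as
#   PSFconj = np.real(np.fft.ifft2(np.conjugate(np.fft.fft2(PSF0[:-1, :-1]))))
# although the current implementation overrides the argument with PSF.T internally.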
#############################SLIT MCA for blended lenses############################
def SLIT_MCA(Y, Fkappa, kmax, niter, riter, size,PSF, PSFconj, lvlg = 0, lvls = 0, noise = 'gaussian', scheme = 'FISTA', decrease = 0,
tau =0, levels = [0], WS = 1, WG = 1, mask = [0,0], Sinit = 0, Ginit=0, Kills = 0, Killg = 0, verbosity = 0, nweight = 5,
original_fista=False, noise_levels_file='Noise_levels_MCA.fits'):
##DESCRIPTION:
## Function that estimates the source and lens light profiles from an image of a
## strong lens system
##
##INPUTS:
## -img: a 2-D image of a lensed source given as n1xn2 numpy array.
## -Fkappa: an array giving the mapping between lens and source. This array is calculated from the lens mass density
## using tools from SLIT.Lens
    ##         -kmax: the detection threshold in units of noise levels. We usually set this value to 5 to get a 5 sigma
## detection threshold.
## -niter: maximal number of iterations in the main loop over G.
## -riter: maximal number of iterations in the inner loop over S.
    ##         -size: resolution factor between lens and source grids such that the size of the output source
    ##                 will be n1*size x n2*size
## -PSF: the point spread function of the observation provided as a 2D array.
## -PSFconj: The conjugate of the PSF. Usually computed via np.real(np.fft.ifft2(np.conjugate(np.fft.fft2(PSF0[:-1,:-1]))))
    ##                    but the user has to make sure that the conjugate is well centered.
##
##OPTIONS:
## -levels: an array that contains the noise levels at each band of the wavelet decomposition of the source.
## If not provided, the routine will compute the levels and save them in a fits file 'Noise_levels.fits'
## so that they can be used at a later time. This option allows to save time when running the same
## experiment several times.
    ##         -mask: an array of zeros and ones with size ns1xns2. The zeros will stand for masked data.
    ##         -Ginit: Educated guess for the lens galaxy light profile. If set to a 2D numpy array, the array will be used
    ##                 as an initialisation for G.
##
##OUTPUTS:
## -S: the source light profile.
## -G: the convolved lens light profile
## -FS: the lensed version of the estimated source light profile
##
##EXAMPLE:
## S,FS = SLIT(img, Fkappa, 5, 100, 1, PSF, PSFconj)
niter = max([6, niter])
#Shape of the image
n1,n2 = np.shape(Y)
#Initialisation of the source
ns1 = int(n1*size)
ns2 = int(n2*size)
PSFconj = PSF.T
#Number of starlet scales in source and image planes
if lvlg ==0:
lvlg = np.int(np.log2(n2))
else:
lvlg = np.min([lvlg,np.int(np.log2(n2))])
lvls = lvlg
if lvls >np.int(np.log2(ns2)):
print('Error, too many wavelet levels for the source. Choose a smaller value for lvl')
        exit()
#Masking if required
if np.sum(mask) == 0:
mask = np.ones((n1,n2))
# Y = Y*mask # instead we put noise where pixels are masked
#Noise standard deviation in image plane
if noise == 'gaussian':
sigma0 = tools.MAD(Y)
print('noise statistic is gaussian (sigma = {:.5e})'.format(sigma0))
if noise == 'poisson':
sigma0 = tools.MAD_poisson(Y,tau)
print('noise statistic is poisson (sigma = {:.5e})'.format(sigma0))
if (noise == 'G+P') or (noise == 'P+G'):
sigma0 = np.sqrt(tools.MAD_poisson(Y,tau, lvlg)**2+tools.MAD(Y)**2)
print('noise statistic is gaussian-poisson mixture (sigma = {:.3f})'.format(sigma0))
# replace masked pixels with gaussian noise (fix k computation)
masked_pixels = np.where(mask == 0)
gaussian_noise_map = sigma0 * np.random.randn(n1, n2)
Y[masked_pixels] = gaussian_noise_map[masked_pixels]
#Mapping of an all-at-one image
lensed = lens_one(Fkappa, n1,n2, size)
supp = np.zeros((lvls,lensed.shape[0],lensed.shape[1]))
supp[:,lensed/lensed ==1] =1
#Limits of the image plane in source plane
bound = mk_bound(Fkappa, n1,n2, size)
#Useful functions
def Finv_apply(I):
return Lens.image_to_source(I, size, Fkappa, lensed = lensed)
def Lens_op2(I):
return Lens.image_to_source(I, size, Fkappa, lensed = lensed, square = 1)
def F_apply(Si):
return Lens.source_to_image(Si, n1, n2,Fkappa)
def PSF_apply(i):
return scp.fftconvolve(i,PSF,mode = 'same')
def PSFT_apply(ii):
return scp.fftconvolve(ii,PSFconj,mode = 'same')
def transform(x):
coeffs, _ = tools.wave_transform(x, lvlg, newwave=1)
return coeffs
def inverse(x):
return tools.iuwt(x, newwave=1)
def FWS_op(X):
return PSF_apply(F_apply(inverse(X)))
#Inverse operator
def IWS_op(X):
return transform(Finv_apply(PSFT_apply(X)))
def FWG_op(X):
return PSF_apply(inverse(X))
#Inverse operator
def IWG_op(X):
return transform(PSFT_apply(X))
#Forward Source operator
def FS_op(X):
return PSF_apply(F_apply(X))
#Inverse Source operator
def IS_op(X):
return Finv_apply(PSFT_apply(X))
#Forward Lens operator
def FG_op(X):
return X#(PSF_apply(X))
#Inverse Lens operator
def IG_op(X):
return X#(PSFT_apply(X))
#Regularisation (Backward term)
def regG0(X):
return tools.Hard_Threshold(X, transform, inverse, levelg*kG)
def regS0(X):
return tools.Hard_Threshold(X, transform, inverse, levels*kS)
def regG1(X):
return tools.Soft(X, levelg*weightG, k, supp=1, Kill = Killg)
def regS1(X):
return tools.Soft(X, levels*weightS, k , supp=supp, Kill = Kills)
def reg_plus(X):
X[X<0] = 0
return X
def reg_filter(X):
return tools.mr_filter(X,levels,kmax,20,transform, inverse, I_op(sigma0*np.ones((n1,n2))), lvl = lvl, supp = supp)
# Noise levels in image plane in starlet space
levelg = tools.level(n1, n2, lvlg) * sigma0
#Noise simulations to estimate noise levels in source plane
if not np.any(levels):
print('Calculating noise levels')
levels = level_source(n1, n2, sigma0, size, PSFconj, Lens_op2, lensed, lvls)
#levels[:,lensed ==0] = np.max(levels*10)
#Saves levels
hdus = pf.PrimaryHDU(levels)
lists = pf.HDUList([hdus])
lists.writeto(noise_levels_file, clobber=True)
    #Computation of spectral norms
FS_norm = spectralNorm(ns1,ns2,20,1e-10,FS_op,IS_op)
FG_norm = spectralNorm(ns1, ns2, 20, 1e-10, FG_op, IG_op)
wave_norm_im = spectralNorm(n1,n2,20,1e-10,transform,inverse)
wave_norm_s = spectralNorm(ns1,ns2,20,1e-10,transform,inverse)
opwaveS_norm = spectralNorm(n1, n2, 20, 1e-10, IWS_op, FWS_op)
opwaveG_norm = spectralNorm(n1, n2, 20, 1e-10, IWG_op, FWG_op)
if scheme == 'Vu':
mu = 1.
tauG = 0.5/(mu*wave_norm_im**2+0.5*FG_norm)
tauS = 0.5 / (mu * wave_norm_s ** 2 + 0.5 * FS_norm)
if verbosity == 1:
print(tauS, tauG)
else:
muG = 1. / (opwaveG_norm)
muS = 1. / (opwaveS_norm)
if verbosity == 1:
print(muS, muG)
weightS = WS
weightG = WG
niter0 = np.copy(niter)
riter0 = np.copy(riter)
#Reweighting loop
# k = tools.MOM(transform(Y), transform(Y), levelg, levelg) # original code
k = tools.MOM(transform(Y), transform(Y), levels, levelg)
k0 = np.copy(k)
karg = np.log(kmax / k0) / (niter - 10.)
if np.sum(Ginit) == 0:
G = np.random.randn(n1, n2) * sigma0
else:
G = Ginit
if np.sum(Sinit) == 0:
S = np.random.randn(ns1, ns2) * sigma0
else:
S = Sinit
# FS = FG_op(G) # original code
# FG = FS_op(S) # original code
FS = 0
FG = 0
Gnew = np.copy(G)
Snew = np.copy(S)
alphaSnew = transform(S)
csiS = np.copy(alphaSnew)
alphaGnew = transform(G)
csiG = np.copy(alphaGnew)
for it in range(nweight):
#Initialisations
if it<np.max(range(nweight)):
niter = niter0#/2
riter = riter0#/2
else:
niter = niter0
riter = riter0
i = 0
tg = 1
ts = 1
if decrease == 1:
# k = tools.MOM(transform(Y), transform(Y), levelg, levelg) # original code
k = tools.MOM(transform(Y), transform(Y), levels, levelg)
else:
k = kmax
k0 = np.copy(k)
karg = np.log(kmax / k0) / (niter - 10.)
print(k)
step = (k-kmax)/(niter-5)
Res1 = []
Res2 = []
DS = np.copy(Y)
DG = np.copy(Y)
#Beginning of main loop
points = 0
Res1G = [1, 2]
Res1S= [1,2]
while i < niter:
k = k-step#k0 * np.exp(i * karg)#
# kMOM = tools.MOM(transform(DS), transform(DG), levelg, levelg) # original code
kMOM = tools.MOM(transform(DS), transform(DG), levels, levelg)
if kMOM<k:
k = np.copy(kMOM)
print('MOMs threshold: ', k)
step = (k-kmax)/(niter-i-5)
k = np.max([kmax, k])
print('main loop: ', i, k, kMOM)
DS = Y - FG
ts = 1
pointS = 0
Res1S = []
Res2S = []
pointS = 0
for j in range(riter):
if scheme == 'FISTA':
alphaS = np.copy(alphaSnew)
alphaSnew, csiS, ts = tools.FISTA(DS, alphaS, FS_op, IS_op, muS, ts, csiS, regS1, transform,
inverse, pos=0, original_fista=original_fista)
if scheme == 'Vu':
alphaS = np.copy(alphaSnew)
S = np.copy(Snew)
Snew, alphaSnew = tools.Vu_Primal_dual(DS, S, alphaS, mu, tauS, FS_op, IS_op, transform, inverse,
regS1, reg_plus)
Res2S.append(tools.SDR(alphaS, alphaSnew))
if j > 5:
pointS = Criteria(j, Res1S, Res2S)
if pointS >= 5:
if verbosity == 1:
print('Convergence on S in:', j, ' iterations.')
break
if scheme == 'FISTA':
Snew = inverse(alphaSnew)
Snew[Snew<0] = 0
FS = FS_op(Snew)
DG = Y - FS
tg = 1
pointG = 0
Res1G = []
Res2G = []
G = np.copy(Gnew)
pointG = 0
for j2 in range(1):
if scheme == 'FISTA':
alphaG = np.copy(alphaGnew)
alphaGnew, csiG, tg = tools.FISTA(DG, alphaG, FG_op, IG_op, muG, tg, csiG, regG1, transform, inverse, pos = 0, original_fista=original_fista)
if scheme == 'Vu':
alphaG = np.copy(alphaGnew)
G = np.copy(Gnew)
Gnew, alphaGnew = tools.Vu_Primal_dual(DG, G, alphaG, mu, tauG, FG_op, IG_op, transform, inverse, regG1,
reg_plus)
Res2G.append(tools.SDR(alphaG, alphaGnew))
if j2>5:
pointG = Criteria(j2, Res1G, Res2G)
if pointG >=5:
if verbosity == 1:
                            print('Convergence on G in:', j2, ' iterations.')
break
if scheme == 'FISTA':
Gnew = inverse(alphaGnew)
Gnew[Gnew<0] = 0
FG = FG_op(Gnew)
Res1.append(tools.Res(Y, FS+FG, sigma0))
Res2.append((tools.SDR(Gnew, G)+tools.SDR(Snew, S))/2.)
if i>5:
points = Criteria(i, Res2, Res1)
if points >= 5:
if verbosity ==1:
print('BREAK: algorithm converged at iteration: ', i)
break
if verbosity ==1:
plt.figure(0)
plt.subplot(221)
plt.title('S')
plt.imshow(Snew)
plt.subplot(222)
plt.title('FS')
plt.imshow(FS)
plt.subplot(223)
plt.title('FG')
plt.imshow(FG)
plt.subplot(224)
plt.title('Residuals')
plt.imshow(Y-FS-FG)
plt.savefig('Res'+str(i)+'.png')
i +=1
#Weighting
weightS = 2./(1.+np.exp(-10.*(levels*kmax-alphaSnew)))
weightG = 2./(1.+np.exp(-10.*(levelg*kmax-alphaGnew)))
# Snew, FS = SLIT(Y-FG, Fkappa, kmax, niter, size, PSF, PSFconj, levels = [0], scheme = 'FISTA', mask = mask, lvl = lvls)
#Final reconstructions
if verbosity == 2:
plt.show()
plt.figure(1)
plt.subplot(211)
plt.plot(Res1)
plt.subplot(212)
plt.plot(Res2)
return Snew, FS,Gnew, FG, Res1, Res2
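# Hedged usage sketch (added): mirrors the call pattern documented in the SLIT_MCA docstring;
# `img`, `Fkappa`, `PSF` and `PSFconj` are assumed to be prepared by the caller as described there.
#   S, FS, G, FG, res1, res2 = SLIT_MCA(img, Fkappa, kmax=5, niter=100, riter=20,
#                                       size=1, PSF=PSF, PSFconj=PSFconj)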
def SLIT_MCA_HR(Y, Fkappa, kmax, niter, riter, size, PSF, lvlg=0, lvls=0, noise='gaussian', scheme='FISTA',
tau=0, levels=[0], WS=1, WG=1, mask=[0, 0], Ginit=0, Kills=0, Killg=0, verbosity=0, nweight=5):
##DESCRIPTION:
## Function that estimates the source and lens light profiles from an image of a
## strong lens system
##
##INPUTS:
## -img: a 2-D image of a lensed source given as n1xn2 numpy array.
## -Fkappa: an array giving the mapping between lens and source. This array is calculated from the lens mass density
## using tools from SLIT.Lens
    ##          -kmax: the detection threshold in units of noise levels. We usually set this value to 5 to get a 5 sigma
## detection threshold.
## -niter: maximal number of iterations in the main loop over G.
## -riter: maximal number of iterations in the inner loop over S.
    ##          -size: resolution factor between lens and source grids such that the size of the output source
    ##                 will be n1*size x n2*size
## -PSF: the point spread function of the observation provided as a 2D array.
## -PSFconj: The conjugate of the PSF. Usually computed via np.real(np.fft.ifft2(np.conjugate(np.fft.fft2(PSF0[:-1,:-1]))))
    ##                    but the user has to make sure that the conjugate is well centered.
##
##OPTIONS:
## -levels: an array that contains the noise levels at each band of the wavelet decomposition of the source.
## If not provided, the routine will compute the levels and save them in a fits file 'Noise_levels.fits'
## so that they can be used at a later time. This option allows to save time when running the same
## experiment several times.
    ##          -mask: an array of zeros and ones with size ns1xns2. The zeros will stand for masked data.
    ##          -Ginit: Educated guess for the lens galaxy light profile. If set to a 2D numpy array, the array will be used
    ##                  as an initialisation for G.
##
##OUTPUTS:
## -S: the source light profile.
## -G: the convolved lens light profile
## -FS: the lensed version of the estimated source light profile
##
##EXAMPLE:
## S,FS = SLIT(img, Fkappa, 5, 100, 1, PSF, PSFconj)
niter = max([6, niter])
# Shape of the image
n1, n2 = np.shape(Y)
# Initialisation of the source
ns1 = n1 * size
ns2 = n2 * size
PSFconj = PSF.T
# Number of starlet scales in source and image planes
if lvlg == 0:
lvlg = np.int(np.log2(n2))
else:
lvlg = np.min([lvlg, np.int(np.log2(n2))])
lvls = lvlg
if lvls > np.int(np.log2(ns2)):
print('Error, too many wavelet levels for the source. Choose a smaller value for lvl')
        exit()
# Masking if required
if np.sum(mask) == 0:
mask = np.ones((n1, n2))
Y = Y * mask
# Noise standard deviation in image plane
if noise == 'gaussian':
        print('noise statistic is gaussian')
sigma0 = tools.MAD(Y)
if noise == 'poisson':
print('noise statistic is poisson')
sigma0 = tools.MAD_poisson(Y, tau)
if (noise == 'G+P') or (noise == 'P+G'):
        print('noise statistic is poisson and gaussian mixture')
sigma0 = np.sqrt(tools.MAD_poisson(Y, tau, lvlg) ** 2 + tools.MAD(Y) ** 2)
# Mapping of an all-at-one image
lensed = lens_one(Fkappa, ns1, ns2, 1)
supp = np.zeros((lvls, lensed.shape[0], lensed.shape[1]))
supp[:, lensed / lensed == 1] = 1
# Limits of the image plane in source plane
bound = mk_bound(Fkappa, ns1, ns2, 1)
# Useful functions
def Down(I):
return tools.Downsample(I, size)
def Up(I):
return tools.Upsample(I, size)
def Finv_apply(I):
return Lens.image_to_source(I, 1, Fkappa, lensed=lensed)
def Lens_op2(I):
return Lens.image_to_source(I, 1, Fkappa, lensed=lensed, square=1)
def F_apply(Si):
return Lens.source_to_image(Si, ns1, ns2, Fkappa)
def PSF_apply(i):
return scp.fftconvolve(i, PSF, mode='same')
def PSFT_apply(ii):
return scp.fftconvolve(ii, PSFconj, mode='same')
def transform(x):
coeffs, _ = tools.wave_transform(x, lvlg, newwave=1)
return coeffs
def inverse(x):
return tools.iuwt(x, newwave=1)
def FWS_op(X):
return Down(PSF_apply(F_apply(inverse(X))))
# Inverse operator
def IWS_op(X):
return transform(Finv_apply(PSFT_apply(Up(X))))
def FWG_op(X):
return inverse(X)
# Inverse operator
def IWG_op(X):
return transform(X)
# Forward Source operator
def FS_op(X):
return Down(PSF_apply(F_apply(X)))
# Inverse Source operator
def IS_op(X):
return Finv_apply(PSFT_apply(Up(X)))
# Forward Lens operator
def FG_op(X):
return X # (PSF_apply(X))
# Inverse Lens operator
def IG_op(X):
return X # (PSFT_apply(X))
# Regularisation (Backward term)
def regG0(X):
return tools.Hard_Threshold(X, transform, inverse, levelg * kG)
def regS0(X):
return tools.Hard_Threshold(X, transform, inverse, levels * kS)
def regG1(X):
return tools.Soft(X, levelg * weightG, k, supp=1, Kill=Killg)
def regS1(X):
return tools.Soft(X, levels * weightS, k, supp=supp, Kill=Kills)
def reg_plus(X):
X[X < 0] = 0
return X
def reg_filter(X):
return tools.mr_filter(X, levels, kmax, 20, transform, inverse, I_op(sigma0 * np.ones((n1, n2))), lvl=lvl,
supp=supp)
# Noise levels in image plane in starlet space
levelg = tools.level(n1, n2, lvlg) * sigma0
# Noise simulations to estimate noise levels in source plane
if np.sum(levels) == 0:
print('Calculating noise levels')
levels = level_source_HR(n1, n2, size, sigma0, PSFconj, Lens_op2, Up, lvls)
# levels[:,lensed ==0] = np.max(levels*10)
# Saves levels
hdus = pf.PrimaryHDU(levels)
lists = pf.HDUList([hdus])
lists.writeto('Noise_levels_MCA.fits', clobber=True)
    # Computation of spectral norms
FS_norm = spectralNorm(ns1, ns2, 20, 1e-10, FS_op, IS_op)
FG_norm = spectralNorm(ns1, ns2, 20, 1e-10, FG_op, IG_op)
wave_norm_im = spectralNorm(ns1, ns2, 20, 1e-10, transform, inverse)
wave_norm_s = spectralNorm(ns1, ns2, 20, 1e-10, transform, inverse)
opwaveS_norm = spectralNorm(n1, n2, 20, 1e-10, IWS_op, FWS_op)
opwaveG_norm = spectralNorm(n1, n2, 20, 1e-10, IWG_op, FWG_op)
if scheme == 'Vu':
mu = 1.
tauG = 0.5 / (mu * wave_norm_im ** 2 + 0.5 * FG_norm)
tauS = 0.5 / (mu * wave_norm_s ** 2 + 0.5 * FS_norm)
if verbosity == 1:
print(tauS, tauG)
else:
muG = 1. / (opwaveG_norm)
muS = 1. / (opwaveS_norm)
if verbosity == 1:
print(muS, muG)
weightS = WS
weightG = WG
niter0 = np.copy(niter)
riter0 = np.copy(riter)
# Reweighting loop
for it in range(nweight):
# Initialisations
if it < np.max(range(nweight)):
niter = niter0 # /2
riter = riter0 # /2
else:
niter = niter0
riter = riter0
i = 0
tg = 1
ts = 1
FS = 0
FG = 0
G = np.random.randn(n1, n2) * sigma0
S = np.random.randn(ns1, ns2) * sigma0
Gnew = np.copy(G)
Snew = np.copy(S)
alphaSnew = transform(S)
csiS = np.copy(alphaSnew)
alphaGnew = transform(G)
csiG = np.copy(alphaGnew)
k = tools.MOM(transform(Y), transform(Y), levels, levelg) / 100.
k0 = np.copy(k)
karg = np.log(kmax / k0) / (niter - 5.)
print(k)
step = (k - kmax) / (niter - 5)
Res1 = []
Res2 = []
DS = np.copy(Y)
DG = np.copy(Y)
# Beginning of main loop
points = 0
Res1G = [1, 2]
Res1S = [1, 2]
while i < niter:
k = k0 * np.exp(i * karg)
kMOM = tools.MOM(transform(DS), transform(DG), levels, levelg)
if kMOM < k:
k = np.copy(kMOM)
print('MOMs threshold: ', k)
step = (k - kmax) / (niter - i - 5)
k = np.max([kmax, k])
print('main loop: ', i, k, kMOM)
DG = Y - FS
tg = 1
pointG = 0
Res1G = []
Res2G = []
G = np.copy(Gnew)
pointG = 0
for j2 in range(1):
if scheme == 'FISTA':
alphaG = np.copy(alphaGnew)
alphaGnew, csiG, tg = tools.FISTA(DG, alphaG, FG_op, IG_op, muG, tg, csiG, regG1, transform,
inverse, pos=0)
if scheme == 'Vu':
alphaG = np.copy(alphaGnew)
G = np.copy(Gnew)
Gnew, alphaGnew = tools.Vu_Primal_dual(DG, G, alphaG, mu, tauG, FG_op, IG_op, transform, inverse,
regG1,
reg_plus)
Res2G.append(tools.SDR(alphaG, alphaGnew))
if j2 > 5:
pointG = Criteria(j2, Res1G, Res2G)
if pointG >= 5:
if verbosity == 1:
                            print('Convergence on G in:', j2, ' iterations.')
break
if scheme == 'FISTA':
Gnew = inverse(alphaGnew)
Gnew[Gnew < 0] = 0
FG = FG_op(Gnew)
DS = Y - FG
ts = 1
pointS = 0
Res1S = []
Res2S = []
pointS = 0
for j in range(riter):
if scheme == 'FISTA':
alphaS = np.copy(alphaSnew)
alphaSnew, csiS, ts = tools.FISTA(DS, alphaS, FS_op, IS_op, muS, ts, csiS, regS1, transform,
inverse, pos=0)
if scheme == 'Vu':
                    alphaS = np.copy(alphaSnew)
import pytest
import numpy as np
from cascade import kappa
from cascade import group_offsets
from cascade import Cascade
from cascade import ScoreType
from cascade import IndicatorType
import factories
import fixtures
np.random.seed(42)
def test_kappa_with_empty_qid():
qid = np.array([])
cutoff = 5
assert [] == kappa([], cutoff, qid)
def _make_query_data(num_queries=1, depth=5, random_depth=False):
queries = np.random.choice(list(range(num_queries)),
num_queries,
replace=False)
scores = np.random.uniform(low=0., high=10., size=num_queries * depth)
qid = []
for x in queries:
qid += [x] * depth
return scores, np.array(qid)
def test_kappa_when_query_equals_cutoff():
cutoff = 5
query_depth = 5
scores, qid = _make_query_data(depth=query_depth)
topk = sorted(scores, reverse=True)
res = kappa(scores, cutoff, qid)
np.testing.assert_almost_equal(res, np.full_like(res, topk[cutoff - 1]))
def test_kappa_score_when_query_shorter_than_cutoff():
cutoff = 10
query_depth = 5
scores, qid = _make_query_data(depth=query_depth)
topk = sorted(scores, reverse=True)
res = kappa(scores, cutoff, qid)
np.testing.assert_almost_equal(res, np.full_like(res,
topk[query_depth - 1]))
def test_kappa_score_when_query_longer_than_cutoff():
cutoff = 5
query_depth = 10
scores, qid = _make_query_data(depth=query_depth)
topk = sorted(scores, reverse=True)
res = kappa(scores, cutoff, qid)
np.testing.assert_almost_equal(res, np.full_like(res, topk[cutoff - 1]))
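# Hedged reference sketch (added): consistent with the three tests above, kappa appears to
# return, for every document, the score of the item ranked at min(cutoff, query length)
# within its query. This is NOT the library implementation, only an illustration.
def _reference_kappa(scores, cutoff, qid):
    scores = np.asarray(scores, dtype=float)
    qid = np.asarray(qid)
    out = np.zeros_like(scores)
    for q in np.unique(qid):
        idx = np.where(qid == q)[0]
        topk = np.sort(scores[idx])[::-1]  # descending scores within the query
        out[idx] = topk[min(cutoff, idx.size) - 1]
    return out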
def test_single_stage_cascade_resets_predict_attributes():
cascade = factories.dummy_cascade()
cascade.predict_reset()
for ranker in cascade:
assert None == ranker.predict
assert None == ranker.kappa
assert None == ranker.mask
assert None == ranker.estimate
def test_cascade_first_stage_has_no_score_mask():
X, _, qid = fixtures.train_data()
cascade = factories.cascade(num_stages=1, cutoffs=[5])
ranker = cascade.rankers[0]
cascade.predict(X, qid)
assert Cascade.SCORE_MASK not in ranker.predict
def test_cascade_first_stage_applies_cutoff():
X, _, qid = fixtures.train_data()
cascade = factories.cascade(num_stages=1, cutoffs=[2])
ranker = cascade.rankers[0]
ranker.booster.update()
offsets = group_offsets(qid)
a, b = next(offsets)
cascade.predict(X, qid)
expected = (b - a) * [0.01948363]
np.testing.assert_almost_equal(ranker.kappa[a:b], expected)
def test_cascade_first_stage_applies_mask():
X, _, qid = fixtures.train_data()
cascade = factories.cascade(num_stages=1, cutoffs=[2])
ranker = cascade.rankers[0]
ranker.booster.update()
offsets = group_offsets(qid)
a, b = next(offsets)
cascade.predict(X, qid)
expected = [0, 0, 1, 1, 0]
np.testing.assert_almost_equal(ranker.mask[a:b], expected)
def test_cascade_second_stage_applies_cutoff():
X, _, qid = fixtures.train_data()
cascade = factories.cascade(num_stages=2, cutoffs=[4, 2])
cascade.update()
ranker_two = cascade.rankers[1]
offsets = group_offsets(qid)
a, b = next(offsets)
cascade.predict(X, qid)
topk = sorted(ranker_two.predict[a:b], reverse=True)
expected = (b - a) * [topk[1]]
np.testing.assert_almost_equal(ranker_two.kappa[a:b], expected)
def test_cascade_second_stage_applies_mask():
X, _, qid = fixtures.train_data()
cascade = factories.cascade(num_stages=2, cutoffs=[4, 2])
cascade.update()
ranker_one = cascade.rankers[0]
ranker_two = cascade.rankers[1]
offsets = group_offsets(qid)
a, b = next(offsets)
cascade.predict(X, qid)
expected = [1, 0, 1, 1, 1]
np.testing.assert_almost_equal(ranker_one.mask[a:b], expected)
expected = [0, 0, 1, 1, 0]
np.testing.assert_almost_equal(ranker_two.mask[a:b], expected)
def test_cascade_score_mask_does_not_appear_in_first_stage():
X, _, qid = fixtures.train_data()
cascade = factories.cascade(num_stages=2, cutoffs=[4, 2])
cascade.update()
ranker_one = cascade.rankers[0]
ranker_two = cascade.rankers[1]
offsets = group_offsets(qid)
a, b = next(offsets)
cascade.predict(X, qid, is_train=True)
assert Cascade.SCORE_MASK not in ranker_one.predict
def test_cascade_uses_score_mask():
"""As per previous implementation, always use the SCORE_MASK during predict
regardless of whether we are doing training or inference.
"""
X, _, qid = fixtures.train_data()
cascade = factories.cascade(num_stages=2, cutoffs=[4, 2])
cascade.update()
ranker_one = cascade.rankers[0]
ranker_two = cascade.rankers[1]
offsets = group_offsets(qid)
a, b = next(offsets)
for is_train in [True, False]:
cascade.predict(X, qid, is_train=is_train)
assert Cascade.SCORE_MASK in ranker_two.predict
def test_cascade_computed_kappa_when_training():
qid = np.array([1, 1, 1, 1, 1])
offsets = group_offsets(qid)
a, b = next(offsets)
cascade = factories.dummy_cascade()
ranker = factories.ranker()
ranker.cutoff = 2
prev_mask = [1, 1, 0, 1, 1]
scores = np.array([0.1, 1.0, -0.03, 0.5, 0.25])
ranker.predict = np.copy(scores)
# according to previous mask
ranker.predict[2] = Cascade.SCORE_MASK
scores = cascade.ranker_apply_cutoff(ranker,
scores,
prev_mask,
qid,
is_train=True)
expected = [0.5] * 5
np.testing.assert_almost_equal(ranker.kappa[a:b], expected)
assert scores is not ranker.predict
def test_cascade_computed_kappa_when_inference():
qid = np.array([1, 1, 1, 1, 1])
offsets = group_offsets(qid)
a, b = next(offsets)
cascade = factories.dummy_cascade()
ranker = factories.ranker()
ranker.cutoff = 2
prev_mask = [1, 1, 0, 1, 1]
# put 10. to test if SCORE_MASK is used in `ranker_apply_cutoff`
scores = np.array([0.1, 1.0, 10., 0.5, 0.25])
ranker.predict = np.copy(scores)
# according to previous mask
ranker.predict[2] = Cascade.SCORE_MASK
scores = cascade.ranker_apply_cutoff(ranker,
scores,
prev_mask,
qid,
is_train=False)
expected = [0.5] * 5
np.testing.assert_almost_equal(ranker.kappa[a:b], expected)
assert scores is ranker.predict
def test_cascade_first_stage_score_any_type():
cascade = factories.cascade(num_stages=1, cutoffs=[4])
for name, member in ScoreType.__members__.items():
if member.name != name: # skip alias names
continue
cascade.set_score_type(name)
ranker_one = cascade.rankers[0]
cascade.ranker_score(ranker_one)
assert ranker_one.predict is ranker_one.estimate
def test_cascade_second_stage_score_independent_type():
cascade = factories.cascade(num_stages=2, cutoffs=[4, 2])
cascade.set_score_type('independent')
ranker_one = cascade.rankers[0]
ranker_one.mask = np.array([1, 1, 1, 1, 0])
ranker_one.estimate = np.array([4., 3., 2., 1., 0.])
ranker_two = cascade.rankers[1]
ranker_two.predict = np.array([5., 5., 5., 5., 5.])
prev_ranker = ranker_one
cascade.ranker_score(ranker_two, prev_ranker)
assert ranker_two.predict is not ranker_two.estimate
np.testing.assert_almost_equal(ranker_two.estimate,
np.array([5., 5., 5., 5., 0.]))
def test_cascade_second_stage_score_full_type():
cascade = factories.cascade(num_stages=2, cutoffs=[4, 2])
cascade.set_score_type('full')
ranker_one = cascade.rankers[0]
ranker_one.mask = np.array([1, 1, 1, 1, 0])
ranker_one.estimate = np.array([4., 3., 2., 1., 0.])
ranker_two = cascade.rankers[1]
ranker_two.predict = np.array([5., 5., 5., 5., 5.])
prev_ranker = ranker_one
cascade.ranker_score(ranker_two, prev_ranker)
assert ranker_two.predict is not ranker_two.estimate
np.testing.assert_almost_equal(ranker_two.estimate,
np.array([9., 8., 7., 6., 0.]))
def test_cascade_second_stage_score_weak_type():
cascade = factories.cascade(num_stages=2, cutoffs=[4, 2])
cascade.set_score_type('weak')
ranker_one = cascade.rankers[0]
ranker_one.mask = np.array([1, 1, 1, 1, 0])
ranker_one.estimate = np.array([4., 3., 2., 1., 0.])
ranker_two = cascade.rankers[1]
ranker_two.predict = np.array([3., 5., 1., 5., 5.])
prev_ranker = ranker_one
cascade.ranker_score(ranker_two, prev_ranker)
assert ranker_two.predict is not ranker_two.estimate
np.testing.assert_almost_equal(ranker_two.estimate,
np.array([4., 5., 2., 5., 0.]))
def test_cascade_second_stage_score_unknown_type():
    cascade = factories.cascade(num_stages=2, cutoffs=[2, 4])
    ranker_one = cascade.rankers[0]
    ranker_two = cascade.rankers[1]
    with pytest.raises(RuntimeError):
        cascade.score_type = 'unknown-foo-bar-baz'
prev_ranker = ranker_one
cascade.ranker_score(ranker_two, prev_ranker)
def test_cascade_set_unknown_score_type():
    cascade = factories.cascade(num_stages=1, cutoffs=[4])
    with pytest.raises(KeyError):
        cascade.set_score_type('unknown-foo-bar-baz')
def test_ranker_set_unknown_indicator_type():
    ranker = factories.ranker()
    with pytest.raises(KeyError):
        ranker.set_indicator_type('unknown-foo')
def test_ranker_indicator_function_logistic():
ranker = factories.ranker()
ranker.set_indicator_type('logistic')
ranker.sigma = 0.1
ranker.cutoff = 5
ranker.predict = np.array(
[-1., -0.75, -0.5, -0.25, 0., 0.25, 0.5, 0.75, 1.])
ranker.kappa = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.])
ranker.indicator_func()
# indicator score
expected = [
4.5397869e-05, 5.5277864e-04, 6.6928509e-03, 7.5858180e-02,
5.0000000e-01, 9.2414182e-01, 9.9330715e-01, 9.9944722e-01,
9.9995460e-01
]
np.testing.assert_almost_equal(expected, ranker.indicator_score)
# indicator derivative
expected = [
4.5395808e-04, 5.5247307e-03, 6.6480567e-02, 7.0103717e-01,
2.5000000e+00, 7.0103717e-01, 6.6480567e-02, 5.5247307e-03,
4.5395808e-04
]
np.testing.assert_almost_equal(expected, ranker.indicator_derivative)
def test_ranker_indicator_function_relu():
ranker = factories.ranker()
ranker.set_indicator_type('relu')
ranker.delta = 0.1
ranker.cutoff = 3
ranker.predict = np.array(
[-1., -0.75, -0.5, -0.25, 0., 0.25, 0.5, 0.75, 1.])
ranker.kappa = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.])
ranker.indicator_func()
# indicator score
expected = [0., 0., 0., 0., 0.5, 1., 1., 1., 1.]
np.testing.assert_almost_equal(expected, ranker.indicator_score)
# indicator derivative
expected = [0., 0., 0., 0., 5., 0., 0., 0., 0.]
np.testing.assert_almost_equal(expected, ranker.indicator_derivative)
def test_cascade_last_ranker_indicator_is_zero():
cascade = factories.cascade(num_stages=1, cutoffs=[1])
ranker = cascade.last()
ranker.predict = np.array([1., 2., 3.])
ranker.kappa = np.array([1., 1., 1.])
cascade.ranker_indicator()
assert 0. == ranker.indicator_score.all()
assert 0. == ranker.indicator_derivative.all()
def test_cascade_collect_weights_independent_scoring():
cascade = factories.cascade(num_stages=2, cutoffs=[4, 2])
cascade.set_score_type('independent')
ranker_one = cascade.rankers[0]
ranker_one.weights = np.array([1., 2., 3.])
ranker_two = cascade.rankers[1]
ranker_two.weights = np.array([3., 2., 1.])
result = cascade.collect_weights(0, ranker_one)
expected = [1., 2., 3.]
    np.testing.assert_almost_equal(expected, result)
# Copyright 2019 Baidu Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a class that represents an adversarial example."""
import numpy as np
import numbers
from abc import ABC
from perceptron.utils.distances import MSE
from perceptron.utils.distances import Distance
class StopAttack(Exception):
"""Exception thrown to request early stopping of an attack
if a given (optional!) threshold is reached.
"""
pass
class Adversarial(ABC):
"""Defines the base class of an adversarial that should be found and
stores the result. The :class:`Adversarial` class represents a single
adversarial example for a given model, criterion and reference image.
It can be passed to an adversarial attack to find the actual adversarial.
Parameters
----------
model : a :class:`Model` instance
The model that should be evaluated against the adversarial.
criterion : a :class:`Criterion` instance
The criterion that determines which images are adversarial.
original_image : a :class:`numpy.ndarray`
The original image to which the adversarial image should
be as close as possible.
original_pred : int(ClsAdversarial) or dict(DetAdversarial)
The ground-truth predictions of the original image.
distance : a :class:`Distance` class
The measure used to quantify similarity between images.
threshold : float or :class:`Distance`
If not None, the attack will stop as soon as the adversarial
perturbation has a size smaller than this threshold. Can be
an instance of the :class:`Distance` class passed to the distance
argument, or a float assumed to have the same unit as the
the given distance. If None, the attack will simply minimize
        the distance as well as possible. Note that the threshold only
influences early stopping of the attack; the returned adversarial
does not necessarily have smaller perturbation size than this
threshold; the `reached_threshold()` method can be used to check
if the threshold has been reached.
"""
def __init__(
self,
model,
criterion,
original_image,
original_pred=None,
threshold=None,
distance=MSE,
verbose=False):
self._model = model
self._criterion = criterion
self._original_image = original_image
self._original_image_for_distance = original_image
self._original_pred = original_pred
self._distance = distance
if threshold is not None and not isinstance(threshold, Distance):
threshold = distance(value=threshold)
self._threshold = threshold
self.verbose = verbose
self._best_adversarial = None
self._best_distance = distance(value=np.inf)
self._best_adversarial_output = None
self._total_prediction_calls = 0
self._total_gradient_calls = 0
self._best_prediction_calls = 0
self._best_gradient_calls = 0
# used for attacks that can provide a verifiable bound
self._verifiable_bounds = (0., 0.)
# check if the original image is already adversarial
try:
self.predictions(original_image)
except StopAttack:
# if a threshold is specified and the original input is
# misclassified, this can already cause a StopAttack
# exception
assert self._distance.value == 0.
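    # Hedged usage sketch (added; the attack and model construction are assumptions, not
    # taken from this file):
    #   adv = Adversarial(model, criterion, original_image, original_pred, distance=MSE)
    #   attack(adv)                  # any perceptron attack that accepts an Adversarial
    #   adv.image, adv.distance      # best adversarial found so far and its distance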
def _reset(self):
self._best_adversarial = None
self._best_distance = self._distance(value=np.inf)
self._best_adversarial_output = None
self._best_prediction_calls = 0
self._best_gradient_calls = 0
self.predictions(self._original_image)
@property
def verifiable_bounds(self):
"""The verifiable bounds obtained so far."""
return self._verifiable_bounds
@verifiable_bounds.setter
def verifiable_bounds(self, bounds):
"""The setter of verifiable bounds"""
self._verifiable_bounds = bounds
@property
def image(self):
"""The best adversarial found so far."""
return self._best_adversarial
@property
def output(self):
"""The model predictions for the best adversarial found so far.
None if no adversarial has been found.
"""
return self._best_adversarial_output
@property
def distance(self):
"""The distance of the adversarial input to the original input."""
return self._best_distance
@property
def original_image(self):
"""The original input."""
return self._original_image
@property
def original_pred(self):
"""The original label."""
return self._original_pred
def set_distance_dtype(self, dtype):
"""Set the dtype of Distance."""
assert dtype >= self._original_image.dtype
self._original_image_for_distance = self._original_image.astype(
dtype, copy=False)
def reset_distance_dtype(self):
"""Reset the dtype of Distance."""
self._original_image_for_distance = self._original_image
def normalized_distance(self, image):
"""Calculates the distance of a given image to the
original image.
Parameters
----------
image : `numpy.ndarray`
The image that should be compared to the original image.
Returns
-------
:class:`Distance`
The distance between the given image and the original image.
"""
return self._distance(
self._original_image_for_distance,
image,
bounds=self.bounds())
def reached_threshold(self):
"""Returns True if a threshold is given and the currently
best adversarial distance is smaller than the threshold."""
return self._threshold is not None \
and self._best_distance <= self._threshold
def target_class(self):
"""Interface to criterion.target_class for attacks.
"""
try:
target_class = self._criterion.target_class()
except AttributeError:
target_class = None
return target_class
def num_classes(self):
"""Return number of classes."""
n = self._model.num_classes()
assert isinstance(n, numbers.Number)
return n
def bounds(self):
"""Return bounds of model."""
min_, max_ = self._model.bounds()
assert isinstance(min_, numbers.Number)
assert isinstance(max_, numbers.Number)
assert min_ < max_
return min_, max_
def in_bounds(self, input_):
"""Check if input is in bounds."""
min_, max_ = self.bounds()
return min_ <= input_.min() and input_.max() <= max_
def channel_axis(self, batch):
""" Interface to model.channel_axis for attacks.
Parameters
----------
batch : bool
Controls whether the index of the axis for a batch of images
(4 dimensions) or a single image (3 dimensions) should be
returned.
"""
axis = self._model.channel_axis()
if not batch:
axis = axis - 1
return axis
def has_gradient(self):
""" Returns true if _backward and _forward_backward can be called
by an attack, False otherwise.
"""
try:
self._model.gradient
self._model.predictions_and_gradient
except AttributeError:
return False
else:
return True
def _new_adversarial(self, image, predictions, in_bounds):
image = image.copy() # to prevent accidental inplace changes
distance = self.normalized_distance(image)
if in_bounds and self._best_distance > distance:
# new best adversarial
if self.verbose:
print('new best adversarial: {}'.format(distance))
self._best_adversarial = image
self._best_distance = distance
self._best_adversarial_output = predictions
self._best_prediction_calls = self._total_prediction_calls
self._best_gradient_calls = self._total_gradient_calls
if self.reached_threshold():
raise StopAttack
return True, distance
return False, distance
def _is_adversarial(self, image, predictions, in_bounds):
"""Interface to `criterion.is_adversary()` that calls
_new_adversarial if necessary.
Parameters
----------
image : `numpy.ndarray`
Image with shape (height, width, channels).
predictions : :class:`numpy.ndarray`
A vector with the predictions for some image.
        in_bounds : bool
            Whether the image lies within the bounds of the model.
"""
is_adversarial = self._criterion.is_adversarial(
predictions, self._original_pred)
assert isinstance(is_adversarial, bool) or \
isinstance(is_adversarial, np.bool_)
if is_adversarial:
is_best, distance = self._new_adversarial(
image, predictions, in_bounds)
else:
is_best = False
distance = None
return is_adversarial, is_best, distance
def predictions(self, image, strict=True, return_details=False):
"""Interface to model.predictions for attacks.
Parameters
----------
image : `numpy.ndarray`
Image with shape (height, width, channels).
strict : bool
Controls if the bounds for the pixel values should be checked.
"""
in_bounds = self.in_bounds(image)
assert not strict or in_bounds
self._total_prediction_calls += 1
predictions = self._model.predictions(image)
is_adversarial, is_best, distance = self._is_adversarial(
image, predictions, in_bounds)
if return_details:
return predictions, is_adversarial, is_best, distance
else:
return predictions, is_adversarial
def batch_predictions(
self, images, greedy=False, strict=True, return_details=False):
"""Interface to model.batch_predictions for attacks.
Parameters
----------
images : `numpy.ndarray`
Batch of images with shape (batch, height, width, channels).
greedy : bool
Whether the first adversarial should be returned.
strict : bool
Controls if the bounds for the pixel values should be checked.
"""
if strict:
in_bounds = self.in_bounds(images)
assert in_bounds
self._total_prediction_calls += len(images)
predictions = self._model.batch_predictions(images)
assert predictions.ndim == 2
assert predictions.shape[0] == images.shape[0]
if return_details:
assert greedy
adversarials = []
for i in range(len(predictions)):
if strict:
in_bounds_i = True
else:
in_bounds_i = self.in_bounds(images[i])
is_adversarial, is_best, distance = self._is_adversarial(
images[i], predictions[i], in_bounds_i)
if is_adversarial and greedy:
if return_details:
return predictions, is_adversarial, i, is_best, distance
else:
return predictions, is_adversarial, i
adversarials.append(is_adversarial)
if greedy: # pragma: no cover
# no adversarial found
if return_details:
return predictions, False, None, False, None
else:
return predictions, False, None
is_adversarial = | np.array(adversarials) | numpy.array |
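# --- Editor's usage sketch (not part of the original source) ---------------
# A hedged illustration of how an attack typically drives the wrapper above:
# it submits candidate images through `predictions()` and stops once one is
# adversarial or the wrapper raises `StopAttack` (the exception used above
# when the distance threshold is reached). `adv` and `StopAttack` are assumed
# to come from this module; names here are illustrative only.
def _example_attack_loop(adv, candidate_images):
    """Query candidates one by one and return the first adversarial found."""
    try:
        for candidate in candidate_images:
            _, is_adversarial = adv.predictions(candidate)
            if is_adversarial:
                return candidate
    except StopAttack:
        # the wrapper already stored the best adversarial internally
        # (see _new_adversarial above), so we can simply stop here
        pass
    return None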
import torch
import numpy as np
from maml.datasets.metadataset import Task
def generate_sinusoid_batch(amp_range, phase_range, input_range, num_samples,
batch_size, oracle, bias=0):
amp = np.random.uniform(amp_range[0], amp_range[1], [batch_size])
phase = np.random.uniform(phase_range[0], phase_range[1], [batch_size])
outputs = | np.zeros([batch_size, num_samples, 1]) | numpy.zeros |
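# --- Editor's illustrative sketch (not part of the original source) --------
# The generator above samples an amplitude and phase per task for sinusoid
# regression; the remainder of its body is not shown here. As a hedged,
# standalone approximation, a single task is commonly built as
# y = amp * sin(x - phase) + bias, which is what this sketch does (it only
# relies on the `np` import above).
def example_sinusoid_task(amp=1.0, phase=0.0, bias=0.0,
                          input_range=(-5.0, 5.0), num_samples=10):
    x = np.random.uniform(input_range[0], input_range[1], (num_samples, 1))
    y = amp * np.sin(x - phase) + bias
    return x, y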
"""
Utility function module for the Bayesian Neural Network.
"""
import numpy as np
import scipy.optimize as spo
import theano
import theano.tensor as T
def load_reg_data(X_tr_reg, y_tr_reg, X_te_reg, y_te_reg):
def shared_reg_dataset(data_xy, borrow=True):
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x, dtype=theano.config.floatX), borrow=borrow)
shared_y = theano.shared(np.asarray(data_y, dtype=theano.config.floatX), borrow=borrow)
return shared_x, T.cast(shared_y, 'float32')
mean_y_tr_reg = 0 * np.mean(y_tr_reg, 0)
std_y_tr_reg = 1.0 + 0 * | np.std(y_tr_reg, 0) | numpy.std |
# -*- coding: utf-8 -*-
# built-in
import time
import warnings
# common
import numpy as np
import scipy.integrate as scpinteg
import matplotlib.pyplot as plt # DB
# specific
from . import _utils
from . import _class_checks
_SCIPY_URL_BASE = 'https://docs.scipy.org/doc/scipy/reference/generated/'
_SCIPY_URL = (
_SCIPY_URL_BASE
+ 'scipy.integrate.solve_ivp.html#scipy.integrate.solve_ivp'
)
_DSOLVERS = {
'eRK4-homemade': {
'type': 'explicit',
'step': 'fixed',
'com': 'Runge_Kutta order 4',
'source': __file__,
},
'eRK1-homemade': {
'type': 'explicit',
'step': 'fixed',
'com': 'Runge_Kutta order 1',
'source': __file__,
},
'eRK2-scipy': {
'scipy': 'RK23',
'type': 'explicit',
'step': 'variable',
'com': 'Runge_Kutta order 2',
'source': _SCIPY_URL,
},
'eRK4-scipy': {
'scipy': 'RK45',
'type': 'explicit',
'step': 'variable',
'com': 'Runge_Kutta order 4',
'source': _SCIPY_URL,
},
'eRK8-scipy': {
'scipy': 'DOP853',
'type': 'explicit',
'step': 'variable',
'com': 'Runge_Kutta order 8',
'source': _SCIPY_URL,
},
}
_SOLVER = 'eRK4-homemade'
# #############################################################################
# #############################################################################
# user-interface to display solvers
# #############################################################################
def get_available_solvers(returnas=None, verb=None):
# ----------------
# check inputs
if returnas is None:
returnas = False
lreturnok = [False, list, dict]
    if returnas not in lreturnok:
        msg = (
            f"Arg returnas must be in {lreturnok}\n"
            f"Provided: {returnas}"
        )
        raise Exception(msg)
if verb is None:
verb = returnas is False
# ----------------
# print or return
if verb is True:
def make_source(k0, dsolvers=_DSOLVERS):
if 'scipy' in k0:
method = dsolvers[k0]['scipy']
source = f"scipy.integrate.solve_ivp(method='{method}')"
else:
source = dsolvers[k0]['source']
return source
col = ['key', 'type', 'step', 'comments', 'source']
ar = [
[
f"'{k0}'",
v0['type'],
v0['step'],
v0['com'],
make_source(k0),
]
for k0, v0 in _DSOLVERS.items()
]
return _utils._get_summary(
lar=[ar],
lcol=[col],
verb=verb,
returnas=False,
)
elif returnas is dict:
return {k0: dict(v0) for k0, v0 in _DSOLVERS.items()}
else:
return list(_DSOLVERS.keys())
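# Editor's usage note (illustrative only): with the registry defined above,
#   get_available_solvers(returnas=list)
# returns the registered solver keys, e.g.
#   ['eRK4-homemade', 'eRK1-homemade', 'eRK2-scipy', 'eRK4-scipy', 'eRK8-scipy']
# while get_available_solvers(returnas=dict) returns a copy of the full
# _DSOLVERS description, and the default (returnas=False, verb=True) prints a
# summary table.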
# #############################################################################
# #############################################################################
# check
# #############################################################################
def _check_solver(solver):
if solver is None:
solver = _SOLVER
c0 = (
isinstance(solver, str)
and solver in _DSOLVERS.keys()
)
if not c0:
        msg = (
            "Arg solver must be among the available solver keys:\n"
            + str(get_available_solvers(verb=False, returnas=list))
            + f"\n\nProvided: '{solver}'"
)
raise Exception(msg)
return solver
# #############################################################################
# #############################################################################
# Main entry point
# #############################################################################
def solve(
solver=None,
dparam=None,
dmulti=None,
lode=None,
lstate=None,
dargs=None,
nt=None,
rtol=None,
atol=None,
max_time_step=None,
dverb=None,
):
# -----------
# check input
solver = _check_solver(solver)
# -----------
# Define the function that takes/returns all functions
y0, dydt_func, lode_solve, dargs_temp, vectorized = get_func_dydt(
dparam=dparam,
dargs=dargs,
lode=lode,
lstate=lstate,
solver=solver,
dmulti=dmulti,
)
# -------------
# dispatch to relevant solver to solve ode using dydt_func
if solver == 'eRK4-homemade':
_eRK4_homemade(
y0=y0,
dydt_func=dydt_func,
dparam=dparam,
lode=lode_solve,
lstate=lstate,
nt=nt,
dverb=dverb,
)
elif solver == 'eRK1-homemade':
_eRK1_homemade(
y0=y0,
dydt_func=dydt_func,
dparam=dparam,
lode=lode_solve,
lstate=lstate,
nt=nt,
dverb=dverb,
)
else:
# scipy takes one-dimensional y only
_solver_scipy(
y0=y0,
dydt_func=dydt_func,
dparam=dparam,
dargs_temp=dargs_temp,
lode=lode_solve,
lstate=lstate,
atol=atol,
rtol=rtol,
max_time_step=max_time_step,
solver=solver,
dverb=dverb,
dmulti=dmulti,
vectorized=vectorized,
)
# ----------------------
# Post-treatment
# compute statevar functions, in good order
for k0 in lstate:
dparam[k0]['value'][...] = dparam[k0]['func'](**dargs[k0])
return solver
# #############################################################################
# #############################################################################
# Common utility: define dydt = f(y, t)
# #############################################################################
def get_func_dydt(
dparam=None,
dargs=None,
lode=None,
lstate=None,
solver=None,
dmulti=None,
):
# for implicit solver => vectorize
vectorized = False
if 'scipy' in solver:
if not solver.startswith('e'):
vectorized = True
# ---------------
# Get list of ode except time
if 'scipy' in solver and vectorized is True:
msg = "Vectorized version for implicit solvers not implemented yet"
raise NotImplementedError(msg)
if 'scipy' in solver:
lode_solve = [k0 for k0 in lode if k0 != 'time']
shapey = (len(lode_solve),)
shapebuf = (1,)
else:
lode_solve = lode
shapey = tuple(np.r_[len(lode_solve), dmulti['shape']])
shapebuf = tuple(dmulti['shape'])
# ---------------------
# initialize y
y0 = np.array([dparam[k0]['value'][0, ...] for k0 in lode_solve])
# ---------------------
# prepare array to be used as buffer
    # (to avoid a new array creation every time the function is called)
# array of dydt
dydt = np.full(shapey, np.nan)
# dict of values
dbuffer = {
k0: np.full(shapebuf, np.nan)
for k0 in lode_solve + lstate
}
# dict of args, takes values in dbuffer by reference
dargs_temp = {
k0: {
k1: dbuffer['lambda' if k1 == 'lamb' else k1]
for k1 in dargs[k0].keys()
}
for k0 in dargs.keys()
}
# -----------------
# get func
if 'scipy' in solver and False:
pass
else:
def func(
t,
y,
dargs_temp=dargs_temp,
dydt=dydt,
dparam=dparam,
lode_solve=lode_solve,
lstate=lstate,
dbuffer=dbuffer,
):
""" dydt = f(t, y)
Where y is a (n,) array
            y[0] = first ode
y[1] = second ode
...
y[n] = last ode
            All intermediate values are stored in dparam[k0]['value'][-1, 0]
"""
# ------------
# update cache => also updates dargs and dargs_temp by reference
# used by dargs_temp (by reference)
for ii, k0 in enumerate(lode_solve):
dbuffer[k0][...] = y[ii, ...]
# ------------
# First update intermediary functions based on provided y
# The last time step is used as temporary buffer
# used by dargs_temp (by reference)
for ii, k0 in enumerate(lstate):
dbuffer[k0][...] = dparam[k0]['func'](**dargs_temp[k0])
# ------------
# Then compute derivative dydt (ode)
for ii, k0 in enumerate(lode_solve):
if 'itself' in dparam[k0]['kargs']:
dydt[ii, ...] = dparam[k0]['func'](
itself=y[ii, ...], **dargs_temp[k0],
)
else:
dydt[ii, ...] = dparam[k0]['func'](**dargs_temp[k0])
return np.copy(dydt)
return y0, func, lode_solve, dargs_temp, vectorized
# #############################################################################
# #############################################################################
# Home-made
# #############################################################################
def _eRK4_homemade(
y0=None,
dydt_func=None,
dparam=None,
lode=None,
lstate=None,
nt=None,
dverb=None,
):
""" Structure of the homemade rk4 solver, with time loop, intermediaries...
"""
# initialize y
y = np.copy(y0)
# start loop on time
t0 = time.time()
for ii in range(1, nt):
# print of wait
if dverb['verb'] > 0:
t0 = _class_checks._print_or_wait(ii=ii, nt=nt, t0=t0, **dverb)
# Estimate dt (for future variable time step versions)
# dt =
# compute ode variables from ii-1, using solver
y += _rk4(
dydt_func=dydt_func,
dt=dparam['dt']['value'],
y=y,
t=np.nan, # no model with explicit time dependence (for now)
)
# dispatch to store result of ode
for jj, k0 in enumerate(lode):
dparam[k0]['value'][ii, ...] = y[jj, ...]
def _rk4(dydt_func=None, dt=None, y=None, t=None):
"""
a traditional RK4 scheme, with:
- y = array of all variables (all ode)
- dt = fixed time step
"""
dy1_on_dt = dydt_func(t, y)
dy2_on_dt = dydt_func(t + dt/2., y + dy1_on_dt * dt/2.)
dy3_on_dt = dydt_func(t + dt/2., y + dy2_on_dt * dt/2.)
dy4_on_dt = dydt_func(t + dt, y + dy3_on_dt * dt)
return (dy1_on_dt + 2*dy2_on_dt + 2*dy3_on_dt + dy4_on_dt) * dt/6.
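# --- Editor's sanity-check sketch (not part of the original solver) --------
# Illustration only: integrating the toy ODE dy/dt = -y with the fixed-step
# _rk4 increment above and comparing against the exact solution exp(-t).
# It relies on the `np` import and the `_rk4` function of this module.
def _example_rk4_decay(nt=100, dt=0.05):
    y = np.array([1.0])
    for _ in range(nt):
        y = y + _rk4(dydt_func=lambda t, yy: -yy, dt=dt, y=y, t=np.nan)
    # numerical value at t = nt*dt vs. exact solution
    return y[0], np.exp(-nt * dt)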
# ###########################################
# eRK1
# ###########################################
def _eRK1_homemade(
y0=None,
dydt_func=None,
dparam=None,
lode=None,
lstate=None,
nt=None,
dverb=None,
):
""" Structure of the homemade rk1 solver, with time loop, intermediaries...
    Kept as a simple explicit Euler reference implementation, mainly for debugging
"""
# initialize y
y = np.copy(y0)
# start loop on time
t0 = time.time()
for ii in range(1, nt):
# print of wait
if dverb['verb'] > 0:
t0 = _class_checks._print_or_wait(ii=ii, nt=nt, t0=t0, **dverb)
# Estimate dt (for future variable time step versions)
# dt =
# compute ode variables from ii-1, using solver
y += _rk1(
dydt_func=dydt_func,
dt=dparam['dt']['value'],
y=y,
t=np.nan,
)
# dispatch to store result of ode
for jj, k0 in enumerate(lode):
dparam[k0]['value'][ii, ...] = y[jj, ...]
def _rk1(dydt_func=None, dt=None, y=None, t=None):
"""
    a traditional explicit Euler scheme, with:
- y = array of all variables (all ode)
- dt = fixed time step
"""
dy1_on_dt = dydt_func(t, y)
return (dy1_on_dt) * dt
# #############################################################################
# #############################################################################
# scipy
# #############################################################################
def _solver_scipy(
y0=None,
dydt_func=None,
dparam=None,
dargs_temp=None,
lode=None,
lstate=None,
atol=None,
rtol=None,
dverb=None,
max_time_step=None,
solver=None,
dmulti=None,
vectorized=None,
):
""" scipy.RK45 solver, for cross-checking
First try: with a unique system (nx = 1)
    issue: how do we update intermediary functions?
    the algorithm only seems to allow odes...
Beware: here variable time steps are possible and should be handled
"""
# -----------------
# check inputs
if rtol is None:
rtol = 1.e-3
if atol is None:
atol = 1.e-6
if max_time_step is None:
max_time_step = 10. * dparam['dt']['value']
# -----------------
# define t_span, t_eval
t_span = [dparam['time']['initial'], dparam['Tmax']['value']]
t_eval = | np.linspace(t_span[0], t_span[1], dparam['nt']['value']) | numpy.linspace |
#!python
# cython: language_level=3
# This script contains classes that define all the parameters for
# a radar system
# This script requires that 'numpy' be installed within the Python
# environment you are running this script in.
# This file can be imported as a module and contains the following
# class:
# * Transmitter - A class defines parameters of a radar transmitter
# * Receiver - A class defines parameters of a radar receiver
# * Radar - A class defines basic parameters of a radar system
# ----------
# RadarSimPy - A Radar Simulator Built with Python
# Copyright (C) 2018 - 2020 <NAME>
# E-mail: <EMAIL>
# Website: https://zpeng.me
# ` `
# -:. -#:
# -//:. -###:
# -////:. -#####:
# -/:.://:. -###++##:
# .. `://:- -###+. :##:
# `:/+####+. :##:
# .::::::::/+###. :##:
# .////-----+##: `:###:
# `-//:. :##: `:###/.
# `-//:. :##:`:###/.
# `-//:+######/.
# `-/+####/.
# `+##+.
# :##:
# :##:
# :##:
# :##:
# :##:
# .+:
import numpy as np
def cal_phase_noise(signal, fs, freq, power, seed=None, validation=False):
"""
Oscillator Phase Noise Model
:param numpy.2darray signal:
Input signal
:param float fs:
Sampling frequency
:param numpy.1darray freq:
Frequency of the phase noise
:param numpy.1darray power:
Power of the phase noise
:param int seed:
Seed for noise generator
:param boolean validation:
Validate phase noise
:return:
Signal with phase noise
:rtype: numpy.2darray
**NOTES**
- The presented model is a simple VCO phase noise model based
on the following consideration:
If the output of an oscillator is given as
V(t) = V0 * cos( w0*t + phi(t) ), then phi(t) is defined
as the phase noise. In cases of small noise sources (a valid
    assumption in any usable system), a narrowband modulation
    approximation can be used to express the oscillator output as:
    V(t) = V0 * cos( w0*t + phi(t) )
    = V0 * [cos(w0*t)*cos(phi(t)) - sin(w0*t)*sin(phi(t)) ]
    ~ V0 * [cos(w0*t) - sin(w0*t)*phi(t)]
This shows that phase noise will be mixed with the carrier
to produce sidebands around the carrier.
- In other words, exp(j*x) ~ (1+j*x) for small x
- Phase noise = 0 dBc/Hz at freq. offset of 0 Hz
- The lowest phase noise level is defined by the input SSB phase
noise power at the maximal freq. offset from DC.
(IT DOES NOT BECOME EQUAL TO ZERO )
The generation process is as follows:
First of all we interpolate (in log-scale) SSB phase noise power
spectrum in M equally spaced points
(on the interval [0 fs/2] including bounds ).
After that we calculate required frequency shape of the phase
noise by X(m) = sqrt(P(m)*dF(m)) and after that complement it
by the symmetrical negative part of the spectrum.
After that we generate AWGN of power 1 in the freq domain and
multiply it sample-by-sample to the calculated shape
Finally we perform 2*M-2 points IFFT to such generated noise
::
| 0 dBc/Hz
| \\ /
| \\ /
| \\ /
| \\P dBc/Hz /
| .\\ /
| . \\ /
| . \\ /
| . \\______________________________________/ <- This level
| . is defined by the power at the maximal freq
| |__| _|__|__|__|__|__|__|__|__|__|__|__|__|__|__|__|__|__ (N)
| 0 dF fs/2 fs
| DC
|
"""
if seed is None:
rng = np.random.default_rng()
else:
rng = np.random.default_rng(seed)
signal = signal.astype(complex)
# Sort freq and power
sort_idx = np.argsort(freq)
freq = freq[sort_idx]
power = power[sort_idx]
cut_idx = np.where(freq < fs/2)
freq = freq[cut_idx]
power = power[cut_idx]
# Add 0 dBc/Hz @ DC
if not np.any(np.isin(freq, 0)):
freq = np.concatenate(([0], freq))
power = np.concatenate(([0], power))
# Calculate input length
[row, N] = np.shape(signal)
# Define M number of points (frequency resolution) in the
# positive spectrum (M equally spaced points on the interval
# [0 fs/2] including bounds), then the number of points in the
# negative spectrum will be M-2 ( interval (fs/2, fs) not
# including bounds )
#
# The total number of points in the frequency domain will be
# 2*M-2, and if we want to get the same length as the input
# signal, then
# 2*M-2 = N
# M-1 = N/2
# M = N/2 + 1
#
# So, if N is even then M = N/2 + 1, and if N is odd we will take
# M = (N+1)/2 + 1
#
if np.remainder(N, 2):
M = int((N+1)/2 + 1)
else:
M = int(N/2 + 1)
# Equally spaced partitioning of the half spectrum
F = np.linspace(0, fs/2, int(M)) # Freq. Grid
dF = np.concatenate((np.diff(F), [F[-1]-F[-2]])) # Delta F
realmin = np.finfo(np.float64).tiny
# realmin = 1e-30
# Perform interpolation of power in log-scale
intrvlNum = len(freq)
logP = np.zeros(int(M))
# for intrvlIndex = 1 : intrvlNum,
for intrvlIndex in range(0, intrvlNum):
leftBound = freq[intrvlIndex]
t1 = power[intrvlIndex]
if intrvlIndex == intrvlNum-1:
rightBound = fs/2
t2 = power[-1]
inside = np.where(np.logical_and(
F >= leftBound, F <= rightBound))
else:
rightBound = freq[intrvlIndex+1]
t2 = power[intrvlIndex+1]
inside = np.where(np.logical_and(
F >= leftBound, F < rightBound))
logP[inside] = t1 + (np.log10(F[inside] + realmin) -
np.log10(leftBound + realmin)) / \
(np.log10(rightBound + 2*realmin) -
np.log10(leftBound + realmin)) * (t2-t1)
# Interpolated P ( half spectrum [0 fs/2] ) [ dBc/Hz ]
P = 10**(np.real(logP)/10)
# Now we will generate AWGN of power 1 in frequency domain and shape
# it by the desired shape as follows:
#
# At the frequency offset F(m) from DC we want to get power Ptag(m)
# such that P(m) = Ptag/dF(m), that is we have to choose
# X(m) = sqrt( P(m)*dF(m) );
#
# Due to the normalization factors of FFT and IFFT defined as follows:
# For length K input vector x, the DFT is a length K vector X,
# with elements
# K
# X(k) = sum x(n)*exp(-j*2*pi*(k-1)*(n-1)/K), 1 <= k <= K.
# n=1
# The inverse DFT (computed by IFFT) is given by
# K
# x(n) = (1/K) sum X(k)*exp( j*2*pi*(k-1)*(n-1)/K), 1 <= n <= K.
# k=1
#
# we have to compensate normalization factor (1/K) multiplying X(k)
# by K. In our case K = 2*M-2.
# Generate AWGN of power 1
if validation:
awgn_P1 = (np.sqrt(0.5)*( | np.ones((row, M)) | numpy.ones |
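# --- Editor's usage sketch (not part of the original source) ---------------
# A hedged illustration of how cal_phase_noise above (whose body is truncated
# here) is typically called: a 2-D complex baseband signal plus the SSB phase
# noise profile given as offset frequencies [Hz] and powers [dBc/Hz]. All
# numbers are illustrative only.
def _example_phase_noise_usage():
    fs = 10e6                                  # sampling rate, Hz
    sig = np.ones((1, 4096), dtype=complex)    # one CW channel
    freq = np.array([1e3, 1e4, 1e5, 1e6])      # offset frequencies, Hz
    power = np.array([-80, -90, -100, -110])   # SSB phase noise, dBc/Hz
    return cal_phase_noise(sig, fs, freq, power, seed=1234)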
""" Sparse model downloaded from https://s3-us-west-1.amazonaws.com/nndistiller/agp-pruning/mobilenet/checkpoint.pth.tar
python profiling/profile_sparse_mobilenet.py -a mobilenet -d imagenet --dataset-dir $NAS_HOME/datasets/ILSVRC2012 --resume $ICCV19/sparse/mobilenet_distiller
"""
import os
import sys
import argparse
import copy
import time
import shutil
import json
import logging
import functools
logging.getLogger().setLevel(logging.DEBUG)
import pandas as pd
import numpy as np
import torch
import torch.sparse
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
from gumi import model_utils
from gumi.ops import *
from gumi.model_runner.model_runner import ModelRunner
from gumi.model_runner.parser import create_cli_parser
parser = create_cli_parser(prog="CLI tool for profiling GConv models.")
parser.add_argument(
"--iters", type=int, default=10000, help="Number of profiling iterations"
)
parser.add_argument(
"--use-cuda", action="store_true", default=False, help="Whether to use GPU"
)
args = parser.parse_args()
# CUDA
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
cudnn.benchmark = True
class SparsityCalculator(object):
@staticmethod
def get_model_sparsity(model, threshold=1e-5, level=None):
""" Compute the total sparsity of a model """
sum_nnz = 0
sum_total = 0 # total elements
for name, mod in model.named_modules():
if not isinstance(mod, nn.Conv2d):
continue
nnz, total = SparsityCalculator.get_sparsity(
mod, threshold=threshold, level=level
)
sum_nnz += nnz
sum_total += total
return float(sum_nnz) / sum_total
@staticmethod
def get_sparsity(mod, threshold=1e-5, level=None):
""" Compute the sparsity of a module. """
assert isinstance(mod, nn.Conv2d)
W = mod.weight.cpu().detach().numpy()
if level is None or level == "elem":
nz = ( | np.abs(W) | numpy.abs |
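# --- Editor's illustrative sketch (not part of the original source) --------
# Element-wise sparsity of a single Conv2d layer, mirroring what the
# (truncated) get_sparsity above computes for level='elem': the count of
# weights whose magnitude exceeds a small threshold vs. the total count.
# Layer sizes below are arbitrary.
def example_conv_sparsity(threshold=1e-5):
    conv = nn.Conv2d(8, 16, kernel_size=3, bias=False)
    with torch.no_grad():
        conv.weight[:4].zero_()                # artificially prune some filters
    w = conv.weight.detach().cpu().numpy()
    nnz = int((np.abs(w) > threshold).sum())
    return nnz, w.size, 1.0 - nnz / float(w.size)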
import argparse
import os
import json
import faiss
import numpy as np
import pandas as pd
from data import create_multi_splits
from validate import L2norm
from validate import retrieve, KNN, score
# Training settings
parser = argparse.ArgumentParser(description='PyTorch SBIR')
parser.add_argument('--dir-path', type=str, default='exp', metavar='ED',
help='directory with domainnet models')
parser.add_argument('--new-data-path', type=str, default='', metavar='ED',
help='overwrite data path')
parser.add_argument('--eval', type=str, required=True, metavar='ED',
help='many2any|any2many')
args = parser.parse_args()
GROUPS = 500
SEED = 1234
def get_config():
# iterate over folders in the directory
configs = {}
for path in os.listdir(args.dir_path):
fname = os.path.join(args.dir_path, path, 'config.json')
if os.path.isfile(fname):
with open(fname) as f:
tmp = json.load(f)
if tmp['mode'] == 'im':
configs[tmp['domain']] = tmp
configs[tmp['domain']]['working_path'] = os.path.join(
args.dir_path, path)
if not args.new_data_path == '':
configs[tmp['domain']]['data_dir'] = args.new_data_path
else:
configs['quickdraw'] = tmp
configs['quickdraw']['working_path'] = os.path.join(
args.dir_path, path)
if not args.new_data_path == '':
configs['quickdraw']['data_dir'] = args.new_data_path
return configs
def get_splits(configs):
    keys = sorted(configs.keys())
fpaths = []
domains = []
y = []
for key in keys:
# get data splits
df_dir = os.path.join('aux', 'data', configs[key]['dataset'])
splits = create_multi_splits(df_dir, configs[key]['domain'])
if key == 'quickdraw':
fpaths.extend(splits['sk']['test'].index.values)
domains.extend(splits['sk']['test']['domain'].values)
y.extend(splits['sk']['test']['cat'].values)
else:
fpaths.extend(splits['im']['test'].index.values)
domains.extend(splits['im']['test']['domain'].values)
y.extend(splits['im']['test']['cat'].values)
df = pd.DataFrame({'domain': domains, 'cat': y}, index=fpaths)
return df
def read_data(fpath):
data = np.load(fpath)
return data['features'], data['labels']
def mix_queries(base, complement, alpha=0.5):
idx = sample_complement(base['y'], complement['y'])
mixture = alpha * base['x'] + (1-alpha) * complement['x'][idx, :]
return mixture, idx
def sample_complement(y_base, y_complement):
np.random.seed(SEED)
idx = []
for y in y_base:
cond_idx = np.argwhere(y_complement == y).squeeze()
idx.append(np.random.choice(cond_idx))
return idx
def many2any_retrieval(configs, sources=['quickdraw', 'real']):
    keys = sorted(configs.keys())
source_data = {}
for domain in sources:
dirname = configs[domain]['working_path']
fpath = os.path.join(dirname, 'features.npz')
x, y = read_data(fpath)
source_data[domain] = {}
source_data[domain]['x'] = x
source_data[domain]['y'] = y
    # save images that have been mixed, such that they don't get retrieved
x_src, idx = mix_queries(source_data[sources[0]], source_data[sources[1]])
y_src = source_data[sources[0]]['y']
np.save('plop.npy', idx)
res = {}
for domain in keys:
dirname = configs[domain]['working_path']
fpath = os.path.join(dirname, 'features.npz')
x_tgt, y_tgt = read_data(fpath)
if sources[0] == domain and sources[1] == domain:
pass
else:
print('\nRetrieval from %s+%s to %s' %
(sources[0], sources[1], domain))
if domain == sources[1]:
do_mixture = True
else:
do_mixture = False
tmp = cross_domain_retrieval(
x_src, y_src, x_tgt, y_tgt,
zeroshot=configs[domain]['overwrite'],
mixture=do_mixture)
res[domain] = tmp
os.remove('plop.npy')
def get_data(configs):
    keys = sorted(configs.keys())
feats = []
labels = []
domains = []
for i, key in enumerate(keys):
dirname = configs[key]['working_path']
fpath = os.path.join(dirname, 'features.npz')
data = np.load(fpath)
nsamples = len(data['labels'])
feats.extend(data['features'])
labels.extend(data['labels'])
domains.extend([key] * nsamples)
return feats, labels, domains
def one2many_retrieve_intent_aware(feats, labels, domains, splits,
source='quickdraw',
zeroshot=False):
cond = np.asarray(domains) == source
x_src = np.asarray(feats)[cond, :]
y_src = np.asarray(labels)[cond]
x_tgt = np.asarray(feats)[~cond, :]
y_tgt = np.asarray(labels)[~cond]
d_tgt = np.asarray(domains)[~cond]
# KNN
g_src_x = KNN(x_src, x_tgt, K=1, mode='ones')
if zeroshot:
alpha = 0.7
else:
alpha = 0.4
x_src = slerp(alpha, L2norm(x_src), L2norm(g_src_x))
idx = myretrieve(x_src, x_tgt, topK=100)
yd_tgt = np.char.add(y_tgt.astype(d_tgt.dtype), d_tgt)
domains = np.unique(d_tgt)
categories = np.unique(y_tgt)
# compute occurrences of every category per domain
occ = []
for d in domains:
occ_inner = []
for c in categories:
cond = np.logical_and(d_tgt == d, y_tgt == c)
occ_inner.append(np.sum(cond))
occ.append(occ_inner)
    occ = np.asarray(occ, dtype=float)
    # normalize occurrences
occ /= np.sum(occ, axis=0)
import multiprocessing as mp
from metrics import average_precision
# compute intent-aware mAP per domain
mAP_ia = []
for d in domains:
yd_src = np.char.add(y_src.astype(d_tgt.dtype), d)
res = np.char.equal(yd_tgt[idx], yd_src[:, None])
pool = mp.Pool(processes=10)
results = [pool.apply_async(average_precision, args=(r,)) for r in res]
mAP = np.asarray([p.get() for p in results])
pool.close()
mAP_ia.append(mAP)
print('%s: %.3f' % (d, np.mean(mAP)))
mAP_ia = | np.asarray(mAP_ia) | numpy.asarray |
import numpy as np
def plot_kde_density_1d(x, bw_method=None, n_steps=200, alpha_fill=0.25, rug=False, overshoot=0.1, ax=None, kernel_size=None, **kwargs):
"""
Plot a 1D kernel density estimate function of the data.
"""
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
if ax is None:
ax = plt.gca()
    kernel = gaussian_kde(x, bw_method)
    if kernel_size is not None:
kernel.inv_cov /= kernel_size
x_min, x_max = np.min(x), np.max(x)
x_range = x_max - x_min
x_eval = np.linspace(x_min - overshoot * x_range, x_max + overshoot * x_range, n_steps)
y_eval = kernel(x_eval)
res = ax.plot(x_eval, y_eval, **kwargs)
if alpha_fill != 0:
ax.fill_between(x_eval, y_eval, color=res[0].get_color(), alpha=alpha_fill)
if rug:
plot_rug(x, color=res[0].get_color(), ax=ax)
return res
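# --- Editor's usage sketch (not part of the original source) ---------------
# Minimal illustration of the 1-D helper above on synthetic data; keyword
# arguments such as `label` are simply forwarded to ax.plot. Purely a hedged
# example.
def _example_kde_1d():
    import matplotlib.pyplot as plt
    x = np.random.normal(size=500)
    plot_kde_density_1d(x, alpha_fill=0.2, label="KDE of a N(0, 1) sample")
    plt.legend()
    plt.show()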
def plot_kde_density_2d(x, y, bw_method=None, n_steps=200, ax=None, overshoot=0.1, kernel_size=None, **kwargs):
"""
Plot a 2D kernel density estimate function of the data.
"""
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
if ax is None:
ax = plt.gca()
xy = np.vstack((x, y))
kernel = gaussian_kde(xy, bw_method)
    if kernel_size is not None:
kernel.inv_cov /= kernel_size
x_min, x_max, y_min, y_max = np.min(x), np.max(x), np.min(y), np.max(y)
x_range, y_range = x_max - x_min, y_max - y_min
x_eval = np.linspace(x_min - overshoot * x_range, x_max + overshoot * x_range, n_steps)
y_eval = np.linspace(y_min - overshoot * y_range, y_max + overshoot * y_range, n_steps)
X, Y = np.meshgrid(x_eval, y_eval)
z_eval = kernel(np.vstack((X.flatten(), Y.flatten()))).reshape(X.shape)
return plt.imshow(np.rot90(z_eval), extent=[np.min(x), | np.max(x) | numpy.max |
#============================================================================
# Copyright (c) 2018 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#============================================================================
# Author: <NAME>
# E-mail: <EMAIL>
# Description: Improvements of stripe artifact removal methods:
# [1] <NAME>, <NAME>, and <NAME>, "Superior techniques
# for eliminating ring artifacts in X-ray micro-tomography," Optics
# Express 26, 28396-28412 (2018). https://doi.org/10.1364/OE.26.028396.
# [2] <NAME>, <NAME>, and <NAME>,"Preprocessing techniques
# for removing artifacts in synchrotron-based tomographic images,"
# Proc. SPIE 11113, Developments in X-Ray Tomography XII.
# https://doi.org/10.1117/12.2530324.
# Publication date: 09th October 2019
#============================================================================
"""
Module for stripe removal methods proposed in:
https://doi.org/10.1117/12.2530324
"""
import numpy as np
from scipy import interpolate
from scipy.signal.windows import gaussian
from scipy.ndimage import median_filter
from scipy.ndimage import binary_dilation
# import scipy.fftpack as fft
import pyfftw.interfaces.scipy_fftpack as fft
from sarepy.prep.stripe_removal_original import remove_stripe_based_sorting
from sarepy.prep.stripe_removal_original import remove_stripe_based_fitting
from sarepy.prep.stripe_removal_original import apply_gaussian_filter
from sarepy.prep.stripe_removal_original import detect_stripe
def remove_stripe_based_filtering_sorting(sinogram, sigma, size, dim=1):
"""
Removing stripes using the filtering and sorting technique, combination of
algorithm 2 and algorithm 3 in Ref.[1]. Angular direction is along the axis 0.
Parameters
----------
sinogram : array_like
2D array. Sinogram image.
sigma : int
Sigma of the Gaussian window used to separate the low-pass and
high-pass components of the intensity profile of each column.
size : int
Window size of the median filter.
dim : {1, 2}, optional
Dimension of the window.
Returns
-------
ndarray
2D array. Stripe-removed sinogram.
References
----------
.. [1] https://doi.org/10.1364/OE.26.028396
"""
pad = min(150, int(0.1 * sinogram.shape[0]))
sinogram = np.transpose(sinogram)
sino_pad = np.pad(sinogram, ((0, 0), (pad, pad)), mode='reflect')
(_, ncol) = sino_pad.shape
window = gaussian(ncol, std=sigma)
list_sign = np.power(-1.0, np.arange(ncol))
sino_smooth = np.copy(sinogram)
for i, sino_1d in enumerate(sino_pad):
sino_smooth[i] = np.real(
fft.ifft(fft.fft(sino_1d * list_sign) * window) * list_sign)[pad:ncol - pad]
sino_sharp = sinogram - sino_smooth
sino_smooth_cor = np.transpose(
remove_stripe_based_sorting(np.transpose(sino_smooth), size, dim))
return | np.transpose(sino_smooth_cor + sino_sharp) | numpy.transpose |
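# --- Editor's usage sketch (not part of the original source) ---------------
# A hedged illustration of calling the filtering + sorting stripe remover
# above on a synthetic sinogram (angles along axis 0) with two full vertical
# stripes superimposed. Parameter values are illustrative only.
def _example_stripe_removal():
    nrow, ncol = 180, 256                      # projection angles x detector pixels
    sino = np.random.normal(1.0, 0.02, size=(nrow, ncol))
    sino[:, 60] *= 1.2                         # simulate two stripe artifacts
    sino[:, 200] *= 0.8
    return remove_stripe_based_filtering_sorting(sino, sigma=3, size=31, dim=1)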
import copy
import inspect
import logging
import math
import os
import pprint
import time
from typing import Union
import networkx as nx
import numpy as np
import pandas as pd
from autogluon.common.utils.log_utils import set_logger_verbosity
from autogluon.common.utils.pandas_utils import get_approximate_df_mem_usage
from autogluon.common.utils.utils import setup_outputdir
from autogluon.core.calibrate.temperature_scaling import tune_temperature_scaling
from autogluon.core.calibrate.conformity_score import compute_conformity_score
from autogluon.core.constants import BINARY, MULTICLASS, REGRESSION, QUANTILE, AUTO_WEIGHT, BALANCE_WEIGHT, PSEUDO_MODEL_SUFFIX, PROBLEM_TYPES_CLASSIFICATION
from autogluon.core.data.label_cleaner import LabelCleanerMulticlassToBinary
from autogluon.core.dataset import TabularDataset
from autogluon.core.pseudolabeling.pseudolabeling import filter_pseudo, filter_ensemble_pseudo
from autogluon.core.scheduler.scheduler_factory import scheduler_factory
from autogluon.core.trainer import AbstractTrainer
from autogluon.core.utils import get_pred_from_proba_df
from autogluon.core.utils import plot_performance_vs_trials, plot_summary_of_models, plot_tabular_models
from autogluon.core.utils.decorators import apply_presets
from autogluon.core.utils.loaders import load_pkl, load_str
from autogluon.core.utils.savers import save_pkl, save_str
from autogluon.core.utils.utils import default_holdout_frac
from ..configs.feature_generator_presets import get_default_feature_generator
from ..configs.hyperparameter_configs import get_hyperparameter_config
from ..configs.presets_configs import tabular_presets_dict
from ..learner import AbstractLearner, DefaultLearner
logger = logging.getLogger(__name__) # return autogluon root logger
# TODO: num_bag_sets -> ag_args
# Extra TODOs (Stretch): Can occur post v0.1
# TODO: make core_kwargs a kwargs argument to predictor.fit
# TODO: add aux_kwargs to predictor.fit
# TODO: add pip freeze + python version output after fit + log file, validate that same pip freeze on load as cached
# TODO: predictor.clone()
# TODO: Add logging comments that models are serialized on disk after fit
# TODO: consider adding kwarg option for data which has already been preprocessed by feature generator to skip feature generation.
# TODO: Resolve raw text feature usage in default feature generator
# Done for Tabular
# TODO: Remove all `time_limits` in project, replace with `time_limit`
class TabularPredictor:
"""
AutoGluon TabularPredictor predicts values in a column of a tabular dataset (classification or regression).
Parameters
----------
label : str
Name of the column that contains the target variable to predict.
problem_type : str, default = None
Type of prediction problem, i.e. is this a binary/multiclass classification or regression problem (options: 'binary', 'multiclass', 'regression', 'quantile').
If `problem_type = None`, the prediction problem type is inferred based on the label-values in provided dataset.
eval_metric : function or str, default = None
Metric by which predictions will be ultimately evaluated on test data.
AutoGluon tunes factors such as hyperparameters, early-stopping, ensemble-weights, etc. in order to improve this metric on validation data.
If `eval_metric = None`, it is automatically chosen based on `problem_type`.
Defaults to 'accuracy' for binary and multiclass classification, 'root_mean_squared_error' for regression, and 'pinball_loss' for quantile.
Otherwise, options for classification:
['accuracy', 'balanced_accuracy', 'f1', 'f1_macro', 'f1_micro', 'f1_weighted',
'roc_auc', 'roc_auc_ovo_macro', 'average_precision', 'precision', 'precision_macro', 'precision_micro',
'precision_weighted', 'recall', 'recall_macro', 'recall_micro', 'recall_weighted', 'log_loss', 'pac_score']
Options for regression:
['root_mean_squared_error', 'mean_squared_error', 'mean_absolute_error', 'median_absolute_error', 'r2']
For more information on these options, see `sklearn.metrics`: https://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics
You can also pass your own evaluation function here as long as it follows formatting of the functions defined in folder `autogluon.core.metrics`.
path : str, default = None
Path to directory where models and intermediate outputs should be saved.
If unspecified, a time-stamped folder called "AutogluonModels/ag-[TIMESTAMP]" will be created in the working directory to store all models.
Note: To call `fit()` twice and save all results of each fit, you must specify different `path` locations or don't specify `path` at all.
Otherwise files from first `fit()` will be overwritten by second `fit()`.
verbosity : int, default = 2
Verbosity levels range from 0 to 4 and control how much information is printed.
Higher levels correspond to more detailed print statements (you can set verbosity = 0 to suppress warnings).
If using logging, you can alternatively control amount of information printed via `logger.setLevel(L)`,
where `L` ranges from 0 to 50 (Note: higher values of `L` correspond to fewer print statements, opposite of verbosity levels).
sample_weight : str, default = None
If specified, this column-name indicates which column of the data should be treated as sample weights. This column will NOT be considered as a predictive feature.
Sample weights should be non-negative (and cannot be nan), with larger values indicating which rows are more important than others.
If you want your usage of sample weights to match results obtained outside of this Predictor, then ensure sample weights for your training (or tuning) data sum to the number of rows in the training (or tuning) data.
You may also specify two special strings: 'auto_weight' (automatically choose a weighting strategy based on the data) or 'balance_weight' (equally weight classes in classification, no effect in regression). If specifying your own sample_weight column, make sure its name does not match these special strings.
weight_evaluation : bool, default = False
Only considered when `sample_weight` column is not None. Determines whether sample weights should be taken into account when computing evaluation metrics on validation/test data.
If True, then weighted metrics will be reported based on the sample weights provided in the specified `sample_weight` (in which case `sample_weight` column must also be present in test data).
In this case, the 'best' model used by default for prediction will also be decided based on a weighted version of evaluation metric.
Note: we do not recommend specifying `weight_evaluation` when `sample_weight` is 'auto_weight' or 'balance_weight', instead specify appropriate `eval_metric`.
groups : str, default = None
[Experimental] If specified, AutoGluon will use the column named the value of groups in `train_data` during `.fit` as the data splitting indices for the purposes of bagging.
This column will not be used as a feature during model training.
This parameter is ignored if bagging is not enabled. To instead specify a custom validation set with bagging disabled, specify `tuning_data` in `.fit`.
The data will be split via `sklearn.model_selection.LeaveOneGroupOut`.
Use this option to control the exact split indices AutoGluon uses.
It is not recommended to use this option unless it is required for very specific situations.
Bugs may arise from edge cases if the provided groups are not valid to properly train models, such as if not all classes are present during training in multiclass classification. It is up to the user to sanitize their groups.
As an example, if you want your data folds to preserve adjacent rows in the table without shuffling, then for 3 fold bagging with 6 rows of data, the groups column values should be [0, 0, 1, 1, 2, 2].
**kwargs :
learner_type : AbstractLearner, default = DefaultLearner
A class which inherits from `AbstractLearner`. This dictates the inner logic of predictor.
If you don't know what this is, keep it as the default.
learner_kwargs : dict, default = None
Kwargs to send to the learner. Options include:
positive_class : str or int, default = None
Used to determine the positive class in binary classification.
This is used for certain metrics such as 'f1' which produce different scores depending on which class is considered the positive class.
If not set, will be inferred as the second element of the existing unique classes after sorting them.
If classes are [0, 1], then 1 will be selected as the positive class.
If classes are ['def', 'abc'], then 'def' will be selected as the positive class.
If classes are [True, False], then True will be selected as the positive class.
ignored_columns : list, default = None
Banned subset of column names that predictor may not use as predictive features (e.g. unique identifier to a row or user-ID).
These columns are ignored during `fit()`.
label_count_threshold : int, default = 10
For multi-class classification problems, this is the minimum number of times a label must appear in dataset in order to be considered an output class.
AutoGluon will ignore any classes whose labels do not appear at least this many times in the dataset (i.e. will never predict them).
cache_data : bool, default = True
When enabled, the training and validation data are saved to disk for future reuse.
Enables advanced functionality in predictor such as `fit_extra()` and feature importance calculation on the original data.
trainer_type : AbstractTrainer, default = AutoTrainer
A class inheriting from `AbstractTrainer` that controls training/ensembling of many models.
If you don't know what this is, keep it as the default.
Attributes
----------
path : str
Path to directory where all models used by this Predictor are stored.
problem_type : str
What type of prediction problem this Predictor has been trained for.
eval_metric : function or str
What metric is used to evaluate predictive performance.
label : str
Name of table column that contains data from the variable to predict (often referred to as: labels, response variable, target variable, dependent variable, Y, etc).
feature_metadata : :class:`autogluon.common.features.feature_metadata.FeatureMetadata`
Inferred data type of each predictive variable after preprocessing transformation (i.e. column of training data table used to predict `label`).
Contains both raw dtype and special dtype information. Each feature has exactly 1 raw dtype (such as 'int', 'float', 'category') and zero to many special dtypes (such as 'datetime_as_int', 'text', 'text_ngram').
Special dtypes are AutoGluon specific feature types that are used to identify features with meaning beyond what the raw dtype can convey.
`feature_metadata.type_map_raw`: Dictionary of feature name -> raw dtype mappings.
`feature_metadata.type_group_map_special`: Dictionary of lists of special feature names, grouped by special feature dtype.
positive_class : str or int
Returns the positive class name in binary classification. Useful for computing metrics such as F1 which require a positive and negative class.
In binary classification, :meth:`TabularPredictor.predict_proba` returns the estimated probability that each row belongs to the positive class.
Will print a warning and return None if called when `predictor.problem_type != 'binary'`.
class_labels : list
For multiclass problems, this list contains the class labels in sorted order of `predict_proba()` output.
For binary problems, this list contains the class labels in sorted order of `predict_proba(as_multiclass=True)` output.
`class_labels[0]` corresponds to internal label = 0 (negative class), `class_labels[1]` corresponds to internal label = 1 (positive class).
This is relevant for certain metrics such as F1 where True and False labels impact the metric score differently.
For other problem types, will equal None.
For example if `pred = predict_proba(x, as_multiclass=True)`, then ith index of `pred` provides predicted probability that `x` belongs to class given by `class_labels[i]`.
class_labels_internal : list
For multiclass problems, this list contains the internal class labels in sorted order of internal `predict_proba()` output.
For binary problems, this list contains the internal class labels in sorted order of internal `predict_proba(as_multiclass=True)` output.
The value will always be `class_labels_internal=[0, 1]` for binary problems, with 0 as the negative class, and 1 as the positive class.
For other problem types, will equal None.
class_labels_internal_map : dict
For binary and multiclass classification problems, this dictionary contains the mapping of the original labels to the internal labels.
For example, in binary classification, label values of 'True' and 'False' will be mapped to the internal representation `1` and `0`.
Therefore, class_labels_internal_map would equal {'True': 1, 'False': 0}
For other problem types, will equal None.
For multiclass, it is possible for not all of the label values to have a mapping.
This indicates that the internal models will never predict those missing labels, and training rows associated with the missing labels were dropped.
"""
Dataset = TabularDataset
predictor_file_name = 'predictor.pkl'
_predictor_version_file_name = '__version__'
def __init__(
self,
label,
problem_type=None,
eval_metric=None,
path=None,
verbosity=2,
sample_weight=None,
weight_evaluation=False,
groups=None,
**kwargs
):
self.verbosity = verbosity
set_logger_verbosity(self.verbosity)
if sample_weight == AUTO_WEIGHT: # TODO: update auto_weight strategy and make it the default
sample_weight = None
logger.log(15, f"{AUTO_WEIGHT} currently does not use any sample weights.")
self.sample_weight = sample_weight
self.weight_evaluation = weight_evaluation # TODO: sample_weight and weight_evaluation can both be properties that link to self._learner.sample_weight, self._learner.weight_evaluation
if self.sample_weight in [AUTO_WEIGHT, BALANCE_WEIGHT] and self.weight_evaluation:
logger.warning(
f"We do not recommend specifying weight_evaluation when sample_weight='{self.sample_weight}', instead specify appropriate eval_metric.")
self._validate_init_kwargs(kwargs)
path = setup_outputdir(path)
learner_type = kwargs.pop('learner_type', DefaultLearner)
learner_kwargs = kwargs.pop('learner_kwargs', dict())
quantile_levels = kwargs.get('quantile_levels', None)
self._learner: AbstractLearner = learner_type(path_context=path, label=label, feature_generator=None,
eval_metric=eval_metric, problem_type=problem_type,
quantile_levels=quantile_levels,
sample_weight=self.sample_weight,
weight_evaluation=self.weight_evaluation, groups=groups,
**learner_kwargs)
self._learner_type = type(self._learner)
self._trainer = None
@property
def class_labels(self):
return self._learner.class_labels
@property
def class_labels_internal(self):
return self._learner.label_cleaner.ordered_class_labels_transformed
@property
def class_labels_internal_map(self):
return self._learner.label_cleaner.inv_map
@property
def quantile_levels(self):
return self._learner.quantile_levels
@property
def eval_metric(self):
return self._learner.eval_metric
@property
def problem_type(self):
return self._learner.problem_type
def features(self, feature_stage: str = 'original'):
"""
Returns a list of feature names dependent on the value of feature_stage.
Parameters
----------
feature_stage : str, default = 'original'
If 'original', returns the list of features specified in the original training data. This feature set is required in input data when making predictions.
If 'transformed', returns the list of features after pre-processing by the feature generator.
Returns
-------
Returns a list of feature names
"""
if feature_stage == 'original':
return self.feature_metadata_in.get_features()
elif feature_stage == 'transformed':
return self.feature_metadata.get_features()
else:
raise ValueError(f"Unknown feature_stage: '{feature_stage}'. Must be one of {['original', 'transformed']}")
@property
def feature_metadata(self):
return self._trainer.feature_metadata
@property
def feature_metadata_in(self):
return self._learner.feature_generator.feature_metadata_in
@property
def label(self):
return self._learner.label
@property
def path(self):
return self._learner.path
@apply_presets(tabular_presets_dict)
def fit(self,
train_data,
tuning_data=None,
time_limit=None,
presets=None,
hyperparameters=None,
feature_metadata='infer',
**kwargs):
"""
Fit models to predict a column of a data table (label) based on the other columns (features).
Parameters
----------
train_data : str or :class:`TabularDataset` or :class:`pd.DataFrame`
Table of the training data, which is similar to a pandas DataFrame.
If str is passed, `train_data` will be loaded using the str value as the file path.
tuning_data : str or :class:`TabularDataset` or :class:`pd.DataFrame`, default = None
Another dataset containing validation data reserved for tuning processes such as early stopping and hyperparameter tuning.
This dataset should be in the same format as `train_data`.
If str is passed, `tuning_data` will be loaded using the str value as the file path.
Note: final model returned may be fit on `tuning_data` as well as `train_data`. Do not provide your evaluation test data here!
In particular, when `num_bag_folds` > 0 or `num_stack_levels` > 0, models will be trained on both `tuning_data` and `train_data`.
If `tuning_data = None`, `fit()` will automatically hold out some random validation examples from `train_data`.
time_limit : int, default = None
Approximately how long `fit()` should run for (wallclock time in seconds).
If not specified, `fit()` will run until all models have completed training, but will not repeatedly bag models unless `num_bag_sets` is specified.
presets : list or str or dict, default = ['medium_quality_faster_train']
List of preset configurations for various arguments in `fit()`. Can significantly impact predictive accuracy, memory-footprint, and inference latency of trained models, and various other properties of the returned `predictor`.
It is recommended to specify presets and avoid specifying most other `fit()` arguments or model hyperparameters prior to becoming familiar with AutoGluon.
As an example, to get the most accurate overall predictor (regardless of its efficiency), set `presets='best_quality'`.
To get good quality with minimal disk usage, set `presets=['good_quality_faster_inference_only_refit', 'optimize_for_deployment']`
Any user-specified arguments in `fit()` will override the values used by presets.
If specifying a list of presets, later presets will override earlier presets if they alter the same argument.
For precise definitions of the provided presets, see file: `autogluon/tabular/configs/presets_configs.py`.
Users can specify custom presets by passing in a dictionary of argument values as an element to the list.
Available Presets: ['best_quality', 'high_quality_fast_inference_only_refit', 'good_quality_faster_inference_only_refit', 'medium_quality_faster_train', 'optimize_for_deployment', 'ignore_text']
It is recommended to only use one `quality` based preset in a given call to `fit()` as they alter many of the same arguments and are not compatible with each-other.
In-depth Preset Info:
best_quality={'auto_stack': True}
Best predictive accuracy with little consideration to inference time or disk usage. Achieve even better results by specifying a large time_limit value.
Recommended for applications that benefit from the best possible model accuracy.
high_quality_fast_inference_only_refit={'auto_stack': True, 'refit_full': True, 'set_best_to_refit_full': True, '_save_bag_folds': False}
High predictive accuracy with fast inference. ~10x-200x faster inference and ~10x-200x lower disk usage than `best_quality`.
Recommended for applications that require reasonable inference speed and/or model size.
good_quality_faster_inference_only_refit={'auto_stack': True, 'refit_full': True, 'set_best_to_refit_full': True, '_save_bag_folds': False, 'hyperparameters': 'light'}
Good predictive accuracy with very fast inference. ~4x faster inference and ~4x lower disk usage than `high_quality_fast_inference_only_refit`.
Recommended for applications that require fast inference speed.
medium_quality_faster_train={'auto_stack': False}
Medium predictive accuracy with very fast inference and very fast training time. ~20x faster training than `good_quality_faster_inference_only_refit`.
This is the default preset in AutoGluon, but should generally only be used for quick prototyping, as `good_quality_faster_inference_only_refit` results in significantly better predictive accuracy and faster inference time.
optimize_for_deployment={'keep_only_best': True, 'save_space': True}
Optimizes result immediately for deployment by deleting unused models and removing training artifacts.
Often can reduce disk usage by ~2-4x with no negatives to model accuracy or inference speed.
This will disable numerous advanced functionality, but has no impact on inference.
This will make certain functionality less informative, such as `predictor.leaderboard()` and `predictor.fit_summary()`.
Because unused models will be deleted under this preset, methods like `predictor.leaderboard()` and `predictor.fit_summary()` will no longer show the full set of models that were trained during `fit()`.
Recommended for applications where the inner details of AutoGluon's training is not important and there is no intention of manually choosing between the final models.
This preset pairs well with the other presets such as `good_quality_faster_inference_only_refit` to make a very compact final model.
Identical to calling `predictor.delete_models(models_to_keep='best', dry_run=False)` and `predictor.save_space()` directly after `fit()`.
ignore_text={'_feature_generator_kwargs': {'enable_text_ngram_features': False, 'enable_text_special_features': False, 'enable_raw_text_features': False}}
Disables automated feature generation when text features are detected.
This is useful to determine how beneficial text features are to the end result, as well as to ensure features are not mistaken for text when they are not.
Ignored if `feature_generator` was also specified.
hyperparameters : str or dict, default = 'default'
Determines the hyperparameters used by the models.
If `str` is passed, will use a preset hyperparameter configuration.
Valid `str` options: ['default', 'light', 'very_light', 'toy', 'multimodal']
'default': Default AutoGluon hyperparameters intended to maximize accuracy without significant regard to inference time or disk usage.
'light': Results in smaller models. Generally will make inference speed much faster and disk usage much lower, but with worse accuracy.
'very_light': Results in much smaller models. Behaves similarly to 'light', but in many cases with over 10x less disk usage and a further reduction in accuracy.
'toy': Results in extremely small models. Only use this when prototyping, as the model quality will be severely reduced.
'multimodal': [EXPERIMENTAL] Trains a multimodal transformer model alongside tabular models. Requires that some text columns appear in the data, a GPU, and CUDA-enabled MXNet.
When combined with 'best_quality' `presets` option, this can achieve extremely strong results in multimodal data tables that contain columns with text in addition to numeric/categorical columns.
Reference `autogluon/tabular/configs/hyperparameter_configs.py` for information on the hyperparameters associated with each preset.
Keys are strings that indicate which model types to train.
Stable model options include:
'GBM' (LightGBM)
'CAT' (CatBoost)
'XGB' (XGBoost)
'RF' (random forest)
'XT' (extremely randomized trees)
'KNN' (k-nearest neighbors)
'LR' (linear regression)
'NN' (neural network with MXNet backend)
'FASTAI' (neural network with FastAI backend)
Experimental model options include:
'FASTTEXT' (FastText)
'AG_TEXT_NN' (Multimodal Text+Tabular model, GPU is required)
'TRANSF' (Tabular Transformer, GPU is recommended)
If a certain key is missing from hyperparameters, then `fit()` will not train any models of that type. Omitting a model key from hyperparameters is equivalent to including this model key in `excluded_model_types`.
For example, set `hyperparameters = { 'NN':{...} }` if say you only want to train neural networks and no other types of models.
Values = dict of hyperparameter settings for each model type, or list of dicts.
Each hyperparameter can either be a single fixed value or a search space containing many possible values.
Unspecified hyperparameters will be set to default values (or default search spaces if `hyperparameter_tune = True`).
Caution: Any provided search spaces will be overridden by fixed defaults if `hyperparameter_tune = False`.
To train multiple models of a given type, set the value to a list of hyperparameter dictionaries.
For example, `hyperparameters = {'RF': [{'criterion': 'gini'}, {'criterion': 'entropy'}]}` will result in 2 random forest models being trained with separate hyperparameters.
Advanced functionality: Custom models
`hyperparameters` can also take special string values instead of a dictionary of model parameters which maps to a pre-configured model configuration (currently supported options = ['GBMLarge']).
These additional models will be trained using custom pre-specified hyperparameter settings that are known to work well.
Advanced functionality: Custom stack levels
By default, AutoGluon re-uses the same models and model hyperparameters at each level during stack ensembling.
To customize this behaviour, create a hyperparameters dictionary separately for each stack level, and then add them as values to a new dictionary, with keys equal to the stack level.
Example: `hyperparameters = {1: {'RF': rf_params1}, 2: {'CAT': [cat_params1, cat_params2], 'NN': {}}}`
This will result in a stack ensemble that has one custom random forest in level 1 followed by two CatBoost models with custom hyperparameters and a default neural network in level 2, for a total of 4 models.
If a level is not specified in `hyperparameters`, it will default to using the highest specified level to train models. This can also be explicitly controlled by adding a 'default' key.
Default:
hyperparameters = {
'NN': {},
'GBM': [
{'extra_trees': True, 'ag_args': {'name_suffix': 'XT'}},
{},
'GBMLarge',
],
'CAT': {},
'XGB': {},
'FASTAI': {},
'RF': [
{'criterion': 'gini', 'ag_args': {'name_suffix': 'Gini', 'problem_types': ['binary', 'multiclass']}},
{'criterion': 'entropy', 'ag_args': {'name_suffix': 'Entr', 'problem_types': ['binary', 'multiclass']}},
{'criterion': 'mse', 'ag_args': {'name_suffix': 'MSE', 'problem_types': ['regression']}},
],
'XT': [
{'criterion': 'gini', 'ag_args': {'name_suffix': 'Gini', 'problem_types': ['binary', 'multiclass']}},
{'criterion': 'entropy', 'ag_args': {'name_suffix': 'Entr', 'problem_types': ['binary', 'multiclass']}},
{'criterion': 'mse', 'ag_args': {'name_suffix': 'MSE', 'problem_types': ['regression']}},
],
'KNN': [
{'weights': 'uniform', 'ag_args': {'name_suffix': 'Unif'}},
{'weights': 'distance', 'ag_args': {'name_suffix': 'Dist'}},
],
}
Details regarding the hyperparameters you can specify for each model are provided in the following files:
NN: `autogluon.tabular.models.tabular_nn.hyperparameters.parameters`
Note: certain hyperparameter settings may cause these neural networks to train much slower.
GBM: `autogluon.tabular.models.lgb.hyperparameters.parameters`
See also the lightGBM docs: https://lightgbm.readthedocs.io/en/latest/Parameters.html
CAT: `autogluon.tabular.models.catboost.hyperparameters.parameters`
See also the CatBoost docs: https://catboost.ai/docs/concepts/parameter-tuning.html
XGB: `autogluon.tabular.models.xgboost.hyperparameters.parameters`
See also the XGBoost docs: https://xgboost.readthedocs.io/en/latest/parameter.html
FASTAI: `autogluon.tabular.models.fastainn.hyperparameters.parameters`
See also the FastAI docs: https://docs.fast.ai/tabular.models.html
RF: See sklearn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
Note: Hyperparameter tuning is disabled for this model.
XT: See sklearn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
Note: Hyperparameter tuning is disabled for this model.
KNN: See sklearn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
Note: Hyperparameter tuning is disabled for this model.
LR: `autogluon.tabular.models.lr.hyperparameters.parameters`
Note: Hyperparameter tuning is disabled for this model.
Note: 'penalty' parameter can be used for regression to specify regularization method: 'L1' and 'L2' values are supported.
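For instance (an illustrative sketch only; consult the files above for the authoritative list of keys per model): `hyperparameters = {'GBM': {'num_boost_round': 500, 'learning_rate': 0.05}, 'NN': {'num_epochs': 20}, 'RF': {'n_estimators': 300}}`.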
Advanced functionality: Custom AutoGluon model arguments
These arguments are optional and can be specified in any model's hyperparameters.
Example: `hyperparameters = {'RF': {..., 'ag_args': {'name_suffix': 'CustomModelSuffix', 'disable_in_hpo': True}}}`
ag_args: Dictionary of customization options related to meta properties of the model such as its name, the order it is trained, the problem types it is valid for, and the type of HPO it utilizes.
Valid keys:
name: (str) The name of the model. This overrides AutoGluon's naming logic and all other name arguments if present.
name_main: (str) The main name of the model. Example: 'RandomForest'.
name_prefix: (str) Add a custom prefix to the model name. Unused by default.
name_suffix: (str) Add a custom suffix to the model name. Unused by default.
priority: (int) Determines the order in which the model is trained. Larger values result in the model being trained earlier. Default values range from 100 (KNN) to 0 (custom), dictated by model type. If you want this model to be trained first, set priority = 999.
problem_types: (list) List of valid problem types for the model. `problem_types=['binary']` will result in the model only being trained if `problem_type` is 'binary'.
disable_in_hpo: (bool) If True, the model will only be trained if `hyperparameter_tune_kwargs=None`.
valid_stacker: (bool) If False, the model will not be trained as a level 2 or higher stacker model.
valid_base: (bool) If False, the model will not be trained as a level 1 (base) model.
hyperparameter_tune_kwargs: (dict) Refer to :meth:`TabularPredictor.fit` hyperparameter_tune_kwargs argument. If specified here, will override global HPO settings for this model.
Reference the default hyperparameters for example usage of these options.
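Example (illustrative sketch): `hyperparameters = {'GBM': {'ag_args': {'name_suffix': 'Priority', 'priority': 999, 'problem_types': ['binary', 'multiclass']}}}` trains this LightGBM model first and only for classification problems.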
ag_args_fit: Dictionary of model fit customization options related to how and with what constraints the model is trained. These parameters affect stacker fold models, but not stacker models themselves.
Clarification: `time_limit` is the internal time in seconds given to a particular model to train, which is dictated in part by the `time_limit` argument given during `predictor.fit()` but is not the same.
Valid keys:
stopping_metric: (str or :class:`autogluon.core.metrics.Scorer`, default=None) The metric to use for early stopping of the model. If None, model will decide.
max_memory_usage_ratio: (float, default=1.0) The ratio of memory usage relative to the default to allow before early stopping or killing the model. Values greater than 1.0 will be increasingly prone to out-of-memory errors.
max_time_limit_ratio: (float, default=1.0) The ratio of the provided time_limit to use during model `fit()`. If `time_limit=10` and `max_time_limit_ratio=0.3`, time_limit would be changed to 3. Does not alter max_time_limit or min_time_limit values.
max_time_limit: (float, default=None) Maximum amount of time to allow this model to train for (in sec). If the provided time_limit is greater than this value, it will be replaced by max_time_limit.
min_time_limit: (float, default=0) Allow this model to train for at least this long (in sec), regardless of the time limit it would otherwise be granted.
If `min_time_limit >= max_time_limit`, time_limit will be set to min_time_limit.
If `min_time_limit=None`, time_limit will be set to None and the model will have no training time restriction.
num_cpus : (int or str, default='auto')
How many CPUs to use during model fit.
If 'auto', model will decide.
num_gpus : (int or str, default='auto')
How many GPUs to use during model fit.
If 'auto', model will decide. Some models can use GPUs but don't by default due to differences in model quality.
Set to 0 to disable usage of GPUs.
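Example (illustrative sketch): `hyperparameters = {'XGB': {'ag_args_fit': {'num_gpus': 1, 'max_time_limit': 600}}}` requests one GPU for the XGBoost model and caps its training time at 600 seconds.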
ag_args_ensemble: Dictionary of hyperparameters shared by all models that control how they are ensembled, if bag mode is enabled.
Valid keys:
use_orig_features: (bool) Whether a stack model will use the original features along with the stack features to train (akin to skip-connections). If the model has no stack features (no base models), this value is ignored and the stack model will use the original features.
max_base_models: (int, default=25) Maximum number of base models whose predictions form the features input to this stacker model. If more than `max_base_models` base models are available, only the top `max_base_models` models with highest validation score are used.
max_base_models_per_type: (int, default=5) Similar to `max_base_models`. If more than `max_base_models_per_type` of any particular model type are available, only the top `max_base_models_per_type` of that type are used. This occurs before the `max_base_models` filter.
save_bag_folds: (bool, default=True)
If True, bagged models will save their fold models (the models from each individual fold of bagging). This is required to use bagged models for prediction.
If False, bagged models will not save their fold models. This means that bagged models will not be valid models during inference.
This should only be set to False when planning to call `predictor.refit_full()` or when `refit_full` is set and `set_best_to_refit_full=True`.
Particularly useful if disk usage is a concern. By not saving the fold models, bagged models will use only very small amounts of disk space during training.
In many training runs, this will reduce peak disk usage by >10x.
fold_fitting_strategy: (AbstractFoldFittingStrategy default=auto) Whether to fit folds in parallel or in sequential order.
If parallel_local, folds will be trained in parallel with evenly distributed computing resources. This could bring 2-4x speedup compared to SequentialLocalFoldFittingStrategy, but could consume much more memory.
If sequential_local, folds will be trained sequentially.
If auto, the strategy will be determined by the OS and whether ray is installed. MacOS support for parallel_local is unstable and may crash if enabled.
num_folds_parallel: (int or str, default='auto') Number of folds to be trained in parallel if using ParallelLocalFoldFittingStrategy. Consider lowering this value if you encounter out-of-memory errors or CUDA out-of-memory errors (when training on GPU).
If 'auto', will try to train all folds in parallel.
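Example (illustrative sketch for bag mode): `predictor.fit(train_data, num_bag_folds=5, ag_args_ensemble={'save_bag_folds': True, 'num_folds_parallel': 2})`; the same keys can also be set per model inside `hyperparameters` via an 'ag_args_ensemble' entry.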
feature_metadata : :class:`autogluon.tabular.FeatureMetadata` or str, default = 'infer'
The feature metadata used in various inner logic in feature preprocessing.
If 'infer', will automatically construct a FeatureMetadata object based on the properties of `train_data`.
In this case, `train_data` is input into :meth:`autogluon.tabular.FeatureMetadata.from_df` to infer `feature_metadata`.
If 'infer' incorrectly assumes the dtypes of features, consider explicitly specifying `feature_metadata`.
**kwargs :
auto_stack : bool, default = False
Whether AutoGluon should automatically utilize bagging and multi-layer stack ensembling to boost predictive accuracy.
Set this = True if you are willing to tolerate longer training times in order to maximize predictive accuracy!
Automatically sets `num_bag_folds` and `num_stack_levels` arguments based on dataset properties.
Note: Setting `num_bag_folds` and `num_stack_levels` arguments will override `auto_stack`.
Note: This can increase training time (and inference time) by up to 20x, but can greatly improve predictive performance.
num_bag_folds : int, default = None
Number of folds used for bagging of models. When `num_bag_folds = k`, training time is roughly increased by a factor of `k` (set = 0 to disable bagging).
Disabled by default (0), but we recommend values between 5-10 to maximize predictive performance.
Increasing num_bag_folds will result in models with lower bias but that are more prone to overfitting.
`num_bag_folds = 1` is an invalid value, and will raise a ValueError.
Values > 10 may produce diminishing returns, and can even harm overall results due to overfitting.
To further improve predictions, avoid increasing `num_bag_folds` much beyond 10 and instead increase `num_bag_sets`.
num_bag_sets : int, default = None
Number of repeats of kfold bagging to perform (values must be >= 1). Total number of models trained during bagging = `num_bag_folds * num_bag_sets`.
Defaults to 1 if `time_limit` is not specified, otherwise 20 (always disabled if `num_bag_folds` is not specified).
Values greater than 1 will result in superior predictive performance, especially on smaller problems and with stacking enabled (reduces overall variance).
num_stack_levels : int, default = None
Number of stacking levels to use in stack ensemble. Roughly increases model training time by factor of `num_stack_levels+1` (set = 0 to disable stack ensembling).
Disabled by default (0), but we recommend values between 1-3 to maximize predictive performance.
To prevent overfitting, `num_bag_folds >= 2` must also be set or else a ValueError will be raised.
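Example (illustrative sketch): `predictor.fit(train_data, num_bag_folds=5, num_bag_sets=1, num_stack_levels=1)` enables 5-fold bagging with one layer of stacking, while `predictor.fit(train_data, auto_stack=True)` lets AutoGluon choose these values based on the dataset.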
holdout_frac : float, default = None
Fraction of train_data to holdout as tuning data for optimizing hyperparameters (ignored unless `tuning_data = None`, ignored if `num_bag_folds != 0` unless `use_bag_holdout == True`).
Default value (if None) is selected based on the number of rows in the training data. Default values range from 0.2 at 2,500 rows to 0.01 at 250,000 rows.
Default value is doubled if `hyperparameter_tune_kwargs` is set, up to a maximum of 0.2.
Disabled if `num_bag_folds >= 2` unless `use_bag_holdout == True`.
use_bag_holdout : bool, default = False
If True, a `holdout_frac` portion of the data is held-out from model bagging.
This held-out data is only used to score models and determine weighted ensemble weights.
Enable this if there is a large gap between score_val and score_test in stack models.
Note: If `tuning_data` was specified, `tuning_data` is used as the holdout data.
Disabled if not bagging.
hyperparameter_tune_kwargs : str or dict, default = None
Hyperparameter tuning strategy and kwargs (for example, how many HPO trials to run).
If None, then hyperparameter tuning will not be performed.
Valid preset values:
'auto': Uses the 'bayesopt' preset.
'random': Performs HPO via random search using local scheduler.
'bayesopt': Performs HPO via bayesian optimization using local scheduler.
For valid dictionary keys, refer to :class:`autogluon.core.scheduler.FIFOScheduler` documentation.
The 'searcher' key is required when providing a dict.
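Example (illustrative sketch; keys other than 'searcher' follow the scheduler documentation referenced above): `predictor.fit(train_data, hyperparameter_tune_kwargs='auto', time_limit=600)` or `predictor.fit(train_data, hyperparameter_tune_kwargs={'searcher': 'random', 'num_trials': 5})`.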
feature_prune_kwargs: dict, default = None
Performs layer-wise feature pruning via recursive feature elimination with permutation feature importance.
This fits all models in a stack layer once, discovers a pruned set of features, fits all models in the stack layer
again with the pruned set of features, and updates input feature lists for models whose validation score improved.
If None, do not perform feature pruning. If empty dictionary, perform feature pruning with default configurations.
For valid dictionary keys, refer to :class:`autogluon.core.utils.feature_selection.FeatureSelector` and
`autogluon.core.trainer.abstract_trainer.AbstractTrainer._proxy_model_feature_prune` documentation.
To force all models to work with the pruned set of features, set force_prune=True in the dictionary.
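Example (illustrative sketch): `predictor.fit(train_data, feature_prune_kwargs={})` enables feature pruning with default configurations, while `feature_prune_kwargs={'force_prune': True}` forces all models to use the pruned feature set.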
ag_args : dict, default = None
Keyword arguments to pass to all models (i.e. common hyperparameters shared by all AutoGluon models).
See the `ag_args` argument from "Advanced functionality: Custom AutoGluon model arguments" in the `hyperparameters` argument documentation for valid values.
Identical to specifying `ag_args` parameter for all models in `hyperparameters`.
If a key in `ag_args` is already specified for a model in `hyperparameters`, it will not be altered through this argument.
ag_args_fit : dict, default = None
Keyword arguments to pass to all models.
See the `ag_args_fit` argument from "Advanced functionality: Custom AutoGluon model arguments" in the `hyperparameters` argument documentation for valid values.
Identical to specifying `ag_args_fit` parameter for all models in `hyperparameters`.
If a key in `ag_args_fit` is already specified for a model in `hyperparameters`, it will not be altered through this argument.
ag_args_ensemble : dict, default = None
Keyword arguments to pass to all models.
See the `ag_args_ensemble` argument from "Advanced functionality: Custom AutoGluon model arguments" in the `hyperparameters` argument documentation for valid values.
Identical to specifying `ag_args_ensemble` parameter for all models in `hyperparameters`.
If a key in `ag_args_ensemble` is already specified for a model in `hyperparameters`, it will not be altered through this argument.
excluded_model_types : list, default = None
Banned subset of model types to avoid training during `fit()`, even if present in `hyperparameters`.
Reference `hyperparameters` documentation for what models correspond to each value.
Useful when a particular model type such as 'KNN' or 'custom' is not desired but altering the `hyperparameters` dictionary is difficult or time-consuming.
Example: To exclude both 'KNN' and 'custom' models, specify `excluded_model_types=['KNN', 'custom']`.
refit_full : bool or str, default = False
Whether to retrain all models on all of the data (training + validation) after the normal training procedure.
This is equivalent to calling `predictor.refit_full(model=refit_full)` after fit.
If `refit_full=True`, it will be treated as `refit_full='all'`.
If `refit_full=False`, refitting will not occur.
Valid str values:
`all`: refits all models.
`best`: refits only the best model (and its ancestors if it is a stacker model).
`{model_name}`: refits only the specified model (and its ancestors if it is a stacker model).
For bagged models:
Reduces a model's inference time by collapsing bagged ensembles into a single model fit on all of the training data.
This process will typically result in a slight accuracy reduction and a large inference speedup.
The inference speedup will generally be 10-200x compared to the original bagged ensemble model.
The inference speedup factor is equivalent to (k * n), where k is the number of folds (`num_bag_folds`) and n is the number of finished repeats (`num_bag_sets`) in the bagged ensemble.
The runtime is generally 10% or less of the original fit runtime.
The runtime can be roughly estimated as 1 / (k * n) of the original fit runtime, with k and n defined above.
For non-bagged models:
Optimizes a model's accuracy by retraining on 100% of the data without using a validation set.
Will typically result in a slight accuracy increase and no change to inference time.
The runtime will be approximately equal to the original fit runtime.
This process does not alter the original models, but instead adds additional models.
If stacker models are refit by this process, they will use the refit_full versions of the ancestor models during inference.
Models produced by this process will not have validation scores, as they use all of the data for training.
Therefore, it is up to the user to determine if the models are of sufficient quality by including test data in `predictor.leaderboard(test_data)`.
If the user does not have additional test data, they should reference the original model's score for an estimate of the performance of the refit_full model.
Warning: Be aware that utilizing refit_full models without separately verifying on test data means that the model is untested, and has no guarantee of being consistent with the original model.
The time taken by this process is not enforced by `time_limit`.
set_best_to_refit_full : bool, default = False
If True, will change the default model that Predictor uses for prediction when model is not specified to the refit_full version of the model that exhibited the highest validation score.
Only valid if `refit_full` is set.
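Example (illustrative sketch): `predictor.fit(train_data, presets='best_quality', refit_full='best', set_best_to_refit_full=True)` retrains the best model on all of the data after the normal procedure and makes that refit model the default for prediction.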
keep_only_best : bool, default = False
If True, only the best model and its ancestor models are saved in the outputted `predictor`. All other models are deleted.
If you only care about deploying the most accurate predictor with the smallest file-size and no longer need any of the other trained models or functionality beyond prediction on new data, then set: `keep_only_best=True`, `save_space=True`.
This is equivalent to calling `predictor.delete_models(models_to_keep='best', dry_run=False)` directly after `fit()`.
If used with `refit_full` and `set_best_to_refit_full`, the best model will be the refit_full model, and the original bagged best model will be deleted.
`refit_full` will be automatically set to 'best' in this case to avoid training models which will be later deleted.
save_space : bool, default = False
If True, reduces the memory and disk size of predictor by deleting auxiliary model files that aren't needed for prediction on new data.
This is equivalent to calling `predictor.save_space()` directly after `fit()`.
This has NO impact on inference accuracy.
It is recommended if the only goal is to use the trained model for prediction.
Certain advanced functionality may no longer be available if `save_space=True`. Refer to `predictor.save_space()` documentation for more details.
feature_generator : :class:`autogluon.features.generators.AbstractFeatureGenerator`, default = :class:`autogluon.features.generators.AutoMLPipelineFeatureGenerator`
The feature generator used by AutoGluon to process the input data to the form sent to the models. This often includes automated feature generation and data cleaning.
It is generally recommended to keep the default feature generator unless handling an advanced use-case.
To control aspects of the default feature generation process, you can pass in an :class:`AutoMLPipelineFeatureGenerator` object constructed using some of these kwargs:
enable_numeric_features : bool, default True
Whether to keep features of 'int' and 'float' raw types.
These features are passed without alteration to the models.
Appends IdentityFeatureGenerator(infer_features_in_args=dict(valid_raw_types=['int', 'float'])) to the generator group.
enable_categorical_features : bool, default True
Whether to keep features of 'object' and 'category' raw types.
These features are processed into memory optimized 'category' features.
Appends CategoryFeatureGenerator() to the generator group.
enable_datetime_features : bool, default True
Whether to keep features of 'datetime' raw type and 'object' features identified as 'datetime_as_object' features.
These features will be converted to 'int' features representing milliseconds since epoch.
Appends DatetimeFeatureGenerator() to the generator group.
enable_text_special_features : bool, default True
Whether to use 'object' features identified as 'text' features to generate 'text_special' features such as word count, capital letter ratio, and symbol counts.
Appends TextSpecialFeatureGenerator() to the generator group.
enable_text_ngram_features : bool, default True
Whether to use 'object' features identified as 'text' features to generate 'text_ngram' features.
Appends TextNgramFeatureGenerator(vectorizer=vectorizer) to the generator group.
enable_raw_text_features : bool, default False
Whether to keep the raw text features.
Appends IdentityFeatureGenerator(infer_features_in_args=dict(required_special_types=['text'])) to the generator group.
vectorizer : CountVectorizer, default CountVectorizer(min_df=30, ngram_range=(1, 3), max_features=10000, dtype=np.uint8)
sklearn CountVectorizer object to use in TextNgramFeatureGenerator.
Only used if `enable_text_ngram_features=True`.
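Example (illustrative sketch using the class path documented above): `predictor.fit(train_data, feature_generator=AutoMLPipelineFeatureGenerator(enable_text_ngram_features=False))` after `from autogluon.features.generators import AutoMLPipelineFeatureGenerator`.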
unlabeled_data : pd.DataFrame, default = None
[Experimental Parameter]
Collection of data without labels that we can use to pretrain on. This is the same schema as train_data, except
without the labels. Currently, unlabeled_data is only used for pretraining a TabTransformer model.
If you do not specify 'TRANSF' with unlabeled_data, then no pretraining will occur and unlabeled_data will be ignored!
After the pretraining step, we will finetune using the TabTransformer model as well. If TabTransformer is ensembled
with other models, like in typical AutoGluon fashion, then the output of this "pretrain/finetune" will be ensembled
with other models, which will not use the unlabeled_data. This "pretrain/finetune" flow is also known as semi-supervised learning.
The typical use case for unlabeled_data is to add signal to your model when you do not have sufficient training
data, e.g. 500 hand-labeled samples (perhaps a difficult human labeling task) alongside an unlabeled dataset of thousands or millions of rows.
However, this isn't the only use case. Given enough unlabeled data (millions of rows), you may see improvements
for any amount of labeled data.
verbosity : int
If specified, overrides the existing `predictor.verbosity` value.
calibrate: bool, default = False
If True and the problem_type is classification, temperature scaling will be used to calibrate the Predictor's estimated class probabilities
(which may improve metrics like log_loss) and will train a scalar parameter on the validation set.
If True and the problem_type is quantile regression, conformalization will be used to calibrate the Predictor's estimated quantiles
(which may improve the prediction interval coverage, and bagging could further improve it) and will compute a set of scalar parameters on the validation set.
Returns
-------
:class:`TabularPredictor` object. Returns self.
Examples
--------
>>> from autogluon.tabular import TabularDataset, TabularPredictor
>>> train_data = TabularDataset('https://autogluon.s3.amazonaws.com/datasets/Inc/train.csv')
>>> label = 'class'
>>> predictor = TabularPredictor(label=label).fit(train_data)
>>> test_data = TabularDataset('https://autogluon.s3.amazonaws.com/datasets/Inc/test.csv')
>>> leaderboard = predictor.leaderboard(test_data)
>>> y_test = test_data[label]
>>> test_data = test_data.drop(columns=[label])
>>> y_pred = predictor.predict(test_data)
>>> perf = predictor.evaluate_predictions(y_true=y_test, y_pred=y_pred)
To maximize predictive performance, use the following:
>>> eval_metric = 'roc_auc' # set this to the metric you ultimately care about
>>> time_limit = 3600 # set as long as you are willing to wait (in sec)
>>> predictor = TabularPredictor(label=label, eval_metric=eval_metric).fit(train_data, presets=['best_quality'], time_limit=time_limit)
"""
if self._learner.is_fit:
raise AssertionError(
'Predictor is already fit! To fit additional models, refer to `predictor.fit_extra`, or create a new `Predictor`.')
kwargs_orig = kwargs.copy()
kwargs = self._validate_fit_kwargs(kwargs)
verbosity = kwargs.get('verbosity', self.verbosity)
set_logger_verbosity(verbosity)
if presets:
if not isinstance(presets, list):
presets = [presets]
logger.log(20, f'Presets specified: {presets}')
if verbosity >= 3:
logger.log(20, '============ fit kwarg info ============')
logger.log(20, 'User Specified kwargs:')
logger.log(20, f'{pprint.pformat(kwargs_orig)}')
logger.log(20, 'Full kwargs:')
logger.log(20, f'{pprint.pformat(kwargs)}')
logger.log(20, '========================================')
holdout_frac = kwargs['holdout_frac']
num_bag_folds = kwargs['num_bag_folds']
num_bag_sets = kwargs['num_bag_sets']
num_stack_levels = kwargs['num_stack_levels']
auto_stack = kwargs['auto_stack']
feature_generator = kwargs['feature_generator']
unlabeled_data = kwargs['unlabeled_data']
ag_args = kwargs['ag_args']
ag_args_fit = kwargs['ag_args_fit']
ag_args_ensemble = kwargs['ag_args_ensemble']
excluded_model_types = kwargs['excluded_model_types']
use_bag_holdout = kwargs['use_bag_holdout']
if ag_args is None:
ag_args = {}
ag_args = self._set_hyperparameter_tune_kwargs_in_ag_args(kwargs['hyperparameter_tune_kwargs'], ag_args,
time_limit=time_limit)
feature_generator_init_kwargs = kwargs['_feature_generator_kwargs']
if feature_generator_init_kwargs is None:
feature_generator_init_kwargs = dict()
train_data, tuning_data, unlabeled_data = self._validate_fit_data(train_data=train_data,
tuning_data=tuning_data,
unlabeled_data=unlabeled_data)
if hyperparameters is None:
hyperparameters = 'default'
if isinstance(hyperparameters, str):
hyperparameters = get_hyperparameter_config(hyperparameters)
# TODO: Hyperparameters could contain non-serializable objects. Save as pkl and load on demand
# in case the hyperparameters are large in memory
self.fit_hyperparameters_ = hyperparameters
###################################
# FIXME: v0.1 This section is a hack
if 'enable_raw_text_features' not in feature_generator_init_kwargs:
if 'AG_TEXT_NN' in hyperparameters:
feature_generator_init_kwargs['enable_raw_text_features'] = True
else:
for key in hyperparameters:
if isinstance(key, int) or key == 'default':
if 'AG_TEXT_NN' in hyperparameters[key]:
feature_generator_init_kwargs['enable_raw_text_features'] = True
break
###################################
if feature_metadata is not None and isinstance(feature_metadata, str) and feature_metadata == 'infer':
feature_metadata = None
self._set_feature_generator(feature_generator=feature_generator, feature_metadata=feature_metadata,
init_kwargs=feature_generator_init_kwargs)
num_bag_folds, num_bag_sets, num_stack_levels = self._sanitize_stack_args(
num_bag_folds=num_bag_folds, num_bag_sets=num_bag_sets, num_stack_levels=num_stack_levels,
time_limit=time_limit, auto_stack=auto_stack, num_train_rows=len(train_data),
)
if holdout_frac is None:
holdout_frac = default_holdout_frac(len(train_data),
ag_args.get('hyperparameter_tune_kwargs', None) is not None)
if kwargs['_save_bag_folds'] is not None:
if use_bag_holdout and not kwargs['_save_bag_folds']:
logger.log(30,
f'WARNING: Attempted to disable saving of bagged fold models when `use_bag_holdout=True`. Forcing `save_bag_folds=True` to avoid errors.')
else:
if ag_args_ensemble is None:
ag_args_ensemble = {}
ag_args_ensemble['save_bag_folds'] = kwargs['_save_bag_folds']
if time_limit is None:
mb_mem_usage_train_data = get_approximate_df_mem_usage(train_data, sample_ratio=0.2).sum() / 1e6
num_rows_train = len(train_data)
if mb_mem_usage_train_data >= 50 or num_rows_train >= 100000:
logger.log(20,
f'Warning: Training may take a very long time because `time_limit` was not specified and `train_data` is large ({num_rows_train} samples, {round(mb_mem_usage_train_data, 2)} MB).')
logger.log(20,
f'\tConsider setting `time_limit` to ensure training finishes within an expected duration or experiment with a small portion of `train_data` to identify an ideal `presets` and `hyperparameters` configuration.')
core_kwargs = {
'ag_args': ag_args,
'ag_args_ensemble': ag_args_ensemble,
'ag_args_fit': ag_args_fit,
'excluded_model_types': excluded_model_types,
'feature_prune_kwargs': kwargs.get('feature_prune_kwargs', None)
}
self.save(silent=True) # Save predictor to disk to enable prediction and training after interrupt
self._learner.fit(X=train_data, X_val=tuning_data, X_unlabeled=unlabeled_data,
holdout_frac=holdout_frac, num_bag_folds=num_bag_folds, num_bag_sets=num_bag_sets,
num_stack_levels=num_stack_levels,
hyperparameters=hyperparameters, core_kwargs=core_kwargs, time_limit=time_limit,
verbosity=verbosity, use_bag_holdout=use_bag_holdout)
self._set_post_fit_vars()
self._post_fit(
keep_only_best=kwargs['keep_only_best'],
refit_full=kwargs['refit_full'],
set_best_to_refit_full=kwargs['set_best_to_refit_full'],
save_space=kwargs['save_space'],
calibrate=kwargs['calibrate']
)
self.save()
return self
def _post_fit(self, keep_only_best=False, refit_full=False, set_best_to_refit_full=False, save_space=False,
calibrate=False):
if refit_full is True:
if keep_only_best is True:
if set_best_to_refit_full is True:
refit_full = 'best'
else:
logger.warning(
f'refit_full was set to {refit_full}, but keep_only_best=True and set_best_to_refit_full=False. Disabling refit_full to avoid training models which would be automatically deleted.')
refit_full = False
else:
refit_full = 'all'
if refit_full is not False:
trainer_model_best = self._trainer.get_model_best()
self.refit_full(model=refit_full)
if set_best_to_refit_full:
if trainer_model_best in self._trainer.model_full_dict.keys():
self._trainer.model_best = self._trainer.model_full_dict[trainer_model_best]
# Note: model_best will be overwritten if additional training is done with new models, since model_best will have validation score of None and any new model will have a better validation score.
# This has the side-effect of having the possibility of model_best being overwritten by a worse model than the original model_best.
self._trainer.save()
else:
logger.warning(
f'Best model ({trainer_model_best}) is not present in refit_full dictionary. Training may have failed on the refit model. AutoGluon will default to using {trainer_model_best} for predictions.')
if keep_only_best:
self.delete_models(models_to_keep='best', dry_run=False)
if calibrate:
if self.problem_type in PROBLEM_TYPES_CLASSIFICATION + [QUANTILE]:
self._calibrate_model()
else:
logger.log(30, 'WARNING: calibrate is only applicable to classification or quantile regression problems')
if save_space:
self.save_space()
def _calibrate_model(self, model_name: str = None, lr: float = 0.01, max_iter: int = 1000, init_val: float = 1.0):
"""
Applies temperature scaling to the AutoGluon model. Applies
inverse softmax to the predicted probabilities, then trains a temperature scalar
on validation data to minimize negative log likelihood. The
inverse-softmax logits are divided by the temperature scalar and softmaxed again to return
the calibrated predicted probabilities.
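Schematically (a sketch of the standard temperature scaling recipe rather than AutoGluon-specific code):
calibrated_probs = softmax(log(probs) / T), where T is the learned temperature scalar.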
Parameters:
-----------
model_name: str: default=None
model name to tune temperature scaling on. If set to None
then will tune best model only. Best model chosen by validation score
lr: float: default=0.01
The learning rate for temperature scaling algorithm
max_iter: int: default=1000
Number of iterations optimizer should take for
tuning temperature scaler
init_val: float: default=1.0
The initial value for temperature scalar term
"""
# TODO: Note that temperature scaling is known to worsen calibration in the face of shifted test data.
if model_name is None:
model_name = self._trainer.get_model_best()
if self._trainer.bagged_mode:
y_val_probs = self.get_oof_pred_proba(model_name).to_numpy()
y_val = self._trainer.load_y().to_numpy()
else:
X_val = self._trainer.load_X_val()
y_val_probs = self._trainer.predict_proba(X_val, model_name)
y_val = self._trainer.load_y_val().to_numpy()
if self.problem_type == BINARY:
y_val_probs = LabelCleanerMulticlassToBinary.convert_binary_proba_to_multiclass_proba(y_val_probs)
model = self._trainer.load_model(model_name=model_name)
if self.problem_type == QUANTILE:
logger.log(15, f'Conformity scores being computed to calibrate model: {model_name}')
conformalize = compute_conformity_score(y_val_pred=y_val_probs, y_val=y_val,
quantile_levels=self.quantile_levels)
model.conformalize = conformalize
else:
logger.log(15, f'Temperature scaling term being tuned for model: {model_name}')
temp_scalar = tune_temperature_scaling(y_val_probs=y_val_probs, y_val=y_val,
init_val=init_val, max_iter=max_iter, lr=lr)
logger.log(15, f'Temperature term found is: {temp_scalar}')
model.temperature_scalar = temp_scalar
model.save()
def fit_extra(self, hyperparameters, time_limit=None, base_model_names=None, **kwargs):
"""
Fits additional models after the original :meth:`TabularPredictor.fit` call.
The original train_data and tuning_data will be used to train the models.
Parameters
----------
hyperparameters : str or dict
Refer to argument documentation in :meth:`TabularPredictor.fit`.
If `base_model_names` is specified and hyperparameters is using the level-based key notation,
the key of the level which directly uses the base models should be 1. The level in the hyperparameters
dictionary is relative, not absolute.
time_limit : int, default = None
Refer to argument documentation in :meth:`TabularPredictor.fit`.
base_model_names : list, default = None
The names of the models to use as base models for this fit call.
Base models will provide their out-of-fold predictions as additional features to the models in `hyperparameters`.
If specified, all models trained will be stack ensembles.
If None, models will be trained as if they were specified in :meth:`TabularPredictor.fit`, without depending on existing models.
Only valid if bagging is enabled.
**kwargs :
Refer to kwargs documentation in :meth:`TabularPredictor.fit`.
Note that the following kwargs are not available in `fit_extra` as they cannot be changed from their values set in `fit()`:
[`holdout_frac`, `num_bag_folds`, `auto_stack`, `feature_generator`, `unlabeled_data`]
pseudo_data : pd.DataFrame, default = None
Data that has been self-labeled by an AutoGluon model and will be incorporated into training during 'fit_extra'.
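Examples
--------
>>> # Illustrative sketch: assumes `predictor` was already fit, e.g. via `TabularPredictor.fit(train_data)`
>>> predictor = predictor.fit_extra(hyperparameters={'GBM': {}, 'CAT': {}}, time_limit=600)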
"""
self._assert_is_fit('fit_extra')
time_start = time.time()
kwargs_orig = kwargs.copy()
kwargs = self._validate_fit_extra_kwargs(kwargs)
verbosity = kwargs.get('verbosity', self.verbosity)
set_logger_verbosity(verbosity)
if verbosity >= 3:
logger.log(20, '============ fit kwarg info ============')
logger.log(20, 'User Specified kwargs:')
logger.log(20, f'{pprint.pformat(kwargs_orig)}')
logger.log(20, 'Full kwargs:')
logger.log(20, f'{pprint.pformat(kwargs)}')
logger.log(20, '========================================')
# TODO: Allow disable aux (default to disabled)
# TODO: num_bag_sets
# num_bag_sets = kwargs['num_bag_sets']
num_stack_levels = kwargs['num_stack_levels']
# save_bag_folds = kwargs['save_bag_folds'] # TODO: Enable
ag_args = kwargs['ag_args']
ag_args_fit = kwargs['ag_args_fit']
ag_args_ensemble = kwargs['ag_args_ensemble']
excluded_model_types = kwargs['excluded_model_types']
pseudo_data = kwargs.get('pseudo_data', None)
# TODO: Since data preprocessor is fitted on original train_data it cannot account for if
# labeled pseudo data has new labels unseen in the original train. Probably need to refit
# data preprocessor if this is the case.
if pseudo_data is not None:
if self.label not in pseudo_data.columns:
raise ValueError('\'pseudo_data\' does not contain the labeled column.')
if self.sample_weight is not None:
raise ValueError('Applying \'sample_weight\' while calling \'fit_pseudolabel\' is not supported')
X_pseudo = pseudo_data.drop(columns=[self.label])
y_pseudo_og = pseudo_data[self.label]
X_pseudo = self._learner.transform_features(X_pseudo)
y_pseudo = self._learner.label_cleaner.transform(y_pseudo_og)
if np.isnan(y_pseudo.unique()).any():
raise Exception('NaN was found in the label column for pseudo labeled data. '
'Please ensure no NaN values in target column')
else:
X_pseudo = None
y_pseudo = None
if ag_args is None:
ag_args = {}
ag_args = self._set_hyperparameter_tune_kwargs_in_ag_args(kwargs['hyperparameter_tune_kwargs'], ag_args,
time_limit=time_limit)
fit_new_weighted_ensemble = False # TODO: Add as option
aux_kwargs = None # TODO: Add as option
if isinstance(hyperparameters, str):
hyperparameters = get_hyperparameter_config(hyperparameters)
if num_stack_levels is None:
hyperparameter_keys = list(hyperparameters.keys())
highest_level = 1
for key in hyperparameter_keys:
if isinstance(key, int):
highest_level = max(key, highest_level)
num_stack_levels = highest_level
# TODO: make core_kwargs a kwargs argument to predictor.fit, add aux_kwargs to predictor.fit
core_kwargs = {'ag_args': ag_args, 'ag_args_ensemble': ag_args_ensemble, 'ag_args_fit': ag_args_fit,
'excluded_model_types': excluded_model_types}
if X_pseudo is not None and y_pseudo is not None:
core_kwargs['X_pseudo'] = X_pseudo
core_kwargs['y_pseudo'] = y_pseudo
# TODO: Add special error message if called and training/val data was not cached.
X, y, X_val, y_val = self._trainer.load_data()
if y_pseudo is not None and self.problem_type in PROBLEM_TYPES_CLASSIFICATION:
y_og = self._learner.label_cleaner.inverse_transform(y)
y_og_classes = y_og.unique()
y_pseudo_classes = y_pseudo_og.unique()
matching_classes = np.in1d(y_pseudo_classes, y_og_classes)
if not matching_classes.all():
raise Exception(f'Pseudo training data contains classes not in original train data: {y_pseudo_classes[~matching_classes]}')
name_suffix = kwargs.get('name_suffix', '')
fit_models = self._trainer.train_multi_levels(
X=X, y=y, hyperparameters=hyperparameters, X_val=X_val, y_val=y_val,
base_model_names=base_model_names, time_limit=time_limit, relative_stack=True, level_end=num_stack_levels,
core_kwargs=core_kwargs, aux_kwargs=aux_kwargs, name_suffix=name_suffix
)
if time_limit is not None:
time_limit = time_limit - (time.time() - time_start)
if fit_new_weighted_ensemble:
if time_limit is not None:
time_limit_weighted = max(time_limit, 60)
else:
time_limit_weighted = None
fit_models += self.fit_weighted_ensemble(time_limit=time_limit_weighted)
self._post_fit(
keep_only_best=kwargs['keep_only_best'],
refit_full=kwargs['refit_full'],
set_best_to_refit_full=kwargs['set_best_to_refit_full'],
save_space=kwargs['save_space'],
calibrate=kwargs['calibrate']
)
self.save()
return self
def _get_all_fit_extra_args(self):
ret = list(self._fit_extra_kwargs_dict().keys()) + list(inspect.signature(self.fit_extra).parameters.keys())
ret.remove('kwargs')
return ret
def _fit_weighted_ensemble_pseudo(self):
"""
Fits weighted ensemble on top models trained with pseudo labeling, then if new
weighted ensemble model is best model then sets `model_best` in trainer to
weighted ensemble model.
"""
logger.log(15, 'Fitting weighted ensemble using top models')
weighted_ensemble_model_name = self.fit_weighted_ensemble()[0]
# TODO: This is a hack! self.predict_prob does not update to use weighted ensemble
# if it's the best model.
# TODO: There should also be PL added to weighted ensemble model name to notify
# users it is a model trained with PL models if they are indeed ensembled
model_best_name = self._trainer.leaderboard().iloc[0]['model']
if model_best_name == weighted_ensemble_model_name:
self._trainer.model_best = model_best_name
self._trainer.save()
logger.log(15, 'Weighted ensemble was the best model for current iteration of pseudo labeling')
else:
logger.log(15, 'Weighted ensemble was not the best model for current iteration of pseudo labeling')
def _run_pseudolabeling(self, unlabeled_data: pd.DataFrame, max_iter: int,
return_pred_prob: bool = False, use_ensemble: bool = False,
fit_ensemble: bool = False, fit_ensemble_every_iter: bool = False,
**kwargs):
"""
Runs the pseudolabeling algorithm using the same hyperparameters, models, and fit settings
used for the original model unless otherwise specified by the user. This is an internal function that iteratively
self-labels unlabeled test data, then incorporates all self-labeled data above a threshold into training.
It will keep incorporating self-labeled data into training until the validation score does not improve.
Parameters:
-----------
unlabeled_data: Extra unlabeled data (could be the test data) to assign pseudolabels to
and incorporate as extra training data.
max_iter: int, default = 5
Maximum allowed number of iterations, where in each iteration, the data are pseudolabeled
by the current predictor and the predictor is refit including the pseudolabeled data in its training set.
return_pred_prob: bool, default = False
Transductive learning setting; will return predictive probabilities of unlabeled_data.
use_ensemble: bool, default = False
If True, will use the ensemble pseudo labeling algorithm. If False, will use the best model
pseudo labeling method.
fit_ensemble: bool, default = False
If True will fit weighted ensemble on final best models. Fitting weighted ensemble will be done after fitting
of models is completed unless otherwise specified. If False will not fit weighted ensemble on final best
models.
fit_ensemble_every_iter: bool, default = False
If True will fit weighted ensemble model using combination of best models
for every iteration of pseudo label algorithm. If False and fit_ensemble
is True, will just do it at the very end of training pseudo labeled models.
Returns:
--------
self: TabularPredictor
"""
previous_score = self.info()['best_model_score_val']
y_pseudo_og = pd.Series()
if return_pred_prob:
if self.problem_type is REGRESSION:
y_pred_proba_og = pd.Series()
else:
y_pred_proba_og = pd.DataFrame()
X_test = unlabeled_data.copy()
for i in range(max_iter):
if len(X_test) == 0:
logger.log(20, f'No more unlabeled data to pseudolabel. Done with pseudolabeling...')
break
iter_print = str(i + 1)
logger.log(20, f'Beginning iteration {iter_print} of pseudolabeling out of max: {max_iter}')
if use_ensemble:
if self.problem_type in PROBLEM_TYPES_CLASSIFICATION:
test_pseudo_idxes_true, y_pred_proba, y_pred = filter_ensemble_pseudo(predictor=self,
unlabeled_data=X_test)
else:
test_pseudo_idxes_true, y_pred = filter_ensemble_pseudo(predictor=self, unlabeled_data=X_test)
y_pred_proba = y_pred.copy()
else:
y_pred_proba = self.predict_proba(data=X_test, as_multiclass=True)
y_pred = get_pred_from_proba_df(y_pred_proba, problem_type=self.problem_type)
test_pseudo_idxes_true = filter_pseudo(y_pred_proba_og=y_pred_proba, problem_type=self.problem_type)
if return_pred_prob:
if i == 0:
y_pred_proba_og = y_pred_proba
else:
y_pred_proba_og.loc[test_pseudo_idxes_true.index] = y_pred_proba.loc[test_pseudo_idxes_true.index]
if len(test_pseudo_idxes_true) < 1:
logger.log(20,
f'Could not confidently assign pseudolabels for any of the provided rows in iteration: {iter_print}. Done with pseudolabeling...')
break
else:
logger.log(20,
f'Pseudolabeling algorithm confidently assigned pseudolabels to: {len(test_pseudo_idxes_true)} rows of data '
f'on iteration: {iter_print}. Adding to train data')
test_pseudo_idxes = pd.Series(data=False, index=y_pred_proba.index)
test_pseudo_idxes[test_pseudo_idxes_true.index] = True
y_pseudo_og = y_pseudo_og.append(y_pred.loc[test_pseudo_idxes_true.index], verify_integrity=True)
pseudo_data = unlabeled_data.loc[y_pseudo_og.index]
pseudo_data[self.label] = y_pseudo_og
self.fit_extra(pseudo_data=pseudo_data, name_suffix=PSEUDO_MODEL_SUFFIX.format(iter=(i + 1)),
**kwargs)
if fit_ensemble and fit_ensemble_every_iter:
self._fit_weighted_ensemble_pseudo()
current_score = self.info()['best_model_score_val']
logger.log(20,
f'Pseudolabeling algorithm changed validation score from: {previous_score}, to: {current_score}'
f' using evaluation metric: {self.eval_metric.name}')
if previous_score >= current_score:
break
else:
# Cut down X_test to not include pseudo labeled data
X_test = X_test.loc[test_pseudo_idxes[~test_pseudo_idxes].index]
previous_score = current_score
if fit_ensemble and not fit_ensemble_every_iter:
self._fit_weighted_ensemble_pseudo()
y_pred_proba_og = self.predict_proba(unlabeled_data)
if return_pred_prob:
return self, y_pred_proba_og
else:
return self
def fit_pseudolabel(self, pseudo_data: pd.DataFrame, max_iter: int = 5, return_pred_prob: bool = False,
use_ensemble: bool = False, fit_ensemble: bool = False, fit_ensemble_every_iter: bool = False,
**kwargs):
"""
If 'pseudo_data' is labeled then all of 'pseudo_data' is incorporated into train_data for
newly fit models. If 'pseudo_data' is unlabeled then 'fit_pseudolabel' will self-label the
data and will augment the original training data by adding all the self-labeled
data that meets a criterion (for example, all rows with predicted class probability above 95%). If
the predictor is already fit then fit_extra is called with the added training data; if the predictor
is not fit then the model is first fit on train_data and pseudolabeling runs afterwards.
Parameters
----------
pseudo_data : str or :class:`TabularDataset` or :class:`pd.DataFrame`
Extra data to incorporate into training. Pre-labeled test data allowed. If no labels
then pseudolabeling algorithm will predict and filter out which rows to incorporate into
training
max_iter: int, default = 5
Maximum iterations of pseudolabeling allowed
return_pred_prob: bool, default = False
Returns held-out predictive probabilities from pseudo-labeling. If test_data is labeled then
returns model's predictive probabilities.
use_ensemble: bool, default = False
If True will use ensemble pseudo labeling algorithm. If False will just use best model
for pseudo labeling algorithm.
fit_ensemble: bool, default = False
If True, will fit a weighted ensemble model using a combination of the best models.
Fitting the weighted ensemble will be done after fitting has
been completed unless otherwise specified. If False, will not fit a weighted ensemble
over models trained with pseudo labeling and models trained without it.
fit_ensemble_every_iter: bool, default = False
If True fits weighted ensemble model for every iteration of pseudo labeling algorithm. If False
and fit_ensemble is True will fit after all pseudo labeling training is done.
kwargs: dict
If predictor is not already fit, then kwargs are for the functions 'fit' and 'fit_extra':
Refer to parameters documentation in :meth:`TabularPredictor.fit`.
Refer to parameters documentation in :meth:`TabularPredictor.fit_extra`.
If predictor is fit kwargs are for 'fit_extra':
Refer to parameters documentation in :meth:`TabularPredictor.fit_extra`.
Returns
-------
self : TabularPredictor
Returns self, which is a Python class of TabularPredictor
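Examples
--------
>>> # Illustrative sketch: `unlabeled_df` is a hypothetical DataFrame with the same feature columns as train_data
>>> predictor, y_pred_proba = predictor.fit_pseudolabel(pseudo_data=unlabeled_df, max_iter=3, return_pred_prob=True)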
"""
if len(pseudo_data) < 1:
raise Exception('No pseudo data given')
if not self._learner.is_fit:
if 'train_data' not in kwargs.keys():
raise Exception('Autogluon is required to be fit or given \'train_data\' in order to run \'fit_pseudolabel\'.'
' Autogluon is not fit and \'train_data\' was not given')
logger.log(20,
f'Predictor not fit prior to pseudolabeling. Fitting now...')
self.fit(**kwargs)
if self.problem_type is MULTICLASS and self.eval_metric.name != 'accuracy':
logger.warning('AutoGluon has detected the problem type as \'multiclass\' and '
f'eval_metric is {self.eval_metric.name}. We recommend using '
f'fit_pseudolabel when the eval metric is \'accuracy\'')
is_labeled = self.label in pseudo_data.columns
hyperparameters = kwargs.get('hyperparameters', None)
if hyperparameters is None:
if self._learner.is_fit:
hyperparameters = self.fit_hyperparameters_
elif isinstance(hyperparameters, str):
hyperparameters = get_hyperparameter_config(hyperparameters)
kwargs['hyperparameters'] = hyperparameters
fit_extra_args = self._get_all_fit_extra_args()
fit_extra_kwargs = {key: value for key, value in kwargs.items() if key in fit_extra_args}
if is_labeled:
logger.log(20, "Fitting predictor using the provided pseudolabeled examples as extra training data...")
self.fit_extra(pseudo_data=pseudo_data, name_suffix=PSEUDO_MODEL_SUFFIX.format(iter='')[:-1],
**fit_extra_kwargs)
if fit_ensemble:
logger.log(15, 'Fitting weighted ensemble model using best models')
self.fit_weighted_ensemble()
if return_pred_prob:
y_pred_proba = self.predict_proba(pseudo_data)
return self, y_pred_proba
else:
return self
else:
logger.log(20, 'Given test_data for pseudo labeling did not contain labels. '
'AutoGluon will assign pseudo labels to data and use it for extra training data...')
return self._run_pseudolabeling(unlabeled_data=pseudo_data, max_iter=max_iter,
return_pred_prob=return_pred_prob, use_ensemble=use_ensemble,
fit_ensemble=fit_ensemble, fit_ensemble_every_iter=fit_ensemble_every_iter,
**fit_extra_kwargs)
def predict(self, data, model=None, as_pandas=True):
"""
Use trained models to produce predictions of `label` column values for new data.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame`
The data to make predictions for. Should contain same column names as training Dataset and follow same format
(may contain extra columns that won't be used by Predictor, including the label-column itself).
If str is passed, `data` will be loaded using the str value as the file path.
model : str (optional)
The name of the model to get predictions from. Defaults to None, which uses the highest scoring model on the validation set.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`
as_pandas : bool, default = True
Whether to return the output as a :class:`pd.Series` (True) or :class:`np.ndarray` (False).
Returns
-------
Array of predictions, one corresponding to each row in given dataset. Either :class:`np.ndarray` or :class:`pd.Series` depending on `as_pandas` argument.
"""
self._assert_is_fit('predict')
data = self.__get_dataset(data)
return self._learner.predict(X=data, model=model, as_pandas=as_pandas)
def predict_proba(self, data, model=None, as_pandas=True, as_multiclass=True):
"""
Use trained models to produce predicted class probabilities rather than class-labels (if task is classification).
If `predictor.problem_type` is regression, this functions identically to `predict`, returning the same output.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame`
The data to make predictions for. Should contain same column names as training dataset and follow same format
(may contain extra columns that won't be used by Predictor, including the label-column itself).
If str is passed, `data` will be loaded using the str value as the file path.
model : str (optional)
The name of the model to get prediction probabilities from. Defaults to None, which uses the highest scoring model on the validation set.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
as_pandas : bool, default = True
Whether to return the output as a pandas object (True) or numpy array (False).
Pandas object is a DataFrame if this is a multiclass problem or `as_multiclass=True`, otherwise it is a Series.
If the output is a DataFrame, the column order will be equivalent to `predictor.class_labels`.
as_multiclass : bool, default = True
Whether to return binary classification probabilities as if they were for multiclass classification.
Output will contain two columns, and if `as_pandas=True`, the column names will correspond to the binary class labels.
The columns will be the same order as `predictor.class_labels`.
If False, output will contain only 1 column for the positive class (get positive_class name via `predictor.positive_class`).
Only impacts output for binary classification problems.
Returns
-------
Array of predicted class-probabilities, corresponding to each row in the given data.
May be a :class:`np.ndarray` or :class:`pd.DataFrame` / :class:`pd.Series` depending on `as_pandas` and `as_multiclass` arguments and the type of prediction problem.
For binary classification problems, the output contains for each datapoint the predicted probabilities of the negative and positive classes, unless you specify `as_multiclass=False`.
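Examples
--------
>>> # Illustrative sketch: assumes a fitted binary-classification `predictor` and a `test_data` DataFrame
>>> y_pred_proba = predictor.predict_proba(test_data)  # DataFrame with one column per class
>>> y_pred_pos = predictor.predict_proba(test_data, as_multiclass=False)  # Series for the positive class only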
"""
self._assert_is_fit('predict_proba')
data = self.__get_dataset(data)
return self._learner.predict_proba(X=data, model=model, as_pandas=as_pandas, as_multiclass=as_multiclass)
def evaluate(self, data, model=None, silent=False, auxiliary_metrics=True, detailed_report=False) -> dict:
"""
Report the predictive performance evaluated over a given dataset.
This is basically a shortcut for: `pred_proba = predict_proba(data); evaluate_predictions(data[label], pred_proba)`.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame`
This dataset must also contain the `label` with the same column-name as previously specified.
If str is passed, `data` will be loaded using the str value as the file path.
model : str (optional)
The name of the model to get prediction probabilities from. Defaults to None, which uses the highest scoring model on the validation set.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
silent : bool, default = False
If False, performance results are printed.
auxiliary_metrics: bool, default = True
Should we compute other (`problem_type` specific) metrics in addition to the default metric?
detailed_report : bool, default = False
Should we compute more detailed versions of the `auxiliary_metrics`? (requires `auxiliary_metrics = True`)
Returns
-------
Returns dict where keys = metrics, values = performance along each metric. To get the `eval_metric` score, do `output[predictor.eval_metric.name]`
NOTE: Metrics scores always show in higher is better form.
This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative.
"""
self._assert_is_fit('evaluate')
data = self.__get_dataset(data)
y_pred_proba = self.predict_proba(data=data, model=model)
return self.evaluate_predictions(y_true=data[self.label], y_pred=y_pred_proba, silent=silent,
auxiliary_metrics=auxiliary_metrics, detailed_report=detailed_report)
def evaluate_predictions(self, y_true, y_pred, silent=False, auxiliary_metrics=True, detailed_report=False) -> dict:
"""
Evaluate the provided prediction probabilities against ground truth labels.
Evaluation is based on the `eval_metric` previously specified in init, or default metrics if none was specified.
Parameters
----------
y_true : :class:`np.array` or :class:`pd.Series`
The ordered collection of ground-truth labels.
y_pred : :class:`pd.Series` or :class:`pd.DataFrame`
The ordered collection of prediction probabilities or predictions.
Obtainable via the output of `predictor.predict_proba`.
Caution: For certain types of `eval_metric` (such as 'roc_auc'), `y_pred` must be predicted-probabilities rather than predicted labels.
silent : bool, default = False
If False, performance results are printed.
auxiliary_metrics: bool, default = True
Should we compute other (`problem_type` specific) metrics in addition to the default metric?
detailed_report : bool, default = False
Should we compute more detailed versions of the `auxiliary_metrics`? (requires `auxiliary_metrics = True`)
Returns
-------
Returns dict where keys = metrics, values = performance along each metric.
NOTE: Metrics scores always show in higher is better form.
This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative.
"""
return self._learner.evaluate_predictions(y_true=y_true, y_pred=y_pred, silent=silent,
auxiliary_metrics=auxiliary_metrics, detailed_report=detailed_report)
def leaderboard(self, data=None, extra_info=False, extra_metrics=None, only_pareto_frontier=False, silent=False):
"""
Output summary of information about models produced during `fit()` as a :class:`pd.DataFrame`.
Includes information on test and validation scores for all models, model training times, inference times, and stack levels.
Output DataFrame columns include:
'model': The name of the model.
'score_val': The validation score of the model on the 'eval_metric'.
NOTE: Metrics scores always show in higher is better form.
This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative.
This is necessary to avoid the user needing to know the metric to understand if higher is better when looking at leaderboard.
'pred_time_val': The inference time required to compute predictions on the validation data end-to-end.
Equivalent to the sum of all 'pred_time_val_marginal' values for the model and all of its base models.
'fit_time': The fit time required to train the model end-to-end (Including base models if the model is a stack ensemble).
Equivalent to the sum of all 'fit_time_marginal' values for the model and all of its base models.
'pred_time_val_marginal': The inference time required to compute predictions on the validation data (Ignoring inference times for base models).
Note that this ignores the time required to load the model into memory when bagging is disabled.
'fit_time_marginal': The fit time required to train the model (Ignoring base models).
'stack_level': The stack level of the model.
A model with stack level N can take any set of models with stack level less than N as input, with stack level 1 models having no model inputs.
'can_infer': If model is able to perform inference on new data. If False, then the model either was not saved, was deleted, or an ancestor of the model cannot infer.
`can_infer` is often False when `save_bag_folds=False` was specified in initial `fit()`.
'fit_order': The order in which models were fit. The first model fit has `fit_order=1`, and the Nth model fit has `fit_order=N`. The order corresponds to the first child model fit in the case of bagged ensembles.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame` (optional)
This Dataset must also contain the label-column with the same column-name as specified during fit().
If specified, then the leaderboard returned will contain additional columns 'score_test', 'pred_time_test', and 'pred_time_test_marginal'.
'score_test': The score of the model on the 'eval_metric' for the data provided.
NOTE: Metrics scores always show in higher is better form.
This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative.
This is necessary to avoid the user needing to know the metric to understand if higher is better when looking at leaderboard.
'pred_time_test': The true end-to-end wall-clock inference time of the model for the data provided.
Equivalent to the sum of all 'pred_time_test_marginal' values for the model and all of its base models.
'pred_time_test_marginal': The inference time of the model for the data provided, minus the inference time for the model's base models, if it has any.
Note that this ignores the time required to load the model into memory when bagging is disabled.
If str is passed, `data` will be loaded using the str value as the file path.
extra_info : bool, default = False
If `True`, will return extra columns with advanced info.
This requires additional computation as advanced info data is calculated on demand.
Additional output columns when `extra_info=True` include:
'num_features': Number of input features used by the model.
Some models may ignore certain features in the preprocessed data.
'num_models': Number of models that actually make up this "model" object.
For non-bagged models, this is 1. For bagged models, this is equal to the number of child models (models trained on bagged folds) the bagged ensemble contains.
'num_models_w_ancestors': Equivalent to the sum of 'num_models' values for the model and its ancestors (see below).
'memory_size': The amount of memory in bytes the model requires when persisted in memory. This is not equivalent to the amount of memory the model may use during inference.
For bagged models, this is the sum of the 'memory_size' of all child models.
'memory_size_w_ancestors': Equivalent to the sum of 'memory_size' values for the model and its ancestors.
This is the amount of memory required to avoid loading any models in-between inference calls to get predictions from this model.
For online-inference, this is critical. It is important that the machine performing online inference has memory more than twice this value to avoid loading models for every call to inference by persisting models in memory.
'memory_size_min': The amount of memory in bytes the model minimally requires to perform inference.
For non-bagged models, this is equivalent to 'memory_size'.
For bagged models, this is equivalent to the largest child model's 'memory_size_min'.
To minimize memory usage, child models can be loaded and un-persisted one by one to infer. This is the default behavior if a bagged model was not already persisted in memory prior to inference.
'memory_size_min_w_ancestors': Equivalent to the max of the 'memory_size_min' values for the model and its ancestors.
This is the minimum required memory to infer with the model by only loading one model at a time, as each of its ancestors will also have to be loaded into memory.
For offline-inference where latency is not a concern, this should be used to determine the required memory for a machine if 'memory_size_w_ancestors' is too large.
'num_ancestors': Number of ancestor models for the given model.
'num_descendants': Number of descendant models for the given model.
'model_type': The type of the given model.
If the model is an ensemble type, 'child_model_type' will indicate the inner model type. A stack ensemble of bagged LightGBM models would have 'StackerEnsembleModel' as its model type.
'child_model_type': The child model type. None if the model is not an ensemble. A stack ensemble of bagged LightGBM models would have 'LGBModel' as its child type.
child models are models which are used as a group to generate a given bagged ensemble model's predictions. These are the models trained on each fold of a bagged ensemble.
For 10-fold bagging, the bagged ensemble model would have 10 child models.
For 10-fold bagging with 3 repeats, the bagged ensemble model would have 30 child models.
Note that child models are distinct from ancestors and descendants.
'hyperparameters': The hyperparameter values specified for the model.
All hyperparameters that do not appear in this dict remained at their default values.
'hyperparameters_fit': The hyperparameters set by the model during fit.
This overrides the 'hyperparameters' value for a particular key if present in 'hyperparameters_fit' to determine the fit model's final hyperparameters.
This is most commonly set for hyperparameters that indicate model training iterations or epochs, as early stopping can find a different value from what 'hyperparameters' indicated.
In these cases, the provided hyperparameter in 'hyperparameters' is used as a maximum for the model, but the model is still able to early stop at a smaller value during training to achieve a better validation score or to satisfy time constraints.
For example, if a NN model was given `epochs=500` as a hyperparameter, but found during training that `epochs=60` resulted in optimal validation score, it would use `epochs=60` and `hyperparameters_fit={'epochs': 60}` would be set.
'ag_args_fit': Special AutoGluon arguments that influence model fit.
See the documentation of the `hyperparameters` argument in `TabularPredictor.fit()` for more information.
'features': List of feature names used by the model.
'child_hyperparameters': Equivalent to 'hyperparameters', but for the model's children.
'child_hyperparameters_fit': Equivalent to 'hyperparameters_fit', but for the model's children.
'child_ag_args_fit': Equivalent to 'ag_args_fit', but for the model's children.
'ancestors': The model's ancestors. Ancestor models are the models which are required to make predictions during the construction of the model's input features.
If A is an ancestor of B, then B is a descendant of A.
If a model's ancestor is deleted, the model is no longer able to infer on new data, and its 'can_infer' value will be False.
A model can only have ancestor models whose 'stack_level' are lower than itself.
'stack_level'=1 models have no ancestors.
'descendants': The model's descendants. Descendant models are the models which require this model to make predictions during the construction of their input features.
If A is a descendant of B, then B is an ancestor of A.
If this model is deleted, then all descendant models will no longer be able to infer on new data, and their 'can_infer' values will be False.
A model can only have descendant models whose 'stack_level' are higher than itself.
extra_metrics : list, default = None
A list of metrics to calculate scores for and include in the output DataFrame.
Only valid when `data` is specified. The scores refer to the scores on `data` (same data as used to calculate the `score_test` column).
This list can contain any values which would also be valid for `eval_metric` in predictor init.
For example, `extra_metrics=['accuracy', 'roc_auc', 'log_loss']` would be valid in binary classification.
This example would return 3 additional columns in the output DataFrame, whose column names match the names of the metrics.
Passing `extra_metrics=[predictor.eval_metric]` would return an extra column in the name of the eval metric that has identical values to `score_test`.
This also works with custom metrics. If passing an object instead of a string, the column name will be equal to the `.name` attribute of the object.
NOTE: Metrics scores always show in higher is better form.
This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative.
This is necessary to avoid the user needing to know the metric to understand if higher is better when looking at leaderboard.
only_pareto_frontier : bool, default = False
If `True`, only return model information of models in the Pareto frontier of the accuracy/latency trade-off (models which achieve the highest score within their end-to-end inference time).
At minimum this will include the model with the highest score and the model with the lowest inference time.
This is useful when deciding which model to use during inference if inference time is a consideration.
Models filtered out by this process would never be optimal choices for a user that only cares about model inference time and score.
silent : bool, default = False
Should leaderboard DataFrame be printed?
Returns
-------
:class:`pd.DataFrame` of model performance summary information.
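Examples
--------
A minimal illustrative sketch; `test_data` is a placeholder for held-out data containing the label column.
>>> leaderboard_val = predictor.leaderboard(silent=True)  # validation scores only
>>> leaderboard_test = predictor.leaderboard(test_data, extra_metrics=['accuracy'], silent=True)  # adds 'score_test' plus the extra metric column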
"""
self._assert_is_fit('leaderboard')
data = self.__get_dataset(data) if data is not None else data
return self._learner.leaderboard(X=data, extra_info=extra_info, extra_metrics=extra_metrics,
only_pareto_frontier=only_pareto_frontier, silent=silent)
def fit_summary(self, verbosity=3, show_plot=False):
"""
Output summary of information about models produced during `fit()`.
May create various generated summary plots and store them in folder: `predictor.path`.
Parameters
----------
verbosity : int, default = 3
Controls how detailed of a summary to output.
Set <= 0 for no output printing, 1 to print just high-level summary,
2 to print summary and create plots, >= 3 to print all information produced during `fit()`.
show_plot : bool, default = False
If True, shows the model summary plot in browser when verbosity > 1.
Returns
-------
Dict containing various detailed information. We do not recommend directly printing this dict as it may be very large.
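Examples
--------
A minimal illustrative sketch on an already-fitted predictor.
>>> results = predictor.fit_summary(verbosity=1)  # print only the high-level summary
>>> results['model_best']  # name of the best model found during fit()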
"""
self._assert_is_fit('fit_summary')
# hpo_used = len(self._trainer.hpo_results) > 0
hpo_used = False # Disabled until a more memory efficient hpo_results object is implemented.
model_types = self._trainer.get_models_attribute_dict(attribute='type')
model_inner_types = self._trainer.get_models_attribute_dict(attribute='type_inner')
model_typenames = {key: model_types[key].__name__ for key in model_types}
model_innertypenames = {key: model_inner_types[key].__name__ for key in model_types if key in model_inner_types}
MODEL_STR = 'Model'
ENSEMBLE_STR = 'Ensemble'
for model in model_typenames:
if (model in model_innertypenames) and (ENSEMBLE_STR not in model_innertypenames[model]) and (
ENSEMBLE_STR in model_typenames[model]):
new_model_typename = model_typenames[model] + "_" + model_innertypenames[model]
if new_model_typename.endswith(MODEL_STR):
new_model_typename = new_model_typename[:-len(MODEL_STR)]
model_typenames[model] = new_model_typename
unique_model_types = set(model_typenames.values()) # no more class info
# all fit() information that is returned:
results = {
'model_types': model_typenames, # dict with key = model-name, value = type of model (class-name)
'model_performance': self._trainer.get_models_attribute_dict('val_score'),
# dict with key = model-name, value = validation performance
'model_best': self._trainer.model_best, # the name of the best model (on validation data)
'model_paths': self._trainer.get_models_attribute_dict('path'),
# dict with key = model-name, value = path to model file
'model_fit_times': self._trainer.get_models_attribute_dict('fit_time'),
'model_pred_times': self._trainer.get_models_attribute_dict('predict_time'),
'num_bag_folds': self._trainer.k_fold,
'max_stack_level': self._trainer.get_max_level(),
}
if self.problem_type == QUANTILE:
results['num_quantiles'] = len(self.quantile_levels)
elif self.problem_type != REGRESSION:
results['num_classes'] = self._trainer.num_classes
# if hpo_used:
# results['hpo_results'] = self._trainer.hpo_results
# get dict mapping model name to final hyperparameter values for each model:
model_hyperparams = {}
for model_name in self._trainer.get_model_names():
model_obj = self._trainer.load_model(model_name)
model_hyperparams[model_name] = model_obj.params
results['model_hyperparams'] = model_hyperparams
if verbosity > 0: # print stuff
print("*** Summary of fit() ***")
print("Estimated performance of each model:")
results['leaderboard'] = self._learner.leaderboard(silent=False)
# self._summarize('model_performance', 'Validation performance of individual models', results)
# self._summarize('model_best', 'Best model (based on validation performance)', results)
# self._summarize('hyperparameter_tune', 'Hyperparameter-tuning used', results)
print("Number of models trained: %s" % len(results['model_performance']))
print("Types of models trained:")
print(unique_model_types)
num_fold_str = ""
bagging_used = results['num_bag_folds'] > 0
if bagging_used:
num_fold_str = f" (with {results['num_bag_folds']} folds)"
print("Bagging used: %s %s" % (bagging_used, num_fold_str))
num_stack_str = ""
stacking_used = results['max_stack_level'] > 2
if stacking_used:
num_stack_str = f" (with {results['max_stack_level']} levels)"
print("Multi-layer stack-ensembling used: %s %s" % (stacking_used, num_stack_str))
hpo_str = ""
# if hpo_used and verbosity <= 2:
# hpo_str = " (call fit_summary() with verbosity >= 3 to see detailed HPO info)"
# print("Hyperparameter-tuning used: %s %s" % (hpo_used, hpo_str))
# TODO: uncomment once feature_prune is functional: self._summarize('feature_prune', 'feature-selection used', results)
print("Feature Metadata (Processed):")
print("(raw dtype, special dtypes):")
print(self.feature_metadata)
if verbosity > 1: # create plots
plot_tabular_models(results, output_directory=self.path,
save_file="SummaryOfModels.html",
plot_title="Models produced during fit()",
show_plot=show_plot)
if hpo_used:
for model_type in results['hpo_results']:
if 'trial_info' in results['hpo_results'][model_type]:
plot_summary_of_models(
results['hpo_results'][model_type],
output_directory=self.path, save_file=model_type + "_HPOmodelsummary.html",
plot_title=f"Models produced during {model_type} HPO", show_plot=show_plot)
plot_performance_vs_trials(
results['hpo_results'][model_type],
output_directory=self.path, save_file=model_type + "_HPOperformanceVStrials.png",
plot_title=f"HPO trials for {model_type} models", show_plot=show_plot)
if verbosity > 2: # print detailed information
if hpo_used:
hpo_results = results['hpo_results']
print("*** Details of Hyperparameter optimization ***")
for model_type in hpo_results:
hpo_model = hpo_results[model_type]
if 'trial_info' in hpo_model:
print(
f"HPO for {model_type} model: Num. configurations tried = {len(hpo_model['trial_info'])}, Time spent = {hpo_model['total_time']}s, Search strategy = {hpo_model['search_strategy']}")
print(
f"Best hyperparameter-configuration (validation-performance: {self.eval_metric} = {hpo_model['validation_performance']}):")
print(hpo_model['best_config'])
"""
if bagging_used:
pass # TODO: print detailed bagging info
if stacking_used:
pass # TODO: print detailed stacking info, like how much it improves validation performance
if results['feature_prune']:
pass # TODO: print detailed feature-selection info once feature-selection is functional.
"""
if verbosity > 0:
print("*** End of fit() summary ***")
return results
def transform_features(self, data=None, model=None, base_models=None, return_original_features=True):
"""
Transforms data features through the AutoGluon feature generator.
This is useful to gain an understanding of how AutoGluon interprets the data features.
The output of this function can be used to train further models, even outside of AutoGluon.
This can be useful for training your own models on the same data representation as AutoGluon.
Individual AutoGluon models like the neural network may apply additional feature transformations that are not reflected in this method.
This method only applies universal transforms employed by all AutoGluon models.
When `data=None`, `base_models=[{best_model}]`, and bagging was enabled during fit():
This returns the out-of-fold predictions of the best model, which can be used as training input to a custom user stacker model.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame` (optional)
The data to apply feature transformation to.
This data does not require the label column.
If str is passed, `data` will be loaded using the str value as the file path.
If not specified, the original data used during fit() will be used if fit() was previously called with `cache_data=True`. Otherwise, an exception will be raised.
For non-bagged mode predictors:
The data used when not specified is the validation set.
This can either be an automatically generated validation set or the user-defined `tuning_data` if passed during fit().
If all parameters are unspecified, then the output is equivalent to `predictor.load_data_internal(data='val', return_X=True, return_y=False)[0]`.
To get the label values of the output, call `predictor.load_data_internal(data='val', return_X=False, return_y=True)[1]`.
If the original training set is desired, it can be passed in through `data`.
Warning: Do not pass the original training set if `model` or `base_models` are set. This will result in overfit feature transformation.
For bagged mode predictors:
The data used when not specified is the full training set.
If all parameters are unspecified, then the output is equivalent to `predictor.load_data_internal(data='train', return_X=True, return_y=False)[0]`.
To get the label values of the output, call `predictor.load_data_internal(data='train', return_X=False, return_y=True)[1]`.
`base_model` features generated in this instance will be from out-of-fold predictions.
Note that the training set may differ from the training set originally passed during fit(), as AutoGluon may choose to drop or duplicate rows during training.
Warning: Do not pass the original training set through `data` if `model` or `base_models` are set. This will result in overfit feature transformation. Instead set `data=None`.
model : str, default = None
Model to generate input features for.
The output data will be equivalent to the input data that would be sent into `model.predict_proba(data)`.
Note: This only applies to cases where `data` is not the training data.
If `None`, then only return generically preprocessed features prior to any model fitting.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
Specifying a `refit_full` model will cause an exception if `data=None`.
`base_models=None` is a requirement when specifying `model`.
base_models : list, default = None
List of model names to use as base_models for a hypothetical stacker model when generating input features.
If `None`, then only return generically preprocessed features prior to any model fitting.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
If a stacker model S exists with `base_models=M`, then setting `base_models=M` is equivalent to setting `model=S`.
`model=None` is a requirement when specifying `base_models`.
return_original_features : bool, default = True
Whether to return the original features.
If False, only returns the additional output columns from specifying `model` or `base_models`.
This is useful to set to False if the intent is to use the output as input to further stacker models without the original features.
Returns
-------
:class:`pd.DataFrame` of the provided `data` after feature transformation has been applied.
This output does not include the label column, and will remove it if present in the supplied `data`.
If a transformed label column is desired, use `predictor.transform_labels`.
Examples
--------
>>> from autogluon.tabular import TabularPredictor
>>> predictor = TabularPredictor(label='class').fit('train.csv', auto_stack=True)  # predictor is in bagged mode.
>>> model = 'WeightedEnsemble_L2'
>>> train_data_transformed = predictor.transform_features(model=model) # Internal training DataFrame used as input to `model.fit()` for each model trained in predictor.fit()`
>>> test_data_transformed = predictor.transform_features('test.csv', model=model) # Internal test DataFrame used as input to `model.predict_proba()` during `predictor.predict_proba(test_data, model=model)`
"""
self._assert_is_fit('transform_features')
data = self.__get_dataset(data) if data is not None else data
return self._learner.get_inputs_to_stacker(dataset=data, model=model, base_models=base_models,
use_orig_features=return_original_features)
def transform_labels(self, labels, inverse=False, proba=False):
"""
Transforms data labels to the internal label representation.
This can be useful for training your own models on the same data label representation as AutoGluon.
Regression problems do not differ between original and internal representation, and thus this method will return the provided labels.
Warning: When `inverse=False`, it is possible for the output to contain NaN label values in multiclass problems if the provided label was dropped during training.
Parameters
----------
labels : :class:`np.ndarray` or :class:`pd.Series`
Labels to transform.
If `proba=False`, an example input would be the output of `predictor.predict(test_data)`.
If `proba=True`, an example input would be the output of `predictor.predict_proba(test_data, as_multiclass=False)`.
inverse : boolean, default = False
When `True`, the input labels are treated as being in the internal representation and the original representation is outputted.
proba : boolean, default = False
When `True`, the input labels are treated as probabilities and the output will be the internal representation of probabilities.
In this case, it is expected that `labels` be a :class:`pd.DataFrame` or :class:`np.ndarray`.
If the `problem_type` is multiclass:
The input column order must be equal to `predictor.class_labels`.
The output column order will be equal to `predictor.class_labels_internal`.
If `inverse=True`, the same logic applies, but with input and output columns interchanged.
When `False`, the input labels are treated as actual labels and the output will be the internal representation of the labels.
In this case, it is expected that `labels` be a :class:`pd.Series` or :class:`np.ndarray`.
Returns
-------
:class:`pd.Series` of labels if `proba=False` or :class:`pd.DataFrame` of label probabilities if `proba=True`.
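Examples
--------
A minimal illustrative sketch; `test_data` is a placeholder for data with the same schema as the training data.
>>> y_pred = predictor.predict(test_data)
>>> y_pred_internal = predictor.transform_labels(y_pred)  # original -> internal label representation
>>> y_pred_original = predictor.transform_labels(y_pred_internal, inverse=True)  # back to the original labels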
"""
self._assert_is_fit('transform_labels')
if inverse:
if proba:
labels_transformed = self._learner.label_cleaner.inverse_transform_proba(y=labels, as_pandas=True)
else:
labels_transformed = self._learner.label_cleaner.inverse_transform(y=labels)
else:
if proba:
labels_transformed = self._learner.label_cleaner.transform_proba(y=labels, as_pandas=True)
else:
labels_transformed = self._learner.label_cleaner.transform(y=labels)
return labels_transformed
def feature_importance(self, data=None, model=None, features=None, feature_stage='original', subsample_size=1000,
time_limit=None, num_shuffle_sets=None, include_confidence_band=True, confidence_level=0.99,
silent=False):
"""
Calculates feature importance scores for the given model via permutation importance. Refer to https://explained.ai/rf-importance/ for an explanation of permutation importance.
A feature's importance score represents the performance drop that results when the model makes predictions on a perturbed copy of the data where this feature's values have been randomly shuffled across rows.
A feature score of 0.01 would indicate that the predictive performance dropped by 0.01 when the feature was randomly shuffled.
The higher the score a feature has, the more important it is to the model's performance.
If a feature has a negative score, this means that the feature is likely harmful to the final model, and a model trained with the feature removed would be expected to achieve a better predictive performance.
Note that calculating feature importance can be a very computationally expensive process, particularly if the model uses hundreds or thousands of features. In many cases, this can take longer than the original model training.
To estimate how long `feature_importance(model, data, features)` will take, it is roughly the time taken by `predict_proba(data, model)` multiplied by the number of features.
Note: For highly accurate importance and p_value estimates, it is recommended to set `subsample_size` to at least 5,000 if possible and `num_shuffle_sets` to at least 10.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame` (optional)
This data must also contain the label-column with the same column-name as specified during `fit()`.
If specified, then the data is used to calculate the feature importance scores.
If str is passed, `data` will be loaded using the str value as the file path.
If not specified, the original data used during `fit()` will be used if `cache_data=True`. Otherwise, an exception will be raised.
Do not pass the training data through this argument, as the feature importance scores calculated will be biased due to overfitting.
More accurate feature importances will be obtained from new data that was held-out during `fit()`.
model : str, default = None
Model to get feature importances for. If None, the best model is chosen.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`
features : list, default = None
List of str feature names that feature importances are calculated for and returned. Specify None to get all feature importances.
If you only want to compute feature importances for some of the features, you can pass their names in as a list of str.
Valid feature names change depending on the `feature_stage`.
To get the list of feature names for `feature_stage='original'`, call `predictor.feature_metadata_in.get_features()`.
To get the list of feature names for `feature_stage='transformed'`, call `list(predictor.transform_features().columns)`.
To get the list of feature names for `feature_stage='transformed_model'`, call `list(predictor.transform_features(model={model_name}).columns)`.
[Advanced] Can also contain tuples as elements of (feature_name, feature_list) form.
feature_name can be any string so long as it is unique with all other feature names / features in the list.
feature_list can be any list of valid features in the data.
This will compute importance of the combination of features in feature_list, naming the set of features in the returned DataFrame feature_name.
This importance will differ from adding the individual importances of each feature in feature_list, and will be more accurate to the overall group importance.
Example: ['featA', 'featB', 'featC', ('featBC', ['featB', 'featC'])]
In this example, the importance of 'featBC' will be calculated by jointly permuting 'featB' and 'featC' together as if they were a single two-dimensional feature.
feature_stage : str, default = 'original'
What stage of feature-processing should importances be computed for.
Options:
'original':
Compute importances of the original features.
Warning: `data` must be specified with this option, otherwise an exception will be raised.
'transformed':
Compute importances of the post-internal-transformation features (after automated feature engineering). These features may be missing some original features, or add new features entirely.
An example of new features would be ngram features generated from a text column.
Warning: For bagged models, feature importance calculation is not yet supported with this option when `data=None`. Doing so will raise an exception.
'transformed_model':
Compute importances of the post-model-transformation features. These features are the internal features used by the requested model. They may differ greatly from the original features.
If the model is a stack ensemble, this will include stack ensemble features such as the prediction probability features of the stack ensemble's base (ancestor) models.
subsample_size : int, default = 1000
The number of rows to sample from `data` when computing feature importance.
If `subsample_size=None` or `data` contains fewer than `subsample_size` rows, all rows will be used during computation.
Larger values increase the accuracy of the feature importance scores.
Runtime linearly scales with `subsample_size`.
time_limit : float, default = None
Time in seconds to limit the calculation of feature importance.
If None, feature importance will calculate without early stopping.
A minimum of 1 full shuffle set will always be evaluated. If evaluating a single shuffle set takes longer than `time_limit`, the method will still complete that evaluation before returning, regardless of `time_limit`.
num_shuffle_sets : int, default = None
The number of different permutation shuffles of the data that are evaluated.
Larger values will increase the quality of the importance evaluation.
It is generally recommended to increase `subsample_size` before increasing `num_shuffle_sets`.
Defaults to 3 if `time_limit` is None or 10 if `time_limit` is specified.
Runtime linearly scales with `num_shuffle_sets`.
include_confidence_band: bool, default = True
If True, returned DataFrame will include two additional columns specifying confidence interval for the true underlying importance value of each feature.
Increasing `subsample_size` and `num_shuffle_sets` will tighten the confidence interval.
confidence_level: float, default = 0.99
This argument is only considered when `include_confidence_band` is True, and can be used to specify the confidence level used for constructing confidence intervals.
For example, if `confidence_level` is set to 0.99, then the returned DataFrame will include columns 'p99_high' and 'p99_low' which indicates that the true feature importance will be between 'p99_high' and 'p99_low' 99% of the time (99% confidence interval).
More generally, if `confidence_level` = 0.XX, then the columns containing the XX% confidence interval will be named 'pXX_high' and 'pXX_low'.
silent : bool, default = False
Whether to suppress logging output.
Returns
-------
:class:`pd.DataFrame` of feature importance scores with 6 columns:
index: The feature name.
'importance': The estimated feature importance score.
'stddev': The standard deviation of the feature importance score. If NaN, then not enough num_shuffle_sets were used to calculate a variance.
'p_value': P-value for a statistical t-test of the null hypothesis: importance = 0, vs the (one-sided) alternative: importance > 0.
Features with low p-value appear confidently useful to the predictor, while the other features may be useless to the predictor (or even harmful to include in its training data).
A low p-value (e.g. 0.01) indicates strong evidence that the feature is useful to the predictor.
A high p-value (e.g. 0.99) indicates little to no evidence that the feature is useful, suggesting it may be useless or even harmful to include.
'n': The number of shuffles performed to estimate importance score (corresponds to sample-size used to determine confidence interval for true score).
'pXX_high': Upper end of XX% confidence interval for true feature importance score (where XX=99 by default).
'pXX_low': Lower end of XX% confidence interval for true feature importance score.
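Examples
--------
A minimal illustrative sketch; `test_data` is a placeholder for held-out data containing the label column.
>>> fi = predictor.feature_importance(test_data, subsample_size=1000, num_shuffle_sets=10)
>>> fi[['importance', 'p_value']].sort_values('importance', ascending=False)  # most important features first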
"""
self._assert_is_fit('feature_importance')
data = self.__get_dataset(data) if data is not None else data
if (data is None) and (not self._trainer.is_data_saved):
raise AssertionError(
'No data was provided and there is no cached data to load for feature importance calculation. `cache_data=True` must be set in the `TabularPredictor` init `learner_kwargs` argument to enable this functionality when data is not specified.')
if data is not None:
# Avoid crash when indices are duplicated
data = data.reset_index(drop=True)
if num_shuffle_sets is None:
num_shuffle_sets = 10 if time_limit else 3
fi_df = self._learner.get_feature_importance(model=model, X=data, features=features,
feature_stage=feature_stage,
subsample_size=subsample_size, time_limit=time_limit,
num_shuffle_sets=num_shuffle_sets, silent=silent)
if include_confidence_band:
if confidence_level <= 0.5 or confidence_level >= 1.0:
raise ValueError("confidence_level must lie between 0.5 and 1.0")
ci_str = "{:0.0f}".format(confidence_level * 100)
import scipy.stats
num_features = len(fi_df)
ci_low_dict = dict()
ci_high_dict = dict()
for i in range(num_features):
fi = fi_df.iloc[i]
mean = fi['importance']
stddev = fi['stddev']
n = fi['n']
if np.isnan(stddev) or np.isnan(n) or np.isnan(mean) or n == 1:
ci_high = np.nan
ci_low = np.nan
else:
t_val = scipy.stats.t.ppf(1 - (1 - confidence_level) / 2, n - 1)
ci_high = mean + t_val * stddev / math.sqrt(n)
ci_low = mean - t_val * stddev / math.sqrt(n)
ci_high_dict[fi.name] = ci_high
ci_low_dict[fi.name] = ci_low
high_str = 'p' + ci_str + '_high'
low_str = 'p' + ci_str + '_low'
fi_df[high_str] = pd.Series(ci_high_dict)
fi_df[low_str] = pd.Series(ci_low_dict)
return fi_df
def persist_models(self, models='best', with_ancestors=True, max_memory=0.1) -> list:
"""
Persist models in memory for reduced inference latency. This is particularly important if the models are being used for online-inference where low latency is critical.
If models are not persisted in memory, they are loaded from disk every time they are asked to make predictions.
Parameters
----------
models : list of str or str, default = 'best'
Model names of models to persist.
If 'best' then the model with the highest validation score is persisted (this is the model used for prediction by default).
If 'all' then all models are persisted.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
with_ancestors : bool, default = True
If True, all ancestor models of the provided models will also be persisted.
If False, stacker models will not have the models they depend on persisted unless those models were specified in `models`. This will slow down inference as the ancestor models will still need to be loaded from disk for each predict call.
Only relevant for stacker models.
max_memory : float, default = 0.1
Proportion of total available memory to allow for the persisted models to use.
If the models' summed memory usage requires a larger proportion of memory than max_memory, they are not persisted. In this case, the output will be an empty list.
If None, then models are persisted regardless of estimated memory usage. This can cause out-of-memory errors.
Returns
-------
List of persisted model names.
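Examples
--------
A minimal illustrative sketch for low-latency online inference; `test_data` is a placeholder for new data to predict on.
>>> persisted = predictor.persist_models(models='best', max_memory=0.5)  # keep the best model and its ancestors in memory
>>> y_pred = predictor.predict(test_data)  # repeated predict calls avoid loading models from disk
>>> predictor.unpersist_models()  # release the memory when finished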
"""
self._assert_is_fit('persist_models')
return self._learner.persist_trainer(low_memory=False, models=models, with_ancestors=with_ancestors,
max_memory=max_memory)
def unpersist_models(self, models='all') -> list:
"""
Unpersist models in memory for reduced memory usage.
If models are not persisted in memory, they are loaded from disk every time they are asked to make predictions.
Note: Another way to reset the predictor and unpersist models is to reload the predictor from disk via `predictor = TabularPredictor.load(predictor.path)`.
Parameters
----------
models : list of str or str, default = 'all'
Model names of models to unpersist.
If 'all' then all models are unpersisted.
Valid models are listed in this `predictor` by calling `predictor.get_model_names_persisted()`.
Returns
-------
List of unpersisted model names.
"""
self._assert_is_fit('unpersist_models')
return self._learner.load_trainer().unpersist_models(model_names=models)
def refit_full(self, model='all'):
"""
Retrain model on all of the data (training + validation).
For bagged models:
Optimizes a model's inference time by collapsing bagged ensembles into a single model fit on all of the training data.
This process will typically result in a slight accuracy reduction and a large inference speedup.
The inference speedup will generally be between 10-200x faster than the original bagged ensemble model.
The inference speedup factor is equivalent to (k * n), where k is the number of folds (`num_bag_folds`) and n is the number of finished repeats (`num_bag_sets`) in the bagged ensemble.
The runtime is generally 10% or less of the original fit runtime.
The runtime can be roughly estimated as 1 / (k * n) of the original fit runtime, with k and n defined above.
For non-bagged models:
Optimizes a model's accuracy by retraining on 100% of the data without using a validation set.
Will typically result in a slight accuracy increase and no change to inference time.
The runtime will be approximately equal to the original fit runtime.
This process does not alter the original models, but instead adds additional models.
If stacker models are refit by this process, they will use the refit_full versions of the ancestor models during inference.
Models produced by this process will not have validation scores, as they use all of the data for training.
Therefore, it is up to the user to determine if the models are of sufficient quality by including test data in `predictor.leaderboard(test_data)`.
If the user does not have additional test data, they should reference the original model's score for an estimate of the performance of the refit_full model.
Warning: Be aware that utilizing refit_full models without separately verifying on test data means that the model is untested, and has no guarantee of being consistent with the original model.
`cache_data` must have been set to `True` during the original training to enable this functionality.
Parameters
----------
model : str, default = 'all'
Model name of model to refit.
If 'all' then all models are refitted.
If 'best' then the model with the highest validation score is refit.
All ancestor models will also be refit in the case that the selected model is a weighted or stacker ensemble.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
Returns
-------
Dictionary of original model names -> refit_full model names.
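Examples
--------
A minimal illustrative sketch; `test_data` is a placeholder for held-out data used to verify the refit models.
>>> refit_map = predictor.refit_full(model='best')  # dict of original name -> refit_full name
>>> predictor.leaderboard(test_data)  # compare the original and _FULL models on test data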
"""
self._assert_is_fit('refit_full')
refit_full_dict = self._learner.refit_ensemble_full(model=model)
return refit_full_dict
def get_model_best(self):
"""
Returns the string model name of the best model by validation score.
This is typically the same model used during inference when `predictor.predict` is called without specifying a model.
Returns
-------
String model name of the best model
"""
self._assert_is_fit('get_model_best')
return self._trainer.get_model_best(can_infer=True)
def get_model_full_dict(self):
"""
Returns a dictionary of original model name -> refit full model name.
Empty unless `refit_full=True` was set during fit or `predictor.refit_full()` was called.
This can be useful when determining the best model based off of `predictor.leaderboard()`, then getting the _FULL version of the model by passing its name as the key to this dictionary.
Returns
-------
Dictionary of original model name -> refit full model name.
"""
self._assert_is_fit('get_model_full_dict')
return copy.deepcopy(self._trainer.model_full_dict)
def info(self):
"""
[EXPERIMENTAL] Returns a dictionary of `predictor` metadata.
Warning: This functionality is currently in preview mode.
The metadata information returned may change in structure in future versions without warning.
The definitions of various metadata values are not yet documented.
The output of this function should not be used for programmatic decisions.
Contains information such as row count, column count, model training time, validation scores, hyperparameters, and much more.
Returns
-------
Dictionary of `predictor` metadata.
"""
self._assert_is_fit('info')
return self._learner.get_info(include_model_info=True)
# TODO: Add data argument
# TODO: Add option to disable OOF generation of newly fitted models
# TODO: Move code logic to learner/trainer
# TODO: Add fit() arg to perform this automatically at end of training
# TODO: Consider adding cutoff arguments such as top-k models
def fit_weighted_ensemble(self, base_models: list = None, name_suffix='Best', expand_pareto_frontier=False,
time_limit=None):
"""
Fits new weighted ensemble models to combine predictions of previously-trained models.
`cache_data` must have been set to `True` during the original training to enable this functionality.
Parameters
----------
base_models : list, default = None
List of model names the weighted ensemble can consider as candidates.
If None, all previously trained models are considered except for weighted ensemble models.
As an example, to train a weighted ensemble that can only have weights assigned to the models 'model_a' and 'model_b', set `base_models=['model_a', 'model_b']`
name_suffix : str, default = 'Best'
Name suffix to add to the name of the newly fitted ensemble model.
expand_pareto_frontier : bool, default = False
If True, will train N-1 weighted ensemble models instead of 1, where `N=len(base_models)`.
The final model trained when True is equivalent to the model trained when False.
These weighted ensemble models will attempt to expand the pareto frontier.
This will create many different weighted ensembles which have different accuracy/memory/inference-speed trade-offs.
This is particularly useful when inference speed is an important consideration.
time_limit : int, default = None
Time in seconds each weighted ensemble model is allowed to train for. If `expand_pareto_frontier=True`, the `time_limit` value is applied to each model.
If None, the ensemble models train without time restriction.
Returns
-------
List of newly trained weighted ensemble model names.
If an exception is encountered while training an ensemble model, that model's name will be absent from the list.
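Examples
--------
A minimal illustrative sketch; the base model names are placeholders, use `predictor.get_model_names()` for valid values.
>>> new_models = predictor.fit_weighted_ensemble(base_models=['LightGBM', 'CatBoost'], name_suffix='Custom')
>>> predictor.leaderboard(silent=True)  # the newly fitted weighted ensemble(s) will appear here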
"""
self._assert_is_fit('fit_weighted_ensemble')
trainer = self._learner.load_trainer()
if trainer.bagged_mode:
X = trainer.load_X()
y = trainer.load_y()
fit = True
else:
X = trainer.load_X_val()
y = trainer.load_y_val()
fit = False
stack_name = 'aux1'
if base_models is None:
base_models = trainer.get_model_names(stack_name='core')
X_stack_preds = trainer.get_inputs_to_stacker(X=X, base_models=base_models, fit=fit, use_orig_features=False)
models = []
if expand_pareto_frontier:
leaderboard = self.leaderboard(silent=True)
leaderboard = leaderboard[leaderboard['model'].isin(base_models)]
leaderboard = leaderboard.sort_values(by='pred_time_val')
models_to_check = leaderboard['model'].tolist()
for i in range(1, len(models_to_check) - 1):
models_to_check_now = models_to_check[:i + 1]
max_base_model_level = max([trainer.get_model_level(base_model) for base_model in models_to_check_now])
weighted_ensemble_level = max_base_model_level + 1
models += trainer.generate_weighted_ensemble(X=X_stack_preds, y=y, level=weighted_ensemble_level,
stack_name=stack_name,
base_model_names=models_to_check_now,
name_suffix=name_suffix + '_Pareto' + str(i),
time_limit=time_limit)
max_base_model_level = max([trainer.get_model_level(base_model) for base_model in base_models])
weighted_ensemble_level = max_base_model_level + 1
models += trainer.generate_weighted_ensemble(X=X_stack_preds, y=y, level=weighted_ensemble_level,
stack_name=stack_name, base_model_names=base_models,
name_suffix=name_suffix, time_limit=time_limit)
return models
def get_oof_pred(self, model: str = None, transformed=False, train_data=None, internal_oof=False) -> pd.Series:
"""
Note: This is advanced functionality not intended for normal usage.
Returns the out-of-fold (OOF) predictions for every row in the training data.
For more information, refer to `get_oof_pred_proba()` documentation.
Parameters
----------
model : str (optional)
Refer to `get_oof_pred_proba()` documentation.
transformed : bool, default = False
Refer to `get_oof_pred_proba()` documentation.
train_data : pd.DataFrame, default = None
Refer to `get_oof_pred_proba()` documentation.
internal_oof : bool, default = False
Refer to `get_oof_pred_proba()` documentation.
Returns
-------
:class:`pd.Series` object of the out-of-fold training predictions of the model.
"""
self._assert_is_fit('get_oof_pred')
y_pred_proba_oof = self.get_oof_pred_proba(model=model,
transformed=transformed,
as_multiclass=True,
train_data=train_data,
internal_oof=internal_oof)
return get_pred_from_proba_df(y_pred_proba_oof, problem_type=self.problem_type)
# TODO: Improve error messages when trying to get oof from refit_full and distilled models.
# TODO: v0.1 add tutorial related to this method, as it is very powerful.
# TODO: Remove train_data argument once we start caching the raw original data: Can just load that instead.
def get_oof_pred_proba(self, model: str = None, transformed=False, as_multiclass=True, train_data=None,
internal_oof=False) -> Union[pd.DataFrame, pd.Series]:
"""
Note: This is advanced functionality not intended for normal usage.
Returns the out-of-fold (OOF) predicted class probabilities for every row in the training data.
OOF prediction probabilities may provide unbiased estimates of generalization accuracy (reflecting how predictions will behave on new data)
Predictions for each row are only made using models that were fit to a subset of data where this row was held-out.
Warning: This method will raise an exception if called on a model that is not a bagged ensemble. Only bagged models (such as stacker models) can produce OOF predictions.
This also means that refit_full models and distilled models will raise an exception.
Warning: If intending to join the output of this method with the original training data, be aware that a rare edge-case issue exists:
Multiclass problems with rare classes combined with the use of the 'log_loss' eval_metric may have forced AutoGluon to duplicate rows in the training data to satisfy minimum class counts in the data.
If this has occurred, then the indices and row counts of the returned :class:`pd.Series` in this method may not align with the training data.
In this case, consider fetching the processed training data using `predictor.load_data_internal()` instead of using the original training data.
A more benign version of this issue occurs when 'log_loss' wasn't specified as the eval_metric but rare classes were dropped by AutoGluon.
In this case, not all of the original training data rows will have an OOF prediction. It is recommended to either drop these rows during the join or to get direct predictions on the missing rows via :meth:`TabularPredictor.predict_proba`.
Parameters
----------
model : str (optional)
The name of the model to get out-of-fold predictions from. Defaults to None, which uses the highest scoring model on the validation set.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`
transformed : bool, default = False
Whether the output values should be of the original label representation (False) or the internal label representation (True).
The internal representation for binary and multiclass classification are integers numbering the k possible classes from 0 to k-1, while the original representation is identical to the label classes provided during fit.
Generally, most users will want the original representation and keep `transformed=False`.
as_multiclass : bool, default = True
Whether to return binary classification probabilities as if they were for multiclass classification.
Output will contain two columns, and if `transformed=False`, the column names will correspond to the binary class labels.
The columns will be the same order as `predictor.class_labels`.
If False, output will contain only 1 column for the positive class (get positive_class name via `predictor.positive_class`).
Only impacts output for binary classification problems.
train_data : pd.DataFrame, default = None
Specify the original `train_data` to ensure that any training rows that were originally dropped internally are properly handled.
If None, then output will not contain all rows if training rows were dropped internally during fit.
internal_oof : bool, default = False
[Advanced Option] Return the internal OOF preds rather than the externally facing OOF preds.
Internal OOF preds may have more/fewer rows than was provided in train_data, and are incompatible with external data.
If you don't know what this does, keep it as False.
Returns
-------
:class:`pd.Series` or :class:`pd.DataFrame` object of the out-of-fold training prediction probabilities of the model.
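Examples
--------
A minimal illustrative sketch for a bagged predictor; `train_data` is a placeholder for the identical data originally passed to fit().
>>> oof_pred_proba = predictor.get_oof_pred_proba(train_data=train_data)
>>> oof_pred = predictor.get_oof_pred(train_data=train_data)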
"""
self._assert_is_fit('get_oof_pred_proba')
if model is None:
model = self.get_model_best()
if not self._trainer.bagged_mode:
raise AssertionError('Predictor must be in bagged mode to get out-of-fold predictions.')
if model in self._trainer._model_full_dict_val_score:
# FIXME: This is a hack, add refit tag in a nicer way than via the _model_full_dict_val_score
# TODO: bagged-with-holdout refit to bagged-no-holdout should still be able to return out-of-fold predictions
raise AssertionError('_FULL models do not have out-of-fold predictions.')
if self._trainer.get_model_attribute_full(model=model, attribute='val_in_fit', func=max):
raise AssertionError(
f'Model {model} does not have out-of-fold predictions because it used a validation set during training.')
y_pred_proba_oof_transformed = self.transform_features(base_models=[model], return_original_features=False)
if not internal_oof:
is_duplicate_index = y_pred_proba_oof_transformed.index.duplicated(keep='first')
if is_duplicate_index.any():
logger.log(20,
'Detected duplicate indices... This means that data rows may have been duplicated during training. '
'Removing all duplicates except for the first instance.')
y_pred_proba_oof_transformed = y_pred_proba_oof_transformed[~is_duplicate_index]
if self._learner._pre_X_rows is not None and len(y_pred_proba_oof_transformed) < self._learner._pre_X_rows:
len_diff = self._learner._pre_X_rows - len(y_pred_proba_oof_transformed)
if train_data is None:
logger.warning(f'WARNING: {len_diff} rows of training data were dropped internally during fit. '
f'The output will not contain all original training rows.\n'
f'If attempting to get `oof_pred_proba`, DO NOT pass `train_data` into `predictor.predict_proba` or `predictor.transform_features`!\n'
f'Instead this can be done by the following '
f'(Ensure `train_data` is identical to when it was used in fit):\n'
f'oof_pred_proba = predictor.get_oof_pred_proba(train_data=train_data)\n'
f'oof_pred = predictor.get_oof_pred(train_data=train_data)\n')
else:
missing_idx = list(train_data.index.difference(y_pred_proba_oof_transformed.index))
if len(missing_idx) > 0:
missing_idx_data = train_data.loc[missing_idx]
missing_pred_proba = self.transform_features(data=missing_idx_data, base_models=[model],
return_original_features=False)
y_pred_proba_oof_transformed = pd.concat([y_pred_proba_oof_transformed, missing_pred_proba])
y_pred_proba_oof_transformed = y_pred_proba_oof_transformed.reindex(list(train_data.index))
if self.problem_type == MULTICLASS and self._learner.label_cleaner.problem_type_transform == MULTICLASS:
y_pred_proba_oof_transformed.columns = copy.deepcopy(
self._learner.label_cleaner.ordered_class_labels_transformed)
elif self.problem_type == QUANTILE:
y_pred_proba_oof_transformed.columns = self.quantile_levels
else:
y_pred_proba_oof_transformed.columns = [self.label]
y_pred_proba_oof_transformed = y_pred_proba_oof_transformed[self.label]
if as_multiclass and self.problem_type == BINARY:
y_pred_proba_oof_transformed = LabelCleanerMulticlassToBinary.convert_binary_proba_to_multiclass_proba(
y_pred_proba_oof_transformed, as_pandas=True)
elif self.problem_type == MULTICLASS:
if transformed:
y_pred_proba_oof_transformed = LabelCleanerMulticlassToBinary.convert_binary_proba_to_multiclass_proba(
y_pred_proba_oof_transformed, as_pandas=True)
y_pred_proba_oof_transformed.columns = copy.deepcopy(
self._learner.label_cleaner.ordered_class_labels_transformed)
if transformed:
return y_pred_proba_oof_transformed
else:
return self.transform_labels(labels=y_pred_proba_oof_transformed, inverse=True, proba=True)
@property
def positive_class(self):
"""
Returns the positive class name in binary classification. Useful for computing metrics such as F1 which require a positive and negative class.
In binary classification, :class:`TabularPredictor.predict_proba(as_multiclass=False)` returns the estimated probability that each row belongs to the positive class.
Will print a warning and return None if called when `predictor.problem_type != 'binary'`.
Returns
-------
The positive class name in binary classification or None if the problem is not binary classification.
"""
return self._learner.positive_class
def load_data_internal(self, data='train', return_X=True, return_y=True):
"""
Loads the internal data representation used during model training.
Individual AutoGluon models like the neural network may apply additional feature transformations that are not reflected in this method.
This method only applies universal transforms employed by all AutoGluon models.
Warning, the internal representation may:
Have different features compared to the original data.
Have different row counts compared to the original data.
Have indices which do not align with the original data.
Have label values which differ from those in the original data.
Internal data representations should NOT be combined with the original data, in most cases this is not possible.
Parameters
----------
data : str, default = 'train'
The data to load.
Valid values are:
'train':
Load the training data used during model training.
This is a transformed and augmented version of the `train_data` passed in `fit()`.
'val':
Load the validation data used during model training.
This is a transformed and augmented version of the `tuning_data` passed in `fit()`.
If `tuning_data=None` was set in `fit()`, then `tuning_data` is an automatically generated validation set created by splitting `train_data`.
Warning: Will raise an exception if called by a bagged predictor, as bagged predictors have no validation data.
return_X : bool, default = True
Whether to return the internal data features
If set to `False`, then the first element in the returned tuple will be None.
return_y : bool, default = True
Whether to return the internal data labels
If set to `False`, then the second element in the returned tuple will be None.
Returns
-------
Tuple of (:class:`pd.DataFrame`, :class:`pd.Series`) corresponding to the internal data features and internal data labels, respectively.
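Examples
--------
A minimal illustrative sketch on an already-fitted predictor.
>>> X_train_internal, y_train_internal = predictor.load_data_internal(data='train')
>>> X_val_internal, _ = predictor.load_data_internal(data='val', return_y=False)  # raises an exception for bagged predictors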
"""
self._assert_is_fit('load_data_internal')
if data == 'train':
load_X = self._trainer.load_X
load_y = self._trainer.load_y
elif data == 'val':
load_X = self._trainer.load_X_val
load_y = self._trainer.load_y_val
else:
raise ValueError(f'data must be one of: [\'train\', \'val\'], but was \'{data}\'.')
X = load_X() if return_X else None
y = load_y() if return_y else None
return X, y
def save_space(self, remove_data=True, remove_fit_stack=True, requires_save=True, reduce_children=False):
"""
Reduces the memory and disk size of predictor by deleting auxiliary model files that aren't needed for prediction on new data.
This function has NO impact on inference accuracy.
It is recommended to invoke this method if the only goal is to use the trained model for prediction.
However, certain advanced functionality may no longer be available after `save_space()` has been called.
Parameters
----------
remove_data : bool, default = True
Whether to remove cached files of the original training and validation data.
Only reduces disk usage, it has no impact on memory usage.
This is especially useful when the original data was large.
This is equivalent to setting `cache_data=False` during the original `fit()`.
Will disable all advanced functionality that requires `cache_data=True`.
remove_fit_stack : bool, default = True
Whether to remove information required to fit new stacking models and continue fitting bagged models with new folds.
Only reduces disk usage, it has no impact on memory usage.
This includes:
out-of-fold (OOF) predictions
This is useful for multiclass problems with many classes, as OOF predictions can become very large on disk. (1 GB per model in extreme cases)
This disables `predictor.refit_full()` for stacker models.
requires_save : bool, default = True
Whether to remove information that requires the model to be saved again to disk.
Typically this only includes flag variables that don't have significant impact on memory or disk usage, but should technically be updated due to the removal of more important information.
An example is the `is_data_saved` boolean variable in `trainer`, which should be updated to `False` if `remove_data=True` was set.
reduce_children : bool, default = False
Whether to apply the reduction rules to bagged ensemble children models. These are the models trained for each fold of the bagged ensemble.
This should generally be kept as `False` since the most important memory and disk reduction techniques are automatically applied to these models during the original `fit()` call.
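Examples
--------
A minimal usage sketch (assuming `predictor` is a fitted TabularPredictor that is only needed for inference):
>>> predictor.save_space()
>>> # Keep the cached training data on disk but still remove the fit-stack artifacts:
>>> predictor.save_space(remove_data=False)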
"""
self._assert_is_fit('save_space')
self._trainer.reduce_memory_size(remove_data=remove_data, remove_fit_stack=remove_fit_stack, remove_fit=True,
remove_info=False, requires_save=requires_save,
reduce_children=reduce_children)
def delete_models(self, models_to_keep=None, models_to_delete=None, allow_delete_cascade=False,
delete_from_disk=True, dry_run=True):
"""
Deletes models from `predictor`.
This can be helpful to minimize memory usage and disk usage, particularly for model deployment.
This will remove all references to the models in `predictor`.
For example, removed models will not appear in `predictor.leaderboard()`.
WARNING: If `delete_from_disk=True`, this will DELETE ALL FILES in the deleted model directories, regardless if they were created by AutoGluon or not.
DO NOT STORE FILES INSIDE OF THE MODEL DIRECTORY THAT ARE UNRELATED TO AUTOGLUON.
Parameters
----------
models_to_keep : str or list, default = None
Name of model or models to not delete.
All models that are not specified and are also not required as a dependency of any model in `models_to_keep` will be deleted.
Specify `models_to_keep='best'` to keep only the best model and its model dependencies.
`models_to_delete` must be None if `models_to_keep` is set.
To see the list of possible model names, use: `predictor.get_model_names()` or `predictor.leaderboard()`.
models_to_delete : str or list, default = None
Name of model or models to delete.
All models that are not specified but depend on a model in `models_to_delete` will also be deleted.
`models_to_keep` must be None if `models_to_delete` is set.
allow_delete_cascade : bool, default = False
If `False`, if unspecified dependent models of models in `models_to_delete` exist an exception will be raised instead of deletion occurring.
An example of a dependent model is m1 if m2 is a stacker model and takes predictions from m1 as inputs. In this case, m1 would be a dependent model of m2.
If `True`, all dependent models of models in `models_to_delete` will be deleted.
Has no effect if `models_to_delete=None`.
delete_from_disk : bool, default = True
If `True`, deletes the models from disk if they were persisted.
WARNING: This deletes the entire directory for the deleted models, and ALL FILES located there.
It is highly recommended to first run with `dry_run=True` to understand which directories will be deleted.
dry_run : bool, default = True
If `True`, then deletions don't occur, and logging statements are printed describing what would have occurred.
Set `dry_run=False` to perform the deletions.
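Examples
--------
A minimal usage sketch (assuming `predictor` is a fitted TabularPredictor):
>>> # Preview which models would be removed, keeping only the best model and its dependencies
>>> predictor.delete_models(models_to_keep='best', dry_run=True)
>>> # Perform the deletion
>>> predictor.delete_models(models_to_keep='best', dry_run=False)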
"""
self._assert_is_fit('delete_models')
if models_to_keep == 'best':
models_to_keep = self._trainer.model_best
if models_to_keep is None:
models_to_keep = self._trainer.get_model_best()
self._trainer.delete_models(models_to_keep=models_to_keep, models_to_delete=models_to_delete,
allow_delete_cascade=allow_delete_cascade, delete_from_disk=delete_from_disk,
dry_run=dry_run)
# TODO: v0.1 add documentation for arguments
def get_model_names(self, stack_name=None, level=None, can_infer: bool = None, models: list = None) -> list:
"""Returns the list of model names trained in this `predictor` object."""
self._assert_is_fit('get_model_names')
return self._trainer.get_model_names(stack_name=stack_name, level=level, can_infer=can_infer, models=models)
def get_model_names_persisted(self) -> list:
"""Returns the list of model names which are persisted in memory."""
self._assert_is_fit('get_model_names_persisted')
return list(self._learner.load_trainer().models.keys())
def distill(self, train_data=None, tuning_data=None, augmentation_data=None, time_limit=None, hyperparameters=None,
holdout_frac=None,
teacher_preds='soft', augment_method='spunge', augment_args={'size_factor': 5, 'max_size': int(1e5)},
models_name_suffix=None, verbosity=None):
"""
Distill AutoGluon's most accurate ensemble-predictor into single models which are simpler/faster and require less memory/compute.
Distillation can produce a model that is more accurate than the same model fit directly on the original training data.
After calling `distill()`, there will be more models available in this Predictor, which can be evaluated using `predictor.leaderboard(test_data)` and deployed with: `predictor.predict(test_data, model=MODEL_NAME)`.
This will raise an exception if `cache_data=False` was previously set in `fit()`.
NOTE: Until catboost v0.24 is released, `distill()` with CatBoost students in multiclass classification requires you to first install catboost-dev: `pip install catboost-dev`
Parameters
----------
train_data : str or :class:`TabularDataset` or :class:`pd.DataFrame`, default = None
Same as `train_data` argument of `fit()`.
If None, the same training data will be loaded from `fit()` call used to produce this Predictor.
tuning_data : str or :class:`TabularDataset` or :class:`pd.DataFrame`, default = None
Same as `tuning_data` argument of `fit()`.
If `tuning_data = None` and `train_data = None`: the same training/validation splits will be loaded from `fit()` call used to produce this Predictor,
unless bagging/stacking was previously used in which case a new training/validation split is performed.
augmentation_data : :class:`TabularDataset` or :class:`pd.DataFrame`, default = None
An optional extra dataset of unlabeled rows that can be used for augmenting the dataset used to fit student models during distillation (ignored if None).
time_limit : int, default = None
Approximately how long (in seconds) the distillation process should run for.
If None, no time-constraint will be enforced allowing the distilled models to fully train.
hyperparameters : dict or str, default = None
Specifies which models to use as students and what hyperparameter-values to use for them.
Same as `hyperparameters` argument of `fit()`.
If = None, then student models will use the same hyperparameters from `fit()` used to produce this Predictor.
Note: distillation is currently only supported for ['GBM','NN','RF','CAT'] student models, other models and their hyperparameters are ignored here.
holdout_frac : float
Same as `holdout_frac` argument of :meth:`TabularPredictor.fit`.
teacher_preds : str, default = 'soft'
What form of teacher predictions to distill from (teacher refers to the most accurate AutoGluon ensemble-predictor).
If None, we only train with original labels (no data augmentation).
If 'hard', labels are hard teacher predictions given by: `teacher.predict()`
If 'soft', labels are soft teacher predictions given by: `teacher.predict_proba()`
Note: 'hard' and 'soft' are equivalent for regression problems.
If `augment_method` is not None, teacher predictions are only used to label augmented data (training data keeps original labels).
To apply label-smoothing: `teacher_preds='onehot'` will use original training data labels converted to one-hot vectors for multiclass problems (no data augmentation).
augment_method : str, default='spunge'
Specifies method to use for generating augmented data for distilling student models.
Options include:
None : no data augmentation performed.
'munge' : The MUNGE algorithm (https://www.cs.cornell.edu/~caruana/compression.kdd06.pdf).
'spunge' : A simpler, more efficient variant of the MUNGE algorithm.
augment_args : dict, default = {'size_factor':5, 'max_size': int(1e5)}
Contains the following kwargs that control the chosen `augment_method` (these are ignored if `augment_method=None`):
'num_augmented_samples': int, number of augmented datapoints used during distillation. Overrides 'size_factor', 'max_size' if specified.
'max_size': float, the maximum number of augmented datapoints to add (ignored if 'num_augmented_samples' specified).
'size_factor': float, if n = training data sample-size, we add int(n * size_factor) augmented datapoints, up to 'max_size'.
Larger values in `augment_args` will slow down the runtime of distill(), and may produce worse results if the provided time_limit is too small.
You can also pass in kwargs for the `spunge_augment`, `munge_augment` functions in `autogluon.tabular.augmentation.distill_utils`.
models_name_suffix : str, default = None
Optional suffix that can be appended at the end of all distilled student models' names.
Note: all distilled models will contain '_DSTL' substring in their name by default.
verbosity : int, default = None
Controls amount of printed output during distillation (4 = highest, 0 = lowest).
Same as `verbosity` parameter of :class:`TabularPredictor`.
If None, the same `verbosity` used in previous fit is employed again.
Returns
-------
List of names (str) corresponding to the distilled models.
Examples
--------
>>> from autogluon.tabular import TabularDataset, TabularPredictor
>>> train_data = TabularDataset('train.csv')
>>> predictor = TabularPredictor(label='class').fit(train_data, auto_stack=True)
>>> distilled_model_names = predictor.distill()
>>> test_data = TabularDataset('test.csv')
>>> ldr = predictor.leaderboard(test_data)
>>> model_to_deploy = distilled_model_names[0]
>>> predictor.predict(test_data, model=model_to_deploy)
"""
self._assert_is_fit('distill')
if isinstance(hyperparameters, str):
hyperparameters = get_hyperparameter_config(hyperparameters)
return self._learner.distill(X=train_data, X_val=tuning_data, time_limit=time_limit,
hyperparameters=hyperparameters, holdout_frac=holdout_frac,
verbosity=verbosity, models_name_suffix=models_name_suffix,
teacher_preds=teacher_preds,
augmentation_data=augmentation_data, augment_method=augment_method,
augment_args=augment_args)
def plot_ensemble_model(self, prune_unused_nodes=True) -> str:
"""
Output the visualized stack ensemble architecture of a model trained by `fit()`.
The plot is stored to a file, `ensemble_model.png`, in the folder `predictor.path`.
This function requires `graphviz` and `pygraphviz` to be installed, because the visualization depends on those packages.
Otherwise, this function will raise an `ImportError` and will not be able to generate the visual of the ensemble model.
To install the required package, run the below commands (for Ubuntu linux):
$ sudo apt-get install graphviz
$ pip install graphviz
For other platforms, refer to https://graphviz.org/ for Graphviz install, and https://pygraphviz.github.io/documentation.html for PyGraphviz.
Parameters
----------
Returns
-------
The file name with the full path to the saved graphic
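Examples
--------
A minimal usage sketch (assuming `graphviz`/`pygraphviz` are installed and `predictor` is fitted; the notebook display step is optional):
>>> path_to_png = predictor.plot_ensemble_model()
>>> from IPython.display import Image, display
>>> display(Image(filename=path_to_png))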
"""
self._assert_is_fit('plot_ensemble_model')
try:
import pygraphviz
except:
raise ImportError('Visualizing ensemble network architecture requires pygraphviz library')
G = self._trainer.model_graph.copy()
if prune_unused_nodes:
nodes_without_outedge = [node for node, degree in dict(G.degree()).items() if degree < 1]
else:
nodes_without_outedge = []
nodes_no_val_score = [node for node in G if G.nodes[node]['val_score'] is None]
G.remove_nodes_from(nodes_without_outedge)
G.remove_nodes_from(nodes_no_val_score)
root_node = [n for n, d in G.out_degree() if d == 0]
best_model_node = self.get_model_best()
A = nx.nx_agraph.to_agraph(G)
A.graph_attr.update(rankdir='BT')
A.node_attr.update(fontsize=10)
A.node_attr.update(shape='rectangle')
for node in A.iternodes():
node.attr['label'] = f"{node.name}\nVal score: {float(node.attr['val_score']):.4f}"
if node.name == best_model_node:
node.attr['style'] = 'filled'
node.attr['fillcolor'] = '#ff9900'
node.attr['shape'] = 'box3d'
elif nx.has_path(G, node.name, best_model_node):
node.attr['style'] = 'filled'
node.attr['fillcolor'] = '#ffcc00'
model_image_fname = os.path.join(self.path, 'ensemble_model.png')
A.draw(model_image_fname, format='png', prog='dot')
return model_image_fname
@staticmethod
def _summarize(key, msg, results):
if key in results:
print(msg + ": " + str(results[key]))
@staticmethod
def __get_dataset(data):
if isinstance(data, TabularDataset):
return data
elif isinstance(data, pd.DataFrame):
return TabularDataset(data)
elif isinstance(data, str):
return TabularDataset(data)
elif isinstance(data, pd.Series):
raise TypeError("data must be TabularDataset or pandas.DataFrame, not pandas.Series. \
To predict on just single example (ith row of table), use data.iloc[[i]] rather than data.iloc[i]")
else:
raise TypeError("data must be TabularDataset or pandas.DataFrame or str file path to data")
def _validate_hyperparameter_tune_kwargs(self, hyperparameter_tune_kwargs, time_limit=None):
"""
Returns True if hyperparameter_tune_kwargs is None or can construct a valid scheduler.
Returns False if hyperparameter_tune_kwargs results in an invalid scheduler.
"""
if hyperparameter_tune_kwargs is None:
return True
scheduler_cls, scheduler_params = scheduler_factory(hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
time_out=time_limit,
nthreads_per_trial='auto', ngpus_per_trial='auto')
assert scheduler_params[
'searcher'] != 'bayesopt_hyperband', "searcher == 'bayesopt_hyperband' not yet supported"
if scheduler_params.get('dist_ip_addrs', None):
logger.warning(
'Warning: dist_ip_addrs does not currently work for Tabular. Distributed instances will not be utilized.')
if scheduler_params['num_trials'] == 1:
logger.warning(
'Warning: Specified num_trials == 1 for hyperparameter tuning, disabling HPO. This can occur if time_limit was not specified in `fit()`.')
return False
scheduler_ngpus = scheduler_params['resource'].get('num_gpus', 0)
if scheduler_ngpus is not None and isinstance(scheduler_ngpus, int) and scheduler_ngpus > 1:
logger.warning(
f"Warning: TabularPredictor currently doesn't use >1 GPU per training run. Detected {scheduler_ngpus} GPUs.")
return True
def _set_hyperparameter_tune_kwargs_in_ag_args(self, hyperparameter_tune_kwargs, ag_args, time_limit):
if hyperparameter_tune_kwargs is not None:
    if 'hyperparameter_tune_kwargs' in ag_args:
        raise AssertionError(
            'hyperparameter_tune_kwargs was specified in both ag_args and in kwargs. Please only specify once.')
    else:
        ag_args['hyperparameter_tune_kwargs'] = hyperparameter_tune_kwargs
if not self._validate_hyperparameter_tune_kwargs(ag_args.get('hyperparameter_tune_kwargs', None), time_limit):
ag_args.pop('hyperparameter_tune_kwargs', None)
if ag_args.get('hyperparameter_tune_kwargs', None) is not None:
logger.log(30,
'Warning: hyperparameter tuning is currently experimental and may cause the process to hang.')
return ag_args
def _set_post_fit_vars(self, learner: AbstractLearner = None):
if learner is not None:
self._learner: AbstractLearner = learner
self._learner_type = type(self._learner)
if self._learner.trainer_path is not None:
self._learner.persist_trainer(low_memory=True)
self._trainer: AbstractTrainer = self._learner.load_trainer() # Trainer object
@classmethod
def _load_version_file(cls, path) -> str:
version_file_path = path + cls._predictor_version_file_name
version = load_str.load(path=version_file_path)
return version
def _save_version_file(self, silent=False):
from ..version import __version__
version_file_contents = f'{__version__}'
version_file_path = self.path + self._predictor_version_file_name
save_str.save(path=version_file_path, data=version_file_contents, verbose=not silent)
def save(self, silent=False):
"""
Save this Predictor to file in directory specified by this Predictor's `path`.
Note that :meth:`TabularPredictor.fit` already saves the predictor object automatically
(we do not recommend modifying the Predictor object yourself as it tracks many trained models).
Parameters
----------
silent : bool, default = False
Whether to save without logging a message.
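Examples
--------
A minimal usage sketch (assuming `predictor` is a fitted TabularPredictor):
>>> predictor.save()
>>> loaded_predictor = TabularPredictor.load(predictor.path)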
"""
path = self.path
tmp_learner = self._learner
tmp_trainer = self._trainer
self._learner.save()
self._learner = None
self._trainer = None
save_pkl.save(path=path + self.predictor_file_name, object=self)
self._learner = tmp_learner
self._trainer = tmp_trainer
self._save_version_file(silent=silent)
if not silent:
logger.log(20, f'TabularPredictor saved. To load, use: predictor = TabularPredictor.load("{self.path}")')
@classmethod
def _load(cls, path: str):
"""
Inner load method, called in `load`.
"""
predictor: TabularPredictor = load_pkl.load(path=path + cls.predictor_file_name)
learner = predictor._learner_type.load(path)
predictor._set_post_fit_vars(learner=learner)
return predictor
@classmethod
def load(cls, path: str, verbosity: int = None, require_version_match: bool = True):
"""
Load a TabularPredictor object previously produced by `fit()` from file and return this object. It is highly recommended that the predictor be loaded with the exact AutoGluon version it was fit with.
Parameters
----------
path : str
The path to directory in which this Predictor was previously saved.
verbosity : int, default = None
Sets the verbosity level of this Predictor after it is loaded.
Valid values range from 0 (least verbose) to 4 (most verbose).
If None, logging verbosity is not changed from existing values.
Specify larger values to see more information printed when using Predictor during inference, smaller values to see less information.
Refer to TabularPredictor init for more information.
require_version_match : bool, default = True
If True, will raise an AssertionError if the `autogluon.tabular` version of the loaded predictor does not match the installed version of `autogluon.tabular`.
If False, will allow loading of models trained on incompatible versions, but is NOT recommended. Users may run into numerous issues if attempting this.
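Examples
--------
A minimal usage sketch (the path below is a placeholder for whatever `predictor.path` reported during `fit()`):
>>> from autogluon.tabular import TabularPredictor
>>> predictor = TabularPredictor.load('path/to/predictor/')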
"""
if verbosity is not None:
set_logger_verbosity(verbosity) # Reset logging after load (may be in new Python session)
if path is None:
raise ValueError("path cannot be None in load()")
try:
from ..version import __version__
version_load = __version__
except:
version_load = None
path = setup_outputdir(path, warn_if_exist=False) # replace ~ with absolute path if it exists
try:
version_init = cls._load_version_file(path=path)
except:
logger.warning(f'WARNING: Could not find version file at "{path + cls._predictor_version_file_name}".\n'
f'This means that the predictor was fit in a version `<=0.3.1`.')
version_init = None
if version_init is None:
predictor = cls._load(path=path)
try:
version_init = predictor._learner.version
except:
version_init = None
else:
predictor = None
if version_init is None:
version_init = 'Unknown (Likely <=0.0.11)'
if version_load != version_init:
logger.warning('')
logger.warning('############################## WARNING ##############################')
logger.warning('WARNING: AutoGluon version differs from the version used to create the predictor! '
'This may lead to instability and it is highly recommended the predictor be loaded '
'with the exact AutoGluon version it was created with.')
logger.warning(f'\tPredictor Version: {version_init}')
logger.warning(f'\tCurrent Version: {version_load}')
logger.warning('############################## WARNING ##############################')
logger.warning('')
if require_version_match:
raise AssertionError(
f'Predictor was created on version {version_init} but is being loaded with version {version_load}. '
f'Please ensure the versions match to avoid instability. While it is NOT recommended, '
f'this error can be bypassed by specifying `require_version_match=False`.')
if predictor is None:
predictor = cls._load(path=path)
return predictor
@staticmethod
def _validate_init_kwargs(kwargs):
valid_kwargs = {
'learner_type',
'learner_kwargs',
'quantile_levels',
}
invalid_keys = []
for key in kwargs:
if key not in valid_kwargs:
invalid_keys.append(key)
if invalid_keys:
raise ValueError(f'Invalid kwargs passed: {invalid_keys}\nValid kwargs: {list(valid_kwargs)}')
def _validate_fit_kwargs(self, kwargs):
# TODO:
# Valid core_kwargs values:
# ag_args, ag_args_fit, ag_args_ensemble, stack_name, ensemble_type, name_suffix, time_limit
# Valid aux_kwargs values:
# name_suffix, time_limit, stack_name, aux_hyperparameters, ag_args, ag_args_ensemble
# TODO: Remove features from models option for fit_extra
# TODO: Constructor?
fit_kwargs_default = dict(
# data split / ensemble architecture kwargs -> Don't nest but have nested documentation -> Actually do nesting
holdout_frac=None, # TODO: Potentially error if num_bag_folds is also specified
num_bag_folds=None,
# TODO: Potentially move to fit_extra, raise exception if value too large / invalid in fit_extra.
auto_stack=False,
use_bag_holdout=False,
# other
feature_generator='auto',
unlabeled_data=None,
_feature_generator_kwargs=None,
)
kwargs = self._validate_fit_extra_kwargs(kwargs, extra_valid_keys=list(fit_kwargs_default.keys()))
kwargs_sanitized = fit_kwargs_default.copy()
kwargs_sanitized.update(kwargs)
return kwargs_sanitized
def _fit_extra_kwargs_dict(self):
"""
Returns
-------
dict of fit_extra args:
verbosity: controls which logging levels are printed
pseudo_data: pseudo-labeled data to be incorporated into training but not used in validation
name_suffix: a suffix string appended to the individual model names
"""
return dict(
# data split / ensemble architecture kwargs -> Don't nest but have nested documentation -> Actually do nesting
num_bag_sets=None,
num_stack_levels=None,
hyperparameter_tune_kwargs=None,
# core_kwargs -> +1 nest
ag_args=None,
ag_args_fit=None,
ag_args_ensemble=None,
excluded_model_types=None,
# aux_kwargs -> +1 nest
# post_fit_kwargs -> +1 nest
set_best_to_refit_full=False,
keep_only_best=False,
save_space=False,
refit_full=False,
# other
verbosity=self.verbosity,
feature_prune_kwargs=None,
# private
_save_bag_folds=None,
# quantile levels
quantile_levels=None,
calibrate=False,
# pseudo label
pseudo_data=None,
name_suffix=None
)
def _validate_fit_extra_kwargs(self, kwargs, extra_valid_keys=None):
fit_extra_kwargs_default = self._fit_extra_kwargs_dict()
allowed_kwarg_names = list(fit_extra_kwargs_default.keys())
if extra_valid_keys is not None:
allowed_kwarg_names += extra_valid_keys
for kwarg_name in kwargs.keys():
if kwarg_name not in allowed_kwarg_names:
public_kwarg_options = [kwarg for kwarg in allowed_kwarg_names if kwarg[0] != '_']
public_kwarg_options.sort()
raise ValueError(
f"Unknown keyword argument specified: {kwarg_name}\nValid kwargs: {public_kwarg_options}")
kwargs_sanitized = fit_extra_kwargs_default.copy()
kwargs_sanitized.update(kwargs)
# Deepcopy args to avoid altering outer context
deepcopy_args = ['ag_args', 'ag_args_fit', 'ag_args_ensemble', 'excluded_model_types']
for deepcopy_arg in deepcopy_args:
kwargs_sanitized[deepcopy_arg] = copy.deepcopy(kwargs_sanitized[deepcopy_arg])
refit_full = kwargs_sanitized['refit_full']
set_best_to_refit_full = kwargs_sanitized['set_best_to_refit_full']
if refit_full and not self._learner.cache_data:
raise ValueError(
'`refit_full=True` is only available when `cache_data=True`. Set `cache_data=True` to utilize `refit_full`.')
if set_best_to_refit_full and not refit_full:
raise ValueError(
'`set_best_to_refit_full=True` is only available when `refit_full=True`. Set `refit_full=True` to utilize `set_best_to_refit_full`.')
return kwargs_sanitized
def _prune_data_features(self, train_features: list, other_features: list, is_labeled: bool):
"""
Removes certain columns from the provided datasets that do not contain predictive features.
Parameters
----------
train_features : list
The feature (column) names of the incoming training data.
other_features : list
Feature (column) names of other auxiliary data that contains the same covariates as the training data.
Examples of this could be: tuning data, pseudo data.
is_labeled : bool
Whether the other_features data is labeled.
"""
if self.sample_weight is not None:
if self.sample_weight in train_features:
train_features.remove(self.sample_weight)
if self.sample_weight in other_features:
other_features.remove(self.sample_weight)
if self._learner.groups is not None and is_labeled:
train_features.remove(self._learner.groups)
return train_features, other_features
def _validate_fit_data(self, train_data, tuning_data=None, unlabeled_data=None):
if isinstance(train_data, str):
train_data = TabularDataset(train_data)
if tuning_data is not None and isinstance(tuning_data, str):
tuning_data = TabularDataset(tuning_data)
if unlabeled_data is not None and isinstance(unlabeled_data, str):
unlabeled_data = TabularDataset(unlabeled_data)
if not isinstance(train_data, pd.DataFrame):
raise AssertionError(
f'train_data is required to be a pandas DataFrame, but was instead: {type(train_data)}')
if len(set(train_data.columns)) < len(train_data.columns):
raise ValueError(
"Column names are not unique, please change duplicated column names (in pandas: train_data.rename(columns={'current_name':'new_name'})")
self._validate_unique_indices(data=train_data, name='train_data')
if tuning_data is not None:
if not isinstance(tuning_data, pd.DataFrame):
raise AssertionError(
f'tuning_data is required to be a pandas DataFrame, but was instead: {type(tuning_data)}')
self._validate_unique_indices(data=tuning_data, name='tuning_data')
train_features = [column for column in train_data.columns if column != self.label]
tuning_features = [column for column in tuning_data.columns if column != self.label]
train_features, tuning_features = self._prune_data_features(train_features=train_features,
other_features=tuning_features,
is_labeled=True)
train_features = np.array(train_features)
tuning_features = np.array(tuning_features)
if np.any(train_features != tuning_features):
    raise ValueError("Column names must match between training and tuning data")
import re
import pandas as pd
import numpy as np
import pathlib
from collections import OrderedDict
from pyutil import read_table, intersection
BASE_PAIR = {
'A': 'T',
'T': 'A',
'G': 'C',
'C': 'G'
}
def check_flip(a1, a2, b1, b2):
res = []
for _a1, _a2, _b1, _b2 in zip(a1, a2, b1, b2):
res.append(_check_flip(_a1, _a2, _b1, _b2))
return np.array(res)
def _check_flip(a0, a1, b0, b1):
'''
check if (a0, a1) and (b0, b1) are of the same direction.
If there is nan, or they don't match at all, or the pair is ambiguous, return nan.
Else if they are in the same direction, return 1
Else return -1
'''
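# Illustrative calls (assumed examples, not part of the original source):
#   _check_flip('A', 'G', 'A', 'G') -> 1    exact match
#   _check_flip('A', 'G', 'G', 'A') -> -1   alleles swapped between the two sources
#   _check_flip('A', 'T', 'A', 'T') -> nan  ambiguous, since A/T are complementary bases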
if a0 is np.nan or a1 is np.nan or b0 is np.nan or b1 is np.nan:
return np.nan
# remove ambiguous pairs first.
if a0 == BASE_PAIR[a1] or b0 == BASE_PAIR[b1]:
return np.nan
# exact match
if a0 == b0 and a1 == b1:
return 1
# flip
if a0 == b1 and a1 == b0:
return -1
# complement match
if a0 == BASE_PAIR[b0] and a1 == BASE_PAIR[b1]:
return 1
# complement flip
if a0 == BASE_PAIR[b1] and a1 == BASE_PAIR[b0]:
return -1
# if none of the above returned, the pair has to be invalid.
return np.nan
def rearrage_df_by_target(df, target, df_value_cols):
df_res = target[['snpid', 'chr', 'effect_allele', 'non_effect_allele']]
df_res = pd.merge(
df_res, df,
on=['snpid', 'chr'],
suffixes=['_res', '_df'],
how='left'
)
flip_factor = check_flip(
a1=df_res.effect_allele_res,
a2=df_res.non_effect_allele_res,
b1=df_res.effect_allele_df,
b2=df_res.non_effect_allele_df
)
# we need to carry the missingness when we move on
with np.errstate(invalid='ignore'):
    # The original body of this block was truncated. The assumed intent, given the
    # comment above, is to apply the flip factor (including its NaNs) to the value columns.
    for col in df_value_cols:
        df_res[col] = df_res[col] * flip_factor
return df_res
# Copyright (c) 2020. <NAME>, Ghent University
import math
import operator
from functools import reduce
import numpy as np
import rasterio
from geographiclib.geodesic import Geodesic
from scipy.interpolate import interp1d
def read_file(file=None, header=0):
"""Reads space separated dat file"""
with open(file, "r") as fr:
op = np.array([list(map(float, i.split())) for i in fr.readlines()[header:]])
return op
def order_vertices(vertices):
"""
Paraview expects vertices in a particular order for Quad object, with the origin at the bottom left corner.
:param vertices: (x, y) coordinates of the quad vertices
:return: Sorted vertices
"""
# Compute center of vertices
center = tuple(
map(
operator.truediv,
reduce(lambda x, y: map(operator.add, x, y), vertices),
[len(vertices)] * 2,
)
)
# Sort vertices according to angle
so = sorted(
vertices,
key=lambda coord: (
math.degrees(math.atan2(*tuple(map(operator.sub, coord, center))[::-1]))
),
)
return np.array(so)
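# Illustrative call (assumed example, not part of the original source): vertices supplied in an
# arbitrary order come back sorted by angle around their centroid, starting at the bottom-left corner:
# order_vertices(np.array([[1, 1], [0, 0], [1, 0], [0, 1]]))
# -> array([[0, 0], [1, 0], [1, 1], [0, 1]])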
class Transformation:
def __init__(
self,
blocks_: str = None,
bounds: list = None,
dem: str = None,
origin: list = None,
name: str = None,
):
"""
:param blocks_: Results file containing block coordinates and associated values
:param bounds: tuple: ((lat1, lon1), (lat2, lon2))
:param dem: Digital Elevation Model file
:param origin: Coordinates of the origin of the map (lat, lon)
:param name: str: Name of the output file
"""
self.block_data = blocks_
if bounds is not None:
self.bounds = np.array(bounds)
self.elevation_file = dem
self.origin = origin
if name is None:
self.name = "anonymous"
else:
self.name = name
def conversion(self):
if type(self.block_data) is str:
blocks = read_file(self.block_data) # Raw mesh info
# TODO: Optimize parse - implement for several output (res, ip..)
blocks2d_flat = blocks[:, 1:9] # Flat list of polygon vertices
else:
blocks = self.block_data
blocks2d_flat = blocks[:, 1:9]
rho = blocks[:, 9:] # Values associated to each block
# Load profile bounds, must be in the correct format:
# [[ lat1, lon1], [lat2, lon2]]
# Elevation data
tif = 0
if self.elevation_file is not None:
if ".tif" in self.elevation_file.lower(): # If tif file
tif = 1
# Load tif file
z = rasterio.open(self.elevation_file)
# Elevation data:
r = z.read(1)
def elevation(lat_, lon_):
"""
Gets the elevation from a raster file given a pair of latitude/longitude coordinates
:param lat_: latitude WGS84 in decimal degrees
:param lon_: longitude WGS84 in decimal degrees
:return: Elevation (m)
"""
idx = z.index(lon_, lat_)
return r[idx]
else: # If 2D x - z data
z = read_file(self.elevation_file)
# Interpolating function to fill missing values
fi = interp1d(z[:, 0], z[:, 1], fill_value="extrapolate")
def elevation(x_):
"""
:return: Elevation (m)
"""
return fi(x_)
else:
def elevation(*args):
return 0
blocks2d = blocks2d_flat.reshape(-1, 4, 2) # Reshape in (n, 4, 2)
# %% Order vertices in each block to correspond to VTK requirements
# Blocks' vertices are now correctly ordered
blocks2d_vo = np.array([order_vertices(vs) for vs in blocks2d])
# %% Add a new axis to make coordinates 3-D
# We have now the axis along the profile line and the depth.
shp = blocks2d_vo.shape
# Create 3D empty array
blocks3d = np.zeros((shp[0], shp[1], 3))
# Insert 0 value for each vertices
for i in range(len(blocks2d_vo)):
for j in range(shp[1]):
blocks3d[i, j] = np.insert(blocks2d_vo[i, j], 1, 0)
# Flatten
blocks3d = blocks3d.reshape(-1, 3)
# Set the maximum elevation at 0
blocks3d[:, 2] -= np.min(
(np.abs(blocks3d[:, 2].min()), np.abs(blocks3d[:, 2].max()))
)
# %% Coordinates conversion
geod = Geodesic.WGS84 # define the WGS84 ellipsoid
# Create an 'InverseLine' bounded by the profile endpoints.
profile = geod.InverseLine(
self.bounds[0, 0], self.bounds[0, 1], self.bounds[1, 0], self.bounds[1, 1]
)
def lat_lon(distance):
"""
Returns the WGS coordinates given a distance along the axis of the profile.
:param distance: Distance along the profile from its origin (m)
:return: latitude WGS84 in decimal degrees, longitude WGS84 in decimal degrees
"""
g = profile.Position(distance, Geodesic.STANDARD | Geodesic.LONG_UNROLL)
return g["lat2"], g["lon2"]
blocks_wgs = np.copy(blocks3d)
# %% Insert elevation
# Convert distance along axis to lat/lon and add elevation
if tif:
for i in range(len(blocks_wgs)):
lat, lon = lat_lon(blocks_wgs[i, 0])
blocks_wgs[i, 0] = lat
blocks_wgs[i, 1] = lon
altitude = elevation(lat, lon) - np.abs(blocks_wgs[i, 2])
blocks_wgs[i, 2] = altitude
else:
for i in range(len(blocks_wgs)):
altitude = elevation(blocks_wgs[i, 0]) - np.abs(blocks_wgs[i, 2])
lat, lon = lat_lon(blocks_wgs[i, 0])
blocks_wgs[i, 0] = lat
blocks_wgs[i, 1] = lon
blocks_wgs[i, 2] = altitude
# %% Set in local coordinate system
lat_origin, long_origin = self.origin # Arbitrary origin
def local_system(lat_p, lon_p):
"""
Given an origin, converts the WGS84 coordinates into meters around that point.
:param lat_p: latitude (decimal degree wgs84)
:param lon_p: longitude (decimal degree wgs84)
:return:
"""
line = geod.InverseLine(lat_origin, long_origin, lat_p, lon_p)
azi = line.azi1
dis = line.s13
return dis * math.sin(math.radians(azi)), dis * math.cos(math.radians(azi))
blocks_local = np.copy(blocks_wgs)
# Update coordinates
for i in range(len(blocks_wgs)):
x, y = local_system(blocks_local[i, 0], blocks_local[i, 1])
blocks_local[i, 0], blocks_local[i, 1] = x, y
return blocks_local, rho
def dem(self, dem_file, bounding_box, n_x=100, n_y=100):
"""
:param dem_file: Path to dem file
:param bounding_box: tuple: Bounding box (rectangle) of the DEM ((lat1, long1), (lat2, long2))
:param n_x: int: Number of cells in x direction (longitude)
:param n_y: int: Number of cells in y direction (latitude)
:return:
"""
# TODO: add more DEM files input options
if ".tif" in dem_file.lower():
# Load tif file
dataset = rasterio.open(dem_file)
# Elevation data:
r = dataset.read(1)
def elevation(lat_, lon_):
idx = dataset.index(lon_, lat_)
return r[idx]
else: # Expects (lat, lon, elev) data file
dataset = read_file(dem_file)
points = dataset[:, :2]
values = dataset[:, -1]
def elevation(lat_, lon_):
"""Inverse Square Distance"""
d = np.sqrt((lon_ - points[:, 1]) ** 2 + (lat_ - points[:, 0]) ** 2)
if d.min() > 0:
v = np.sum(values * (1 / d ** 2) / np.sum(1 / d ** 2))
else:
    # assumed completion of the truncated original: a query point that coincides
    # with a data point takes that point's value directly
    v = values[np.argmin(d)]
return v
#
# Copyright (c) 2021 The Markovflow Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module containing the unit tests for the `BlockTriDiagonal` class."""
from typing import Optional, Tuple
import numpy as np
import pytest
import tensorflow as tf
from markovflow.block_tri_diag import LowerTriangularBlockTriDiagonal, SymmetricBlockTriDiagonal
INNER_DIMS = [1, 3]
OUTER_DIMS = [1, 4]
@pytest.mark.parametrize("inner_dim", INNER_DIMS)
@pytest.mark.parametrize("outer_dim", [2, 4])
def test_block_sub_diag(with_tf_random_seed, batch_shape, inner_dim, outer_dim):
"""Test that the `block_sub_diagonal` method works."""
diag = np.random.normal(size=batch_shape + (outer_dim, inner_dim, inner_dim))
sub_diag_np = np.random.normal(size=batch_shape + (outer_dim - 1, inner_dim, inner_dim))
block_tri_diag = LowerTriangularBlockTriDiagonal(tf.constant(diag), tf.constant(sub_diag_np))
sub_diag_tf = block_tri_diag.block_sub_diagonal
np.testing.assert_allclose(sub_diag_np, sub_diag_tf)
@pytest.mark.parametrize("has_sub_diag", [True, False])
@pytest.mark.parametrize("inner_dim", INNER_DIMS)
@pytest.mark.parametrize("outer_dim", OUTER_DIMS)
def test_dense(with_tf_random_seed, batch_shape, has_sub_diag, inner_dim, outer_dim):
"""Test that the `to_dense` method works."""
if has_sub_diag and outer_dim == 1:
return
dense_np, block_tri_diag = _generate_random_tri_diag(
batch_shape, outer_dim, inner_dim, has_sub_diag
)
dense = block_tri_diag.to_dense()
np.testing.assert_allclose(dense, dense_np)
@pytest.mark.parametrize("has_sub_diag_1", [True, False])
@pytest.mark.parametrize("has_sub_diag_2", [True, False])
@pytest.mark.parametrize("inner_dim", INNER_DIMS)
@pytest.mark.parametrize("outer_dim", OUTER_DIMS)
def test_add(
with_tf_random_seed, batch_shape, has_sub_diag_1, has_sub_diag_2, inner_dim, outer_dim
):
"""Test that the addition of two `SymmetricBlockTriDiagonal` matrices works."""
if (has_sub_diag_1 or has_sub_diag_2) and outer_dim == 1:
return
dense_np_1, block_tri_diag_1 = _generate_random_pos_def_tri_diag(
batch_shape, outer_dim, inner_dim, has_sub_diag_1
)
dense_np_2, block_tri_diag_2 = _generate_random_pos_def_tri_diag(
batch_shape, outer_dim, inner_dim, has_sub_diag_2
)
added = (block_tri_diag_1 + block_tri_diag_2).to_dense()
np.testing.assert_allclose(added, dense_np_1 + dense_np_2)
@pytest.mark.parametrize("has_sub_diag", [True, False])
@pytest.mark.parametrize("inner_dim", INNER_DIMS)
@pytest.mark.parametrize("outer_dim", OUTER_DIMS)
def test_abs_log_det(with_tf_random_seed, batch_shape, has_sub_diag, inner_dim, outer_dim):
"""Test that the `abs_log_det` method works."""
if has_sub_diag and outer_dim == 1:
return
dense_np, block_tri_diag = _generate_random_tri_diag(
batch_shape, outer_dim, inner_dim, has_sub_diag
)
_, log_det_np = np.linalg.slogdet(dense_np)
log_det = block_tri_diag.abs_log_det()
np.testing.assert_allclose(log_det, log_det_np)
@pytest.mark.parametrize("has_sub_diag", [True, False])
@pytest.mark.parametrize("inner_dim", INNER_DIMS)
@pytest.mark.parametrize("outer_dim", OUTER_DIMS)
def test_cholesky(with_tf_random_seed, batch_shape, has_sub_diag, inner_dim, outer_dim):
"""Test that the `cholesky` method works."""
if has_sub_diag and outer_dim == 1:
return
dense_np, block_tri_diag = _generate_random_pos_def_tri_diag(
batch_shape, outer_dim, inner_dim, has_sub_diag
)
chol_np = np.linalg.cholesky(dense_np)
chol = block_tri_diag.cholesky.to_dense()
np.testing.assert_allclose(chol, chol_np, rtol=1e-3)
@pytest.mark.parametrize("has_sub_diag", [True, False])
@pytest.mark.parametrize("inner_dim", INNER_DIMS)
@pytest.mark.parametrize("transpose_left", [True, False])
@pytest.mark.parametrize("outer_dim", OUTER_DIMS)
def test_solve(
with_tf_random_seed, batch_shape, has_sub_diag, inner_dim, transpose_left, outer_dim
):
"""Test that the `solve` method works."""
if has_sub_diag and outer_dim == 1:
return
dense_np, block_tri_diag = _generate_random_tri_diag(
batch_shape, outer_dim, inner_dim, has_sub_diag
)
right = np.random.normal(size=batch_shape + (outer_dim, inner_dim))
solve = block_tri_diag.solve(tf.constant(right), transpose_left=transpose_left)
if transpose_left:
einsum_string = "...ji,...j->...i"
else:
einsum_string = "...ij,...j->...i"
solve_np = np.einsum(
einsum_string,
np.linalg.inv(dense_np),
right.reshape(batch_shape + (outer_dim * inner_dim,)),
)
np.testing.assert_allclose(solve, solve_np.reshape(batch_shape + (outer_dim, inner_dim)))
@pytest.mark.parametrize("has_sub_diag", [True, False])
@pytest.mark.parametrize("inner_dim", INNER_DIMS)
@pytest.mark.parametrize("outer_dim", OUTER_DIMS)
@pytest.mark.parametrize("symmetrise", [True, False])
@pytest.mark.parametrize("transpose_left", [True, False])
def test_dense_mult(
with_tf_random_seed,
batch_shape,
has_sub_diag,
inner_dim,
transpose_left,
symmetrise,
outer_dim,
):
"""Test that the `dense_mult` method works."""
if has_sub_diag and outer_dim == 1:
return
if transpose_left and symmetrise:
# can't symmetrise and transpose at the same time
return
if symmetrise:
dense_np, block_tri_diag = _generate_random_pos_def_tri_diag(
batch_shape, outer_dim, inner_dim, has_sub_diag
)
else:
dense_np, block_tri_diag = _generate_random_tri_diag(
batch_shape, outer_dim, inner_dim, has_sub_diag
)
right = np.random.normal(size=batch_shape + (outer_dim, inner_dim))
mult = block_tri_diag.dense_mult(tf.constant(right), transpose_left=transpose_left)
if transpose_left:
einsum_string = "...ji,...j->...i"
else:
einsum_string = "...ij,...j->...i"
mult_np = np.einsum(
einsum_string, dense_np, right.reshape(batch_shape + (outer_dim * inner_dim,))
)
np.testing.assert_allclose(mult, mult_np.reshape(batch_shape + (outer_dim, inner_dim)))
@pytest.mark.parametrize("has_sub_diag", [True, False])
@pytest.mark.parametrize("inner_dim", INNER_DIMS)
@pytest.mark.parametrize("outer_dim", OUTER_DIMS)
def test_diagonal_of_inverse(with_tf_random_seed, batch_shape, has_sub_diag, inner_dim, outer_dim):
"""Test that the `block_diagonal_of_inverse` method works."""
if has_sub_diag and outer_dim == 1:
return
dense_np, _ = _generate_random_pos_def_tri_diag(batch_shape, outer_dim, inner_dim, has_sub_diag)
chol_np = np.linalg.cholesky(dense_np)
diag, sub_diag = _blocktridiag_from_dense(chol_np, inner_dim, has_sub_diag)
if has_sub_diag:
sub_diag = tf.constant(sub_diag)
block_tri_diag = LowerTriangularBlockTriDiagonal(tf.constant(diag), sub_diag)
diag_of_inv = block_tri_diag.block_diagonal_of_inverse()
diag_of_inv_np, _ = _blocktridiag_from_dense(np.linalg.inv(dense_np), inner_dim, False)
np.testing.assert_allclose(diag_of_inv_np, diag_of_inv, rtol=1e-3)
@pytest.mark.parametrize("inner_dim", INNER_DIMS)
@pytest.mark.parametrize("outer_dim", [3, 5])
def test_upper_diagonal_lower(with_tf_random_seed, batch_shape, inner_dim, outer_dim):
"""Test that the `upper_diagonal_lower` method works."""
dense_np, block_tri_diag = _generate_random_pos_def_tri_diag(
batch_shape, outer_dim, inner_dim, True
)
lower_tf, diag_tf = block_tri_diag.upper_diagonal_lower()
lower, diag = lower_tf.to_dense(), diag_tf.to_dense()
chol_d_u = np.swapaxes(diag, -1, -2) @ lower
# make sure the lower triangular matrix is actually lower
np.testing.assert_allclose(lower, np.tril(lower))
# ensure the block diagonal matrix is actually block diagonal
assert diag_tf.block_sub_diagonal is None
# verify that recombining results in the original matrix
np.testing.assert_allclose(dense_np, np.swapaxes(chol_d_u, -1, -2) @ chol_d_u, rtol=1e-6)
def _to_dense(diag: np.ndarray, sub_diag: Optional[np.ndarray]) -> np.ndarray:
""" Convert a diagonal and sub-diagonal to a dense matrix. """
*batch_shape, outer_dim, inner_dim, _ = diag.shape
dense = np.zeros(batch_shape + [outer_dim * inner_dim, outer_dim * inner_dim])
for i in range(outer_dim):
block_start = i * inner_dim
for j in range(inner_dim):
for k in range(j + 1): # only want the lower half of the diagonal matrices
dense[..., block_start + j, block_start + k] = diag[..., i, j, k]
if sub_diag is not None:
for i in range(outer_dim - 1):
block_start_k = i * inner_dim
block_start_j = block_start_k + inner_dim
for j in range(inner_dim):
for k in range(inner_dim):
dense[..., block_start_j + j, block_start_k + k] = sub_diag[..., i, j, k]
return dense
def _blocktridiag_from_dense(
array: np.ndarray, inner_dim: int, has_sub_diag: bool
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Extract the diagonal and sub-diagonal from a dense matrix."""
*batch_shape, outer_inner, _ = array.shape
outer_dim = outer_inner // inner_dim
diag = np.zeros(batch_shape + [outer_dim, inner_dim, inner_dim])
for i in range(outer_dim):
block_start = i * inner_dim
for j in range(inner_dim):
for k in range(inner_dim):
diag[..., i, j, k] = array[..., block_start + j, block_start + k]
sub_diag = None
if has_sub_diag:
sub_diag = np.zeros(batch_shape + [outer_dim - 1, inner_dim, inner_dim])
for i in range(outer_dim - 1):
block_start_k = i * inner_dim
block_start_j = block_start_k + inner_dim
for j in range(inner_dim):
for k in range(inner_dim):
sub_diag[..., i, j, k] = array[..., block_start_j + j, block_start_k + k]
return diag, sub_diag
def _generate_random_pos_def_tri_diag(
batch_shape: Tuple, outer_dim: int, inner_dim: int, has_sub_diag: bool
) -> Tuple[np.ndarray, SymmetricBlockTriDiagonal]:
"""
Create a random tri-diagonal symmetric positive definite matrix.
This works by creating a lower triangular tri-diagonal matrix and multiplying it by
its transpose to make a symmetric positive definite tri diagonal.
"""
diag = np.tril(np.random.normal(loc=1.0, size=batch_shape + (outer_dim, inner_dim, inner_dim)))
sub_diag_np = None
if has_sub_diag:
sub_diag_np = np.random.normal(size=batch_shape + (outer_dim - 1, inner_dim, inner_dim))
dense_np = _to_dense(diag, sub_diag_np)
dense_np = np.einsum("...ij,...kj->...ik", dense_np, dense_np)
diag, sub_diag = _blocktridiag_from_dense(dense_np, inner_dim, has_sub_diag)
if has_sub_diag:
sub_diag = tf.constant(sub_diag)
return dense_np, SymmetricBlockTriDiagonal(tf.constant(diag), sub_diag)
def _generate_random_tri_diag(
batch_shape: Tuple, outer_dim: int, inner_dim: int, has_sub_diag: bool
) -> Tuple[np.ndarray, LowerTriangularBlockTriDiagonal]:
diag = np.tril(np.random.normal(size=batch_shape + (outer_dim, inner_dim, inner_dim)))
sub_diag = None
sub_diag_np = None
if has_sub_diag:
sub_diag_np = np.random.normal(size=batch_shape + (outer_dim - 1, inner_dim, inner_dim))
sub_diag = tf.constant(sub_diag_np)
# assumed completion of the truncated helper, mirroring _generate_random_pos_def_tri_diag above
dense_np = _to_dense(diag, sub_diag_np)
return dense_np, LowerTriangularBlockTriDiagonal(tf.constant(diag), sub_diag)
import numpy as np
import os
import pytest
import warnings
from threeML.io.package_data import get_path_of_data_file
from threeML.utils.OGIP.response import (
InstrumentResponseSet,
InstrumentResponse,
OGIPResponse,
)
from threeML.utils.time_interval import TimeInterval
def get_matrix_elements():
# In[5]: np.diagflat([1, 2, 3, 4])[:3, :]
matrix = np.diagflat([1.0, 2.0, 3.0, 4.0])[:3, :]
# Now matrix is:
# array([[1, 0, 0, 0],
# [0, 2, 0, 0],
# [0, 0, 3, 0]])
mc_energies = [1.0, 2.0, 3.0, 4.0, 5.0]
ebounds = [1.0, 2.5, 4.5, 5.0]
return matrix, mc_energies, ebounds
def get_matrix_set_elements():
matrix, mc_energies, ebounds = get_matrix_elements()
rsp_a = InstrumentResponse(matrix, ebounds, mc_energies)
# Make another matrix with the same matrix but divided by 2
other_matrix = matrix / 2.0
rsp_b = InstrumentResponse(other_matrix, ebounds, mc_energies)
# Remember: the second matrix is like the first one divided by two, and it covers twice as much time.
# They cover 0-10 s the first one, and 10-30 the second one.
# Fake an exposure getter by using a fixed 10% deadtime
livetime_fraction = 0.9
exposure_getter = lambda t1, t2: livetime_fraction * (t2 - t1)
# Fake a count getter
law = lambda x: 1.23 * x
# The counts getter is the integral of the law
counts_getter = (lambda t1, t2: 1.23 * 0.5 *
(t2**2.0 - t1**2.0) * livetime_fraction)
return [rsp_a, rsp_b], exposure_getter, counts_getter
def get_matrix_set_elements_with_coverage(reference_time=0.0):
[rsp_a, rsp_b], exposure_getter, counts_getter = get_matrix_set_elements()
# By making the coverage interval twice for the second matrix we restore parity with the first one,
# so that the weighting by exposure should simply return the first matrix
rsp_a._coverage_interval = TimeInterval(0.0, 10.0) + reference_time
rsp_b._coverage_interval = TimeInterval(10.0, 30.0) + reference_time
return [rsp_a, rsp_b], exposure_getter, counts_getter
def test_instrument_response_constructor():
# Make a fake test matrix
matrix, mc_energies, ebounds = get_matrix_elements()
rsp = InstrumentResponse(matrix, ebounds, mc_energies)
assert np.all(rsp.matrix == matrix)
assert np.all(rsp.ebounds == ebounds)
assert np.all(rsp.monte_carlo_energies == mc_energies)
# Now with coverage interval
with pytest.raises(RuntimeError):
_ = InstrumentResponse(matrix, ebounds, mc_energies, "10-20")
rsp = InstrumentResponse(matrix, ebounds, mc_energies, TimeInterval(10.0, 20.0))
assert rsp.rsp_filename is None
assert rsp.arf_filename is None
assert rsp.coverage_interval == TimeInterval(10.0, 20.0)
# Check that we do not accept nans in the matrix
matrix[2, 2] = np.nan
with pytest.raises(RuntimeError):
_ = InstrumentResponse(matrix, ebounds, mc_energies, "10-20")
def test_instrument_response_replace_matrix():
matrix, mc_energies, ebounds = get_matrix_elements()
rsp = InstrumentResponse(matrix, ebounds, mc_energies)
new_matrix = matrix / 2.0
rsp.replace_matrix(new_matrix)
assert np.all(rsp.matrix == new_matrix)
with pytest.raises(RuntimeError):
rsp.replace_matrix(np.random.uniform(0, 1, 100).reshape(10, 10))
def test_instrument_response_set_function_and_convolve():
# A very basic test. More tests will be made against XSpec later
matrix, mc_energies, ebounds = get_matrix_elements()
rsp = InstrumentResponse(matrix, ebounds, mc_energies)
# Integral of a constant, so we know easily what the output should be
#integral_function = lambda e1, e2: e2 - e1
def integral_function():
return np.array(mc_energies)[1:] - np.array(mc_energies)[:-1]
rsp.set_function(integral_function)
folded_counts = rsp.convolve()
assert np.all(folded_counts == [1.0, 2.0, 3.0])
def test__instrument_response_energy_to_channel():
matrix, mc_energies, ebounds = get_matrix_elements()
rsp = InstrumentResponse(matrix, ebounds, mc_energies)
assert rsp.energy_to_channel(1.5) == 0
assert rsp.energy_to_channel(2.6) == 1
assert rsp.energy_to_channel(4.75) == 2
assert rsp.energy_to_channel(100.0) == 3
def test_instrument_response_plot_response():
matrix, mc_energies, ebounds = get_matrix_elements()
rsp = InstrumentResponse(matrix, ebounds, mc_energies)
rsp.plot_matrix()
def test_OGIP_response_first_channel():
# Get path of response file
rsp_file = get_path_of_data_file("ogip_test_gbm_n6.rsp")
rsp = OGIPResponse(rsp_file)
assert rsp.first_channel == 1
def test_OGIP_response_arf_rsp_accessors():
# Then load rsp and arf in XSpec
rsp_file = get_path_of_data_file("ogip_test_xmm_pn.rmf")
arf_file = get_path_of_data_file("ogip_test_xmm_pn.arf")
rsp = OGIPResponse(rsp_file, arf_file=arf_file)
assert rsp.arf_filename == arf_file
assert rsp.rsp_filename == rsp_file
def test_response_write_to_fits1():
matrix, mc_energies, ebounds = get_matrix_elements()
rsp = InstrumentResponse(matrix, ebounds, mc_energies)
temp_file = "__test.rsp"
rsp.to_fits(temp_file, "TEST", "TEST", overwrite=True)
# Now check that reloading gives back the same matrix
rsp_reloaded = OGIPResponse(temp_file)
assert np.allclose(rsp_reloaded.matrix, rsp.matrix)
assert np.allclose(rsp_reloaded.ebounds, rsp.ebounds)
assert np.allclose(rsp_reloaded.monte_carlo_energies, rsp.monte_carlo_energies)
os.remove(temp_file)
def test_response_write_to_fits2():
# Now do the same for a response read from a file
rsp_file = get_path_of_data_file("ogip_test_gbm_n6.rsp")
rsp = OGIPResponse(rsp_file)
temp_file = "__test.rsp"
rsp.to_fits(temp_file, "TEST", "TEST", overwrite=True)
rsp_reloaded = OGIPResponse(temp_file)
assert np.allclose(rsp_reloaded.matrix, rsp.matrix)
assert np.allclose(rsp_reloaded.ebounds, rsp.ebounds)
assert np.allclose(rsp_reloaded.monte_carlo_energies, rsp.monte_carlo_energies)
os.remove(temp_file)
def test_response_write_to_fits3():
# Now do the same for a file with a ARF
rsp_file = get_path_of_data_file("ogip_test_xmm_pn.rmf")
arf_file = get_path_of_data_file("ogip_test_xmm_pn.arf")
rsp = OGIPResponse(rsp_file, arf_file=arf_file)
temp_file = "__test.rsp"
rsp.to_fits(temp_file, "TEST", "TEST", overwrite=True)
rsp_reloaded = OGIPResponse(temp_file)
assert np.allclose(rsp_reloaded.matrix, rsp.matrix)
assert np.allclose(rsp_reloaded.ebounds, rsp.ebounds)
assert np.allclose(rsp_reloaded.monte_carlo_energies, rsp.monte_carlo_energies)
os.remove(temp_file)
def test_response_set_constructor():
[rsp_aw, rsp_bw], exposure_getter, counts_getter = get_matrix_set_elements()
with pytest.raises(RuntimeError):
# This should raise because there is no time information for the matrices
_ = InstrumentResponseSet([rsp_aw, rsp_bw], exposure_getter, counts_getter)
# Add the time information
(
[rsp_a, rsp_b],
exposure_getter,
counts_getter,
) = get_matrix_set_elements_with_coverage()
# This should work now
rsp_set = InstrumentResponseSet([rsp_a, rsp_b], exposure_getter, counts_getter)
assert rsp_set[0] == rsp_a
assert rsp_set[1] == rsp_b
# Check that the constructor orders the matrices by time when needed
# This should work now
rsp_set = InstrumentResponseSet([rsp_b, rsp_a], exposure_getter, counts_getter)
assert rsp_set[0] == rsp_a
assert rsp_set[1] == rsp_b
# Now test construction from the .from_rsp2 method
rsp2_file = get_path_of_data_file("ogip_test_gbm_b0.rsp2")
with warnings.catch_warnings():
warnings.simplefilter("error", np.VisibleDeprecationWarning)
rsp_set = InstrumentResponseSet.from_rsp2_file(
rsp2_file, exposure_getter, counts_getter
)
assert len(rsp_set) == 3
# Now test that we cannot initialize a response set with matrices which have non-contiguous coverage intervals
matrix, mc_energies, ebounds = get_matrix_elements()
rsp_c = InstrumentResponse(matrix, ebounds, mc_energies, TimeInterval(0.0, 10.0))
rsp_d = InstrumentResponse(matrix, ebounds, mc_energies, TimeInterval(20.0, 30.0))
with pytest.raises(RuntimeError):
_ = InstrumentResponseSet([rsp_c, rsp_d], exposure_getter, counts_getter)
def test_response_set_weighting():
(
[rsp_a, rsp_b],
exposure_getter,
counts_getter,
) = get_matrix_set_elements_with_coverage()
rsp_set = InstrumentResponseSet([rsp_a, rsp_b], exposure_getter, counts_getter)
# here we are weighting by exposure. We have:
# weight1 = (0.9 * 5.0) = 4.5
# weight2 = (0.9 * 15.0) = 13.5
# sum = weight1 + weight2 = 18.0
# new_matrix = rsp_a * weight1/sum + rsp_b * weight2 / sum
# but rsp_b = rsp_a / 2.0, so:
# new_matrix = rsp_a * weight1 / sum + rsp_a / 2.0 * weight2 / sum = 1 / sum * rsp_a * (weight1 + weight2 / 2.0)
# so in the end:
# new_matrix = 0.625 * rsp_a
weighted_matrix = rsp_set.weight_by_exposure("5.0 - 25.0")
assert np.allclose(weighted_matrix.matrix, 0.625 * rsp_a.matrix)
# here we are weighting by counts. We have:
# weight1 = 55.35
# weight2 = 442.8
# so:
# new_matrix = 1 / sum * rsp_a * (weight1 + weight2 / 2.0) = 0.5555555555555555 * rsp_a
weighted_matrix = rsp_set.weight_by_counts("0.0 - 30.0")
assert np.allclose(weighted_matrix.matrix, 0.5555555555555555 * rsp_a.matrix)
# Here we weight by counts in the interval 5.0 - 25.0
# With the same math as before:
weighted_matrix = rsp_set.weight_by_counts("5.0 - 25.0")
assert np.allclose(weighted_matrix.matrix, 0.5625000000000001 * rsp_a.matrix)
def test_response_set_weighting_with_reference_time():
# Now repeat the same tests but using a reference time
ref_time = 123.456
(
[rsp_a, rsp_b],
exposure_getter,
counts_getter,
) = get_matrix_set_elements_with_coverage(reference_time=ref_time)
rsp_set = InstrumentResponseSet(
[rsp_a, rsp_b], exposure_getter, counts_getter, reference_time=ref_time
)
assert rsp_set.reference_time == ref_time
weighted_matrix = rsp_set.weight_by_exposure("5.0 - 25.0")
assert np.allclose(weighted_matrix.matrix, 0.625 * rsp_a.matrix)
weighted_matrix = rsp_set.weight_by_counts("0.0 - 30.0")
assert np.allclose(weighted_matrix.matrix, 0.5555555555555555 * rsp_a.matrix)
weighted_matrix = rsp_set.weight_by_counts("5.0 - 25.0")
assert np.allclose(weighted_matrix.matrix, 0.5625000000000001 * rsp_a.matrix)
#!/usr/bin/env python
# Copyright (c) 2019, IRIS-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import struct
import unittest
import numpy
from awkward import *
class Test(unittest.TestCase):
def runTest(self):
pass
def test_object_nbytes(self):
class Point(object):
def __init__(self, array):
self.x, self.y, self.z = array
def __repr__(self):
return "<Point {0} {1} {2}>".format(self.x, self.y, self.z)
def __eq__(self, other):
return isinstance(other, Point) and self.x == other.x and self.y == other.y and self.z == other.z
assert isinstance(ObjectArray([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6], [7.7, 8.8, 9.9]], Point).nbytes, int)
def test_object_floats(self):
class Point(object):
def __init__(self, array):
self.x, self.y, self.z = array
def __repr__(self):
return "<Point {0} {1} {2}>".format(self.x, self.y, self.z)
def __eq__(self, other):
return isinstance(other, Point) and self.x == other.x and self.y == other.y and self.z == other.z
a = ObjectArray([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6], [7.7, 8.8, 9.9]], Point)
assert a[0] == Point([1.1, 2.2, 3.3])
assert a[1] == Point([4.4, 5.5, 6.6])
assert a[2] == Point([7.7, 8.8, 9.9])
assert a[:].tolist() == [Point([1.1, 2.2, 3.3]), Point([4.4, 5.5, 6.6]), Point([7.7, 8.8, 9.9])]
assert a[::2].tolist() == [Point([1.1, 2.2, 3.3]), Point([7.7, 8.8, 9.9])]
assert a[[True, False, True]].tolist() == [Point([1.1, 2.2, 3.3]), Point([7.7, 8.8, 9.9])]
assert a[[2, 0]].tolist() == [Point([7.7, 8.8, 9.9]), Point([1.1, 2.2, 3.3])]
def test_object_bytes(self):
class Point(object):
def __init__(self, bytes):
self.x, self.y, self.z = struct.unpack("ddd", bytes)
def __repr__(self):
return "<Point {0} {1} {2}>".format(self.x, self.y, self.z)
def __eq__(self, other):
return isinstance(other, Point) and self.x == other.x and self.y == other.y and self.z == other.z
        # Reconstructed tail of the truncated test (assumption): view the nine
        # float64 values as three 24-byte rows so each Point unpacks "ddd".
        a = ObjectArray(numpy.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]).view(numpy.uint8).reshape(3, -1), Point)
import numpy as np
from sklearn.metrics import accuracy_score
import multiprocessing as mp
import sys, os
sys.path.append(os.getcwd())
try:
from quadboost.weak_learner import _WeakLearnerBase
from quadboost.utils import split_int, timed, ComparableMixin
from quadboost.utils.multiprocessing_utils import PicklableExceptionWrapper, SafeQueue, parallel_processes
except ModuleNotFoundError:
from weak_learner import _WeakLearnerBase
from utils import split_int, timed, ComparableMixin
from utils.multiprocessing_utils import PicklableExceptionWrapper, SafeQueue, parallel_processes
class MulticlassDecisionStump(_WeakLearnerBase):
"""
Decision stump classifier with innate multiclass algorithm.
It finds a stump to partition examples into 2 parts which minimizes the quadratic multiclass risk.
    It assigns a confidence rate (scalar) to each class for each partition.
Parallelization is implemented for the 'fit' method.
"""
def fit(self, X, Y, W=None, n_jobs=1, sorted_X=None, sorted_X_idx=None):
"""
Fits the model by finding the best decision stump using the algorithm implemented in the StumpFinder class.
Args:
X (Array of shape (n_examples, ...)): Examples
Y (Array of shape (n_examples,) or (n_examples, n_classes)): Labels for the examples. If an encoder was provided at construction, Y should be a vector to be encoded.
W (Array of shape (n_examples, n_classes)): Weights of each examples according to their class. Should be None if Y is not encoded.
n_jobs (int, optional, default=1): Number of processes to execute in parallel to find the stump.
sorted_X (Array of shape (n_examples, ...), optional, default=None): Sorted examples along axis 0. If None, 'X' will be sorted, else it will not.
sorted_X_idx (Array of shape (n_examples, ...), optional, default=None): Indices of the sorted examples along axis 0 (corresponds to argsort). If None, 'X' will be argsorted, else it will not.
Returns self
"""
if self.encoder is not None:
Y, W = self.encoder.encode_labels(Y)
if sorted_X is None or sorted_X_idx is None:
sorted_X, sorted_X_idx = self.sort_data(X)
stump = self.find_stump(sorted_X, sorted_X_idx, Y, W, n_jobs)
for attr in ['feature', 'confidence_rates', 'stump', 'stump_idx', 'risks', 'risk']:
setattr(self, attr, getattr(stump, attr))
return self
def find_stump(self, sorted_X, sorted_X_idx, Y, W, n_jobs):
stump_finder = StumpFinder(sorted_X, sorted_X_idx, Y, W)
stumps_queue = SafeQueue()
if n_jobs > 1: # Need parallelization
n_features = sorted_X.shape[1]
args_iter = ((stumps_queue, sub_idx) for sub_idx in split_int(n_features, n_jobs))
parallel_processes(stump_finder.safe_find_stump, args_iter)
else: # No parallelization
stump_finder.find_stump(stumps_queue)
return min(stump for stump in stumps_queue)
def predict(self, X):
n_partitions, n_classes = self.confidence_rates.shape
n_examples = X.shape[0]
Y_pred = np.zeros((n_examples, n_classes))
for i, partition in enumerate(self.partition_generator(X)):
Y_pred[i] = self.confidence_rates[partition]
return Y_pred
def partition_generator(self, X):
"""
Partition examples into 2 sets denoted by 0 and 1 in an lazy iterator fashion.
"""
n_examples = X.shape[0]
for x in X.reshape((n_examples, -1)):
yield int(x[self.feature] > self.stump)
def partition(self, X, dtype=bool):
return np.array([p for p in self.partition_generator(X)], dtype=dtype)
@staticmethod
def sort_data(X):
"""
Necessary sorting operations on the data to find the optimal stump. It is useful to sort the data prior to boost to speed up the algorithm, since the sorting step will not be made at each round.
'sorted_X' and 'sorted_X_idx' should be passed as keyword arguments to the 'fit' method to speed up the algorithm.
"""
X = X.reshape((X.shape[0],-1))
n_examples, n_features = X.shape
sorted_X_idx = np.argsort(X, axis=0)
sorted_X = X[sorted_X_idx, range(n_features)]
return sorted_X, sorted_X_idx
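# Usage sketch (hedged: `my_label_encoder` stands in for whichever quadboost
# label encoder you actually use; X is (n_examples, n_features), Y holds
# integer class labels):
#
#     stump = MulticlassDecisionStump(encoder=my_label_encoder)
#     sorted_X, sorted_X_idx = MulticlassDecisionStump.sort_data(X)
#     stump.fit(X, Y, n_jobs=4, sorted_X=sorted_X, sorted_X_idx=sorted_X_idx)
#     Y_scores = stump.predict(X)      # (n_examples, n_classes) confidences
#     Y_pred = Y_scores.argmax(axis=1)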
class StumpFinder:
"""
Implements the algorithm to find the stump. It is separated from the class MulticlassDecisionStump so that it can be pickled when parallelized with 'multiprocessing' (which uses pickle).
"""
def __init__(self, sorted_X, sorted_X_idx, Y, W):
# multiprocessing Arrays are shared between processed to alleviate pickling
self.sorted_X = np.ctypeslib.as_array(mp.RawArray('d', sorted_X.size)).reshape(sorted_X.shape)
self.sorted_X[:] = sorted_X
self.sorted_X_idx = np.ctypeslib.as_array(mp.RawArray('i', sorted_X_idx.size)).reshape(sorted_X_idx.shape)
self.sorted_X_idx[:] = sorted_X_idx
self.zeroth_moments = np.ctypeslib.as_array(mp.RawArray('d', W.size)).reshape(W.shape)
self.zeroth_moments[:] = W
self.first_moments = np.ctypeslib.as_array(mp.RawArray('d', W.size)).reshape(W.shape)
self.first_moments[:] = W*Y
self.second_moments = np.ctypeslib.as_array(mp.RawArray('d', W.size)).reshape(W.shape)
self.second_moments[:] = self.first_moments*Y
# # multiprocessing Arrays are shared between processed to alleviate pickling
# self.X_shape = sorted_X.shape
# self.X_idx_shape = sorted_X_idx.shape
# self.moments_shape = W.shape
# self.sorted_X = mp.Array('d', sorted_X.reshape(-1))
# self.sorted_X_idx = mp.Array('i', sorted_X_idx.reshape(-1))
# self.zeroth_moments = mp.Array('d', W.reshape(-1))
# self.first_moments = mp.Array('d', (W*Y).reshape(-1))
# self.second_moments = mp.Array('d', (W*Y*Y).reshape(-1))
def safe_find_stump(self, stumps_queue, sub_idx=(None,)):
"""
Handles exception raised in a subprocess so the script will not hang indefinitely.
This is basically a decorator for find_stump, but parallelizing requires pickling, and decorators cannot be pickled.
"""
with stumps_queue: # Context manager handles exceptions
self.find_stump(stumps_queue, sub_idx)
def find_stump(self, stumps_queue, sub_idx=(None,)):
"""
        Algorithm to find the best stump within the sub-array of X specified by the bounds 'sub_idx'.
"""
X = self.sorted_X[:,slice(*sub_idx)]
X_idx = self.sorted_X_idx[:,slice(*sub_idx)]
_, n_classes = self.zeroth_moments.shape
n_examples, n_features = X.shape
n_partitions = 2
n_moments = 3
moments = np.zeros((n_moments, n_partitions, n_features, n_classes))
# At first, all examples are in partition 1
# Moments are not normalized so they can be computed cumulatively
moments[0,1] = np.sum(self.zeroth_moments[X_idx[:,0]], axis=0)
moments[1,1] = np.sum(self.first_moments[X_idx[:,0]], axis=0)
moments[2,1] = np.sum(self.second_moments[X_idx[:,0]], axis=0)
risks = self.compute_risks(moments) # Shape (n_partitions, n_features)
best_stump = Stump(risks, moments)
for i, row in enumerate(X_idx[:-1]):
self.update_moments(moments, row)
possible_stumps = ~np.isclose(X[i+1] - X[i], 0)
if possible_stumps.any():
risk = self.compute_risks(moments[:,:,possible_stumps,:])
best_stump.update(risk, moments, possible_stumps, stump_idx=i+1)
best_stump.compute_stump_value(X)
best_stump.feature += sub_idx[0] if sub_idx[0] is not None else 0
stumps_queue.append(best_stump)
def update_moments(self, moments, row_idx):
moments_update = np.array([self.zeroth_moments[row_idx],
self.first_moments[row_idx],
self.second_moments[row_idx]])
moments[:,0] += moments_update
moments[:,1] -= moments_update
def compute_risks(self, moments):
"""
Computes the risks for each partitions for every features.
"""
moments[np.isclose(moments,0)] = 0
with np.errstate(divide='ignore', invalid='ignore'):
# We could use
# np.divide(moments[1]**2, moments[0], where=~np.isclose(moments[0]))
# However, the buffer size is not big enough for several examples and the resulting division is not done correctly
            normalized_m1 = np.nan_to_num(moments[1]**2/moments[0])
        # Reconstructed tail (assumption): the quadratic multiclass risk per
        # partition and feature sums m2 - m1**2/m0 over the classes, which
        # matches the "Shape (n_partitions, n_features)" comment in find_stump.
        return np.sum(moments[2] - normalized_m1, axis=-1)
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from typing import Dict, List, Tuple
import gym
import numpy as np
import torch as th
from torch import nn
from bisk import BiskSingleRobotEnv
from bisk.features import make_featurizer
from hucc.envs.goal_spaces import g_goal_spaces, g_delta_feats
log = logging.getLogger(__name__)
class CtrlgsPreTrainingEnv(BiskSingleRobotEnv):
'''
A multi-task, goal-based pre-training environment.
The environment is "empty" except for a single robot that can be controlled.
The "tasks" consider the control of one or more observed features -- those
will be sampled according to `feature_dist` (which can also be changed after
constructing the environment). For each task (combination of features), a
goal space is constructed using `psi` and `offset`, and goals are sampled in
this goal space in [-1,1].
A continual version of this environment can be obtained with a
    `hard_reset_interval` of > 1. This parameter specifies the frequency at
which the simulation is reset to its initial state. Other resets will simply
result in a new goal to be sampled.
'''
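    # Illustrative configuration (hypothetical feature indices): control
    # goal-space features 3 and 5, alone or jointly, with equal probability:
    #   feature_dist = {'3': 1.0, '5': 1.0, '3,5': 1.0}
    #   task_map = {'3': 0, '5': 1}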
def __init__(
self,
robot: str,
features: str,
feature_dist: Dict[str, float],
task_map: Dict[str, int],
precision: float = 0.1,
idle_steps: int = 0,
max_steps: int = 20,
backproject_goal: bool = True,
reward: str = 'potential',
hard_reset_interval: int = 1,
reset_p: float = 0.0,
resample_features: str = 'hard',
full_episodes: bool = False,
allow_fallover: bool = False,
fallover_penalty: float = -1.0,
implicit_soft_resets: bool = False,
goal_sampling: str = 'random',
ctrl_cost: float = 0.0,
normalize_gs_observation: bool = False,
zero_twist_goals: bool = False,
relative_frame_of_reference: bool = False,
):
# XXX hack to have DMC robots operate with their "native" sensor input
super().__init__(
robot=robot,
features='joints'
if features not in ('sensorsnoc', 'native')
else features,
allow_fallover=allow_fallover,
)
self.goal_featurizer = make_featurizer(
features, self.p, self.robot, 'robot'
)
gsdim = self.goal_featurizer.observation_space.shape[0]
self.goal_space = g_goal_spaces[features][robot]
# Construct goal space
self.psi, self.offset = self.abstraction_matrix(robot, features, gsdim)
self.psi_1 = np.linalg.inv(self.psi)
self.offset_1 = -np.matmul(self.offset, self.psi_1)
assert len(self.observation_space.shape) == 1
assert self.psi.shape == (gsdim, gsdim)
assert self.offset.shape == (gsdim,)
self.precision = precision
self.idle_steps = idle_steps
self.max_steps = max_steps
self.backproject_goal = backproject_goal
self.reward = reward
self.hard_reset_interval = hard_reset_interval
self.reset_p = reset_p
self.resample_features = resample_features
self.full_episodes = full_episodes
self.fallover_penalty = fallover_penalty
self.ctrl_cost = ctrl_cost
self.implicit_soft_resets = implicit_soft_resets
self.goal_sampling = goal_sampling
self.normalize_gs_observation = normalize_gs_observation
self.zero_twist_goals = zero_twist_goals
self.relative_frame_of_reference = relative_frame_of_reference
self.task_idx = [0] * len(task_map)
for k, v in task_map.items():
self.task_idx[v] = int(k)
if len(self.goal_space['twist_feats']) > 0:
negpi = self.proj(
-np.pi * np.ones(gsdim), self.goal_space['twist_feats']
)
pospi = self.proj(
np.pi * np.ones(gsdim), self.goal_space['twist_feats']
)
if not np.allclose(-negpi, pospi):
                # This could be supported by more elaborate delta computation
# logic in step()
raise ValueError('Twist feature ranges not symmetric')
self.proj_pi = pospi
if backproject_goal:
all_feats = list(range(gsdim))
gmin_back = self.backproj(-np.ones(gsdim), all_feats)
gmax_back = self.backproj(np.ones(gsdim), all_feats)
goal_space = gym.spaces.Box(gmin_back, gmax_back)
else:
max_features = max(
(
len(f.replace('+', ',').split(','))
for f in feature_dist.keys()
)
)
goal_space = gym.spaces.Box(
low=-2, high=2, shape=(max_features,), dtype=np.float32
)
self.task_map = {int(k): v for k, v in task_map.items()}
# Hide position-related invariant features from the observation, i.e.
# X/Y or ant X for cheetah
delta_feats = g_delta_feats[robot]
self.obs_mask = list(range(self.observation_space.shape[0]))
for d in delta_feats:
self.obs_mask.remove(d)
self.observation_space = gym.spaces.Dict(
{
'observation': gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(len(self.obs_mask),),
dtype=np.float32,
),
'desired_goal': goal_space,
'task': gym.spaces.Box(
low=0, high=1, shape=(len(self.task_map),), dtype=np.float32
),
'gs_observation': self.goal_featurizer.observation_space,
}
)
self._do_hard_reset = True
self._reset_counter = 0
self.set_feature_dist(feature_dist)
# Current features
self._features: List[int] = []
self._features_s = ''
self._feature_mask = np.zeros(len(self.task_map))
self.model = None
self.gamma = 1.0
def set_goal_dims(self, dims):
self.set_feature_dist(dims)
def set_model(self, model: nn.Module, gamma: float):
self.model = model
self.gamma = gamma
def set_feature_dist(self, feature_dist: Dict[str, float]):
# Deduplicate features from combinations
fdist: Dict[str, float] = {}
self._feature_strings = {}
for fs, p in feature_dist.items():
ufeats = []
for f in fs.replace('+', ',').split(','):
if not f in ufeats:
ufeats.append(f)
fdist[','.join(ufeats)] = p
self._feature_strings[','.join(ufeats)] = fs.replace('+', ',')
if not self.backproject_goal:
# Check that maximum number of features doesn't change
max_features = max((len(fs.split(',')) for fs in fdist.keys()))
assert (
self.observation_space['desired_goal'].shape[0] == max_features
)
for fs in fdist.keys():
for fi in map(int, fs.split(',')):
assert fi in self.task_map
self._feature_dist_v = [k for k, v in fdist.items()]
s = sum([v for k, v in fdist.items()])
self._feature_dist_p = [v / s for k, v in fdist.items()]
def proj(self, obs: np.ndarray, feats: List[int]) -> np.ndarray:
return np.matmul(obs, self.psi[feats].T) + self.offset[feats]
def backproj(self, obs_w: np.ndarray, feats: List[int]) -> np.ndarray:
s_p = np.matmul(obs_w, self.psi_1[feats]) + self.offset_1
return s_p[self.task_idx]
def seed(self, seed=None):
self._do_hard_reset = True
return super().seed(seed)
def get_observation(self):
obs = super().get_observation()[self.obs_mask]
gs_obs = self.goal_featurizer()
if self.backproject_goal:
s = gs_obs[self.task_idx]
bpg = self.backproj(self.goal, self._features)
g = bpg - s
if len(self.goal_space['twist_feats']) > 0:
twf = [self.task_map[f] for f in self.goal_space['twist_feats']]
g[twf] = (
np.remainder((bpg[twf] - s[twf]) + np.pi, 2 * np.pi) - np.pi
)
g *= self._feature_mask
else:
if len(self.goal_space['twist_feats']) > 0:
raise NotImplementedError()
gs = self.proj(gs_obs, self._features)
g = np.zeros(self.observation_space['desired_goal'].shape)
g[0 : len(self.goal)] = self.goal - gs
if self.normalize_gs_observation:
# XXX if the goal space is defined for fewer features than
            # gs_observation, this will yield bogus values for undefined
# ones.
gs_obs = self.proj(gs_obs, np.arange(0, len(gs_obs)))
return {
'observation': obs,
'desired_goal': g,
'task': self._feature_mask,
'gs_observation': gs_obs,
}
def hard_reset(self):
# Disable contacts during reset to prevent potentially large contact
# forces that can be applied during initial positioning of bodies in
# reset_state().
with self.p.model.disable('contact'):
self.p.reset()
self.reset_state()
for _ in range(self.idle_steps):
self.p.set_control(np.zeros_like(self.p.data.ctrl))
self.step_simulation()
if self.idle_steps <= 0:
self.step_simulation()
def sample_features(self) -> List[int]:
fs = self.np_random.choice(
self._feature_dist_v, 1, p=self._feature_dist_p
)[0]
return list(map(int, fs.split(',')))
def sample_goals_random(self, N: int = 1) -> np.ndarray:
gstate = self.proj(self.goal_featurizer(), self._features)
goal = self.np_random.uniform(
low=-1.0, high=1.0, size=(N, len(self._features))
)
# For delta features we offset the goal by the current state to get
# meaningful deltas afterwards
for i, f in enumerate(self._features):
if f in self.goal_space['delta_feats']:
goal[:, i] += gstate[i]
if self.zero_twist_goals and f in self.goal_space['twist_feats']:
goal[:, i] = 0
return goal
def sample_goal_using_r(self) -> np.ndarray:
N = 128
cand = self.sample_goals_random(N=N)
if self.backproject_goal:
s = self.goal_featurizer()[self.task_idx]
gb = (np.matmul(cand, self.psi_1[self._features]) + self.offset_1)[
:, self.task_idx
]
g = gb - s
g *= self._feature_mask
else:
gs = self.proj(self.goal_featurizer(), self._features)
g = np.zeros((N, self.observation_space['desired_goal'].shape[0]))
g[:, 0 : len(self._features)] = cand - gs
obs = super().get_observation()[self.obs_mask]
inp = {
'observation': th.tensor(obs, dtype=th.float32)
.unsqueeze(0)
.expand(N, obs.shape[0]),
'desired_goal': th.tensor(g, dtype=th.float32),
'task': th.tensor(self._feature_mask, dtype=th.float32)
.unsqueeze(0)
.expand(N, self._feature_mask.shape[0]),
}
with th.no_grad():
action = self.model.pi(inp).mean
inp['action'] = action
with th.no_grad():
r = self.model.reachability(inp).clamp(0, 1)
if self.goal_sampling in {'r2', 'reachability2'}:
# Favor samples reachable with 50% probability
dist = th.tanh(2 * (1 - th.abs(r * 2 - 1) + 1e-1))
else:
# Favor unreachable samples
dist = 1 / (r.view(-1) + 0.1)
return cand[th.multinomial(dist, 1).item()]
    def sample_goal_using_q(self) -> np.ndarray:
N = 128
cand = self.sample_goals_random(N=N)
if self.backproject_goal:
s = self.goal_featurizer()[self.task_idx]
gb = (np.matmul(cand, self.psi_1[self._features]) + self.offset_1)[
:, self.task_idx
]
g = gb - s
g *= self._feature_mask
else:
gs = self.proj(self.goal_featurizer(), self._features)
g = np.zeros((N, self.observation_space['desired_goal'].shape[0]))
g[:, 0 : len(self._features)] = cand - gs
obs = super().get_observation()[self.obs_mask]
inp = {
'observation': th.tensor(obs, dtype=th.float32)
.unsqueeze(0)
.expand(N, obs.shape[0]),
'desired_goal': th.tensor(g, dtype=th.float32),
'task': th.tensor(self._feature_mask, dtype=th.float32)
.unsqueeze(0)
.expand(N, self._feature_mask.shape[0]),
}
with th.no_grad():
action = self.model.pi(inp).mean
inp['action'] = action
with th.no_grad():
q = th.min(self.model.q(inp), dim=-1).values
ctrl_cost = (
self.max_steps
* self.ctrl_cost
* (0.25 * self.action_space.shape[0])
)
wobs = self.proj(obs, self._features)
dist = np.linalg.norm(cand - wobs, ord=2, axis=1)
min_ret = (dist - ctrl_cost) * self.gamma ** self.max_steps
slack = q - min_ret
dist = 1 / (slack - slack.min() + 1)
return cand[th.multinomial(dist, 1).item()]
def reset(self):
need_hard_reset = self._do_hard_reset or (
self.hard_reset_interval > 0
and self._reset_counter % self.hard_reset_interval == 0
)
# Reset
if need_hard_reset:
self.hard_reset()
self._reset_counter = 0
if self.relative_frame_of_reference:
self.goal_featurizer.set_frame_of_reference()
# Sample features and goal
resample_features = False
if need_hard_reset:
resample_features = True
if self.resample_features == 'soft':
resample_features = True
elif self.resample_features.startswith('soft'):
freq = int(self.resample_features[4:])
resample_features = self._reset_counter % freq == 0
if resample_features:
self._features = self.sample_features()
self._features_s = self._feature_strings[
','.join(map(str, self._features))
]
self._feature_mask *= 0
for f in self._features:
self._feature_mask[self.task_map[f]] = 1.0
self.goal = self.sample_goals_random()[0]
if self.goal_sampling in {'q', 'q_value'}:
if self.model:
self.goal = self.sample_goal_using_q()
elif self.goal_sampling in {'r', 'reachability', 'r2', 'reachability2'}:
if self.model:
self.goal = self.sample_goal_using_r()
elif self.goal_sampling not in {'random', 'uniform'}:
raise ValueError(
f'Unknown goal sampling method "{self.goal_sampling}"'
)
def distance_to_goal():
gs = self.proj(self.goal_featurizer(), self._features)
d = self.goal - gs
for i, f in enumerate(self._features):
if f in self.goal_space['twist_feats']:
# Wrap around projected pi/-pi for distance
d[i] = (
np.remainder(
(self.goal[i] - gs[i]) + self.proj_pi,
2 * self.proj_pi,
)
- self.proj_pi
)
return np.linalg.norm(d, ord=2)
self._d_initial = distance_to_goal()
self._do_hard_reset = False
self._reset_counter += 1
self._step = 0
return self.get_observation()
def step(self, action):
def distance_to_goal():
gs = self.proj(self.goal_featurizer(), self._features)
d = self.goal - gs
for i, f in enumerate(self._features):
if f in self.goal_space['twist_feats']:
# Wrap around projected pi/-pi for distance
d[i] = (
np.remainder(
(self.goal[i] - gs[i]) + self.proj_pi,
2 * self.proj_pi,
)
- self.proj_pi
)
return np.linalg.norm(d, ord=2)
d_prev = distance_to_goal()
next_obs, reward, done, info = super().step(action)
d_new = distance_to_goal()
info['potential'] = d_prev - d_new
info['distance'] = d_new
info['reached_goal'] = info['distance'] < self.precision
if self.reward == 'potential':
reward = info['potential']
elif self.reward == 'potential2':
reward = d_prev - self.gamma * d_new
elif self.reward == 'potential3':
reward = 1.0 if info['reached_goal'] else 0.0
reward += d_prev - self.gamma * d_new
elif self.reward == 'potential4':
reward = (d_prev - d_new) / self._d_initial
elif self.reward == 'distance':
reward = -info['distance']
elif self.reward == 'sparse':
reward = 1.0 if info['reached_goal'] else 0.0
else:
raise ValueError(f'Unknown reward: {self.reward}')
reward -= self.ctrl_cost * np.square(action).sum()
info['EpisodeContinues'] = True
if info['reached_goal'] == True and not self.full_episodes:
done = True
info['time'] = self._step
self._step += 1
if self._step >= self.max_steps:
done = True
elif (
not info['reached_goal'] and self.np_random.random() < self.reset_p
):
info['RandomReset'] = True
done = True
if not self.allow_fallover and self.fell_over():
reward = self.fallover_penalty
done = True
self._do_hard_reset = True
info['reached_goal'] = False
info['fell_over'] = True
if done and (
self._do_hard_reset
or (self._reset_counter % self.hard_reset_interval == 0)
):
del info['EpisodeContinues']
if done:
info['LastStepOfTask'] = True
if done and 'EpisodeContinues' in info and self.implicit_soft_resets:
need_hard_reset = self._do_hard_reset or (
self.hard_reset_interval > 0
and self._reset_counter % self.hard_reset_interval == 0
)
if not need_hard_reset:
# Do implicit resets, let episode continue
next_obs = self.reset()
done = False
del info['EpisodeContinues']
info['SoftReset'] = True
info['features'] = self._features_s
return next_obs, reward, done, info
@staticmethod
def feature_controllable(robot: str, features: str, dim: int) -> bool:
if not features in g_goal_spaces:
            raise ValueError(f'Unsupported feature space: {features}')
if not robot in g_goal_spaces[features]:
raise ValueError(f'Unsupported robot: {robot}')
gs = g_goal_spaces[features][robot]
if dim < 0:
raise ValueError(f'Feature {dim} out of range')
if dim >= len(gs['min']):
return False
# Return whether feature is controllable, i.e. range is non-zero
return gs['min'][dim] != gs['max'][dim]
@staticmethod
def abstraction_matrix(
robot: str, features: str, sdim: int
) -> Tuple[np.array, np.array]:
if not features in g_goal_spaces:
            raise ValueError(f'Unsupported feature space: {features}')
if not robot in g_goal_spaces[features]:
raise ValueError(f'Unsupported robot: {robot}')
gs = g_goal_spaces[features][robot]
        gmin = np.array(gs['min'])
        gmax = np.array(gs['max'])
        # Reconstructed tail (assumption): a diagonal affine map that sends
        # [gmin, gmax] onto [-1, 1] per feature, padded with unit ranges for
        # undefined or uncontrollable features so that psi stays invertible.
        if len(gmin) < sdim:
            gmin = np.concatenate([gmin, np.zeros(sdim - len(gmin))])
            gmax = np.concatenate([gmax, np.zeros(sdim - len(gmax))])
        rng = np.where(gmax > gmin, gmax - gmin, 1.0)
        return np.diag(2.0 / rng), -(gmax + gmin) / rng
# Generate order book
# Orders are generated randomly each month of the year to reflect seasonality
# For training, the orders will keep the same probability distribution for
# each training episode, but be stochastically regenerated for each new
# episode. This will keep the statistics of each product consistent.
import numpy as np
import calendar
import string
import datetime
from .demand_utils import softmax
from argparse import ArgumentTypeError
def str2bool(argument):
if argument.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif argument.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise ArgumentTypeError('Boolean value expected.')
def get_default_seasonal_demand_settings(settings):
'''
Inputs
=========================================================================
settings: dictionary of experimental settings
Outputs
=========================================================================
settings: dictionary of experimental settings which includes demand model
'''
lead_time = 7 # Days
std_lead_time = 2 # Days
vsm_mean = 40 # Dollars
vsm_std = 2 # Dollars
load_level = 1 # Sets ratio between total demand and total theoretical supply
defaults = {
'ORDER_SIZE': 25, # MT
'MEAN_LEAD_TIME': lead_time*24 if settings['BASE_TIME_INTERVAL']=='HOUR'
else lead_time,
'STD_LEAD_TIME': std_lead_time*24 if settings['BASE_TIME_INTERVAL']=='HOUR'
else std_lead_time,
'VAR_STD_MARGIN_MEAN': vsm_mean,
'VAR_STD_MARGIN_STD': vsm_std,
        'LOAD_LEVEL': load_level,
'FORECAST': 'UNIFORM',
'FORECAST_ACCURACY': 3
}
for key in defaults.keys():
if key not in settings.keys():
settings[key] = defaults[key]
# elif key == 'FORECAST':
# settings[key] = str2bool(str(settings[key]))
elif defaults[key] is not None:
settings[key] = type(defaults[key])(settings[key])
return settings
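# Usage sketch (assumption: the experiment config provides at least
# BASE_TIME_INTERVAL; every other key falls back to the defaults above):
#   settings = get_default_seasonal_demand_settings({'BASE_TIME_INTERVAL': 'HOUR'})
#   settings['MEAN_LEAD_TIME']   # -> 168 (7 days expressed in hours)
#   settings['ORDER_SIZE']       # -> 25 MT per order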
def generate_seasonal_orders(env):
# Stochastically generate order book
# Check to see if order statistics already exist, if not
# generate them
if env.order_statistics is None:
# Average daily production
avg_daily_prod = env.product_data[
:,env.product_data_cols.index('min_run_time')].astype(float).mean() * env.product_data[
:,env.product_data_cols.index('run_rate')].astype(float).mean()
# Average yearly production
avg_yearly_prod = avg_daily_prod * env.n_days
# Adjust to order load level to account for higher demand years or lower
yearly_demand_volume = env.settings['LOAD_LEVEL'] * avg_yearly_prod
# Generate volume shares for each product using softmax
# on standard normal draws
x = np.random.standard_normal(env.n_products)
prod_volume_shares = softmax(x)
# Add some seasonality to each product
seasonality_offset = np.random.standard_normal(1)
# Take absolute value to ensure that products follow a similar
# demand pattern
amplitude = np.abs(np.random.standard_normal(env.n_products))
months = np.linspace(0, 2 * np.pi, 12)[:env.n_months]
demand = np.sin(months + seasonality_offset).reshape(-1, 1) * amplitude
monthly_prod_volume_shares = (1 + demand) * prod_volume_shares
# Normalize everything so that each month sums to 1
# total_monthly_demand determines how much total volume is to be shipped
# in that month
monthly_demand_share = softmax(monthly_prod_volume_shares.sum(axis=1))
total_monthly_demand = monthly_demand_share * yearly_demand_volume
# monthly_product_prob determines the probability that an order will be
# shipped for a given product in a given month
monthly_product_prob = softmax(demand, axis=1).T
# Generate orders for each month
num_orders_per_month = np.round(total_monthly_demand /
env.settings['ORDER_SIZE'], 0).astype(int)
env.order_statistics = [monthly_product_prob, num_orders_per_month]
# Build order book
# doc_num = 0 reserved for forecasted orders
doc_num = 1
gen_order_placeholder = np.zeros((1, 8), dtype="int32")
start_date = env.start_time
year = str(start_date.year)
start_date_np = np.datetime64(start_date, "D")
products = env.product_data[:,2].astype(int)
# Loop through each month to generate orders
# TODO: this model relies on specific order statistic size.
# e.g. if the sim starts in Feb, we'll get a key error because
# the month number doesn't match the order_statistics index.
for i in range(env.order_statistics[0].shape[0]):
# Number of orders per month
n_orders = env.order_statistics[1][i]
if n_orders != 0:
# Get the month
month = i + 1
# Convert the month to a string
if month < 10:
month_str = "0" + str(int(month))
else:
month_str = str(int(month))
# Get the first of the month
month_start = np.datetime64(year + "-" + month_str + "-01", "D")
# Generate order numbers
doc_nums = np.array([num for num in range(doc_num, int(doc_num + n_orders))])
# Calculate doc creation dates and planned gi dates
# TODO: KeyError arises during implementation runs. Calling continue because
# the demand generated here should not matter as we'll be loading demand from
# a demand file. May cause other issues, however.
try:
ship_dates = np.array(env.shipping_dict[month])
except KeyError:
if env.settings['TRAIN']:
raise KeyError("month {} not found in env.shipping_dict".format(month))
else:
continue
# Sample over available shipment dates for the full month to avoid
# large bunches of orders for partial months
if min(ship_dates) > 4 or max(ship_dates) < 26:
last_day = calendar.monthrange(int(year), month)[1]
ship_dates = np.arange(1, last_day + 1)
for d in ship_dates:
sim_day = (d+month_start-start_date_np).astype(int)
if sim_day not in env.sim_day_to_date:
env.sim_day_to_date[sim_day] = [month, d]
if env.settings['WEEKEND_SHIPMENTS'] == False:
# Filter weekends
weekends = [i[5:7] for i in calendar.monthcalendar(int(year), month)]
weekends = np.array([i for k in weekends for i in k])
ship_dates = ship_dates[np.in1d(ship_dates, weekends,
invert=True)] - 1
planned_gi_dates = np.random.choice(ship_dates - 1, size=n_orders,
replace=True).astype("int")
# Add 1 to ensure no lead times = 0 days
lead_times = np.abs(np.random.normal(
env.settings['MEAN_LEAD_TIME'] - 1,
env.settings['STD_LEAD_TIME'],
size=n_orders)).round(0).astype("int") + 1
# Back calculate the doc create dates based on the lead times
doc_create_dates = planned_gi_dates - lead_times
# Get dates in the year
planned_gi_dates = np.array([(j + month_start) for j in planned_gi_dates])
            doc_create_dates = np.array([(j + month_start) for j in doc_create_dates])
from __future__ import print_function
import ecos
import numpy as np
from scipy import *
import scipy.sparse as sp
c = np.array([-1., -1.])
h = np.array([ 4., 12., 0. , 0.])
bool_idx = [1]
G = sp.csc_matrix( (array([2.0, 3.0, -1.0, 1.0, 4.0, -1.0]),
array([0, 1, 2, 0, 1, 3]),
array([0, 3, 6])) )
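# The (data, row indices, column pointers) triplet above is CSC storage for
#   G = [[ 2.,  1.],
#        [ 3.,  4.],
#        [-1.,  0.],
#        [ 0., -1.]]
# so with h = [4, 12, 0, 0] the constraints read 2x + y <= 4, 3x + 4y <= 12,
# x >= 0 and y >= 0.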
dims = dict()
dims['l'] = 4
sol = ecos.solve(c, G, h, dims, verbose=False, mi_verbose=False, int_vars_idx=bool_idx)
print(sol['x'])
c = np.array([-1., -1.])
h = np.array([ 4., 12., 0. , 0.])
bool_idx = []
G = sp.csc_matrix( (array([2.0, 3.0, -1.0, 1.0, 4.0, -1.0]),
array([0, 1, 2, 0, 1, 3]),
array([0, 3, 6])) )
dims = dict()
dims['l'] = 4
sol = ecos.solve(c, G, h, dims, verbose=False, mi_verbose=False, int_vars_idx=bool_idx)
print(sol['x'])
c = np.array([-1., -1.1])
#%%
import pytest
from numpy.testing import assert_allclose
import numpy as np
import natural_bm.backend.theano_backend as BTH
import natural_bm.backend.numpy_backend as BNP
from natural_bm.backend.common import floatx, set_floatx
#%% Define checks
def check_dtype(var, dtype):
assert var.dtype == dtype
def check_single_tensor_operation(function_name, input_shape, **kwargs):
val = np.random.random(input_shape) - 0.5
xth = BTH.variable(val)
xnp = BNP.variable(val)
_zth = getattr(BTH, function_name)(xth, **kwargs)
zth = BTH.eval(_zth)
znp = BNP.eval(getattr(BNP, function_name)(xnp, **kwargs))
assert zth.shape == znp.shape
assert_allclose(zth, znp, atol=1e-05)
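# Example call (assuming both backends expose a 'sum' with this signature):
#   check_single_tensor_operation('sum', (4, 5), axis=1)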
def check_two_tensor_operation(function_name, x_input_shape,
y_input_shape, **kwargs):
xval = np.random.random(x_input_shape) - 0.5
xth = BTH.variable(xval)
xnp = BNP.variable(xval)
yval = np.random.random(y_input_shape) - 0.5
yth = BTH.variable(yval)
ynp = BNP.variable(yval)
_zth = getattr(BTH, function_name)(xth, yth, **kwargs)
zth = BTH.eval(_zth)
znp = BNP.eval(getattr(BNP, function_name)(xnp, ynp, **kwargs))
assert zth.shape == znp.shape
assert_allclose(zth, znp, atol=1e-05)
def check_composed_tensor_operations(first_function_name, first_function_args,
second_function_name, second_function_args,
input_shape):
''' Creates a random tensor t0 with shape input_shape and compute
t1 = first_function_name(t0, **first_function_args)
t2 = second_function_name(t1, **second_function_args)
    with both the Theano and NumPy backends and ensures the answers match.
'''
val = np.random.random(input_shape) - 0.5
xth = BTH.variable(val)
xnp = BNP.variable(val)
yth = getattr(BTH, first_function_name)(xth, **first_function_args)
ynp = getattr(BNP, first_function_name)(xnp, **first_function_args)
zth = BTH.eval(getattr(BTH, second_function_name)(yth, **second_function_args))
znp = BNP.eval(getattr(BNP, second_function_name)(ynp, **second_function_args))
assert zth.shape == znp.shape
    assert_allclose(zth, znp, atol=1e-05)
# +
import argparse
import os
import pickle
import sys
sys.path.append("..")
import numpy as np
import torchvision
import torchvision.transforms as T
import torch.utils.data as torch_data
from tqdm import tqdm
from models.classifiers import EvalCompoundResNet
# -
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-F', '--function', type=str, required=True, choices=['max_index', 'count_data'])
parser.add_argument('-O', '--output_path', type=str, required=True)
    parser.add_argument('--num_attr', type=int, default=8)
parser.add_argument('--sample_per_category', type=int, default=1e5)
parser.add_argument('--weight_path', type=str, default='/home/u5397696/interpolation/celebA-hq-classifier/')
parser.add_argument('--data_root', type=str, default='/home/u5397696/interpolation/interfacegan/data/tmp')
return parser.parse_args()
def max_index(args):
if not os.path.exists(args.output_path):
raise ValueError(f"{args.output_path} doesn't exist.")
with open(args.output_path, 'rb') as f:
data_index = pickle.load(f)
print(f'#attributes: {len(data_index)}')
max_val = -1e9
for i in range(len(data_index)):
max_p = np.max(data_index[i][0])
        max_n = np.max(data_index[i][1])
        # Reconstructed tail (assumption): track the largest value seen across
        # all attributes and report it.
        max_val = max(max_val, max_p, max_n)
    print(f'max index: {max_val}')
    return max_val
# -*- coding: utf-8 -*-
"""Siamese Network for performing training of a Deep Convolutional
Network for Face Verification on the Olivetti and LFW Faces datasets.
Dependencies:
python 3.4+, numpy>=1.10.4, sklearn>=0.17, scipy>=0.17.0, theano>=0.7.0, lasagne>=0.1, cv2, dlib>=18.18 (only required if using the 'trees' crop mode).
Part of the package siamese_net:
siamese_net/
siamese_net/faces.py
siamese_net/datasets.py
siamese_net/normalization.py
siamese_net/siamese_net.py
Copyright 2016 Kadenze, Inc.
Kadenze(R) and Kannu(R) are Registered Trademarks of Kadenze, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
"""
import sys
import pickle
import os
# base_compiledir = os.path.expandvars("$HOME/.theano/slot-%d" % (os.getpid()))
# os.environ['THEANO_FLAGS'] = "base_compiledir=%s" % base_compiledir
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
import time
import lasagne
# For training the final output network
from sklearn import cross_validation
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
# Custom code for parsing datasets and normalizing images
from datasets import Datasets
from normalization import LCN, ZCA
# plt.style.use('ggplot')
theano.config.floatX = 'float32'
def montage(x):
if x.shape[1] == 1 or x.shape[1] == 3:
num_img = x.shape[0]
num_img_per_dim = np.ceil(np.sqrt(num_img)).astype(int)
montage_img = np.zeros((
num_img_per_dim * x.shape[3],
num_img_per_dim * x.shape[2], x.shape[1]))
else:
num_img_per_dim = np.ceil(np.sqrt(x.shape[1])).astype(int)
montage_img = np.zeros((
num_img_per_dim * x.shape[3],
num_img_per_dim * x.shape[2]))
num_img = x.shape[1]
for img_i in range(num_img_per_dim):
for img_j in range(num_img_per_dim):
if img_i * num_img_per_dim + img_j < num_img:
if x.shape[0] == 1:
montage_img[
img_i * x.shape[3]: (img_i + 1) * x.shape[2],
img_j * x.shape[3]: (img_j + 1) * x.shape[2]
] = np.squeeze(np.squeeze(
x[0, img_i * num_img_per_dim + img_j, ...]
) / (np.max(x[0, img_i * num_img_per_dim + img_j, ...]
) + 1e-15))
else:
montage_img[
img_i * x.shape[3]: (img_i + 1) * x.shape[2],
img_j * x.shape[3]: (img_j + 1) * x.shape[2],
:
] = np.swapaxes(np.squeeze(
x[img_i * num_img_per_dim + img_j, ...]
) / (np.max(x[img_i * num_img_per_dim + img_j, ...]
) + 1e-15), 0, 2)
return montage_img
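# Usage sketch: montage(X) tiles a batch of images into one roughly square
# grid image, e.g. plt.imshow(montage(X_batch), cmap='gray') for inspection.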
def get_image_manifold(images, features, res=64, n_neighbors=5):
'''Creates a montage of the images based on a TSNE
manifold of the associated image features.
'''
from sklearn import manifold
mapper = manifold.SpectralEmbedding()
transform = mapper.fit_transform(features)
nx = int(np.ceil(np.sqrt(len(transform))))
ny = int(np.ceil(np.sqrt(len(transform))))
montage_img = np.zeros((res * nx, res * ny, 3))
from sklearn.neighbors import NearestNeighbors
nn = NearestNeighbors()
nn.fit(transform)
min_x = np.mean(transform[:, 0]) - np.std(transform[:, 0]) * 3.0
max_x = np.mean(transform[:, 0]) + np.std(transform[:, 0]) * 3.0
min_y = np.mean(transform[:, 1]) - np.std(transform[:, 1]) * 3.0
max_y = np.mean(transform[:, 1]) + np.std(transform[:, 1]) * 3.0
for n_i in range(nx):
for n_j in range(ny):
x = min_x + (max_x - min_x) / nx * n_i
y = min_y + (max_y - min_y) / ny * n_j
            idx = nn.kneighbors([[x, y]], n_neighbors=n_neighbors)[1][0][:]
for neighbor_i in idx:
montage_img[
n_i * res: (n_i + 1) * res, n_j * res: (n_j + 1) * res, :] += images[neighbor_i]
montage_img[
n_i * res: (n_i + 1) * res, n_j * res: (n_j + 1) * res, :] /= float(len(idx))
montage_img = montage_img / np.max(montage_img)
return montage_img
def make_image_pairs(X, y, unique_labels):
'''For each person in unique_labels (P people):
    1. combine all matched pairs of the images of that person (N images):
    N_matched = P * (N choose 2)
2. combine all imposter pairs. N_unmatched = (P choose 2) * (N * N)
Returns an array of matched and unmatched images and their targets
------------------------------------------------------------------
X_matched, y_matched, X_unmatched, y_unmatched
where the dimensions of the Xs are (with 2 being each image in the pair):
[(N_matched + N_unmatched) x 2 x W x H]
and ys are
----------
[(N_matched + N_unmatched),]
Args
----
X : TYPE
Description
y : TYPE
Description
unique_labels : TYPE
Description
Deleted Parameters
------------------
X (TYPE) : Description
y (TYPE) : Description
unique_labels (TYPE) : Description
'''
from itertools import combinations
X_pairs_matched = list()
y_pairs_matched = list()
# Iterate over all actual pairs
    # e.g. 32 people * (10 images choose 2) = 32 * 45 = 1440 matched pairs
for person in unique_labels:
# Find images of those people
im_idx = np.where(person == y)[0]
for el in combinations(im_idx, 2):
X_pairs_matched.append(
np.concatenate((X[el[0], ...], X[el[1], ...]),
axis=0)[np.newaxis, ...])
y_pairs_matched.append(1)
X_pairs_unmatched = list()
y_pairs_unmatched = list()
# Iterate over all imposter pairs of people
# (32 choose 2 = 496 people pairs. 496 * 10 * 10 image pairs =
# 49600 imposter pairs)
# (157 * 0.4 = 63), 63 choose 2 = 1953, 1953 * 100 = 195300
for pair in combinations(unique_labels, 2):
# Find images of those people
im1_idx = np.where(pair[0] == y)[0]
im2_idx = np.where(pair[1] == y)[0]
for im1_idx_it in im1_idx:
for im2_idx_it in im2_idx:
X_pairs_unmatched.append(np.concatenate(
(X[im1_idx_it, ...], X[im2_idx_it, ...]),
axis=0)[np.newaxis, ...])
y_pairs_unmatched.append(0)
return (np.concatenate(X_pairs_matched),
np.array(y_pairs_matched),
np.concatenate(X_pairs_unmatched),
np.array(y_pairs_unmatched))
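# Worked example: the Olivetti train split (32 people, 10 images each) yields
# 32 * C(10, 2) = 32 * 45 = 1440 matched pairs and
# C(32, 2) * 10 * 10 = 496 * 100 = 49600 imposter pairs.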
def make_image_pair_idxs(y, unique_labels):
'''For each person in unique_labels (P people):
    1. combine all matched pairs of the images of that person (N images):
    N_matched = P * (N choose 2)
2. combine all imposter pairs. N_unmatched = (P choose 2) * (N * N)
Returns an array of matched and unmatched images and their targets
------------------------------------------------------------------
X_matched, y_matched, X_unmatched, y_unmatched
where the dimensions of the Xs are [(N_matched + N_unmatched) x 2]
(with 2 being the index into X defining the image in the pair),
and ys are [(N_matched + N_unmatched),]
Args
----
y : TYPE
Description
unique_labels : TYPE
Description
Deleted Parameters
------------------
y (TYPE) : Description
unique_labels (TYPE) : Description
'''
from itertools import combinations
X_pairs_matched = list()
y_pairs_matched = list()
# Iterate over all actual pairs
    # e.g. 32 people * (10 images choose 2) = 32 * 45 = 1440 matched pairs
for person in unique_labels:
# Find images of those people
im_idx = np.where(person == y)[0]
for el in combinations(im_idx, 2):
X_pairs_matched.append(np.array([el[0], el[1]])[np.newaxis, ...])
y_pairs_matched.append(1)
X_pairs_unmatched = list()
y_pairs_unmatched = list()
# Iterate over all imposter pairs of people
# (32 choose 2 = 496 people pairs. 496 * 10 * 10 image pairs = 49600 imposter pairs)
# (157 * 0.4 = 63), 63 choose 2 = 1953, 1953 * 100 = 195300
for pair_i, pair in enumerate(combinations(unique_labels, 2)):
# Find images of those people
im1_idx = np.where(pair[0] == y)[0]
im2_idx = np.where(pair[1] == y)[0]
for im1_idx_it in im1_idx:
for im2_idx_it in im2_idx:
X_pairs_unmatched.append(
np.array([im1_idx_it, im2_idx_it])[np.newaxis, ...])
y_pairs_unmatched.append(0)
return (np.concatenate(X_pairs_matched),
np.array(y_pairs_matched),
np.concatenate(X_pairs_unmatched),
np.array(y_pairs_unmatched))
def draw_image_pair(X, y, idx=None):
'''Given X of N x 2 x W x H, and the associated label matrix, plot
a random pair, or a given idx.
Keyword arguments
-----------------
    idx -- Integer - Which pair to show. If none is given, then a
random one is picked. [None]
Args
----
X : TYPE
Description
y : TYPE
Description
idx : TYPE, optional
Description
Deleted Parameters
------------------
X (TYPE) : Description
y (TYPE) : Description
idx (TYPE, optional) : Description
'''
if idx is None:
idx = np.random.randint(len(X) - 2)
if X.shape[1] == 1:
idx = idx + (idx % 2)
fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(10, 8))
if X.shape[1] == 2:
ax1.imshow(np.squeeze(X[idx, 0, ...]), cmap='gray')
ax2.imshow(np.squeeze(X[idx, 1, ...]), cmap='gray')
else:
ax1.imshow(np.squeeze(X[idx, ...]), cmap='gray')
ax2.imshow(np.squeeze(X[idx + 1, ...]), cmap='gray')
ax1.grid(False)
ax2.grid(False)
if y[idx] == 0:
fig.suptitle('Unmatched: %d' % idx, fontsize=30)
else:
fig.suptitle('Matched: %d' % idx, fontsize=30)
def load_pairs(
dataset='lfw',
normalization='LCN',
split=(0.8, 0.1, 0.1),
resolution=(128, 128),
crop_style='none',
crop_factor=1.2,
n_files_per_person=5,
path_to_data=None,
b_load_idxs_only=True,
b_convert_to_grayscale=True):
'''
Given a dataset name, generate the training, validation, and testing
data of matched and unmatched pairs, optionally applying normalization
to each image.
Note this method only returns the idxs of the original dataset.
Parameters
----------
dataset -- string
The name of the dataset to load, 'olivetti', ['lfw'].
normalization -- string
The type of normalization to apply, if any of ['LCN'], 'LCN-',
'ZCA', or '-1:1'.
split -- tuple
        The (train, valid, test) split fractions [(0.8, 0.1, 0.1)].
    n_files_per_person -- int
Number of files to load for each person.
'''
ds = None
if dataset == 'olivetti':
from sklearn.datasets import fetch_olivetti_faces
ds = fetch_olivetti_faces()
# TODO: Apply processing options to olivetti
elif dataset == 'lfw':
ds = Datasets(
crop_style=crop_style,
crop_factor=crop_factor,
resolution=resolution,
n_files_per_person=n_files_per_person,
n_min_files_per_person=(n_files_per_person / 2),
b_convert_to_grayscale=b_convert_to_grayscale
)
ds = ds.get_parsed_dataset(dataset=dataset, path_to_data=path_to_data)
elif dataset.__class__ is dict and 'target' in dataset.keys() and 'images' in dataset.keys():
ds = dataset
else:
raise ValueError(
'Dataset should be either olivetti, lfw, or a dict defining images and target from get_parsed_dataset')
# Split up the dataset into unique targets for train/test,
# making sure not to repeat any subjects between train/test
# Should get 32 subjects train, 8 test, with a 0.8 split
y = ds['target']
total = len(np.unique(y))
train_split = int(total * split[0])
valid_split = train_split + int(total * split[1])
    test_split = int(total * split[2])
unique_train_labels = np.unique(y)[:train_split]
unique_valid_labels = np.unique(y)[train_split:valid_split]
unique_test_labels = np.unique(y)[-test_split:]
# X = (400, 1, 64, 64); y = (400,), 40 subjects w/ 10 examples each of 64
# x 64 pixels
if b_convert_to_grayscale:
X = np.concatenate([img[np.newaxis, np.newaxis, ...]
for img in ds['images']], axis=0)
else:
X = np.concatenate([img[np.newaxis, ...]
for img in ds['images']], axis=0)
print(X.shape)
if normalization == 'LCN':
lcn = LCN(sigma=round(0.0625 * X.shape[2]), subtractive=False)
        lcn.fit(X[:int(len(y) * split[0]), ...])
X = lcn.transform(X)
elif normalization == 'LCN-':
lcn = LCN(sigma=round(0.0625 * X.shape[2]), subtractive=True)
        lcn.fit(X[:int(len(y) * split[0]), ...])
X = lcn.transform(X)
elif normalization == 'ZCA':
zca = ZCA(bias=0.1)
        zca.fit(X[:int(len(y) * split[0]), ...])
X = zca.transform(X)
elif normalization == '-1:1':
for idx in range(len(X)):
X[idx, ...] = (X[idx, ...] - np.min(X[idx, ...])) / \
(np.max(X[idx, ...]) - np.min(X[idx, ...])) * 2.0 - 1.0
if b_load_idxs_only:
# Make pairs of actual and imposter faces, returning the indexes to
# create them
print('train')
X_train_matched, y_train_matched, X_train_unmatched, y_train_unmatched = make_image_pair_idxs(
y, unique_train_labels)
print('valid')
X_valid_matched, y_valid_matched, X_valid_unmatched, y_valid_unmatched = make_image_pair_idxs(
y, unique_valid_labels)
print('test')
X_test_matched, y_test_matched, X_test_unmatched, y_test_unmatched = make_image_pair_idxs(
y, unique_test_labels)
return {
'X': lasagne.utils.floatX(X),
'y': y.astype(np.int32),
'X_train_matched_idxs': X_train_matched.astype(np.int32),
'y_train_matched_idxs': y_train_matched.astype(np.int32),
'X_train_unmatched_idxs': X_train_unmatched.astype(np.int32),
'y_train_unmatched_idxs': y_train_unmatched.astype(np.int32),
'X_valid_matched_idxs': X_valid_matched.astype(np.int32),
'y_valid_matched_idxs': y_valid_matched.astype(np.int32),
'X_valid_unmatched_idxs': X_valid_unmatched.astype(np.int32),
'y_valid_unmatched_idxs': y_valid_unmatched.astype(np.int32),
'X_test_matched_idxs': X_test_matched.astype(np.int32),
'y_test_matched_idxs': y_test_matched.astype(np.int32),
'X_test_unmatched_idxs': X_test_unmatched.astype(np.int32),
'y_test_unmatched_idxs': y_test_unmatched.astype(np.int32)
}
else:
# Make pairs of actual and imposter faces
X_train_matched, y_train_matched, X_train_unmatched, y_train_unmatched = make_image_pairs(
X, y, unique_train_labels)
X_valid_matched, y_valid_matched, X_valid_unmatched, y_valid_unmatched = make_image_pairs(
X, y, unique_valid_labels)
X_test_matched, y_test_matched, X_test_unmatched, y_test_unmatched = make_image_pairs(
X, y, unique_test_labels)
return {
'X_train_matched': lasagne.utils.floatX(X_train_matched),
'y_train_matched': y_train_matched.astype(np.int32),
'X_train_unmatched': lasagne.utils.floatX(X_train_unmatched),
'y_train_unmatched': y_train_unmatched.astype(np.int32),
'X_valid_matched': lasagne.utils.floatX(X_valid_matched),
'y_valid_matched': y_valid_matched.astype(np.int32),
'X_valid_unmatched': lasagne.utils.floatX(X_valid_unmatched),
'y_valid_unmatched': y_valid_unmatched.astype(np.int32),
'X_test_matched': lasagne.utils.floatX(X_test_matched),
'y_test_matched': y_test_matched.astype(np.int32),
'X_test_unmatched': lasagne.utils.floatX(X_test_unmatched),
'y_test_unmatched': y_test_unmatched.astype(np.int32)
}
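# Usage sketch (assumption: LFW images live under `path_to_data` and the
# dependencies for the chosen crop_style are installed):
#   data = load_pairs(dataset='lfw', normalization='LCN', resolution=(64, 64),
#                     n_files_per_person=10, path_to_data='./lfw',
#                     b_load_idxs_only=True)
#   X, y = data['X'], data['y']
#   pair_idxs = data['X_train_matched_idxs']   # (N_matched, 2) indices into X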
def interleave_dataset(X_split, y_split):
'''Take paired observations in the channel dimension and convert them
to alternating batches
----------------------
N x 2 x W x H --> 2*N x 1 x W x H
Args
----
X_split : TYPE
Description
y_split : TYPE
Description
Deleted Parameters
------------------
X_split (TYPE) : Description
y_split (TYPE) : Description
'''
# TODO: account for color images
n_batch, n_channels, n_height, n_width = X_split.shape
n_obs = n_batch * n_channels
n_feats = n_height * n_width
X_interleaved = np.empty((n_obs, n_feats), dtype=theano.config.floatX)
y_interleaved = np.empty((n_obs,), dtype=np.int32)
X_interleaved[0::2] = X_split[:, 0, ...].reshape(n_batch, n_feats)
X_interleaved[1::2] = X_split[:, 1, ...].reshape(n_batch, n_feats)
y_interleaved[0::2] = y_split.copy()
y_interleaved[1::2] = y_split.copy()
return X_interleaved.reshape(n_obs, 1, n_height, n_width), y_interleaved
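# After interleaving, rows 2k and 2k+1 hold the two images of the k-th pair,
# which is the layout the batch generators and the siamese distance/loss
# functions rely on.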
def shuffle_dataset(X, y):
'''Randomly permute the order of the observations and their associated labels
Parameters
----------
X : TYPE
Description
y : TYPE
Description
'''
indices = np.random.permutation(len(y))
return X[indices, ...], y[indices, ...]
def get_balanced_shuffled_dataset(X_matched, y_matched, X_unmatched, y_unmatched):
'''Shuffles dataset, producing training data with similar number of matched
and unmatched observations. There are often much more unmatched
observations, so this method is used to sample from the larger set of
unmatched observations, while using as many matched observations as
there are, but randomly permuting their order.
Parameters
----------
X_matched : TYPE
Description
y_matched : TYPE
Description
X_unmatched : TYPE
Description
y_unmatched : TYPE
Description
'''
npairs = X_matched.shape[0]
# Shuffle order
X_matched, y_matched = shuffle_dataset(X_matched, y_matched)
X_unmatched, y_unmatched = shuffle_dataset(X_unmatched, y_unmatched)
# Sample same number of unmatched data
X_train = np.concatenate((X_matched, X_unmatched[:npairs]))
y_train = np.concatenate((y_matched, y_unmatched[:npairs]))
# Shuffle again so that batches aren't all matched/unmatched
X_train, y_train = shuffle_dataset(X_train, y_train)
return X_train, y_train
def make_shared(X, dtype):
'''Convert `X` to a theano shared variable with the given type.
Parameters
----------
X : TYPE
Description
dtype : TYPE
Description
'''
return theano.shared(np.asarray(X, dtype=dtype), borrow=True)
def generate_new_dataset_batch(X_matched, y_matched, X_unmatched, y_unmatched, batch_size):
'''Generator which loops through a randomly permuted ordering of the dataset.
This method requires the generated pairs of the data, which is a much
higher number of observations than the original dataset.
If you cannot fit the entire dataset into memory, use the slower method:
`generate_new_dataset_batch_from_idxs`
Returns X_train, y_train
Parameters
----------
X_matched : TYPE
Description
y_matched : TYPE
Description
X_unmatched : TYPE
Description
y_unmatched : TYPE
Description
batch_size : TYPE
Description
'''
# Generate a new shuffled, balanced dataset
X_train, y_train = get_balanced_shuffled_dataset(
X_matched, y_matched, X_unmatched, y_unmatched)
# Interleave pairs into sequential batches which will be used in the
# distance/loss functions appropriately
X_train, y_train = interleave_dataset(X_train, y_train)
nobs = len(X_train)
# Make sure it is even
batch_size = batch_size + (batch_size % 2)
# Loop until we're out of observations
batch_start = 0
batch_end = batch_size
while batch_start < np.min((nobs, (nobs - batch_size))):
yield X_train[batch_start:batch_end, ...], y_train[batch_start:batch_end, ...]
batch_start = batch_end
batch_end = batch_start + batch_size
def generate_new_dataset_batch_from_idxs(
X, y, X_matched_idxs, y_matched_idxs,
X_unmatched_idxs, y_unmatched_idxs, batch_size):
'''Generator which loops through a randomly permuted ordering of the dataset.
This method requires the generated pairs of the data as indexes.
Returns X_train, y_train
Parameters
----------
X : TYPE
Description
y : TYPE
Description
X_matched_idxs : TYPE
Description
y_matched_idxs : TYPE
Description
X_unmatched_idxs : TYPE
Description
y_unmatched_idxs : TYPE
Description
batch_size : TYPE
Description
'''
# Generate a new shuffled, balanced dataset
X_train, y_train = get_balanced_shuffled_dataset(
X_matched_idxs, y_matched_idxs, X_unmatched_idxs, y_unmatched_idxs)
# Interleave pairs into sequential batches which will be used in the distance/loss functions appropriately
# TODO: account for color images
X_train, y_train = interleave_dataset(
X_train[..., np.newaxis, np.newaxis], y_train)
X_train = np.squeeze(X_train).astype(np.int32)
y_train = np.squeeze(y_train).astype(np.int32)
nobs = len(X_train)
# Make sure it is even
batch_size = batch_size + (batch_size % 2)
# Loop until we're out of observations
batch_start = 0
batch_end = batch_size
while batch_start < np.min((nobs, (nobs - batch_size))):
yield X[X_train[batch_start:batch_end, ...], ...], y_train[batch_start:batch_end, ...]
batch_start = batch_end
batch_end = batch_start + batch_size
class SiameseNetPredictor(object):
'''Loads a pre-trained Deep Net for Face Verification which uses a
Siamese Net distance function + LogisticRegression on the final feature
layer. Requires the pretrained model in the directory results
Attributes
----------
clf : TYPE
Description
fn : TYPE
Description
lcn : TYPE
Description
result : TYPE
Description
'''
def __init__(self, images, filename='./lfw.pkl'):
"""Summary"""
# Load the pretrained model
self.result = pickle.load(open(filename, 'rb'))
print(self.result['params'])
self.grayscale = self.result['params']['b_convert_to_grayscale']
self.normalization = self.result['params']['normalization']
self.net = ConvSiameseNet(
input_channels=(1
if self.grayscale
else 3),
input_width=self.result['params']['resolution'][0],
input_height=self.result['params']['resolution'][1],
n_out=self.result['params']['n_features'],
distance_fn=self.result['params']['distance_fn'],
nonlinearity=self.result['params']['nonlinearity'])
if self.result['params']['model_type'] == 'custom':
self.net.use_custom_model()
elif self.result['params']['model_type'] == 'hani':
self.net.use_hani_model()
elif self.result['params']['model_type'] == 'chopra':
self.net.use_chopra_model()
else:
print('Unrecognized model!')
self.net.set_from_parameters(
pickle.loads(self.result['model_parameters']))
pred = lasagne.layers.get_output(self.net.model, self.net.x,
deterministic=True)
# Compile
self.fn = theano.function([self.net.x], [pred])
# We'll hash functions for every layer if/when user asks for them
self.fns = {}
# Train final regressor on entire dataset
# (cheating, but...¯\_(ツ)_/¯)
Xs = self.result['prediction']['X']
ys = self.result['prediction']['y']
Xs_L1 = np.abs(Xs[:, :self.net.n_out] - Xs[:, self.net.n_out:])
self.clf = LogisticRegression()
self.clf.fit(Xs_L1, ys)
# Load normalization kernel
# (previously created using LCN on the training set)
# self.lcn = pickle.loads(self.result['LCN'])
if self.grayscale:
X = np.concatenate([img[np.newaxis, np.newaxis, ...]
for img in images], axis=0)
else:
X = np.concatenate([img[np.newaxis, ...]
for img in images], axis=0)
print(X.shape)
if self.normalization == 'LCN':
lcn = LCN(
sigma=round(0.0625 * self.result['params']['resolution'][0]),
subtractive=False)
lcn.fit(X)
self.norm = lcn
elif self.normalization == 'LCN-':
lcn = LCN(
sigma=round(0.0625 * self.result['params']['resolution'][0]),
subtractive=True)
lcn.fit(X)
self.norm = lcn
elif self.normalization == 'ZCA':
zca = ZCA(bias=0.1)
zca.fit(X)
self.norm = zca
elif self.normalization == '-1:1':
self.norm = lambda x: ((x - np.min(x)) / (np.max(x) - np.min(x)) * 2.0 - 1.0)
def preprocess(self, X):
'''Take an image in X, and transform it with local contrast normalization.
Parameters
----------
X : numpy.ndarray
image to perform local contrast normalization on
Returns
-------
img : numpy.ndarray
Local contrast normalized image
'''
res = None
try:
res = self.norm.transform(X)
        except AttributeError:
            # The '-1:1' option stores a plain callable instead of a
            # transformer object with a .transform method.
            res = self.norm(X)
return res
def features_for_layer(self, X, layer_num):
if layer_num in self.fns.keys():
fn = self.fns[layer_num]
else:
layer_output = lasagne.layers.get_output(
lasagne.layers.get_all_layers(
self.net.model)[layer_num],
self.net.x, deterministic=True)
fn = theano.function([self.net.x], [layer_output])
self.fns[layer_num] = fn
out = fn(lasagne.utils.floatX(X))
return out
def features(self, X):
'''Get siamese net features for the images in X.
Parameters
----------
X : numpy.ndarray
N x C x W x H tensor defining the N images of W x H.
For colorscale, C = 3, while for grayscale, C = 1.
Returns
-------
features : numpy.ndarray
N x M array of features
'''
return self.fn(X)
def predict(self, X):
'''Predict whether images contain the same face or not.
Parameters
----------
X : numpy.ndarray
2*N x C x W x H tensor defining the N sequence of image pairs W x H.
For colorscale, C = 3, while for grayscale, C = 1.
Returns
-------
predictions : numpy.ndarray
N x 1 vector of True/False predictions of whether the image
pairs contain the same face or not.
'''
features = self.fn(X)
Xs_L1 = np.abs(features[0][0::2] - features[0][1::2])
final = self.clf.predict(Xs_L1)
return final
def get_normalization(self):
'''Return the normalization type of the pre-trained network.
Returns
-------
normalization_type : string
'LCN', 'LCN-', '-1:1', 'ZCA'
'''
return self.result['params']['normalization']
def get_crop(self):
'''Return the crop type of the pre-trained network.'''
return self.result['params']['crop']
def get_resolution(self):
'''Return the resolution of the images required by the pre-trained network.
Returns
-------
(%d, %d) : tuple
Resolution of the image
'''
return self.result['params']['resolution']
def get_colorscale(self):
'''Return the colorscale of the images required by the pre-trained network
Returns
-------
is_grayscale : bool
True if grayscale, else, False for RGB color.
'''
return self.result['params']['b_convert_to_grayscale']
class ConvSiameseNet:
"""Builds an object used for training a siamese net
with different types of models and options.
Attributes
----------
batch_size : TYPE
Description
batch_slice : TYPE
Description
distance_fn : TYPE
Description
hyperparameter_margin : TYPE
Description
hyperparameter_threshold : TYPE
Description
index : TYPE
Description
input_channels : TYPE
Description
input_height : TYPE
Description
input_width : TYPE
Description
l_in : TYPE
Description
learning_rate : TYPE
Description
loss_fn : TYPE
Description
model : TYPE
Description
n_out : TYPE
Description
nonlinearity : TYPE
Description
srng : TYPE
Description
test_x : TYPE
Description
train_x : TYPE
Description
update : TYPE
Description
validation_x : TYPE
Description
weight_init : TYPE
Description
x : TYPE
Description
y : TYPE
Description
"""
def __init__(self,
input_channels,
input_width,
input_height,
n_out,
batch_size=None,
distance_fn='l1',
nonlinearity='scaled_tanh'):
"""Builds a ConvSiameseNet for training.
Parameters
----------
input_channels : TYPE
Description
input_width : TYPE
Description
input_height : TYPE
Description
n_out : TYPE
Description
batch_size : TYPE, optional
Description
distance_fn : str, optional
Description
nonlinearity : str, optional
Description
Raises
------
ValueError
Description
"""
self.input_channels = input_channels
self.input_width = input_width
self.input_height = input_height
self.n_out = n_out
self.batch_size = batch_size
self.l_in = lasagne.layers.InputLayer(
shape=(None, input_channels, input_width, input_height))
self.n_out = n_out
self.srng = theano.sandbox.rng_mrg.MRG_RandomStreams()
self.loss_fn = contrastive_loss
if distance_fn.lower() == 'cosine':
self.distance_fn = distance_cosine
elif distance_fn.lower() == 'l1':
self.distance_fn = distance_L1
elif distance_fn.lower() == 'l2':
self.distance_fn = distance_L2
else:
raise ValueError(
'Must specify distance as either "cosine", "l1", or "l2".')
self.x = T.tensor4('x')
self.y = T.ivector('y')
if nonlinearity == 'scaled_tanh':
self.nonlinearity = lasagne.nonlinearities.ScaledTanH(
scale_in=2. / 3, scale_out=1.7159)
elif nonlinearity == 'rectify':
self.nonlinearity = lasagne.nonlinearities.rectify
else:
raise ValueError(
'Must specify nonlinearity as either "scaled_tanh" or "rectify".')
self.weight_init = lasagne.init.Normal(std=0.05, mean=0.0)
def use_hani_model(self, dropout_pct=0.0, b_spatial=False):
"""Summary
Parameters
----------
dropout_pct : float, optional
Description
b_spatial : bool, optional
Description
Returns
-------
name : TYPE
Description
"""
self.model = self.get_hani_2014_net(
self.l_in, dropout_pct=dropout_pct, b_spatial=b_spatial)
def use_custom_model(self, b_spatial=False):
"""Summary
Parameters
----------
b_spatial : bool, optional
Description
Returns
-------
name : TYPE
Description
"""
self.model = self.get_custom_net(self.l_in, b_spatial=b_spatial)
def use_chopra_model(self, dropout_pct=0.0, b_spatial=False):
"""Summary
Parameters
----------
dropout_pct : float, optional
Description
b_spatial : bool, optional
Description
Returns
-------
name : TYPE
Description
"""
self.model = self.get_chopra_net(
self.l_in, dropout_pct=dropout_pct, b_spatial=b_spatial)
def use_deepid_model(self, b_spatial=False):
"""Summary
Parameters
----------
b_spatial : bool, optional
Description
Returns
-------
name : TYPE
Description
"""
self.model = self.get_deep_id_net(self.l_in, b_spatial=b_spatial)
def get_spatial_transform_net(self, input_layer):
"""Summary
Parameters
----------
input_layer : TYPE
Description
Returns
-------
name : TYPE
Description
"""
# http://lasagne.readthedocs.org/en/latest/modules/layers/special.html?highlight=trainable#lasagne.layers.TransformerLayer
# Localization network
        # Spatial Transformer Networks, Jaderberg, Simonyan, Zisserman, Kavukcuoglu. Submitted on 5 Jun 2015
# Here we set up the layer to initially do the identity transform,
# similarly to [R34]. Note that you will want to use a localization
# with linear output. If the output from the localization networks
# is [t1, t2, t3, t4, t5, t6] then t1 and t5 determines zoom, t2
# and t4 determines skewness, and t3 and t6 move the center
# position.
b = np.zeros((2, 3), dtype=theano.config.floatX)
b[0, 0] = 1
b[1, 1] = 1
b = b.flatten()
loc_l1 = lasagne.layers.MaxPool2DLayer(input_layer, pool_size=(2, 2))
loc_l2 = lasagne.layers.Conv2DLayer(
loc_l1,
num_filters=20,
filter_size=(5, 5),
W=self.weight_init
)
loc_l3 = lasagne.layers.MaxPool2DLayer(loc_l2, pool_size=(2, 2))
loc_l4 = lasagne.layers.Conv2DLayer(
loc_l3,
num_filters=20,
filter_size=(5, 5),
W=self.weight_init
)
loc_l5 = lasagne.layers.DenseLayer(
loc_l4,
num_units=50,
W=self.weight_init
)
loc_out = lasagne.layers.DenseLayer(
loc_l5,
num_units=6,
b=b,
W=self.weight_init,
nonlinearity=lasagne.nonlinearities.identity
)
# Transformer network
transformed_input_layer = lasagne.layers.TransformerLayer(
input_layer, loc_out, downsample_factor=2.0)
print('Transformed Input Shape: ',
transformed_input_layer.output_shape)
return transformed_input_layer
def get_chopra_net(self, input_layer, dropout_pct=0.0, b_spatial=False):
'''Return a lasagne network defining the siamese network
        Chopra, S., Hadsell, R., & LeCun, Y. (2005). Learning a similarity
metric discriminatively, with application to face verification.
Proceedings of IEEE Conference on Computer Vision and Pattern
Recognition, 349–356.
Modifications
-------------
dropout_pct -- Instead of a fixed connection layer, use dropout
with this much percentage [0.5]
b_spatial -- Prepend a spatial transformer network which applies
an affine transformation and a 2x crop [False]
Args
----
input_layer : TYPE
Description
dropout_pct : float, optional
Description
b_spatial : bool, optional
Description
Deleted Parameters
------------------
input_layer (TYPE) : Description
dropout_pct (float : Description
optional), b_spatial (bool : Description
'''
l_conv1 = None
if b_spatial:
# returns a 15x40x40
l_conv1 = lasagne.layers.Conv2DLayer(
self.get_spatial_transform_net(input_layer),
num_filters=15,
filter_size=(7, 7),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
else:
# returns a 15x40x40
l_conv1 = lasagne.layers.Conv2DLayer(
input_layer,
num_filters=15,
filter_size=(7, 7),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
# returns a 15x20x20
l_pool1 = lasagne.layers.MaxPool2DLayer(l_conv1, pool_size=(2, 2))
# returns a 45x15x15
l_conv2 = lasagne.layers.Conv2DLayer(
l_pool1,
num_filters=45,
filter_size=(6, 6),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
# returns a 45x5x5
l_pool2 = lasagne.layers.MaxPool2DLayer(l_conv2, pool_size=(3, 3))
l_pool2_dropout = lasagne.layers.DropoutLayer(l_pool2, p=dropout_pct)
# returns a 250x1x1
l_conv3 = lasagne.layers.Conv2DLayer(
l_pool2_dropout,
num_filters=250,
filter_size=(5, 5),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_hidden = lasagne.layers.DenseLayer(
l_conv3,
num_units=self.n_out,
nonlinearity=self.nonlinearity,
W=self.weight_init
)
model = lasagne.layers.DenseLayer(
l_hidden,
num_units=self.n_out,
nonlinearity=self.nonlinearity,
W=self.weight_init
)
return model
def get_custom_net(self, input_layer, b_spatial=False):
'''Return a lasagne network defining a custom siamese network
Modifications
-------------
dropout_pct -- Instead of a fixed connection layer, use dropout
with this much percentage [0.5]
b_spatial -- Prepend a spatial transformer network which applies an
affine transformation and a 2x crop [False]
Args
----
input_layer : TYPE
Description
b_spatial : bool, optional
Description
Deleted Parameters
------------------
input_layer (TYPE) : Description
b_spatial (bool, optional) : Description
'''
l_conv1a = None
if b_spatial:
l_conv1a = lasagne.layers.Conv2DLayer(
self.get_spatial_transform_net(input_layer),
num_filters=16,
filter_size=(3, 3),
                nonlinearity=self.nonlinearity,
W=self.weight_init
)
else:
l_conv1a = lasagne.layers.Conv2DLayer(
input_layer,
num_filters=16,
filter_size=(3, 3),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_conv1b = lasagne.layers.Conv2DLayer(
l_conv1a,
num_filters=32,
filter_size=(3, 3),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_pool1 = lasagne.layers.MaxPool2DLayer(l_conv1b, pool_size=(2, 2))
l_conv2a = lasagne.layers.Conv2DLayer(
l_pool1,
num_filters=32,
filter_size=(3, 3),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_conv2b = lasagne.layers.Conv2DLayer(
l_conv2a,
num_filters=64,
filter_size=(3, 3),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_pool2 = lasagne.layers.MaxPool2DLayer(l_conv2b, pool_size=(2, 2))
l_conv3a = lasagne.layers.Conv2DLayer(
l_pool2,
num_filters=64,
filter_size=(3, 3),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_conv3b = lasagne.layers.Conv2DLayer(
l_conv3a,
num_filters=128,
filter_size=(3, 3),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_pool3 = lasagne.layers.MaxPool2DLayer(l_conv3b, pool_size=(2, 2))
l_full4 = lasagne.layers.DenseLayer(
l_pool3,
num_units=self.n_out,
nonlinearity=self.nonlinearity,
W=self.weight_init
)
model = lasagne.layers.DenseLayer(
l_full4,
num_units=self.n_out,
nonlinearity=self.nonlinearity,
W=self.weight_init
)
return model
# this model actually requires a different training procedure, of
# recognition then verification
def get_deep_id_net(self, input_layer, b_spatial=False):
"""Summary
Parameters
----------
input_layer : TYPE
Description
b_spatial : bool, optional
Description
Returns
-------
name : TYPE
Description
"""
l_conv1 = None
# flip = False
# returns a 20x52x44
if b_spatial:
l_conv1 = lasagne.layers.Conv2DLayer(
self.get_spatial_transform_net(input_layer),
num_filters=20,
filter_size=(4, 4),
stride=(1, 1),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
else:
l_conv1 = lasagne.layers.Conv2DLayer(
input_layer,
num_filters=20,
filter_size=(4, 4),
stride=(1, 1),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_pool1 = lasagne.layers.MaxPool2DLayer(l_conv1, pool_size=(3, 3))
l_conv2 = lasagne.layers.Conv2DLayer(
l_pool1,
num_filters=40,
filter_size=(3, 3),
stride=(1, 1),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_pool2 = lasagne.layers.MaxPool2DLayer(l_conv2, pool_size=(3, 3))
l_conv3 = lasagne.layers.Conv2DLayer(
l_pool2,
num_filters=60,
filter_size=(3, 3),
stride=(1, 1),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_pool3 = lasagne.layers.MaxPool2DLayer(l_conv3, pool_size=(3, 3))
l_conv4 = lasagne.layers.Conv2DLayer(
l_pool3,
num_filters=80,
filter_size=(2, 2),
stride=(1, 1),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
model = lasagne.layers.DenseLayer(
l_conv4,
num_units=self.n_out,
nonlinearity=self.nonlinearity,
W=self.weight_init
)
return model
def get_deep_id2_plus_net(self, input_layer, b_spatial=False):
"""Summary
Parameters
----------
input_layer : TYPE
Description
b_spatial : bool, optional
Description
Returns
-------
name : TYPE
Description
"""
l_conv1 = None
# flip = False
# returns a 20x52x44
if b_spatial:
l_conv1 = lasagne.layers.Conv2DLayer(
self.get_spatial_transform_net(input_layer),
num_filters=128,
                filter_size=(4, 4),
stride=(1, 1),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
else:
l_conv1 = lasagne.layers.Conv2DLayer(
input_layer,
num_filters=128,
                filter_size=(4, 4),
stride=(1, 1),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_pool1 = lasagne.layers.MaxPool2DLayer(l_conv1, pool_size=(3, 3))
l_conv2 = lasagne.layers.Conv2DLayer(
l_pool1,
num_filters=128,
filter_size=(3, 3),
stride=(1, 1),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_pool2 = lasagne.layers.MaxPool2DLayer(l_conv2, pool_size=(3, 3))
l_conv3 = lasagne.layers.Conv2DLayer(
l_pool2,
num_filters=128,
filter_size=(3, 3),
stride=(1, 1),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_pool3 = lasagne.layers.MaxPool2DLayer(l_conv3, pool_size=(3, 3))
l_conv4 = lasagne.layers.Conv2DLayer(
l_pool3,
num_filters=128,
filter_size=(2, 2),
stride=(1, 1),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
model = lasagne.layers.DenseLayer(
l_conv4,
num_units=self.n_out,
nonlinearity=self.nonlinearity,
W=self.weight_init
)
return model
def get_hani_2014_net(self, input_layer, dropout_pct=0.5, b_spatial=False):
'''
Return a lasagne network defining the siamese network in
--------------------------------------------------------
        Khalil-Hani, M., & Sung, L. S. (2014). A convolutional neural
network approach for face verification. High Performance Computing
& Simulation (HPCS), 2014 International Conference on, (3), 707–714.
doi:10.1109/HPCSim.2014.6903759
Modifications
-------------
dropout_pct -- Instead of a fixed connection layer, use dropout with
this much percentage [0.5]
b_spatial -- Prepend a spatial transformer network which applies an
affine transformation and a 2x crop [False]
Args
----
input_layer : TYPE
Description
dropout_pct : float, optional
Description
b_spatial : bool, optional
Description
Deleted Parameters
------------------
input_layer (TYPE) : Description
dropout_pct (float : Description
optional), b_spatial (bool : Description
'''
# from lasagne.layers.corrmm import Conv2DMMLayer
l_conv1 = None
# flip = False
if b_spatial:
# returns a 5x21x21
l_conv1 = lasagne.layers.Conv2DLayer(
self.get_spatial_transform_net(input_layer),
num_filters=5,
filter_size=(6, 6),
stride=(2, 2),
nonlinearity=self.nonlinearity,
# flip_filters=flip,
W=self.weight_init
)
else:
# returns a 5x21x21
l_conv1 = lasagne.layers.Conv2DLayer(
input_layer,
num_filters=5,
filter_size=(6, 6),
stride=(2, 2),
nonlinearity=self.nonlinearity,
# flip_filters=flip,
W=self.weight_init
)
# returns a 14x6x6
l_conv2 = lasagne.layers.Conv2DLayer(
l_conv1,
num_filters=14,
filter_size=(6, 6),
stride=(2, 2),
nonlinearity=self.nonlinearity,
# flip_filters=flip,
W=self.weight_init
)
l_dropout2 = lasagne.layers.DropoutLayer(l_conv2, p=dropout_pct)
# returns a 60x1x1
l_conv3 = lasagne.layers.Conv2DLayer(
l_dropout2,
num_filters=60,
filter_size=(6, 6),
nonlinearity=self.nonlinearity,
# flip_filters=flip,
W=self.weight_init
)
l_hidden = lasagne.layers.DenseLayer(
l_conv3,
num_units=self.n_out,
nonlinearity=self.nonlinearity,
W=self.weight_init
)
model = lasagne.layers.DenseLayer(
l_hidden,
num_units=self.n_out,
nonlinearity=lasagne.nonlinearities.identity,
W=lasagne.init.Uniform()
)
return model
def build_model(self,
X_train,
y_train,
X_valid,
y_valid,
X_test,
y_test,
update=lasagne.updates.adam,
hyperparameter_margin=2.0,
hyperparameter_threshold=5.0,
learning_rate=0.0001):
'''Given data for train, valid, and test, apply update with the
given hyperparameters and learning rates, returning the models
for train, valid, and test.
Parameters
----------
X_train : numpy.ndarray
Example input data used to set network shape
y_train : numpy.ndarray
Example labels used to set network shape
X_valid : numpy.ndarray
Example input data used to set network shape
y_valid : TYPE
Example labels used to set network shape
X_test : TYPE
Example input data used to set network shape
y_test : TYPE
Example labels used to set network shape
update : Attribute, optional
Lasagne update rule to apply
hyperparameter_margin : float, optional
Total energy expected in the contrastive loss function
hyperparameter_threshold : float, optional
Simple thresholding of the final loss used for approximating the label
learning_rate : float, optional
How much to move in the gradient
'''
self.learning_rate = theano.shared(lasagne.utils.floatX(learning_rate))
self.hyperparameter_threshold = lasagne.utils.floatX(
hyperparameter_threshold)
self.hyperparameter_margin = lasagne.utils.floatX(
hyperparameter_margin)
self.train_x = X_train
self.validation_x = X_valid
self.test_x = X_test
self.update = update
self.index = T.iscalar('index')
self.batch_slice = slice(
self.index * self.batch_size, (self.index + 1) * self.batch_size)
# Training Loss
y_pred = lasagne.layers.get_output(
self.model, self.x, deterministic=False)
avg_loss = self.loss_fn(y_pred, self.y, self.hyperparameter_margin)
loss = avg_loss / self.batch_size
# Validation Loss
y_pred_eval = lasagne.layers.get_output(
self.model, self.x, deterministic=True)
avg_loss = self.loss_fn(
y_pred_eval, self.y, self.hyperparameter_margin)
loss_eval = avg_loss / self.batch_size
# loss_eval = loss_eval.mean()
# Validation Accuracy
pred = self.distance_fn(y_pred_eval)
accuracy = T.mean(T.eq(T.lt(pred, self.hyperparameter_threshold), self.y[
0::2]), dtype=theano.config.floatX)
# Find weight params to update during backprop, and use adam updater
all_params = lasagne.layers.get_all_params(self.model, trainable=True)
updates = lasagne.updates.adam(
loss, all_params, learning_rate=self.learning_rate)
# Setup each model and return
train_model = theano.function(
[self.x, self.y], [loss, y_pred], updates=updates)
validate_model = theano.function(
[self.x, self.y], [loss_eval, accuracy, y_pred_eval])
test_model = theano.function(
[self.x, self.y], [loss_eval, accuracy, y_pred_eval])
return train_model, validate_model, test_model
def get_evaluation_model(self):
"""Return a theano function allowing you to directly compute
the siamese net features.
Returns
-------
fn : theano.function
The theano function which expects the input layer and returns the
siamese net features. Does not require pairs (e.g. N can = 1).
"""
y_pred = lasagne.layers.get_output(
self.model, self.x, deterministic=True)
fn = theano.function([self.x], [y_pred])
return fn
def retrieve_parameters(self):
"""Get stored parameters from the theano model.
This function can be used in conjunction with set_from_parameters to save
and restore model parameters.
Returns
-------
model_parameters : list of numpy.array
A list of numpy arrays representing the parameter values.
"""
return lasagne.layers.get_all_param_values(self.model)
def set_from_parameters(self, parameters):
"""Set the stored parameters of the internal theano model.
This function can be used in conjunction with retrieve_parameters to save
and restore model parameters.
Parameters
----------
parameters : list of numpy.array
A list of numpy arrays representing the parameter values, must match
the number of parameters.
Every parameter's shape must match the shape of its new value.
"""
lasagne.layers.set_all_param_values(self.model, parameters)
def load_model(self, filename='model.pkl'):
"""Set the stored parameters of the internal theano model.
This function can be used in conjunction with save_model to save
and restore model parameters.
Parameters
----------
filename : str, optional
Location of pickle file containing the model parameters.
"""
params = pickle.load(open(filename, 'rb'))
lasagne.layers.set_all_param_values(self.model, params)
def save_model(self, filename='model.pkl'):
"""Get stored parameters from the theano model and store in the given filename.
This function can be used in conjunction with load_model to save
and restore model parameters.
Parameters
----------
filename : str, optional
Location of pickle file containing the model parameters.
"""
params = lasagne.layers.get_all_param_values(self.model)
pickle.dump(params, open(filename, 'wb'))
def get_learning_rate(self):
return self.learning_rate.get_value()
def set_learning_rate(self, lr):
self.learning_rate.set_value(lasagne.utils.floatX(lr))
def distance_L2(x):
"""L2 distance for the Siamese architecture.
Batches should be fed in pairs of images which the loss
helps to optimize the distance of. This is the siamese part of the architecture
which fakes having two networks and just uses the batch's dimension to help
define the parallel networks.
Parameters
----------
x : theano.tensor
Tensor with pairs of batches
Returns
-------
l2_dist : theano.tensor
L2 Distance between pairs
"""
x_a = x[0::2]
x_b = x[1::2]
return T.sum((x_a - x_b)**2, axis=1)
def distance_L1(x):
"""L1 distance for the Siamese architecture.
Batches should be fed in pairs of images which the loss
helps to optimize the distance of. This is the siamese part of the architecture
which fakes having two networks and just uses the batch's dimension to help
define the parallel networks.
Parameters
----------
x : theano.tensor
Tensor with pairs of batches
Returns
-------
l1_dist : theano.tensor
L1 Distance between pairs
"""
x_a = x[0::2]
x_b = x[1::2]
return T.sum(T.abs_(x_a - x_b), axis=1)
def l2norm(x):
"""L2 norm.
Parameters
----------
x : theano.tensor
Vector to take norm of.
Returns
-------
l2_norm : theano.tensor
L2 norm of vector in x
"""
return T.sqrt(T.sum(T.sqr(x), axis=1))
def distance_cosine(x, e=1e-6):
"""Cosine distance for the Siamese architecture.
Batches should be fed in pairs of images which the loss
helps to optimize the distance of. This is the siamese part of the architecture
which fakes having two networks and just uses the batch's dimension to help
define the parallel networks.
Parameters
----------
x : theano.tensor
Description
e : float, optional
Epsilon to prevent divide by zero
Returns
-------
    similarity : theano.tensor
        Cosine similarity between each pair (larger values indicate more similar features)
"""
x_a = x[0::2]
x_b = x[1::2]
return T.sum(x_a * x_b, axis=1) / T.maximum(l2norm(x_a) * l2norm(x_b), e)
# def contrastive_loss(y_pred, y_true, Q=20.0):
# eq. 8
# E_w = distance_L1(y_pred)
# y = y_true[0::2]
# eq 9
# Decrease energy for matched pair: (0 = unmatched, 1 = matched)
# L_G = (1.0 - y) * (2.0 / Q) * (E_w ** 2)
# L_I = (y) * 2.0 * Q * T.exp((-2.7726 * E_w) / Q)
# L = L_G + L_I
# avg_loss = T.mean(L)
# return avg_loss
def contrastive_loss(y_pred, y_true, margin=20.0):
"""Contrastive loss for the Siamese Architecture.
Batches should be fed in pairs of images which the loss helps to optimize
the distance of. This is the siamese part of the architecture which fakes
having two networks and just uses the batch's dimension to help define the
parallel networks.
Parameters
----------
y_pred : theano.tensor
Predicted features (n_batch x n_features)
y_true : theano.tensor
Actual features (n_batch x n_features)
margin : float, optional
Hyperparameter defining total free energy
Returns
-------
loss : theano.tensor
Mean loss
"""
x1 = y_pred[0::2]
x2 = y_pred[1::2]
d = T.sum((x1 - x2)**2, axis=1)
y = y_true[0::2]
return T.mean(y * d + (1.0 - y) * T.maximum(margin - d, 0.0))
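# Illustrative sketch (added here; not part of the original module): the same
# contrastive loss computed with plain NumPy on a toy batch, which can be used
# to sanity-check the Theano expression above. All values below are made up.
def _contrastive_loss_numpy_demo(margin=20.0):
    feats = np.array([[0.0, 0.0], [0.1, 0.1],   # matched pair: nearly identical features
                      [0.0, 0.0], [3.0, 4.0]])  # unmatched pair: far apart
    labels = np.array([1, 1, 0, 0])             # one label per image, repeated within a pair
    d = np.sum((feats[0::2] - feats[1::2]) ** 2, axis=1)
    y = labels[0::2]
    # matched pairs are pulled together; unmatched pairs only contribute while
    # they are still closer than the margin
    return np.mean(y * d + (1.0 - y) * np.maximum(margin - d, 0.0))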
def continue_siamese_net_training(filename):
'''Continues the training of the siamese network with parameters defined
in the given filename.
Parameters
----------
filename : string
The path to the file defining the trained siamese network in progress.
'''
results_file = pickle.load(open(filename, 'rb'))
params = results_file['params']
print(params)
run_siamese_net_training(
dataset=params['dataset'],
spatial=params['spatial_transform'],
batch_size=params['batch_size'],
n_out=params['n_features'],
model_type=params['model_type'],
n_epochs=params['n_epochs'],
num_files=params['n_files'],
learning_rate=params['learning_rate'],
normalization=params['normalization'],
crop_factor=params['crop'],
resolution=params['resolution'][0],
hyperparameter_margin=params['hyperparameter_margin'],
hyperparameter_threshold=params['hyperparameter_threshold'],
dropout_pct=params['dropout_pct'],
nonlinearity=params['nonlinearity'],
distance_fn=params['distance_fn'],
b_convert_to_grayscale=params['b_convert_to_grayscale'],
filename=filename + 'continued.pkl'
)
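# Example call (added for illustration; the filename below is invented and would
# in practice be one of the pickle files that run_siamese_net_training writes to
# the results/ directory):
#
#     continue_siamese_net_training('results/dataset_lfw_model_chopra.pkl')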
def run_siamese_net_training(dataset,
spatial,
batch_size,
learning_rate,
model_type,
n_epochs,
n_out,
num_files,
normalization,
resolution,
crop_factor,
hyperparameter_margin,
hyperparameter_threshold,
nonlinearity,
distance_fn,
b_convert_to_grayscale,
filename=None,
path_to_data=None,
b_load_idxs_only=True):
'''Run training of a siamese net for the given parameters, saving
results to a pickle file defined by the given parameters:
Parameters
----------
dataset : string
Name of the dataset to use: 'lfw', 'olivetti'
spatial : bool
Whether to prepent a spatial transformer network or not
batch_size : int
Number of observations in a batch
learning_rate : float
Learning Rate
model_type : string
Which model to use: 'hani', 'chopra', or 'custom'
n_epochs : Integer
Number of epochs to train for.
n_out : int
Number of neurons in the final output layer of the Siamese Network
num_files : int
Number of files to load for each person
normalization : string
Method of normalization to apply: '-1:1', 'LCN', 'LCN-', 'ZCA'
resolution : int
Image resolution to scale to (square pixels only)
crop_factor : float
Factor to scale bounds of the detected face.
1.0 means the face is tightly cropped,
< 1.0, the face is cropped even tighter
> 1.0, more of the outside of the face is included.
hyperparameter_margin : float
Total free energy of the contrastive loss equation
hyperparameter_threshold : float
Threshold to apply to L1 norm of final output layers defining
whether faces match or not
nonlinearity : string
"rectify" or "scaled_tanh"
distance_fn : string
"L1", "L2", or "Cosine"
b_convert_to_grayscale : bool
Color images are automatically converted to grayscale (C = 1)
filename : str, optional
Where to store results
path_to_data : string
Where to find the dataset (defaults to current working directory)
b_load_idxs_only : bool
If False, the entire dataset's pairs are loaded into memory
Advised to load idxs only for lfw as it requires > 60 GB.
Deleted Parameters
------------------
bCropToHaarBBox : bool
Crop images to frontal face cascade
'''
if filename is None:
filename = str('dataset_%s' % dataset +
'_transform_%d' % int(spatial) +
'_batch_%d' % batch_size +
'_lr_%f' % learning_rate +
'_model_%s' % model_type +
'_epochs_%d' % n_epochs +
'_normalization_%s' % normalization +
'_cropfactor_%0.02f' % crop_factor +
'_nout_%d' % n_out +
'_resolution_%d' % resolution +
'_numfiles_%d' % num_files +
'_q_%2.02f' % hyperparameter_margin +
'_t_%2.02f' % hyperparameter_threshold +
'_nonlinearity_%s' % nonlinearity +
'_distancefn_%s' % distance_fn +
'_grayscale_%d.pkl' % b_convert_to_grayscale)
filename = os.path.join('results', filename)
results = None
model = None
if os.path.isfile(filename):
try:
results = pickle.load(open(filename, 'rb'))
if 'epochs' in results.keys():
if len(results['epochs']) >= n_epochs:
print('Already process(ing/ed); exiting.')
return
# else:
# continue where it left off
# if 'model' in results.keys():
# model = pickle.loads(results['model'])
# model.set_from_parameters(pickle.loads(results['model_parameters']))
except:
pass
print("""Dataset: %s
\rSpatial: %d
\rBatch Size: %d
\rNum Features: %d
\rModel Type: %s
\rNum Epochs: %d
\rNum Files: %d
\rLearning Rate: %f
\rNormalization: %s
\rCrop Factor: %f
\rResolution: %d
\rHyperparameter Margin: %f
\rHyperparameter Threshold: %f
\rNon-Linearity: %s
\rGrayscale: %d
\rDistance Function: %s\n
\rWriting results to: %s\n""" % (dataset,
int(spatial),
batch_size,
n_out,
model_type,
n_epochs,
num_files,
learning_rate,
normalization,
crop_factor,
resolution,
hyperparameter_margin,
hyperparameter_threshold,
nonlinearity,
int(b_convert_to_grayscale),
distance_fn,
filename))
if model_type == 'deepid':
b_convert_to_grayscale = False
if b_convert_to_grayscale:
input_channels = 1
else:
input_channels = 3
# TODO: if continuing a result from a left off epoch, the dataset will
# have been generated differently. how should I handle this? store the
# pairs, too big a file? store the rng, what about parameters?
print('Loading dataset...')
data = load_pairs(
dataset=dataset,
normalization=normalization,
resolution=(resolution, resolution),
split=(0.8, 0.2, 0.2),
crop_factor=1.2,
n_files_per_person=num_files,
path_to_data=path_to_data,
b_load_idxs_only=b_load_idxs_only,
b_convert_to_grayscale=b_convert_to_grayscale)
print('Initializing Siamese Network...')
print(data['X'].shape)
X_train = np.zeros(np.hstack((batch_size, data['X'].shape[1:])))
y_train = np.zeros(np.hstack((batch_size, data['y'].shape[1:])))
if model is None:
model = ConvSiameseNet(input_channels=input_channels,
input_width=X_train.shape[2],
input_height=X_train.shape[3],
n_out=n_out,
batch_size=batch_size,
nonlinearity=nonlinearity,
distance_fn=distance_fn)
if model_type == 'hani':
model.use_hani_model(dropout_pct=0.0, b_spatial=spatial)
elif model_type == 'custom':
model.use_custom_model(b_spatial=spatial)
elif model_type == 'chopra':
model.use_chopra_model(
dropout_pct=0.0, b_spatial=spatial)
elif model_type == 'deepid':
model.use_deepid_model(b_spatial=spatial)
else:
print(
            'Unrecognized model type! Choose between \'hani\', \'chopra\', \'deepid\', or \'custom\'')
sys.exit(2)
train_model, validate_model, test_model = model.build_model(
X_train, y_train,
X_train, y_train,
X_train, y_train,
hyperparameter_margin=hyperparameter_margin,
hyperparameter_threshold=hyperparameter_threshold,
learning_rate=learning_rate
)
if results is None:
results = {
'params':
{
'dataset': dataset,
'spatial_transform': spatial,
'batch_size': batch_size,
'n_features': n_out,
'model_type': model_type,
'n_epochs': n_epochs,
'n_files': num_files,
'learning_rate': learning_rate,
'normalization': normalization,
'crop': crop_factor,
'resolution': (resolution, resolution),
'hyperparameter_margin': hyperparameter_margin,
'hyperparameter_threshold': hyperparameter_threshold,
'nonlinearity': nonlinearity,
'distance_fn': distance_fn,
'b_convert_to_grayscale': b_convert_to_grayscale
},
'epochs': [],
'prediction':
{
'X': None,
'y': None,
'imgs': None,
'auc': [],
'F1': [],
'log_loss': [],
'W': []
},
'model': None,
'model_parameters': None
}
delta_loss = 1.0
epoch = len(results['epochs'])
prev_loss = 0
while delta_loss > 1e-6 and epoch < n_epochs:
# Training
clf = LogisticRegression()
        X_train, y_train = np.zeros((0, n_out * 2))
import numpy as np
from scipy.stats import norm, truncnorm
def _em_step_body_(args):
"""
Does a step of the EM algorithm, needed to dereference args to support parallelism
"""
return _em_step_body(*args)
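# Usage sketch (added for illustration; no such driver appears in this file).
# The tuple-unpacking wrapper above exists so that blocks of rows can be
# dispatched through a process pool, roughly like this:
def _parallel_em_step_demo(Z, r_lower, r_upper, sigma, n_workers=4):
    from multiprocessing import Pool
    chunks = zip(np.array_split(Z, n_workers),
                 np.array_split(r_lower, n_workers),
                 np.array_split(r_upper, n_workers))
    args = [(Zc, rlc, ruc, sigma) for Zc, rlc, ruc in chunks]
    with Pool(processes=n_workers) as pool:
        results = pool.map(_em_step_body_, args)
    # recombine: sum the C contributions and stack the latent/imputed rows
    C = sum(res[0] for res in results)
    Z_imp = np.vstack([res[1] for res in results])
    Z_new = np.vstack([res[2] for res in results])
    return C, Z_imp, Z_new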
def _em_step_body(Z, r_lower, r_upper, sigma, num_ord_updates=1):
"""
Iterate the rows over provided matrix
"""
num, p = Z.shape
Z_imp = np.copy(Z)
C = np.zeros((p,p))
for i in range(num):
try:
c, z_imp, z = _em_step_body_row(Z[i,:], r_lower[i,:], r_upper[i,:], sigma)
except:
np.savetxt("Z.txt", Z)
print(Z)
Z_imp[i,:] = z_imp
Z[i,:] = z
C += c
return C, Z_imp, Z
def _em_step_body_row(Z_row, r_lower_row, r_upper_row, sigma, num_ord_updates=1):
"""
The body of the em algorithm for each row
Returns a new latent row, latent imputed row and C matrix, which, when added
to the empirical covariance gives the expected covariance
Args:
Z_row (array): (potentially missing) latent entries for one data point
r_lower_row (array): (potentially missing) lower range of ordinal entries for one data point
r_upper_row (array): (potentially missing) upper range of ordinal entries for one data point
sigma (matrix): estimate of covariance
num_ord (int): the number of ordinal columns
Returns:
        C (matrix): results in the updated covariance when added to the empirical covariance
        Z_imp_row (array): Z_row with latent ordinals updated and missing entries imputed
        Z_row (array): input Z_row with latent ordinals updated
"""
Z_imp_row = np.copy(Z_row)
p = Z_imp_row.shape[0]
num_ord = r_upper_row.shape[0]
C = np.zeros((p,p))
obs_indices = np.where(~np.isnan(Z_row))[0]
missing_indices = np.setdiff1d(np.arange(p), obs_indices)
ord_in_obs = np.where(obs_indices < num_ord)[0]
ord_obs_indices = obs_indices[ord_in_obs]
# obtain correlation sub-matrices
# obtain submatrices by indexing a "cartesian-product" of index arrays
sigma_obs_obs = sigma[np.ix_(obs_indices,obs_indices)]
sigma_obs_missing = sigma[np.ix_(obs_indices, missing_indices)]
sigma_missing_missing = sigma[np.ix_(missing_indices, missing_indices)]
if len(missing_indices) > 0:
tot_matrix = np.concatenate((np.identity(len(sigma_obs_obs)), sigma_obs_missing), axis=1)
        intermed_matrix = np.linalg.solve(sigma_obs_obs, tot_matrix)
from detectron2.data.datasets import register_coco_instances
from dafne.utils.sort_corners import sort_quadrilateral
from detectron2.utils.colormap import colormap
from detectron2.data.datasets.coco import load_coco_json
from detectron2.data import (
DatasetCatalog,
MetadataCatalog,
DatasetMapper,
transforms as T,
)
import xml.etree.ElementTree as ET
from detectron2.structures import BoxMode, PolygonMasks, RotatedBoxes
from detectron2.data import detection_utils as utils
import copy
import torch
import contextlib
import datetime
import io
import json
import logging
import numpy as np
import os
import pycocotools.mask as mask_util
from fvcore.common.file_io import PathManager, file_lock
from fvcore.common.timer import Timer
from PIL import Image
import matplotlib.pyplot as plt
import seaborn as sns
import os
logger = logging.getLogger(__name__)
def xywha2xy4(xywha): # a represents the angle(degree), clockwise, a=0 along the X axis
x, y, w, h, a = xywha
corner = np.array([[-w / 2, -h / 2], [w / 2, -h / 2], [w / 2, h / 2], [-w / 2, h / 2]])
# a = np.deg2rad(a)
    transform = np.array([[np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]])
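# Worked example (added for illustration; the tail of xywha2xy4 above is cut off
# in this snippet, so the same corner/rotation arithmetic is redone end-to-end
# for a toy box).
def _xywha2xy4_demo():
    # 4x2 box centred at (10, 20); a is passed straight to np.cos/np.sin, i.e. radians here
    x, y, w, h, a = 10.0, 20.0, 4.0, 2.0, np.pi / 2
    corner = np.array([[-w / 2, -h / 2], [w / 2, -h / 2], [w / 2, h / 2], [-w / 2, h / 2]])
    rot = np.array([[np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]])
    # rotate each corner about the box centre, then translate to (x, y); for this
    # box the corners come out approximately as [[11, 18], [11, 22], [9, 22], [9, 18]]
    return corner.dot(rot.T) + [x, y]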
import numpy as np
from core.polymer_chain import Polymer
from core.polymer_chain import RandomChargePolymer
from pymatgen import Molecule
from utils import dihedral_tools
import unittest
__author__ = "<NAME>"
class TestPolymer(unittest.TestCase):
@classmethod
def setUpClass(cls):
# setup for polymer class
cls.monomer_num = 25
cls.monomer_len = 1.5
cls.link_len = 0.5
cls.link_angle = 15.0
cls.sample_num = 1000
cls.prob_angle = np.array([[0.0, -180.0], [0.2, -90.0], [0.3, -45.0], [0.4, 0.0],
[0.5, 45.0], [0.6, 90.0], [0.8, 180.0]])
# create polymer
cls.polymer = Polymer(cls.monomer_num, cls.monomer_len, cls.link_len, cls.link_angle,
cls.prob_angle, cls.sample_num)
# manually calculated bead or atom positions
# the first commented out positions are for when l1 is first instead of l2
"""cls.linear_pos_values = np.array([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0],
[1.9829629131445341, -0.12940952255126037, 0.0],
[3.4829629131445339, -0.12940952255126037, 0.0],
[3.9659258262890682, 0.0, 0.0],
[5.4659258262890678, 0.0, 0.0],
[5.9488887394336016, -0.12940952255126037, 0.0],
[7.4488887394336016, -0.12940952255126037, 0.0]])"""
cls.linear_chain_actual = np.array([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0],
[1.9829629131445341, 0.12940952255126037, 0.0],
[3.4829629131445339, 0.12940952255126037, 0.0],
[3.9659258262890682, 0.0, 0.0],
[5.4659258262890678, 0.0, 0.0],
[5.9488887394336016, 0.12940952255126037, 0.0],
[7.4488887394336016, 0.12940952255126037, 0.0]])
def test_build_chain(self):
np.testing.assert_almost_equal(self.linear_chain_actual, self.polymer.chain[:8])
def test_random_angle(self):
angle_num = self.monomer_num - 1
self.polymer.rotate_chain()
np.testing.assert_equal(angle_num, len(self.polymer.dihedral_set))
for angle in self.polymer.dihedral_set:
self.assertIn(angle, self.prob_angle[:, 1])
def test_rotate_chain(self):
self.polymer.rotate_chain()
# this makes a fake molecule and checks all the dihedral angles
fake_atoms = []
fake_atom_coords = []
for coord in self.polymer.relax_chain:
fake_atoms.append('C')
fake_atom_coords.append(coord)
fake_mol = Molecule(species=fake_atoms, coords=fake_atom_coords)
# find all the dihedral angles
dihedral_list_actual = []
for site, val in enumerate(fake_mol, 1):
if site <= len(fake_mol) - 3 and site % 2 != 0:
da = round(dihedral_tools.get_dihedral(fake_mol, [site, site + 1, site + 2, site + 3]))
# this if statement ensures 180 == -180 and 0 == -0
if da == -180.0 or da == -0.0:
da = abs(da)
dihedral_list_actual.append(da)
self.assertEqual(len(dihedral_list_actual), len(self.polymer.dihedral_set))
rotate_chain_dihedral_set = []
# again this loop ensures 180 == -180 and 0 == -0
for angle in self.polymer.dihedral_set:
if angle == -180.0 or angle == -0.0:
rotate_chain_dihedral_set.append(abs(angle))
else:
rotate_chain_dihedral_set.append(angle)
np.testing.assert_almost_equal(dihedral_list_actual, rotate_chain_dihedral_set)
def test_tangent_auto_corr(self):
# check case where all tangent vectors are aligned
self.polymer.tangent_auto_corr(self.polymer.chain)
for stat in self.polymer.tangent_corr:
np.testing.assert_allclose(stat.mean, 1.0)
def test_unit_normal_vectors(self):
self.polymer._unit_normal_vectors(self.polymer.chain)
np.testing.assert_array_equal(len(self.polymer.unit_normal), self.monomer_num)
totally_planar_normal = np.array([0.0, 0.0, 1.0])
for u_vec in self.polymer.unit_normal:
np.testing.assert_almost_equal(u_vec ** 2, totally_planar_normal ** 2)
self.polymer._unit_normal_vectors(self.polymer.relax_chain)
for u_vec in self.polymer.unit_normal:
np.testing.assert_almost_equal(np.linalg.norm(u_vec), 1.0)
calc_u_vectors = np.zeros((self.monomer_num, 3))
index = 0
for i, pt in enumerate(self.polymer.relax_chain):
if i == 0:
vec1 = self.polymer.relax_chain[i + 1] - pt
vec2 = self.polymer.relax_chain[i + 2] - pt
calc_u_vectors[i] = np.cross(vec1, vec2)
calc_u_vectors[i] /= np.linalg.norm(calc_u_vectors[i])
index += 1
if i % 2 != 0 and i < (len(self.polymer.relax_chain) - 2):
vec1 = self.polymer.relax_chain[i + 1] - pt
vec2 = self.polymer.relax_chain[i + 2] - pt
calc_u_vectors[index] = np.cross(vec1, vec2)
calc_u_vectors[index] /= np.linalg.norm(calc_u_vectors[index])
index += 1
np.testing.assert_almost_equal(self.polymer.unit_normal ** 2, calc_u_vectors ** 2)
def test_p2_order_param(self):
# two case all aligned, and isotropic
# case 1 all aligned
z_unit = np.array([0., 0., 1.] * 1000)
z_unit.shape = (1000, 3)
self.polymer.p2_order_param(unit_vectors=z_unit)
np.testing.assert_almost_equal(np.trace(self.polymer.director_matrix), 0.0)
np.testing.assert_almost_equal(self.polymer.s_order_param.mean, 1.0)
# case 2 isotropic
# generate uniform vectors on a unit sphere
index = 0
n = 50000
iso_unit = np.zeros((n, 3))
while index <= (n - 1):
chi_1 = np.random.uniform(0.0, 1.0, 1)
chi_2 = np.random.uniform(0.0, 1.0, 1)
xhi_1 = 1 - (2 * chi_1)
xhi_2 = 1 - (2 * chi_2)
xhi_sq = xhi_1 ** 2 + xhi_2 ** 2
if xhi_sq < 1:
iso_unit[index] = [2 * xhi_1 * ((1 - xhi_sq) ** (1. / 2.)),
2 * xhi_2 * ((1 - xhi_sq) ** (1. / 2.)),
1 - 2 * xhi_sq]
index += 1
self.polymer.p2_order_param(unit_vectors=iso_unit)
np.testing.assert_almost_equal(np.trace(self.polymer.director_matrix), 0.0)
np.testing.assert_almost_equal(self.polymer.s_order_param.mean, 0.0, decimal=1)
def test_p2_auto_corr(self):
samples = 200
p2_polymer = Polymer(self.monomer_num, self.monomer_len, self.link_len, self.link_angle, self.prob_angle)
p2_polymer.p2_auto_corr(p2_polymer.chain)
# check correlation is 1 when all aligned
for stat in p2_polymer.s_x_corr:
np.testing.assert_allclose(1.0, stat.mean)
# check the correlation over a bunch of samples
pair_interacts = int((self.monomer_num * (self.monomer_num + 1)) / 2)
# adds 1 to all lists for case where everything is aligned
ensemble_list = [[1.0] for i in range(self.monomer_num)]
# loops of the number of samples
for sample in range(1, samples):
p2_polymer.rotate_chain()
p2_polymer.p2_auto_corr(p2_polymer.relax_chain)
polymer_list = []
for i in range(self.monomer_num):
pair_list = []
for j in range(i, self.monomer_num, 1):
pair_list.append(((3. / 2.) * (np.dot(p2_polymer.unit_normal[i],
p2_polymer.unit_normal[j]) ** 2)) - (1. / 2.))
polymer_list.append(pair_list)
for l in polymer_list:
for i, val in enumerate(l):
ensemble_list[i].append(val)
actual_means = [np.mean(i) for i in ensemble_list]
# check the right number of pair interactions were sampled
# checks all the self interactions
np.testing.assert_equal(int((samples * self.monomer_num)), int(p2_polymer.s_x_corr[0].k))
# checks the longest interaction only 1 per polymer chain sample
np.testing.assert_equal(int(samples), int(p2_polymer.s_x_corr[-1].k))
for i, stat in enumerate(p2_polymer.s_x_corr):
# print(actual_means[i], stat.mean)
np.testing.assert_allclose(actual_means[i], stat.mean, atol=0.01, rtol=0.0)
def test_sample_chain(self):
# sample by looping over rotate_chains
# start a new chain
sample_polymer = Polymer(self.monomer_num, self.monomer_len, self.link_len, self.link_angle,
self.prob_angle, sample_num=self.sample_num)
end_to_end = []
for i in range(self.sample_num):
sample_polymer.rotate_chain()
end_to_end.append(sample_polymer.end_to_end[-1])
mean_ete = np.mean(end_to_end)
std_ete = np.std(end_to_end)
# sample using polymer class
sample_polymer.sample_chains()
# print(mean_ete, sample_polymer.ete_stats.mean[-1])
# print(std_ete, sample_polymer.ete_stats.stdev[-1])
np.testing.assert_allclose(mean_ete, sample_polymer.ete_stats.mean[-1], atol=0.85, rtol=0.0)
np.testing.assert_allclose(std_ete, sample_polymer.ete_stats.stdev[-1], atol=0.85, rtol=0.0)
class TestRandomChargedPolymer(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.monomer_num = 51
cls.monomer_len = 1.5
cls.link_len = 0.5
cls.link_angle = 15.0
cls.sample_num = 500
cls.prob_angle = np.array([[0.0, -180.0], [0.2, -90.0], [0.3, -45.0], [0.4, 0.0],
[0.5, 45.0], [0.6, 90.0], [0.8, 180.0]])
cls.c_monomer_len = 1.6
cls.c_link_len = 0.6
cls.c_link_angle = 14.0
cls.c_prob_angle = np.array([[0.0, 175.0], [0.5, 5.0]])
cls.c_polymer = RandomChargePolymer(cls.monomer_num, cls.monomer_len, cls.link_len, cls.link_angle,
cls.prob_angle, cls.c_monomer_len, cls.c_link_len, cls.c_link_angle,
cls.c_prob_angle, cls.sample_num)
def test_c_random_angle(self):
self.c_polymer.shuffle_charged_chain(10)
c_angle_num = 10
self.assertEqual(c_angle_num, len(self.c_polymer.c_dihedral_set))
for angle in self.c_polymer.c_dihedral_set:
self.assertIn(angle, self.c_prob_angle[:, 1])
def test_shuffle_charged_chain(self):
self.c_polymer.shuffle_charged_chain(10)
# check position lists are same length
self.assertEqual(len(self.c_polymer.relax_chain), len(self.c_polymer.charged_chain))
# loop through the chain and check dihedral angles
fake_atoms = []
fake_atom_coords = []
for coord in self.c_polymer.charged_chain:
fake_atoms.append('C')
fake_atom_coords.append(coord)
fake_mol = Molecule(species=fake_atoms, coords=fake_atom_coords)
# find all the dihedral angles
dihedral_list_actual = []
for site, val in enumerate(fake_mol, 1):
if site <= len(fake_mol) - 3 and site % 2 != 0:
da = round(dihedral_tools.get_dihedral(fake_mol, [site, site + 1, site + 2, site + 3]))
# this if statement ensures 180 == -180 and 0 == -0
if da == -180.0 or da == -0.0:
da = abs(da)
dihedral_list_actual.append(da)
self.assertEqual(len(dihedral_list_actual), len(self.c_polymer.shuffle_dihedral_set))
shuffle_dihedral_set = []
# again this loop ensures 180 == -180 and 0 == -0
for angle in self.c_polymer.shuffle_dihedral_set:
if angle == -180.0 or angle == -0.0:
shuffle_dihedral_set.append(abs(angle))
else:
shuffle_dihedral_set.append(angle)
np.testing.assert_almost_equal(dihedral_list_actual, shuffle_dihedral_set)
def test_c_build_chain(self):
self.c_polymer.shuffle_charged_chain(0)
# check the length is right
self.assertEqual(self.monomer_num * 2, len(self.c_polymer.c_chain))
# check that build_chain and c_build_chain are the same when there are 0 excited dihedrals
        np.testing.assert_almost_equal(self.c_polymer.chain, self.c_polymer.c_chain)
import numpy as np
import numpy.linalg as linalg
import math
import unittest
from trio.common.camera import Camera
from trio.common.math import normalize, column, euclidean, homogeneous
from trio.common.matrix import matrix_rank, matrix_ypr, \
matrix_decompose_ypr, matrix_look_at, matrix_intrinsic, \
matrix_permute_ecef, matrix_decompose_camera, matrix_decompose_projection, \
matrix_relative_rotation
from .utils import equal_matrices, equal_arrays
def normalized_camera_ray(fov):
"""
Helper function to generate a normalized camera ray from angles.
"""
x = math.tan(fov[0])
y = math.tan(fov[1])
z = 1.0
return normalize(np.array([x, y, z]))
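# Small sanity check (added for illustration; not one of the original tests):
# a zero field-of-view offset should map to the forward-looking unit ray +z.
def _normalized_camera_ray_demo():
    ray = normalized_camera_ray(np.radians((0.0, 0.0)))
    return np.allclose(ray, np.array([0.0, 0.0, 1.0]))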
class CommonMatrixTestCase(unittest.TestCase):
def test_matrix_ypr(self):
# Start with a matrix with no rotation.
zero_m = matrix_ypr(np.radians((0, 0, 0)))
self.assertEqual(3, matrix_rank(zero_m))
self.assertEqual((3, 3), zero_m.shape)
# It shall be equal to the identity matrix.
eye_m = np.eye(3, dtype=float)
self.assertTrue(equal_matrices(eye_m, zero_m))
# Continue with a "random" rotation.
random_m = matrix_ypr(np.radians((-87, 13.2, 37)))
self.assertEqual(3, matrix_rank(random_m))
self.assertEqual((3, 3), random_m.shape)
# Rotation matrix multiplied with its transpose shall be the identity.
self.assertTrue(equal_matrices(eye_m, random_m.T @ random_m))
def test_matrix_decompose_ypr(self):
# Start with a matrix with no rotation.
zero_m = matrix_ypr(np.radians((0, 0, 0)))
zero_d = np.degrees(matrix_decompose_ypr(zero_m))
self.assertAlmostEqual(0, zero_d[0])
self.assertAlmostEqual(0, zero_d[1])
self.assertAlmostEqual(0, zero_d[2])
# Continue with a "random" rotation.
random_m = matrix_ypr(np.radians((-87, 13.2, 37)))
random_d = np.degrees(matrix_decompose_ypr(random_m))
self.assertAlmostEqual(-87, random_d[0])
self.assertAlmostEqual(13.2, random_d[1])
self.assertAlmostEqual(37, random_d[2])
def test_matrix_look_at_decompose_ypr(self):
# Start with a matrix that shall have no rotation.
zero_m = matrix_look_at(np.array([0, 0, 0]),
np.array([2, 0, 0]),
np.array([0, 0, 1]))
self.assertEqual(3, matrix_rank(zero_m))
# It shall be equal to the identity matrix.
eye_m = np.eye(3, dtype=float)
self.assertTrue(equal_matrices(eye_m, zero_m))
# Continue with a random look at.
random_m = matrix_look_at(np.array([4, 3.3, 2.9]),
np.array([0, 0, 0]),
np.array([0, 0.6, 0.7]))
self.assertEqual(3, matrix_rank(random_m))
# Decompose and recreate with ypr - shall be equal matrices.
ypr = matrix_decompose_ypr(random_m)
ypr_m = matrix_ypr(np.array(ypr))
self.assertTrue(equal_matrices(random_m, ypr_m))
def intrinsic_matrix(self, fov, rect):
i = matrix_intrinsic(fov, rect)
iInv = linalg.inv(i)
# Build test data for the image mid point and the four corners.
xs = [(np.array([0, 0]),
np.array([rect[0] + rect[2] / 2.0, rect[1] + rect[3] / 2.0])),
(np.array([-fov[0], -fov[1]]) / 2.0,
np.array([rect[0], rect[1]])),
(np.array([fov[0], -fov[1]]) / 2.0,
np.array([rect[0] + rect[2], rect[1]])),
(np.array([-fov[0], fov[1]]) / 2.0,
np.array([rect[0], rect[1] + rect[3]])),
(np.array([fov[0], fov[1]]) / 2.0,
np.array([rect[0] + rect[2], rect[1] + rect[3]]))
]
for x in xs:
ray = normalized_camera_ray(x[0])
px = euclidean(i @ column(ray))
self.assertTrue(equal_matrices(column(x[1]), px))
# Test inversion.
ray2 = normalize(iInv @ homogeneous(px))
self.assertTrue(equal_matrices(column(ray), ray2))
def test_intrinsic_matrix(self):
# Test unit size image size.
self.intrinsic_matrix(np.radians((30, 20)),
np.array([-0.5, -0.5, 1.0, 1.0]))
# Test ordinary image size type.
self.intrinsic_matrix(np.radians((30, 20)),
np.array([0, 0, 720 - 1, 480 - 1]))
def test_matrix_decompose_camera(self):
# Create some random camera position.
c = Camera(np.array([1899.8, 3678, -8765.5]),
np.radians((-90, 33, 4)),
np.radians((30, 20)))
permute = matrix_permute_ecef()
decomp = matrix_decompose_camera(c.camera_matrix, permute)
# Compare.
self.assertTrue(equal_arrays(np.radians((-90, 33, 4)),
np.array(decomp[0])))
self.assertTrue(equal_arrays(np.array([1899.8, 3678, -8765.5]),
decomp[1]))
def test_matrix_decompose_projection(self):
# Create some random camera position.
c = Camera(np.array([1899.8, 3678, -8765.5]),
np.radians((-90, 33, 4)),
np.radians((30, 20)))
intrinsic = matrix_intrinsic(np.radians((30, 20)),
                                     np.array([-0.5, -0.5, 1.0, 1.0]))
#!/usr/bin/python
# -*- coding: utf-8 -*-
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelEncoder
from collections import defaultdict
import numpy as np
import sys
PY3 = sys.version_info[0] == 3
def lambda_underscore(): # Module level named lambda-function to make defaultdict picklable
return "_"
class LetterConfig:
def __init__(self,letters=None, vowels=None, pos_lookup=None):
if letters is None:
self.letters = defaultdict(set)
self.vowels = set()
self.pos_lookup = defaultdict(lambda: "_")
else:
letter_cats = ["current_letter", "prev_prev_letter", "prev_letter", "next_letter", "next_next_letter", "prev_grp_first", "prev_grp_last", "next_grp_first", "next_grp_last"]
self.letters = defaultdict(set)
if "group_in_lex" in letters or "current_letter" in letters: # Letters dictionary already instantiated - we are loading from disk
self.letters.update(letters)
else:
for cat in letter_cats:
self.letters[cat] = letters
self.vowels = vowels
self.pos_lookup = pos_lookup
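# Usage sketch (added for illustration; the letter inventory is invented).
# Passing a plain set fans it out to every positional letter category, and the
# module-level lambda_underscore keeps the resulting defaultdict picklable.
def _letter_config_demo():
    cfg = LetterConfig(letters={'a', 'b', 'g'}, vowels={'a'},
                       pos_lookup=defaultdict(lambda_underscore))
    return cfg.letters['current_letter'], cfg.vowels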
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names].values
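# Usage sketch (added for illustration; the column names are invented). The
# selector is meant to be the first step of a Pipeline that pulls a fixed set
# of columns out of a pandas DataFrame before numeric preprocessing.
def _selector_pipeline_demo(df):
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    pipe = Pipeline([
        ('select', DataFrameSelector(['letter_freq', 'word_len'])),  # hypothetical columns
        ('scale', StandardScaler()),
    ])
    return pipe.fit_transform(df)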
class MultiColumnLabelEncoder(LabelEncoder):
"""
Wraps sklearn LabelEncoder functionality for use on multiple columns of a
pandas dataframe.
"""
def __init__(self, columns=None):
self.encoder_dict = {}
if isinstance(columns, list):
self.columns = np.array(columns)
else:
self.columns = columns
def fit(self, dframe):
"""
Fit label encoder to pandas columns.
Access individual column classes via indexing `self.all_classes_`
Access individual column encoders via indexing
`self.all_encoders_`
"""
# if columns are provided, iterate through and get `classes_`
if self.columns is not None:
# ndarray to hold LabelEncoder().classes_ for each
# column; should match the shape of specified `columns`
            self.all_classes_ = np.ndarray(shape=self.columns.shape, dtype=object)
# Script which downloads and preprocesses a collection of UCI datasets.
# Flag --dir specifies the relative path at which the script will work.
# The script creates a directory called "data" under --dir and downloads
# the UCI data there. It then preprocesses the data to a format ready
# to be consumed by the models. By default, this will overwrite an existing
# "data" directory.
#
# For each dataset, the script creates an x.npy and y.npy array containing
# the model input and outputs under data/dataset_name/.
import os
import shutil
import zipfile
import urllib.request
import argparse
import numpy as np
import pandas as pd
from scipy.io import arff
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, \
StandardScaler, \
OrdinalEncoder, \
Binarizer
from sklearn.utils import as_float_array
from sklearn.pipeline import Pipeline
argparser = argparse.ArgumentParser()
argparser.add_argument("--dir", dest="dir", required=True, type=str)
argparser.add_argument("--no-download", dest='no_download', action="store_true")
args = argparser.parse_args()
# =============================================================================
# Dataset downloader
# =============================================================================
def download_dataset(root_dir, dataset_name, info):
print(f'Downloading {dataset_name} dataset')
dataset_dir = f'{root_dir}/{dataset_name}'
if os.path.exists(os.path.join(dataset_dir, "data.npy")):
print(f'{dataset_name} dataset already exists!')
return
else:
if not os.path.exists(dataset_dir):
os.mkdir(dataset_dir)
base_url = info['base_url']
# Handle the case where the data must be unzipped
if info['zipped']:
# Download zip file
url = f"{base_url}/{info['zipfile']}"
save_location = f"{dataset_dir}/{info['zipfile']}"
urllib.request.urlretrieve(url, save_location)
# Unzip data
with zipfile.ZipFile(save_location, 'r') as zip_handle:
zip_handle.extractall(save_location[:-4])
data = []
for i, file_name in enumerate(info['files']):
save_location = f'{dataset_dir}/{file_name}'
if not info['zipped']:
if not os.path.exists(save_location):
url = f"{base_url}/{file_name}"
urllib.request.urlretrieve(url, save_location)
if ('post-download' in info) and \
(info['post-download'][i] is not None):
info['post-download'][i](save_location)
_data = None
if ('pre-process' in info) and \
(info['pre-process'][i] is not None):
_data = info['pre-process'][i](save_location)
elif save_location[-5:] == '.xlsx':
_data = np.array(pd.read_excel(save_location), dtype=str)
else:
_data = np.loadtxt(save_location,
dtype=str,
delimiter=info['delimiter'])
_data = _data[1:] if info['drop-header'] else _data
try:
rows_with_missing = np.any(_data == '?', axis=1)
except np.AxisError:
rows_with_missing = np.any(np.isnan(_data), axis=1)
print(f'{np.sum(rows_with_missing)}/{rows_with_missing.shape[0]} '
f'rows had missing data\n')
if info['remove-missing']:
_data = _data[~rows_with_missing, :]
data.append(_data)
    data = np.concatenate(data, axis=0)
# -*- coding: utf-8 -*-
import numpy as np
from inspect import isfunction
from dramkit.gentools import isnull
from sklearn.preprocessing import OneHotEncoder
class ELMClassifier(object):
'''
    | Extreme Learning Machine for classification tasks.
    | Let X be the input layer, Y the output layer and H the hidden layer, with
    | Nsmp samples, NcolX input features, n_hide hidden nodes and NcolY output features.
    | The ELM forward pass is:
    | H(Nsmp*n_hide) = X(Nsmp*NcolX) * w(NcolX*n_hide) + b((Nsmp*1)*n_hide)
    | Y(Nsmp*NcolY) = H(Nsmp*n_hide) * beta(n_hide*NcolY)
    | Training: w and b are generated at random, while beta has the analytic
    | solution beta = (Moore-Penrose pseudo-inverse of H) * Y.
References
----------
- https://blog.csdn.net/m0_37922734/article/details/80424133
- https://blog.csdn.net/qq_32892383/article/details/90760481
- https://blog.csdn.net/qq_40360172/article/details/105175946
'''
def __init__(self, n_hide=10, func_act='softplus',
w_low=-1, w_up=1, b_low=-1, b_up=1,
c=None, random_state=5262):
'''
Parameters
----------
        n_hide : int
            Number of hidden-layer nodes.
        func_act: str, function
            Activation function; one of ['softplus', 'sigmoid', 'tanh'] or a custom callable.
        w_low : float
            Lower bound of the input->hidden weights w.
        w_up : float
            Upper bound of the input->hidden weights w.
        b_low : float
            Lower bound of the input->hidden biases b.
        b_up : float
            Upper bound of the input->hidden biases b.
        c : float, None
            Regularization parameter.
        random_state : None, int
            Random seed.
'''
        self.n_hide = n_hide # number of hidden-layer nodes
        # lower/upper bounds of the input->hidden weights w and biases b
self.w_low = w_low
self.w_up = w_up
self.b_low = b_low
self.b_up = b_up
        self.w = 'uninitialized parameter (shape: NcolX*{})'.format(n_hide)
        self.b = 'uninitialized parameter (shape: 1*{})'.format(n_hide)
        self.beta = 'uninitialized parameter (shape: {}*NcolY)'.format(n_hide)
        # regularization parameter
        self.c = c
        # activation function
if func_act == 'softplus':
self.func_act = self.softplus
elif func_act == 'sigmoid':
self.func_act = self.sigmoid
elif func_act == 'tanh':
self.func_act = self.tanh
else:
if isfunction(func_act):
self.func_act = func_act
else:
                raise ValueError('Unrecognized activation function, please check!')
        # other parameters
self.random_state = random_state
@staticmethod
def sigmoid(x):
        '''Sigmoid activation function'''
return 1.0 / (1 + np.exp(-x))
@staticmethod
def softplus(x):
        '''Softplus activation function'''
return np.log(1 + np.exp(x))
@staticmethod
def tanh(x):
        '''Tanh activation function'''
return (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))
def fit(self, x_train, y_train):
'''
        Train the model.
        Parameters
        ----------
        x_train : pd.DataFrame, np.array
            Training inputs, one sample per row.
        y_train : pd.DataFrame, np.array
            Training targets, one sample per row.
'''
x_train, y_train = np.array(x_train), np.array(y_train)
        Nsmp, NcolX = x_train.shape[0], x_train.shape[1] # number of samples and features
        # one-hot encode the labels
self.Yonehot = None
if len(y_train.shape) == 1 or y_train.shape[1] == 1:
self.Yonehot = OneHotEncoder()
y_train = self.Yonehot.fit_transform(y_train.reshape(-1, 1)).toarray()
        # random seed
if isnull(self.random_state):
rnd_w = np.random.RandomState()
rnd_b = np.random.RandomState()
else:
rnd_w = np.random.RandomState(self.random_state)
rnd_b = np.random.RandomState(self.random_state)
        # randomize the input->hidden weights w
        self.w = rnd_w.uniform(self.w_low, self.w_up, (NcolX, self.n_hide))
        # randomize the input->hidden biases b
        self.b = rnd_b.uniform(self.b_low, self.b_up, (1, self.n_hide))
        Bhide = np.ones([Nsmp, self.n_hide]) * self.b
        # hidden-layer activations
        Hide = np.matrix(self.func_act(np.dot(x_train, self.w) + Bhide))
        # solve for beta
        if isnull(self.c):
            iMP = np.linalg.pinv(Hide) # Moore-Penrose pseudo-inverse
self.beta = np.dot(iMP, y_train)
else:
Hide_ = np.dot(Hide.T, Hide) + Nsmp / self.c
            iMP = np.linalg.pinv(Hide_) # Moore-Penrose pseudo-inverse
iMP_ = np.dot(iMP, Hide.T)
self.beta = np.dot(iMP_, y_train)
return self
def predict(self, x):
        '''Predict; each row of x is a sample to be predicted.'''
Nsmp = x.shape[0]
Bhide = np.ones([Nsmp, self.n_hide]) * self.b
Hide = np.matrix(self.func_act(np.dot(x, self.w) + Bhide))
        y_pred_ = np.dot(Hide, self.beta)
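# Minimal end-to-end sketch of the ELM idea (added for illustration; it does not
# rely on the truncated predict() above). The hidden-layer weights are random and
# only beta is fitted, in closed form via the Moore-Penrose pseudo-inverse.
def _elm_closed_form_demo(n_samples=200, n_features=5, n_hidden=20, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, n_features)
    Y = np.stack([X[:, 0] > 0, X[:, 0] <= 0], axis=1).astype(float)  # one-hot toy labels
    w = rng.uniform(-1, 1, (n_features, n_hidden))
    b = rng.uniform(-1, 1, (1, n_hidden))
    H = np.log(1 + np.exp(X @ w + b))    # softplus hidden layer, as in ELMClassifier
    beta = np.linalg.pinv(H) @ Y         # beta = (Moore-Penrose pseudo-inverse of H) * Y
    acc = np.mean(np.argmax(H @ beta, axis=1) == np.argmax(Y, axis=1))
    return acc                           # training accuracy of the fitted ELM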
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Common functions to transform image.
Code: https://github.com/fepegar/torchio
"""
# Import
import numpy as np
from scipy.spatial.transform import Rotation
from scipy.ndimage import map_coordinates
from .transform import compose
from .transform import gaussian_random_field
from .transform import affine_flow
from .utils import interval
def affine(arr, rotation=10, translation=10, zoom=0.2, seed=None):
""" Random affine transformation.
Parameters
----------
arr: array
the input data.
rotation: float or 2-uplet, default 10
the rotation in degrees of the simulated movements. Larger
values generate more distorted images.
translation: float or 2-uplet, default 10
the translation in voxel of the simulated movements. Larger
values generate more distorted images.
zoom: float, default 0.2
the zooming magnitude. Larger values generate more distorted images.
seed: int, default None
seed to control random number generator.
Returns
-------
transformed: array
the transformed input data.
"""
rotation = interval(rotation)
translation = interval(translation)
    np.random.seed(seed)
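# Illustrative sketch (an assumption, not this module's own affine() implementation):
# one way to build and apply a random affine transform with the libraries imported
# above; _demo_random_affine and everything inside it is invented for this example.
from scipy.ndimage import affine_transform

def _demo_random_affine(arr, rotation=10, translation=10, zoom=0.2, seed=None):
    """Hypothetical helper: random rotation/translation/zoom using scipy only."""
    rng = np.random.RandomState(seed)
    angles = rng.uniform(-rotation, rotation, size=3)        # degrees per axis
    rot = Rotation.from_euler("xyz", angles, degrees=True).as_matrix()
    scale = 1.0 + rng.uniform(-zoom, zoom)                   # isotropic zoom
    matrix = rot * scale
    shift = rng.uniform(-translation, translation, size=3)   # translation in voxels
    center = np.array(arr.shape) / 2.0
    offset = center - matrix @ center + shift                # keep the volume centre fixed
    return affine_transform(arr, matrix, offset=offset, order=1)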
import numpy as np
from numpy import linalg as np_linalg
from scipy import linalg as sp_linalg
from scipy import stats as sp_stats
from tqdm import tqdm
class RBLasso():
def __init__(self, alpha=1.0, rao_s2=False, keep_history=False, init_ols=True):
"""Parameters:
* alpha: the lasso (a.k.a. "lambda") regularization constant.
* rao_s2: if True, use a Rao-Blackwellized estimate for the variance term.
* keep_history: if True, keep the value of each parameter during every iteration.
* init_ols: if True, initialize inference with OLS solution (not possible if p > n).
"""
self.__alpha = alpha
self.__rao_s2 = rao_s2
self.__keep_history = keep_history
self.__iterations = 0
self.__init_ols = init_ols
self.__beta = None
self.__sigma = None
self.__tau = None
self.__X_mean = None
self.__X_norm = None
self.__y_mean = None
def fit(self, X, y, num_iter=1000):
"""Fits the model, running for num_iter iterations."""
## compute norms and means
self.__X_mean = np.mean(X, axis=0)
self.__X_norm = np_linalg.norm(X, axis=0)
self.__y_mean = np.mean(y)
## center X and y
X = (X - self.__X_mean) / self.__X_norm
y = y - self.__y_mean
## compute XtX and Xty
XtX = np.dot(X.T, X)
Xty = np.dot(X.T, y)
## if doing rao-blackwellized sigma, compute yTy
if self.__rao_s2:
yTy = np.sum(y**2)
## initialize beta, tau and sigma arrays, depending on keep_history flag.
arr_size = num_iter if self.__keep_history else 2
## initialize beta
self.__beta = np.zeros((arr_size, X.shape[1]))
if self.__init_ols and X.shape[0] > X.shape[1]:
## OLS estimate for beta, if possible
chol_XtX = sp_linalg.cho_factor(XtX)
self.__beta[0] = sp_linalg.cho_solve(chol_XtX, Xty)
else:
## otherwise, random uniform
self.__beta[0] = 2*np.random.rand(X.shape[1]) - 1.0
## initialize sigma with RSS of beta.
self.__sigma = np.zeros(arr_size)
resid = y - np.dot(X, self.__beta[0])
self.__sigma[0] = resid.var()
## initialize taus
self.__tau = np.zeros((arr_size, X.shape[1]))
## iterate
for cur_iter in tqdm(range(1, num_iter)):
prev_pos = (cur_iter - 1) if self.__keep_history else (cur_iter - 1) % 2
next_pos = cur_iter if self.__keep_history else cur_iter % 2
## update taus
tau_loc = 0.5*(np.log(self.__alpha) + np.log(self.__sigma[prev_pos])) - np.log(np.abs(self.__beta[prev_pos]))
tau_scale = self.__alpha
self.__tau[next_pos] = sp_stats.invgauss.rvs(mu=np.exp(tau_loc) / tau_scale, scale=tau_scale)
## update beta
beta_A = XtX + np.diag(self.__tau[next_pos])
beta_A_chol = sp_linalg.cho_factor(beta_A)
beta_mu = sp_linalg.cho_solve(beta_A_chol, Xty)
            beta_cov = sp_linalg.cho_solve(beta_A_chol, np.diag(np.repeat(self.__sigma[prev_pos], X.shape[1])))
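            # Assumed continuation (not the author's code): a standard Bayesian-lasso
            # Gibbs sweep would next draw beta ~ N(beta_mu, beta_cov), e.g. with
            # np.random.multivariate_normal(beta_mu, beta_cov), and then resample the
            # variance from its inverse-gamma full conditional.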
import numpy as np
from scipy.optimize import minimize
from scipy.interpolate import interp1d
def compute_fwd_interpolation(fwd, swap_rate_end, zero_rates_known, start_tenor, end_tenor):
"""
Interpolates forward rates as described by EIOPA (constant forward rates between liquid maturities).
Source: 'Background Document On The Opinion On The 2020 Review Of Solvency II' p.787f.
:param fwd: fwd to be calculated: 1d-Ndarray
:param swap_rate_end: last liquid swap rate in interval. E.g. for 15y --> 20y period this would be 20y swap rate: 1d-Ndarray
:param zero_rates_known: zero rates known so far: 1d-Ndarray
:param start_tenor: last known tenor / maturity: float
:param end_tenor: final tenor / maturity (end of interpolation): float
:return: cash value of swap (par swap = 1)
"""
left_side_1 = sum([1. / ((1+rate)**(t+1)) for t, rate in enumerate(zero_rates_known)])
left_side_2 = sum([1. / ((1+fwd)**t) for t in range(1, end_tenor - start_tenor + 1)])
left_side = left_side_1 + (1. / (1+zero_rates_known[-1])**start_tenor) * left_side_2
right_side = 1. / ((1+zero_rates_known[start_tenor-1])**(start_tenor) * (1+fwd)**(end_tenor - start_tenor))
return swap_rate_end * left_side + right_side
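# Usage sketch (illustrative numbers, not from the source): with a flat 1% zero curve
# known up to 15y, a 1.1% 20y swap rate and a trial constant forward of 1.2%,
#   compute_fwd_interpolation(np.array([0.012]), 0.011, np.full(15, 0.01), 15, 20)
# returns the cash value of the 20y par swap; the forward that makes this equal to 1
# is the constant 15y->20y forward used for interpolation.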
def error_fwd_interpolation(fwd, args):
"""
Error function for numeric procedure required to interpolate between observed liquid market rates
:param fwd: dummy forward rate (rate is later used for numeric operation)
:param args: optimization parameters
:return: (cash value of swap - 1) ---> if fwd rate is solved for this should be minimized
"""
swap_rate_end = args[0]
zero_rates_known = args[1]
start_tenor = args[2]
end_tenor = args[3]
res = compute_fwd_interpolation(fwd, swap_rate_end, zero_rates_known, start_tenor, end_tenor)
return np.abs(res-1)
def interpolate_fwd(fwd, swap_rate_end, zero_rates_known, start_tenor, end_tenor):
"""
Interpolates forward rates according to methodology described by EIOPA review 2020.
Source: 'Background Document On The Opinion On The 2020 Review Of Solvency II' p.788
:param fwd: fwd to be calculated: 1d-Ndarray
:param swap_rate_end: last liquid swap rate in interval. E.g. for 15y --> 20y period this would be 20y swap rate: 1d-Ndarray
:param zero_rates_known: zero rates known so far: 1d-Ndarray
:param start_tenor: last known tenor / maturity: float
:param end_tenor: final tenor / maturity (end of interpolation): float
:return: final_fwd that can be used for calculation of all zero rates in between the observed swap rates: 1d-Ndarray
"""
# Optimization tolerance
TOLERANCE = 1e-10
    # Number of forward rates to solve for
    number_of_fwds = len(fwd)
    # Bounds on the forward rate
    bound = (-1.0, 1.0)
    bounds = tuple(bound for asset in range(number_of_fwds))
# Optimisation (minimize error)
optimize_result = minimize(fun=error_fwd_interpolation,
x0=fwd,
args=[swap_rate_end, zero_rates_known, start_tenor, end_tenor],
method='SLSQP',
bounds=bounds,
tol=TOLERANCE,
options={'disp': False})
    # Recover the solved forward rate from the optimisation result
    final_fwd = optimize_result.x
return final_fwd
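# Usage sketch (illustrative numbers, not from the source): solve for the constant
# 15y->20y forward that prices the 20y swap at par, starting the optimiser at 1%:
#   interpolate_fwd(fwd=np.array([0.01]), swap_rate_end=0.011,
#                   zero_rates_known=np.full(15, 0.01), start_tenor=15, end_tenor=20)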
def compute_interpolated_zero(swap_rates_market, liquid_maturities):
"""
Based on swap rates with maturity of 1-12, 15, 20, 25, 30, 40 and 50 years this function builds the forward and zero rates.
Source: "Background Document On The Opinion On The 2020 Review Of Solvency II" p.787f.
:param swap_rates_market: market rates where index position indicates maturity (e.g. [4] corresponds to a maturity of 5y): Ndarray
:param liquid_maturities: maturities of the respective interest rates: 1d-Ndarry
:return: contains interpolated zero curve based on liquid swap rates: list
"""
# Creates dummy to start interpolation
fwd_dummy = np.array([0.01])
# Number of liquid maturities
N = len(liquid_maturities)
# Construct zero rates
zero_rates_market = np.array(swap_to_zero(swap_rates_market))
# Starting point of zero curve with interpolation
zero_rates_market_interpolated = [zero_rates_market[0]]
# Loop through each liquid rate pair and see whether interpolation is required
for liquid_idx in range(1, N):
t1 = liquid_maturities[liquid_idx]
t2 = liquid_maturities[liquid_idx - 1]
t = t1 - t2
if t > 1:
fwd = interpolate_fwd(fwd=fwd_dummy, swap_rate_end=swap_rates_market[t1 - 1],
zero_rates_known=np.array(zero_rates_market_interpolated), start_tenor=t2,
end_tenor=t1)
z = (1 + zero_rates_market_interpolated[-1]) ** t2
for zero_runs in range(1, t + 1):
z_temp = (z * (1 + fwd) ** zero_runs) ** (1. / (t2 + zero_runs))
zero_rates_market_interpolated.append(z_temp[0] - 1.)
else:
# Assuming this can only happen for short maturities in the beginning
zero_rates_market_interpolated.append(zero_rates_market[t1 - 1])
return zero_rates_market_interpolated
def extract_fwds(zero_rates_market_interpolated, liquid_maturities, fsp=20):
"""
Extracts main (interpolated) forwards.
Source: "Background Document On The Opinion On The 2020 Review Of Solvency II" p.787ff.
:param zero_rates_market_interpolated: interpolated zero rates where index position indicates maturity (e.g. [4] corresponds to a maturity of 5y): 1d-Ndarray
:param liquid_maturities: maturities of the respective interest rates: 1d-Ndarray
:param fsp: maturity in years of first smoothing point: float
:return: forwards_pre_fsp & forwards_llfr (rates required for llfr calculation): list
"""
# Number of liquid maturities
N = len(liquid_maturities)
# Init
forwards_pre_fsp = [zero_rates_market_interpolated[0]]
forwards_llfr = []
# Loop through each liquid rate pair and calculate required fwds
for liquid_idx in range(1, N):
t1 = liquid_maturities[liquid_idx]
t2 = liquid_maturities[liquid_idx - 1]
t = t1 - t2
if t1 <= fsp:
fwd = ((1 + zero_rates_market_interpolated[t1 - 1]) ** t1 / (
1 + zero_rates_market_interpolated[t2 - 1]) ** t2) ** (1 / t) - 1
forwards_pre_fsp.append(fwd)
if t1 == fsp:
fwd = ((1 + zero_rates_market_interpolated[t1 - 1]) ** t1 / (
1 + zero_rates_market_interpolated[t2 - 1]) ** t2) ** (1 / t) - 1
forwards_llfr.append(fwd)
if t1 > fsp:
t = t1 - fsp
fwd = ((1 + zero_rates_market_interpolated[t1 - 1]) ** t1 / (
1 + zero_rates_market_interpolated[fsp - 1]) ** fsp) ** (1 / t) - 1
forwards_llfr.append(fwd)
return forwards_pre_fsp, forwards_llfr
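# In formula form (matching the code above), the annually compounded forward between
# maturities t2 and t1 is
#   f(t2, t1) = ((1 + z(t1))**t1 / (1 + z(t2))**t2)**(1 / (t1 - t2)) - 1,
# computed between consecutive liquid maturities up to the FSP for forwards_pre_fsp,
# and as the 15y->20y forward plus the FSP->t1 forwards (20y->25y, 20y->30y, ...)
# for forwards_llfr.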
def compute_llfr(fwd, volume=np.array([3.3, 1.45 , 6, 0.3, 0.4]), va=0.0):
"""
Calculates last liquid forward rate (llfr) as described by EIOPA review 2020.
Source: "Background Document On The Opinion On The 2020 Review Of Solvency II" p.788f.
volume[0] = 20y
volume[1] = 25y
volume[2] = 30y
volume[3] = 40y
volume[4] = 50y
fwd[0] = 15y-20y --> 15y5y + VA
fwd[1] = 20y-25y --> 20y5y
fwd[2] = 20y-30y --> 20y10y
fwd[3] = 20y-40y --> 20y20y
fwd[4] = 20y-50y --> 20y30y
:param fwd: forward rates: 1d-Ndarray
:param volume: respective volume / weighting of forward rates: 1d-Ndarray
:param va: volatility adjustment in bps (as float, e.g. 10): float
:return: last liquid forward rate
"""
weight = volume / volume.sum()
fwds_incl_va = fwd.copy()
fwds_incl_va[0] = fwds_incl_va[0] + va / 10000.0
llfr = fwds_incl_va * weight
return np.array([llfr.sum()])
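# Worked example (illustrative numbers):
#   compute_llfr(np.array([0.016, 0.017, 0.018, 0.019, 0.020]), va=10)
# adds the 10 bps VA to the first forward (0.016 -> 0.017) and returns the
# volume-weighted average with weights (3.3, 1.45, 6, 0.3, 0.4) / 11.45,
# which is roughly 0.0177.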
def compute_curve_with_va(forwards_pre_fsp, liquid_maturities, fsp=20, va=0):
"""
Constructs zero curve from forwards until FPS - potentially including VA.
Source: "Background Document On The Opinion On The 2020 Review Of Solvency II" p.788f.
:param forwards_pre_fsp: forwards between liquid rates: 1d-Ndarray
:param liquid_maturities: maturities of the respective interest rates: 1d-Ndarray
:param fsp: maturity in years of first smoothing point: float
:param va: volatility adjustment: float
:return: contains interpolated zero curve up to a maturity of 20y including va: list
"""
# Number of liquid maturities smaller than or equal to fsp
N = len(liquid_maturities[liquid_maturities <= fsp])
# Input check
assert N == len(forwards_pre_fsp)
# Add va to all forwards
forwards_pre_fsp_incl_va = forwards_pre_fsp.copy()
forwards_pre_fsp_incl_va = [fwd_rate + va / 10000.0 for fwd_rate in forwards_pre_fsp_incl_va]
# Init
zero_rates = [forwards_pre_fsp_incl_va[0]]
# Loop through each liquid rate pair and calculate required fwds
for liquid_idx in range(1, N):
t1 = liquid_maturities[liquid_idx]
t2 = liquid_maturities[liquid_idx - 1]
t = t1 - t2
if t > 1:
fwd = (1 + forwards_pre_fsp_incl_va[liquid_idx])
base_rate = ((1 + zero_rates[-1]) ** t2)
for zero_run in range(1, t + 1):
z = base_rate * fwd ** zero_run
z = z ** (1 / (t2 + zero_run)) - 1
zero_rates.append(z)
else:
z = ((1 + zero_rates[-1]) ** t2) * ((1 + forwards_pre_fsp_incl_va[liquid_idx]) ** t)
z = z ** (1 / t1) - 1
zero_rates.append(z)
return zero_rates
def big_b(h, alpha=0.1):
"""
Helper function
"""
top = 1 - np.exp(-alpha*h)
bottom = alpha * h
return top / bottom
def extrapolate_fwds(h, ufr, llfr, alpha=0.10):
"""
Extrapolates forward rates beyond fsp.
Source: "Background Document On The Opinion On The 2020 Review Of Solvency II" p.789.
fwd_fsp_fsp_plus_h:
fwd that can be used for calculation of all zero rates in between the observed swap rates
:param h: forward rate beyond fsp (at year 20+h): 1d-Ndarray
:param ufr: ultimate forward rate: 1d-Ndarray
:param llfr: last liquid forward rate: 1d-Ndarray
:param alpha: convergence speed: float
:return: fwd_fsp_fsp_plus_h: 1d-Ndarray
"""
fwd_fsp_fsp_plus_h = np.log(1 + ufr) + (llfr - np.log(1 + ufr)) * big_b(h, alpha)
return fwd_fsp_fsp_plus_h
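# In formula form (matching the code above), with B(h) = (1 - exp(-alpha*h)) / (alpha*h):
#   f(fsp, fsp+h) = ln(1 + UFR) + (LLFR - ln(1 + UFR)) * B(h)
# so the extrapolated (continuously compounded) forward starts near the LLFR for small h
# and converges towards ln(1 + UFR) as h grows, at a speed controlled by alpha.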
def extrapolate_zero(known_zero_rates, ufr, llfr, alpha=0.10, fsp=20):
"""
Extrapolation of zero rates beyond fsp.
Source: "Background Document On The Opinion On The 2020 Review Of Solvency II" p.790.
:param known_zero_rates:
:param ufr: ultimate forward rate
:param llfr: last liquid forward rate
:param alpha: convergence speed
:param fsp: first-smoothing-point
:return: extrapolated_zero_rates: 1d-Ndarray
"""
# FSP
z_fsp = known_zero_rates[fsp - 1]
# Extrapolated zero rates
extrapolated_zero_rates = known_zero_rates[0:fsp]
# Regardless of fsp we want to calculate the extrapolated rates with a maturity of up to 120y
up_to = 120 - fsp
# Calculate extrapolated zero rates up to and including 120y maturity
for h in range(1, up_to + 1):
z = np.exp((fsp * z_fsp + h * extrapolate_fwds(h, ufr, llfr, alpha)) / (fsp + h)) - 1
extrapolated_zero_rates.append(z[0])
return extrapolated_zero_rates
def alternative_extrapolation(input_rates, input_liquid, ufr, fsp=20, alpha=None, va=0.0, volume_traded=np.array([3.3, 1.45, 6, 0.3, 0.4])):
"""
Wrapper function for alternative extrapolation method of SII curves.
Source: "Background Document On The Opinion On The 2020 Review Of Solvency II" p.783-790.
:param input_rates:
:param input_liquid:
:param ufr: ultimate forward rate
:param fsp: first-smoothing-point
:param alpha: convergence speed
:param va: volatility adjustment
:param volume_traded: weighting for llfr
:return:
"""
# Assign base variables
liquid_maturities = np.where(input_liquid == 1)[0] + 1
    liquid_rates_swap = input_rates[np.where(input_liquid == 1)]
import numpy as np
def RotX(th):
    sth, cth = np.sin(th), np.cos(th)
#from IPython import embed
import torch.nn as nn
import logging
import torch
import torch.nn.functional as F
from mmcv.cnn import constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.ops import ContextBlock, DeformConv, ModulatedDeformConv
from ..registry import AGG,BACKBONES
from ..utils import build_conv_layer, build_norm_layer
import numpy as np
from mmcv.cnn import normal_init
from ..utils import ConvModule, bias_init_with_prob
@AGG.register_module
class STSN_trans(nn.Module):
def __init__(self,in_channels,out_channels,dcn):
super(STSN_trans,self).__init__()
offset_channels = 18
#agg1
self.offset1 = nn.Conv2d(in_channels,offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.sim1 = nn.Conv2d(in_channels,1,
kernel_size=1,stride=1,padding=0,dilation=1)
#agg2
self.offset2 = nn.Conv2d(in_channels,offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.sim2 = nn.Conv2d(in_channels,1,
kernel_size=1,stride=1,padding=0,dilation=1)
#agg3
self.offset3 = nn.Conv2d(in_channels,offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.sim3 = nn.Conv2d(in_channels,1,
kernel_size=1,stride=1,padding=0,dilation=1)
#agg4
self.offset4 = nn.Conv2d(in_channels,offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.sim4 = nn.Conv2d(in_channels,1,
kernel_size=1,stride=1,padding=0,dilation=1)
#agg5
self.offset5 = nn.Conv2d(in_channels,offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.sim5 = nn.Conv2d(in_channels,1,
kernel_size=1,stride=1,padding=0,dilation=1)
self.grid_x=nn.Parameter(torch.arange(2000).view(2000,1).expand(2000,2000).float())
self.grid_y=nn.Parameter(torch.arange(2000).view(1,2000).expand(2000,2000).float())
self.relu=nn.LeakyReLU(inplace=True)
self.offset=[]
self.mask=[]
# print('init transform kernel')
# self.trans_kernel=torch.from_numpy(np.load('/home/ld/RepPoints/mmdetection/mmdet/ops/dcn/init_kernel.npy'))
# self.trans_kernel=nn.Parameter(self.trans_kernel)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.offset1, std=0.01)
normal_init(self.sim1, std=0.01)
normal_init(self.offset2, std=0.01)
normal_init(self.sim2, std=0.01)
normal_init(self.offset3, std=0.01)
normal_init(self.sim3, std=0.01)
normal_init(self.offset4, std=0.01)
normal_init(self.sim4, std=0.01)
normal_init(self.offset5, std=0.01)
normal_init(self.sim5, std=0.01)
def agg1(self,support,reference,test=False):
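        # Predicts 9 (x, y) sampling offsets from the concatenated [support, reference]
        # features, bilinearly samples the support feature at each offset with
        # grid_sample, weights the 9 samples by a softmax over their cosine similarity
        # to the reference, and also returns the similarity-weighted sampling grid
        # ("trans"), which forward() upsamples and reuses at the next, finer pyramid level.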
n=reference.shape[0]
h=reference.shape[-2]
w=reference.shape[-1]
fuse=torch.cat([support,reference],dim=1)
offset=self.offset1(fuse)
feature=[]
weight=[]
warp_grid=[]
for i in range(9):
grid_x=self.grid_x[:h,:w].unsqueeze(0).expand(n,h,w)+offset[:,2*i,:,:]
grid_y=self.grid_y[:h,:w].unsqueeze(0).expand(n,h,w)+offset[:,2*i+1,:,:]
grid=torch.cat([2*(grid_x.unsqueeze(3)/h-0.5),2*(grid_y.unsqueeze(3)/w-0.5)],dim=3)
new_feature=torch.nn.functional.grid_sample(support,grid)
feature.append(new_feature)
weight.append(torch.nn.functional.cosine_similarity(reference,new_feature,dim=1).unsqueeze(1))
warp_grid.append(grid)
feature=torch.stack(feature,dim=4)
weight=torch.stack(weight,dim=4)
weight=torch.nn.functional.softmax(weight,dim=4)
warp_feature=torch.sum(feature*weight,dim=4)
# print(torch.stack(warp_grid,dim=4).shape,weight.shape)
trans=torch.sum(torch.stack(warp_grid,dim=4)*weight.squeeze(1).unsqueeze(3),dim=4)
if test:
return warp_feature,trans,[offset,weight,trans]
else:
return warp_feature,trans
def agg2(self,support,reference,test=False):
n=reference.shape[0]
h=reference.shape[-2]
w=reference.shape[-1]
fuse=torch.cat([support,reference],dim=1)
offset=self.offset2(fuse)
feature=[]
weight=[]
warp_grid=[]
for i in range(9):
grid_x=self.grid_x[:h,:w].unsqueeze(0).expand(n,h,w)+offset[:,2*i,:,:]
grid_y=self.grid_y[:h,:w].unsqueeze(0).expand(n,h,w)+offset[:,2*i+1,:,:]
grid=torch.cat([2*(grid_x.unsqueeze(3)/h-0.5),2*(grid_y.unsqueeze(3)/w-0.5)],dim=3)
new_feature=torch.nn.functional.grid_sample(support,grid)
feature.append(new_feature)
weight.append(torch.nn.functional.cosine_similarity(reference,new_feature,dim=1).unsqueeze(1))
warp_grid.append(grid)
feature=torch.stack(feature,dim=4)
weight=torch.stack(weight,dim=4)
weight=torch.nn.functional.softmax(weight,dim=4)
warp_feature=torch.sum(feature*weight,dim=4)
# print(torch.stack(warp_grid,dim=4).shape,weight.shape)
trans=torch.sum(torch.stack(warp_grid,dim=4)*weight.squeeze(1).unsqueeze(3),dim=4)
if test:
return warp_feature,trans,[offset,weight,trans]
else:
return warp_feature,trans
def agg3(self,support,reference,test=False):
n=reference.shape[0]
h=reference.shape[-2]
w=reference.shape[-1]
fuse=torch.cat([support,reference],dim=1)
offset=self.offset3(fuse)
feature=[]
weight=[]
warp_grid=[]
for i in range(9):
grid_x=self.grid_x[:h,:w].unsqueeze(0).expand(n,h,w)+offset[:,2*i,:,:]
grid_y=self.grid_y[:h,:w].unsqueeze(0).expand(n,h,w)+offset[:,2*i+1,:,:]
grid=torch.cat([2*(grid_x.unsqueeze(3)/h-0.5),2*(grid_y.unsqueeze(3)/w-0.5)],dim=3)
new_feature=torch.nn.functional.grid_sample(support,grid)
feature.append(new_feature)
weight.append(torch.nn.functional.cosine_similarity(reference,new_feature,dim=1).unsqueeze(1))
warp_grid.append(grid)
feature=torch.stack(feature,dim=4)
weight=torch.stack(weight,dim=4)
weight=torch.nn.functional.softmax(weight,dim=4)
warp_feature=torch.sum(feature*weight,dim=4)
# print(torch.stack(warp_grid,dim=4).shape,weight.shape)
trans=torch.sum(torch.stack(warp_grid,dim=4)*weight.squeeze(1).unsqueeze(3),dim=4)
if test:
return warp_feature,trans,[offset,weight,trans]
else:
return warp_feature,trans
def agg4(self,support,reference,test=False):
n=reference.shape[0]
h=reference.shape[-2]
w=reference.shape[-1]
fuse=torch.cat([support,reference],dim=1)
offset=self.offset4(fuse)
feature=[]
weight=[]
warp_grid=[]
for i in range(9):
grid_x=self.grid_x[:h,:w].unsqueeze(0).expand(n,h,w)+offset[:,2*i,:,:]
grid_y=self.grid_y[:h,:w].unsqueeze(0).expand(n,h,w)+offset[:,2*i+1,:,:]
grid=torch.cat([2*(grid_x.unsqueeze(3)/h-0.5),2*(grid_y.unsqueeze(3)/w-0.5)],dim=3)
new_feature=torch.nn.functional.grid_sample(support,grid)
feature.append(new_feature)
weight.append(torch.nn.functional.cosine_similarity(reference,new_feature,dim=1).unsqueeze(1))
warp_grid.append(grid)
feature=torch.stack(feature,dim=4)
weight=torch.stack(weight,dim=4)
weight=torch.nn.functional.softmax(weight,dim=4)
warp_feature=torch.sum(feature*weight,dim=4)
# print(torch.stack(warp_grid,dim=4).shape,weight.shape)
trans=torch.sum(torch.stack(warp_grid,dim=4)*weight.squeeze(1).unsqueeze(3),dim=4)
if test:
return warp_feature,trans,[offset,weight,trans]
else:
return warp_feature,trans
def agg5(self,support,reference,test=False):
n=reference.shape[0]
h=reference.shape[-2]
w=reference.shape[-1]
fuse=torch.cat([support,reference],dim=1)
offset=self.offset5(fuse)
feature=[]
weight=[]
warp_grid=[]
for i in range(9):
grid_x=self.grid_x[:h,:w].unsqueeze(0).expand(n,h,w)+offset[:,2*i,:,:]
grid_y=self.grid_y[:h,:w].unsqueeze(0).expand(n,h,w)+offset[:,2*i+1,:,:]
grid=torch.cat([2*(grid_x.unsqueeze(3)/h-0.5),2*(grid_y.unsqueeze(3)/w-0.5)],dim=3)
new_feature=torch.nn.functional.grid_sample(support,grid)
feature.append(new_feature)
weight.append(torch.nn.functional.cosine_similarity(reference,new_feature,dim=1).unsqueeze(1))
warp_grid.append(grid)
feature=torch.stack(feature,dim=4)
weight=torch.stack(weight,dim=4)
weight=torch.nn.functional.softmax(weight,dim=4)
warp_feature=torch.sum(feature*weight,dim=4)
# print(torch.stack(warp_grid,dim=4).shape,weight.shape)
trans=torch.sum(torch.stack(warp_grid,dim=4)*weight.squeeze(1).unsqueeze(3),dim=4)
if test:
return warp_feature,trans,[offset,weight,trans]
else:
return warp_feature,trans
def forward(self,datas,test=False):
# torch.Size([2, 256, 48, 156])
# torch.Size([2, 256, 24, 78])
# torch.Size([2, 256, 12, 39])
# torch.Size([2, 256, 6, 20])
# torch.Size([2, 256, 3, 10])
output=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
shuffle_id=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]=shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]-1
shuffle_id2=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
shuffle_id2[shuffle_id2==np.arange(datas[0].shape[0])]=shuffle_id2[shuffle_id2==np.arange(datas[0].shape[0])]-1
print('shuffle id:',shuffle_id)
print('shuffle id2:',shuffle_id2)
for i in [-1,-2,-3,-4,-5]:
reference=datas[i]+0
support=datas[i][shuffle_id,:,:,:]+0
n=reference.shape[0]
h=reference.shape[-2]
w=reference.shape[-1]
if i==-1:
tk_feature,trans=self.agg[i](support,reference,test)
else:
trans=trans.transpose(1,3)
trans=torch.nn.functional.interpolate(trans*2,(w,h)).transpose(1,3)
support=torch.nn.functional.grid_sample(support,trans)
tk_feature,trans=self.agg[i](support,reference,test)
# weight1=torch.nn.functional.cosine_similarity(reference,tk_feature,dim=1).unsqueeze(1).unsqueeze(1)
# weight0=torch.ones_like(weight1)
# weight=torch.nn.functional.softmax(torch.cat([weight0,weight1],dim=1),dim=1)
# feature=torch.cat([reference.unsqueeze(1),tk_feature.unsqueeze(1)],dim=1)
# agg_feature=torch.sum(feature*weight,dim=1)
output.append(tk_feature)
return_out=[]
for i in [-1,-2,-3,-4,-5]:
return_out.append(output[i])
return tuple(return_out)
def forward_test(self,datas,test=True):
output=[]
self.offset=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
for i in [-1,-2,-3,-4,-5]:
reference=datas[i][:1,:,:,:]+0
if datas[i].shape[0]>1:
shuffle_id=np.random.randint(low=1,high=datas[i].shape[0],size=1)
support=datas[i][shuffle_id,:,:,:]+0
else:
shuffle_id=[0]
support=datas[i][shuffle_id,:,:,:]+0
# print(shuffle_id)
# print(reference.shape,support.shape)
n=reference.shape[0]
h=reference.shape[-2]
w=reference.shape[-1]
if i==-1:
tk_feature,trans,offset=self.agg[i](support,reference,test)
else:
trans=trans.transpose(1,3)
trans=torch.nn.functional.interpolate(trans*2,(w,h)).transpose(1,3)
support=torch.nn.functional.grid_sample(support,trans)
tk_feature,trans,offset=self.agg[i](support,reference,test)
# weight1=torch.nn.functional.cosine_similarity(reference,tk_feature,dim=1).unsqueeze(1).unsqueeze(1)
# weight0=torch.ones_like(weight1)
# weight=torch.nn.functional.softmax(torch.cat([weight0,weight1],dim=1),dim=1)
# feature=torch.cat([reference.unsqueeze(1),tk_feature.unsqueeze(1)],dim=1)
# agg_feature=torch.sum(feature*weight,dim=1)
output.append(tk_feature)
self.offset.append(offset)
return_out=[]
for i in [-1,-2,-3,-4,-5]:
return_out.append(output[i])
return tuple(return_out)
@AGG.register_module
class STSN(nn.Module):
def __init__(self,in_channels,out_channels,dcn):
super(STSN,self).__init__()
self.deformable_groups = dcn.get('deformable_groups', 1)
self.with_modulated_dcn = dcn.get('modulated', False)
if not self.with_modulated_dcn:
conv_op = DeformConv
offset_channels = 18
else:
conv_op = ModulatedDeformConv
offset_channels = 27
#agg1
self.conv11_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv11 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv12_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv12 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv13_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv13 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv14_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv14 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg2
self.conv21_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv21 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv22_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv22 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv23_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv23 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg3
self.conv31_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv31 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv32_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv32 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv33_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv33 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg4
self.conv41_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv41 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv42_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv42 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg5
self.conv51_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv51 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv52_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv52 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.relu=nn.LeakyReLU(inplace=True)
self.offset=[]
self.mask=[]
print('init transform kernel')
self.trans_kernel=torch.from_numpy(np.load('/home/ld/RepPoints/mmdetection/mmdet/ops/dcn/init_kernel.npy'))
self.trans_kernel=nn.Parameter(self.trans_kernel)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.conv11_offset, std=0.01)
normal_init(self.conv11, std=0.01)
normal_init(self.conv12_offset, std=0.01)
normal_init(self.conv12, std=0.01)
normal_init(self.conv13_offset, std=0.01)
normal_init(self.conv13, std=0.01)
normal_init(self.conv14_offset, std=0.01)
normal_init(self.conv14, std=0.01)
normal_init(self.conv21_offset, std=0.01)
normal_init(self.conv21, std=0.01)
normal_init(self.conv22_offset, std=0.01)
normal_init(self.conv22, std=0.01)
normal_init(self.conv23_offset, std=0.01)
normal_init(self.conv23, std=0.01)
normal_init(self.conv31_offset, std=0.01)
normal_init(self.conv31, std=0.01)
normal_init(self.conv32_offset, std=0.01)
normal_init(self.conv32, std=0.01)
normal_init(self.conv33_offset, std=0.01)
normal_init(self.conv33, std=0.01)
normal_init(self.conv41_offset, std=0.01)
normal_init(self.conv41, std=0.01)
normal_init(self.conv42_offset, std=0.01)
normal_init(self.conv42, std=0.01)
normal_init(self.conv51_offset, std=0.01)
normal_init(self.conv51, std=0.01)
normal_init(self.conv52_offset, std=0.01)
normal_init(self.conv52, std=0.01)
def agg1(self,support,reference,test=False):
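        # Cascade of deformable convolutions: each *_offset conv predicts sampling
        # offsets (and, in the modulated variant, a per-location mask) from features
        # that start as the concatenated [support, reference] pair; the last deformable
        # conv has its weight reset to the fixed trans_kernel, so it only resamples the
        # support feature with the learned offsets, i.e. aligns it to the reference.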
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv11_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv11(feature_f0, offset, mask)
offset_mask2 = self.conv12_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv12(out, offset, mask)
offset_mask3 = self.conv13_offset(out)
offset = offset_mask3[:, :18 * self.deformable_groups, :, :]
mask = offset_mask3[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv13(out, offset, mask)
offset_mask4 = self.conv14_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
print(torch.max(mask,dim=1)[0].mean())
kernel_weight=self.trans_kernel.detach()*9
self.conv14.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv14(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
# print('agg1',feature_f0.device,self.conv11_offset.weight.device)
offset1=self.conv11_offset(feature_f0)
feature_f1=self.conv11(feature_f0,offset1)
offset2=self.conv12_offset(feature_f1)
feature_f2=self.conv12(feature_f1,offset2)
offset3=self.conv13_offset(feature_f2)
feature_f3=self.conv13(feature_f2,offset3)
offset4=self.conv14_offset(feature_f3)
self.conv14.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv14(support,offset4)
if test:
return agg_features,offset4
else:
return agg_features
def agg2(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv21_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv21(feature_f0, offset, mask)
offset_mask2 = self.conv22_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv22(out, offset, mask)
offset_mask4 = self.conv23_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv23.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv23(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv21_offset(feature_f0)
feature_f1=self.conv21(feature_f0,offset1)
offset2=self.conv22_offset(feature_f1)
feature_f2=self.conv22(feature_f1,offset2)
offset3=self.conv23_offset(feature_f2)
self.conv23.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv23(support,offset3)
if test:
return agg_features,offset3
else:
return agg_features
def agg3(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv31_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv31(feature_f0, offset, mask)
offset_mask2 = self.conv32_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv32(out, offset, mask)
offset_mask4 = self.conv33_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv33.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv33(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv31_offset(feature_f0)
feature_f1=self.conv31(feature_f0,offset1)
offset2=self.conv32_offset(feature_f1)
feature_f2=self.conv32(feature_f1,offset2)
offset3=self.conv33_offset(feature_f2)
self.conv33.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv33(support,offset3)
if test:
return agg_features,offset3
else:
return agg_features
def agg4(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv41_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv41(feature_f0, offset, mask)
offset_mask4 = self.conv42_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv42.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv42(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv41_offset(feature_f0)
feature_f1=self.conv41(feature_f0,offset1)
offset2=self.conv42_offset(feature_f1)
self.conv42.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv42(support,offset2)
if test:
return agg_features,offset2
else:
return agg_features
def agg5(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv51_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv51(feature_f0, offset, mask)
offset_mask4 = self.conv52_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv52.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv52(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv51_offset(feature_f0)
feature_f1=self.conv51(feature_f0,offset1)
offset2=self.conv52_offset(feature_f1)
self.conv52.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv52(support,offset2)
if test:
return agg_features,offset2
else:
return agg_features
def forward(self,datas,test=False):
# torch.Size([2, 256, 48, 156])
# torch.Size([2, 256, 24, 78])
# torch.Size([2, 256, 12, 39])
# torch.Size([2, 256, 6, 20])
# torch.Size([2, 256, 3, 10])
output=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
shuffle_id=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]=shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]-1
print('shuffle id:',shuffle_id)
for i in range(len(datas)):
reference=datas[i]+0
support=datas[i][shuffle_id,:,:,:]+0
# print('split',support.device,reference.device,self.conv11.weight.device)
tk_feature=self.agg[i](support,reference,test)
output.append(tk_feature)
return tuple(output)
def forward_test(self,datas,test=True):
output=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
print('stsn test')
for i in range(len(datas)):
reference=datas[i][:1,:,:,:]+0
if datas[i].shape[0]>1:
shuffle_id=np.random.randint(low=1,high=datas[i].shape[0],size=1)
support=datas[i][shuffle_id,:,:,:]+0
else:
shuffle_id=[0]
support=datas[i][shuffle_id,:,:,:]+0
if self.with_modulated_dcn:
tk_feature,soffset4,mask4=self.agg[i](support,reference,test)
self.offset.append(soffset4)
self.mask.append(mask4)
else:
tk_feature,soffset4=self.agg[i](support,reference,test)
self.offset.append(soffset4)
output.append(tk_feature)
return tuple(output)
@AGG.register_module
class STSN_fuse(nn.Module):
def __init__(self,in_channels,out_channels,dcn):
super(STSN_fuse,self).__init__()
self.deformable_groups = dcn.get('deformable_groups', 1)
self.with_modulated_dcn = dcn.get('modulated', False)
if not self.with_modulated_dcn:
conv_op = DeformConv
offset_channels = 18
else:
conv_op = ModulatedDeformConv
offset_channels = 27
#agg1
self.conv11_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv11 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv12_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv12 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv13_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv13 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv14_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv14 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg2
self.conv21_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv21 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv22_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv22 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv23_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv23 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg3
self.conv31_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv31 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv32_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv32 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv33_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv33 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg4
self.conv41_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv41 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv42_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv42 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg5
self.conv51_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv51 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv52_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv52 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.relu=nn.LeakyReLU(inplace=True)
self.offset=[]
self.mask=[]
print('init transform kernel')
self.trans_kernel=torch.from_numpy(np.load('/home/ld/RepPoints/mmdetection/mmdet/ops/dcn/init_kernel.npy'))
self.trans_kernel=nn.Parameter(self.trans_kernel)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.conv11_offset, std=0.01)
normal_init(self.conv11, std=0.01)
normal_init(self.conv12_offset, std=0.01)
normal_init(self.conv12, std=0.01)
normal_init(self.conv13_offset, std=0.01)
normal_init(self.conv13, std=0.01)
normal_init(self.conv14_offset, std=0.01)
normal_init(self.conv14, std=0.01)
normal_init(self.conv21_offset, std=0.01)
normal_init(self.conv21, std=0.01)
normal_init(self.conv22_offset, std=0.01)
normal_init(self.conv22, std=0.01)
normal_init(self.conv23_offset, std=0.01)
normal_init(self.conv23, std=0.01)
normal_init(self.conv31_offset, std=0.01)
normal_init(self.conv31, std=0.01)
normal_init(self.conv32_offset, std=0.01)
normal_init(self.conv32, std=0.01)
normal_init(self.conv33_offset, std=0.01)
normal_init(self.conv33, std=0.01)
normal_init(self.conv41_offset, std=0.01)
normal_init(self.conv41, std=0.01)
normal_init(self.conv42_offset, std=0.01)
normal_init(self.conv42, std=0.01)
normal_init(self.conv51_offset, std=0.01)
normal_init(self.conv51, std=0.01)
normal_init(self.conv52_offset, std=0.01)
normal_init(self.conv52, std=0.01)
def agg1(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv11_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv11(feature_f0, offset, mask)
offset_mask2 = self.conv12_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv12(out, offset, mask)
offset_mask3 = self.conv13_offset(out)
offset = offset_mask3[:, :18 * self.deformable_groups, :, :]
mask = offset_mask3[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv13(out, offset, mask)
offset_mask4 = self.conv14_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
# print('mask weight',torch.max(mask,dim=1)[0].mean().item())
kernel_weight=self.trans_kernel.detach()*9
self.conv14.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv14(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
# print('agg1',feature_f0.device,self.conv11_offset.weight.device)
offset1=self.conv11_offset(feature_f0)
feature_f1=self.conv11(feature_f0,offset1)
offset2=self.conv12_offset(feature_f1)
feature_f2=self.conv12(feature_f1,offset2)
offset3=self.conv13_offset(feature_f2)
feature_f3=self.conv13(feature_f2,offset3)
offset4=self.conv14_offset(feature_f3)
self.conv14.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv14(support,offset4)
if test:
return agg_features,offset4
else:
return agg_features
def agg2(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv21_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv21(feature_f0, offset, mask)
offset_mask2 = self.conv22_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv22(out, offset, mask)
offset_mask4 = self.conv23_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv23.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv23(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv21_offset(feature_f0)
feature_f1=self.conv21(feature_f0,offset1)
offset2=self.conv22_offset(feature_f1)
feature_f2=self.conv22(feature_f1,offset2)
offset3=self.conv23_offset(feature_f2)
self.conv23.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv23(support,offset3)
if test:
return agg_features,offset3
else:
return agg_features
def agg3(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv31_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv31(feature_f0, offset, mask)
offset_mask2 = self.conv32_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv32(out, offset, mask)
offset_mask4 = self.conv33_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv33.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv33(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv31_offset(feature_f0)
feature_f1=self.conv31(feature_f0,offset1)
offset2=self.conv32_offset(feature_f1)
feature_f2=self.conv32(feature_f1,offset2)
offset3=self.conv33_offset(feature_f2)
self.conv33.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv33(support,offset3)
if test:
return agg_features,offset3
else:
return agg_features
def agg4(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv41_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv41(feature_f0, offset, mask)
offset_mask4 = self.conv42_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv42.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv42(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv41_offset(feature_f0)
feature_f1=self.conv41(feature_f0,offset1)
offset2=self.conv42_offset(feature_f1)
self.conv42.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv42(support,offset2)
if test:
return agg_features,offset2
else:
return agg_features
def agg5(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv51_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv51(feature_f0, offset, mask)
offset_mask4 = self.conv52_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv52.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv52(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv51_offset(feature_f0)
feature_f1=self.conv51(feature_f0,offset1)
offset2=self.conv52_offset(feature_f1)
self.conv52.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv52(support,offset2)
if test:
return agg_features,offset2
else:
return agg_features
def forward(self,datas,test=False):
# torch.Size([2, 256, 48, 156])
# torch.Size([2, 256, 24, 78])
# torch.Size([2, 256, 12, 39])
# torch.Size([2, 256, 6, 20])
# torch.Size([2, 256, 3, 10])
agg_output=[]
refer_out=[]
support1_out=[]
support2_out=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
shuffle_id=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]=shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]-1
shuffle_id2=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
shuffle_id2[shuffle_id2==np.arange(datas[0].shape[0])]=shuffle_id2[shuffle_id2==np.arange(datas[0].shape[0])]-1
print('shuffle id:',shuffle_id)
print('shuffle id2:',shuffle_id2)
for i in range(len(datas)):
reference=datas[i]+0
support=datas[i][shuffle_id,:,:,:]+0
tk_feature1=self.agg[i](support.detach(),reference.detach(),test)
support=datas[i][shuffle_id2,:,:,:]+0
tk_feature2=self.agg[i](support.detach(),reference.detach(),test)
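            # Fuse the reference feature with the two aligned support features:
            # per-pixel weights come from a softmax over cosine similarities (the
            # reference itself gets a constant logit of 1), so poorly aligned support
            # features are down-weighted in the aggregated output.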
weight1=torch.nn.functional.cosine_similarity(reference.detach(),tk_feature1,dim=1).unsqueeze(1).unsqueeze(1)
weight2=torch.nn.functional.cosine_similarity(reference.detach(),tk_feature2,dim=1).unsqueeze(1).unsqueeze(1)
weight0=torch.ones_like(weight1)
weight=torch.nn.functional.softmax(torch.cat([weight0,weight1,weight2],dim=1),dim=1)
print('agg weight',(weight[:,0,...]).mean().item())
feature=torch.cat([reference.unsqueeze(1),tk_feature1.unsqueeze(1),tk_feature2.unsqueeze(1)],dim=1)
agg_feature=torch.sum(feature*weight,dim=1)
agg_output.append(agg_feature)
refer_out.append(reference)
support1_out.append(tk_feature1)
support2_out.append(tk_feature2)
# print(len(agg_output),len(refer_out),len(support1_out),print(support2_out))
return [tuple(agg_output),tuple(refer_out),tuple(support1_out),tuple(support2_out)]
def forward_test(self,datas,test=True):
output=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
print('stsn test')
self.offset=[]
self.mask=[]
for i in range(len(datas)):
reference=datas[i][:1,:,:,:]+0
if datas[i].shape[0]>1:
shuffle_id=np.random.randint(low=1,high=datas[i].shape[0],size=1)
support=datas[i][shuffle_id,:,:,:]+0
else:
shuffle_id=[0]
support=datas[i][shuffle_id,:,:,:]+0
if self.with_modulated_dcn:
tk_feature,soffset4,mask4=self.agg[i](support,reference,test)
self.offset.append(soffset4)
self.mask.append(mask4)
else:
tk_feature,soffset4=self.agg[i](support,reference,test)
self.offset.append(soffset4)
weight1=torch.nn.functional.cosine_similarity(reference,tk_feature,dim=1).unsqueeze(1).unsqueeze(1)
weight0=torch.ones_like(weight1)
weight=torch.nn.functional.softmax(torch.cat([weight0,weight1],dim=1),dim=1)
feature=torch.cat([reference.unsqueeze(1),tk_feature.unsqueeze(1)],dim=1)
agg_feature=torch.sum(feature*weight,dim=1)
output.append(agg_feature)
return tuple(output)
def forward_eval(self,datas,test=True):
output=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
print('stsn eval')
self.offset=[]
self.mask=[]
refer_out=[]
agg_out=[]
support_out=[]
self.offset=[]
self.mask=[]
for i in range(datas[0].shape[0]-1):
support_out.append([])
self.offset.append([])
self.mask.append([])
out=[]
for i in range(len(datas)):
reference=datas[i][:1,:,:,:]+0
refer_out.append(reference)
if datas[i].shape[0]>1:
support=datas[i][1:,:,:,:]+0
else:
shuffle_id=[0]
support=datas[i][:1,:,:,:]+0
weight0=torch.ones_like(torch.nn.functional.cosine_similarity(reference,reference,dim=1).unsqueeze(1).unsqueeze(1))
feature=reference.unsqueeze(1)
for j in range(support.shape[0]):
tk_feature,offset,mask=self.agg[i](support[j:j+1,...],reference,test)
weight=torch.nn.functional.cosine_similarity(reference,tk_feature,dim=1).unsqueeze(1).unsqueeze(1)
weight0=torch.cat([weight0,weight],dim=1)
feature=torch.cat([feature,tk_feature.unsqueeze(1)],dim=1)
support_out[j].append(tk_feature)
self.offset[j].append(offset)
self.mask[j].append(mask)
weight=torch.nn.functional.softmax(weight0,dim=1)
agg_feature=torch.sum(feature*weight,dim=1)
agg_out.append(agg_feature)
for i in range(datas[0].shape[0]-1):
support_out[i]=tuple(support_out[i])
out=[tuple(refer_out),tuple(agg_out)]+support_out
return out
@AGG.register_module
class STSN_fuse_c(nn.Module):
#fuse on each channel
def __init__(self,in_channels,out_channels,dcn):
super(STSN_fuse_c,self).__init__()
self.deformable_groups = dcn.get('deformable_groups', 1)
self.with_modulated_dcn = dcn.get('modulated', False)
if not self.with_modulated_dcn:
conv_op = DeformConv
offset_channels = 18
else:
conv_op = ModulatedDeformConv
offset_channels = 27
#agg1
self.conv11_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv11 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv12_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv12 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv13_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv13 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv14_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv14 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg2
self.conv21_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv21 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv22_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv22 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv23_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv23 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg3
self.conv31_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv31 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv32_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv32 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv33_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv33 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg4
self.conv41_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv41 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv42_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv42 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg5
self.conv51_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv51 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv52_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv52 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
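        # 3D conv stack that predicts per-channel fusion logits along the stacked
        # frame dimension (kernel 3x1x1, so only the temporal axis is mixed)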
self.weight_conv=nn.Sequential(nn.Conv3d(out_channels,out_channels,
kernel_size=(3,1,1), stride=1,padding=(1,0,0),dilation=1),
nn.Conv3d(out_channels,out_channels,
kernel_size=(3,1,1), stride=1,padding=(1,0,0),dilation=1))
self.relu=nn.LeakyReLU(inplace=True)
self.offset=[]
self.mask=[]
print('init transform kernel')
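        # pre-computed per-channel averaging kernel (see the commented-out
        # construction in STSN_fuse_r below); it is assigned to the last DCN of each
        # agg block so that conv becomes a pure resampling of the support feature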
self.trans_kernel=torch.from_numpy(np.load('/home/ld/RepPoints/mmdetection/mmdet/ops/dcn/init_kernel.npy'))
self.trans_kernel=nn.Parameter(self.trans_kernel)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.conv11_offset, std=0.01)
normal_init(self.conv11, std=0.01)
normal_init(self.conv12_offset, std=0.01)
normal_init(self.conv12, std=0.01)
normal_init(self.conv13_offset, std=0.01)
normal_init(self.conv13, std=0.01)
normal_init(self.conv14_offset, std=0.01)
normal_init(self.conv14, std=0.01)
normal_init(self.conv21_offset, std=0.01)
normal_init(self.conv21, std=0.01)
normal_init(self.conv22_offset, std=0.01)
normal_init(self.conv22, std=0.01)
normal_init(self.conv23_offset, std=0.01)
normal_init(self.conv23, std=0.01)
normal_init(self.conv31_offset, std=0.01)
normal_init(self.conv31, std=0.01)
normal_init(self.conv32_offset, std=0.01)
normal_init(self.conv32, std=0.01)
normal_init(self.conv33_offset, std=0.01)
normal_init(self.conv33, std=0.01)
normal_init(self.conv41_offset, std=0.01)
normal_init(self.conv41, std=0.01)
normal_init(self.conv42_offset, std=0.01)
normal_init(self.conv42, std=0.01)
normal_init(self.conv51_offset, std=0.01)
normal_init(self.conv51, std=0.01)
normal_init(self.conv52_offset, std=0.01)
normal_init(self.conv52, std=0.01)
def agg1(self,support,reference,test=False):
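        # Cascade of offset-predicting convs on concat(support, reference); the final
        # deformable conv resamples the raw support feature at the predicted offsets.
        # With modulated DCN each *_offset conv emits 18*groups offset channels
        # (x,y for the 3x3 kernel) followed by 9*groups modulation-mask channels.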
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv11_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv11(feature_f0, offset, mask)
offset_mask2 = self.conv12_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv12(out, offset, mask)
offset_mask3 = self.conv13_offset(out)
offset = offset_mask3[:, :18 * self.deformable_groups, :, :]
mask = offset_mask3[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv13(out, offset, mask)
offset_mask4 = self.conv14_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
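            # softmax over the 9 sampling locations turns the mask into a convex
            # combination; with the frozen averaging kernel below, conv14 reduces to a
            # mask-weighted resampling of the support feature (kernel_weight on the
            # next line is computed but never used)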
# print('mask weight',torch.max(mask,dim=1)[0].mean().item())
kernel_weight=self.trans_kernel.detach()*9
self.conv14.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv14(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
# print('agg1',feature_f0.device,self.conv11_offset.weight.device)
offset1=self.conv11_offset(feature_f0)
feature_f1=self.conv11(feature_f0,offset1)
offset2=self.conv12_offset(feature_f1)
feature_f2=self.conv12(feature_f1,offset2)
offset3=self.conv13_offset(feature_f2)
feature_f3=self.conv13(feature_f2,offset3)
offset4=self.conv14_offset(feature_f3)
self.conv14.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv14(support,offset4)
if test:
return agg_features,offset4
else:
return agg_features
def agg2(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv21_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv21(feature_f0, offset, mask)
offset_mask2 = self.conv22_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv22(out, offset, mask)
offset_mask4 = self.conv23_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv23.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv23(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv21_offset(feature_f0)
feature_f1=self.conv21(feature_f0,offset1)
offset2=self.conv22_offset(feature_f1)
feature_f2=self.conv22(feature_f1,offset2)
offset3=self.conv23_offset(feature_f2)
self.conv23.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv23(support,offset3)
if test:
return agg_features,offset3
else:
return agg_features
def agg3(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv31_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv31(feature_f0, offset, mask)
offset_mask2 = self.conv32_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv32(out, offset, mask)
offset_mask4 = self.conv33_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv33.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv33(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv31_offset(feature_f0)
feature_f1=self.conv31(feature_f0,offset1)
offset2=self.conv32_offset(feature_f1)
feature_f2=self.conv32(feature_f1,offset2)
offset3=self.conv33_offset(feature_f2)
self.conv33.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv33(support,offset3)
if test:
return agg_features,offset3
else:
return agg_features
def agg4(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv41_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv41(feature_f0, offset, mask)
offset_mask4 = self.conv42_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv42.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv42(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv41_offset(feature_f0)
feature_f1=self.conv41(feature_f0,offset1)
offset2=self.conv42_offset(feature_f1)
self.conv42.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv42(support,offset2)
if test:
return agg_features,offset2
else:
return agg_features
def agg5(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv51_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv51(feature_f0, offset, mask)
offset_mask4 = self.conv52_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv52.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv52(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv51_offset(feature_f0)
feature_f1=self.conv51(feature_f0,offset1)
offset2=self.conv52_offset(feature_f1)
self.conv52.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv52(support,offset2)
if test:
return agg_features,offset2
else:
return agg_features
def forward(self,datas,test=False):
# torch.Size([2, 256, 48, 156])
# torch.Size([2, 256, 24, 78])
# torch.Size([2, 256, 12, 39])
# torch.Size([2, 256, 6, 20])
# torch.Size([2, 256, 3, 10])
print('fuse channel')
agg_output=[]
refer_out=[]
support1_out=[]
support2_out=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
shuffle_id=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]=shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]-1
shuffle_id2=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
shuffle_id2[shuffle_id2==np.arange(datas[0].shape[0])]=shuffle_id2[shuffle_id2==np.arange(datas[0].shape[0])]-1
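        # random support indices; wherever a frame would pick itself, shift the index
        # by -1 so reference and support differ (index -1 wraps to the last frame)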
# print('shuffle id:',shuffle_id)
# print('shuffle id2:',shuffle_id2)
for i in range(len(datas)):
reference=datas[i]+0
support=datas[i][shuffle_id,:,:,:]+0
tk_feature1=self.agg[i](support.detach(),reference.detach(),test)
support=datas[i][shuffle_id2,:,:,:]+0
tk_feature2=self.agg[i](support.detach(),reference.detach(),test)
# weight1=torch.nn.functional.cosine_similarity(reference.detach(),tk_feature1,dim=1).unsqueeze(1).unsqueeze(1)
# weight2=torch.nn.functional.cosine_similarity(reference.detach(),tk_feature2,dim=1).unsqueeze(1).unsqueeze(1)
# weight0=torch.ones_like(weight1)
# weight=torch.nn.functional.softmax(torch.cat([weight0,weight1,weight2],dim=1),dim=1)
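            # stack the three frames along a new dim and let weight_conv predict
            # per-channel, per-location fusion logits, soft-maxed across the frames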
weight_f=torch.cat([reference.unsqueeze(2),tk_feature1.unsqueeze(2),tk_feature2.unsqueeze(2)],dim=2)
weight_f=self.weight_conv(weight_f)
weight=torch.nn.functional.softmax(weight_f,dim=2)
            # report the max reference weight and how many positions keep it above 0.7
            print('agg weight',(weight[:,:,0,...]).max().item(),((weight[:,:,0,...]>0.7).float().sum().item()))
feature=torch.cat([reference.unsqueeze(2),tk_feature1.unsqueeze(2),tk_feature2.unsqueeze(2)],dim=2)
agg_feature=torch.sum(feature*weight,dim=2)
agg_output.append(agg_feature)
refer_out.append(reference)
support1_out.append(tk_feature1)
support2_out.append(tk_feature2)
# print(len(agg_output),len(refer_out),len(support1_out),print(support2_out))
return [tuple(agg_output),tuple(refer_out),tuple(support1_out),tuple(support2_out)]
def forward_test(self,datas,test=True):
output=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
print('stsn test')
self.offset=[]
self.mask=[]
for i in range(len(datas)):
reference=datas[i][:1,:,:,:]+0
if datas[i].shape[0]>1:
shuffle_id=np.random.randint(low=1,high=datas[i].shape[0],size=1)
support=datas[i][shuffle_id,:,:,:]+0
else:
shuffle_id=[0]
support=datas[i][shuffle_id,:,:,:]+0
if self.with_modulated_dcn:
tk_feature,soffset4,mask4=self.agg[i](support,reference,test)
self.offset.append(soffset4)
self.mask.append(mask4)
else:
tk_feature,soffset4=self.agg[i](support,reference,test)
self.offset.append(soffset4)
weight1=torch.nn.functional.cosine_similarity(reference,tk_feature,dim=1).unsqueeze(1).unsqueeze(1)
weight0=torch.ones_like(weight1)
weight=torch.nn.functional.softmax(torch.cat([weight0,weight1],dim=1),dim=1)
feature=torch.cat([reference.unsqueeze(1),tk_feature.unsqueeze(1)],dim=1)
agg_feature=torch.sum(feature*weight,dim=1)
output.append(agg_feature)
return tuple(output)
def forward_eval(self,datas,test=True):
output=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
print('stsn eval')
self.offset=[]
self.mask=[]
refer_out=[]
agg_out=[]
support_out=[]
for i in range(datas[0].shape[0]-1):
support_out.append([])
self.offset.append([])
self.mask.append([])
out=[]
for i in range(len(datas)):
reference=datas[i][:1,:,:,:]+0
refer_out.append(reference)
if datas[i].shape[0]>1:
support=datas[i][1:,:,:,:]+0
else:
shuffle_id=[0]
support=datas[i][:1,:,:,:]+0
# weight0=torch.ones_like(torch.nn.functional.cosine_similarity(reference,reference,dim=1).unsqueeze(1).unsqueeze(1))
feature=reference.unsqueeze(2)
for j in range(support.shape[0]):
tk_feature,offset,mask=self.agg[i](support[j:j+1,...],reference,test)
# weight=torch.nn.functional.cosine_similarity(reference,tk_feature,dim=1).unsqueeze(1).unsqueeze(1)
# weight0=torch.cat([weight0,weight],dim=1)
feature=torch.cat([feature,tk_feature.unsqueeze(2)],dim=2)
support_out[j].append(tk_feature)
self.offset[j].append(offset)
self.mask[j].append(mask)
weight_f=feature+0
weight_f=self.weight_conv(weight_f)
weight=torch.nn.functional.softmax(weight_f,dim=2)
agg_feature=torch.sum(feature*weight,dim=2)
agg_out.append(agg_feature)
for i in range(datas[0].shape[0]-1):
support_out[i]=tuple(support_out[i])
out=[tuple(refer_out),tuple(agg_out)]+support_out
return out
@AGG.register_module
class STSN_fuse_t(nn.Module):
#trainable dcn weight
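    # Variant that keeps the final DCN weights trainable: the frozen trans_kernel
    # assignment is commented out in every agg block and the modulation mask keeps its
    # sigmoid instead of the softmax used above. The reference frame is also passed
    # through its own agg pass (tk_feature0) before fusion.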
def __init__(self,in_channels,out_channels,dcn):
super(STSN_fuse_t,self).__init__()
self.deformable_groups = dcn.get('deformable_groups', 1)
self.with_modulated_dcn = dcn.get('modulated', False)
if not self.with_modulated_dcn:
conv_op = DeformConv
offset_channels = 18
else:
conv_op = ModulatedDeformConv
offset_channels = 27
#agg1
self.conv11_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv11 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv12_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv12 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv13_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv13 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv14_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv14 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg2
self.conv21_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv21 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv22_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv22 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv23_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv23 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg3
self.conv31_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv31 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv32_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv32 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv33_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv33 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg4
self.conv41_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv41 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv42_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv42 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg5
self.conv51_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv51 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv52_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv52 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.relu=nn.LeakyReLU(inplace=True)
self.offset=[]
self.mask=[]
print('init transform kernel')
self.trans_kernel=torch.from_numpy(np.load('/home/ld/RepPoints/mmdetection/mmdet/ops/dcn/init_kernel.npy'))
self.trans_kernel=nn.Parameter(self.trans_kernel)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.conv11_offset, std=0.01)
normal_init(self.conv11, std=0.01)
normal_init(self.conv12_offset, std=0.01)
normal_init(self.conv12, std=0.01)
normal_init(self.conv13_offset, std=0.01)
normal_init(self.conv13, std=0.01)
normal_init(self.conv14_offset, std=0.01)
normal_init(self.conv14, std=0.01)
normal_init(self.conv21_offset, std=0.01)
normal_init(self.conv21, std=0.01)
normal_init(self.conv22_offset, std=0.01)
normal_init(self.conv22, std=0.01)
normal_init(self.conv23_offset, std=0.01)
normal_init(self.conv23, std=0.01)
normal_init(self.conv31_offset, std=0.01)
normal_init(self.conv31, std=0.01)
normal_init(self.conv32_offset, std=0.01)
normal_init(self.conv32, std=0.01)
normal_init(self.conv33_offset, std=0.01)
normal_init(self.conv33, std=0.01)
normal_init(self.conv41_offset, std=0.01)
normal_init(self.conv41, std=0.01)
normal_init(self.conv42_offset, std=0.01)
normal_init(self.conv42, std=0.01)
normal_init(self.conv51_offset, std=0.01)
normal_init(self.conv51, std=0.01)
normal_init(self.conv52_offset, std=0.01)
normal_init(self.conv52, std=0.01)
def agg1(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv11_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv11(feature_f0, offset, mask)
offset_mask2 = self.conv12_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv12(out, offset, mask)
offset_mask3 = self.conv13_offset(out)
offset = offset_mask3[:, :18 * self.deformable_groups, :, :]
mask = offset_mask3[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv13(out, offset, mask)
offset_mask4 = self.conv14_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
# mask=torch.nn.functional.softmax(mask,dim=1)
# print('mask weight',torch.max(mask,dim=1)[0].mean().item())
# kernel_weight=self.trans_kernel.detach()*9
# self.conv14.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv14(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
# print('agg1',feature_f0.device,self.conv11_offset.weight.device)
offset1=self.conv11_offset(feature_f0)
feature_f1=self.conv11(feature_f0,offset1)
offset2=self.conv12_offset(feature_f1)
feature_f2=self.conv12(feature_f1,offset2)
offset3=self.conv13_offset(feature_f2)
feature_f3=self.conv13(feature_f2,offset3)
offset4=self.conv14_offset(feature_f3)
# self.conv14.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv14(support,offset4)
if test:
return agg_features,offset4
else:
return agg_features
def agg2(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv21_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv21(feature_f0, offset, mask)
offset_mask2 = self.conv22_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv22(out, offset, mask)
offset_mask4 = self.conv23_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
# mask=torch.nn.functional.softmax(mask,dim=1)
# kernel_weight=self.trans_kernel.detach()*9
# self.conv23.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv23(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv21_offset(feature_f0)
feature_f1=self.conv21(feature_f0,offset1)
offset2=self.conv22_offset(feature_f1)
feature_f2=self.conv22(feature_f1,offset2)
offset3=self.conv23_offset(feature_f2)
# self.conv23.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv23(support,offset3)
if test:
return agg_features,offset3
else:
return agg_features
def agg3(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv31_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv31(feature_f0, offset, mask)
offset_mask2 = self.conv32_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv32(out, offset, mask)
offset_mask4 = self.conv33_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
# mask=torch.nn.functional.softmax(mask,dim=1)
# kernel_weight=self.trans_kernel.detach()*9
# self.conv33.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv33(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv31_offset(feature_f0)
feature_f1=self.conv31(feature_f0,offset1)
offset2=self.conv32_offset(feature_f1)
feature_f2=self.conv32(feature_f1,offset2)
offset3=self.conv33_offset(feature_f2)
# self.conv33.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv33(support,offset3)
if test:
return agg_features,offset3
else:
return agg_features
def agg4(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv41_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv41(feature_f0, offset, mask)
offset_mask4 = self.conv42_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
# mask=torch.nn.functional.softmax(mask,dim=1)
# kernel_weight=self.trans_kernel.detach()*9
# self.conv42.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv42(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv41_offset(feature_f0)
feature_f1=self.conv41(feature_f0,offset1)
offset2=self.conv42_offset(feature_f1)
# self.conv42.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv42(support,offset2)
if test:
return agg_features,offset2
else:
return agg_features
def agg5(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv51_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv51(feature_f0, offset, mask)
offset_mask4 = self.conv52_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
# mask=torch.nn.functional.softmax(mask,dim=1)
# kernel_weight=self.trans_kernel.detach()*9
# self.conv52.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv52(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv51_offset(feature_f0)
feature_f1=self.conv51(feature_f0,offset1)
offset2=self.conv52_offset(feature_f1)
# self.conv52.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv52(support,offset2)
if test:
return agg_features,offset2
else:
return agg_features
def forward(self,datas,test=False):
# torch.Size([2, 256, 48, 156])
# torch.Size([2, 256, 24, 78])
# torch.Size([2, 256, 12, 39])
# torch.Size([2, 256, 6, 20])
# torch.Size([2, 256, 3, 10])
agg_output=[]
refer_out=[]
support1_out=[]
support2_out=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
shuffle_id=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]=shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]-1
shuffle_id2=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
shuffle_id2[shuffle_id2==np.arange(datas[0].shape[0])]=shuffle_id2[shuffle_id2==np.arange(datas[0].shape[0])]-1
# print('shuffle id:',shuffle_id)
# print('shuffle id2:',shuffle_id2)
for i in range(len(datas)):
reference=datas[i]+0
tk_feature0=self.agg[i](reference.detach(),reference.detach(),test)
support=datas[i][shuffle_id,:,:,:]+0
tk_feature1=self.agg[i](support.detach(),reference.detach(),test)
support=datas[i][shuffle_id2,:,:,:]+0
tk_feature2=self.agg[i](support.detach(),reference.detach(),test)
weight1=torch.nn.functional.cosine_similarity(tk_feature0.detach(),tk_feature1,dim=1).unsqueeze(1).unsqueeze(1)
weight2=torch.nn.functional.cosine_similarity(tk_feature0.detach(),tk_feature2,dim=1).unsqueeze(1).unsqueeze(1)
weight0=torch.ones_like(weight1)
weight=torch.nn.functional.softmax(torch.cat([weight0,weight1,weight2],dim=1),dim=1)
# print('agg weight',(weight[:,0,...]).mean().item())
feature=torch.cat([tk_feature0.unsqueeze(1),tk_feature1.unsqueeze(1),tk_feature2.unsqueeze(1)],dim=1)
agg_feature=torch.sum(feature*weight,dim=1)
agg_output.append(agg_feature)
refer_out.append(tk_feature0)
support1_out.append(tk_feature1)
support2_out.append(tk_feature2)
# print(len(agg_output),len(refer_out),len(support1_out),print(support2_out))
return [tuple(agg_output),tuple(refer_out),tuple(support1_out),tuple(support2_out)]
def forward_test(self,datas,test=True):
output=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
print('stsn test')
self.offset=[]
self.mask=[]
for i in range(len(datas)):
reference=datas[i][:1,:,:,:]+0
if datas[i].shape[0]>1:
shuffle_id=np.random.randint(low=1,high=datas[i].shape[0],size=1)
support=datas[i][shuffle_id,:,:,:]+0
else:
shuffle_id=[0]
support=datas[i][shuffle_id,:,:,:]+0
if self.with_modulated_dcn:
tk_feature,soffset4,mask4=self.agg[i](support,reference,test)
self.offset.append(soffset4)
self.mask.append(mask4)
else:
tk_feature,soffset4=self.agg[i](support,reference,test)
self.offset.append(soffset4)
weight1=torch.nn.functional.cosine_similarity(reference,tk_feature,dim=1).unsqueeze(1).unsqueeze(1)
weight0=torch.ones_like(weight1)
weight=torch.nn.functional.softmax(torch.cat([weight0,weight1],dim=1),dim=1)
feature=torch.cat([reference.unsqueeze(1),tk_feature.unsqueeze(1)],dim=1)
agg_feature=torch.sum(feature*weight,dim=1)
output.append(agg_feature)
return tuple(output)
def forward_eval(self,datas,test=True):
output=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
print('stsn eval')
self.offset=[]
self.mask=[]
refer_out=[]
agg_out=[]
support_out=[]
for i in range(datas[0].shape[0]-1):
support_out.append([])
self.offset.append([])
self.mask.append([])
out=[]
for i in range(len(datas)):
reference=datas[i][:1,:,:,:]+0
if datas[i].shape[0]>1:
support=datas[i][1:,:,:,:]+0
else:
shuffle_id=[0]
support=datas[i][:1,:,:,:]+0
            # with test=True the agg block returns (feature, offset[, mask]); keep only the feature
            tk_feature0=self.agg[i](reference.detach(),reference.detach(),test)[0]
refer_out.append(tk_feature0)
weight0=torch.ones_like(torch.nn.functional.cosine_similarity(tk_feature0,tk_feature0,dim=1).unsqueeze(1).unsqueeze(1))
feature=tk_feature0.unsqueeze(1)
for j in range(support.shape[0]):
tk_feature,offset,mask=self.agg[i](support[j:j+1,...],reference,test)
weight=torch.nn.functional.cosine_similarity(tk_feature0,tk_feature,dim=1).unsqueeze(1).unsqueeze(1)
weight0=torch.cat([weight0,weight],dim=1)
feature=torch.cat([feature,tk_feature.unsqueeze(1)],dim=1)
support_out[j].append(tk_feature)
self.offset[j].append(offset)
self.mask[j].append(mask)
weight=torch.nn.functional.softmax(weight0,dim=1)
agg_feature=torch.sum(feature*weight,dim=1)
agg_out.append(agg_feature)
for i in range(datas[0].shape[0]-1):
support_out[i]=tuple(support_out[i])
out=[tuple(refer_out),tuple(agg_out)]+support_out
return out
@AGG.register_module
class STSN_fuse_r(nn.Module):
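    # Variant that self-aggregates the reference frame as well and adds an L1
    # feature-reconstruction loss (loss_trans) between the aligned features and the
    # detached reference; forward() returns (features, loss_trans).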
def __init__(self,in_channels,out_channels,dcn):
super(STSN_fuse_r,self).__init__()
self.deformable_groups = dcn.get('deformable_groups', 1)
self.with_modulated_dcn = dcn.get('modulated', False)
if not self.with_modulated_dcn:
conv_op = DeformConv
offset_channels = 18
else:
conv_op = ModulatedDeformConv
offset_channels = 27
#agg1
self.conv11_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv11 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv12_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv12 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv13_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv13 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv14_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv14 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg2
self.conv21_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv21 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv22_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv22 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv23_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv23 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg3
self.conv31_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv31 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv32_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv32 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv33_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv33 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg4
self.conv41_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv41 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv42_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv42 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg5
self.conv51_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv51 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv52_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv52 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.relu=nn.LeakyReLU(inplace=True)
self.offset=[]
self.mask=[]
print('init transform kernel')
# for i in range(256):
# for j in range(256):
# for m in range(3):
# for n in range(3):
# if i==j:
# self.trans_kernel[i,j,m,n]=self.trans_kernel[i,j,m,n]/self.trans_kernel[i,j,m,n]
# self.trans_kernel[i,j,m,n]=self.trans_kernel[i,j,m,n]/9
# else:
# self.trans_kernel[i,j,m,n]=self.trans_kernel[i,j,m,n]*0
# np.save('/home/ld/RepPoints/mmdetection/mmdet/ops/dcn/init_kernel.npy',self.trans_kernel.data.cpu().numpy())
self.trans_kernel=torch.from_numpy(np.load('/home/ld/RepPoints/mmdetection/mmdet/ops/dcn/init_kernel.npy'))
self.trans_kernel=nn.Parameter(self.trans_kernel)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.conv11_offset, std=0.01)
normal_init(self.conv11, std=0.01)
normal_init(self.conv12_offset, std=0.01)
normal_init(self.conv12, std=0.01)
normal_init(self.conv13_offset, std=0.01)
normal_init(self.conv13, std=0.01)
normal_init(self.conv14_offset, std=0.01)
normal_init(self.conv14, std=0.01)
normal_init(self.conv21_offset, std=0.01)
normal_init(self.conv21, std=0.01)
normal_init(self.conv22_offset, std=0.01)
normal_init(self.conv22, std=0.01)
normal_init(self.conv23_offset, std=0.01)
normal_init(self.conv23, std=0.01)
normal_init(self.conv31_offset, std=0.01)
normal_init(self.conv31, std=0.01)
normal_init(self.conv32_offset, std=0.01)
normal_init(self.conv32, std=0.01)
normal_init(self.conv33_offset, std=0.01)
normal_init(self.conv33, std=0.01)
normal_init(self.conv41_offset, std=0.01)
normal_init(self.conv41, std=0.01)
normal_init(self.conv42_offset, std=0.01)
normal_init(self.conv42, std=0.01)
normal_init(self.conv51_offset, std=0.01)
normal_init(self.conv51, std=0.01)
normal_init(self.conv52_offset, std=0.01)
normal_init(self.conv52, std=0.01)
def agg1(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv11_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
# mask=torch.nn.functional.softmax(mask,dim=1)
# kernel_weight=self.trans_kernel.detach()*9
# self.conv11.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv11(feature_f0, offset, mask)
offset_mask2 = self.conv12_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv12(out, offset, mask)
offset_mask3 = self.conv13_offset(out)
offset = offset_mask3[:, :18 * self.deformable_groups, :, :]
mask = offset_mask3[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv13(out, offset, mask)
offset_mask4 = self.conv14_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv14.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv14(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
# print('agg1',feature_f0.device,self.conv11_offset.weight.device)
offset1=self.conv11_offset(feature_f0)
feature_f1=self.conv11(feature_f0,offset1)
offset2=self.conv12_offset(feature_f1)
feature_f2=self.conv12(feature_f1,offset2)
offset3=self.conv13_offset(feature_f2)
feature_f3=self.conv13(feature_f2,offset3)
offset4=self.conv14_offset(feature_f3)
self.conv14.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv14(support,offset4)
if test:
return agg_features,offset4
else:
return agg_features
def agg2(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv21_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv21(feature_f0, offset, mask)
offset_mask2 = self.conv22_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv22(out, offset, mask)
offset_mask4 = self.conv23_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv23.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv23(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv21_offset(feature_f0)
feature_f1=self.conv21(feature_f0,offset1)
offset2=self.conv22_offset(feature_f1)
feature_f2=self.conv22(feature_f1,offset2)
offset3=self.conv23_offset(feature_f2)
self.conv23.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv23(support,offset3)
if test:
return agg_features,offset3
else:
return agg_features
def agg3(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv31_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv31(feature_f0, offset, mask)
offset_mask2 = self.conv32_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv32(out, offset, mask)
offset_mask4 = self.conv33_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv33.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv33(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv31_offset(feature_f0)
feature_f1=self.conv31(feature_f0,offset1)
offset2=self.conv32_offset(feature_f1)
feature_f2=self.conv32(feature_f1,offset2)
offset3=self.conv33_offset(feature_f2)
self.conv33.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv33(support,offset3)
if test:
return agg_features,offset3
else:
return agg_features
def agg4(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv41_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv41(feature_f0, offset, mask)
offset_mask4 = self.conv42_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv42.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv42(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv41_offset(feature_f0)
feature_f1=self.conv41(feature_f0,offset1)
offset2=self.conv42_offset(feature_f1)
self.conv42.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv42(support,offset2)
if test:
return agg_features,offset2
else:
return agg_features
def agg5(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv51_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv51(feature_f0, offset, mask)
offset_mask4 = self.conv52_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv52.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv52(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv51_offset(feature_f0)
feature_f1=self.conv51(feature_f0,offset1)
offset2=self.conv52_offset(feature_f1)
self.conv52.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv52(support,offset2)
if test:
return agg_features,offset2
else:
return agg_features
def forward(self,datas,test=False):
# torch.Size([2, 256, 48, 156])
# torch.Size([2, 256, 24, 78])
# torch.Size([2, 256, 12, 39])
# torch.Size([2, 256, 6, 20])
# torch.Size([2, 256, 3, 10])
output=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
shuffle_id=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]=shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]-1
shuffle_id2=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
shuffle_id2[shuffle_id2==np.arange(datas[0].shape[0])]=shuffle_id2[shuffle_id2==np.arange(datas[0].shape[0])]-1
print('shuffle id:',shuffle_id)
print('shuffle id2:',shuffle_id2)
        # running transfer loss, averaged over the pyramid levels after the loop
        self.loss_trans=0
        for i in range(len(datas)):
reference=datas[i]+0
tk_feature0=self.agg[i](reference,reference,test)
support=datas[i][shuffle_id,:,:,:]+0
tk_feature1=self.agg[i](support,reference,test)
support=datas[i][shuffle_id2,:,:,:]+0
tk_feature2=self.agg[i](support,reference,test)
weight1=torch.nn.functional.cosine_similarity(reference,tk_feature1,dim=1).unsqueeze(1).unsqueeze(1)
weight2=torch.nn.functional.cosine_similarity(reference,tk_feature2,dim=1).unsqueeze(1).unsqueeze(1)
weight0=torch.nn.functional.cosine_similarity(reference,tk_feature0,dim=1).unsqueeze(1).unsqueeze(1)
weight=torch.nn.functional.softmax(torch.cat([weight0,weight1,weight2],dim=1),dim=1)
feature=torch.cat([tk_feature0.unsqueeze(1),tk_feature1.unsqueeze(1),tk_feature2.unsqueeze(1)],dim=1)
agg_feature=torch.sum(feature*weight,dim=1)
output.append(agg_feature)
self.loss_trans=(torch.nn.functional.l1_loss(tk_feature0,reference.detach(),reduction='mean')+ \
0.1*torch.nn.functional.l1_loss(tk_feature1,reference.detach(),reduction='mean')+ \
0.1*torch.nn.functional.l1_loss(tk_feature2,reference.detach(),reduction='mean'))/1.2+ \
self.loss_trans
self.loss_trans=self.loss_trans/len(datas)
return tuple(output),self.loss_trans
def forward_test(self,datas,test=True):
output=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
print('stsn test')
self.offset=[]
for i in range(len(datas)):
reference=datas[i][:1,:,:,:]+0
            # self-aggregation of the reference; index [0] works for both the
            # modulated (feature, offset, mask) and plain (feature, offset) returns
            tk_feature0=self.agg[i](reference,reference,test)[0]
if datas[i].shape[0]>1:
shuffle_id=np.random.randint(low=1,high=datas[i].shape[0],size=1)
support=datas[i][shuffle_id,:,:,:]+0
else:
shuffle_id=[0]
support=datas[i][shuffle_id,:,:,:]+0
            if self.with_modulated_dcn:
                print(i)
                # modulated DCN returns (feature, offset, mask) when test=True
                tk_feature1,offset,mask=self.agg[i](support,reference,test)
                self.offset.append(offset)
else:
tk_feature1,offset=self.agg[i](support,reference,test)
self.offset.append(offset)
weight1=torch.nn.functional.cosine_similarity(reference,tk_feature1,dim=1).unsqueeze(1).unsqueeze(1)
weight0=torch.nn.functional.cosine_similarity(reference,tk_feature0,dim=1).unsqueeze(1).unsqueeze(1)
weight=torch.nn.functional.softmax(torch.cat([weight0,weight1],dim=1),dim=1)
feature=torch.cat([tk_feature0.unsqueeze(1),tk_feature1.unsqueeze(1)],dim=1)
agg_feature=torch.sum(feature*weight,dim=1)
output.append(agg_feature)
return tuple(output)
class STSN_fuse_ori(nn.Module):
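    # Unregistered duplicate of the fusion module (no @AGG.register_module decorator);
    # it follows the same agg-block design with the frozen averaging kernel as
    # STSN_fuse_c above.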
def __init__(self,in_channels,out_channels,dcn):
super(STSN_fuse_ori,self).__init__()
self.deformable_groups = dcn.get('deformable_groups', 1)
self.with_modulated_dcn = dcn.get('modulated', False)
if not self.with_modulated_dcn:
conv_op = DeformConv
offset_channels = 18
else:
conv_op = ModulatedDeformConv
offset_channels = 27
#agg1
self.conv11_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv11 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv12_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv12 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv13_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv13 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv14_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv14 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg2
self.conv21_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv21 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv22_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv22 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv23_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv23 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg3
self.conv31_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv31 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv32_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv32 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv33_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv33 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg4
self.conv41_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv41 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv42_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv42 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
#agg5
self.conv51_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv51 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv52_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv52 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.relu=nn.LeakyReLU(inplace=True)
self.offset=[]
self.mask=[]
print('init transform kernel')
self.trans_kernel=torch.from_numpy(np.load('/home/ld/RepPoints/mmdetection/mmdet/ops/dcn/init_kernel.npy'))
self.trans_kernel=nn.Parameter(self.trans_kernel)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.conv11_offset, std=0.01)
normal_init(self.conv11, std=0.01)
normal_init(self.conv12_offset, std=0.01)
normal_init(self.conv12, std=0.01)
normal_init(self.conv13_offset, std=0.01)
normal_init(self.conv13, std=0.01)
normal_init(self.conv14_offset, std=0.01)
normal_init(self.conv14, std=0.01)
normal_init(self.conv21_offset, std=0.01)
normal_init(self.conv21, std=0.01)
normal_init(self.conv22_offset, std=0.01)
normal_init(self.conv22, std=0.01)
normal_init(self.conv23_offset, std=0.01)
normal_init(self.conv23, std=0.01)
normal_init(self.conv31_offset, std=0.01)
normal_init(self.conv31, std=0.01)
normal_init(self.conv32_offset, std=0.01)
normal_init(self.conv32, std=0.01)
normal_init(self.conv33_offset, std=0.01)
normal_init(self.conv33, std=0.01)
normal_init(self.conv41_offset, std=0.01)
normal_init(self.conv41, std=0.01)
normal_init(self.conv42_offset, std=0.01)
normal_init(self.conv42, std=0.01)
normal_init(self.conv51_offset, std=0.01)
normal_init(self.conv51, std=0.01)
normal_init(self.conv52_offset, std=0.01)
normal_init(self.conv52, std=0.01)
def agg1(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv11_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv11(feature_f0, offset, mask)
offset_mask2 = self.conv12_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv12(out, offset, mask)
offset_mask3 = self.conv13_offset(out)
offset = offset_mask3[:, :18 * self.deformable_groups, :, :]
mask = offset_mask3[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv13(out, offset, mask)
offset_mask4 = self.conv14_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv14.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv14(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
# print('agg1',feature_f0.device,self.conv11_offset.weight.device)
offset1=self.conv11_offset(feature_f0)
feature_f1=self.conv11(feature_f0,offset1)
offset2=self.conv12_offset(feature_f1)
feature_f2=self.conv12(feature_f1,offset2)
offset3=self.conv13_offset(feature_f2)
feature_f3=self.conv13(feature_f2,offset3)
offset4=self.conv14_offset(feature_f3)
self.conv14.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv14(support,offset4)
if test:
return agg_features,offset4
else:
return agg_features
def agg2(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv21_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv21(feature_f0, offset, mask)
offset_mask2 = self.conv22_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv22(out, offset, mask)
offset_mask4 = self.conv23_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv23.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv23(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv21_offset(feature_f0)
feature_f1=self.conv21(feature_f0,offset1)
offset2=self.conv22_offset(feature_f1)
feature_f2=self.conv22(feature_f1,offset2)
offset3=self.conv23_offset(feature_f2)
self.conv23.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv23(support,offset3)
if test:
return agg_features,offset3
else:
return agg_features
def agg3(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv31_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv31(feature_f0, offset, mask)
offset_mask2 = self.conv32_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv32(out, offset, mask)
offset_mask4 = self.conv33_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv33.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv33(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv31_offset(feature_f0)
feature_f1=self.conv31(feature_f0,offset1)
offset2=self.conv32_offset(feature_f1)
feature_f2=self.conv32(feature_f1,offset2)
offset3=self.conv33_offset(feature_f2)
self.conv33.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv33(support,offset3)
if test:
return agg_features,offset3
else:
return agg_features
def agg4(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv41_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv41(feature_f0, offset, mask)
offset_mask4 = self.conv42_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv42.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv42(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv41_offset(feature_f0)
feature_f1=self.conv41(feature_f0,offset1)
offset2=self.conv42_offset(feature_f1)
self.conv42.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv42(support,offset2)
if test:
return agg_features,offset2
else:
return agg_features
def agg5(self,support,reference,test=False):
feature_f0=torch.cat([support,reference],dim=1)
if self.with_modulated_dcn:
offset_mask1 = self.conv51_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv51(feature_f0, offset, mask)
offset_mask4 = self.conv52_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
# mask = mask.sigmoid()
mask=torch.nn.functional.softmax(mask,dim=1)
kernel_weight=self.trans_kernel.detach()*9
self.conv52.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv52(support, offset, mask)
if test:
return out,offset,mask
else:
return out
else:
offset1=self.conv51_offset(feature_f0)
feature_f1=self.conv51(feature_f0,offset1)
offset2=self.conv52_offset(feature_f1)
self.conv52.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv52(support,offset2)
if test:
return agg_features,offset2
else:
return agg_features
def forward(self,datas,test=False):
# torch.Size([2, 256, 48, 156])
# torch.Size([2, 256, 24, 78])
# torch.Size([2, 256, 12, 39])
# torch.Size([2, 256, 6, 20])
# torch.Size([2, 256, 3, 10])
output=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
shuffle_id=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]=shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]-1
shuffle_id2=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
shuffle_id2[shuffle_id2==np.arange(datas[0].shape[0])]=shuffle_id2[shuffle_id2==np.arange(datas[0].shape[0])]-1
print('shuffle id:',shuffle_id)
print('shuffle id2:',shuffle_id2)
for i in range(len(datas)):
reference=datas[i]+0
support=datas[i][shuffle_id,:,:,:]+0
tk_feature1=self.agg[i](support,reference,test)
support=datas[i][shuffle_id2,:,:,:]+0
tk_feature2=self.agg[i](support,reference,test)
weight1=torch.nn.functional.cosine_similarity(reference,tk_feature1,dim=1).unsqueeze(1).unsqueeze(1)
weight2=torch.nn.functional.cosine_similarity(reference,tk_feature2,dim=1).unsqueeze(1).unsqueeze(1)
weight0=torch.ones_like(weight1)
weight=torch.nn.functional.softmax(torch.cat([weight0,weight1,weight2],dim=1),dim=1)
feature=torch.cat([reference.unsqueeze(1),tk_feature1.unsqueeze(1),tk_feature2.unsqueeze(1)],dim=1)
agg_feature=torch.sum(feature*weight,dim=1)
output.append(agg_feature)
return tuple(output)
def forward_test(self,datas,test=True):
output=[]
self.agg=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
print('stsn test')
for i in range(len(datas)):
reference=datas[i][:1,:,:,:]+0
if datas[i].shape[0]>1:
shuffle_id=np.random.randint(low=1,high=datas[i].shape[0],size=1)
support=datas[i][shuffle_id,:,:,:]+0
else:
shuffle_id=[0]
support=datas[i][shuffle_id,:,:,:]+0
if self.with_modulated_dcn:
tk_feature,soffset4,mask4=self.agg[i](support,reference,test)
self.offset.append(soffset4)
self.mask.append(mask4)
else:
tk_feature,soffset4=self.agg[i](support,reference,test)
self.offset.append(soffset4)
weight1=torch.nn.functional.cosine_similarity(reference,tk_feature,dim=1).unsqueeze(1).unsqueeze(1)
weight0=torch.ones_like(weight1)
weight=torch.nn.functional.softmax(torch.cat([weight0,weight1],dim=1),dim=1)
feature=torch.cat([reference.unsqueeze(1),tk_feature.unsqueeze(1)],dim=1)
agg_feature=torch.sum(feature*weight,dim=1)
output.append(agg_feature)
return tuple(output)
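# --- Illustrative sketch (not used by the classes in this file) ---
# How a modulated-DCN prediction map is split into offsets and a mask, following
# the channel convention of the agg* methods above: 18 offset channels
# (9 x/y pairs) plus 9 mask channels per deformable group. The input shape is an
# assumption for illustration only.
def _split_offset_mask_example(offset_mask, deformable_groups=1):
    # offset_mask: (B, 27 * deformable_groups, H, W)
    offset = offset_mask[:, :18 * deformable_groups, :, :]
    mask = offset_mask[:, -9 * deformable_groups:, :, :].sigmoid()
    return offset, mask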
@AGG.register_module
class STSN_ada_dcn(nn.Module):
def __init__(self,in_channels,out_channels,dcn):
super(STSN_ada_dcn,self).__init__()
self.deformable_groups = dcn.get('deformable_groups', 1)
self.with_modulated_dcn = dcn.get('modulated', False)
if not self.with_modulated_dcn:
conv_op = DeformConv
offset_channels = 18
else:
conv_op = ModulatedDeformConv
offset_channels = 27
self.conv1_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv1 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv2_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv2 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv3_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv3 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv4_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv4 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.relu=nn.LeakyReLU(inplace=True)
self.neck=nn.Sequential(
build_conv_layer(None, 256, 512, kernel_size=1, stride=1, padding=0,bias=False),
nn.GroupNorm(32,512),
nn.LeakyReLU(inplace=True),
build_conv_layer(None, 512, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.GroupNorm(32,256),
nn.LeakyReLU(inplace=True),
build_conv_layer(None, 256, 256, kernel_size=1, stride=1, padding=0, bias=False))
self.agg_set=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
self.offset=[]
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
def agg1(self,support,reference,test=False):
features=torch.cat([support,reference],dim=1)
offset1=self.conv1_offset(features)
agg_features1=self.conv1(features,offset1)
offset2=self.conv2_offset(agg_features1)
agg_features2=self.conv2(agg_features1,offset2)
offset3=self.conv3_offset(agg_features2)
agg_features3=self.conv3(agg_features2,offset3)
offset4=self.conv4_offset(agg_features3)
agg_features=self.conv4(support,offset4)
if test:
return agg_features,[offset1,offset2,offset3,offset4]
else:
return agg_features
def agg2(self,support,reference,test=False):
features=torch.cat([support,reference],dim=1)
offset1=self.conv1_offset(features)
agg_features1=self.conv1(features,offset1)
offset2=self.conv2_offset(agg_features1)
agg_features2=self.conv2(agg_features1,offset2)
offset4=self.conv4_offset(agg_features2)
agg_features=self.conv4(support,offset4)
if test:
return agg_features,[offset1,offset2,offset4]
else:
return agg_features
def agg3(self,support,reference,test=False):
features=torch.cat([support,reference],dim=1)
offset1=self.conv1_offset(features)
agg_features1=self.conv1(features,offset1)
offset4=self.conv4_offset(agg_features1)
agg_features=self.conv4(support,offset4)
if test:
return agg_features,[offset1,offset4]
else:
return agg_features
def agg4(self,support,reference,test=False):
features=torch.cat([support,reference],dim=1)
offset4=self.conv4_offset(features)
agg_features=self.conv4(support,offset4)
if test:
return agg_features,[offset4]
else:
return agg_features
def agg5(self,support,reference,test=False):
features=torch.cat([support,reference],dim=1)
offset4=self.conv4_offset(features)
agg_features=self.conv4(support,offset4)
if test:
return agg_features,[offset4]
else:
return agg_features
def forward(self,datas,test=False):
# torch.Size([2, 256, 48, 156])
# torch.Size([2, 256, 24, 78])
# torch.Size([2, 256, 12, 39])
# torch.Size([2, 256, 6, 20])
# torch.Size([2, 256, 3, 10])
output=[]
shuffle_id=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]=shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]-1
print('shuffle id:',shuffle_id)
for i in range(len(datas)):
reference=datas[i]+0
support=datas[i][shuffle_id,:,:,:]+0
tt_feature=self.agg_set[i](reference,reference,test)
# print(self.roffset4.shape)
stt=self.neck(tt_feature)
# stt=tt_feature
# ttweight=torch.exp(self.similarity(torch.cat([stt,stt],dim=1)).unsqueeze(1))#(b,1,w,h)
ttweight=torch.nn.functional.cosine_similarity(stt,stt,dim=1).unsqueeze(1)
# print(ttweight.max(),ttweight.min())
tk_feature=self.agg_set[i](support,reference,test)
stk=self.neck(tk_feature)
# stk=tk_feature
# tkweight=torch.exp(self.similarity(torch.cat([stt,stk],dim=1)).unsqueeze(1))
tkweight=torch.nn.functional.cosine_similarity(stt,stk,dim=1).unsqueeze(1)
# print(tkweight.max(),tkweight.min())
weights=torch.cat([ttweight.unsqueeze(0),tkweight.unsqueeze(0)],dim=0)#(2,b,1,w,h)
weights=F.softmax(weights,dim=0)
print('support weight','scale:',i*8,torch.mean(weights[1,:,:,:]).item(),torch.min(weights[1,:,:,:]).item(),torch.max(weights[1,:,:,:]).item())
features=torch.cat([tt_feature.unsqueeze(0),tk_feature.unsqueeze(0)],dim=0)#(2,b,c,w,h)
agg_features=torch.sum(weights*features,dim=0)#(b,c,w,h)
output.append(agg_features)
# print(agg_features.shape)
return tuple(output)
# return stt
def forward_test(self,datas,test=True):
output=[]
print('test')
for i in range(len(datas)):
reference=datas[i][:1,:,:,:]+0
if datas[i].shape[0]>1:
shuffle_id=np.random.randint(low=1,high=datas[i].shape[0],size=1)
# print(shuffle_id)
support=datas[i][shuffle_id,:,:,:]+0
else:
shuffle_id=[0]
support=datas[i][shuffle_id,:,:,:]+0
# support=datas[:1,:,:,:]+0
# print(datas.shape)
tt_feature,roffset=self.agg_set[i](reference,reference,test)
# print(self.roffset4.shape)
stt=self.neck(tt_feature)
# stt=tt_feature
# ttweight=torch.exp(self.similarity(torch.cat([stt,stt],dim=1)).unsqueeze(1))#(b,1,w,h)
ttweight=torch.nn.functional.cosine_similarity(stt,stt,dim=1).unsqueeze(1)
# print(ttweight.max(),ttweight.min())
tk_feature,soffset=self.agg_set[i](support,reference,test)
self.offset.append(soffset)
stk=self.neck(tk_feature)
# stk=tk_feature
# tkweight=torch.exp(self.similarity(torch.cat([stt,stk],dim=1)).unsqueeze(1))
tkweight=torch.nn.functional.cosine_similarity(stt,stk,dim=1).unsqueeze(1)
# print(tkweight.max(),tkweight.min())
weights=torch.cat([ttweight.unsqueeze(0),tkweight.unsqueeze(0)],dim=0)#(2,b,1,w,h)
weights=F.softmax(weights,dim=0)
# print(torch.max((weights).abs()))
self.weight=weights
features=torch.cat([tt_feature.unsqueeze(0),tk_feature.unsqueeze(0)],dim=0)#(2,b,c,w,h)
agg_features=torch.sum(weights*features,dim=0)#(b,c,w,h)
output.append(agg_features)
# print(agg_features.shape)
return tuple(output)
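# Minimal sketch of the cosine-similarity fusion used in forward()/forward_test()
# above: per-pixel similarities between the embedded reference and each candidate
# feature are softmax-normalised and used as fusion weights. Tensor shapes are
# assumptions for illustration only.
def _adaptive_weight_example(stt, stk, tt_feature, tk_feature):
    import torch
    import torch.nn.functional as F
    ttweight = F.cosine_similarity(stt, stt, dim=1).unsqueeze(1)      # (B,1,H,W), all ones
    tkweight = F.cosine_similarity(stt, stk, dim=1).unsqueeze(1)      # (B,1,H,W)
    weights = F.softmax(torch.cat([ttweight.unsqueeze(0), tkweight.unsqueeze(0)], dim=0), dim=0)
    features = torch.cat([tt_feature.unsqueeze(0), tk_feature.unsqueeze(0)], dim=0)
    return torch.sum(weights * features, dim=0)                       # (B,C,H,W)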
@AGG.register_module
class STSN_atrous_dcn(nn.Module):
def __init__(self,in_channels,out_channels,dcn):
super(STSN_atrous_dcn,self).__init__()
self.deformable_groups = dcn.get('deformable_groups', 1)
self.with_modulated_dcn = dcn.get('modulated', False)
if not self.with_modulated_dcn:
conv_op = DeformConv
offset_channels = 18
else:
conv_op = ModulatedDeformConv
offset_channels = 27
self.conv1_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=2,dilation=2)
self.conv1 = conv_op(out_channels, out_channels, kernel_size=3, stride=1,
padding=2, dilation=2, deformable_groups=self.deformable_groups, bias=False)
self.conv2_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=4, dilation=4)
self.conv2 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=4,dilation=4,deformable_groups=self.deformable_groups,bias=False)
self.conv3_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=8,dilation=8)
self.conv3 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=8,dilation=8,deformable_groups=self.deformable_groups,bias=False)
self.conv4_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=16,dilation=16)
self.conv4 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=16,dilation=16,deformable_groups=self.deformable_groups,bias=False)
self.relu=nn.LeakyReLU(inplace=True)
self.neck=nn.Sequential(
build_conv_layer(None, 256, 512, kernel_size=1, stride=1, padding=0,bias=False),
nn.GroupNorm(32,512),
nn.LeakyReLU(inplace=True),
build_conv_layer(None, 512, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.GroupNorm(32,256),
nn.LeakyReLU(inplace=True),
build_conv_layer(None, 256, 256, kernel_size=1, stride=1, padding=0, bias=False))
self.agg_set=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
self.offsets_record=[]
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
def agg1(self,support,reference,test=False):
features=torch.cat([support,reference],dim=1)
offset1=self.conv1_offset(features)
agg_features1=self.conv1(support,offset1)
offset2=self.conv2_offset(features)
agg_features2=self.conv2(support,offset2)
offset3=self.conv3_offset(features)
agg_features3=self.conv3(support,offset3)
offset4=self.conv4_offset(features)
agg_features4=self.conv4(support,offset4)
agg_features=0.25*(agg_features1+agg_features2+agg_features3+agg_features4)
if test:
return agg_features,offset1,offset2,offset3,offset4
else:
return agg_features
def agg2(self,support,reference,test=False):
features=torch.cat([support,reference],dim=1)
offset1=self.conv1_offset(features)
agg_features1=self.conv1(support,offset1)
offset2=self.conv2_offset(features)
agg_features2=self.conv2(support,offset2)
offset3=self.conv3_offset(features)
agg_features3=self.conv3(support,offset3)
agg_features=0.33*(agg_features1+agg_features2+agg_features3)
if test:
return agg_features,offset1,offset2,offset3
else:
return agg_features
def agg3(self,support,reference,test=False):
features=torch.cat([support,reference],dim=1)
offset1=self.conv1_offset(features)
agg_features1=self.conv1(support,offset1)
offset2=self.conv2_offset(features)
agg_features2=self.conv2(support,offset2)
agg_features=0.5*(agg_features1+agg_features2)
if test:
return agg_features,offset1,offset2
else:
return agg_features
def agg4(self,support,reference,test=False):
features=torch.cat([support,reference],dim=1)
offset1=self.conv1_offset(features)
agg_features=self.conv1(support,offset1)
if test:
return agg_features,offset1
else:
return agg_features
def agg5(self,support,reference,test=False):
features=torch.cat([support,reference],dim=1)
offset1=self.conv1_offset(features)
agg_features=self.conv1(support,offset1)
if test:
return agg_features,offset1
else:
return agg_features
def forward(self,datas,test=False):
# torch.Size([2, 256, 48, 156])
# torch.Size([2, 256, 24, 78])
# torch.Size([2, 256, 12, 39])
# torch.Size([2, 256, 6, 20])
# torch.Size([2, 256, 3, 10])
output=[]
shuffle_id=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]=shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]-1
print('shuffle id:',shuffle_id)
for i in range(len(datas)):
reference=datas[i]+0
support=datas[i][shuffle_id,:,:,:]+0
tt_feature=self.agg_set[i](reference,reference,test)
# print(self.roffset4.shape)
stt=self.neck(tt_feature)
# stt=tt_feature
# ttweight=torch.exp(self.similarity(torch.cat([stt,stt],dim=1)).unsqueeze(1))#(b,1,w,h)
ttweight=torch.nn.functional.cosine_similarity(stt,stt,dim=1).unsqueeze(1)
# print(ttweight.max(),ttweight.min())
tk_feature=self.agg_set[i](support,reference,test)
stk=self.neck(tk_feature)
# stk=tk_feature
# tkweight=torch.exp(self.similarity(torch.cat([stt,stk],dim=1)).unsqueeze(1))
tkweight=torch.nn.functional.cosine_similarity(stt,stk,dim=1).unsqueeze(1)
# print(tkweight.max(),tkweight.min())
weights=torch.cat([ttweight.unsqueeze(0),tkweight.unsqueeze(0)],dim=0)#(2,b,1,w,h)
weights=F.softmax(weights,dim=0)
print('support weight','scale:',i*8,torch.mean(weights[1,:,:,:]).item(),torch.min(weights[1,:,:,:]).item(),torch.max(weights[1,:,:,:]).item())
features=torch.cat([tt_feature.unsqueeze(0),tk_feature.unsqueeze(0)],dim=0)#(2,b,c,w,h)
agg_features=torch.sum(weights*features,dim=0)#(b,c,w,h)
output.append(agg_features)
# print(agg_features.shape)
return tuple(output)
# return stt
def forward_test(self,datas,test=True):
output=[]
for i in range(len(datas)):
reference=datas[i][:1,:,:,:]+0
if datas[i].shape[0]>1:
shuffle_id=np.random.randint(low=1,high=datas[i].shape[0],size=1)
# print(shuffle_id)
support=datas[i][shuffle_id,:,:,:]+0
else:
shuffle_id=[0]
support=datas[i][shuffle_id,:,:,:]+0
# support=datas[:1,:,:,:]+0
# print(datas.shape)
tt_feature,roffset1,roffset2,roffset3,roffset4=self.agg_set[i](reference,reference,test)
if(roffset1.shape[-1]==20):
self.roffset1,self.roffset2,self.roffset3,self.roffset4=roffset1,roffset2,roffset3,roffset4
# print(self.roffset4.shape)
stt=self.neck(tt_feature)
# stt=tt_feature
# ttweight=torch.exp(self.similarity(torch.cat([stt,stt],dim=1)).unsqueeze(1))#(b,1,w,h)
ttweight=torch.nn.functional.cosine_similarity(stt,stt,dim=1).unsqueeze(1)
# print(ttweight.max(),ttweight.min())
tk_feature,soffset1,soffset2,soffset3,soffset4=self.agg_set[i](support,reference,test)
if(soffset1.shape[-1]==20):
self.soffset1,self.soffset2,self.soffset3,self.soffset4=soffset1,soffset2,soffset3,soffset4
stk=self.neck(tk_feature)
# stk=tk_feature
# tkweight=torch.exp(self.similarity(torch.cat([stt,stk],dim=1)).unsqueeze(1))
tkweight=torch.nn.functional.cosine_similarity(stt,stk,dim=1).unsqueeze(1)
# print(tkweight.max(),tkweight.min())
weights=torch.cat([ttweight.unsqueeze(0),tkweight.unsqueeze(0)],dim=0)#(2,b,1,w,h)
weights=F.softmax(weights,dim=0)
# print(torch.max((weights).abs()))
if(roffset1.shape[-1]==20):
self.weight=weights
features=torch.cat([tt_feature.unsqueeze(0),tk_feature.unsqueeze(0)],dim=0)#(2,b,c,w,h)
agg_features=torch.sum(weights*features,dim=0)#(b,c,w,h)
output.append(agg_features)
# print(agg_features.shape)
return tuple(output)
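# Note on the atrous variant above: each branch predicts offsets with a different
# dilation (2, 4, 8, 16), so before offsets are applied the 3x3 deformable kernel
# spans dilation * (3 - 1) + 1 pixels. Quick check (comment-only):
#   for d in (2, 4, 8, 16): print(d, d * 2 + 1)   # -> 5, 9, 17, 33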
@AGG.register_module
class STSN_ms(nn.Module):
def __init__(self,in_channels,out_channels,dcn):
super(STSN_ms,self).__init__()
self.deformable_groups = dcn.get('deformable_groups', 1)
self.with_modulated_dcn = dcn.get('modulated', False)
if not self.with_modulated_dcn:
conv_op = DeformConv
offset_channels = 18
else:
conv_op = ModulatedDeformConv
offset_channels = 27
self.conv1_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv1 = conv_op(out_channels, out_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv1_1 = conv_op(out_channels, out_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv2_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv2 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv2_1 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv3_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv3 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv3_1 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv4_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv4 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv4_1 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv5_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv5 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.relu=nn.LeakyReLU(inplace=True)
self.neck=nn.Sequential(
build_conv_layer(None, 256, 512, kernel_size=1, stride=1, padding=0,bias=False),
nn.GroupNorm(32,512),
nn.LeakyReLU(inplace=True),
build_conv_layer(None, 512, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.GroupNorm(32,256),
nn.LeakyReLU(inplace=True),
build_conv_layer(None, 256, 256, kernel_size=1, stride=1, padding=0, bias=False))
self.offsets_record=[]
self.agg_set=[self.agg1,self.agg2,self.agg3,self.agg4,self.agg5]
self.pre_dcn=[self.conv1_1,self.conv2_1,self.conv3_1,self.conv4_1]
dcn_base = np.arange(-1,
2).astype(np.float64)
dcn_base_y = np.repeat(dcn_base, 3)
dcn_base_x = np.tile(dcn_base, 3)
dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
(-1))
self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
def agg1(self,support,reference,test=False):
features=torch.cat([support,reference],dim=1)
offset1=self.conv1_offset(features)
agg_features=self.conv1(support,offset1)
return agg_features,offset1
def agg2(self,support,reference,test=False):
features=torch.cat([support,reference],dim=1)
offset1=self.conv2_offset(features)
agg_features=self.conv2(support,offset1)
return agg_features,offset1
def agg3(self,support,reference,test=False):
features=torch.cat([support,reference],dim=1)
offset1=self.conv3_offset(features)
agg_features=self.conv3(support,offset1)
return agg_features,offset1
def agg4(self,support,reference,test=False):
features=torch.cat([support,reference],dim=1)
offset1=self.conv4_offset(features)
agg_features=self.conv4(support,offset1)
return agg_features,offset1
def agg5(self,support,reference,test=False):
features=torch.cat([support,reference],dim=1)
offset1=self.conv5_offset(features)
agg_features=self.conv5(support,offset1)
return agg_features,offset1
def forward(self,datas,test=False):
# torch.Size([2, 256, 48, 156])
# torch.Size([2, 256, 24, 78])
# torch.Size([2, 256, 12, 39])
# torch.Size([2, 256, 6, 20])
# torch.Size([2, 256, 3, 10])
output=[]
shuffle_id=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]=shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]-1
print('shuffle id:',shuffle_id)
offset_ori=self.dcn_base_offset
for i in [4,3,2,1,0]:
reference=datas[i]+0
support=datas[i][shuffle_id,:,:,:]+0
bias=torch.zeros(reference.shape[0],2,reference.shape[-2],reference.shape[-1]).cuda(datas[0].device)
template=torch.ones_like(bias).float()
            # row/column coordinate grids (size-agnostic reshapes, kept on the same device)
            template[:,0,:,:]*=torch.reshape(torch.arange(template.shape[-2]).float(),(-1,1)).to(template.device)
            template[:,1,:,:]*=torch.reshape(torch.arange(template.shape[-1]).float(),(1,-1)).to(template.device)
if i!=4:
# offset_ori[:,:9,:,:]=self.dcn_base_offset[:,:9,:,:]*bias[:,0,:,:]*torch.mean(offset_ori[:,:9,:,:],dim=1,keepdim=True)
# offset_ori[:,9:,:,:]=self.dcn_base_offset[:,9:,:,:]*bias[:,1,:,:]*torch.mean(offset_ori[:,9:,:,:],dim=1,keepdim=True)
offset_ori=torch.nn.functional.interpolate(bias,scale_factor=2,mode='bilinear')*2
                # new_support=self.pre_dcn[i](support,offset_ori)
            # the pre_dcn warp above is disabled, so the raw support features are
            # used directly (assumed fallback; `new_support` is consumed below)
            new_support=support
            tt_feature,_=self.agg_set[i](reference,reference,test)
stt=self.neck(tt_feature)
ttweight=torch.nn.functional.cosine_similarity(stt,stt,dim=1).unsqueeze(1)
tk_feature,offset=self.agg_set[i](new_support,reference,test)
bias=offset.view(offset.shape[0],2,9,offset.shape[-2],offset.shape[-1])
bias=torch.mean(bias, dim=2)
stk=self.neck(tk_feature)
tkweight=torch.nn.functional.cosine_similarity(stt,stk,dim=1).unsqueeze(1)
weights=torch.cat([ttweight.unsqueeze(0),tkweight.unsqueeze(0)],dim=0)#(2,b,1,w,h)
weights=F.softmax(weights,dim=0)
print('support weight','scale:',i*8,torch.mean(weights[1,:,:,:]).item(),torch.min(weights[1,:,:,:]).item(),torch.max(weights[1,:,:,:]).item())
features=torch.cat([tt_feature.unsqueeze(0),tk_feature.unsqueeze(0)],dim=0)#(2,b,c,w,h)
agg_features=torch.sum(weights*features,dim=0)#(b,c,w,h)
output.append(agg_features)
# print(agg_features.shape)
return tuple(output)
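# Sketch of the base offset grid built in STSN_ms.__init__: the relative sampling
# positions of a 3x3 kernel, flattened to 18 values ordered (y0, x0, ..., y8, x8).
def _dcn_base_offset_example():
    import numpy as np
    base = np.arange(-1, 2).astype(np.float64)        # [-1, 0, 1]
    ys = np.repeat(base, 3)                           # row index of each tap
    xs = np.tile(base, 3)                             # col index of each tap
    return np.stack([ys, xs], axis=1).reshape(-1)     # shape (18,)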
@AGG.register_module
class STSN_s2_ori(nn.Module):
def __init__(self,in_channels,out_channels,dcn):
super(STSN_s2_ori,self).__init__()
self.deformable_groups = dcn.get('deformable_groups', 1)
self.with_modulated_dcn = dcn.get('modulated', False)
if not self.with_modulated_dcn:
conv_op = DeformConv
offset_channels = 18
else:
conv_op = ModulatedDeformConv
offset_channels = 27
self.conv1_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3, stride=1,padding=1,dilation=1)
self.conv1 = conv_op(in_channels, in_channels, kernel_size=3, stride=1,
padding=1, dilation=1, deformable_groups=self.deformable_groups, bias=False)
self.conv2_offset = nn.Conv2d(in_channels, self.deformable_groups * offset_channels,
kernel_size=3, stride=1, padding=1, dilation=1)
self.conv2 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv3_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv3 = conv_op(in_channels,in_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.conv4_offset = nn.Conv2d(in_channels,self.deformable_groups * offset_channels,
kernel_size=3,stride=1,padding=1,dilation=1)
self.conv4 = conv_op(out_channels,out_channels,kernel_size=3,stride=1,
padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
# self.conv5 = DeformConv(out_channels,out_channels,kernel_size=3,stride=1,
# padding=1,dilation=1,deformable_groups=self.deformable_groups,bias=False)
self.relu=nn.LeakyReLU(inplace=True)
self.offset=[]
# self.norm1 = nn.GroupNorm(32,in_channels)
# self.norm2 = nn.GroupNorm(32,in_channels)
# self.norm3 = nn.GroupNorm(32,in_channels)
# self.norm4 = nn.GroupNorm(32,out_channels)
# self.similarity=nn.Sequential(
# build_conv_layer(None, 512, 256, kernel_size=1, stride=1, padding=0,bias=False),
# nn.GroupNorm(32,256),
# nn.LeakyReLU(inplace=True),
# build_conv_layer(None, 256, 128, kernel_size=1, stride=1, padding=0, bias=False),
# nn.GroupNorm(32,128),
# nn.LeakyReLU(inplace=True),
# build_conv_layer(None, 128, 64, kernel_size=1, stride=1, padding=0, bias=False),
# nn.GroupNorm(32,64),
# nn.LeakyReLU(inplace=True),
# build_conv_layer(None, 64, 1, kernel_size=1, stride=1, padding=0, bias=False),
# )
self.neck=nn.Sequential(
build_conv_layer(None, 256, 512, kernel_size=1, stride=1, padding=0,bias=False),
nn.GroupNorm(32,512),
nn.LeakyReLU(inplace=True),
build_conv_layer(None, 512, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.GroupNorm(32,256),
nn.LeakyReLU(inplace=True),
build_conv_layer(None, 256, 256, kernel_size=1, stride=1, padding=0, bias=False))
self.trans_kernel=self.conv4.weight.detach()
print('init transform kernel')
# for i in range(256):
# for j in range(256):
# for m in range(3):
# for n in range(3):
# if i==j:
# self.trans_kernel[i,j,m,n]=self.trans_kernel[i,j,m,n]/self.trans_kernel[i,j,m,n]
# self.trans_kernel[i,j,m,n]=self.trans_kernel[i,j,m,n]/9
# else:
# self.trans_kernel[i,j,m,n]=self.trans_kernel[i,j,m,n]*0
# np.save('/home/ld/RepPoints/mmdetection/mmdet/ops/dcn/init_kernel.npy',self.trans_kernel.data.cpu().numpy())
self.trans_kernel=torch.from_numpy(np.load('/home/ld/RepPoints/mmdetection/mmdet/ops/dcn/init_kernel.npy'))
self.trans_kernel=nn.Parameter(self.trans_kernel)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.conv1_offset, std=0.01)
normal_init(self.conv1, std=0.01)
normal_init(self.conv2_offset, std=0.01)
normal_init(self.conv2, std=0.01)
normal_init(self.conv3_offset, std=0.01)
normal_init(self.conv3, std=0.01)
normal_init(self.conv4_offset, std=0.01)
normal_init(self.conv4, std=0.01)
def agg(self,support,reference,test=False):
# features=torch.cat([support,reference],dim=1)
feature_f0=torch.cat([support,reference],dim=1)
print(feature_f0.device,self.conv1.weight.device)
if self.with_modulated_dcn:
offset_mask1 = self.conv1_offset(feature_f0)
offset = offset_mask1[:, :18 * self.deformable_groups, :, :]
mask = offset_mask1[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv1(feature_f0, offset, mask)
offset_mask2 = self.conv2_offset(out)
offset = offset_mask2[:, :18 * self.deformable_groups, :, :]
mask = offset_mask2[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv2(out, offset, mask)
offset_mask3 = self.conv3_offset(out)
offset = offset_mask3[:, :18 * self.deformable_groups, :, :]
mask = offset_mask3[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
out = self.conv3(out, offset, mask)
offset_mask4 = self.conv4_offset(out)
offset = offset_mask4[:, :18 * self.deformable_groups, :, :]
mask = offset_mask4[:, -9 * self.deformable_groups:, :, :]
mask = mask.sigmoid()
kernel_weight=self.trans_kernel.detach()*9
self.conv4.weight=nn.Parameter(self.trans_kernel.detach())
out = self.conv4(support, offset, mask)
if test:
return out,offset_mask1,offset_mask2,offset_mask3,offset_mask4
else:
return out
else:
offset1=self.conv1_offset(feature_f0)
feature_f1=self.conv1(feature_f0,offset1)
offset2=self.conv2_offset(feature_f1)
feature_f2=self.conv2(feature_f1,offset2)
offset3=self.conv3_offset(feature_f2)
feature_f3=self.conv3(feature_f2,offset3)
offset4=self.conv4_offset(feature_f3)
# print(self.conv4_offset.weight.shape)
# print(self.conv4.weight.shape)
self.conv4.weight=nn.Parameter(self.trans_kernel.detach())
agg_features=self.conv4(support,offset4)
self.offset4=offset4
# offset4=torch.max(offset4.abs(),dim=1,keepdim=True).expand_as(offset4)
# if offset4.shape[-1]==156:
# print('load')
# offset4_load=np.load('/home/ld/RepPoints/debug/agg_st_support/0/offset4.npy')
# offset4_load=torch.from_numpy(offset4_load).to(support.device)
# print('offset check in stsn',(offset4==offset4_load).all())
# #true for video 0
# support_load=np.load('/home/ld/RepPoints/debug/agg_st_support/2/support_f.npy')
# support_load=torch.from_numpy(support_load).to(support.device)
# print('support check in stsn',(support==support_load).all())
# agg_features=self.conv4(support_load,offset4_load)
# np.save('/home/ld/RepPoints/debug/feature_change/2/agg_f.npy',agg_features.data.cpu().numpy())
# np.save('/home/ld/RepPoints/debug/feature_change/2/support_f.npy',support_load.data.cpu().numpy())
# # np.save('/home/ld/RepPoints/debug/feature_change/0/refer_f.npy',reference.data.cpu().numpy())
# agg_load=np.load('/home/ld/RepPoints/debug/agg_st_support/2/agg_f.npy')
# agg_load=torch.from_numpy(agg_load).to(support.device)
# print('agg check in stsn',(agg_features==agg_load).all())
# # exit()
# #True for video 2
# self.offset4=offset4
# self.support_f=support
# self.agg_f=agg_features
# self.refer_f=reference
# return agg_features,offset1,offset2,offset3,offset4
# #?shape
# # offset4=torch.rand_like(offset4)
# else:
# agg_features=self.conv4(support,offset4)
# y_offset=[]
# for i in range(9):
# y_offset.append(offset4[:,2*i+1,:,:])
# y_offset=torch.stack(y_offset,dim=0)
# print(torch.max(y_offset.abs()))
# agg_features=(agg_features+support)/2
# agg_features=self.norm4(agg_features)
# agg_features=self.relu(agg_features)
if test:
return agg_features,offset1,offset2,offset3,offset4
else:
return agg_features
def forward(self,datas,test=False):
# torch.Size([2, 256, 48, 156])
# torch.Size([2, 256, 24, 78])
# torch.Size([2, 256, 12, 39])
# torch.Size([2, 256, 6, 20])
# torch.Size([2, 256, 3, 10])
output=[]
shuffle_id=np.random.randint(low=0,high=datas[0].shape[0],size=datas[0].shape[0])
        shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]=shuffle_id[shuffle_id==np.arange(datas[0].shape[0])]-1
import math
import numpy as np
import random
import sys
#Helper Methods ------------------------------------------------------------------------
# Finds the mean/centroid of a cluster.
def centerOfCluster(cluster):
cluster = np.asarray(cluster)
center =np.array([0,0])
result = np.array([0,0])
for i in range(len(cluster)):
result = np.sum([cluster[i], result], axis = 0)
numberOfPoints = len(cluster)
if numberOfPoints > 0:
result = result * (1 / numberOfPoints)
return result
else:
print ("A cluster is empty")
sys.exit()
# Calculates the distance between 2 2d vectors.
def distVector(vectorA, vectorB):
    vectorA = np.asarray(vectorA)
    vectorB = np.asarray(vectorB)
    return np.linalg.norm(vectorA - vectorB)  # Euclidean distance (assumed completion)
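# Example usage of the helpers above (hypothetical points, comment-only sketch):
#   centerOfCluster([[0.0, 0.0], [2.0, 2.0]])   # -> array([1., 1.])
#   distVector([0, 0], [3, 4])                  # -> 5.0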
# Imports.
import numpy as np
#########################################################
# Set the hyperparameters for the Neural Network to use.
#########################################################
iterations = int(3e3)
learning_rate = 5e-1
hidden_nodes = 25
output_nodes = 1
class NeuralNetwork(object):
""" Class implementation of an Artificial Neural Network (ANN).
"""
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
""" Initialize an ANN instance.
Params
======
input_nodes (int): Number of nodes in the input layer
hidden_nodes (int): Number of nodes in the hidden layer
output_nodes (int): Number of nodes in the output layer
learning_rate (float): Learning rate
"""
# Set the number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights.
self.weights_input_to_hidden = np.random.normal(
0.,
self.input_nodes**-.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(
0.,
self.hidden_nodes**-.5,
(self.hidden_nodes, self.output_nodes))
# Set the learning rate.
self.lr = learning_rate
# Setting the activation function to a sigmoid function.
# Here, we define a function with a lambda expression.
self.activation_function = lambda x : 1. / (1. + (np.exp(-x)))
def train(self, features, targets):
""" Train the ANN on batch of features and targets.
Params
======
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
"""
# Set the parameters.
n_records = features.shape[0]
        delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
        # accumulated weight steps for the hidden-to-output layer (assumed counterpart)
        delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
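        # Comment-only sketch of the forward pass the training loop builds on,
        # using the weights and activation defined in __init__ (an assumption
        # about the remaining steps; the output layer is linear for regression):
        #   hidden_inputs  = np.dot(X, self.weights_input_to_hidden)
        #   hidden_outputs = self.activation_function(hidden_inputs)
        #   final_outputs  = np.dot(hidden_outputs, self.weights_hidden_to_output)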
## Functions for evaluation of model performance
## TODO: references to source for each function where applicable
## TODO: write documentation for each function
# Modules
from nltk.translate.bleu_score import sentence_bleu
import matplotlib.pyplot as plt
import numpy as np
def decode_sequence(input_seq, encoder_model, decoder_model, mapping_input, mapping_output):
'''
Take input as one-hot encoded vector and predict the output.
:param input_seq: one-hot encoded input word
:param encoder_model: trained model encoder (see 'inference' in seq2seq.py)
:param decoder_model: trained model decoder
:param mapping_input: hash tables from character --> integer and vice versa
:param mapping_output: hash tables from character --> integer and vice versa
:return: predicted pronunciation
:adapted from: https://blog.keras.io/a-ten-minute-introduction-to-sequence-to-sequence-learning-in-keras.html
'''
# Encode the input as state vectors.
states_value = encoder_model.predict(input_seq)
# Generate empty target sequence of length 1.
    target_seq = np.zeros((1, 1, mapping_output.n_chars))
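    # Comment-only sketch of the usual greedy decoding loop that consumes
    # target_seq, following the Keras seq2seq tutorial cited above (details of
    # mapping_output beyond n_chars are illustrative assumptions):
    #   while not stop_condition:
    #       out, h, c = decoder_model.predict([target_seq] + states_value)
    #       idx = np.argmax(out[0, -1, :])
    #       ... map idx back to a character and rebuild target_seq as a one-hot ...
    #       states_value = [h, c]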
import numpy as np
import pandas as pd
import threading
import time
import pickle
import tsfresh
from psutil import cpu_percent
from tsfresh import extract_features
from tsfresh import select_features
from tsfresh.utilities.dataframe_functions import impute
from sklearn.decomposition import PCA
from sklearn import preprocessing
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import scipy as sp
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler,normalize
from scipy import io
from mpl_toolkits.mplot3d import Axes3D
from scipy.spatial.distance import pdist, cdist, squareform
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.metrics import recall_score, f1_score, precision_score
from tsfresh.feature_extraction import extract_features, ComprehensiveFCParameters
from tsfresh.feature_extraction.settings import from_columns
from tsfresh.feature_extraction import feature_calculators
from tsfresh.feature_extraction import EfficientFCParameters
import os
import glob
from tsfresh.feature_extraction import extract_features, EfficientFCParameters
def Recovery (DataName): #Recovery function
#Changing Work Folder
add_path1 = "/PCA_Analyses/"
add_path2 = "/Kernel/"
add_path3 = "/.Recovery/"
base_path = os.getcwd ()
working_path = os.getcwd() + '/Model'
PCA_Analyses_path = working_path + add_path1
Kernel_path = working_path + add_path2
Recovery_path = working_path + add_path3
if DataName == 'D_S_parameters':
try:
# Now change to Kernel directory
os.chdir( Kernel_path )
Final_Target = np.genfromtxt('FinalTarget.csv', delimiter = ',')
# Now change to Recovery directory
os.chdir( Recovery_path )
P_N_groups = int(np.load('M_N_groups.npy'))
Output_Id = int(np.load('ID.npy'))
P_N_Ids = int(np.load('N_IDs.npy'))
# Now change to base directory
os.chdir( base_path )
Output = {'FinalTarget': Final_Target,
'M_N_groups': P_N_groups,
'ID': Output_Id,
'N_IDs': P_N_Ids}
#retval = os.getcwd()
#print ("Final working directory %s" % retval)
print("D_S_parameters Recovered!")
return Output
except:
            print('\033[93m' + "D_S_parameters not recovered =(" + '\033[0m')
elif DataName == 'ExtractedNames':
try:
# Now change to Recovery directory
os.chdir( Recovery_path )
extracted_names = np.load('extracted_names.npy')
# Now change to base directory
os.chdir( base_path )
#retval = os.getcwd()
#print ("Final working directory %s" % retval)
print("ExtractedNames recovered!")
return extracted_names
except:
print('\033[93m' + "ExtractedNames not recovered =(" + '\033[0m')
elif DataName == 'SelectedFeatures':
try:
# Now change to Recovery directory
os.chdir( Recovery_path )
Output_Id = int(np.load('ID.npy'))
# Now change to Kernel directory
os.chdir( Kernel_path )
features_filtered_1 = pd.read_csv('features_filtered_' + str(Output_Id) + '.csv')
# Now change to base directory
os.chdir( base_path )
Output = {'FeaturesFiltered': features_filtered_1,
'ID': Output_Id}
#retval = os.getcwd()
#print ("Final working directory %s" % retval)
print("SelectedFeatures recovered!")
return Output
except:
print('\033[93m' + "SelectedFeatures not recovered =(" + '\033[0m')
elif DataName == 'ReducedFeatures':
try:
# Now change to Recovery directory
os.chdir( Recovery_path )
Output_Id = int(np.load('ID.npy'))
# Now change to PCA Analyses directory
os.chdir( PCA_Analyses_path )
features_reduzidas = np.genfromtxt("features_reduzidas_" + str(Output_Id) + ".csv", delimiter=',')
# Now change to base directory
os.chdir( base_path )
Output = {'ReducedFeatures': features_reduzidas,
'ID': Output_Id}
#retval = os.getcwd()
#print ("Final working directory %s" % retval)
print("ReducedFeatures recovered!")
return Output
except:
print('\033[93m' + "ReducedFeatures not recovered =(" + '\033[0m')
elif DataName == 'SODA_parameters_processing_parameters':
try:
# Now change to base directory
os.chdir( Recovery_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
Output_Id = int(np.load('ID.npy'))
processing_parameters = np.load(('processing_parameters.npy'), allow_pickle=True)
processing_parameters = processing_parameters.tolist()
distances = np.load(('distances.npy'), allow_pickle=True)
distances = distances.tolist()
min_granularity = np.load('Min_g.npy')
max_granularity = np.load('Max_g.npy')
pace = np.load('Pace.npy')
Output = {'Distances': distances,
'Min_g': min_granularity,
'Max_g': max_granularity,
'Pace': pace,
'ID': Output_Id}
# Now change to base directory
os.chdir( base_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
print("SODA_parameters_processing_parameters recovered!")
return Output, processing_parameters
except:
print('\033[93m' + "SODA_parameters_processing_parameters not recovered =(" + '\033[0m')
elif DataName == 'ClassificationPar':
try:
# Now change to base directory
os.chdir( Recovery_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
Output_Id = int(np.load('ID.npy'))
pace = np.load("Pace.npy")
distances = np.load(('distances.npy'), allow_pickle=True)
distances = distances.tolist()
define_percent = np.load('define_percent.npy')
Output = {'Percent': define_percent,
'Distances': distances,
'Pace': pace,
'ID': Output_Id}
# Now change to base directory
os.chdir( base_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
print("ClassificationPar recovered!")
return Output
except:
print('\033[93m' + "ClassificationPar not recovered =(" + '\033[0m')
elif DataName == 'ModelPar':
try:
# Now change to base directory
os.chdir( Recovery_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
# load the model from disk
model = pickle.load(open("Model.sav", 'rb'))
X_test = np.load('X_test.npy')
y_test = np.load('y_test.npy')
Output = {'Model': model,
'X': X_test,
'Y': y_test}
# Now change to base directory
os.chdir( base_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
print("ModelPar recovered!")
return Output
except:
print('\033[93m' + "ModelPar not recovered =(" + '\033[0m')
else:
print('\033[93m' + "Wrong name lad/lass, please check de Recovery input" + '\033[0m')
def scale(X, x_min, x_max): #Normalization (min-max scaling to [x_min, x_max])
    nom = (X-X.min(axis=0))*(x_max-x_min)
    denom = X.max(axis=0) - X.min(axis=0)
    # guard against zero-range columns; np.where also handles array-valued denom
    denom = np.where(denom == 0, 1, denom)
    return x_min + nom/denom
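# Quick check of scale() (comment-only):
#   scale(np.array([0., 5., 10.]), 0, 1)   # -> array([0. , 0.5, 1. ])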
def format_func(value, tick_number): #Plot Formatter
    # Tick labels are "X1" for 0 and "X<N>" for multiples of 50 up to 850,
    # equivalent to the original if/elif chain.
    N = int(value)
    if N == 0:
        return "X1"
    if N % 50 == 0 and 50 <= N <= 850:
        return "X" + str(N)
def DataSlicer (Output_Id, id_per_group=20, Choice='All'):
    ''' Function to slice a time series dataset into several smaller datasets
    in order to save RAM during model execution
Parameters:
------
Output_Id : int
identifier for the dataset
id_per_group: int, optional
number of time series per division (default is 20)
Choice : str, optional
option of data, can be ['Main Data', 'Eminence Data', 'All'] (default is 'All')
Returns:
-------
dictionary, with the following items
'FinalTarget': np.array
targets of the entire dataset
'M_N_groups': int
number of groups
'ID': int
identifier for the dataset
'N_IDs': int
number of time series
'''
print('Data Slicer Control Output')
print('----------------------------------')
#Changing Work Folder
add_path1 = "/Input/"
add_path2 = "/Kernel/"
add_path3 = "/.Recovery/"
base_path = os.getcwd()
working_path = os.getcwd()
Input_path = working_path + add_path1
Kernel_path = working_path + add_path2
Recovery_path = working_path + add_path3
# Now change to Input directory
os.chdir( Input_path )
# Loading the required input
Full_data = np.genfromtxt('Output_' + str(int(Output_Id)) + '.csv', delimiter=',')
#E_data = np.genfromtxt('Eminence_Data_' + str(Output_Id) + '.csv', delimiter=',')
columns = Full_data.shape[1]
data = Full_data[:,2:columns-1]
info = Full_data[:,0:2]
#centralizar os dados e colocá-los com desvioPadrão=1
#scaler = MinMaxScaler(feature_range=(-1,1)).fit(data)
#data = scaler.transform(data)
P_data = np.concatenate((info,data), axis=1)
Target = Full_data[:,columns-1]
print('Full Matrix: ' + str(Full_data.shape))
print('Main Data: ' + str(P_data.shape))
print('Labels: ' + str(Target.shape))
#print('Eminence Data: ' + str(E_data.shape))
# Now change to Kernel directory
os.chdir( Kernel_path )
#pickle.dump(scaler, open('norm.sav', 'wb'))
###______________________________________________________________________###
### ProDiMes Slicing Parameters ###
P_N_Ids = int(np.amax(P_data,axis=0)[0])
P_N_voos = int(np.amax(P_data,axis=0)[1])
P_last_group = int(P_N_Ids % id_per_group)
if P_last_group != 0:
P_N_groups = int((P_N_Ids / id_per_group) + 1)
else:
P_N_groups = int (P_N_Ids / id_per_group)
### Formating Final Target ###
Final_Target = np.zeros((P_N_Ids))
p_n_good = 0
p_n_bad = 0
aquired_time = P_N_Ids*P_N_voos/1000
for i in range (P_N_Ids):
if Target [i*P_N_voos] == 0:
p_n_good += 1
else:
p_n_bad += 1
Final_Target[i] = Target [i*P_N_voos]
print ('Total Number of Ids: ' + str(P_N_Ids))
print ('Number of healthy Ids: ' + str(p_n_good))
    print ('Number of faulty Ids: ' + str(p_n_bad))
    print ('Total lifetime: ' + str(aquired_time) + ' s')
    print ('Main data Number of measures: ' + str(P_N_voos ))
print ('Main data Number of groups: ' + str(P_N_groups ))
print ('Main data Last group: ' + str(P_last_group ))
print ('___________________________________________')
###______________________________________________________________________###
### Eminences Slicing Parameters ###
#E_N_Ids = int(np.amax(E_data,axis=0)[0] - np.amax(P_data,axis=0)[0])
#E_N_voos = int(np.amax(E_data,axis=0)[1]) + 1
#E_last_group = int(E_N_Ids % id_per_group)
#if (E_last_group != 0):
# E_N_groups = int((E_N_Ids / id_per_group) + 1)
#else:
# E_N_groups = int (E_N_Ids / id_per_group)
#print ('Eminences Number of Ids: ' + str(E_N_Ids ))
#print ('Eminences Number of flights: ' + str(E_N_voos ))
#print ('Eminences Number of groups: ' + str(E_N_groups ))
#print ('Eminences Last group: ' + str(E_last_group ))
#np.savetxt(('Target_' + str(int(Output_Id)) + '.csv'), Final_Target, delimiter = ',')
###______________________________________________________________________###
### Slicing Prodimes Data ###
if (Choice =='Main Data') or (Choice =='All'):
for i in range (P_N_groups):
Data = np.zeros(((id_per_group * P_N_voos),columns-1))
for j in range (id_per_group):
for k in range (P_N_voos):
if (i < (P_N_groups - 1)):
Data[(j * P_N_voos) + k,:] = P_data [(((i * id_per_group + j) * P_N_voos) + k ) ,:]
elif (P_last_group == 0) and (i == (P_N_groups - 1)):
Data[(j * P_N_voos) + k,:] = P_data [(((i * id_per_group + j) * P_N_voos) + k ) ,:]
if (P_last_group != 0) and (i == (P_N_groups - 1)):
Data = np.zeros(((P_last_group * P_N_voos),columns-1))
for j in range (P_last_group):
for k in range (P_N_voos):
Data[(j * P_N_voos) + k,:] = P_data [(((i * id_per_group + j) * P_N_voos) + k ) ,:]
np.savetxt(('Data_' + str(i) + '.csv'), Data, delimiter = ',')
###______________________________________________________________________###
### Slicing Eminences ###
'''
if (Choice == 'Eminence Data') or (Choice =='All'):
for i in range (E_N_groups):
Data = np.zeros(((id_per_group * E_N_voos),columns-3))
for j in range (id_per_group):
for k in range (E_N_voos):
if (i < (E_N_groups - 1)):
Data[(j * E_N_voos) + k,:] = E_data [(((i * id_per_group + j) * E_N_voos) + k ) ,:]
if (E_last_group != 0) and (i == (E_N_groups - 1)):
Data = np.zeros(((E_last_group * E_N_voos),columns-3))
for j in range (E_last_group):
for k in range (E_N_voos):
Data[(j * E_N_voos) + k,:] = E_data [(((i * id_per_group + j) * E_N_voos) + k ) ,:]
np.savetxt(('Eminence_' + str(i) + '.csv'), Data, delimiter = ',')
'''
np.savetxt(('FinalTarget.csv'), Final_Target, delimiter = ',')
# Now change to Recovery directory
os.chdir( Recovery_path )
np.save(('M_N_groups.npy'), P_N_groups)
np.save(('ID.npy'), Output_Id)
np.save(('N_IDs.npy'), P_N_Ids)
# Now change back to Base directory
os.chdir( base_path )
Output = {'FinalTarget': Final_Target,
'M_N_groups': P_N_groups,
'ID': Output_Id,
'N_IDs': P_N_Ids}
return Output
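# Example usage of DataSlicer (hypothetical id; comment-only sketch, since the
# call reads Input/Output_<id>.csv from disk and writes slices into Kernel/):
#   sliced = DataSlicer(Output_Id=1, id_per_group=20, Choice='Main Data')
#   sliced['FinalTarget'].shape   # -> (N_IDs,)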
def TSFRESH_Extraction(D_S_parameters):
    ''' Function to extract features from the time series using the
    TSFRESH method
Parameters:
------
D_S_parameters : dictionary, with the following items
'FinalTarget': np.array
targets of the entire dataset
'M_N_groups': int
number of groups
'ID': int
identifier for the dataset
'N_IDs': int
number of time series
Returns:
-------
list
        a list of strings with the names of the features extracted by TSFRESH
'''
print(' ')
print('TSFRESH Control Output')
print('----------------------------------')
#Changing Work Folder
add_path2 = "/Kernel/"
add_path3 = "/.Recovery/"
base_path = os.getcwd()
working_path = os.getcwd()
Kernel_path = working_path + add_path2
Recovery_path = working_path + add_path3
# Now change to Kernel directory
os.chdir( Kernel_path )
###______________________________________________________________________###
### Feature Extraction ###
#E_N_groups = np.load('E_N_groups.npy')
P_N_groups = D_S_parameters['M_N_groups']
for i in range(P_N_groups):
Data = np.genfromtxt('Data_' + str(i) + '.csv', delimiter=',')
data = pd.DataFrame(Data, columns= ['id','time'] + ['Sensor_' + str(x) for x in range(1,(Data.shape[1]-1))])
Data_extracted_features = extract_features(data,column_id = "id", column_sort="time",n_jobs=4,disable_progressbar=True)
extracted_names = list(Data_extracted_features.columns)
np.savetxt('Data_Features_' + str(i) + '.csv', Data_extracted_features.values, delimiter=',')
#for i in range(E_N_groups):
# data = pd.DataFrame(np.genfromtxt('Eminence_' + str(i) + '.csv', delimiter=','),
# columns= ['id','time','sensor_1','sensor_2','sensor_3','sensor_4',
# 'sensor_5','sensor_6','sensor_7'])
# extracted_features = extract_features(data, column_id = "id", column_sort="time")
# np.savetxt('Eminence_Features_' + str(i) + '.csv', extracted_features, delimiter=',')
# Now change to Recovery directory
os.chdir( Recovery_path )
np.save('extracted_names.npy',extracted_names)
# Now change back to base directory
os.chdir( base_path )
print("Number of Extracted Features: {}".format(len(extracted_names)))
return extracted_names
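# Minimal sketch of the tsfresh call pattern used above, on toy in-memory data
# with default settings (not part of the pipeline; relies on the numpy, pandas
# and tsfresh imports at the top of this file):
def _tsfresh_toy_example():
    toy = pd.DataFrame({'id': np.repeat([1, 2], 10),
                        'time': np.tile(np.arange(10), 2),
                        'Sensor_1': np.random.rand(20)})
    return extract_features(toy, column_id='id', column_sort='time',
                            disable_progressbar=True)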
def tsfresh_chucksize(full_data,output_id):
# Loading the required input
L, W = full_data.shape
data = full_data[:,2:-1]
info = full_data[:,0:2]
# Normalizing
scaler = MinMaxScaler()
data = scaler.fit_transform(data)
n_measures = int(max(info[:,1]))
target = full_data[::n_measures,-1]
u, idx = np.unique(info[:,0], return_index=True)
df = pd.DataFrame(np.concatenate((info,data), axis=1), columns= ['id','time'] +
['Sensor_' + str(x) for x in range(1,W-2)])
with open('Kernel/valid_features_dict.pkl', 'rb') as f:
kind_to_fc_parameters = pickle.load(f)
columns = []
for i,x in enumerate(kind_to_fc_parameters):
aux = pd.DataFrame(np.hstack((df.loc[:,:'time'].values,
df.loc[:,x].values.reshape((-1,1)))),
columns=['id','time',x])
aux2 = tsfresh.extract_features(aux, column_id="id", column_sort="time",
default_fc_parameters=kind_to_fc_parameters[x],
#chunksize=3*24000,
n_jobs=4,
disable_progressbar=False)
for j in range(len(aux2.columns.tolist())):columns.append(aux2.columns.tolist()[j])
if i == 0:
extracted_features = np.array(aux2.values)
else:
extracted_features = np.hstack((extracted_features,aux2.values))
final_features = pd.DataFrame(extracted_features,columns=columns)
filtered_features = select_features(final_features, target,n_jobs=4)
filtered_features.sort_index(inplace = True)
with open('Kernel/final_target_' + output_id + '.pkl', 'wb') as f:
pickle.dump(target, f)
# Extracting the selected features dictionary from pandas data frame
kind_to_fc_parameters = tsfresh.feature_extraction.settings.from_columns(filtered_features)
# Saving dictionary for the on-line phase
with open('Kernel/kind_to_fc_parameters.pkl', 'wb') as f:
pickle.dump(kind_to_fc_parameters, f)
with open('Kernel/columns.pkl', 'wb') as f:
pickle.dump(filtered_features.columns.to_list(), f)
Output = {'FeaturesFiltered': filtered_features,
'FinalTarget': target,
'ID': int(output_id)}
return Output
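# Input layout sketch for tsfresh_chucksize (synthetic placeholder values, not part of the
# original pipeline): each row is one measurement, columns are [id, time, sensor_1 ... sensor_n,
# label], and every id has the same number of measurements, since the label is read once per id.
def _example_chucksize_input(n_ids=4, n_measures=50, n_sensors=3):
    rows = []
    for i in range(1, n_ids + 1):
        label = i % 2                                   # placeholder binary target
        for t in range(1, n_measures + 1):
            rows.append([i, t] + list(np.random.rand(n_sensors)) + [label])
    return np.array(rows)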
def tsfresh_chucksize_test(output_id):
# Loading the required input
full_data = np.genfromtxt('Input/Output_' + output_id + '.csv',
delimiter=',')
L, W = full_data.shape
data = full_data[:,2:-1]
info = full_data[:,0:2]
# Normalizing
scaler = MinMaxScaler()
data = scaler.fit_transform(data)
n_measures = int(max(info[:,1]))
target = full_data[::n_measures,-1]
u, idx = np.unique(info[:,0], return_index=True)
df = pd.DataFrame(np.concatenate((info,data), axis=1), columns= ['id','time'] +
['Sensor_' + str(x) for x in range(1,W-2)])
extracted_features = tsfresh.extract_features(df, column_id="id", column_sort="time", n_jobs=4,default_fc_parameters=EfficientFCParameters())
return extracted_features
def tsfresh_NaN_filter(output_id,fft=False):
"""
Given an output_id, this function
withdraws all NaN features from the
TSFRESH extraction;
Inputs:
-output_id: str() -> the given id
-fft: True or False -> filter fft features
Outputs:
- Saves, via pickle, in ./Kernel/
an extraction dictionary without
the features that generate NaN
"""
df = tsfresh_chucksize_test(output_id)
features = df.columns
nan_columns = []
for col in features:
data = df.loc[:,col].values
nan_test = np.isnan(data)
aux = col.split('__')[1].split('_')[0]
if aux == 'fft' and fft == True:
nan_columns.append(col)
elif any(nan == True for nan in nan_test):
nan_columns.append(col)
print('Percentage of invalid features: ', len(nan_columns)*100/len(features))
valid_features = []
for i in range(len(features)):
if features[i] not in nan_columns:
valid_features.append(features[i])
print('Percentage of valid features: ', len(valid_features)*100/len(features))
valid_features_dict = from_columns(valid_features)
with open('Kernel/valid_features_dict.pkl', 'wb') as f:
pickle.dump(valid_features_dict, f)
return
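# Typical offline ordering (sketch; the output_id is a placeholder): the NaN filter reads
# Input/Output_<id>.csv and saves Kernel/valid_features_dict.pkl, which tsfresh_chucksize
# then consumes for the restricted extraction.
def _example_offline_feature_selection(output_id='20'):
    tsfresh_NaN_filter(output_id, fft=True)
    full_data = np.genfromtxt('Input/Output_' + output_id + '.csv', delimiter=',')
    return tsfresh_chucksize(full_data, output_id)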
def tsfresh_ensemble(output_id):
# Loading the required input
full_data = np.genfromtxt('Input/Output_{}.csv'.format(output_id),
delimiter=',')
L, W = full_data.shape
data = full_data[:,2:-1]
info = full_data[:,0:2]
n_measures = int(max(info[:,1]))
n_timeseries = int(max(info[:,0]))
label = full_data[::n_measures,-1]
scaler = MinMaxScaler(feature_range=(-1,1)).fit(data)
data = scaler.transform(data)
with open('Kernel/scaler.pkl', 'wb') as f:
pickle.dump(scaler, f)
full_data = np.concatenate((info,data), axis=1)
divisions = 1
idx = np.random.choice(range(n_timeseries),n_timeseries,replace=False)
idx_division = np.array_split(idx,divisions)
for i,div in enumerate(idx_division):
div.sort()
indices = [d2 for d1 in div for d2 in range(d1*n_measures,(d1+1)*n_measures)]
ensemble_data = full_data[indices,:]
ensemble_label = label[div]
df = pd.DataFrame(ensemble_data, columns= ['id','time'] +
['Sensor_' + str(x) for x in range(1,W-2)])
extracted_features = tsfresh.extract_features(df, column_id="id", column_sort="time", n_jobs=0)
features = extracted_features.columns
nan_columns = []
for col in features:
nan_test = np.isnan(extracted_features.loc[:,col].values)
if any(nan == True for nan in nan_test):
nan_columns.append(col)
print(' - Percentage of invalid features: ', len(nan_columns)*100/len(features))
cleaned_features = features.drop(nan_columns)
cleaned_df = extracted_features[cleaned_features]
filtered_df, relevance_table = selection.select_features(cleaned_df, ensemble_label, n_jobs=0)
relevance_table.fillna(value=100)
if i == 0:
relevance_table_final = relevance_table.copy()
extracted_features_final = extracted_features.copy()
else:
relevance_table_final.p_value = relevance_table_final.p_value + relevance_table.p_value
extracted_features_final = pd.concat([extracted_features_final,extracted_features], axis=0)
extracted_features_final = extracted_features_final.sort_index()
relevance_table_final.p_value = relevance_table_final.p_value/divisions
relevance_table_final.relevant = relevance_table_final.p_value < 0.0029
relevant_features = relevance_table_final[relevance_table_final.relevant].feature
extracted_features_final = extracted_features_final[relevant_features]
kind_to_fc_parameters = from_columns(relevant_features)
with open('Kernel/kind_to_fc_parameters.pkl', 'wb') as f:
pickle.dump(kind_to_fc_parameters, f)
with open('Kernel/columns.pkl', 'wb') as f:
pickle.dump(relevant_features.keys().tolist(), f)
with open('Kernel/final_target_{}.pkl'.format(output_id), 'wb') as f:
pickle.dump(label, f)
Output = {'FeaturesFiltered': extracted_features_final,
'FinalTarget': label,
'ID': int(output_id)}
return Output
def dynamic_tsfresh (total_data, mode='prototype'):
''' Function for ONLINE mode
This function reads the data from the acquisition module and executes a
dynamic, lighter version of TSFRESH driven by the feature dictionary saved
during the offline phase (Kernel/kind_to_fc_parameters.pkl).
Parameters:
------
total_data : np.array
matrix with columns [id, time, sensor_1 ... sensor_n, label]; the last column is discarded
mode : str, optional
execution mode (default is 'prototype')
Returns:
-------
tuple
(pd.DataFrame with the imputed selected features in the stored column order,
np.array with the raw extracted feature values)
A short usage sketch follows this function.
'''
data = total_data[:,2:-1]
info = total_data[:,0:2]
# Normalizing
scaler = MinMaxScaler()
data = scaler.fit_transform(data)
total_data = np.concatenate((info,data), axis=1)
# ----------------------------------------------------------------- #
df = pd.DataFrame(total_data, columns= ['id','time'] +
['Sensor_' + str(x) for x in range(1,(total_data.shape[1]-1))])
# Loading feature dictionary
with open('Kernel/kind_to_fc_parameters.pkl', 'rb') as f:
kind_to_fc_parameters = pickle.load(f)
# Loading column names
with open('Kernel/columns.pkl', 'rb') as f:
original_columns = pickle.load(f)
columns = []
for i,x in enumerate(kind_to_fc_parameters):
aux = pd.DataFrame(np.hstack((df.loc[:,:'time'].values,
df.loc[:,x].values.reshape((-1,1)))),
columns=['id','time',x])
aux2 = tsfresh.extract_features(aux, column_id="id", column_sort="time",
default_fc_parameters=kind_to_fc_parameters[x],#chunksize=24000,
n_jobs=0
#disable_progressbar=True
)
columns.extend(aux2.columns.tolist())
if i == 0:
extracted_features = np.array(aux2.values)
else:
extracted_features = np.hstack((extracted_features,aux2.values))
final_features = pd.DataFrame(extracted_features,columns=columns)
final_features = final_features[original_columns]
return impute(final_features), extracted_features
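# Online-mode sketch (assumes the Kernel/ artefacts written by the offline phase exist):
# extract the stored feature set for a new acquisition window and project it with
# PCA_projection, which is defined further below in this file.
def _example_online_window(total_data):
    online_features, _ = dynamic_tsfresh(total_data)
    reduced = PCA_projection(online_features)
    return reduced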
def test_tsfresh (SelectedFeatures,extracted_features):
tsf_offline = SelectedFeatures['FeaturesFiltered'].values
tsf_online = extracted_features.values
equal = np.equal(tsf_offline,tsf_online)
n_errors = 0
error_size = []
for i in range(equal.shape[0]):
for j in range(equal.shape[1]):
if equal[i,j]== False:
n_errors += 1
error_size.append(100*(tsf_offline[i,j]-tsf_online[i,j])/tsf_online[i,j])
error_size = pd.DataFrame(error_size)
error_size = impute(error_size)
print('Percentage of mismatched samples (%): ',n_errors*100/(equal.shape[0]*equal.shape[1]))
print('Mean percentage error (%): ',np.mean(error_size[0]))
print('Standard deviation (%): ',np.std(error_size[0]))
def PCA_calc (SelectedFeatures,N_PCs,Chose = 'Analytics',it=0):
''' Function to project and execute a Principal Components Analysis
Parameters:
------
SelectedFeatures : dictionary, with the following items
'FeaturesFiltered': pd.DataFrame
contains the output data of TSFRESH, i.e., the dataset with the features selected by the hypothesis test
'FinalTarget': np.array
targets of the entire dataset
'ID': int
identifier for the dataset
N_PCs: int
number of Principal Components to maintain
Chose: str
type of analysis, can be ['Test', 'Calc', 'Specific', 'Analytics']
(default is 'Analytics')
Returns:
-------
dictionary, with the following items
'ReducedFeatures': np.array
contains the output data of PCA, i.e., the dataset with Principal Components projected by PCA
'ID': int
identifier for the dataset
'''
if (Chose == 'Test') or (Chose == 'Calc') or (Chose == 'Specific') or (Chose == 'Analytics'):
#Changing Work Folder
add_path1 = "/PCA_Analyses/"
add_path2 = "/Input/"
add_path3 = "/Kernel/"
add_path4 = "/PCA_Analyses/Figures/"
base_path = os.getcwd()
working_path = os.getcwd()
PCA_Analyses_path = working_path + add_path1
Input_path = working_path + add_path2
Kernel_path = working_path + add_path3
PCA_Figures_path = working_path + add_path4
# Now change to PCA Figures directory
os.chdir( Kernel_path )
print(' ')
print('PCA Control Output')
print('----------------------------------')
Output_Id = SelectedFeatures['ID']
features = SelectedFeatures['FeaturesFiltered']
Target = SelectedFeatures['FinalTarget']
selected_names = list(features.columns)
#center the data and scale it to unit standard deviation
scaler = StandardScaler().fit(features)
features_padronizadas = scaler.transform(features)
#features_padronizadas = pd.DataFrame(features_padronizadas)
pickle.dump(scaler, open('pca_scaler.sav', 'wb'))
pca= PCA(n_components = N_PCs)
pca.fit(features_padronizadas)
# save the model to disk
pickle.dump(pca, open('pca.sav', 'wb'))
variacao_percentual_pca = np.round(pca.explained_variance_ratio_ * 100, decimals = 2)
# Now change to PCA Figures directory
fig = plt.figure(figsize=[16,8])
ax = fig.subplots(1,1)
ax.bar(x=['PC' + str(x) for x in range(1,(N_PCs+1))],height=variacao_percentual_pca[0:N_PCs])
ax.set_ylabel('Percentage of Variance Held',fontsize=27)
ax.set_xlabel('Principal Components',fontsize=20)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
ax.grid()
#plt.show()
fig.savefig('Percentage_of_Variance_Held__{}__{}.png'.format(Output_Id,it), bbox_inches='tight')
print('Variation maintained: %.2f' % variacao_percentual_pca.sum())
print(' ')
if (Chose != 'Test'):
features_reduzidas = pca.transform(features_padronizadas)
print('Filtered Features')
print('-' * 20)
print(np.size(features_padronizadas,0))
print(np.size(features_padronizadas,1))
print('-' * 20)
print('Reduced Features')
print('-' * 20)
print(np.size(features_reduzidas,0))
print(np.size(features_reduzidas,1))
if (Chose != 'Test'):
### Attribute analysis ###
eigen_matrix = np.array(pca.components_)
eigen_matrix = pow((pow(eigen_matrix,2)),0.5) # take absolute values of the loadings (remove negative signs)
for i in range (eigen_matrix.shape[0]):
LineSum = sum(eigen_matrix[i,:])
for j in range (eigen_matrix.shape[1]):
eigen_matrix[i,j] = ((eigen_matrix[i,j]*100)/LineSum)
if Chose == 'Specific':
### Specific analysis ###
fig = plt.figure(figsize=[16,int(8*N_PCs)])
fig.suptitle('Contribution percentage per PC', fontsize=16)
ax = fig.subplots(int(N_PCs),1)
for i in range (int(N_PCs)):
s = eigen_matrix[i,:]
ax[i].bar(x=range(0,(eigen_matrix.shape[1])),height=s)
ax[i].set(xlabel='Features', ylabel='Contribution Percentage', title = 'PC ' + str(i+1))
ax[i].grid()
# Hide x labels and tick labels for top plots and y ticks for right plots.
for axs in ax.flat:
axs.label_outer()
plt.show()
fig.savefig('Contribution_Percentage_Per_PC_{}.png'.format(Output_Id), bbox_inches='tight')
if (Chose == 'Analytics'):
### General analysis ###
weighted_contribution = np.zeros((2,eigen_matrix.shape[1]))
for i in range (eigen_matrix.shape[1]):
NumeratorSum = 0
for j in range (N_PCs):
NumeratorSum += eigen_matrix[j,i] * variacao_percentual_pca[j]
weighted_contribution[0,i] = NumeratorSum / sum(variacao_percentual_pca)
df_weighted_contribution = pd.DataFrame(weighted_contribution,columns=selected_names)
df_weighted_contribution = df_weighted_contribution.drop([1])
df_weighted_contribution = df_weighted_contribution.sort_values(by=0, axis=1, ascending=False)
#pd.set_option('display.max_rows', len(df_weighted_contribution))
#print(type(df_weighted_contribution))
#print(df_weighted_contribution.head())
#pd.reset_option('display.max_rows')
#Creating separate data frames for sensor and feature contributions
sensors_names = [None] * int(df_weighted_contribution.shape[1])
features_names = [None] * int(df_weighted_contribution.shape[1])
general_features = [None] * int(df_weighted_contribution.shape[1])
for i, names in zip(range (df_weighted_contribution.shape[1]), df_weighted_contribution.columns):
c = '__'
words = names.split(c)
sensors_names[i] = words[0]
general_features[i]= words[1]
features_names[i] = c.join(words[1:])
#print(names)
#print(words)
#print(sensors_names[i])
#print(features_names[i])
#print(50*'-')
unique_sensors_names = np.ndarray.tolist(np.unique(np.array(sensors_names)))
unique_general_feature = np.ndarray.tolist(np.unique(np.array(general_features)))
unique_features_names = np.ndarray.tolist(np.unique(np.array(features_names)))
sensors_contribution = pd.DataFrame (np.zeros((2,np.shape(unique_sensors_names)[0])), columns=unique_sensors_names)
general_features_contribution = pd.DataFrame (np.zeros((2,np.shape(unique_general_feature)[0])), columns=unique_general_feature)
features_contribution = pd.DataFrame (np.zeros((2,np.shape(unique_features_names)[0])), columns=unique_features_names)
sensors_contribution = sensors_contribution.drop([1])
general_features_contribution = general_features_contribution.drop([1])
features_contribution = features_contribution.drop([1])
# For the output formatting
"""
unique_sensors_names = np.ndarray.tolist(np.unique(np.array(sensors_names)))
unique_features_names = np.ndarray.tolist(np.unique(np.array(features_names)))
sensor_dt = np.transpose(np.vstack((unique_sensors_names,np.asarray(np.zeros(np.shape(unique_sensors_names)[0]),object))))
feature_dt = np.transpose(np.vstack((unique_features_names,np.asarray(np.zeros(np.shape(unique_features_names)[0]),object))))
sensors_contribution = pd.DataFrame(sensor_dt,columns = ['Sensor','Contribution'])
features_contribution = pd.DataFrame(feature_dt,columns = ['Feature','Contribution'])
"""
#print(sensors_contribution.head())
#print(features_contribution.head())
#Creating dictionaries from the data frame orientation
"""
Creates a mapping from kind names to fc_parameters objects
(which are itself mappings from feature calculators to settings)
to extract only the features contained in the columns.
To do so, for every feature name in columns this method
1. split the column name into col, feature, params part
2. decide which feature we are dealing with (aggregate with/without params or apply)
3. add it to the new name_to_function dict
4. set up the params
:param columns: containing the feature names
:type columns: list of str
:param columns_to_ignore: columns which do not contain tsfresh feature names
:type columns_to_ignore: list of str
:return: The kind_to_fc_parameters object ready to be used in the extract_features function.
:rtype: dict
"""
weighted_contribution_dic = {}
for col in df_weighted_contribution.columns:
# Split according to our separator into <col_name>, <feature_name>, <feature_params>
parts = col.split('__')
n_parts = len(parts)
if n_parts == 1:
raise ValueError("Splitting of columnname {} resulted in only one part.".format(col))
kind = parts[0]
feature = c.join(parts[1:])
feature_name = parts[1]
if kind not in weighted_contribution_dic:
weighted_contribution_dic[kind] = {}
if not hasattr(feature_calculators, feature_name):
raise ValueError("Unknown feature name {}".format(feature_name))
sensors_contribution.loc[0,kind] += df_weighted_contribution.loc[0,col]
general_features_contribution.loc[0,feature_name] += df_weighted_contribution.loc[0,col]
features_contribution.loc[0,feature] += df_weighted_contribution.loc[0,col]
weighted_contribution_dic[kind][feature] = df_weighted_contribution.loc[0,col]
# End of the tsfresh stolen function
"""
sensors_dic = {}
for i in range(len(unique_sensors_names)):
sensors_dic[unique_sensors_names[i]] = i
features_dic = {}
for i in range(len(unique_features_names)):
features_dic[unique_features_names[i]] = i
#Suming the contibution for Sensors and Features
for i in range(df_weighted_contribution.shape[0]):
names = df_weighted_contribution.loc[i,'tsfresh_info']
c = '__'
words = names.split(c)
S= words[0]
F= c.join(words[1:])
sensors_contribution.loc[sensors_dic[S],'Contribution'] += df_weighted_contribution.loc[i,'Contribution']
features_contribution.loc[features_dic[F],'Contribution'] += df_weighted_contribution.loc[i,'Contribution']
sensors_contribution = sensors_contribution.sort_values(by=['Contribution'], ascending=False)
features_contribution = features_contribution.sort_values(by=['Contribution'], ascending=False)
"""
features_contribution = features_contribution.sort_values(by=0, axis=1, ascending=False)
general_features_contribution = general_features_contribution.sort_values(by=0, axis=1, ascending=False)
features_indexes = [x for x in range(1,(features_contribution.shape[0])+1)]
general_features_indexes = [x for x in range(1,(general_features_contribution.shape[0])+1)]
features_contribution.set_index(pd.Index(features_indexes))
general_features_contribution.set_index(pd.Index(general_features_indexes))
sorted_sensors_contribution = sensors_contribution.values[0,:]
sorted_features_contribution = features_contribution.values[0,:]
sorted_general_features_contribution = general_features_contribution.values[0,:]
#Plotting sensor contribution results
fig = plt.figure(figsize=[16,8])
#fig.suptitle('Sensors Weighted Contribution Percentage', fontsize=16)
ax = fig.subplots(1,1)
s = sorted_sensors_contribution[:]
ax.bar(x=['Voltage','Current'],height=s)
plt.ylabel('Relevance Percentage',fontsize = 20)
plt.xlabel('Sensors',fontsize = 20)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=18)
ax.grid()
plt.show()
fig.savefig('Sensor_Weighted_Contribution_Percentage_{}.png'.format(Output_Id), bbox_inches='tight')
#Plotting feature contribution results
fig = plt.figure(figsize=[16,8])
#fig.suptitle('Features Weighted Contribution Percentage', fontsize=16)
ax = fig.subplots(1,1)
s = sorted_features_contribution[:]
ax.bar(x=range(0,(sorted_features_contribution.shape[0])),height=s)
plt.ylabel('Relevance Percentage',fontsize = 20)
plt.xlabel('Features',fontsize = 20)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=18)
ax.xaxis.set_major_locator(plt.MultipleLocator(50))
ax.xaxis.set_minor_locator(plt.MultipleLocator(50))
ax.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
ax.grid()
plt.show()
fig.savefig('Features_Weighted_Contribution_Percentage_{}.png'.format(Output_Id), bbox_inches='tight')
### General analysis of the 20 best complete attributes ###
fig = plt.figure(figsize=[16,8])
#fig.suptitle('Best Features Weighted Contribution Percentage', fontsize=16)
#print('Porcentagem de pertinência: ', np.sum(sorted_features_contribution[0:140]))
#print('Number of Selected Features: ', sorted_features_contribution.shape[0])
ax = fig.subplots(1,1)
s = sorted_features_contribution[0:20]
ax.bar(x=['X' + str(x) for x in range(1,(20+1))],height=s)
plt.ylabel('Relevance Percentage',fontsize = 20)
plt.xlabel('Features',fontsize = 20)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=18)
ax.grid()
plt.show()
fig.savefig('{}th_Best_Features_Weighted_Contribution_Percentage_{}.png'.format(20,Output_Id), bbox_inches='tight')
### General analysis of the 20 best general attributes ###
fig = plt.figure(figsize=[16,8])
#fig.suptitle('Best Features Weighted Contribution Percentage', fontsize=16)
#print('Porcentagem de pertinência: ', np.sum(sorted_features_contribution[0:140]))
#print('Number of Selected Features: ', sorted_features_contribution.shape[0])
ax = fig.subplots(1,1)
s = sorted_features_contribution[0:20]
ax.bar(x=['X' + str(x) for x in range(1,(20+1))],height=s)
plt.ylabel('Relevance Percentage',fontsize = 20)
plt.xlabel('Features',fontsize = 20)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=18)
ax.grid()
ax.set_ylim([s[-1]-0.05,s[0]+0.05])
plt.show()
fig.savefig('{}th_Best_Features_Weighted_Contribution_Percentage_{}_zoom.png'.format(20,Output_Id), bbox_inches='tight')
#Plotting the data of the most relevant sensor with the best features
sensors_contribution.values[:,0]
name_1 = df_weighted_contribution.columns[0]
name_2 = df_weighted_contribution.columns[1]
name_3 = df_weighted_contribution.columns[2]
#pd.set_option('display.max_columns', len(features))
#print(features)
#pd.reset_option('display.max_columns')
x = features.loc[:,name_1].values
y = features.loc[:,name_2].values
z = features.loc[:,name_3].values
data_saida = np.array([x, y, z]).T
np.savetxt('atributos.csv', data_saida, delimiter=',')
x = scale(x,-1,1)
y = scale(y,-1,1)
z = scale(z,-1,1)
x_bom=[]
x_ruim=[]
y_bom=[]
y_ruim=[]
z_bom=[]
z_ruim=[]
for i in range(len(Target)):
if Target[i] == 0:
x_bom.append(x[i])
y_bom.append(y[i])
z_bom.append(z[i])
if Target[i] == 1:
x_ruim.append(x[i])
y_ruim.append(y[i])
z_ruim.append(z[i])
os.chdir( base_path )
#np.savetxt('x_bom.csv', x_bom, delimiter=',')
#np.savetxt('x_ruim.csv', x_ruim, delimiter=',')
os.chdir( PCA_Figures_path )
fig = plt.figure(figsize=[14,10])
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x_bom, y_bom, z_bom, c = 'blue' )
ax.scatter(x_ruim, y_ruim, z_ruim, c = 'red' )
plt.ylabel('X2',fontsize = 20,labelpad=18)
plt.xlabel('X1',fontsize = 20, labelpad=18)
ax.set_zlabel('X3', fontsize = 20, labelpad=12)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=16)
plt.tick_params(axis='z', labelsize=16)
ax.grid()
red_patch = mpatches.Patch(color='red', label='Non-Functional Tools')
blue_patch = mpatches.Patch(color='blue', label='Functional Tools')
plt.legend(handles=[red_patch,blue_patch],fontsize = 20)
#plt.show()
fig.savefig('ScatterPlot_PCA_{}.png'.format(Output_Id), bbox_inches='tight')
# -------------------------------------------
fig = plt.figure(figsize=[21,7])
ax = fig.subplots(1,3)
ax[0].scatter(x_bom, y_bom, c = 'blue' )
ax[0].scatter(x_ruim, y_ruim, c = 'red' )
ax[0].set_xlabel('X1',fontsize = 20)
ax[0].set_ylabel('X2',fontsize = 20)
ax[0].grid()
ax[1].scatter(x_bom, z_bom, c = 'blue' )
ax[1].scatter(x_ruim, z_ruim, c = 'red' )
ax[1].set_xlabel('X1',fontsize = 20)
ax[1].set_ylabel('X3',fontsize = 20)
ax[1].grid()
ax[2].scatter(y_bom, z_bom, c = 'blue' )
ax[2].scatter(y_ruim, z_ruim, c = 'red' )
ax[2].set_xlabel('X2',fontsize = 20,)
ax[2].set_ylabel('X3',fontsize = 20)
ax[2].grid()
#plt.show()
fig.savefig('X1X2X3_{}.png'.format(Output_Id), bbox_inches='tight')
# -------------------------------------------
# Now change to PCA Analyses directory
os.chdir( PCA_Analyses_path )
general_features_contribution.to_csv('unique_features_used_{}.csv'.format(Output_Id),index = False)
sensors_contribution.to_csv('sensors_weighted_contribution_{}.csv'.format(Output_Id), index=True)
features_contribution.to_csv('features_weighted_contribution_{}.csv'.format(Output_Id), index=True)
# Now change to PCA Analyses directory
# -------------------------------------------
x = features_reduzidas[:,0]
y = features_reduzidas[:,1]
z = features_reduzidas[:,2]
x_bom=[]
x_ruim=[]
y_bom=[]
y_ruim=[]
z_bom=[]
z_ruim=[]
for i in range(len(Target)):
if Target[i] == 0:
x_bom.append(x[i])
y_bom.append(y[i])
z_bom.append(z[i])
if Target[i] == 1:
x_ruim.append(x[i])
y_ruim.append(y[i])
z_ruim.append(z[i])
os.chdir( PCA_Figures_path )
fig = plt.figure(figsize=[14,10])
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x_bom, y_bom, z_bom, c = 'blue' )
ax.scatter(x_ruim, y_ruim, z_ruim, c = 'red' )
plt.ylabel('PC2',fontsize = 20,labelpad=18)
plt.xlabel('PC1',fontsize = 20, labelpad=18)
ax.set_zlabel('PC3', fontsize = 20, labelpad=12)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=16)
plt.tick_params(axis='z', labelsize=16)
ax.grid()
red_patch = mpatches.Patch(color='red', label='Non-Functional Tools')
blue_patch = mpatches.Patch(color='blue', label='Functional Tools')
plt.legend(handles=[red_patch,blue_patch],fontsize = 20)
#plt.show()
fig.savefig('ScatterPlot_features__{}__{}.png'.format(Output_Id,it), bbox_inches='tight')
# -------------------------------------------
fig = plt.figure(figsize=[21,7])
ax = fig.subplots(1,3)
ax[0].scatter(x_bom, y_bom, c = 'blue' )
ax[0].scatter(x_ruim, y_ruim, c = 'red' )
ax[0].set_xlabel('PC1',fontsize = 20)
ax[0].set_ylabel('PC2',fontsize = 20)
ax[0].grid()
ax[1].scatter(x_bom, z_bom, c = 'blue' )
ax[1].scatter(x_ruim, z_ruim, c = 'red' )
ax[1].set_xlabel('PC1',fontsize = 20)
ax[1].set_ylabel('PC3',fontsize = 20)
ax[1].grid()
ax[2].scatter(y_bom, z_bom, c = 'blue' )
ax[2].scatter(y_ruim, z_ruim, c = 'red' )
ax[2].set_xlabel('PC2',fontsize = 20,)
ax[2].set_ylabel('PC3',fontsize = 20)
ax[2].grid()
#plt.show()
fig.savefig('PC1PC2PC3__{}__{}.png'.format(Output_Id,it), bbox_inches='tight')
# -------------------------------------------
# -------------------------------------------
os.chdir( PCA_Analyses_path )
np.savetxt("features_reduzidas_" + str(Output_Id) + ".csv", features_reduzidas, delimiter=',')
Output = {'ReducedFeatures': features_reduzidas,
'ID': Output_Id}
elif (Chose == 'Test'):
Output = {'ID': Output_Id}
# Now change back to base directory
os.chdir( base_path )
return Output
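# Usage sketch for PCA_calc (dictionary keys follow the docstring above; N_PCs=3 matches the
# 3-D scatter plots produced in 'Analytics' mode; the argument values are illustrative only):
def _example_pca_calc(SelectedFeatures):
    ReducedFeatures = PCA_calc(SelectedFeatures, N_PCs=3, Chose='Analytics')
    return ReducedFeatures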
def PCA_projection (features):
''' Function for ONLINE mode
This function standardises the online features with the scaler saved by PCA_calc
and projects them onto three Principal Components.
Parameters:
------
features: pd.DataFrame
feature matrix produced by the online TSFRESH extraction (same columns as the offline training set)
Returns:
-------
np.array
the dataset projected onto the Principal Components
'''
loaded_scaler = pickle.load(open('Kernel/pca_scaler.sav', 'rb'))
features_padronizadas = loaded_scaler.transform(features)
#center the data and scale it to unit standard deviation
#scaler = StandardScaler().fit(features)
#features_padronizadas = scaler.transform(features)
pca= PCA(n_components = 3)
pca.fit(features_padronizadas)
variacao_percentual_pca = np.round(pca.explained_variance_ratio_ * 100, decimals = 2)
print('Variation maintained: %.2f' % variacao_percentual_pca.sum())
print(' ')
features_reduzidas = pca.transform(features_padronizadas)
"""
# load the model from disk
loaded_pca = pickle.load(open('Kernel/pca.sav', 'rb'))
scaler = StandardScaler().fit(features)
features_padronizadas = scaler.transform(features)
features_padronizadas = scaler.transform(features)
features_reduzidas = loaded_pca.transform(features_padronizadas)
"""
return features_reduzidas
class cpu_usage(threading.Thread):### Thread to calculate duration and mean CPU percent usage in a SODA classifier
def __init__(self):
threading.Thread.__init__(self)
self.control = True
def run(self):
cpu = []
t_inicial = time.time()
while self.control:
cpu.append(cpu_percent(interval=1, percpu=True))
t_final = time.time()
self.deltatime = t_final - t_inicial
self.mean_cpu = np.mean(cpu)
def stop(self):
self.control = False
def join(self):
threading.Thread.join(self)
return self.deltatime, self.mean_cpu
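# Usage sketch for the cpu_usage helper thread, mirroring how it wraps the SODA calls below
# (the sleep is a placeholder for the monitored workload):
def _example_cpu_usage():
    meter = cpu_usage()
    meter.start()
    time.sleep(2)
    meter.stop()
    deltatime, mean_cpu = meter.join()   # (elapsed seconds, mean CPU percentage)
    return deltatime, mean_cpu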
def grid_set(data, N): #SODA process
_ , W = data.shape
AvD1 = data.mean(0)
X1 = np.mean(np.sum(np.power(data,2),axis=1))
grid_trad = np.sqrt(2*(X1 - AvD1*AvD1.T))/N
Xnorm = np.sqrt(np.sum(np.power(data,2),axis=1))
aux = Xnorm
for _ in range(W-1):
aux = np.insert(aux,0,Xnorm.T,axis=1)
data = data / aux
seq = np.argwhere(np.isnan(data))
if tuple(seq[::]): data[tuple(seq[::])] = 1
AvD2 = data.mean(0)
grid_angl = np.sqrt(1-AvD2*AvD2.T)/N
return X1, AvD1, AvD2, grid_trad, grid_angl
def pi_calculator(Uniquesample, mode):#SODA process
UN, W = Uniquesample.shape
if mode == 'euclidean' or mode == 'mahalanobis' or mode == 'cityblock' or mode == 'chebyshev' or mode == 'canberra':
AA1 = Uniquesample.mean(0)
X1 = sum(sum(np.power(Uniquesample,2)))/UN
DT1 = X1 - sum(np.power(AA1,2))
aux = []
for i in range(UN): aux.append(AA1)
aux2 = [Uniquesample[i]-aux[i] for i in range(UN)]
uspi = np.sum(np.power(aux2,2),axis=1)+DT1
if mode == 'minkowski':
AA1 = Uniquesample.mean(0)
X1 = sum(sum(np.power(Uniquesample,2)))/UN
DT1 = X1 - sum(np.power(AA1,2))
aux = np.matrix(AA1)
for i in range(UN-1): aux = np.insert(aux,0,AA1,axis=0)
aux = np.array(aux)
uspi = np.sum(np.power(cdist(Uniquesample, aux, mode, p=1.5),2),1)+DT1
if mode == 'cosine':
Xnorm = np.matrix(np.sqrt(np.sum(np.power(Uniquesample,2),axis=1))).T
aux2 = Xnorm
for i in range(W-1):
aux2 = np.insert(aux2,0,Xnorm.T,axis=1)
Uniquesample1 = Uniquesample / aux2
AA2 = np.mean(Uniquesample1,0)
X2 = 1
DT2 = X2 - np.sum(np.power(AA2,2))
aux = []
for i in range(UN): aux.append(AA2)
aux2 = [Uniquesample1[i]-aux[i] for i in range(UN)]
uspi = np.sum(np.sum(np.power(aux2,2),axis=1),axis=1)+DT2
return uspi
def Globaldensity_Calculator(data, distancetype):#SODA process
Uniquesample, J, K = np.unique(data, axis=0, return_index=True, return_inverse=True)
Frequency, _ = np.histogram(K,bins=len(J))
uspi1 = pi_calculator(Uniquesample, distancetype)
sum_uspi1 = sum(uspi1)
Density_1 = uspi1 / sum_uspi1
uspi2 = pi_calculator(Uniquesample, 'cosine')
sum_uspi2 = sum(uspi2)
Density_2 = uspi2 / sum_uspi2
GD = (Density_2+Density_1) * Frequency
index = GD.argsort()[::-1]
GD = GD[index]
Uniquesample = Uniquesample[index]
Frequency = Frequency[index]
return GD, Uniquesample, Frequency
def chessboard_division(Uniquesample, MMtypicality, interval1, interval2, distancetype):#SODA process
L, W = Uniquesample.shape
if distancetype == 'euclidean':
W = 1
BOX = [Uniquesample[k] for k in range(W)]
BOX_miu = [Uniquesample[k] for k in range(W)]
BOX_S = [1]*W
BOX_X = [sum(Uniquesample[k]**2) for k in range(W)]
NB = W
BOXMT = [MMtypicality[k] for k in range(W)]
for i in range(W,L):
if distancetype == 'minkowski':
a = cdist(Uniquesample[i].reshape(1,-1), BOX_miu, metric=distancetype, p=1.5)
else:
a = cdist(Uniquesample[i].reshape(1,-1), BOX_miu, metric=distancetype)
b = np.sqrt(cdist(Uniquesample[i].reshape(1,-1), BOX_miu, metric='cosine'))
distance = np.array([a[0],b[0]]).T
SQ = []
for j,d in enumerate(distance):
if d[0] < interval1 and d[1] < interval2:
SQ.append(j)
#SQ = np.argwhere(distance[::,0]<interval1 and (distance[::,1]<interval2))
COUNT = len(SQ)
if COUNT == 0:
BOX.append(Uniquesample[i])
NB = NB + 1
BOX_S.append(1)
BOX_miu.append(Uniquesample[i])
BOX_X.append(sum(Uniquesample[i]**2))
BOXMT.append(MMtypicality[i])
if COUNT >= 1:
DIS = distance[SQ[::],0]/interval1 + distance[SQ[::],1]/interval2 # pylint: disable=E1136 # pylint/issues/3139
b = np.argmin(DIS)
BOX_S[SQ[b]] = BOX_S[SQ[b]] + 1
BOX_miu[SQ[b]] = (BOX_S[SQ[b]]-1)/BOX_S[SQ[b]]*BOX_miu[SQ[b]] + Uniquesample[i]/BOX_S[SQ[b]]
BOX_X[SQ[b]] = (BOX_S[SQ[b]]-1)/BOX_S[SQ[b]]*BOX_X[SQ[b]] + sum(Uniquesample[i]**2)/BOX_S[SQ[b]]
BOXMT[SQ[b]] = BOXMT[SQ[b]] + MMtypicality[i]
return BOX, BOX_miu, BOX_X, BOX_S, BOXMT, NB
def ChessBoard_PeakIdentification(BOX_miu,BOXMT,NB,Internval1,Internval2, distancetype):#SODA process
Centers = []
n = 2
ModeNumber = 0
if distancetype == 'minkowski':
distance1 = squareform(pdist(BOX_miu,metric=distancetype, p=1.5))
else:
distance1 = squareform(pdist(BOX_miu,metric=distancetype))
distance2 = np.sqrt(squareform(pdist(BOX_miu,metric='cosine')))
for i in range(NB):
seq = []
for j,(d1,d2) in enumerate(zip(distance1[i],distance2[i])):
if d1 < n*Internval1 and d2 < n*Internval2:
seq.append(j)
Chessblocak_typicality = [BOXMT[j] for j in seq]
if max(Chessblocak_typicality) == BOXMT[i]:
Centers.append(BOX_miu[i])
ModeNumber = ModeNumber + 1
return Centers, ModeNumber
def cloud_member_recruitment(ModelNumber,Center_samples,Uniquesample,grid_trad,grid_angl, distancetype):#SODA process
L, W = Uniquesample.shape
Membership = np.zeros((L,ModelNumber))
Members = np.zeros((L,ModelNumber*W))
Count = []
if distancetype == 'minkowski':
distance1 = cdist(Uniquesample,Center_samples, metric=distancetype, p=1.5)/grid_trad
else:
distance1 = cdist(Uniquesample,Center_samples, metric=distancetype)/grid_trad
distance2 = np.sqrt(cdist(Uniquesample, Center_samples, metric='cosine'))/grid_angl
distance3 = distance1 + distance2
B = distance3.argmin(1)
for i in range(ModelNumber):
seq = []
for j,b in enumerate(B):
if b == i:
seq.append(j)
Count.append(len(seq))
Membership[:Count[i]:,i] = seq
Members[:Count[i]:,W*i:W*(i+1)] = [Uniquesample[j] for j in seq]
MemberNumber = Count
#Convert the matrix to a vector and add +1 so that no data cloud is labelled 0
B = B.A1
B = [x+1 for x in B]
return Members,MemberNumber,Membership,B
def SelfOrganisedDirectionAwareDataPartitioning(Input, Mode='Offline'):#SODA process
''' Main function of SODA
Parameters:
------
Input: dictionary, with the following items
'GridSize': float
current granularity value
'StaticData': np.matrix
data
'DistanceType': str
current magnitude distance metrics, can be
['euclidean', 'mahalanobis', 'cityblock', 'chebyshev', 'minkowski', 'canberra']
Mode: str
SODA Algorithm mode, can be ['Offline', 'Evolving']
(default = 'Offline')
Returns:
-------
dictionary, with the following items
'C': list
list of center coordinates
'IDX': list
list of the corresponding index of the data cloud to which each event belongs
'SystemParams': dictionary, with the following items
'BOX': list
'BOX_miu': list
'BOX_S': list
'NB': int
'XM': float
'L': int
number of events
'AvM': np.matrix
'AvA': np.matrix
'GridSize': int
current granularity value
'DistanceType': str
current magnitude distance metrics, can be
['euclidean', 'mahalanobis', 'cityblock', 'chebyshev', 'minkowski', 'canberra']
'''
if Mode == 'Offline':
data = Input['StaticData']
L = data.shape[0]
N = Input['GridSize']
distancetype = Input['DistanceType']
X1, AvD1, AvD2, grid_trad, grid_angl = grid_set(data,N)
GD, Uniquesample, Frequency = Globaldensity_Calculator(data, distancetype)
BOX,BOX_miu,BOX_X,BOX_S,BOXMT,NB = chessboard_division(Uniquesample,GD,grid_trad,grid_angl, distancetype)
Center,ModeNumber = ChessBoard_PeakIdentification(BOX_miu,BOXMT,NB,grid_trad,grid_angl, distancetype)
Members,Membernumber,Membership,IDX = cloud_member_recruitment(ModeNumber,Center,data,grid_trad,grid_angl,
distancetype)
Boxparameter = {'BOX': BOX,
'BOX_miu': BOX_miu,
'BOX_S': BOX_S,
'NB': NB,
'XM': X1,
'L': L,
'AvM': AvD1,
'AvA': AvD2,
'GridSize': N}
if Mode == 'Evolving':
#TODO
print(Mode)
Output = {'C': Center,
'IDX': IDX,
'SystemParams': Boxparameter,
'DistanceType': distancetype}
return Output
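# Minimal offline call sketch, matching the Input dictionary documented above (the granularity
# and distance values are placeholders; the SODA() driver below builds the same dictionary
# inside its sweep):
def _example_soda_partitioning(data_matrix):
    Input = {'GridSize': 5,
             'StaticData': np.matrix(data_matrix),
             'DistanceType': 'euclidean'}
    out = SelfOrganisedDirectionAwareDataPartitioning(Input, 'Offline')
    return out['C'], out['IDX']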
def SODA (ReducedFeatures, min_granularity, max_granularity, pace):#SODA
''' Start of SODA
Parameters:
------
ReducedFeatures : dictionary, with the following items
'ReducedFeatures': np.array
contains the output data of PCA, i.e., the dataset with Principal Components projected by PCA
'ID': int
identifier for the dataset
min_granularity: float
first value of granularity for SODA algorithm
max_granularity: float
final value of granularity for SODA algorithm
pace: float
increase of granularity for SODA algorithm
Returns:
-------
dictionary, with the following items
'Distances': list
list of magnitude distances metrics used in SODA algorithm,
'Min_g': float
first value of granularity for SODA algorithm
'Max_g': float
final value of granularity for SODA algorithm
'Pace': float
increase of granularity for SODA algorithm
'ID':int
identifier for the dataset
list of dictionaries, with the following items
'DistanceType': str
magnitude distance metric used
'Granularity': float
granularity used
'Time': float
execution time in seconds of SODA for this specific granularity and magnitude
distance metric
'CPUPercent': float
mean percentage of CPU usage for this specific granularity and magnitude
distance metric
'''
#Changing Work Folder
add_path2 = "/Kernel/"
add_path3 = "/.Recovery/"
base_path = os.getcwd()
working_path = os.getcwd()
Kernel_path = working_path + add_path2
Recovery_path = working_path + add_path3
DataSetID = ReducedFeatures['ID']
data = ReducedFeatures['ReducedFeatures']
data = np.matrix(data)
distances = ['euclidean']#, 'mahalanobis', 'cityblock', 'chebyshev', 'minkowski', 'canberra']
processing_parameters = []
#### Looping SODA within the chosen granularities and distances ####
# Now change to Kernel directory
os.chdir( Kernel_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
for g in np.arange(int(min_granularity), int (max_granularity + pace), pace):
for d in distances:
### Start Thread
time_cpu_thread = cpu_usage()
time_cpu_thread.start()
Input = {'GridSize':g, 'StaticData':data, 'DistanceType': d}
out = SelfOrganisedDirectionAwareDataPartitioning(Input,'Offline')
### Interrupt Thread and Calculate Parameters
time_cpu_thread.stop()
deltatime, mean_cpu = time_cpu_thread.join()
pp = {'DistanceType': d,
'Granularity': g,
'Time': deltatime,
'CPUPercent': mean_cpu}
processing_parameters.append(pp)
np.savetxt('SODA_' + d + '_label_' + str (DataSetID) + '_' + str("%.2f" % g) + '.csv', out['IDX'],delimiter=',')
# Now change to base directory
os.chdir( Recovery_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
np.save(('processing_parameters.npy'), processing_parameters)
np.save(('distances.npy'), distances)
np.save(('Min_g.npy'), min_granularity)
np.save(('Max_g.npy'), max_granularity)
np.save(('Pace.npy'), pace)
Output = {'Distances': distances,
'Min_g': min_granularity,
'Max_g': max_granularity,
'Pace': pace,
'ID': DataSetID}
# Now change to base directory
os.chdir( base_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
return Output, processing_parameters
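# Granularity-sweep sketch: SODA is normally fed the dictionary returned by PCA_calc
# (the granularity range and pace below are placeholders only):
def _example_soda_sweep(ReducedFeatures):
    SODA_parameters, processing_parameters = SODA(ReducedFeatures, 1, 10, 1)
    return SODA_parameters, processing_parameters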
def GroupingAlgorithm (SODA_parameters,define_percent, processing_parameters,DataSetID): #Grouping Algorithm
#Changing Work Folder
add_path1 = "/PCA_Analyses/"
add_path2 = "/Kernel/"
add_path3 = "/.Recovery/"
add_path4 = "/Grouping_Analyses/"
base_path = os.getcwd()
working_path = os.getcwd()
PCA_Analyses_path = working_path + add_path1
Kernel_path = working_path + add_path2
Recovery_path = working_path + add_path3
Grouping_Analyses_path = working_path + add_path4
print(' ')
print('Grouping Algorithm Control Output')
print('----------------------------------')
#### input data ####
DataSetID = SODA_parameters['ID']
min_granularity = SODA_parameters['Min_g']
max_granularity = SODA_parameters['Max_g']
pace = SODA_parameters['Pace']
distances = SODA_parameters['Distances']
# Change to Kernel directory
os.chdir(Kernel_path)
with open('final_target_'+str(DataSetID)+'.pkl', 'rb') as f:
y_original = pickle.load(f)
# functional engines
#n_IDs_gp1 = 0 # non-functional engines
#n_IDs_gp2 = 3598 # eminent fault engines
for d in distances:
for g in np.arange(int(min_granularity), int (max_granularity + pace), pace):
### Start Thread
time_cpu_thread = cpu_usage()
time_cpu_thread.start()
s = 'SODA_' + d + '_label_' + str(DataSetID) + '_' + str("%.2f" % g) + '.csv'
#### Database input ####
# Now change to Kernel directory
os.chdir( Kernel_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
SodaOutput = np.genfromtxt( s , delimiter=',')
# Now change to PCA Analyses directory
os.chdir( PCA_Analyses_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
SelectedFeatures = np.genfromtxt('features_reduzidas_' + str(DataSetID) + '.csv' , delimiter=',')
#### Program Matrix's and Variables ####
n_DA_planes = np.max(SodaOutput)
Percent = np.zeros((int(n_DA_planes),3))
n_IDs_per_gp = np.zeros((int(n_DA_planes),2))
n_tot_Id_per_DA = np.zeros((int(n_DA_planes),1))
decision = np.zeros(int(n_DA_planes))
selected_samples = np.zeros(2)
n_DA_excluded = 0
n_excluded = 0
n_gp0 = 0
n_gp1 = 0
n_gp2 = 0
n_data_def = 0
k = 0
#### Definition Percentage Calculation #####
for i in range(y_original.shape[0]):
if y_original[i] == 0:
n_IDs_per_gp [int(SodaOutput[i]-1),0] += 1
else:
n_IDs_per_gp [int(SodaOutput[i]-1),1] += 1
n_tot_Id_per_DA [int(SodaOutput[i]-1)] += 1
for i in range(int(n_DA_planes)):
Percent[i,0] = (n_IDs_per_gp[i,0] / n_tot_Id_per_DA[i]) * 100
Percent[i,1] = (n_IDs_per_gp[i,1] / n_tot_Id_per_DA[i]) * 100
#Percent[i,2] = ((n_tot_Id_per_DA[i] - (n_IDs_per_gp[i,0] + n_IDs_per_g[i,1])) / n_tot_Id_per_DA[i]) * 100
#### Using Definition Percentage as Decision Parameter ####
for i in range(Percent.shape[0]): # pylint: disable=E1136 # pylint/issues/3139
if (Percent[i,0] >= define_percent):
n_gp0 = n_gp0 + 1
decision[i] = 0
elif (Percent[i,1] >= define_percent):
n_gp1 = n_gp1 + 1
decision[i] = 1
elif (Percent[i,2] >= define_percent):
n_gp2 = n_gp2 + 1
decision[i] = 2
else:
n_DA_excluded += 1
decision[i] = -1
#### Using decision matrix to determine the number of excluded data
for i in range (len (decision)):
if decision[i] == -1:
n_excluded += np.sum(n_IDs_per_gp[i,:])
#### Passing data of well defined DA planes to SelectedData and defining labels
SelectedData = np.zeros((int(SelectedFeatures.shape[0] - n_excluded),int(SelectedFeatures.shape[1])))# pylint: disable=E1136 # pylint/issues/3139
ClassifiersLabel = np.zeros((int(SelectedFeatures.shape[0] - n_excluded)))# pylint: disable=E1136 # pylint/issues/3139
ComparisonLabel = np.zeros((int(y_original.shape[0] - n_excluded)))# pylint: disable=E1136 # pylint/issues/3139
for i in range (SodaOutput.shape[0]): # pylint: disable=E1136 # pylint/issues/3139
if decision[int (SodaOutput[i]-1)] != -1:
SelectedData[k] = SelectedFeatures[i]
ClassifiersLabel [k] = decision[int (SodaOutput[i]-1)]
ComparisonLabel [k] = y_original[i]
k += 1
for i in range (decision.shape[0]): # pylint: disable=E1136 # pylint/issues/3139
if decision[i] != -1:
selected_samples[0] += n_IDs_per_gp[i,0]
selected_samples[1] += n_IDs_per_gp[i,1]
#### Saving processed data, IDs and percentages
# Now change to Kernel directory
os.chdir( Kernel_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
#np.savetxt('X_' + str(define_percent) + '_' + d + '_Labels_' + str(DataSetID) + '_' + str("%.2f" % g) + '.csv', SelectedData, delimiter=',')
#np.savetxt('Y_' + str(define_percent) + '_' + d + '_Labels_' + str(DataSetID) + '_' + str("%.2f" % g) + '.csv', ClassifiersLabel, delimiter=',')
#np.savetxt('Original_Y_' + str(define_percent) + '_' + d + '_Labels_' + str(DataSetID) + '_' + str("%.2f" % g) + '.csv', ComparisonLabel, delimiter=',')
np.save('X_' + str(define_percent) + '_' + d + '_Labels_' + str(DataSetID) + '_' + str("%.2f" % g) + '.npy', SelectedData)
np.save('Y_' + str(define_percent) + '_' + d + '_Labels_' + str(DataSetID) + '_' + str("%.2f" % g) + '.npy', ClassifiersLabel)
np.save('Original_Y_' + str(define_percent) + '_' + d + '_Labels_' + str(DataSetID) + '_' + str("%.2f" % g) + '.npy', ComparisonLabel)
### Interrupt Thread and recalculate parameters
time_cpu_thread.stop()
deltatime, mean_cpu = time_cpu_thread.join()
for pp in processing_parameters:
if pp['DistanceType'] == d and pp['Granularity'] == g:
aux = pp
break
totaltime = deltatime + aux['Time']
cpu_percent = (mean_cpu + aux['CPUPercent'])/2
### Printing analytics results
print(s)
print('Number of data clouds: %d' % n_DA_planes)
print('Number of good tools groups: %d' % n_gp0)
print('Number of worn tools groups: %d' % n_gp1)
print('Number of excluded data clouds: %d' % n_DA_excluded)
print('Number of samples: %d' % int(SodaOutput.shape[0])) # pylint: disable=E1136 # pylint/issues/3139
print('Number of good tools samples: %d' % int(selected_samples[0]))
print('Number of worn tools samples: %d' % int(selected_samples[1]))
print('Number of excluded samples: %d' % n_excluded)
print('Data representation loss: %.2f' % (100-((SelectedData.shape[0] / SelectedFeatures.shape[0]) * 100))) # pylint: disable=E1136 # pylint/issues/3139
print('Analysis execution time: %.6f seconds' % totaltime)
print('Average CPU usage: %.2f' % cpu_percent)
print('---------------------------------------------------')
#### Saving the grouping analysis report
# Now change to Kernel directory
os.chdir( Grouping_Analyses_path )
Grouping_Analyse = open("Grouping_Analyse_ID_" + str(DataSetID) + "_min_" + str(min_granularity) + "_max_" + str(max_granularity) + '_' + str(define_percent) +"%.txt","a+")
Grouping_Analyse.write(s)
Grouping_Analyse.write('\nNumber of data clouds: %d\n' % n_DA_planes)
Grouping_Analyse.write('Number of good tools groups: %d\n' % n_gp0)
Grouping_Analyse.write('Number of worn tools groups: %d\n' % n_gp1)
Grouping_Analyse.write('Number of excluded data clouds: %d\n' % n_DA_excluded)
Grouping_Analyse.write('Number of samples: %d\n' % int(SodaOutput.shape[0]))
Grouping_Analyse.write('Number of good tools samples: %d\n' % int(selected_samples[0]))
Grouping_Analyse.write('Number of worn tools samples: %d\n' % int(selected_samples[1]))
Grouping_Analyse.write('Number of excluded samples: %d\n' % n_excluded)
Grouping_Analyse.write('Data representation loss: %.2f\n' % (100-((SelectedData.shape[0] / SelectedFeatures.shape[0]) * 100))) # pylint: disable=E1136 # pylint/issues/3139
Grouping_Analyse.write('Analysis execution time: %.6f seconds\n' % totaltime)
Grouping_Analyse.write('Average CPU usage: %.2f\n' % cpu_percent)
Grouping_Analyse.write('---------------------------------------------------')
Grouping_Analyse.close()
#np.savetxt('Percent.csv',define_percent,delimiter = ',')
# Now change to base directory
os.chdir( Recovery_path )
np.save("define_percent.npy",define_percent)
Output = {'Percent': define_percent,
'Distances': distances,
'Pace': pace,
'ID': DataSetID}
# Now change to base directory
os.chdir( base_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
return Output
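# End-to-end sketch of the labelling stage (the define_percent value of 80 is a placeholder;
# it assumes the Kernel/ artefacts from the earlier steps exist): the pair returned by the
# SODA sweep above is passed straight into the grouping step.
def _example_grouping(SODA_parameters, processing_parameters):
    return GroupingAlgorithm(SODA_parameters, 80, processing_parameters, SODA_parameters['ID'])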
def Classification (ClassificationPar, min_granularity,max_granularity,n_a): #Classifiers
#Changing Work Folder
add_path1 = "/Classification/"
add_path2 = "/Kernel/"
add_path3 = "/.Recovery/"
base_path = os.getcwd()
working_path = os.getcwd()
Classification_path = working_path + add_path1
Kernel_path = working_path + add_path2
Recovery_path = working_path + add_path3
# Change to Kernel directory
os.chdir(Kernel_path)
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
"Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(gamma='scale'),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0)),
DecisionTreeClassifier(),
RandomForestClassifier(n_estimators=100),
MLPClassifier(alpha=1,max_iter=500),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
Output_ID = ClassificationPar['ID']
distances = ClassificationPar['Distances']
pace = ClassificationPar['Pace']
gra = np.arange(min_granularity,max_granularity,pace)
for d in distances:
for g in gra:
try:
# Now change to Kernel directory
os.chdir( Kernel_path )
# preprocess dataset, split into training and test part
Accuracy = np.zeros((n_a, len(classifiers) + 1))
Precision = np.zeros((n_a, len(classifiers) + 1))
Recall = np.zeros((n_a, len(classifiers) + 1))
F1 = np.zeros((n_a, len(classifiers) + 1))
s = str (int(ClassificationPar['Percent'] )) + '_' + d + '_Labels_' + str(int(ClassificationPar['ID'])) + '_' + str("%.2f" % g) + '.npy'
X = np.load('X_' + s)
y_soda = np.load('Y_' + s)
y_original = np.load('Original_Y_' + s)
#Loop over the number of samples
for i in range(Accuracy.shape[0]): # pylint: disable=E1136 # pylint/issues/3139
k = 0
# iterate over classifiers
X_train, X_test, y_train_soda, y_test_soda, y_train_original, y_test_original = train_test_split(X, y_soda, y_original, test_size=.4)
for name, clf in zip(names, classifiers):
clf.fit(X_train, y_train_soda)
pickle.dump(clf, open('model.sav', 'wb'))
score = clf.score(X_test, y_test_original)
y_predict = list(clf.predict(X_test))
Accuracy[i,k] = score*100
Precision[i,k] = precision_score(y_test_original, y_predict, zero_division=0)*100
Recall[i,k] = recall_score(y_test_original, y_predict)*100
F1[i,k] = f1_score(y_test_original, y_predict)*100
k +=1
ClassifiersLabel = list(clf.predict(X_test))
results = []
latex_results = []
#Calculating mean and standard deviation
for i in range(len(names)):
results.append(['{:.2f} \u00B1 {:.2f}%'.format(np.mean(Accuracy[:,i]),np.std(Accuracy[:,i])),
'{:.2f} \u00B1 {:.2f}%'.format(np.mean(Precision[:,i]),np.std(Precision[:,i])),
'{:.2f} \u00B1 {:.2f}%'.format(np.mean(Recall[:,i]),np.std(Recall[:,i])),
'{:.2f} \u00B1 {:.2f}%'.format(np.mean(F1[:,i]),np.std(F1[:,i]))])
latex_results.append(['{:.2f} $\pm$ {:.2f} &'.format(np.mean(Accuracy[:,i]),np.std(Accuracy[:,i])),
'{:.2f} $\pm$ {:.2f} &'.format( | np.mean(Precision[:,i]) | numpy.mean |
import numpy as np
import pytest
from scipy.stats import (bootstrap, BootstrapDegenerateDistributionWarning,
monte_carlo_test, permutation_test)
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
from scipy import stats
from scipy import special
from .. import _resampling as _resampling
from scipy._lib._util import rng_integers
from scipy.optimize import root
def test_bootstrap_iv():
message = "`data` must be a sequence of samples."
with pytest.raises(ValueError, match=message):
bootstrap(1, np.mean)
message = "`data` must contain at least one sample."
with pytest.raises(ValueError, match=message):
bootstrap(tuple(), np.mean)
message = "each sample in `data` must contain two or more observations..."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3], [1]), np.mean)
message = ("When `paired is True`, all samples must have the same length ")
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3], [1, 2, 3, 4]), np.mean, paired=True)
message = "`vectorized` must be `True` or `False`."
with pytest.raises(ValueError, match=message):
bootstrap(1, np.mean, vectorized='ekki')
message = "`axis` must be an integer."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, axis=1.5)
message = "could not convert string to float"
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, confidence_level='ni')
message = "`n_resamples` must be a positive integer."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, n_resamples=-1000)
message = "`n_resamples` must be a positive integer."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, n_resamples=1000.5)
message = "`batch` must be a positive integer or None."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, batch=-1000)
message = "`batch` must be a positive integer or None."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, batch=1000.5)
message = "`method` must be in"
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, method='ekki')
message = "`method = 'BCa' is only available for one-sample statistics"
def statistic(x, y, axis):
mean1 = np.mean(x, axis)
mean2 = np.mean(y, axis)
return mean1 - mean2
with pytest.raises(ValueError, match=message):
bootstrap(([.1, .2, .3], [.1, .2, .3]), statistic, method='BCa')
message = "'herring' cannot be used to seed a"
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, random_state='herring')
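# The following test is an added illustrative sketch, not part of the upstream scipy suite:
# it shows the minimal well-formed call shape that the validation cases above reject variants of.
def test_bootstrap_minimal_valid_call():
    rng = np.random.default_rng(0)
    x = rng.standard_normal(100)
    res = bootstrap((x,), np.mean, n_resamples=999, random_state=0)
    # a valid one-sample call yields an ordered confidence interval and a positive standard error
    assert res.confidence_interval.low < res.confidence_interval.high
    assert res.standard_error > 0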
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
@pytest.mark.parametrize("axis", [0, 1, 2])
def test_bootstrap_batch(method, axis):
# for one-sample statistics, batch size shouldn't affect the result
np.random.seed(0)
x = np.random.rand(10, 11, 12)
res1 = bootstrap((x,), np.mean, batch=None, method=method,
random_state=0, axis=axis, n_resamples=100)
res2 = bootstrap((x,), np.mean, batch=10, method=method,
random_state=0, axis=axis, n_resamples=100)
assert_equal(res2.confidence_interval.low, res1.confidence_interval.low)
assert_equal(res2.confidence_interval.high, res1.confidence_interval.high)
assert_equal(res2.standard_error, res1.standard_error)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
def test_bootstrap_paired(method):
# test that `paired` works as expected
np.random.seed(0)
n = 100
x = np.random.rand(n)
y = np.random.rand(n)
def my_statistic(x, y, axis=-1):
return ((x-y)**2).mean(axis=axis)
def my_paired_statistic(i, axis=-1):
a = x[i]
b = y[i]
res = my_statistic(a, b)
return res
i = np.arange(len(x))
res1 = bootstrap((i,), my_paired_statistic, random_state=0)
res2 = bootstrap((x, y), my_statistic, paired=True, random_state=0)
assert_allclose(res1.confidence_interval, res2.confidence_interval)
assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
@pytest.mark.parametrize("axis", [0, 1, 2])
@pytest.mark.parametrize("paired", [True, False])
def test_bootstrap_vectorized(method, axis, paired):
# test that paired is vectorized as expected: when samples are tiled,
# CI and standard_error of each axis-slice is the same as those of the
# original 1d sample
if not paired and method == 'BCa':
# should re-assess when BCa is extended
pytest.xfail(reason="BCa currently for 1-sample statistics only")
np.random.seed(0)
def my_statistic(x, y, z, axis=-1):
return x.mean(axis=axis) + y.mean(axis=axis) + z.mean(axis=axis)
shape = 10, 11, 12
n_samples = shape[axis]
x = np.random.rand(n_samples)
y = np.random.rand(n_samples)
z = np.random.rand(n_samples)
res1 = bootstrap((x, y, z), my_statistic, paired=paired, method=method,
random_state=0, axis=0, n_resamples=100)
reshape = [1, 1, 1]
reshape[axis] = n_samples
x = np.broadcast_to(x.reshape(reshape), shape)
y = np.broadcast_to(y.reshape(reshape), shape)
z = np.broadcast_to(z.reshape(reshape), shape)
res2 = bootstrap((x, y, z), my_statistic, paired=paired, method=method,
random_state=0, axis=axis, n_resamples=100)
assert_allclose(res2.confidence_interval.low,
res1.confidence_interval.low)
assert_allclose(res2.confidence_interval.high,
res1.confidence_interval.high)
assert_allclose(res2.standard_error, res1.standard_error)
result_shape = list(shape)
result_shape.pop(axis)
assert_equal(res2.confidence_interval.low.shape, result_shape)
assert_equal(res2.confidence_interval.high.shape, result_shape)
assert_equal(res2.standard_error.shape, result_shape)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
def test_bootstrap_against_theory(method):
# based on https://www.statology.org/confidence-intervals-python/
data = stats.norm.rvs(loc=5, scale=2, size=5000, random_state=0)
alpha = 0.95
dist = stats.t(df=len(data)-1, loc=np.mean(data), scale=stats.sem(data))
expected_interval = dist.interval(confidence=alpha)
expected_se = dist.std()
res = bootstrap((data,), np.mean, n_resamples=5000,
confidence_level=alpha, method=method,
random_state=0)
assert_allclose(res.confidence_interval, expected_interval, rtol=5e-4)
assert_allclose(res.standard_error, expected_se, atol=3e-4)
tests_R = {"basic": (23.77, 79.12),
"percentile": (28.86, 84.21),
"BCa": (32.31, 91.43)}
@pytest.mark.parametrize("method, expected", tests_R.items())
def test_bootstrap_against_R(method, expected):
# Compare against R's "boot" library
# library(boot)
# stat <- function (x, a) {
# mean(x[a])
# }
# x <- c(10, 12, 12.5, 12.5, 13.9, 15, 21, 22,
# 23, 34, 50, 81, 89, 121, 134, 213)
# # Use a large value so we get a few significant digits for the CI.
# n = 1000000
# bootresult = boot(x, stat, n)
# result <- boot.ci(bootresult)
# print(result)
x = np.array([10, 12, 12.5, 12.5, 13.9, 15, 21, 22,
23, 34, 50, 81, 89, 121, 134, 213])
res = bootstrap((x,), np.mean, n_resamples=1000000, method=method,
random_state=0)
assert_allclose(res.confidence_interval, expected, rtol=0.005)
tests_against_itself_1samp = {"basic": 1780,
"percentile": 1784,
"BCa": 1784}
@pytest.mark.parametrize("method, expected",
tests_against_itself_1samp.items())
def test_bootstrap_against_itself_1samp(method, expected):
# The expected values in this test were generated using bootstrap
# to check for unintended changes in behavior. The test also makes sure
# that bootstrap works with multi-sample statistics and that the
# `axis` argument works as expected / function is vectorized.
np.random.seed(0)
n = 100 # size of sample
n_resamples = 999 # number of bootstrap resamples used to form each CI
confidence_level = 0.9
# The true mean is 5
dist = stats.norm(loc=5, scale=1)
stat_true = dist.mean()
# Do the same thing 2000 times. (The code is fully vectorized.)
n_replications = 2000
data = dist.rvs(size=(n_replications, n))
res = bootstrap((data,),
statistic=np.mean,
confidence_level=confidence_level,
n_resamples=n_resamples,
batch=50,
method=method,
axis=-1)
ci = res.confidence_interval
# ci contains vectors of lower and upper confidence interval bounds
ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1]))
assert ci_contains_true == expected
# ci_contains_true is not inconsistent with confidence_level
pvalue = stats.binomtest(ci_contains_true, n_replications,
confidence_level).pvalue
assert pvalue > 0.1
tests_against_itself_2samp = {"basic": 892,
"percentile": 890}
@pytest.mark.parametrize("method, expected",
tests_against_itself_2samp.items())
def test_bootstrap_against_itself_2samp(method, expected):
# The expected values in this test were generated using bootstrap
# to check for unintended changes in behavior. The test also makes sure
# that bootstrap works with multi-sample statistics and that the
# `axis` argument works as expected / function is vectorized.
np.random.seed(0)
n1 = 100 # size of sample 1
n2 = 120 # size of sample 2
n_resamples = 999 # number of bootstrap resamples used to form each CI
confidence_level = 0.9
# The statistic we're interested in is the difference in means
def my_stat(data1, data2, axis=-1):
mean1 = np.mean(data1, axis=axis)
mean2 = np.mean(data2, axis=axis)
return mean1 - mean2
# The true difference in the means is -0.1
dist1 = stats.norm(loc=0, scale=1)
dist2 = stats.norm(loc=0.1, scale=1)
stat_true = dist1.mean() - dist2.mean()
# Do the same thing 1000 times. (The code is fully vectorized.)
n_replications = 1000
data1 = dist1.rvs(size=(n_replications, n1))
data2 = dist2.rvs(size=(n_replications, n2))
res = bootstrap((data1, data2),
statistic=my_stat,
confidence_level=confidence_level,
n_resamples=n_resamples,
batch=50,
method=method,
axis=-1)
ci = res.confidence_interval
# ci contains vectors of lower and upper confidence interval bounds
ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1]))
assert ci_contains_true == expected
# ci_contains_true is not inconsistent with confidence_level
pvalue = stats.binomtest(ci_contains_true, n_replications,
confidence_level).pvalue
assert pvalue > 0.1
@pytest.mark.parametrize("method", ["basic", "percentile"])
@pytest.mark.parametrize("axis", [0, 1])
def test_bootstrap_vectorized_3samp(method, axis):
def statistic(*data, axis=0):
# an arbitrary, vectorized statistic
return sum((sample.mean(axis) for sample in data))
def statistic_1d(*data):
# the same statistic, not vectorized
for sample in data:
assert sample.ndim == 1
return statistic(*data, axis=0)
np.random.seed(0)
x = np.random.rand(4, 5)
y = np.random.rand(4, 5)
z = np.random.rand(4, 5)
res1 = bootstrap((x, y, z), statistic, vectorized=True,
axis=axis, n_resamples=100, method=method, random_state=0)
res2 = bootstrap((x, y, z), statistic_1d, vectorized=False,
axis=axis, n_resamples=100, method=method, random_state=0)
assert_allclose(res1.confidence_interval, res2.confidence_interval)
assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.xfail_on_32bit("Failure is not concerning; see gh-14107")
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
@pytest.mark.parametrize("axis", [0, 1])
def test_bootstrap_vectorized_1samp(method, axis):
def statistic(x, axis=0):
# an arbitrary, vectorized statistic
return x.mean(axis=axis)
def statistic_1d(x):
# the same statistic, not vectorized
assert x.ndim == 1
return statistic(x, axis=0)
np.random.seed(0)
x = np.random.rand(4, 5)
res1 = bootstrap((x,), statistic, vectorized=True, axis=axis,
n_resamples=100, batch=None, method=method,
random_state=0)
res2 = bootstrap((x,), statistic_1d, vectorized=False, axis=axis,
n_resamples=100, batch=10, method=method,
random_state=0)
assert_allclose(res1.confidence_interval, res2.confidence_interval)
assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
def test_bootstrap_degenerate(method):
data = 35 * [10000.]
if method == "BCa":
with np.errstate(invalid='ignore'):
with pytest.warns(BootstrapDegenerateDistributionWarning):
res = bootstrap([data, ], np.mean, method=method)
assert_equal(res.confidence_interval, (np.nan, np.nan))
else:
res = bootstrap([data, ], np.mean, method=method)
assert_equal(res.confidence_interval, (10000., 10000.))
assert_equal(res.standard_error, 0)
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
def test_bootstrap_gh15678(method):
# Check that gh-15678 is fixed: when statistic function returned a Python
# float, method="BCa" failed when trying to add a dimension to the float
rng = np.random.default_rng(354645618886684)
dist = stats.norm(loc=2, scale=4)
data = dist.rvs(size=100, random_state=rng)
data = (data,)
res = bootstrap(data, stats.skew, method=method, n_resamples=100,
random_state=np.random.default_rng(9563))
# this always worked because np.apply_along_axis returns NumPy data type
ref = bootstrap(data, stats.skew, method=method, n_resamples=100,
random_state=np.random.default_rng(9563), vectorized=False)
assert_allclose(res.confidence_interval, ref.confidence_interval)
    assert_allclose(res.standard_error, ref.standard_error)

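# Hedged illustration (not part of the original test suite): a minimal sketch of
# the basic call pattern the tests above exercise, wrapped in a helper so it is
# not collected or executed automatically.
def _example_bootstrap_usage():
    rng = np.random.default_rng(0)
    sample = stats.norm.rvs(size=200, random_state=rng)
    res = bootstrap((sample,), np.std, confidence_level=0.95,
                    n_resamples=999, method='BCa', random_state=rng)
    return res.confidence_interval, res.standard_error
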
# Copyright (c) <NAME>.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory.
# parallel optimization retry of a list of problems.
import numpy as np
import _pickle as cPickle
import bz2
import multiprocessing as mp
from scipy.optimize import OptimizeResult
from fcmaes.optimizer import logger, de_cma, eprint
from fcmaes import advretry
def minimize(problems, ids=None, num_retries = min(256, 8*mp.cpu_count()),
keep = 0.7, optimizer = de_cma(1500), logger = None, datafile = None):
"""Minimization of a list of optimization problems by first applying parallel retry
to filter the best ones and then applying coordinated retry to evaluate these further.
Can replace mixed integer optimization if the integer variables are narrowly bound.
In this case all combinations of these integer values can be enumerated to generate a
list of problem instances each representing one combination. See for instance
https://www.esa.int/gsp/ACT/projects/gtop/tandem where there is a problem instance for each
planet sequence.
Parameters
----------
problems: list
list of objects providing name, fun and bounds attributes like fcmaes.astro.Astrofun
ids: list, optional
list of objects corresponding to the list of problems used in logging to identify the
problem variant currently logged. If None, the index of the problem
variant is used instead.
num_retries: int, optional
number of coordinated retries applied in the problem filter for each problem
in each iteration.
keep: float, optional
rate of the problems kept after each iteration. 100*(1 - keep) % will be deleted.
optimizer: optimizer.Optimizer, optional
optimizer to use for the problem filter.
    logger, optional
        logger for log output. If None (the default), logging is switched off.
        Pass e.g. ``fcmaes.optimizer.logger()`` to log both to stdout and to a
        file.
datafile, optional
file to persist / retrieve the internal state of the optimizations.
Returns
-------
    list of problem_stats
        The statistics objects of all problem variants, sorted by their best
        objective value. Each entry carries ``id``, ``value`` and ``ret``, where
        ``ret`` is a ``scipy.optimize.OptimizeResult`` whose important attributes
        are: ``x`` the solution array, ``fun`` the best function value, ``nfev``
        the number of function evaluations, ``success`` a Boolean flag indicating
        if the optimizer exited successfully. """
solver = multiretry(logger)
n = len(problems)
for i in range(n):
id = str(i+1) if ids is None else ids[i]
solver.add(problem_stats(problems[i], id, i, num_retries, logger))
if not datafile is None:
solver.load(datafile)
while solver.size() > 1:
solver.retry(optimizer)
to_remove = int(round((1.0 - keep) * solver.size()))
if to_remove == 0 and keep < 1.0:
to_remove = 1
solver.remove_worst(to_remove)
solver.dump()
if not datafile is None:
solver.save(datafile)
idx = solver.values_all().argsort()
return list(np.asarray(solver.all_stats)[idx])
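
# Hedged usage sketch (not executed, not part of the original module): `minimize`
# expects objects exposing `name`, `fun` and `bounds`; `my_problems` below is a
# hypothetical list of such objects, e.g. one per planet sequence.
#
#   stats = minimize(my_problems, num_retries=64, keep=0.7,
#                    optimizer=de_cma(1500), logger=logger())
#   best = stats[0]            # problem_stats sorted by best objective value
#   print(best.id, best.value, best.ret.x)
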
class problem_stats:
def __init__(self, prob, id, index, num_retries = 64, logger = None):
self.store = advretry.Store(prob.bounds, logger = logger, num_retries=num_retries)
self.prob = prob
self.name = prob.name
self.fun = prob.fun
self.num_retries = num_retries
self.retries = 0
self.value = 0
self.id = id
self.index = index
self.ret = None
def retry(self, optimizer):
self.retries += self.num_retries
self.ret = advretry.retry(self.fun, self.store, optimizer.minimize)
self.value = self.store.get_y_best()
class multiretry:
def __init__(self, logger = None):
self.problem_stats = []
self.all_stats = []
self.logger = logger
def add(self, stats):
self.problem_stats.append(stats)
self.all_stats.append(stats)
def retry(self, optimizer):
for ps in self.problem_stats:
if not self.logger is None:
self.logger.info("problem " + ps.prob.name + ' ' + str(ps.id))
ps.retry(optimizer)
def values(self):
return np.array([ps.value for ps in self.problem_stats])
def remove_worst(self, n = 1):
idx = self.values().argsort()
self.problem_stats = list(np.asarray(self.problem_stats)[idx])
for _ in range(n):
self.problem_stats.pop(-1)
def size(self):
return len(self.problem_stats)
def dump(self):
if not self.logger is None:
for i in range(self.size()):
ps = self.problem_stats[i]
self.logger.info(str(ps.id) + ' ' + str(ps.value))
def dump_all(self):
if not self.logger is None:
idx = self.values_all().argsort()
self.all_stats = list(np.asarray(self.all_stats)[idx])
for i in range(len(self.all_stats)):
ps = self.all_stats[i]
self.logger.info(str(ps.id) + ' ' + str(ps.value))
def values_all(self):
        return np.array([ps.value for ps in self.all_stats])

import numpy as np
var1 = np.array([(2, 4, 6), (1, 3, 5)])

"""
Copyright (c) 2021, <NAME>
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
import traceback as tb
from dataclasses import dataclass
import re
import numpy as np
it_function_dict = {}
class Accumulator:
def __init__(self, accumulation_seed):
self.accumulation_seed = accumulation_seed
def append(self, element_or_substring):
if isinstance(self.accumulation_seed, str):
self.accumulation_seed += element_or_substring
elif isinstance(self.accumulation_seed, list):
self.accumulation_seed.append(element_or_substring)
return Accumulator(self.accumulation_seed)
class StringBuilder:
def __init__(self):
self.string = ''
def __new__(cls):
return Accumulator('')
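
# Hedged illustration (not part of the original module): StringBuilder() returns
# an Accumulator seeded with an empty string, so appends chain like Kotlin's
# StringBuilder.
#
#   sb = StringBuilder()
#   sb = sb.append('Hello, ').append('world')
#   sb.accumulation_seed   # -> 'Hello, world'
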
class PyKot:
def __init__(self, variable, recall=False):
self.variable = variable
self.recall = recall
self.var = variable
def __repr__(self):
return str(self.variable)
def last_index(self): # lastIndex()
raise_type_error_if_merited("last_index()", self.variable, str, list, tuple, type(np.array([])))
return PyKot(len(self.variable) - 1)
def drop(self, drop_from_front: int): # drop(n)
raise_type_error_if_merited("drop(Int)", self.variable, str, list, tuple, type(np.array([])))
self.variable, original_type = pre_type_work(self.variable)
result = self.variable[drop_from_front:]
result = post_type_work(result, original_type)
return PyKot(result, True)
def drop_last(self, drop_from_back: int): # dropLast(n)
raise_type_error_if_merited("drop_last(Int)", self.variable, str, list, tuple, type(np.array([])))
self.variable, original_type = pre_type_work(self.variable)
result = self.variable[:(len(self.variable) - drop_from_back)]
result = post_type_work(result, original_type)
return PyKot(result, True)
def drop_while(self, it_expression): # dropWhile(it expression)
raise_type_error_if_merited("drop_while(it expression)", self.variable, str, list, tuple, type(np.array([])))
self.variable, original_type = pre_type_work(self.variable)
while it_expression.in_line_function(self.variable[0]):
self.variable = self.variable[1:]
result = post_type_work(self.variable, original_type)
return PyKot(result, True)
def drop_last_while(self, it_expression): # dropLastWhile(it expression)
raise_type_error_if_merited("drop_last_while(it expression)",
self.variable, str, list, tuple, type(np.array([])))
self.variable, original_type = pre_type_work(self.variable)
while it_expression.in_line_function(self.variable[-1]):
self.variable = self.variable[:-1]
result = post_type_work(self.variable, original_type)
return PyKot(result, True)
def take(self, take_from_front: int): # take(n)
raise_type_error_if_merited("take(Int)", self.variable, str, list, tuple, type(np.array([])))
self.variable, original_type = pre_type_work(self.variable)
result = self.variable[:take_from_front]
result = post_type_work(result, original_type)
return PyKot(result, True)
def take_last(self, take_from_back: int): # take_last(n)
raise_type_error_if_merited("take_last(Int)", self.variable, str, list, tuple, type(np.array([])))
self.variable, original_type = pre_type_work(self.variable)
result = self.variable[len(self.variable) - take_from_back:]
result = post_type_work(result, original_type)
return PyKot(result, True)
def take_while(self, it_expression): # take_while(it expression)
raise_type_error_if_merited("take_while(it expression)", self.variable, str, list, tuple, type(np.array([])))
self.variable, original_type = pre_type_work(self.variable)
if type_compliance(self.variable, str):
result = ''
while it_expression.in_line_function(self.variable[0]):
result += self.variable[0]
self.variable = self.variable[1:]
else:
result = []
while it_expression.in_line_function(self.variable[0]):
result.append(self.variable[0])
self.variable = self.variable[1:]
result = post_type_work(result, original_type)
return PyKot(result, True)
def take_last_while(self, it_expression): # take_last_while(it expression)
raise_type_error_if_merited("take_last_while(it expression)",
self.variable, str, list, tuple, type(np.array([])))
self.variable, original_type = pre_type_work(self.variable)
if type_compliance(self.variable, str):
result = ''
while it_expression.in_line_function(self.variable[-1]):
result += self.variable[-1]
self.variable = self.variable[:-1]
else:
result = []
while it_expression.in_line_function(self.variable[-1]):
result.append(self.variable[-1])
self.variable = self.variable[:-1]
result = post_type_work(result, original_type)
return PyKot(result, True)
def length(self): # length()
raise_type_error_if_merited("length()", self.variable, str, list, tuple, type(np.array([])))
return PyKot(len(self.variable))
def first(self): # first()
raise_type_error_if_merited("first()", self.variable, str, list, tuple, type(np.array([])))
return PyKot(self.variable[0], True)
def last(self): # last()
raise_type_error_if_merited("last()", self.variable, str, list, tuple, type(np.array([])))
return PyKot(self.variable[-1], True)
def trim_margin(self, margin="|"): # trimMargin(margin)
raise_type_error_if_merited("trim_margin(margin='|')", self.variable, str)
return PyKot(self.variable[(self.variable.find(margin) + len(margin)):], True)
def compare_to(self, comparison: str, ignorecase=False): # compareTo(String, ignorecase=False)
self.variable, original_type = pre_type_work(self.variable)
comparison, original_type_comparison = pre_type_work(comparison)
if type_compliance(self.variable, dict):
self.variable = tuple(self.variable)
if type_compliance(comparison, dict):
comparison = tuple(comparison)
if ignorecase:
self.variable = self.variable.lower()
comparison = comparison.lower()
original = [self.variable, comparison]
sort_compare = [self.variable, comparison]
sort_compare.sort()
sort_compare = -1 if sort_compare == original else 1
return PyKot(0 if self.variable == comparison else sort_compare)
def sub_string(self, first_index, second_index): # subString(i, j)
raise_type_error_if_merited("sub_string(Int, Int)", self.variable, str)
first_index, valid1, second_index, valid2 = unwrap_it(first_index, second_index)
if valid1:
first_index = first_index(self.variable)
if valid2:
second_index = second_index(self.variable)
return PyKot(self.variable[first_index: second_index], True)
def split(self, delimiter=' ', *additional_delimiters, ignorecase=False): # split(delimiter) or
# split(delimiter, ignorecase=True) or split(delimiter.toRegex()) or split(regex(delimiter))
raise_type_error_if_merited("split(delimiter=' ', *additional_delimiters, ignorecase=False)",
self.variable, str)
if ignorecase:
string = self.variable.lower()
delimiter_list = [delimiter.lower()] + [d.lower() for d in additional_delimiters]
else:
string = self.variable
delimiter_list = [delimiter] + [d for d in additional_delimiters]
if type_compliance(delimiter, type(re.compile(''))):
result = re.split(delimiter, self.variable)
else:
delimiter_indexes = []
found = 0
for delimiter in delimiter_list:
while found != -1 and (len(string) - found) >= len(delimiter):
found = string.find(delimiter, found, len(string) - 1)
if found == -1:
continue
delimiter_indexes.append(found)
found += len(delimiter)
delimiter_indexes.append(found)
found = 0
delimiter_indexes.append(0)
delimiter_indexes.sort()
delimiter_indexes.append(-1)
di = iter(delimiter_indexes)
delimiter_indexes = list(zip(di, di))
result = [self.variable[i:] if j == -1 else self.variable[i: j] for i, j in delimiter_indexes]
return PyKot(tuple(result), True)
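    # Hedged illustration (not in the original class): split() gathers every
    # delimiter position and slices the string between them, so
    #   PyKot('a,b;c').split(',', ';').var   # -> ('a', 'b', 'c')
    # while a compiled-regex delimiter is handled by re.split instead.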
def sub_sequence(self, first_index: int, second_index: int): # subSequence(i, j)
raise_type_error_if_merited("sub_string(Int, Int)", self.variable, str)
first_index, valid1, second_index, valid2 = unwrap_it(first_index, second_index)
if valid1:
first_index = first_index(self.variable)
if valid2:
second_index = second_index(self.variable)
return PyKot(self.variable[first_index: second_index], True)
def lines(self): # lines()
raise_type_error_if_merited("lines()", self.variable, str)
return PyKot(self.variable.splitlines(), True)
def capitalize(self): # capitalize()
raise_type_error_if_merited("capitalize()", self.variable, str)
return PyKot(self.variable.capitalize(), True)
def to_regex(self): # toRegex()
raise_type_error_if_merited("to_regex()", self.variable, str)
return re.compile(self.variable)
def replace(self, old_value: str, new_value: str, ignorecase=False): # replace(old, new, ignorecase=False)
raise_type_error_if_merited("replace(String, String, ignorecase=False)", self.variable, str)
if ignorecase:
find_index = self.variable.lower().find(old_value.lower())
if find_index == -1:
return PyKot(self.variable, True)
return PyKot(self.variable[:find_index] + new_value + self.variable[(find_index + len(old_value)):], True)
return PyKot(self.variable.replace(old_value, new_value), True)
def ends_with(self, substring): # endsWith(substring)
raise_type_error_if_merited("ends_with(String)", self.variable, str, list, tuple, type(np.array([])))
if type_compliance(self.variable, str):
result = True if self.variable[-len(substring):] == substring else False
else:
self.variable = unpack_array(self.variable)
result = True
for element in self.variable:
if not type_compliance(element, str):
raise TypeError("All elements in iterable must be a String to use ends_with()")
if result:
result = True if element[-len(substring):] == substring else False
return PyKot(result, True)
def plus(self, string_or_int): # plus(String) or plus(Int)
raise_type_error_if_merited("plus(String) or plus(Int)", self.variable, str, int)
if type_compliance(self.variable, str) and type_compliance(string_or_int, int):
string_or_int = str(string_or_int)
elif type_compliance(self.variable, int) and type_compliance(string_or_int, str):
string_or_int = int(string_or_int)
return PyKot(self.variable + string_or_int, True)
def get(self, index): # get()
raise_type_error_if_merited("get(Int)", self.variable, str, list, tuple, type(np.array([])), dict)
if isinstance(self.variable[index], type(np.array([1])[0])):
result = int(self.variable[index])
elif isinstance(self.variable[index], type(np.array([1.0])[0])):
result = float(self.variable[index])
elif isinstance(self.variable, dict):
result = self.variable[index] if index in self.variable.keys() else None
else:
result = self.variable[index]
return PyKot(result, True)
def to_string(self): # toString()
raise_type_error_if_merited("to_string()", self.variable, str, int, list, tuple, range, dict)
if isinstance(self.variable, str):
result = self.variable
else:
result = str(self.variable)
return PyKot(result, True)
def content_to_string(self): # contentToString()
raise_type_error_if_merited("content_to_string()", self.variable, list, tuple, type(np.array([])))
return PyKot(str([x for x in self.variable]), True)
def any(self, predicate=None): # any(predicate)
raise_type_error_if_merited("any(), any(value), or any(predicate)",
self.variable, list, tuple, dict, type(np.array([])))
result = unpack_array(self.variable)
if type_compliance(predicate, type(it())):
predicate = predicate.in_line_function
if type_compliance(self.variable, dict):
if not type_compliance(predicate, str, int):
result = True if len(list(filter(predicate, self.variable.items()))) > 0 else False
else:
if not type_compliance(predicate, str, int):
result = True if len(list(filter(predicate, result))) > 0 else False
if type_compliance(predicate, str, int):
if type_compliance(self.variable, dict):
result = True if predicate in self.variable.keys() else False
else:
result = True if predicate in self.variable else False
if predicate is None:
if self.variable:
result = True
else:
result = False
return PyKot(result, True)
    def none(self):  # none()
        raise_type_error_if_merited("none()",
                                    self.variable, list, tuple, dict, type(np.array([])))
return PyKot(False if unpack_array(self.variable) else True, True)
def to_list(self): # toList()
raise_type_error_if_merited("to_list()", self.variable, list, tuple, dict, type(np.array([])))
if type_compliance(self.variable, tuple):
result = self.variable
elif type_compliance(self.variable, dict):
result = tuple([(key, self.variable[key]) for key in self.variable.keys()])
else:
result = tuple(self.variable)
return PyKot(result, True)
def to_mutable_list(self): # toMutableList()
raise_type_error_if_merited("to_mutable_list()", self.variable, list, tuple, dict, type(np.array([])))
if isinstance(self.variable, tuple):
result = list(self.variable)
elif type_compliance(self.variable, dict):
result = [(key, self.variable[key]) for key in self.variable.keys()]
elif type_compliance(self.variable, type(np.array([]))):
result = [x for x in unpack_array(self.variable)]
else:
result = self.variable
return PyKot(result, True)
def contains(self, element): # contains(element)
raise_type_error_if_merited("contains()", self.variable, list, tuple, dict, type(np.array([])))
if isinstance(self.variable, dict):
return PyKot(element in self.variable.keys(), True)
return PyKot(element in self.variable, True)
def filter(self, predicate): # filter(predicate)
raise_type_error_if_merited("filter(function)", self.variable, list, tuple, dict, type(np.array([])))
predicate = predicate.in_line_function
if type_compliance(self.variable, dict):
new_map = dict(tuple(filter(predicate, self.variable.items())))
result = new_map
else:
result = list(filter(predicate, self.variable))
return PyKot(result, True)
def filter_not(self, predicate): # filterNot(predicate)
raise_type_error_if_merited("filter_not(function)", self.variable, list, tuple, dict, type(np.array([])))
predicate = predicate.in_line_function
if type_compliance(self.variable, dict):
new_map = {}
do_not_include = list(filter(predicate, self.variable.items()))
do_not_include = [x for x, y in do_not_include]
for key in self.variable.keys():
if key not in do_not_include:
new_map[key] = self.variable[key]
result = new_map
else:
new_list = []
do_not_include = list(filter(predicate, self.variable))
for value in [unpack_array_element(x) for x in self.variable]:
if value not in do_not_include:
new_list.append(value)
result = new_list
return PyKot(result, True)
def filter_indexed(self, predicate): # filter_indexed(predicate)
raise_type_error_if_merited("filter_indexed(predicate)", self.variable, list, tuple, type(np.array([])))
raise_type_error_if_merited("filter_indexed()", predicate, type(lambda x: x))
return PyKot([y for x, y in enumerate(unpack_array(self.variable)) if predicate(x, y)], True)
def filter_not_null(self): # filter_not_null()
raise_type_error_if_merited("filter_not_null()", self.variable, list, tuple, type(np.array([])))
return PyKot([x for x in unpack_array(self.variable) if x is not None])
def filter_is_instance(self, acceptable_type): # filter_is_instance(type)
raise_type_error_if_merited("filter_is_instance(acceptable_type)",
self.variable, list, tuple, type(np.array([])))
return PyKot([x for x in unpack_array(self.variable) if type(x) == acceptable_type])
def partition(self, predicate): # partition(predicate)
raise_type_error_if_merited("partition(predicate)", self.variable, list, tuple, type(np.array([])))
if type_compliance(predicate, type(it())):
predicate = predicate.in_line_function
match = []
rest = []
for element in unpack_array(self.variable):
if predicate(element):
match.append(element)
else:
rest.append(element)
return PyKot((tuple(match), tuple(rest)), True)
def for_each(self, *statements): # forEach( statements )
raise_type_error_if_merited("for_each(*statements)", self.variable, list, tuple, type(np.array([])), dict)
if type_compliance(self.variable, dict):
useful_list = [PyKot(self.variable[x]) for x in self.variable.keys()]
for value in useful_list:
for statement in statements:
statement(value)
else:
useful_list = [PyKot(unpack_array_element(x)) for x in self.variable]
for value in useful_list:
for statement in statements:
statement(value)
return PyKot(self.variable, True)
def also(self, *statements): # also( statements )
raise_type_error_if_merited("also(*statements)", self.variable,
str, int, range, list, tuple, type(np.array([])), dict)
if type_compliance(self.variable, dict):
useful_list = [PyKot(self.variable[x]) for x in self.variable.keys()]
for value in useful_list:
for statement in statements:
statement(value)
elif type_compliance(self.variable, range, list, tuple, type(np.array([]))):
useful_list = [PyKot(unpack_array_element(x)) for x in self.variable]
for value in useful_list:
for statement in statements:
statement(value)
elif type_compliance(self.variable, str, int):
for statement in statements:
statement(self.variable)
return PyKot(self.variable, True)
def let(self, *statements): # let( statements )
raise_type_error_if_merited("let(*statements)", self.variable,
str, int, range, list, tuple, type(np.array([])), dict, type(None))
if self.variable is None:
return PyKot(self.variable, True)
if type_compliance(self.variable, dict):
useful_list = [PyKot(self.variable[x]) for x in self.variable.keys()]
for value in useful_list:
for statement in statements:
statement(value)
elif type_compliance(self.variable, range, list, tuple, type(np.array([]))):
useful_list = [PyKot(unpack_array_element(x)) for x in self.variable]
for value in useful_list:
for statement in statements:
statement(value)
elif type_compliance(self.variable, str, int):
for statement in statements:
statement(self.variable)
return PyKot(self.variable, True)
def find(self, predicate): # find(predicate)
raise_type_error_if_merited("find(predicate)", self.variable, list, tuple, type(np.array([])))
predicate = predicate.in_line_function
found = list(filter(predicate, self.variable))
if len(found) == 0:
return PyKot(None, True)
return PyKot(found[0], True)
def find_last(self, predicate): # findLast(predicate)
raise_type_error_if_merited("find_last(predicate)", self.variable, list, tuple, type(np.array([])))
predicate = predicate.in_line_function
found = list(filter(predicate, self.variable))
if len(found) == 0:
return PyKot(None, True)
return PyKot(found[-1], True)
def with_index(self): # withIndex()
raise_type_error_if_merited("with_index()", self.variable, list, tuple, type(np.array([])))
new_variable = [(i, e) for i, e in enumerate(self.variable)]
if type_compliance(self.variable, list):
return PyKot(new_variable, True)
return PyKot(tuple(new_variable), True)
def grouping_by(self, predicate): # groupingBy(it expression)
raise_type_error_if_merited("grouping_by(predicate)", self.variable, list, tuple, type(np.array([])))
predicate = predicate.in_line_function
output_map = {}
for element in self.variable:
if predicate(element) in output_map:
output_map[predicate(element)] = output_map[predicate(element)] + [element]
continue
output_map[predicate(element)] = [element]
return PyKot(output_map, True)
def size(self): # .size()
raise_type_error_if_merited("size()", self.variable, list, tuple, type(np.array([])), dict)
if type_compliance(self.variable, dict):
return len(self.variable.items())
return PyKot(len(self.variable), True)
def min_or_null(self): # minOrNull()
raise_type_error_if_merited("min_or_null()", self.variable, list, tuple, type(np.array([])))
if len(self.variable) == 0:
return PyKot(None, True)
useful_list = [unpack_array_element(x) for x in self.variable]
useful_list.sort()
return PyKot(useful_list[0], True)
def min_by_or_null(self, predicate): # minByOrNull(predicate)
raise_type_error_if_merited("min_by_or_null()", self.variable, list, tuple, type(np.array([])))
predicate = predicate.in_line_function
useful_list = list(filter(predicate, [unpack_array_element(x) for x in self.variable]))
if len(useful_list) == 0:
return PyKot(None, True)
if len(useful_list) == len(self.variable):
useful_list.sort(key=predicate)
else:
useful_list.sort()
return PyKot(useful_list[0], True)
def max_or_null(self): # maxOrNull()
raise_type_error_if_merited("max_or_null()", self.variable, list, tuple, type(np.array([])))
if len(self.variable) == 0:
return PyKot(None, True)
useful_list = [unpack_array_element(x) for x in self.variable]
useful_list.sort()
return PyKot(useful_list[-1], True)
def max_by_or_null(self, predicate): # maxByOrNull(predicate)
raise_type_error_if_merited("max_by_or_null()", self.variable, list, tuple, type(np.array([])))
predicate = predicate.in_line_function
useful_list = list(filter(predicate, [unpack_array_element(x) for x in self.variable]))
if len(useful_list) == 0:
return PyKot(None, True)
if len(useful_list) == len(self.variable):
useful_list.sort(key=predicate)
else:
useful_list.sort()
return PyKot(useful_list[-1], True)
def average(self): # average()
raise_type_error_if_merited("max_or_null()", self.variable, list, tuple, type(np.array([])))
useful_list = [x for x in self.variable]
return PyKot(int(sum(useful_list) / len(useful_list)), True)
def sum(self): # sum()
raise_type_error_if_merited("max_or_null()", self.variable, list, tuple, type(np.array([])))
return PyKot(int(sum([x for x in self.variable])), True)
def count(self): # count()
raise_type_error_if_merited("max_or_null()", self.variable, list, tuple, type(np.array([])))
return PyKot(len([x for x in self.variable]), True)
def add(self, element): # add(element)
raise_type_error_if_merited("add()", self.variable, list)
self.variable.append(element)
return PyKot(self.variable, True)
def add_all(self, *args): # addAll(elements)
raise_type_error_if_merited("add_all(element) or add_all(element, ..., element)", self.variable, list)
self.variable += [arg for arg in args]
return PyKot(self.variable, True)
def keys(self): # keys()
raise_type_error_if_merited("keys()", self.variable, dict)
return PyKot(self.variable.keys(), True)
def values(self): # values()
raise_type_error_if_merited("values()", self.variable, dict)
return PyKot(self.variable.values(), True)
def each_count(self): # eachCount()
raise_type_error_if_merited("each_count()", self.variable, dict)
output_map = dict([(key, len(self.variable[key]))
                           if type_compliance(self.variable[key], list, tuple, type(np.array([])))
                           else (key, 1)  # assumed completion: non-iterable values count as 1
                           for key in self.variable.keys()])
        return PyKot(output_map, True)

import numpy as np
# Constant
def Constant_vit():
# (1,7)-RLL constraint, 4 states, 4 error propagations
# Encoder_Dict[a][b]: a stands for each state, b stands for (1 - input tags, 2 - output words, 3 - next state)
encoder_dict = {
1 : {
'input' : np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
'output' : np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0], [1, 0, 0]]),
'next_state' : np.array([[1], [2], [3], [3]])
},
2 : {
'input' : np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
'output' : np.array([[1, 0, 0], [1, 0, 0], [1, 0, 1], [1, 0, 1]]),
'next_state' : np.array([[1], [2], [3], [4]])
},
3 : {
'input' : np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
'output' : np.array([[0, 0, 0], [0, 0, 0], [0, 0, 1], [0, 0, 1]]),
'next_state' : np.array([[1], [2], [3], [4]])
},
4 : {
'input' : np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
'output' : np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 0, 0]]),
'next_state' : np.array([[1], [2], [3], [3]])
}
}
encoder_definite = {'m' : 0, 'a' : 2}
sbd_dict = {
'i1' : ['00x', '01x', '1xx', '20x', '21x', '40x', '41x', '5xx'],
'i2' : ['042', '044', '045', '05x', '101', '100', '242', '244', '245', '25x', '40x',
'41x', '442', '444', '445', '45x', '500', '501', '52x', '12x', '00x', '01x'],
'list' : {
0 : np.array([[0, 0, 0]]), 1 : np.array([[0, 0, 1]]), 2 : np.array([[0, 1, 0]]),
4 : np.array([[1, 0, 0]]), 5 : np.array([[1, 0, 1]])
},
'num_list' : np.array([[0, 1, 2, 4, 5]])
}
# channel state machine
channel_dict = {
'state_machine' : np.array([
[0, 0], [0, 1], [1, 2], [2, 3], [2, 4], [3, 7], [4, 8], [4, 9],
[5, 0], [5, 1], [6, 2], [7, 5], [7, 6], [8, 7], [9, 8], [9, 9]
]),
'in_out' : np.array([
[0, 0], [1, 1], [1, 3], [0, 2], [1, 3], [0, -2], [0, 0], [1, 1],
[0, -1], [1, 0], [1, 2], [0, -3], [1, -2], [0, -3], [0, -1], [1, 0]
]),
'state_label' : np.array([
[0, 0, 0, 0, 0], [0, 0, 0, 1, 1], [0, 0, 1, 1, 2],
[0, 1, 1, 0, 3], [0, 1, 1, 1, 4], [1, 0, 0, 0, 5],
[1, 0, 0, 1, 6], [1, 1, 0, 0, 7], [1, 1, 1, 0, 8], [1, 1, 1, 1, 9]
]),
'num_state' : 10,
'ini_state' : 0
}
channel_in_out_norm = np.zeros(channel_dict['in_out'].shape)
channel_in_out_norm[:, 0] = channel_dict['in_out'][:, 0]
channel_in_out_norm[:, 1] = channel_dict['in_out'][:, 1] / np.sqrt(10)
channel_dict['in_out'] = channel_in_out_norm
# List of dummy bits paths
dummy_dict = {
0 : np.array([[0, 0, 0, 0, 0]]), 1 : np.array([[2, 3, 7, 5, 0]]),
2 : np.array([[3, 7, 5, 0, 0]]), 3 : np.array([[7, 5, 0, 0, 0]]),
4 : np.array([[8, 7, 5, 0, 0]]), 5 : np.array([[0, 0, 0, 0, 0]]),
6 : np.array([[2, 3, 7, 5, 0]]), 7 : np.array([[5, 0, 0, 0, 0]]),
8 : np.array([[7, 5, 0, 0, 0]]), 9 : | np.array([[8, 7, 5, 0, 0]]) | numpy.array |
"""Test the derivative coupling calculation."""
import os
import shutil
from typing import Sequence
import numpy as np
from assertionlib import assertion
from nanoqm.common import DictConfig, is_data_in_hdf5, retrieve_hdf5_data
from nanoqm.workflows.input_validation import process_input
from nanoqm.workflows.workflow_coupling import workflow_derivative_couplings
from .utilsTest import PATH_TEST, remove_files
def test_fast_couplings(tmp_path):
"""Check the derivative couplings workflow"""
run_derivative_coupling(tmp_path, 'input_fast_test_derivative_couplings.yml')
def test_unrestricted_alphas(tmp_path):
"""Test the derivative coupling for the alphas spin orbitals."""
run_derivative_coupling(tmp_path, 'input_couplings_alphas.yml', "alphas")
def test_unrestricted_betas(tmp_path):
"""Test the derivative coupling for the alphas spin orbitals."""
run_derivative_coupling(tmp_path, 'input_couplings_both.yml', "both")
def run_derivative_coupling(tmp_path: str, input_file: str, orbitals_type: str = "") -> None:
"""Check that the couplings run."""
path_input = PATH_TEST / input_file
config = process_input(path_input, 'derivative_couplings')
config["scratch_path"] = tmp_path
tmp_hdf5 = os.path.join(tmp_path, 'fast_couplings.hdf5')
shutil.copy(config.path_hdf5, tmp_hdf5)
config['path_hdf5'] = tmp_hdf5
config['write_overlaps'] = True
try:
check_results(config, tmp_hdf5, orbitals_type)
# Run the calculation again to test that the data is read from the hdf5
check_results(config, tmp_hdf5, orbitals_type)
finally:
remove_files()
def check_results(config: DictConfig, tmp_hdf5: str, orbitals_type: str) -> None:
"""Check the computed results stored in the HDF5 file."""
if orbitals_type != "both":
hamiltonians, _ = workflow_derivative_couplings(config)
check_couplings(config, tmp_hdf5, orbitals_type)
check_hamiltonians(hamiltonians)
else:
result_alphas, result_betas = workflow_derivative_couplings(config)
check_couplings(config, tmp_hdf5, "alphas")
check_couplings(config, tmp_hdf5, "betas")
check_hamiltonians(result_alphas[0])
check_hamiltonians(result_betas[0])
def check_couplings(config: DictConfig, tmp_hdf5: str, orbitals_type: str) -> None:
"""Check that the couplings have meaningful values."""
def create_paths(keyword: str) -> list:
return [os.path.join(orbitals_type, f'{keyword}_{x}')
for x in range(len(config.geometries) - 1)]
overlaps = create_paths('overlaps')
couplings = create_paths('coupling')
    # Check that couplings and overlaps exist
assertion.truth(is_data_in_hdf5(tmp_hdf5, overlaps))
assertion.truth(is_data_in_hdf5(tmp_hdf5, couplings))
    # All the elements are finite (no infinity or NaN)
tensor_couplings = np.stack(retrieve_hdf5_data(tmp_hdf5, couplings))
assertion.truth(np.isfinite(tensor_couplings).all())
    # Check that the couplings are anti-symmetric
for mtx in tensor_couplings[:]:
assertion(np.allclose(mtx, -mtx.T))
    # Check that there are no NaNs
assertion.truth(not np.all(np.isnan(tensor_couplings)))
def check_hamiltonians(hamiltonians: Sequence[str]) -> None:
"""Check that the hamiltonians were written correctly."""
energies = np.stack([np.diag(np.loadtxt(ts[1])) for ts in hamiltonians])
couplings = np.stack([np.loadtxt(ts[0]) for ts in hamiltonians])
# check that energies and couplings are finite values
    assertion.truth(np.isfinite(energies).all())
    assertion.truth(np.isfinite(couplings).all())

# Copyright (C) 2018 <NAME>
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import numpy as np
from phonopy.harmonic.force_constants import distribute_force_constants
def get_fc2(supercell,
primitive,
disp_dataset,
atom_list=None,
log_level=0):
lattice = supercell.get_cell().T
positions = supercell.get_scaled_positions()
numbers = supercell.get_atomic_numbers()
natom = len(numbers)
disps, forces = _collect_disps_and_forces(disp_dataset)
p2s_map = primitive.p2s_map
p2p_map = primitive.p2p_map
if log_level:
print("-------------------------------"
" ALM FC2 start "
"------------------------------")
print("ALM by <NAME>, https://github.com/ttadano/ALM")
if log_level == 1:
print("Use -v option to watch detailed ALM log.")
try:
from alm import ALM
except ImportError:
raise ModuleNotFoundError("ALM python module was not found.")
sys.stdout.flush()
with ALM(lattice, positions, numbers) as alm:
if log_level > 0:
log_level_alm = log_level - 1
else:
log_level_alm = 0
alm.set_verbosity(log_level_alm)
alm.define(1)
alm.set_displacement_and_force(disps, forces)
alm.optimize()
if (atom_list == p2s_map).all():
fc2 = np.zeros((len(p2s_map), natom, 3, 3),
dtype='double', order='C')
for fc, indices in zip(*alm.get_fc(1, mode='origin')):
v1, v2 = indices // 3
c1, c2 = indices % 3
fc2[p2p_map[v1], v2, c1, c2] = fc
        elif atom_list is None or (atom_list == np.arange(natom)).all():
fc2 = np.zeros((natom, natom, 3, 3), dtype='double', order='C')
for fc, indices in zip(*alm.get_fc(1, mode='all')):
v1, v2 = indices // 3
c1, c2 = indices % 3
fc2[v1, v2, c1, c2] = fc
else: # This case would not happen.
            fc2 = np.zeros((natom, natom, 3, 3), dtype='double', order='C')

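
# Hedged usage sketch (not part of the original module): `supercell`, `primitive`
# and `disp_dataset` are hypothetical phonopy objects; the call follows the
# signature documented above.
#
#   fc2 = get_fc2(supercell, primitive, disp_dataset,
#                 atom_list=primitive.p2s_map, log_level=1)
#   # expected shape: (len(primitive.p2s_map), natom, 3, 3)
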
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Class to do trained model inference in beam."""
import importlib
import os
import struct
import subprocess as sp
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
# LDIF is an internal package, should be imported last.
# pylint: disable=g-bad-import-order
from ldif.datasets import preprocess
from ldif.datasets import shapenet
from ldif.inference import experiment as experiments
from ldif.inference import extract_mesh
from ldif.inference import metrics
from ldif.model import model as sdf_model
from ldif.representation import structured_implicit_function
from ldif.util import camera_util
from ldif.util import file_util
from ldif.util import gaps_util
from ldif.util import geom_util
from ldif.util import geom_util_np
from ldif.util import gpu_util
from ldif.util import path_util
from ldif.util import py_util
from ldif.util import sdf_util
from ldif.util import np_util
from ldif.util.file_util import log
# pylint: enable=g-bad-import-order
importlib.reload(extract_mesh)
importlib.reload(structured_implicit_function)
importlib.reload(sdf_model)
importlib.reload(geom_util)
class TrainedNetwork(object):
"""A base class for all networks trained in XManager."""
def __init__(self, job, ckpt, use_gpu, **kwargs): # pylint: disable=unused-argument
self.job = job
self.ckpt = ckpt
self.graph = tf.Graph()
self.use_gpu = use_gpu
@classmethod
def from_experiment(cls,
experiment,
xid,
ckpt_idx,
use_temp_ckpts=None,
overrides=None,
use_gpu=True,
**kwargs):
"""Instantiates a TrainedNetwork from an experiment object."""
job = experiment.job_from_xmanager_id(xid, must_be_visible=True)
if use_temp_ckpts is not None:
job.set_use_temp_ckpts(use_temp_ckpts)
if overrides is not None:
for k, v in overrides.items():
setattr(job.model_config.hparams, k, v)
if ckpt_idx == 0:
log.error('Please select a checkpoint and rerun. Valid checkpoints:')
log.error(str(job.all_checkpoint_indices))
return
must_equal = ckpt_idx != -1
ckpt = job.latest_checkpoint_before(ckpt_idx, must_equal=must_equal)
log.info(f'Loading checkpoint {ckpt.abspath}')
return cls(job, ckpt, use_gpu, **kwargs)
@classmethod
def from_modeldir(cls,
model_directory,
model_name,
experiment_name,
xid,
ckpt_idx,
overrides=None,
use_temp_ckpts=True,
use_gpu=True,
**kwargs):
"""Creates a TrainedModel from a model directory root and name."""
experiment = experiments.Experiment(model_directory, model_name,
experiment_name)
return cls.from_experiment(experiment, xid, ckpt_idx, use_temp_ckpts,
overrides, use_gpu, **kwargs)
@classmethod
def from_identifiers(cls,
user,
model_name,
experiment_name,
xid,
ckpt_idx,
overrides=None,
use_temp_ckpts=None,
charged_user='viscam',
use_gpu=True,
**kwargs):
"""Creates a trained network from experiment identifiers."""
raise ValueError('No longer supported.')
def restore(self):
"""Creates a session with restored model variables."""
with self.graph.as_default():
if self.use_gpu:
# For now these are disabled since it is difficult to work on
# all GPUs.
#allowable_frac = gpu_util.get_allowable_fraction_without(
# mem_to_reserve=1024 + 512, cuda_device_index=0) # ~1GB
#gpu_options = tf.GPUOptions(
# per_process_gpu_memory_fraction=allowable_frac)
#config = tf.ConfigProto(gpu_options=gpu_options)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
else:
config = tf.ConfigProto(device_count={'GPU': 0})
self.session = tf.Session(config=config)
saver = tf.train.Saver()
saver.restore(self.session, self.ckpt.abspath)
def conform_prediction(vector):
"""Forces an arbitrary vector to be a valid (D)SIF."""
vector = vector.copy()
if vector.shape[-1] not in [10, 42]:
raise ValueError('Unimplemented.')
consts, centers, radii_aa, radii_cov = np.split(
vector[..., :10], [1, 4, 7], axis=-1)
consts = np.minimum(consts, 0.0)
radii_aa = np.maximum(radii_aa, 1e-9)
radii_cov = np.clip(radii_cov, -np.pi / 4., np.pi / 4.)
log.verbose(
repr([
x.shape
for x in [consts, centers, radii_aa, radii_cov, vector[..., 10:]]
]))
return np.concatenate(
[consts, centers, radii_aa, radii_cov, vector[..., 10:]], axis=-1)
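
# Hedged usage sketch (not part of the original file): `_example_conform` shows
# how an arbitrary 10-DOF prediction could be clamped into a valid (D)SIF; the
# random input is purely illustrative.
def _example_conform():
  raw = np.random.randn(32, 10).astype(np.float32)
  return conform_prediction(raw)
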
class SingleViewDepthEncoder(TrainedNetwork):
"""Maps from a single depth image (max-0) to a shape representation."""
def __init__(self, job, ckpt, use_gpu, **kwargs):
super(SingleViewDepthEncoder, self).__init__(job, ckpt, use_gpu, **kwargs)
with self.graph.as_default():
model_config = self.job.model_config
model_config.inputs = shapenet.build_placeholder_interface(
model_config, proto='ShapeNetOneImXyzPC')
training_example = preprocess.preprocess(model_config)
self.depth_input = model_config.inputs['dataset'].depth_render
self.xyz_input = model_config.inputs['dataset'].xyz_render
self.points_input = model_config.inputs['dataset'].surface_point_samples
training_example = preprocess.preprocess(model_config)
observation = sdf_model.Observation(model_config, training_example)
imp_net = sdf_model.StructuredImplicitModel(model_config, 'imp_net')
prediction = imp_net.forward(observation)
structured_implicit = prediction.structured_implicit
self.packed_vector = structured_implicit.vector
self.restore()
def run(self, depth, points, xyz):
"""Runs the network on the input data, returning a (D)SIF."""
h, w = np.squeeze(depth).shape
depth = np.reshape(depth, [1, h, w, 1])
points = np.reshape(points, [1, 10000, 6])
xyz = np.reshape(xyz, [1, h, w, 3])
with self.graph.as_default():
packed_vector = self.session.run(
self.packed_vector,
feed_dict={
self.depth_input: depth,
self.points_input: points,
self.xyz_input: xyz
})
packed_vector = np.reshape(packed_vector,
[self.job.model_config.hparams.sc, -1])
return packed_vector
def run_example(self, ex):
return self.run(ex.max_depth_224[0, ...] * 1000.0,
ex.get_max_world_pts_from_idx(0), ex.max_world_xyz_224[0,
...])
def run_example_bts(self, ex):
return self.run(ex.bts_depth_224[0, ...] * 1000.0,
ex.get_bts_world_pts_from_idx(0), ex.bts_world_xyz_224[0,
...])
class DepthEncoder(TrainedNetwork):
"""Maps from a dodecahedron of depth images to shape elements."""
def __init__(self, job, ckpt, use_gpu, **kwargs):
super(DepthEncoder, self).__init__(job, ckpt, use_gpu, **kwargs)
with self.graph.as_default():
model_config = self.job.model_config
model_config.hparams.bs = 1
model_config.inputs = shapenet.build_placeholder_interface(model_config)
training_example = preprocess.preprocess(model_config)
self.depth_input = model_config.inputs['dataset'].depth_renders
self.points_input = model_config.inputs['dataset'].surface_point_samples
self.nss_input = model_config.inputs['dataset'].near_surface_samples
training_example = preprocess.preprocess(model_config)
if hasattr(training_example, '_tx'):
self.tx = training_example._tx
else:
self.tx = None
observation = sdf_model.Observation(model_config, training_example)
imp_net = sdf_model.StructuredImplicitModel(model_config, 'imp_net')
prediction = imp_net.forward(observation)
structured_implicit = prediction.structured_implicit
self.packed_vector = structured_implicit.vector
# *phew* we have set up the graph... now we need to pull the weights.
self.restore()
def run(self, dodeca, points, nss=None):
"""Runs the network on the input data, returning a (D)SIF."""
dodeca = np.reshape(dodeca, [1, 20, 224, 224, 1])
points = np.reshape(points, [1, 10000, 6])
with self.graph.as_default():
feed_dict = {self.depth_input: dodeca, self.points_input: points}
if nss is not None:
feed_dict[self.nss_input] = np.reshape(nss, [1, 100000, 4])
if self.tx is not None:
packed_vector, tx = self.session.run([self.packed_vector, self.tx],
feed_dict=feed_dict)
else:
packed_vector = self.session.run(
self.packed_vector, feed_dict=feed_dict)
packed_vector = np.reshape(packed_vector,
[self.job.model_config.hparams.sc, -1])
if self.tx is not None:
return packed_vector, np.reshape(tx, [4, 4])
return packed_vector
def run_example(self, ex):
return self.run(ex.depth_images, ex.precomputed_surface_samples_from_dodeca)
class Decoder(TrainedNetwork):
"""A SIF -> Mesh decoder."""
def __init__(self, job, ckpt, use_gpu, **kwargs):
super(Decoder, self).__init__(job, ckpt, use_gpu, **kwargs)
with self.graph.as_default():
self.sif_input = tf.placeholder(tf.float32, self.batched_vector_shape)
# TODO(kgenova) Maybe the net should be handled entirely by the structured
# implicit function? Although there is a difference between the network
# that can give a result from a vector and a simple wrapper for models
# that don't need variables. Maybe it's just intelligent about creating
# the net only when really needed.
if 'silence_implicits' in kwargs and kwargs['silence_implicits']:
self.job.model_config.hparams.ipc = 'f'
log.info('Silencing implicits.')
net = sdf_model.StructuredImplicitModel(
self.job.model_config, name='imp_net')
structured_implicit = (
structured_implicit_function.StructuredImplicit.from_packed_vector(
self.job.model_config, self.sif_input, net))
self.structured_implicit = structured_implicit
self.block_res = 32
self.native_point_count = self.block_res**3
self.sample_locations_ph = tf.placeholder(
tf.float32, shape=[self.block_res, self.block_res, self.block_res, 3])
samples = tf.reshape(self.sample_locations_ph, [1, self.block_res**3, 3])
predicted_alg, predicted_locals = structured_implicit.class_at_samples(
samples, apply_class_transfer=False)
predicted_class = sdf_util.apply_class_transfer(
predicted_alg,
self.job.model_config,
soft_transfer=True,
offset=self.job.model_config.hparams.lset)
vol_shape = [self.block_res, self.block_res, self.block_res]
self.predicted_alg_grid = tf.reshape(predicted_alg, vol_shape)
self.predicted_class_grid = tf.reshape(predicted_class, vol_shape)
effective_element_count = (
structured_implicit_function.get_effective_element_count(
self.job.model_config))
self.local_decisions = tf.reshape(predicted_locals[0], [
effective_element_count, self.block_res, self.block_res,
self.block_res
])
self.base_grid = np_util.make_coordinate_grid_3d(
length=self.block_res,
height=self.block_res,
width=self.block_res,
is_screen_space=False,
is_homogeneous=False).astype(np.float32)
self._world2local = structured_implicit.world2local
self._use_inference_kernel = True
# Influence samples
self.true_sample_count = 10000
self.generic_sample_ph = tf.placeholder(
tf.float32, shape=[self.true_sample_count, 3])
self.predicted_influences = structured_implicit.rbf_influence_at_samples(
tf.expand_dims(self.generic_sample_ph, axis=0))
# Optimizer stuff
self.optimizer_pc = 5000
self.optimizer_samples = tf.placeholder(
tf.float32, shape=[self.optimizer_pc, 3])
optimizer_samples = tf.reshape(self.optimizer_samples,
[1, self.optimizer_pc, 3])
self.predicted_class, _ = structured_implicit.class_at_samples(
optimizer_samples)
self.predicted_class = tf.reshape(self.predicted_class,
[self.optimizer_pc, 1])
self.target_class_ph = tf.placeholder(tf.float32, [self.optimizer_pc, 1])
loss = 'crossentropy'
if loss == 'crossentropy':
clipped_pred = tf.clip_by_value(self.predicted_class, 1e-05, 1 - 1e-05)
self.optimizer_elt_loss = tf.where(self.target_class_ph > 0.5,
-tf.log(clipped_pred),
-tf.log(1 - clipped_pred))
elif loss == 'l1':
self.optimizer_elt_loss = tf.abs(self.target_class_ph -
self.predicted_class)
elif loss == 'l2':
self.optimizer_elt_loss = tf.square(self.target_class_ph -
self.predicted_class)
apply_where_agree = True
if not apply_where_agree:
gt_outside = self.target_class_ph > 0.5
pred_outside = self.predicted_class > 0.5
gt_inside = tf.logical_not(gt_outside)
pred_inside = tf.logical_not(pred_outside)
agree = tf.logical_or(
tf.logical_and(gt_outside, pred_outside),
tf.logical_and(gt_inside, pred_inside))
self.optimizer_elt_loss = tf.where_v2(agree, 0.0,
self.optimizer_elt_loss)
self.optimizer_loss = tf.reduce_mean(self.optimizer_elt_loss)
self.ldif_gradients = tf.gradients(self.optimizer_loss, self.sif_input)
# TODO(kgenova) Currently disabled since it's in testing and hardcodes
# some values.
# self.coords_ph = tf.placeholder(tf.float32, shape=[3])
# self.am_image_ph = tf.placeholder(tf.int32, shape=[224, 224])
# pose_cam2world, pose_eye = self._spherical_to_4x4(self.coords_ph)
# self.pose_error = self._evaluate_pose_error(pose_cam2world, pose_eye,
# self.am_image_ph)
# self.pose3_gradients = tf.gradients(self.pose_error, self.coords_ph)
try:
self.restore()
except ValueError:
log.warning('No variables to restore or restoration otherwise failed.')
@property
def unbatched_vector_shape(self):
shape_count = self.job.model_config.hparams.sc
shape_size = structured_implicit_function.element_dof(self.job.model_config)
return [shape_count, shape_size]
@property
def batched_vector_shape(self):
return [1] + self.unbatched_vector_shape
@property
def use_inference_kernel(self):
return self._use_inference_kernel
@use_inference_kernel.setter
def use_inference_kernel(self, should_use):
self._use_inference_kernel = bool(should_use)
# TODO(kgenova) The intermediate vector should really be its own class...
def savetxt(self, sif_vector, path=None, version='v1'):
"""Saves a (D)SIF as ASCII text in the SIF file format.
Args:
sif_vector: A numpy array containing the ldif to write to disk. Has shape
(element_count, element_length).
path: A string containing the path to the file to write to, if provided.
If none, no file is written.
version: A string with the version identifier. Must equal 'v1'.
Returns:
A string encoding of the (D)SIF.
"""
if version == 'v0':
raise ValueError('SIF v0 files are no longer supported.')
elif version == 'v1':
s = self.encode_sif_v1(sif_vector)
else:
raise ValueError(f'Unrecognized SIF file format: {version}.')
if path is not None:
file_util.writetxt(path, s)
return s
def encode_sif_v1(self, sif_vector):
"""Encodes a ldif to a string, and optionally writes it to disk.
A description of the file format:
Line 1: SIF
Line 2: Three ints separated by spaces. In order:
1) The number of blobs.
2) The version ID for the blob types. I added this to be safe since
last time when we updated to add rotation it broke all the old txt
files. For now it will always be zero, which means the following
eleven explicit parameters will be given per blob (in order):
1 constant. float.
3 centers (XYZ). float.
3 radii (XYZ diagonals). float.
3 radii (roll-pitch-yaw rotations). float.
1 symmetry ID type. int. For now it will be either 0 or 1:
Zero: Not symmetric.
One: Left-right (XY-plane) symmetry.
3) The number of implicit parameters per blob. So it will likely
be between 0-256.
After the first two lines, there is a line for each blob.
Each line will have the explicit parameters followed by the implicit
parameters. They are space separated.
Args:
sif_vector: The SIF vector to encode as a np array. Has shape
(element_count, element_length).
Returns:
A string encoding of v in the ldif v1 file format.
"""
sif_vector = sif_vector.copy()
shape_count = sif_vector.shape[-2]
shape_len = sif_vector.shape[-1]
if shape_len == 7:
off_axis = np.zeros([shape_count, 3])
sif_vector = np.concatenate([sif_vector, off_axis], axis=1)
shape_len = 10
explicit_len = 10
implicit_len = shape_len - explicit_len
sif_vector = np.reshape(sif_vector, [shape_count, shape_len])
has_implicits = implicit_len > 0
if not has_implicits:
assert shape_len == 10
implicit_len = 0
sif_vector[:, 4:7] = np.sqrt(np.maximum(sif_vector[:, 4:7], 0))
header = 'SIF\n%i %i %i\n' % (shape_count, 0, implicit_len)
out = header
for row_idx in range(shape_count):
row = ' '.join(10 * ['%.9g']) % tuple(sif_vector[row_idx, :10].tolist())
symmetry = int(row_idx < self.job.model_config.hparams.lyr)
row += ' %i' % symmetry
if has_implicits:
implicit_params = ' '.join(implicit_len * ['%.9g']) % (
tuple(sif_vector[row_idx, 10:].tolist()))
row += ' ' + implicit_params
row += '\n'
out += row
return out
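  # Hedged illustration (not in the original class): for two blobs with no
  # implicit parameters the encoded text starts with
  #   SIF
  #   2 0 0
  # followed by one line per blob holding the ten explicit floats (constant,
  # center xyz, radii xyz, roll-pitch-yaw) and the trailing symmetry flag.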
def render_ellipsoids(self, sif_vector):
"""Renders an ellipsoid image visualizing the (D)SIF RBFs."""
with py_util.py2_temporary_directory() as d:
qpath = d + '/q.txt'
self.savetxt(sif_vector, qpath)
impath = d + '/im.png'
camera = ('1.0451 1.17901 0.630437 '
'-0.614259 -0.695319 -0.373119 '
'-0.547037 0.715996 -0.433705')
with py_util.x11_server():
cmd = '%s/qview %s -camera %s -image %s' % (path_util.gaps_path(),
qpath, camera, impath)
sp.check_output(cmd, shell=True)
im = file_util.read_image(impath)
return im
def interactive_viewer(self, sif_vector, mesh=None):
"""Opens a GAPS viewer that can display the SIF blobs alongside a mesh."""
with py_util.py2_temporary_directory() as d:
qpath = d + '/q.txt'
self.savetxt(sif_vector, qpath)
init_camera = ('1.0451 1.17901 0.630437 '
'-0.614259 -0.695319 -0.373119 '
'-0.547037 0.715996 -0.433705')
mstr = ''
if mesh is not None:
mpath = d + '/m.ply'
file_util.write_mesh(mpath, mesh)
mstr = f' -input_mesh {mpath}'
cmd = f'{path_util.gaps_path()}/qview {qpath} -camera {init_camera}{mstr}'
sp.check_output(cmd, shell=True)
def world2local(self, sif_vector):
if sif_vector.shape[0] != 1:
sif_vector = np.expand_dims(sif_vector, axis=0)
m = self.session.run(
self._world2local, feed_dict={self.sif_input: sif_vector})
return m
def interactive_mesh_viewer(self, sif_vector, resolution):
"""Opens up an OpenGL session viewing the mesh defined by the SIF/LDIF."""
with py_util.py2_temporary_directory() as d:
mpath = d + '/m.ply'
m = self.extract_mesh(sif_vector, resolution)
file_util.write_mesh(mpath, m)
init_camera = ('1.0451 1.17901 0.630437 '
'-0.614259 -0.695319 -0.373119 '
'-0.547037 0.715996 -0.433705')
cmd = '%s/mshview %s -camera %s' % (path_util.gaps_path(), mpath,
init_camera)
sp.check_output(cmd, shell=True)
def interactive_gridview(self, sif_vector, resolution, extent=0.75):
volume = self._grid_eval(
sif_vector, resolution, extent, extract_parts=False, world2local=None)
return gaps_util.grdview(volume)
def _spherical_to_4x4(self, coords):
"""Turns spherical coords into a 4x4 affine transformation matrix."""
r = coords[0]
theta = coords[1]
phi = coords[2]
st = tf.sin(theta)
x = r * st * tf.cos(phi)
y = r * st * tf.sin(phi)
z = r * tf.cos(theta)
eye = tf.stack([x, y, z], axis=0)
eye = tf.reshape(eye, [1, 3])
center = tf.zeros([1, 3], dtype=tf.float32)
world_up = tf.constant([[0., 1., 0.]], dtype=tf.float32)
world2cam = camera_util.look_at(eye, center, world_up)
cam2world = tf.linalg.inv(world2cam)
cam2world = tf.constant(
[[-9.9398971e-01, 2.7342862e-03, -4.7837296e-03, 1.4993416e-04],
[1.6200442e-09, 8.6298174e-01, 4.9326313e-01, 7.1943283e-01],
[5.5100261e-03, 4.9325553e-01, -8.6296844e-01, -1.2277470e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 1.0000000e+00]],
dtype=tf.float32)
return tf.reshape(cam2world, [4, 4]), eye
def _evaluate_pose_error(self, cam2world, eye, am_image):
"""Evaluates the error of an estimated 4x4 pose matrix."""
    # TODO(kgenova) This is a hack that only works for 3d-r2n2
ray_directions = gaps_util.gaps_depth_image_to_cam_image(
np.ones((224, 224)), xfov=0.422204).astype(np.float32)
tc = 15
t_vals = tf.constant(np.arange(0.75, 2.25, .1), dtype=tf.float32)
t_vals = tf.reshape(t_vals, [1, tc, 1])
ray_count = int(np.prod(ray_directions.shape[:-1]))
ray_directions = tf.reshape(ray_directions, [ray_count, 1, 3])
eye = tf.reshape(eye, [1, 1, 3])
cam_rays = ray_directions * t_vals + eye
world_pts = geom_util.apply_4x4(
cam_rays, cam2world, are_points=True, batch_rank=0, sample_rank=2)
world_pts = tf.reshape(world_pts, [1, ray_count * tc, 3])
self.cam_3dof_pts = world_pts
world_rbfs = self.structured_implicit.rbf_influence_at_samples(world_pts)
eec = world_rbfs.get_shape().as_list()[-1]
assert len(am_image.get_shape().as_list()) == 2
is_bg = tf.reshape(
tf.logical_not(tf.equal(am_image, eec)), [1, ray_count, 1])
am_image = tf.tile(tf.expand_dims(am_image, axis=-1), [1, 1, tc])
flat_am = tf.reshape(am_image, [ray_count * tc, 1])
flat_am = tf.where_v2(tf.equal(flat_am, 45), 0, flat_am)
world_rbfs = tf.reshape(world_rbfs, [ray_count * tc, 45])
max_val = tf.gather(world_rbfs, flat_am, batch_dims=1)
max_val = tf.reshape(max_val, [1, ray_count, tc])
max_val = tf.reduce_max(max_val, axis=-1)
is_bg_mult = tf.cast(is_bg, dtype=tf.float32)
max_val = is_bg_mult * max_val
error = -1.0 * tf.reduce_sum(max_val)
return error
def optimize_3dof_pose(self, sif_vector, am_image, e, step_count=10, lr=1e-6):
"""Tries to fit a pose given a SIF in 3D and a SIF segmentation image."""
if len(sif_vector.shape) == 2:
sif_vector = np.expand_dims(sif_vector, axis=0)
# Now rays is an array of shape [h, w, 3]. The origin is currently [0,0,0]
# because the rays are in camera space (for now).
lr = np.array([0.0, lr, lr], dtype=np.float32)
# Just worry about a single step for now:
# The pose is 3-dof: distance, phi, theta.
coords = np.array([0.812717413913 / 1.75, 0.0, 0.0], dtype=np.float32)
# cam2world, eye = self._spherical_to_4x4(coords)
for i in range(step_count):
log.verbose('Step %i: (%0.4f, %0.4f, %0.4f)' %
(i, coords[0], coords[1], coords[2]))
grad, err, pts = self.session.run(
[self.pose3_gradients, self.pose_error, self.cam_3dof_pts],
feed_dict={
self.am_image_ph: am_image,
self.sif_input: sif_vector,
self.coords_ph: coords
})
grad = grad[0]
log.verbose('Error: %0.2f' % err)
log.verbose('grad: %s' % repr(grad))
log.verbose('pts.shape: ', repr(pts.shape))
assert len(grad.shape) == 1
assert grad.shape[0] == 3
update = lr * grad
log.verbose('Update: ', str(update))
gaps_util.ptsview(pts, mesh=e.v1_gt_mesh)
coords = coords - lr * grad
return coords
def optimize_to_gt(self,
sif_vector,
example,
step_count=1,
lr=0.01,
vis=0,
verbosity=0,
target='all',
samps='nss'):
"""Iteratively optimizes a SIF or LDIF to fit ground truth in/out values."""
if samps == 'nss':
all_samples = example.near_surface_samples.copy()
np.random.shuffle(all_samples)
elif samps == 'uni':
all_samples = example.uniform_samples.copy()
elif samps == 'nssuni':
all_samples = np.concatenate(
[example.near_surface_samples, example.uniform_samples], axis=0)
elif samps == 'dodeca':
depth_ims = example.depth_images / 1000.0
all_samples = geom_util.depth_dodeca_to_samples(depth_ims)
elif samps == 'depth':
depth_idx = 1 # TODO(kgenova) Make this the one in the observation.
depth_ims = example.depth_images / 1000.0
depth_im = depth_ims[0, depth_idx, :, :, :]
cam2world = geom_util.get_dodeca_camera_to_worlds()[depth_idx, :, :]
assert depth_im.shape[0] == 224
assert cam2world.shape[0] == 4
log.verbose('Depth im shape: ', depth_im.shape)
all_samples = geom_util.depth_image_to_samples(depth_im, cam2world)
if verbosity >= 2:
gaps_util.ptsview(all_samples[..., :], self.extract_mesh(sif_vector, 128))
np.random.shuffle(all_samples)
cl = all_samples[:, 3]
all_samples[cl < 0, 3] = 0
all_samples[cl > 0, 3] = 1
samples, gt_class = np.split(all_samples, [3], axis=-1)
samples = samples[:self.optimizer_pc, :]
gt_class = gt_class[:self.optimizer_pc, :]
def print_sat_count(vec):
"""Prints the number of contraints that are satisfied and the total."""
pred = self.class_at_samples(vec, np.reshape(samples, [-1, 3]))
pred_is_out = pred > 0.5
gt_is_out = gt_class > 0.5
log.verbose(pred_is_out.shape, gt_is_out.shape)
agree = np.logical_or(
np.logical_and(pred_is_out, gt_is_out),
np.logical_and(
np.logical_not(pred_is_out), np.logical_not(gt_is_out)))
sat_count = np.count_nonzero(agree)
log.info('%i/%i constraints are satisfied.' %
(sat_count, self.optimizer_pc))
if verbosity >= 1:
log.info('Beginning optimization.')
print_sat_count(sif_vector)
assert gt_class.shape[-1] == 1
sif_vector = sif_vector.copy()
sif_vector = np.expand_dims(sif_vector, axis=0)
cur_vector = sif_vector.copy()
ret_best = False
if ret_best:
min_loss = np.inf
best_vec = cur_vector.copy()
momentum = 0.9
velocity = np.zeros_like(cur_vector)
cur_batch_idx = 0
for i in range(step_count):
batch_start = cur_batch_idx
batch_end = cur_batch_idx + self.optimizer_pc
if batch_end > all_samples.shape[0]:
np.random.shuffle(all_samples)
batch_start = 0
batch_end = self.optimizer_pc
cur_batch_idx = 0
batch_all_samples = all_samples[batch_start:batch_end, :]
cur_batch_idx += self.optimizer_pc
batch_samples, batch_gt_class = np.split(batch_all_samples, [3], axis=-1)
grad = self.session.run(
self.ldif_gradients,
feed_dict={
self.target_class_ph: batch_gt_class,
self.sif_input: cur_vector,
self.optimizer_samples: batch_samples
})[0]
vis_this_time = vis >= 2 or (vis >= 1 and (i == 0 or i == step_count - 1))
print_this_time = verbosity >= 2 or (verbosity >= 1 and not i % 1000)
if vis_this_time or print_this_time:
loss = self.session.run(
self.optimizer_elt_loss,
feed_dict={
self.target_class_ph: batch_gt_class,
self.sif_input: cur_vector,
self.optimizer_samples: batch_samples
})
if ret_best:
          lsum = np.sum(loss)
import numpy as np
import pandas as pd
import os
import platform
import datetime
import re
CHAR={
'menu' : u'\u2630',
'tridot' : u'\u26EC',
'apply' : u'\u1809',
'compute' : u'\u2699',
'close' : u'\u274C',
'add' : u'\u2795',
'add_small': u'\ufe62',
'clear' : u'-',
'sun' : u'\u2600',
'suncloud' : u'\u26C5',
'cloud' : u'\u2601',
'check' : u'\u2714',
'help' : u'\u2753'
}
# --------------------------------------------------------------------------------}
# --- ellude
# --------------------------------------------------------------------------------{
def common_start(*strings):
""" Returns the longest common substring
from the beginning of the `strings`
"""
if len(strings)==1:
strings=tuple(strings[0])
def _iter():
for z in zip(*strings):
if z.count(z[0]) == len(z): # check all elements in `z` are the same
yield z[0]
else:
return
return ''.join(_iter())
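# For example (illustrative): common_start('Alpha_1', 'Alpha_2') returns 'Alpha_';
# a single list argument, common_start(['Alpha_1', 'Alpha_2']), behaves the same.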
def common_end(*strings):
if len(strings)==1:
strings=strings[0]
else:
strings=list(strings)
strings = [s[-1::-1] for s in strings]
return common_start(strings)[-1::-1]
def find_leftstop(s):
for i,c in enumerate(reversed(s)):
if c in ['.','_','|']:
i=i+1
return s[:len(s)-i]
return s
def ellude_common(strings,minLength=2):
"""
    ellude the common parts of the strings in a list
    minLength:
       if -1, strings may be elluded down to a length of 0
       if 0 , a string that would be elluded to zero length is instead extended back until a stop character is found
"""
# Selecting only the strings that do not start with the safe '>' char
S = [s for i,s in enumerate(strings) if ((len(s)>0) and (s[0]!= '>'))]
if len(S)==0:
pass
elif len(S)==1:
ns=S[0].rfind('|')+1
ne=0;
else:
ss = common_start(S)
se = common_end(S)
iu = ss[:-1].rfind('_')
ip = ss[:-1].rfind('_')
if iu > 0:
if ip>0:
if iu>ip:
ss=ss[:iu+1]
else:
ss=ss[:iu+1]
iu = se[:-1].find('_')
if iu > 0:
se=se[iu:]
iu = se[:-1].find('.')
if iu > 0:
se=se[iu:]
ns=len(ss)
ne=len(se)
# Reduce start length if some strings end up empty
# Look if any of the strings will end up empty
SSS=[len(s[ns:-ne].lstrip('_') if ne>0 else s[ns:].lstrip('_')) for s in S]
currentMinLength=np.min(SSS)
if currentMinLength<minLength:
delta=minLength-currentMinLength
#print('ss',ss,'ns',ns)
if delta>0:
ss=ss[:-delta]
ns=len(ss)
#print('ss',ss)
ss=find_leftstop(ss)
#print('ss',ss)
if len(ss)==ns:
ns=0
else:
ns=len(ss)+1
for i,s in enumerate(strings):
if len(s)>0 and s[0]=='>':
strings[i]=s[1:]
else:
s=s[ns:-ne] if ne>0 else s[ns:]
strings[i]=s.lstrip('_')
if len(strings[i])==0:
strings[i]='tab{}'.format(i)
return strings
# --------------------------------------------------------------------------------}
# --- Key value
# --------------------------------------------------------------------------------{
def extract_key_tuples(text):
"""
all=(0.1,-2),b=(inf,0), c=(-inf,0.3e+10)
"""
regex = re.compile(r'(?P<key>[\w\-]+)=\((?P<value1>[0-9+epinf.-]*?),(?P<value2>[0-9+epinf.-]*?)\)($|,)')
return {match.group("key"): (np.float(match.group("value1")),np.float(match.group("value2"))) for match in regex.finditer(text.replace(' ',''))}
def extract_key_num(text):
"""
all=0.1, b=inf, c=-0.3e+10
"""
regex = re.compile(r'(?P<key>[\w\-]+)=(?P<value>[0-9+epinf.-]*?)($|,)')
return {match.group("key"): np.float(match.group("value")) for match in regex.finditer(text.replace(' ',''))}
# --------------------------------------------------------------------------------}
# ---
# --------------------------------------------------------------------------------{
# def getMonoFontAbs():
# import wx
# #return wx.Font(9, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Monospace')
# if os.name=='nt':
# font=wx.Font(9, wx.TELETYPE, wx.NORMAL, wx.NORMAL, False)
# elif os.name=='posix':
# font=wx.Font(10, wx.TELETYPE, wx.NORMAL, wx.NORMAL, False)
# else:
# font=wx.Font(8, wx.TELETYPE, wx.NORMAL, wx.NORMAL, False)
# return font
#
# def getMonoFont(widget):
# import wx
# font = widget.GetFont()
# font.SetFamily(wx.TELETYPE)
# if platform.system()=='Windows':
# pass
# elif platform.system()=='Linux':
# pass
# elif platform.system()=='Darwin':
# font.SetPointSize(font.GetPointSize()-1)
# else:
# pass
# return font
def getDt(x):
""" returns dt in s """
def myisnat(dt):
if isinstance(dt,pd._libs.tslibs.timedeltas.Timedelta):
try:
dt=pd.to_timedelta(dt) # pandas 1.0
except:
dt=pd.to_timedelta(dt,box=False) # backward compatibility
elif isinstance(dt,datetime.timedelta):
dt=np.array([dt],dtype='timedelta64')[0]
return pd.isna(dt)
# try:
# print('>>>', dt,type(dt))
# isnat=np.isnat(dt)
# except:
# print(type(dt),type(dx))
# isnat=False
# raise
# return isnat
if len(x)<=1:
return np.NaN
if isinstance(x[0],float):
return x[1]-x[0]
if isinstance(x[0],int) or isinstance(x[0],np.int32) or isinstance(x[0],np.int64):
return x[1]-x[0]
# first try with seconds
#print('')
#print('getDT: dx:',x[1]-x[0])
dx = x[1]-x[0]
#print(type(dx))
if myisnat(dx):
# we try the last values (or while loop, but may take a while)
dx = x[-1]-x[-2]
if myisnat(dx):
return np.nan
dt=np.timedelta64(dx,'s').item().total_seconds()
if dt<1:
# try higher resolution
        dt=np.timedelta64(dx,'ns')
# authored by <NAME>
# modified by <NAME> @ 01/06/2017
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import argparse
import base64
import json
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
import cv2
from keras.models import model_from_json
from keras.preprocessing.image import img_to_array
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
tf.python.control_flow_ops = control_flow_ops
sio = socketio.Server()
app = Flask(__name__)
model = None
def preprocess_input(img):
''' Crop, resize and convert input image from RGB to HLS colorspace
:param img: np array of uint8
:return: preprocessed image in float32
'''
img = cv2.resize(img[60:140, 40:280], (200, 66))
return cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype("float32")/255.0 - 0.5
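# Illustrative shape check (assuming the simulator's 320x160 RGB frames):
#   preprocess_input(np.zeros((160, 320, 3), np.uint8)).shape == (66, 200, 3)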
@sio.on('telemetry')
def telemetry(sid, data):
# current steering angle of the car
steering_angle = data["steering_angle"]
# current throttle of the car
throttle = data["throttle"]
# current speed of the car
speed = float(data["speed"])
# current image from the center camera of the car
img_string = data["image"]
img = Image.open(BytesIO(base64.b64decode(img_string)))
# preprocess image from center camera
    img = np.array(img, dtype=np.uint8)
import numpy as np
import six
import pandas as pd
from kavica.transformer.TransformingFunctions import *
from kavica.transformer.BatchTransformer import TransformerFunction
__all__ = ['VerticalTransformer']
# TODO: It is needed to revised
class VerticalTransformer(TransformerFunction):
"""
    Each transformer is a tuple:
    (str(new name), func(transform function), list(column/s), reconstructor)
    1- the name of the new column
    2- the transformation function
    3- the column/s that have to be transformed
    4- whether to add the transformed column alongside the original ('add') or replace it ('replace')
"""
def __init__(self, transformers, rest='transit'):
super(VerticalTransformer, self).__init__()
self.transformers = transformers # [('name','transformation', [columns]),replace]
self.rest = rest # transit | cast
self._remainder = None
self._columns = []
self.data = pd.DataFrame()
self.X = pd.DataFrame()
self.Y = pd.DataFrame()
self.iterator = None
self.yIndex = None
@property
def _transformers(self):
return [(name, trans, reconstructor) for name, trans, _, reconstructor in self.transformers]
@property
def named_transformers_(self):
"""Access the fitted transformer by name.
Read-only attribute to access any transformer by given name.
Keys are transformer names and values are the fitted transformer
objects.
"""
# Use Bunch object to improve autocomplete
return (dict([(name, trans) for name, trans, _, _ in self.transformers]))
# checking the type of the data column that will be transformed
def _get_column(self, X, key):
def _check_key_type(key, superclass):
if isinstance(key, superclass):
return True
if isinstance(key, slice):
return (isinstance(key.start, (superclass, type(None))) and
isinstance(key.stop, (superclass, type(None))))
if isinstance(key, list):
return all(isinstance(x, superclass) for x in key)
if hasattr(key, 'dtype'):
if superclass is int:
return key.dtype.kind == 'i'
else:
# superclass = six.string_types
return key.dtype.kind in ('O', 'U', 'S')
return False
if callable(key):
key = key(X)
# check whether we have string column names or integers
if _check_key_type(key, int):
column_names = False
elif _check_key_type(key, six.string_types):
column_names = True
elif hasattr(key, 'dtype') and np.issubdtype(key.dtype, np.bool_):
# boolean mask
column_names = False
if hasattr(X, 'loc'):
# pandas boolean masks don't work with iloc, so take loc path
column_names = True
else:
raise ValueError("No valid specification of the columns. Only a "
"scalar, list or slice of all integers or all "
"strings, or boolean mask is allowed")
if column_names:
if hasattr(X, 'loc'):
# pandas dataframes
return X.loc[:, key]
else:
raise ValueError("Specifying the columns using strings is only "
"supported for pandas DataFrames")
else:
if hasattr(X, 'iloc'):
return X.iloc[:, key]
else:
# numpy arrays, sparse arrays
return X[:, key]
def _check_key_type(self, key, superclass):
"""
Check that scalar, list or slice is of a certain type.
This is only used in _get_column and _get_column_indices to check
if the `key` (column specification) is fully integer or fully string-like.
Parameters
----------
key : scalar, list, slice, array-like
The column specification to check
superclass : int or six.string_types
The type for which to check the `key`
"""
if isinstance(key, superclass):
return True
if isinstance(key, slice):
return (isinstance(key.start, (superclass, type(None))) and
isinstance(key.stop, (superclass, type(None))))
if isinstance(key, list):
return all(isinstance(x, superclass) for x in key)
if hasattr(key, 'dtype'):
if superclass is int:
return key.dtype.kind == 'i'
else:
# superclass = six.string_types
return key.dtype.kind in ('O', 'U', 'S')
return False
# checking the name of the transformed data
def _validate_names(self, names, X):
invalid_names = None
if len(set(names)) != len(names):
raise ValueError('Provided names are not unique: '
'{0!r}'.format(list(names)))
if not all(name for name in names):
raise ValueError('All the transformation are needed to have name'
.format())
if isinstance(X, pd.DataFrame):
if X.columns.values.dtype != np.int64:
invalid_names = set(names).intersection(X.columns.values)
else:
                raise ValueError('The constructor argument dtype is {} and '
                                 'it should not be assigned a name.'
.format(X.columns.values.dtype))
elif isinstance(X, np.ndarray):
if X.dtype.names:
invalid_names = set(names).intersection(X.dtype.names)
else:
                raise ValueError('The constructor argument dtype is {} and '
                                 'it should not be assigned a name.'
.format('int64'))
if invalid_names:
raise ValueError('Estimator names conflict with constructor '
'arguments: {0!r}'.format(sorted(invalid_names)))
invalid_names = [name for name in names if '__' in name]
if invalid_names:
raise ValueError('Estimator names must not contain __: got '
'{0!r}'.format(invalid_names))
def _get_column_indices(self, X, key):
"""
Get feature column indices for input data X and key.
For accepted values of `key`, see the docstring of _get_column
"""
n_columns = X.shape[1]
if callable(key):
key = key(X)
if self._check_key_type(key, int):
if isinstance(key, int):
return [key]
elif isinstance(key, slice):
return list(range(n_columns)[key])
else:
return list(key)
elif self._check_key_type(key, six.string_types):
try:
all_columns = list(X.columns)
except AttributeError:
raise ValueError("Specifying the columns using strings is only "
"supported for pandas DataFrames")
if isinstance(key, six.string_types):
columns = [key]
elif isinstance(key, slice):
start, stop = key.start, key.stop
if start is not None:
start = all_columns.index(start)
if stop is not None:
# pandas indexing with strings is endpoint included
stop = all_columns.index(stop) + 1
else:
stop = n_columns + 1
return list(range(n_columns)[slice(start, stop)])
else:
columns = list(key)
return [all_columns.index(col) for col in columns]
elif hasattr(key, 'dtype') and np.issubdtype(key.dtype, np.bool_):
# boolean mask
return list(np.arange(n_columns)[key])
else:
raise ValueError("No valid specification of the columns. Only a "
"scalar, list or slice of all integers or all "
"strings, or boolean mask is allowed")
def _validate_rest(self, X, Y=None):
if self.rest not in ('transit', 'cast'):
raise ValueError(
"The rest column needs to be one of 'attach', 'detach',"
" or estimator. '%s' was passed instead" %
self.rest)
n_columns = X.shape[1]
cols = []
yIndex = []
for _, _, columns, _ in self.transformers:
cols.extend(self._get_column_indices(X, columns))
remaining_idx = sorted(list(set(range(n_columns)) - set(cols))) or None
if Y:
self.yIndex = self._get_column_indices(X, Y)
if self.yIndex[0] in remaining_idx:
remaining_idx.remove(self.yIndex[0])
yIndex = self.yIndex
else:
yIndex = []
self._remainder = ('rest', self.rest, remaining_idx, yIndex)
print(self._remainder)
def _validate_transformers(self, X):
if not self.transformers:
return
names, transformers, _, reconstructor = zip(*self.transformers)
# validate names
self._validate_names(names, X)
# validate reconstruction
for t in reconstructor:
if t in ('replace', 'add'):
continue
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All estimators should implement fit and "
"transform, or can be 'replace' or 'save' "
"specifiers. '%s' (type %s) doesn't." %
(t, type(t)))
def _validate_column_callables(self, X):
"""
Converts callable column specifications.
"""
columns = []
for _, _, column, _ in self.transformers:
if callable(column):
column = column(X)
elif column is not int:
column = self._get_column_indices(X, column)
columns.extend(column)
self._columns = columns
def __transform_generator(self, X):
        for transition, column in zip(self.transformers, self._columns):
            names, transformers, _, reconstructor, column = (*transition, column)
yield (column,
X.iloc[:, column].name,
transformers,
names,
reconstructor,
np.array(X.iloc[:, column]),
X.iloc[:, column].dtype.name,)
def fiting(self, X, Y=None):
if isinstance(X, pd.DataFrame):
pass
elif isinstance(X, np.ndarray):
X = pd.DataFrame(X)
elif isinstance(X, list):
X = pd.DataFrame(X)
self.X = pd.DataFrame(index=X.index)
self._validate_column_callables(X)
self._validate_rest(X, Y)
self._validate_transformers(X)
# initiate the output X,Y
if self.rest == 'transit':
self.X = X.iloc[:, self._remainder[2]]
if Y:
self.Y = pd.DataFrame(index=X.index)
if self._remainder[3]:
self.Y = X.iloc[:, self._remainder[3]]
self.iterator = self.__transform_generator(X)
return self
def transform(self):
for transform in self.iterator:
transformedItem = self._transform(transform[5].reshape(1, -1), func=transform[2])
if transform[4] == 'add':
self.X[transform[1]] = transform[5]
elif transform[4] == 'replace':
pass
else:
raise TypeError("It is {} that is not replace or add".format(transform[4]))
self.X[transform[3]] = transformedItem[0]
def main():
data = np.array([(1, 9, 6, 13, 1, 72, 4),
(1, 9, 0, 13, 1, 12, 4),
(2, 2, 45, 23, 24, 13, 16),
(3, 12, 0, 9, 5, 20, 89)])
datanp = np.array([(1, 9, 6),
(1, 9, 0),
(2, 2, 45),
(3, 12, 0)],
dtype={'col1': ('i1', 0, 'title 1'),
'col2': ('f4', 1, 'title 2'),
'col3': ('f4', 1, 'title 3')})
headers = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
df = pd.DataFrame(data, columns=headers)
data1 = [1, 9, 6, 13, 1, 72, 4]
    data2 = np.array([1, 9, 6, 13, 1, 72, 4])
import shlex
import subprocess
import cv2
import pybullet_data
from gym import error
import numpy as np
from inspect import currentframe, getframeinfo
import gibson
from gibson.core.physics.robot_locomotors import Fetch
from gibson.core.render.pcrender import PCRenderer
from gibson.data.datasets import ViewDataSet3D
from gibson.envs.env_bases import *
from gibson.envs.env_modalities import CameraRobotEnv, OneViewUI, TwoViewUI, ThreeViewUI, FourViewUI
CALC_OBSTACLE_PENALTY = 1
tracking_camera = {
'yaw': 20,
'z_offset': 0.5,
'distance': 1,
'pitch': -20
}
def quaternion_multiply(quaternion1, quaternion0):
w0, x0, y0, z0 = quaternion0
w1, x1, y1, z1 = quaternion1
return np.array([-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0,
x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0,
-x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0,
x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0], dtype=np.float64)
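# Note: quaternions here are in (w, x, y, z) order, so, illustratively,
# quaternion_multiply([1, 0, 0, 0], q) returns q's components unchanged (identity rotation).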
class FetchNavigateEnv(CameraRobotEnv):
def _reward(self, action):
raise NotImplementedError()
def __init__(self, config, gpu_idx=0, depth_render_port=5556, use_filler=None):
self.config = config
assert (self.config["envname"] == self.__class__.__name__ or self.config["envname"] == "TestEnv")
if isinstance(use_filler, bool):
self._use_filler = use_filler
else:
self._use_filler = config["use_filler"]
CameraRobotEnv.__init__(self, self.config, gpu_idx,
scene_type="stadium" if self.config["model_id"] == "stadium" else "building",
tracking_camera=tracking_camera, start_port=depth_render_port, use_filler=use_filler)
# print("Finished setting up camera_env")
self.window_width = self.config['window_width']
self.window_height = self.config['window_height']
self._render_width = self.config['window_width']
self._render_height = self.config['window_height']
self._target_labels = self.config['target_labels']
self.keys_to_action = {
(ord('s'),): [-0.05, 0] + [0] * 13, # backward
(ord('w'),): [0.05, 0] + [0] * 13, # forward
(ord('d'),): [0, 0.05] + [0] * 13, # turn right
(ord('a'),): [0, -0.05] + [0] * 13, # turn left
(): [0] * 15
}
# print("[{} {}] Fetch init'd".format(getframeinfo(currentframe()).filename, getframeinfo(currentframe()).lineno))
fetch = Fetch(self.config, env=self)
# print("[{} {}] Introducing robot".format(getframeinfo(currentframe()).filename, getframeinfo(currentframe()).lineno))
self.robot_introduce(fetch)
# print("[{} {}] Introducing scene".format(getframeinfo(currentframe()).filename,
# getframeinfo(currentframe()).lineno))
self.scene_introduce()
# print("[{} {}] Scene Introduced".format(getframeinfo(currentframe()).filename,
# getframeinfo(currentframe()).lineno))
self.total_reward = 0
self.total_frame = 0
self.goal_img = None
self.initial_pos = config['initial_pos']
self.initial_orn = config['initial_orn']
self.step = self._step
self.reset = self._reset
self.nonWheelJoints = [j for j in self.robot.ordered_joints if 'wheel' not in j.joint_name]
self.markers = []
self.marker_ids = []
# Initialize camera to point top down
if self.gui:
pos = self.robot._get_scaled_position()
# orn = self.robot.get_orientation()
pos = (pos[0], pos[1], pos[2] + self.tracking_camera['z_offset'])
            pos = np.array(pos)
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import time
import weakref
from typing import Dict, List, Optional
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.utils.events import EventStorage, get_event_storage
from tensorboardX import SummaryWriter
import os
import cv2
from tqdm import tqdm
import shutil
import xml.dom.minidom
# from .defaults import DefaultPredictor
# from ..data.datasets import get_icard19_dataset
try:
from azureml.core.run import Run
aml_run = Run.get_context()
except ImportError:
aml_run = None
__all__ = ["HookBase", "TrainerBase", "SimpleTrainer", "AMPTrainer"]
class HookBase:
"""
Base class for hooks that can be registered with :class:`TrainerBase`.
Each hook can implement 4 methods. The way they are called is demonstrated
in the following snippet:
::
hook.before_train()
for iter in range(start_iter, max_iter):
hook.before_step()
trainer.run_step()
hook.after_step()
iter += 1
hook.after_train()
Notes:
1. In the hook method, users can access ``self.trainer`` to access more
properties about the context (e.g., model, current iteration, or config
if using :class:`DefaultTrainer`).
2. A hook that does something in :meth:`before_step` can often be
implemented equivalently in :meth:`after_step`.
If the hook takes non-trivial time, it is strongly recommended to
implement the hook in :meth:`after_step` instead of :meth:`before_step`.
The convention is that :meth:`before_step` should only take negligible time.
Following this convention will allow hooks that do care about the difference
between :meth:`before_step` and :meth:`after_step` (e.g., timer) to
function properly.
Attributes:
trainer (TrainerBase): A weak reference to the trainer object. Set by the trainer
when the hook is registered.
"""
def before_train(self):
"""
Called before the first iteration.
"""
pass
def after_train(self):
"""
Called after the last iteration.
"""
pass
def before_step(self):
"""
Called before each iteration.
"""
pass
def after_step(self):
"""
Called after each iteration.
"""
pass
class TrainerBase:
"""
Base class for iterative trainer with hooks.
The only assumption we made here is: the training runs in a loop.
A subclass can implement what the loop is.
We made no assumptions about the existence of dataloader, optimizer, model, etc.
Attributes:
iter(int): the current iteration.
start_iter(int): The iteration to start with.
By convention the minimum possible value is 0.
max_iter(int): The iteration to end training.
storage(EventStorage): An EventStorage that's opened during the course of training.
"""
def __init__(self) -> None:
self._hooks: List[HookBase] = []
self.iter: int
self.start_iter: int
self.max_iter: int
self.storage: EventStorage
def register_hooks(self, hooks: List[Optional[HookBase]]) -> None:
"""
Register hooks to the trainer. The hooks are executed in the order
they are registered.
Args:
hooks (list[Optional[HookBase]]): list of hooks
"""
hooks = [h for h in hooks if h is not None]
for h in hooks:
assert isinstance(h, HookBase)
# To avoid circular reference, hooks and trainer cannot own each other.
# This normally does not matter, but will cause memory leak if the
# involved objects contain __del__:
# See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
h.trainer = weakref.proxy(self)
self._hooks.extend(hooks)
def train(self, start_iter: int, max_iter: int):
"""
Args:
start_iter, max_iter (int): See docs above
"""
self.local_rank = self.cfg._rank
self.logging_steps = self.cfg._logging_steps
if self.local_rank in [-1, 0]:
self.tb_writer = SummaryWriter(
logdir=os.path.join(self.cfg._output_dir, 'tblogs'))
self.logger = logging.getLogger(__name__)
self.logger.info("Starting training from iteration {}".format(start_iter))
self.iter = self.start_iter = start_iter
self.max_iter = max_iter
self.loss_cls, self.logging_loss_cls = 0.0, 0.0
self.loss_box_reg, self.logging_loss_box_reg = 0.0, 0.0
self.loss_mask, self.logging_loss_mask = 0.0, 0.0
self.loss_rpn_cls, self.logging_loss_rpn_cls = 0.0, 0.0
self.loss_rpn_loc, self.logging_loss_rpn_loc = 0.0, 0.0
self.loss_mrcnn_total, self.logging_loss_mrcnn_total = 0.0, 0.0
with EventStorage(start_iter) as self.storage:
try:
self.before_train()
iterator = tqdm(range(start_iter, max_iter),
desc='Iteration',
disable=self.local_rank not in [-1, 0])
for _, self.iter in enumerate(iterator):
self.before_step()
self.res = self.run_step()
self.after_step()
# self.iter == max_iter can be used by `after_train` to
# tell whether the training successfully finished or failed
# due to exceptions.
self.iter += 1
except Exception:
self.logger.exception("Exception during training:")
raise
finally:
self.after_train()
def before_train(self):
for h in self._hooks:
h.before_train()
def after_train(self):
self.storage.iter = self.iter
for h in self._hooks:
h.after_train()
def before_step(self):
# Maintain the invariant that storage.iter == trainer.iter
# for the entire execution of each step
self.storage.iter = self.iter
for h in self._hooks:
h.before_step()
def user_logging(self):
self.loss_cls += self.res['loss_cls']
self.loss_box_reg += self.res['loss_box_reg']
self.loss_mask += self.res['loss_mask']
self.loss_rpn_cls += self.res['loss_rpn_cls']
self.loss_rpn_loc += self.res['loss_rpn_loc']
self.loss_mrcnn_total += self.res['loss_cls'] + self.res['loss_box_reg'] + self.res['loss_mask'] + self.res[
'loss_rpn_cls'] + self.res['loss_rpn_loc']
if self.local_rank in [-1, 0] and self.logging_steps > 0 and self.iter % self.logging_steps == 0:
self.tb_writer.add_scalar('lr',
self.scheduler.get_last_lr()[0], self.iter)
self.tb_writer.add_scalar(
'loss_cls',
(self.loss_cls - self.logging_loss_cls) / self.logging_steps,
self.iter,
)
self.tb_writer.add_scalar(
'loss_box_reg',
(self.loss_box_reg - self.logging_loss_box_reg) / self.logging_steps,
self.iter,
)
self.tb_writer.add_scalar(
'loss_mask',
(self.loss_mask - self.logging_loss_mask) / self.logging_steps,
self.iter,
)
self.tb_writer.add_scalar(
'loss_rpn_cls',
(self.loss_rpn_cls - self.logging_loss_rpn_cls) / self.logging_steps,
self.iter,
)
self.tb_writer.add_scalar(
'loss_rpn_loc',
(self.loss_rpn_loc - self.logging_loss_rpn_loc) / self.logging_steps,
self.iter,
)
self.tb_writer.add_scalar(
'loss_mrcnn_total',
(self.loss_mrcnn_total - self.logging_loss_mrcnn_total) / self.logging_steps,
self.iter,
)
if aml_run is not None:
aml_run.log('lr', self.scheduler.get_last_lr()[0])
aml_run.log('loss_cls', (self.loss_cls - self.logging_loss_cls) / self.logging_steps)
aml_run.log('loss_box_reg', (self.loss_box_reg - self.logging_loss_box_reg) / self.logging_steps)
aml_run.log('loss_mask', (self.loss_mask - self.logging_loss_mask) / self.logging_steps)
aml_run.log('loss_rpn_cls', (self.loss_rpn_cls - self.logging_loss_rpn_cls) / self.logging_steps)
aml_run.log('loss_rpn_loc', (self.loss_rpn_loc - self.logging_loss_rpn_loc) / self.logging_steps)
aml_run.log('loss_mrcnn_total',
(self.loss_mrcnn_total - self.logging_loss_mrcnn_total) / self.logging_steps)
self.logger.info(
'step: {} | lr: {:.4E} | total_loss: {:6.4f} | table loss: {:6.4f} '.format(
self.iter,
self.scheduler.get_last_lr()[0],
(self.loss_mrcnn_total - self.logging_loss_mrcnn_total) / self.logging_steps,
(self.loss_mrcnn_total - self.logging_loss_mrcnn_total) / self.logging_steps,
))
self.logging_loss_cls = self.loss_cls
self.logging_loss_box_reg = self.loss_box_reg
self.logging_loss_mask = self.loss_mask
self.logging_loss_rpn_cls = self.loss_rpn_cls
self.logging_loss_rpn_loc = self.loss_rpn_loc
self.logging_loss_mrcnn_total = self.loss_mrcnn_total
def evaluate(self):
def calc_table_bbox(table_pred, file_name):
table_bbox = []
assert len(table_pred) == len(file_name)
for i in range(len(table_pred)):
cur_item = {
'filename': file_name[i],
'bbox': []
}
raw_bbox = table_pred[i]._fields['pred_boxes'].tensor
res_size = raw_bbox.shape
for j in range(res_size[0]):
cur_bbox = raw_bbox[j].tolist()
x0, y0, x1, y1 = map(int, cur_bbox)
cur_item['bbox'].append([x0, y0, x0, y1, x1, y1, x1, y0])
table_bbox.append(cur_item)
return table_bbox
def save_table_result(table_result, output_path):
for i in tqdm(range(len(table_result))):
pure_filename = table_result[i]['filename']
doc = xml.dom.minidom.Document()
root = doc.createElement('document')
root.setAttribute('filename', table_result[i]['filename'])
doc.appendChild(root)
tables = table_result[i]['bbox']
table_id = 0
for table in tables:
table_id += 1
nodeManager = doc.createElement('table')
nodeManager.setAttribute('id', str(table_id))
bbox_str = '{},{} {},{} {},{} {},{}'.format(table[0], table[1], table[2], table[3], table[4],
table[5], table[6], table[7])
nodeCoords = doc.createElement('Coords')
nodeCoords.setAttribute('points', bbox_str)
nodeManager.appendChild(nodeCoords)
root.appendChild(nodeManager)
filename = '{}-result.xml'.format(pure_filename)
fp = open(os.path.join(output_path, filename), 'w')
doc.writexml(fp, indent='', addindent='\t', newl='\n', encoding="utf-8")
fp.flush()
fp.close()
from .defaults import DefaultPredictor
from ..data.datasets import get_icard19_dataset
from ..evaluation import calc_table_score
self.cfg.MODEL.WEIGHTS = os.path.join(self.cfg._output_dir, 'model_{:0=7}.pth'.format(self.iter - 1))
predictor = DefaultPredictor(self.cfg)
dataset_dicts = get_icard19_dataset("test", self.cfg._data_dir)
# all_data = []
table_preds = []
image_name = []
# for d in random.sample(dataset_dicts, 10):
for d in tqdm(dataset_dicts):
im = cv2.imread(d["file_name"])
outputs = predictor(im)
# all_data.append(d)
table_preds.append(outputs['instances'])
image_name.append(os.path.basename(d['file_name'])[:-4])
assert len(table_preds) == len(image_name)
table_result = calc_table_bbox(table_preds, image_name)
output_path = os.path.join(self.cfg._output_dir, 'checkpoint-{}'.format(self.iter))
output_path = os.path.join(output_path, 'table_predict')
if os.path.exists(output_path):
shutil.rmtree(output_path)
os.makedirs(output_path)
save_table_result(table_result, output_path)
table_final_score = calc_table_score(output_path)
resutls = {}
try:
resutls.update(table_final_score)
except:
resutls.update(
{
'p_six': 0.0, "r_six": 0.0, "f1_six": 0.0,
"p_seven": 0.0, "r_seven": 0.0, "f1_seven": 0.0,
"p_eight": 0.0, "r_eight": 0.0, "f1_eight": 0.0,
"p_nine": 0.0, "r_nine": 0.0, "f1_nine": 0.0,
"wF1": 0.0
}
)
for key in resutls.keys():
self.logger.info('{} = {}\n'.format(key, str(resutls[key])))
# self.logger.info(resutls)
for key, value in resutls.items():
self.tb_writer.add_scalar('eval_threshold:{}_{}'.format(self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST, key), value,
self.iter)
if aml_run is not None:
aml_run.log('eval_threshold:{}_{}'.format(self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST, key), value)
return resutls
def after_step(self):
self.user_logging()
for h in self._hooks:
h.after_step()
if self.iter != 0 and self.iter % self.cfg.SOLVER.CHECKPOINT_PERIOD == 0 and self.local_rank in [-1, 0]:
self.evaluate()
def run_step(self):
raise NotImplementedError
class SimpleTrainer(TrainerBase):
"""
A simple trainer for the most common type of task:
single-cost single-optimizer single-data-source iterative optimization,
optionally using data-parallelism.
It assumes that every step, you:
1. Compute the loss with a data from the data_loader.
2. Compute the gradients with the above loss.
3. Update the model with the optimizer.
All other tasks during training (checkpointing, logging, evaluation, LR schedule)
are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`.
If you want to do anything fancier than this,
either subclass TrainerBase and implement your own `run_step`,
or write your own training loop.
"""
def __init__(self, model, data_loader, optimizer):
"""
Args:
model: a torch Module. Takes a data from data_loader and returns a
dict of losses.
data_loader: an iterable. Contains data to be used to call model.
optimizer: a torch optimizer.
"""
super().__init__()
"""
We set the model to training mode in the trainer.
However it's valid to train a model that's in eval mode.
If you want your model (or a submodule of it) to behave
like evaluation during training, you can overwrite its train() method.
"""
model.train()
self.model = model
self.data_loader = data_loader
self._data_loader_iter = iter(data_loader)
self.optimizer = optimizer
def run_step(self):
"""
Implement the standard training logic described above.
"""
assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
start = time.perf_counter()
"""
If you want to do something with the data, you can wrap the dataloader.
"""
data = next(self._data_loader_iter)
data_time = time.perf_counter() - start
"""
If you want to do something with the losses, you can wrap the model.
"""
loss_dict = self.model(data)
losses = sum(loss_dict.values())
"""
If you need to accumulate gradients or do something similar, you can
wrap the optimizer with your custom `zero_grad()` method.
"""
self.optimizer.zero_grad()
losses.backward()
self._write_metrics(loss_dict, data_time)
"""
If you need gradient clipping/scaling or other processing, you can
wrap the optimizer with your custom `step()` method. But it is
suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4
"""
self.optimizer.step()
def _write_metrics(
self,
loss_dict: Dict[str, torch.Tensor],
data_time: float,
prefix: str = "",
):
"""
Args:
loss_dict (dict): dict of scalar losses
data_time (float): time taken by the dataloader iteration
"""
metrics_dict = {k: v.detach().cpu().item() for k, v in loss_dict.items()}
metrics_dict["data_time"] = data_time
# Gather metrics among all workers for logging
# This assumes we do DDP-style training, which is currently the only
# supported method in detectron2.
all_metrics_dict = comm.gather(metrics_dict)
if comm.is_main_process():
storage = get_event_storage()
# data_time among workers can have high variance. The actual latency
# caused by data_time is the maximum among workers.
data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
storage.put_scalar("data_time", data_time)
# average the rest metrics
metrics_dict = {
                k: np.mean([x[k] for x in all_metrics_dict])
from __future__ import print_function, division, absolute_import
import time
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import cv2
import shapely
import shapely.geometry
import imgaug as ia
from imgaug.testutils import reseed
def main():
time_start = time.time()
test_is_np_array()
test_is_single_integer()
test_is_single_float()
test_is_single_number()
test_is_iterable()
test_is_string()
test_is_single_bool()
test_is_integer_array()
test_is_float_array()
test_is_callable()
test_caller_name()
test_seed()
test_current_random_state()
test_new_random_state()
test_dummy_random_state()
test_copy_random_state()
test_derive_random_state()
test_derive_random_states()
test_forward_random_state()
# test_quokka()
# test_quokka_square()
# test_angle_between_vectors()
# test_draw_text()
test_imresize_many_images()
test_imresize_single_image()
test_pad()
test_compute_paddings_for_aspect_ratio()
test_pad_to_aspect_ratio()
test_pool()
test_avg_pool()
test_max_pool()
test_draw_grid()
# test_show_grid()
# test_do_assert()
# test_HooksImages_is_activated()
# test_HooksImages_is_propagating()
# test_HooksImages_preprocess()
# test_HooksImages_postprocess()
test_Keypoint()
test_KeypointsOnImage()
test_BoundingBox()
test_BoundingBoxesOnImage()
# test_HeatmapsOnImage_get_arr()
# test_HeatmapsOnImage_find_global_maxima()
test_HeatmapsOnImage_draw()
test_HeatmapsOnImage_draw_on_image()
test_HeatmapsOnImage_invert()
test_HeatmapsOnImage_pad()
# test_HeatmapsOnImage_pad_to_aspect_ratio()
test_HeatmapsOnImage_avg_pool()
test_HeatmapsOnImage_max_pool()
test_HeatmapsOnImage_scale()
# test_HeatmapsOnImage_to_uint8()
# test_HeatmapsOnImage_from_uint8()
# test_HeatmapsOnImage_from_0to1()
# test_HeatmapsOnImage_change_normalization()
# test_HeatmapsOnImage_copy()
# test_HeatmapsOnImage_deepcopy()
test_SegmentationMapOnImage_bool()
test_SegmentationMapOnImage_get_arr_int()
# test_SegmentationMapOnImage_get_arr_bool()
test_SegmentationMapOnImage_draw()
test_SegmentationMapOnImage_draw_on_image()
test_SegmentationMapOnImage_pad()
test_SegmentationMapOnImage_pad_to_aspect_ratio()
test_SegmentationMapOnImage_scale()
test_SegmentationMapOnImage_to_heatmaps()
test_SegmentationMapOnImage_from_heatmaps()
test_SegmentationMapOnImage_copy()
test_SegmentationMapOnImage_deepcopy()
test_Polygon___init__()
test_Polygon_xx()
test_Polygon_yy()
test_Polygon_xx_int()
test_Polygon_yy_int()
test_Polygon_is_valid()
test_Polygon_area()
test_Polygon_project()
test_Polygon__compute_inside_image_point_mask()
test_Polygon_is_fully_within_image()
test_Polygon_is_partly_within_image()
test_Polygon_is_out_of_image()
test_Polygon_cut_out_of_image()
test_Polygon_clip_out_of_image()
test_Polygon_shift()
test_Polygon_draw_on_image()
test_Polygon_extract_from_image()
test_Polygon_to_shapely_polygon()
test_Polygon_to_bounding_box()
test_Polygon_from_shapely()
test_Polygon_copy()
test_Polygon_deepcopy()
test_Polygon___repr__()
test_Polygon___str__()
# test_Batch()
test_BatchLoader()
# test_BackgroundAugmenter.get_batch()
# test_BackgroundAugmenter._augment_images_worker()
# test_BackgroundAugmenter.terminate()
time_end = time.time()
print("<%s> Finished without errors in %.4fs." % (__file__, time_end - time_start,))
def test_is_np_array():
class _Dummy(object):
pass
values_true = [
np.zeros((1, 2), dtype=np.uint8),
np.zeros((64, 64, 3), dtype=np.uint8),
np.zeros((1, 2), dtype=np.float32),
np.zeros((100,), dtype=np.float64)
]
values_false = [
"A", "BC", "1", True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(),
-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4
]
for value in values_true:
assert ia.is_np_array(value) is True
for value in values_false:
assert ia.is_np_array(value) is False
def test_is_single_integer():
assert ia.is_single_integer("A") is False
assert ia.is_single_integer(None) is False
assert ia.is_single_integer(1.2) is False
assert ia.is_single_integer(1.0) is False
assert ia.is_single_integer(np.ones((1,), dtype=np.float32)[0]) is False
assert ia.is_single_integer(1) is True
assert ia.is_single_integer(1234) is True
assert ia.is_single_integer(np.ones((1,), dtype=np.uint8)[0]) is True
assert ia.is_single_integer(np.ones((1,), dtype=np.int32)[0]) is True
def test_is_single_float():
assert ia.is_single_float("A") is False
assert ia.is_single_float(None) is False
assert ia.is_single_float(1.2) is True
assert ia.is_single_float(1.0) is True
assert ia.is_single_float(np.ones((1,), dtype=np.float32)[0]) is True
assert ia.is_single_float(1) is False
assert ia.is_single_float(1234) is False
assert ia.is_single_float(np.ones((1,), dtype=np.uint8)[0]) is False
assert ia.is_single_float(np.ones((1,), dtype=np.int32)[0]) is False
def test_caller_name():
assert ia.caller_name() == 'test_caller_name'
def test_is_single_number():
class _Dummy(object):
pass
values_true = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4]
values_false = ["A", "BC", "1", True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(), np.zeros((1, 2), dtype=np.uint8)]
for value in values_true:
assert ia.is_single_number(value) is True
for value in values_false:
assert ia.is_single_number(value) is False
def test_is_iterable():
class _Dummy(object):
pass
values_true = [
[0, 1, 2],
["A", "X"],
[[123], [456, 789]],
[],
(1, 2, 3),
(1,),
tuple(),
"A",
"ABC",
"",
np.zeros((100,), dtype=np.uint8)
]
values_false = [1, 100, 0, -100, -1, 1.2, -1.2, True, False, _Dummy()]
for value in values_true:
assert ia.is_iterable(value) is True, value
for value in values_false:
assert ia.is_iterable(value) is False
def test_is_string():
class _Dummy(object):
pass
values_true = ["A", "BC", "1", ""]
values_false = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False, (1.0, 2.0), [1.0, 2.0],
_Dummy(), np.zeros((1, 2), dtype=np.uint8)]
for value in values_true:
assert ia.is_string(value) is True
for value in values_false:
assert ia.is_string(value) is False
def test_is_single_bool():
class _Dummy(object):
pass
values_true = [False, True]
values_false = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, (1.0, 2.0), [1.0, 2.0], _Dummy(),
np.zeros((1, 2), dtype=np.uint8), np.zeros((1,), dtype=bool)]
for value in values_true:
assert ia.is_single_bool(value) is True
for value in values_false:
assert ia.is_single_bool(value) is False
def test_is_integer_array():
class _Dummy(object):
pass
values_true = [
np.zeros((1, 2), dtype=np.uint8),
np.zeros((100,), dtype=np.uint8),
np.zeros((1, 2), dtype=np.uint16),
np.zeros((1, 2), dtype=np.int32),
np.zeros((1, 2), dtype=np.int64)
]
values_false = [
"A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
(1.0, 2.0), [1.0, 2.0], _Dummy(),
np.zeros((1, 2), dtype=np.float16),
np.zeros((100,), dtype=np.float32),
np.zeros((1, 2), dtype=np.float64),
np.zeros((1, 2), dtype=np.bool)
]
for value in values_true:
assert ia.is_integer_array(value) is True
for value in values_false:
assert ia.is_integer_array(value) is False
def test_is_float_array():
class _Dummy(object):
pass
values_true = [
np.zeros((1, 2), dtype=np.float16),
np.zeros((100,), dtype=np.float32),
np.zeros((1, 2), dtype=np.float64)
]
values_false = [
"A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
(1.0, 2.0), [1.0, 2.0], _Dummy(),
np.zeros((1, 2), dtype=np.uint8),
np.zeros((100,), dtype=np.uint8),
np.zeros((1, 2), dtype=np.uint16),
np.zeros((1, 2), dtype=np.int32),
np.zeros((1, 2), dtype=np.int64),
np.zeros((1, 2), dtype=np.bool)
]
for value in values_true:
assert ia.is_float_array(value) is True
for value in values_false:
assert ia.is_float_array(value) is False
def test_is_callable():
def _dummy_func():
pass
_dummy_func2 = lambda x: x
class _Dummy1(object):
pass
class _Dummy2(object):
def __call__(self):
pass
values_true = [_dummy_func, _dummy_func2, _Dummy2()]
values_false = ["A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
(1.0, 2.0), [1.0, 2.0], _Dummy1(), np.zeros((1, 2), dtype=np.uint8)]
for value in values_true:
assert ia.is_callable(value) == True
for value in values_false:
assert ia.is_callable(value) == False
def test_seed():
ia.seed(10017)
rs = np.random.RandomState(10017)
assert ia.CURRENT_RANDOM_STATE.randint(0, 1000*1000) == rs.randint(0, 1000*1000)
reseed()
def test_current_random_state():
assert ia.current_random_state() == ia.CURRENT_RANDOM_STATE
def test_new_random_state():
seed = 1000
ia.seed(seed)
rs_observed = ia.new_random_state(seed=None, fully_random=False)
rs_expected = np.random.RandomState(np.random.RandomState(seed).randint(0, 10**6, 1)[0])
assert rs_observed.randint(0, 10**6) == rs_expected.randint(0, 10**6)
rs_observed1 = ia.new_random_state(seed=None, fully_random=False)
rs_observed2 = ia.new_random_state(seed=None, fully_random=False)
assert rs_observed1.randint(0, 10**6) != rs_observed2.randint(0, 10**6)
ia.seed(seed)
np.random.seed(seed)
rs_observed = ia.new_random_state(seed=None, fully_random=True)
rs_not_expected = np.random.RandomState(np.random.RandomState(seed).randint(0, 10**6, 1)[0])
assert rs_observed.randint(0, 10**6) != rs_not_expected.randint(0, 10**6)
rs_observed1 = ia.new_random_state(seed=None, fully_random=True)
rs_observed2 = ia.new_random_state(seed=None, fully_random=True)
assert rs_observed1.randint(0, 10**6) != rs_observed2.randint(0, 10**6)
rs_observed1 = ia.new_random_state(seed=1234)
rs_observed2 = ia.new_random_state(seed=1234)
rs_expected = np.random.RandomState(1234)
assert rs_observed1.randint(0, 10**6) == rs_observed2.randint(0, 10**6) == rs_expected.randint(0, 10**6)
def test_dummy_random_state():
assert ia.dummy_random_state().randint(0, 10**6) == np.random.RandomState(1).randint(0, 10**6)
def test_copy_random_state():
rs = np.random.RandomState(1017)
rs_copy = ia.copy_random_state(rs)
assert rs != rs_copy
assert rs.randint(0, 10**6) == rs_copy.randint(0, 10**6)
assert ia.copy_random_state(np.random) == np.random
assert ia.copy_random_state(np.random, force_copy=True) != np.random
def test_derive_random_state():
rs = np.random.RandomState(1017)
rs_observed = ia.derive_random_state(np.random.RandomState(1017))
rs_expected = np.random.RandomState(np.random.RandomState(1017).randint(0, 10**6))
assert rs_observed.randint(0, 10**6) == rs_expected.randint(0, 10**6)
def test_derive_random_states():
rs_observed1, rs_observed2 = ia.derive_random_states(np.random.RandomState(1017), n=2)
seed = np.random.RandomState(1017).randint(0, 10**6)
rs_expected1 = np.random.RandomState(seed+0)
rs_expected2 = np.random.RandomState(seed+1)
assert rs_observed1.randint(0, 10**6) == rs_expected1.randint(0, 10**6)
assert rs_observed2.randint(0, 10**6) == rs_expected2.randint(0, 10**6)
def test_forward_random_state():
rs1 = np.random.RandomState(1017)
rs2 = np.random.RandomState(1017)
ia.forward_random_state(rs1)
rs2.uniform()
assert rs1.randint(0, 10**6) == rs2.randint(0, 10**6)
def test_imresize_many_images():
interpolations = [None,
"nearest", "linear", "area", "cubic",
cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
for c in [1, 3]:
image1 = np.zeros((16, 16, c), dtype=np.uint8) + 255
image2 = np.zeros((16, 16, c), dtype=np.uint8)
image3 = np.pad(
np.zeros((8, 8, c), dtype=np.uint8) + 255,
((4, 4), (4, 4), (0, 0)),
mode="constant",
constant_values=0
)
image1_small = np.zeros((8, 8, c), dtype=np.uint8) + 255
image2_small = np.zeros((8, 8, c), dtype=np.uint8)
image3_small = np.pad(
np.zeros((4, 4, c), dtype=np.uint8) + 255,
((2, 2), (2, 2), (0, 0)),
mode="constant",
constant_values=0
)
image1_large = np.zeros((32, 32, c), dtype=np.uint8) + 255
image2_large = np.zeros((32, 32, c), dtype=np.uint8)
image3_large = np.pad(
np.zeros((16, 16, c), dtype=np.uint8) + 255,
((8, 8), (8, 8), (0, 0)),
mode="constant",
constant_values=0
)
images = np.uint8([image1, image2, image3])
images_small = np.uint8([image1_small, image2_small, image3_small])
images_large = np.uint8([image1_large, image2_large, image3_large])
for images_this_iter in [images, list(images)]: # test for ndarray and list(ndarray) input
for interpolation in interpolations:
images_same_observed = ia.imresize_many_images(images_this_iter, (16, 16), interpolation=interpolation)
for image_expected, image_observed in zip(images_this_iter, images_same_observed):
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
assert np.sum(diff) == 0
for interpolation in interpolations:
images_small_observed = ia.imresize_many_images(images_this_iter, (8, 8), interpolation=interpolation)
for image_expected, image_observed in zip(images_small, images_small_observed):
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
diff_fraction = np.sum(diff) / (image_observed.size * 255)
assert diff_fraction < 0.5
for interpolation in interpolations:
images_large_observed = ia.imresize_many_images(images_this_iter, (32, 32), interpolation=interpolation)
for image_expected, image_observed in zip(images_large, images_large_observed):
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
diff_fraction = np.sum(diff) / (image_observed.size * 255)
assert diff_fraction < 0.5
# test size given as single int
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, 8)
assert observed.shape == (1, 8, 8, 3)
# test size given as single float
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, 2.0)
assert observed.shape == (1, 8, 8, 3)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, 0.5)
assert observed.shape == (1, 2, 2, 3)
# test size given as (float, float)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (2.0, 2.0))
assert observed.shape == (1, 8, 8, 3)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (0.5, 0.5))
assert observed.shape == (1, 2, 2, 3)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (2.0, 0.5))
assert observed.shape == (1, 8, 2, 3)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (0.5, 2.0))
assert observed.shape == (1, 2, 8, 3)
# test size given as int+float or float+int
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (11, 2.0))
assert observed.shape == (1, 11, 8, 3)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (2.0, 11))
assert observed.shape == (1, 8, 11, 3)
# test no channels
images = np.zeros((1, 4, 4), dtype=np.uint8)
images_rs = ia.imresize_many_images(images, (2, 2))
assert images_rs.shape == (1, 2, 2)
images = [np.zeros((4, 4), dtype=np.uint8)]
images_rs = ia.imresize_many_images(images, (2, 2))
assert isinstance(images_rs, list)
assert images_rs[0].shape == (2, 2)
# test len 0 input
observed = ia.imresize_many_images(np.zeros((0, 8, 8, 3), dtype=np.uint8), (4, 4))
assert ia.is_np_array(observed)
assert observed.dtype.type == np.uint8
assert len(observed) == 0
observed = ia.imresize_many_images([], (4, 4))
assert isinstance(observed, list)
assert len(observed) == 0
# test images with zero height/width
images = [np.zeros((0, 4, 3), dtype=np.uint8)]
got_exception = False
try:
_ = ia.imresize_many_images(images, sizes=(2, 2))
except Exception as exc:
assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc)
got_exception = True
assert got_exception
images = [np.zeros((4, 0, 3), dtype=np.uint8)]
got_exception = False
try:
_ = ia.imresize_many_images(images, sizes=(2, 2))
except Exception as exc:
assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc)
got_exception = True
assert got_exception
images = [np.zeros((0, 0, 3), dtype=np.uint8)]
got_exception = False
try:
_ = ia.imresize_many_images(images, sizes=(2, 2))
except Exception as exc:
assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc)
got_exception = True
assert got_exception
# test invalid sizes
sizes_all = [(-1, 2), (0, 2)]
sizes_all = sizes_all\
+ [(float(a), b) for a, b in sizes_all]\
+ [(a, float(b)) for a, b in sizes_all]\
+ [(float(a), float(b)) for a, b in sizes_all]\
+ [(-a, -b) for a, b in sizes_all]\
+ [(-float(a), -b) for a, b in sizes_all]\
+ [(-a, -float(b)) for a, b in sizes_all]\
+ [(-float(a), -float(b)) for a, b in sizes_all]
sizes_all = sizes_all\
+ [(b, a) for a, b in sizes_all]
sizes_all = sizes_all\
+ [-1.0, 0.0, -1, 0]
for sizes in sizes_all:
images = [np.zeros((4, 4, 3), dtype=np.uint8)]
got_exception = False
try:
_ = ia.imresize_many_images(images, sizes=sizes)
except Exception as exc:
assert "value is zero or lower than zero." in str(exc)
got_exception = True
assert got_exception
# test list input but all with same shape
images = [np.zeros((8, 8, 3), dtype=np.uint8) for _ in range(2)]
observed = ia.imresize_many_images(images, (4, 4))
assert isinstance(observed, list)
assert all([image.shape == (4, 4, 3) for image in observed])
assert all([image.dtype.type == np.uint8 for image in observed])
def test_imresize_single_image():
for c in [-1, 1, 3]:
image1 = np.zeros((16, 16, abs(c)), dtype=np.uint8) + 255
image2 = np.zeros((16, 16, abs(c)), dtype=np.uint8)
image3 = np.pad(
np.zeros((8, 8, abs(c)), dtype=np.uint8) + 255,
((4, 4), (4, 4), (0, 0)),
mode="constant",
constant_values=0
)
image1_small = np.zeros((8, 8, abs(c)), dtype=np.uint8) + 255
image2_small = np.zeros((8, 8, abs(c)), dtype=np.uint8)
image3_small = np.pad(
np.zeros((4, 4, abs(c)), dtype=np.uint8) + 255,
((2, 2), (2, 2), (0, 0)),
mode="constant",
constant_values=0
)
image1_large = np.zeros((32, 32, abs(c)), dtype=np.uint8) + 255
image2_large = np.zeros((32, 32, abs(c)), dtype=np.uint8)
image3_large = np.pad(
np.zeros((16, 16, abs(c)), dtype=np.uint8) + 255,
((8, 8), (8, 8), (0, 0)),
mode="constant",
constant_values=0
)
images = np.uint8([image1, image2, image3])
images_small = np.uint8([image1_small, image2_small, image3_small])
images_large = np.uint8([image1_large, image2_large, image3_large])
if c == -1:
images = images[:, :, 0]
images_small = images_small[:, :, 0]
images_large = images_large[:, :, 0]
interpolations = [None,
"nearest", "linear", "area", "cubic",
cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
for interpolation in interpolations:
for image in images:
image_observed = ia.imresize_single_image(image, (16, 16), interpolation=interpolation)
diff = np.abs(image.astype(np.int32) - image_observed.astype(np.int32))
assert np.sum(diff) == 0
for interpolation in interpolations:
for image, image_expected in zip(images, images_small):
image_observed = ia.imresize_single_image(image, (8, 8), interpolation=interpolation)
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
diff_fraction = np.sum(diff) / (image_observed.size * 255)
assert diff_fraction < 0.5
for interpolation in interpolations:
for image, image_expected in zip(images, images_large):
image_observed = ia.imresize_single_image(image, (32, 32), interpolation=interpolation)
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
diff_fraction = np.sum(diff) / (image_observed.size * 255)
assert diff_fraction < 0.5
def test_pad():
# -------
# uint8, int32
# -------
for dtype in [np.uint8, np.int32]:
arr = np.zeros((3, 3), dtype=dtype) + 255
arr_pad = ia.pad(arr)
assert arr_pad.shape == (3, 3)
assert arr_pad.dtype.type == dtype
assert np.array_equal(arr_pad, arr)
arr_pad = ia.pad(arr, top=1)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[0, :] == 0)
arr_pad = ia.pad(arr, right=1)
assert arr_pad.shape == (3, 4)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[:, -1] == 0)
arr_pad = ia.pad(arr, bottom=1)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[-1, :] == 0)
arr_pad = ia.pad(arr, left=1)
assert arr_pad.shape == (3, 4)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[:, 0] == 0)
arr_pad = ia.pad(arr, top=1, right=2, bottom=3, left=4)
assert arr_pad.shape == (3+(1+3), 3+(2+4))
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[0, :] == 0)
assert np.all(arr_pad[:, -2:] == 0)
assert np.all(arr_pad[-3:, :] == 0)
assert np.all(arr_pad[:, :4] == 0)
arr_pad = ia.pad(arr, top=1, cval=10)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[0, :] == 10)
arr = np.zeros((3, 3, 3), dtype=dtype) + 128
arr_pad = ia.pad(arr, top=1)
assert arr_pad.shape == (4, 3, 3)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[0, :, 0] == 0)
assert np.all(arr_pad[0, :, 1] == 0)
assert np.all(arr_pad[0, :, 2] == 0)
arr = np.zeros((3, 3), dtype=dtype) + 128
arr[1, 1] = 200
arr_pad = ia.pad(arr, top=1, mode="maximum")
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert arr_pad[0, 0] == 128
assert arr_pad[0, 1] == 200
assert arr_pad[0, 2] == 128
arr = np.zeros((3, 3), dtype=dtype)
arr_pad = ia.pad(arr, top=1, mode="constant", cval=123)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert arr_pad[0, 0] == 123
assert arr_pad[0, 1] == 123
assert arr_pad[0, 2] == 123
assert arr_pad[1, 0] == 0
arr = np.zeros((1, 1), dtype=dtype) + 100
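        # linear_ramp interpolates from cval at the outer border down to the array's edge value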
arr_pad = ia.pad(arr, top=4, mode="linear_ramp", cval=200)
assert arr_pad.shape == (5, 1)
assert arr_pad.dtype.type == dtype
assert arr_pad[0, 0] == 200
assert arr_pad[1, 0] == 175
assert arr_pad[2, 0] == 150
assert arr_pad[3, 0] == 125
assert arr_pad[4, 0] == 100
# -------
# float32, float64
# -------
for dtype in [np.float32, np.float64]:
arr = np.zeros((3, 3), dtype=dtype) + 1.0
arr_pad = ia.pad(arr)
assert arr_pad.shape == (3, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad, arr)
arr_pad = ia.pad(arr, top=1)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[0, :], dtype([0, 0, 0]))
arr_pad = ia.pad(arr, right=1)
assert arr_pad.shape == (3, 4)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[:, -1], dtype([0, 0, 0]))
arr_pad = ia.pad(arr, bottom=1)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[-1, :], dtype([0, 0, 0]))
arr_pad = ia.pad(arr, left=1)
assert arr_pad.shape == (3, 4)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[:, 0], dtype([0, 0, 0]))
arr_pad = ia.pad(arr, top=1, right=2, bottom=3, left=4)
assert arr_pad.shape == (3+(1+3), 3+(2+4))
assert arr_pad.dtype.type == dtype
assert 0 - 1e-6 < np.max(arr_pad[0, :]) < 0 + 1e-6
assert 0 - 1e-6 < np.max(arr_pad[:, -2:]) < 0 + 1e-6
        assert 0 - 1e-6 < np.max(arr_pad[-3:, :]) < 0 + 1e-6
assert 0 - 1e-6 < np.max(arr_pad[:, :4]) < 0 + 1e-6
arr_pad = ia.pad(arr, top=1, cval=0.2)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[0, :], dtype([0.2, 0.2, 0.2]))
arr = np.zeros((3, 3, 3), dtype=dtype) + 0.5
arr_pad = ia.pad(arr, top=1)
assert arr_pad.shape == (4, 3, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[0, :, 0], dtype([0, 0, 0]))
assert np.allclose(arr_pad[0, :, 1], dtype([0, 0, 0]))
assert np.allclose(arr_pad[0, :, 2], dtype([0, 0, 0]))
arr = np.zeros((3, 3), dtype=dtype) + 0.5
arr[1, 1] = 0.75
arr_pad = ia.pad(arr, top=1, mode="maximum")
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert 0.50 - 1e-6 < arr_pad[0, 0] < 0.50 + 1e-6
assert 0.75 - 1e-6 < arr_pad[0, 1] < 0.75 + 1e-6
assert 0.50 - 1e-6 < arr_pad[0, 2] < 0.50 + 1e-6
arr = np.zeros((3, 3), dtype=dtype)
arr_pad = ia.pad(arr, top=1, mode="constant", cval=0.4)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert 0.4 - 1e-6 < arr_pad[0, 0] < 0.4 + 1e-6
assert 0.4 - 1e-6 < arr_pad[0, 1] < 0.4 + 1e-6
assert 0.4 - 1e-6 < arr_pad[0, 2] < 0.4 + 1e-6
assert 0.0 - 1e-6 < arr_pad[1, 0] < 0.0 + 1e-6
arr = np.zeros((1, 1), dtype=dtype) + 0.6
arr_pad = ia.pad(arr, top=4, mode="linear_ramp", cval=1.0)
assert arr_pad.shape == (5, 1)
assert arr_pad.dtype.type == dtype
assert 1.0 - 1e-6 < arr_pad[0, 0] < 1.0 + 1e-6
assert 0.9 - 1e-6 < arr_pad[1, 0] < 0.9 + 1e-6
assert 0.8 - 1e-6 < arr_pad[2, 0] < 0.8 + 1e-6
assert 0.7 - 1e-6 < arr_pad[3, 0] < 0.7 + 1e-6
assert 0.6 - 1e-6 < arr_pad[4, 0] < 0.6 + 1e-6
def test_compute_paddings_for_aspect_ratio():
arr = np.zeros((4, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 0
assert right == 0
assert bottom == 0
assert left == 0
arr = np.zeros((1, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 2
assert right == 0
assert bottom == 1
assert left == 0
arr = np.zeros((4, 1), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 0
assert right == 2
assert bottom == 0
assert left == 1
arr = np.zeros((2, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 1
assert right == 0
assert bottom == 1
assert left == 0
arr = np.zeros((4, 2), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 0
assert right == 1
assert bottom == 0
assert left == 1
arr = np.zeros((4, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 0.5)
assert top == 2
assert right == 0
assert bottom == 2
assert left == 0
arr = np.zeros((4, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 2.0)
assert top == 0
assert right == 2
assert bottom == 0
assert left == 2
def test_pad_to_aspect_ratio():
for dtype in [np.uint8, np.int32, np.float32]:
# aspect_ratio = 1.0
arr = np.zeros((4, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
arr = np.zeros((1, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
arr = np.zeros((4, 1), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
arr = np.zeros((2, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
arr = np.zeros((4, 2), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
# aspect_ratio != 1.0
arr = np.zeros((4, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
arr = np.zeros((4, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 0.5)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 8
assert arr_pad.shape[1] == 4
# 3d arr
arr = np.zeros((4, 2, 3), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
assert arr_pad.shape[2] == 3
# cval
arr = np.zeros((4, 4), dtype=np.uint8) + 128
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0)
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert np.max(arr_pad[:, 0:2]) == 0
assert np.max(arr_pad[:, -2:]) == 0
assert np.max(arr_pad[:, 2:-2]) == 128
arr = np.zeros((4, 4), dtype=np.uint8) + 128
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=10)
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert np.max(arr_pad[:, 0:2]) == 10
assert np.max(arr_pad[:, -2:]) == 10
assert np.max(arr_pad[:, 2:-2]) == 128
arr = np.zeros((4, 4), dtype=np.float32) + 0.5
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=0.0)
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert 0 - 1e-6 <= np.max(arr_pad[:, 0:2]) <= 0 + 1e-6
assert 0 - 1e-6 <= np.max(arr_pad[:, -2:]) <= 0 + 1e-6
assert 0.5 - 1e-6 <= np.max(arr_pad[:, 2:-2]) <= 0.5 + 1e-6
arr = np.zeros((4, 4), dtype=np.float32) + 0.5
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=0.1)
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert 0.1 - 1e-6 <= np.max(arr_pad[:, 0:2]) <= 0.1 + 1e-6
assert 0.1 - 1e-6 <= np.max(arr_pad[:, -2:]) <= 0.1 + 1e-6
assert 0.5 - 1e-6 <= np.max(arr_pad[:, 2:-2]) <= 0.5 + 1e-6
# mode
arr = np.zeros((4, 4), dtype=np.uint8) + 128
arr[1:3, 1:3] = 200
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, mode="maximum")
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert np.max(arr_pad[0:1, 0:2]) == 128
assert np.max(arr_pad[1:3, 0:2]) == 200
assert np.max(arr_pad[3:, 0:2]) == 128
assert np.max(arr_pad[0:1, -2:]) == 128
assert np.max(arr_pad[1:3, -2:]) == 200
assert np.max(arr_pad[3:, -2:]) == 128
# TODO add tests for return_pad_values=True
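    # Sketch for the TODO above (assumptions, not verified against the implementation:
    # the keyword is named return_pad_values and the function then returns a tuple of
    # (padded_array, (top, right, bottom, left))).
    arr = np.zeros((4, 4), dtype=np.uint8)
    arr_pad, pad_values = ia.pad_to_aspect_ratio(arr, 2.0, return_pad_values=True)
    assert arr_pad.shape == (4, 8)
    assert tuple(pad_values) == (0, 2, 0, 2)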
def test_pool():
# basic functionality with uint8, int32, float32
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
arr = np.int32([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
arr = np.float32([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert np.allclose(arr_pooled[0, 0], np.average([0, 1, 4, 5]))
assert np.allclose(arr_pooled[0, 1], np.average([2, 3, 6, 7]))
assert np.allclose(arr_pooled[1, 0], np.average([8, 9, 12, 13]))
assert np.allclose(arr_pooled[1, 1], np.average([10, 11, 14, 15]))
# preserve_dtype off
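    # with preserve_dtype=False the result keeps np.average's float64 output instead of being cast back to uint8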
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.average, preserve_dtype=False)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == np.float64
assert np.allclose(arr_pooled[0, 0], np.average([0, 1, 4, 5]))
assert np.allclose(arr_pooled[0, 1], np.average([2, 3, 6, 7]))
assert np.allclose(arr_pooled[1, 0], np.average([8, 9, 12, 13]))
assert np.allclose(arr_pooled[1, 1], np.average([10, 11, 14, 15]))
# maximum function
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.max)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.max([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.max([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.max([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.max([10, 11, 14, 15]))
# 3d array
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr = np.tile(arr[..., np.newaxis], (1, 1, 3))
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2, 3)
assert np.array_equal(arr_pooled[..., 0], arr_pooled[..., 1])
assert np.array_equal(arr_pooled[..., 1], arr_pooled[..., 2])
arr_pooled = arr_pooled[..., 0]
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
# block_size per axis
arr = np.float32([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, (2, 1), np.average)
assert arr_pooled.shape == (2, 4)
assert arr_pooled.dtype == arr.dtype.type
assert np.allclose(arr_pooled[0, 0], np.average([0, 4]))
assert np.allclose(arr_pooled[0, 1], np.average([1, 5]))
assert np.allclose(arr_pooled[0, 2], np.average([2, 6]))
assert np.allclose(arr_pooled[0, 3], np.average([3, 7]))
assert np.allclose(arr_pooled[1, 0], np.average([8, 12]))
assert np.allclose(arr_pooled[1, 1], np.average([9, 13]))
assert np.allclose(arr_pooled[1, 2], np.average([10, 14]))
assert np.allclose(arr_pooled[1, 3], np.average([11, 15]))
# cval
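    # a 3x3 input with block size 2 is implicitly padded to 4x4 using the default cval=0, so border cells average in zeros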
arr = np.uint8([
[0, 1, 2],
[4, 5, 6],
[8, 9, 10]
])
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 0, 6, 0]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 0, 0]))
assert arr_pooled[1, 1] == int(np.average([10, 0, 0, 0]))
arr = np.uint8([
[0, 1],
[4, 5]
])
arr_pooled = ia.pool(arr, (4, 1), np.average)
assert arr_pooled.shape == (1, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 4, 0, 0]))
assert arr_pooled[0, 1] == int(np.average([1, 5, 0, 0]))
arr = np.uint8([
[0, 1, 2],
[4, 5, 6],
[8, 9, 10]
])
arr_pooled = ia.pool(arr, 2, np.average, cval=22)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 22, 6, 22]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 22, 22]))
assert arr_pooled[1, 1] == int(np.average([10, 22, 22, 22]))
def test_avg_pool():
# very basic test, as avg_pool() just calls pool(), which is tested in test_pool()
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.avg_pool(arr, 2)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
def test_max_pool():
    # very basic test, as max_pool() just calls pool(), which is tested in test_pool()
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.max_pool(arr, 2)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.max([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.max([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.max([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.max([10, 11, 14, 15]))
def test_draw_grid():
image = np.zeros((2, 2, 3), dtype=np.uint8)
image[0, 0] = 64
image[0, 1] = 128
image[1, 0] = 192
    image[1, 1] = 255
grid = ia.draw_grid([image], rows=1, cols=1)
assert np.array_equal(grid, image)
grid = ia.draw_grid(np.uint8([image]), rows=1, cols=1)
assert np.array_equal(grid, image)
grid = ia.draw_grid([image, image, image, image], rows=2, cols=2)
expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, expected)
grid = ia.draw_grid([image, image], rows=1, cols=2)
expected = np.hstack([image, image])
assert np.array_equal(grid, expected)
grid = ia.draw_grid([image, image, image, image], rows=2, cols=None)
expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, expected)
grid = ia.draw_grid([image, image, image, image], rows=None, cols=2)
expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, expected)
grid = ia.draw_grid([image, image, image, image], rows=None, cols=None)
expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, expected)
def test_Keypoint():
eps = 1e-8
# x/y/x_int/y_int
kp = ia.Keypoint(y=1, x=2)
assert kp.y == 1
assert kp.x == 2
assert kp.y_int == 1
assert kp.x_int == 2
kp = ia.Keypoint(y=1.1, x=2.7)
assert 1.1 - eps < kp.y < 1.1 + eps
assert 2.7 - eps < kp.x < 2.7 + eps
assert kp.y_int == 1
assert kp.x_int == 3
# project
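    # project() rescales the keypoint from the old image shape (h, w) to the new one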
kp = ia.Keypoint(y=1, x=2)
kp2 = kp.project((10, 10), (10, 10))
assert kp2.y == 1
assert kp2.x == 2
kp2 = kp.project((10, 10), (20, 10))
assert kp2.y == 2
assert kp2.x == 2
kp2 = kp.project((10, 10), (10, 20))
assert kp2.y == 1
assert kp2.x == 4
kp2 = kp.project((10, 10), (20, 20))
assert kp2.y == 2
assert kp2.x == 4
# shift
kp = ia.Keypoint(y=1, x=2)
kp2 = kp.shift(y=1)
assert kp2.y == 2
assert kp2.x == 2
kp2 = kp.shift(y=-1)
assert kp2.y == 0
assert kp2.x == 2
kp2 = kp.shift(x=1)
assert kp2.y == 1
assert kp2.x == 3
kp2 = kp.shift(x=-1)
assert kp2.y == 1
assert kp2.x == 1
kp2 = kp.shift(y=1, x=2)
assert kp2.y == 2
assert kp2.x == 4
    # __repr__() / __str__()
kp = ia.Keypoint(y=1, x=2)
assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.00000000, y=1.00000000)"
kp = ia.Keypoint(y=1.2, x=2.7)
assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.70000000, y=1.20000000)"
def test_KeypointsOnImage():
eps = 1e-8
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
# height/width
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(10, 20, 3))
assert kpi.height == 10
assert kpi.width == 20
# image instead of shape
kpi = ia.KeypointsOnImage(keypoints=kps, shape=np.zeros((10, 20, 3), dtype=np.uint8))
assert kpi.shape == (10, 20, 3)
# on()
kpi2 = kpi.on((10, 20, 3))
assert all([kp_i.x == kp_j.x and kp_i.y == kp_j.y for kp_i, kp_j in zip(kpi.keypoints, kpi2.keypoints)])
kpi2 = kpi.on((20, 40, 3))
assert kpi2.keypoints[0].x == 2
assert kpi2.keypoints[0].y == 4
assert kpi2.keypoints[1].x == 6
assert kpi2.keypoints[1].y == 8
kpi2 = kpi.on(np.zeros((20, 40, 3), dtype=np.uint8))
assert kpi2.keypoints[0].x == 2
assert kpi2.keypoints[0].y == 4
assert kpi2.keypoints[1].x == 6
assert kpi2.keypoints[1].y == 8
# draw_on_image
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
kps_mask = np.zeros(image.shape[0:2], dtype=np.bool)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False)
kps_mask_size3 = np.copy(kps_mask)
kps_mask_size3[2-1:2+1+1, 1-1:1+1+1] = 1
kps_mask_size3[4-1:4+1+1, 3-1:3+1+1] = 1
assert np.all(image_kps[kps_mask_size3] == [0, 255, 0])
assert np.all(image_kps[~kps_mask_size3] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=[0, 0, 255], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 0, 255])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=255, size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [255, 255, 255])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image2 = np.copy(image)
image_kps = kpi.draw_on_image(image2, color=[0, 255, 0], size=1, copy=False, raise_if_out_of_image=False)
assert np.all(image2 == image_kps)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
assert np.all(image2[kps_mask] == [0, 255, 0])
assert np.all(image2[~kps_mask] == [10, 10, 10])
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
kps_mask = np.zeros(image.shape[0:2], dtype=np.bool)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
got_exception = False
try:
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
except Exception:
got_exception = True
assert got_exception
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=5, y=5)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
kps_mask = np.zeros(image.shape[0:2], dtype=np.bool)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
got_exception = False
try:
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
except Exception:
got_exception = True
assert got_exception
# shift
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
kpi2 = kpi.shift(x=0, y=0)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(x=1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(x=-1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x - 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x - 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(y=1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x
assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 1
assert kpi2.keypoints[1].x == kpi.keypoints[1].x
assert kpi2.keypoints[1].y == kpi.keypoints[1].y + 1
kpi2 = kpi.shift(y=-1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x
assert kpi2.keypoints[0].y == kpi.keypoints[0].y - 1
assert kpi2.keypoints[1].x == kpi.keypoints[1].x
assert kpi2.keypoints[1].y == kpi.keypoints[1].y - 1
kpi2 = kpi.shift(x=1, y=2)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 2
assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y + 2
# get_coords_array
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
observed = kpi.get_coords_array()
expected = np.float32([
[1, 2],
[3, 4]
])
assert np.allclose(observed, expected)
# from_coords_array
arr = np.float32([
[1, 2],
[3, 4]
])
kpi = ia.KeypointsOnImage.from_coords_array(arr, shape=(5, 5, 3))
assert 1 - eps < kpi.keypoints[0].x < 1 + eps
assert 2 - eps < kpi.keypoints[0].y < 2 + eps
assert 3 - eps < kpi.keypoints[1].x < 3 + eps
assert 4 - eps < kpi.keypoints[1].y < 4 + eps
# to_keypoint_image
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
image = kpi.to_keypoint_image(size=1)
image_size3 = kpi.to_keypoint_image(size=3)
kps_mask = np.zeros((5, 5, 2), dtype=np.bool)
kps_mask[2, 1, 0] = 1
kps_mask[4, 3, 1] = 1
kps_mask_size3 = np.zeros_like(kps_mask)
kps_mask_size3[2-1:2+1+1, 1-1:1+1+1, 0] = 1
kps_mask_size3[4-1:4+1+1, 3-1:3+1+1, 1] = 1
assert np.all(image[kps_mask] == 255)
assert np.all(image[~kps_mask] == 0)
assert np.all(image_size3[kps_mask] == 255)
assert np.all(image_size3[kps_mask_size3] >= 128)
assert np.all(image_size3[~kps_mask_size3] == 0)
# from_keypoint_image()
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 255
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 2
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[1].y == 4
assert kpi2.keypoints[1].x == 3
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords={"x": -1, "y": -2}, threshold=20,
nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 2
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[1].y == -2
assert kpi2.keypoints[1].x == -1
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords=(-1, -2), threshold=20,
nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 2
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[1].y == -2
assert kpi2.keypoints[1].x == -1
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords=None, threshold=20, nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
got_exception = False
try:
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
_ = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords="exception-please", threshold=20,
nb_channels=3)
except Exception as exc:
assert "Expected if_not_found_coords to be" in str(exc)
got_exception = True
assert got_exception
# copy()
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
kpi2 = kpi.copy()
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
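    # copy() is shallow: the Keypoint objects are shared, so the mutation below is visible in the copy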
kps[0].x = 100
assert kpi2.keypoints[0].x == 100
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
# deepcopy()
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
kpi2 = kpi.deepcopy()
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
kps[0].x = 100
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
# repr/str
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
expected = "KeypointsOnImage([Keypoint(x=1.00000000, y=2.00000000), Keypoint(x=3.00000000, y=4.00000000)], " \
+ "shape=(5, 5, 3))"
assert kpi.__repr__() == kpi.__str__() == expected
def test_BoundingBox():
eps = 1e-8
# properties with ints
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 30
assert bb.x2_int == 40
assert bb.width == 40 - 20
assert bb.height == 30 - 10
center_x = bb.x1 + (bb.x2 - bb.x1)/2
center_y = bb.y1 + (bb.y2 - bb.y1)/2
assert center_x - eps < bb.center_x < center_x + eps
assert center_y - eps < bb.center_y < center_y + eps
# wrong order of y1/y2, x1/x2
bb = ia.BoundingBox(y1=30, x1=40, y2=10, x2=20, label=None)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 30
assert bb.x2_int == 40
# properties with floats
bb = ia.BoundingBox(y1=10.1, x1=20.1, y2=30.9, x2=40.9, label=None)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 31
assert bb.x2_int == 41
assert bb.width == 40.9 - 20.1
assert bb.height == 30.9 - 10.1
center_x = bb.x1 + (bb.x2 - bb.x1)/2
center_y = bb.y1 + (bb.y2 - bb.y1)/2
assert center_x - eps < bb.center_x < center_x + eps
assert center_y - eps < bb.center_y < center_y + eps
# area
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.area == (30-10) * (40-20)
# project
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.project((10, 10), (10, 10))
assert 10 - eps < bb2.y1 < 10 + eps
assert 20 - eps < bb2.x1 < 20 + eps
assert 30 - eps < bb2.y2 < 30 + eps
assert 40 - eps < bb2.x2 < 40 + eps
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.project((10, 10), (20, 20))
assert 10*2 - eps < bb2.y1 < 10*2 + eps
assert 20*2 - eps < bb2.x1 < 20*2 + eps
assert 30*2 - eps < bb2.y2 < 30*2 + eps
assert 40*2 - eps < bb2.x2 < 40*2 + eps
bb2 = bb.project((10, 10), (5, 5))
assert 10*0.5 - eps < bb2.y1 < 10*0.5 + eps
assert 20*0.5 - eps < bb2.x1 < 20*0.5 + eps
assert 30*0.5 - eps < bb2.y2 < 30*0.5 + eps
assert 40*0.5 - eps < bb2.x2 < 40*0.5 + eps
bb2 = bb.project((10, 10), (10, 20))
assert 10*1 - eps < bb2.y1 < 10*1 + eps
assert 20*2 - eps < bb2.x1 < 20*2 + eps
assert 30*1 - eps < bb2.y2 < 30*1 + eps
assert 40*2 - eps < bb2.x2 < 40*2 + eps
bb2 = bb.project((10, 10), (20, 10))
assert 10*2 - eps < bb2.y1 < 10*2 + eps
assert 20*1 - eps < bb2.x1 < 20*1 + eps
assert 30*2 - eps < bb2.y2 < 30*2 + eps
assert 40*1 - eps < bb2.x2 < 40*1 + eps
# extend
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.extend(all_sides=1)
assert bb2.y1 == 10-1
assert bb2.y2 == 30+1
assert bb2.x1 == 20-1
assert bb2.x2 == 40+1
bb2 = bb.extend(all_sides=-1)
assert bb2.y1 == 10-(-1)
assert bb2.y2 == 30+(-1)
assert bb2.x1 == 20-(-1)
assert bb2.x2 == 40+(-1)
bb2 = bb.extend(top=1)
assert bb2.y1 == 10-1
assert bb2.y2 == 30+0
assert bb2.x1 == 20-0
assert bb2.x2 == 40+0
bb2 = bb.extend(right=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+0
assert bb2.x1 == 20-0
assert bb2.x2 == 40+1
bb2 = bb.extend(bottom=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+1
assert bb2.x1 == 20-0
assert bb2.x2 == 40+0
bb2 = bb.extend(left=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+0
assert bb2.x1 == 20-1
assert bb2.x2 == 40+0
# intersection
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59, label=None)
bb_inter = bb1.intersection(bb2)
assert bb_inter.x1 == 39
assert bb_inter.x2 == 40
assert bb_inter.y1 == 10
assert bb_inter.y2 == 30
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61, label=None)
bb_inter = bb1.intersection(bb2, default=False)
assert bb_inter is False
# union
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59, label=None)
bb_union = bb1.union(bb2)
assert bb_union.x1 == 20
assert bb_union.x2 == 59
assert bb_union.y1 == 10
assert bb_union.y2 == 30
# iou
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
iou = bb1.iou(bb2)
assert 1.0 - eps < iou < 1.0 + eps
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61, label=None)
iou = bb1.iou(bb2)
assert 0.0 - eps < iou < 0.0 + eps
bb1 = ia.BoundingBox(y1=10, x1=10, y2=20, x2=20, label=None)
bb2 = ia.BoundingBox(y1=15, x1=15, y2=25, x2=25, label=None)
iou = bb1.iou(bb2)
area_union = 10 * 10 + 10 * 10 - 5 * 5
area_intersection = 5 * 5
iou_expected = area_intersection / area_union
assert iou_expected - eps < iou < iou_expected + eps
# is_fully_within_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_fully_within_image((100, 100, 3)) is True
assert bb.is_fully_within_image((20, 100, 3)) is False
assert bb.is_fully_within_image((100, 30, 3)) is False
assert bb.is_fully_within_image((1, 1, 3)) is False
# is_partly_within_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_partly_within_image((100, 100, 3)) is True
assert bb.is_partly_within_image((20, 100, 3)) is True
assert bb.is_partly_within_image((100, 30, 3)) is True
assert bb.is_partly_within_image((1, 1, 3)) is False
# is_out_of_image()
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_out_of_image((100, 100, 3), partly=True, fully=True) is False
assert bb.is_out_of_image((100, 100, 3), partly=False, fully=True) is False
assert bb.is_out_of_image((100, 100, 3), partly=True, fully=False) is False
assert bb.is_out_of_image((20, 100, 3), partly=True, fully=True) is True
assert bb.is_out_of_image((20, 100, 3), partly=False, fully=True) is False
assert bb.is_out_of_image((20, 100, 3), partly=True, fully=False) is True
assert bb.is_out_of_image((100, 30, 3), partly=True, fully=True) is True
assert bb.is_out_of_image((100, 30, 3), partly=False, fully=True) is False
assert bb.is_out_of_image((100, 30, 3), partly=True, fully=False) is True
assert bb.is_out_of_image((1, 1, 3), partly=True, fully=True) is True
assert bb.is_out_of_image((1, 1, 3), partly=False, fully=True) is True
assert bb.is_out_of_image((1, 1, 3), partly=True, fully=False) is False
# cut_out_of_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb_cut = bb.cut_out_of_image((100, 100, 3))
eps = np.finfo(np.float32).eps
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image(np.zeros((100, 100, 3), dtype=np.uint8))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image((20, 100, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert 20 - 2*eps < bb_cut.y2 < 20
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image((100, 30, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert 30 - 2*eps < bb_cut.x2 < 30
# shift
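    # shift amounts are given per side: top/left push the box down/right, bottom/right push it up/left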
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb_top = bb.shift(top=0)
bb_right = bb.shift(right=0)
bb_bottom = bb.shift(bottom=0)
bb_left = bb.shift(left=0)
assert bb_top.y1 == 10
assert bb_top.x1 == 20
assert bb_top.y2 == 30
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20
assert bb_right.y2 == 30
assert bb_right.x2 == 40
assert bb_bottom.y1 == 10
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20
assert bb_left.y2 == 30
assert bb_left.x2 == 40
bb_top = bb.shift(top=1)
bb_right = bb.shift(right=1)
bb_bottom = bb.shift(bottom=1)
bb_left = bb.shift(left=1)
assert bb_top.y1 == 10+1
assert bb_top.x1 == 20
assert bb_top.y2 == 30+1
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20-1
assert bb_right.y2 == 30
assert bb_right.x2 == 40-1
assert bb_bottom.y1 == 10-1
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30-1
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20+1
assert bb_left.y2 == 30
assert bb_left.x2 == 40+1
bb_top = bb.shift(top=-1)
bb_right = bb.shift(right=-1)
bb_bottom = bb.shift(bottom=-1)
bb_left = bb.shift(left=-1)
assert bb_top.y1 == 10-1
assert bb_top.x1 == 20
assert bb_top.y2 == 30-1
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20+1
assert bb_right.y2 == 30
assert bb_right.x2 == 40+1
assert bb_bottom.y1 == 10+1
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30+1
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20-1
assert bb_left.y2 == 30
assert bb_left.x2 == 40-1
bb_mix = bb.shift(top=1, bottom=2, left=3, right=4)
assert bb_mix.y1 == 10+1-2
assert bb_mix.x1 == 20+3-4
    assert bb_mix.y2 == 30+1-2
    assert bb_mix.x2 == 40+3-4
# draw_on_image()
image = np.zeros((10, 10, 3), dtype=np.uint8)
bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[1:3+1, 1] = True
bb_mask[1:3+1, 3] = True
bb_mask[1, 1:3+1] = True
bb_mask[3, 1:3+1] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
assert np.all(image == 0)
image_bb = bb.draw_on_image(image, color=[255, 0, 0], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 0, 0])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
image_bb = bb.draw_on_image(image, color=128, alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [128, 128, 128])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
image_bb = bb.draw_on_image(image+100, color=[200, 200, 200], alpha=0.5, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [150, 150, 150])
assert np.all(image_bb[~bb_mask] == [100, 100, 100])
image_bb = bb.draw_on_image((image+100).astype(np.float32), color=[200, 200, 200], alpha=0.5, thickness=1,
copy=True, raise_if_out_of_image=False)
assert np.sum(np.abs((image_bb - [150, 150, 150])[bb_mask])) < 0.1
assert np.sum(np.abs((image_bb - [100, 100, 100])[~bb_mask])) < 0.1
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=False,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
assert np.all(image[bb_mask] == [255, 255, 255])
assert np.all(image[~bb_mask] == [0, 0, 0])
image = np.zeros_like(image)
bb = ia.BoundingBox(y1=-1, x1=-1, y2=2, x2=2, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[2, 0:3] = True
bb_mask[0:3, 2] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[0:5, 0:5] = True
bb_mask[2, 2] = False
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=2, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[0:1+1, 1] = True
bb_mask[1, 0:1+1] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1, label=None)
got_exception = False
try:
_ = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=True)
except Exception:
got_exception = True
assert got_exception is False
bb = ia.BoundingBox(y1=-5, x1=-5, y2=-1, x2=-1, label=None)
got_exception = False
try:
_ = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=True)
except Exception:
got_exception = True
assert got_exception is True
# extract_from_image()
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3, :])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
image_pad = np.pad(image, ((0, 1), (0, 1), (0, 0)), mode="constant", constant_values=0)
bb = ia.BoundingBox(y1=8, y2=11, x1=8, x2=11, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image_pad[8:11, 8:11, :])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
image_pad = np.pad(image, ((1, 0), (1, 0), (0, 0)), mode="constant", constant_values=0)
bb = ia.BoundingBox(y1=-1, y2=3, x1=-1, x2=4, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image_pad[0:4, 0:5, :])
# to_keypoints()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
kps = bb.to_keypoints()
assert kps[0].y == 1
assert kps[0].x == 1
assert kps[1].y == 1
assert kps[1].x == 3
assert kps[2].y == 3
assert kps[2].x == 3
assert kps[3].y == 3
assert kps[3].x == 1
# copy()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label="test")
bb2 = bb.copy()
assert bb2.y1 == 1
assert bb2.y2 == 3
assert bb2.x1 == 1
assert bb2.x2 == 3
assert bb2.label == "test"
bb2 = bb.copy(y1=10, x1=20, y2=30, x2=40, label="test2")
assert bb2.y1 == 10
assert bb2.x1 == 20
assert bb2.y2 == 30
assert bb2.x2 == 40
assert bb2.label == "test2"
# deepcopy()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=["test"])
bb2 = bb.deepcopy()
assert bb2.y1 == 1
assert bb2.y2 == 3
assert bb2.x1 == 1
assert bb2.x2 == 3
assert bb2.label[0] == "test"
    # __repr__()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
assert bb.__repr__() == "BoundingBox(x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, label=None)"
    # __str__()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
assert bb.__str__() == "BoundingBox(x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, label=None)"
def test_BoundingBoxesOnImage():
reseed()
# test height/width
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
assert bbsoi.height == 40
assert bbsoi.width == 50
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=np.zeros((40, 50, 3), dtype=np.uint8))
assert bbsoi.height == 40
assert bbsoi.width == 50
# on()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=np.zeros((40, 50, 3), dtype=np.uint8))
bbsoi_projected = bbsoi.on((40, 50))
assert bbsoi_projected.bounding_boxes[0].y1 == 10
assert bbsoi_projected.bounding_boxes[0].x1 == 20
assert bbsoi_projected.bounding_boxes[0].y2 == 30
assert bbsoi_projected.bounding_boxes[0].x2 == 40
assert bbsoi_projected.bounding_boxes[1].y1 == 15
assert bbsoi_projected.bounding_boxes[1].x1 == 25
assert bbsoi_projected.bounding_boxes[1].y2 == 35
assert bbsoi_projected.bounding_boxes[1].x2 == 45
bbsoi_projected = bbsoi.on((40*2, 50*2, 3))
assert bbsoi_projected.bounding_boxes[0].y1 == 10*2
assert bbsoi_projected.bounding_boxes[0].x1 == 20*2
assert bbsoi_projected.bounding_boxes[0].y2 == 30*2
assert bbsoi_projected.bounding_boxes[0].x2 == 40*2
assert bbsoi_projected.bounding_boxes[1].y1 == 15*2
assert bbsoi_projected.bounding_boxes[1].x1 == 25*2
assert bbsoi_projected.bounding_boxes[1].y2 == 35*2
assert bbsoi_projected.bounding_boxes[1].x2 == 45*2
bbsoi_projected = bbsoi.on(np.zeros((40*2, 50*2, 3), dtype=np.uint8))
assert bbsoi_projected.bounding_boxes[0].y1 == 10*2
assert bbsoi_projected.bounding_boxes[0].x1 == 20*2
assert bbsoi_projected.bounding_boxes[0].y2 == 30*2
assert bbsoi_projected.bounding_boxes[0].x2 == 40*2
assert bbsoi_projected.bounding_boxes[1].y1 == 15*2
assert bbsoi_projected.bounding_boxes[1].x1 == 25*2
assert bbsoi_projected.bounding_boxes[1].y2 == 35*2
assert bbsoi_projected.bounding_boxes[1].x2 == 45*2
# draw_on_image()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
image = bbsoi.draw_on_image(np.zeros(bbsoi.shape, dtype=np.uint8), color=[0, 255, 0], alpha=1.0, thickness=1,
copy=True, raise_if_out_of_image=False)
assert np.all(image[10-1, 20-1, :] == [0, 0, 0])
assert np.all(image[10-1, 20-0, :] == [0, 0, 0])
assert np.all(image[10-0, 20-1, :] == [0, 0, 0])
assert np.all(image[10-0, 20-0, :] == [0, 255, 0])
assert np.all(image[10+1, 20+1, :] == [0, 0, 0])
assert np.all(image[30-1, 40-1, :] == [0, 0, 0])
assert np.all(image[30+1, 40-0, :] == [0, 0, 0])
assert np.all(image[30+0, 40+1, :] == [0, 0, 0])
assert np.all(image[30+0, 40+0, :] == [0, 255, 0])
assert np.all(image[30+1, 40+1, :] == [0, 0, 0])
assert np.all(image[15-1, 25-1, :] == [0, 0, 0])
assert np.all(image[15-1, 25-0, :] == [0, 0, 0])
assert np.all(image[15-0, 25-1, :] == [0, 0, 0])
assert np.all(image[15-0, 25-0, :] == [0, 255, 0])
assert np.all(image[15+1, 25+1, :] == [0, 0, 0])
assert np.all(image[35-1, 45-1, :] == [0, 0, 0])
assert np.all(image[35+1, 45+0, :] == [0, 0, 0])
assert np.all(image[35+0, 45+1, :] == [0, 0, 0])
assert np.all(image[35+0, 45+0, :] == [0, 255, 0])
assert np.all(image[35+1, 45+1, :] == [0, 0, 0])
# remove_out_of_image()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_slim = bbsoi.remove_out_of_image(fully=True, partly=True)
assert len(bbsoi_slim.bounding_boxes) == 1
assert bbsoi_slim.bounding_boxes[0] == bb1
# cut_out_of_image()
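    # cut_out_of_image() clips coordinates to just inside the image borders, hence the eps-based checks below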
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
eps = np.finfo(np.float32).eps
bbsoi_cut = bbsoi.cut_out_of_image()
assert len(bbsoi_cut.bounding_boxes) == 2
assert bbsoi_cut.bounding_boxes[0].y1 == 10
assert bbsoi_cut.bounding_boxes[0].x1 == 20
assert bbsoi_cut.bounding_boxes[0].y2 == 30
assert bbsoi_cut.bounding_boxes[0].x2 == 40
assert bbsoi_cut.bounding_boxes[1].y1 == 15
assert bbsoi_cut.bounding_boxes[1].x1 == 25
assert bbsoi_cut.bounding_boxes[1].y2 == 35
assert 50 - 2*eps < bbsoi_cut.bounding_boxes[1].x2 < 50
# shift()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_shifted = bbsoi.shift(right=1)
    assert len(bbsoi_shifted.bounding_boxes) == 2
assert bbsoi_shifted.bounding_boxes[0].y1 == 10
assert bbsoi_shifted.bounding_boxes[0].x1 == 20 - 1
assert bbsoi_shifted.bounding_boxes[0].y2 == 30
assert bbsoi_shifted.bounding_boxes[0].x2 == 40 - 1
assert bbsoi_shifted.bounding_boxes[1].y1 == 15
assert bbsoi_shifted.bounding_boxes[1].x1 == 25 - 1
assert bbsoi_shifted.bounding_boxes[1].y2 == 35
assert bbsoi_shifted.bounding_boxes[1].x2 == 51 - 1
# copy()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_copy = bbsoi.copy()
assert len(bbsoi.bounding_boxes) == 2
assert bbsoi_copy.bounding_boxes[0].y1 == 10
assert bbsoi_copy.bounding_boxes[0].x1 == 20
assert bbsoi_copy.bounding_boxes[0].y2 == 30
assert bbsoi_copy.bounding_boxes[0].x2 == 40
assert bbsoi_copy.bounding_boxes[1].y1 == 15
assert bbsoi_copy.bounding_boxes[1].x1 == 25
assert bbsoi_copy.bounding_boxes[1].y2 == 35
assert bbsoi_copy.bounding_boxes[1].x2 == 51
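    # copy() is shallow: the BoundingBox objects are shared, so mutating the original is visible in the copy
    # (contrast with deepcopy() below, which is independent)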
bbsoi.bounding_boxes[0].y1 = 0
assert bbsoi_copy.bounding_boxes[0].y1 == 0
# deepcopy()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_copy = bbsoi.deepcopy()
assert len(bbsoi.bounding_boxes) == 2
assert bbsoi_copy.bounding_boxes[0].y1 == 10
assert bbsoi_copy.bounding_boxes[0].x1 == 20
assert bbsoi_copy.bounding_boxes[0].y2 == 30
assert bbsoi_copy.bounding_boxes[0].x2 == 40
assert bbsoi_copy.bounding_boxes[1].y1 == 15
assert bbsoi_copy.bounding_boxes[1].x1 == 25
assert bbsoi_copy.bounding_boxes[1].y2 == 35
assert bbsoi_copy.bounding_boxes[1].x2 == 51
bbsoi.bounding_boxes[0].y1 = 0
assert bbsoi_copy.bounding_boxes[0].y1 == 10
# repr() / str()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bb1_expected = "BoundingBox(x1=20.0000, y1=10.0000, x2=40.0000, y2=30.0000, label=None)"
bb2_expected = "BoundingBox(x1=25.0000, y1=15.0000, x2=51.0000, y2=35.0000, label=None)"
expected = "BoundingBoxesOnImage([%s, %s], shape=(40, 50, 3))" % (bb1_expected, bb2_expected)
assert bbsoi.__repr__() == bbsoi.__str__() == expected
def test_HeatmapsOnImage_draw():
heatmaps_arr = np.float32([
[0.5, 0.0, 0.0, 0.5],
[0.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 0.0],
[0.5, 0.0, 0.0, 0.5],
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_drawn = heatmaps.draw()[0]
assert heatmaps_drawn.shape == (4, 4, 3)
v1 = heatmaps_drawn[0, 1]
v2 = heatmaps_drawn[0, 0]
v3 = heatmaps_drawn[1, 1]
for y, x in [(0, 1), (0, 2), (1, 0), (1, 3), (2, 0), (2, 3), (3, 1), (3, 2)]:
assert np.allclose(heatmaps_drawn[y, x], v1)
for y, x in [(0, 0), (0, 3), (3, 0), (3, 3)]:
assert np.allclose(heatmaps_drawn[y, x], v2)
for y, x in [(1, 1), (1, 2), (2, 1), (2, 2)]:
assert np.allclose(heatmaps_drawn[y, x], v3)
# size differs from heatmap array size
heatmaps_arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
heatmaps_drawn = heatmaps.draw(size=(4, 4))[0]
assert heatmaps_drawn.shape == (4, 4, 3)
v1 = heatmaps_drawn[0, 0]
v2 = heatmaps_drawn[0, -1]
for y in range(4):
for x in range(2):
assert np.allclose(heatmaps_drawn[y, x], v1)
for y in range(4):
for x in range(2, 4):
assert np.allclose(heatmaps_drawn[y, x], v2)
def test_HeatmapsOnImage_draw_on_image():
heatmaps_arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
image = np.uint8([
[0, 0, 0, 255],
[0, 0, 0, 255],
[0, 0, 0, 255],
[0, 0, 0, 255]
])
image = np.tile(image[..., np.newaxis], (1, 1, 3))
heatmaps_drawn = heatmaps.draw_on_image(image, alpha=0.5, cmap=None)[0]
assert heatmaps_drawn.shape == (4, 4, 3)
assert np.all(heatmaps_drawn[0:4, 0:2, :] == 0)
assert np.all(heatmaps_drawn[0:4, 2:3, :] == 128) or np.all(heatmaps_drawn[0:4, 2:3, :] == 127)
assert np.all(heatmaps_drawn[0:4, 3:4, :] == 255) or np.all(heatmaps_drawn[0:4, 3:4, :] == 254)
image = np.uint8([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]
])
image = np.tile(image[..., np.newaxis], (1, 1, 3))
heatmaps_drawn = heatmaps.draw_on_image(image, alpha=0.5, resize="image", cmap=None)[0]
assert heatmaps_drawn.shape == (2, 2, 3)
assert np.all(heatmaps_drawn[0:2, 0, :] == 0)
assert np.all(heatmaps_drawn[0:2, 1, :] == 128) or np.all(heatmaps_drawn[0:2, 1, :] == 127)
def test_HeatmapsOnImage_invert():
heatmaps_arr = np.float32([
[0.0, 5.0, 10.0],
[-1.0, -2.0, 7.5]
])
expected = np.float32([
[8.0, 3.0, -2.0],
[9.0, 10.0, 0.5]
])
# (H, W)
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 3), min_value=-2.0, max_value=10.0)
assert np.allclose(heatmaps.get_arr(), heatmaps_arr)
assert np.allclose(heatmaps.invert().get_arr(), expected)
# (H, W, 1)
heatmaps = ia.HeatmapsOnImage(heatmaps_arr[..., np.newaxis], shape=(2, 3), min_value=-2.0, max_value=10.0)
assert np.allclose(heatmaps.get_arr(), heatmaps_arr[..., np.newaxis])
assert np.allclose(heatmaps.invert().get_arr(), expected[..., np.newaxis])
def test_HeatmapsOnImage_pad():
heatmaps_arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4)
assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
])
)
heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4, cval=0.5)
assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.0, 1.0, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.0, 1.0, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
])
)
heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4, mode="edge")
assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0]
])
)
def test_HeatmapsOnImage_avg_pool():
heatmaps_arr = np.float32([
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_pooled = heatmaps.avg_pool(2)
assert heatmaps_pooled.arr_0to1.shape == (2, 2, 1)
assert np.allclose(
heatmaps_pooled.arr_0to1[:, :, 0],
np.float32([[0.0, 0.75],
[0.0, 0.75]])
)
def test_HeatmapsOnImage_max_pool():
heatmaps_arr = np.float32([
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_pooled = heatmaps.max_pool(2)
assert heatmaps_pooled.arr_0to1.shape == (2, 2, 1)
assert np.allclose(
heatmaps_pooled.arr_0to1[:, :, 0],
np.float32([[0.0, 1.0],
[0.0, 1.0]])
)
def test_HeatmapsOnImage_scale():
heatmaps_arr = np.float32([
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_scaled = heatmaps.scale((4, 4), interpolation="nearest")
assert heatmaps_scaled.arr_0to1.shape == (4, 4, 1)
assert heatmaps_scaled.arr_0to1.dtype.type == np.float32
assert np.allclose(
heatmaps_scaled.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0]
])
)
heatmaps_arr = np.float32([
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_scaled = heatmaps.scale(2.0, interpolation="nearest")
assert heatmaps_scaled.arr_0to1.shape == (2, 4, 1)
assert heatmaps_scaled.arr_0to1.dtype.type == np.float32
assert np.allclose(
heatmaps_scaled.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0]
])
)
def test_SegmentationMapOnImage_bool():
# Test for #189 (boolean mask inputs into SegmentationMapOnImage not working)
arr = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], dtype=bool)
assert arr.dtype.type == np.bool_
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3))
observed = segmap.get_arr_int()
assert observed.dtype.type == np.int32
assert np.array_equal(arr, observed)
arr = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], dtype=np.bool)
assert arr.dtype.type == np.bool_
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3))
observed = segmap.get_arr_int()
assert observed.dtype.type == np.int32
assert np.array_equal(arr, observed)
def test_SegmentationMapOnImage_get_arr_int():
arr = np.int32([
[0, 0, 1],
[0, 2, 1],
[1, 3, 1]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=4)
observed = segmap.get_arr_int()
assert observed.dtype.type == np.int32
assert np.array_equal(arr, observed)
arr_c0 = np.float32([
[0.1, 0.1, 0.1],
[0.1, 0.9, 0.1],
[0.0, 0.1, 0.0]
])
arr_c1 = np.float32([
[0.2, 1.0, 0.2],
[0.2, 0.8, 0.2],
[0.0, 0.0, 0.0]
])
arr_c2 = np.float32([
[0.0, 0.0, 0.0],
[0.3, 0.7, 0.3],
[0.1, 0.0, 0.0001]
])
arr = np.concatenate([
arr_c0[..., np.newaxis],
arr_c1[..., np.newaxis],
arr_c2[..., np.newaxis]
], axis=2)
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3))
observed = segmap.get_arr_int()
expected = np.int32([
[2, 2, 2],
[3, 1, 3],
[3, 1, 0]
])
assert observed.dtype.type == np.int32
assert np.array_equal(observed, expected)
got_exception = False
try:
_ = segmap.get_arr_int(background_class_id=2)
except Exception as exc:
assert "The background class id may only be changed if " in str(exc)
got_exception = True
assert got_exception
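    # Note (not from the original source): get_arr_int appears to take the per-pixel argmax
    # over the float channels and map it to class id argmax+1; any pixel whose maximum
    # confidence falls below background_threshold is reported as background class 0, which
    # is what the 0.21 threshold below exercises.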
observed = segmap.get_arr_int(background_threshold=0.21)
expected = np.int32([
[0, 2, 0],
[3, 1, 3],
[0, 0, 0]
])
assert observed.dtype.type == np.int32
assert np.array_equal(observed, expected)
def test_SegmentationMapOnImage_draw():
arr = np.int32([
[0, 1, 1],
[0, 1, 1],
[0, 1, 1]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=2)
# simple example with 2 classes
observed = segmap.draw()
col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
expected = np.uint8([
[col0, col1, col1],
[col0, col1, col1],
[col0, col1, col1]
])
assert np.array_equal(observed, expected)
# same example, with resizing to 2x the size
observed = segmap.draw(size=(6, 6))
expected = ia.imresize_single_image(expected, (6, 6), interpolation="nearest")
assert np.array_equal(observed, expected)
# custom choice of colors
col0 = (10, 10, 10)
col1 = (50, 51, 52)
observed = segmap.draw(colors=[col0, col1])
expected = np.uint8([
[col0, col1, col1],
[col0, col1, col1],
[col0, col1, col1]
])
assert np.array_equal(observed, expected)
# background_threshold, background_class and foreground mask
arr_c0 = np.float32([
[0, 0, 0],
[1.0, 0, 0],
[0, 0, 0]
])
arr_c1 = np.float32([
[0, 1, 1],
[0, 1, 1],
[0.1, 1, 1]
])
arr = np.concatenate([
arr_c0[..., np.newaxis],
arr_c1[..., np.newaxis]
], axis=2)
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3))
observed, observed_fg = segmap.draw(background_threshold=0.01, return_foreground_mask=True)
col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
col2 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[2]
expected = np.uint8([
[col0, col2, col2],
[col1, col2, col2],
[col2, col2, col2]
])
expected_fg = np.array([
[False, True, True],
[True, True, True],
[True, True, True]
], dtype=np.bool)
assert np.array_equal(observed, expected)
assert np.array_equal(observed_fg, expected_fg)
# background_threshold, background_class and foreground mask
# here with higher threshold so that bottom left pixel switches to background
observed, observed_fg = segmap.draw(background_threshold=0.11, return_foreground_mask=True)
col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
col2 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[2]
expected = np.uint8([
[col0, col2, col2],
[col1, col2, col2],
[col0, col2, col2]
])
expected_fg = np.array([
[False, True, True],
[True, True, True],
[False, True, True]
], dtype=np.bool)
assert np.array_equal(observed, expected)
assert np.array_equal(observed_fg, expected_fg)
def test_SegmentationMapOnImage_draw_on_image():
arr = np.int32([
[0, 1, 1],
[0, 1, 1],
[0, 1, 1]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=2)
image = np.uint8([
[0, 10, 20],
[30, 40, 50],
[60, 70, 80]
])
image = np.tile(image[:, :, np.newaxis], (1, 1, 3))
# only image visible
observed = segmap.draw_on_image(image, alpha=0)
assert np.array_equal(observed, image)
# only segmap visible
observed = segmap.draw_on_image(image, alpha=1.0, draw_background=True)
col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
expected = np.uint8([
[col0, col1, col1],
[col0, col1, col1],
[col0, col1, col1]
])
assert np.array_equal(observed, expected)
# only segmap visible - in foreground
observed = segmap.draw_on_image(image, alpha=1.0, draw_background=False)
col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
expected = np.uint8([
[image[0, 0, :], col1, col1],
[image[1, 0, :], col1, col1],
[image[2, 0, :], col1, col1]
])
assert np.array_equal(observed, expected)
# overlay without background drawn
a1 = 0.7
a0 = 1.0 - a1
observed = segmap.draw_on_image(image, alpha=a1, draw_background=False)
col1 = np.uint8(ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1])
expected = np.float32([
[image[0, 0, :], a0*image[0, 1, :] + a1*col1, a0*image[0, 2, :] + a1*col1],
[image[1, 0, :], a0*image[1, 1, :] + a1*col1, a0*image[1, 2, :] + a1*col1],
[image[2, 0, :], a0*image[2, 1, :] + a1*col1, a0*image[2, 2, :] + a1*col1]
])
d_max = np.max(np.abs(observed.astype(np.float32) - expected))
assert observed.shape == expected.shape
assert d_max <= 1.0 + 1e-4
# overlay with background drawn
a1 = 0.7
a0 = 1.0 - a1
observed = segmap.draw_on_image(image, alpha=a1, draw_background=True)
col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
expected = np.uint8([
[col0, col1, col1],
[col0, col1, col1],
[col0, col1, col1]
])
expected = a0 * image + a1 * expected
d_max = np.max(np.abs(observed.astype(np.float32) - expected.astype(np.float32)))
assert observed.shape == expected.shape
assert d_max <= 1.0 + 1e-4
# resizing of segmap to image
arr = np.int32([
[0, 1, 1]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=2)
image = np.uint8([
[0, 10, 20],
[30, 40, 50],
[60, 70, 80]
])
image = np.tile(image[:, :, np.newaxis], (1, 1, 3))
a1 = 0.7
a0 = 1.0 - a1
observed = segmap.draw_on_image(image, alpha=a1, draw_background=True, resize="segmentation_map")
expected = np.uint8([
[col0, col1, col1],
[col0, col1, col1],
[col0, col1, col1]
])
expected = a0 * image + a1 * expected
d_max = np.max(np.abs(observed.astype(np.float32) - expected.astype(np.float32)))
assert observed.shape == expected.shape
assert d_max <= 1.0 + 1e-4
# resizing of image to segmap
arr = np.int32([
[0, 1, 1],
[0, 1, 1],
[0, 1, 1]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(1, 3), nb_classes=2)
image = np.uint8([
[0, 10, 20]
])
image = np.tile(image[:, :, np.newaxis], (1, 1, 3))
image_rs = ia.imresize_single_image(image, arr.shape[0:2], interpolation="cubic")
a1 = 0.7
a0 = 1.0 - a1
observed = segmap.draw_on_image(image, alpha=a1, draw_background=True, resize="image")
expected = np.uint8([
[col0, col1, col1],
[col0, col1, col1],
[col0, col1, col1]
])
expected = a0 * image_rs + a1 * expected
d_max = np.max(np.abs(observed.astype(np.float32) - expected.astype(np.float32)))
assert observed.shape == expected.shape
assert d_max <= 1.0 + 1e-4
def test_SegmentationMapOnImage_pad():
arr = np.int32([
[0, 1, 1],
[0, 2, 1],
[0, 1, 3]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=4)
segmap_padded = segmap.pad(top=1, right=2, bottom=3, left=4)
observed = segmap_padded.arr
expected = np.pad(segmap.arr, ((1, 3), (4, 2), (0, 0)), mode="constant", constant_values=0)
assert np.allclose(observed, expected)
segmap_padded = segmap.pad(top=1, right=2, bottom=3, left=4, cval=1.0)
observed = segmap_padded.arr
expected = np.pad(segmap.arr, ((1, 3), (4, 2), (0, 0)), mode="constant", constant_values=1.0)
assert np.allclose(observed, expected)
segmap_padded = segmap.pad(top=1, right=2, bottom=3, left=4, mode="edge")
observed = segmap_padded.arr
expected = np.pad(segmap.arr, ((1, 3), (4, 2), (0, 0)), mode="edge")
    assert np.allclose(observed, expected)
import numpy
from scipy.stats import rv_discrete
from Utils.DCA import dca_solver, sdr_solver, check_feasibility
from constants import GS_DCA, GS_SDR, DCA_ONLY, SDR_ONLY, PERFECT_AGGREGATION
class SystemOptimizationSolver:
def __init__(self, p, m, lam, delta, p_var_bound, gradient_bound, d, s, k, tau, h_mat, data_size_list,
optimization_scaling_factor):
self.p = p
self.m = m
self.lam = lam
self.delta = delta
self.p_var_bound = p_var_bound
self.gradient_bound = gradient_bound
self.d = d
self.s = s
self.k = k
self.tau = tau
self.h_mat = h_mat
self.data_size_list = data_size_list
self.optimization_scaling_factor = optimization_scaling_factor
def compute_objective_value(self, a, current_device_set):
if len(current_device_set) <= 0:
return 1e6
elif len(current_device_set) > self.m:
object_val = (1 / (1 - self.lam)) * (1 + numpy.sqrt(2 * numpy.log(self.m / self.delta))) * numpy.sqrt(
1 / self.s) * self.gradient_bound
else:
data_size = 0
min_data_size = 1e6
for i in current_device_set:
data_size += self.data_size_list[i]
min_data_size = min(min_data_size, self.data_size_list[i])
first_term = numpy.sqrt(
3 * self.d * self.tau / self.p) * self.optimization_scaling_factor * numpy.linalg.norm(
a) / data_size
# print('objective first term: ' + str(first_term))
second_term = numpy.sqrt(24 * (1 - (data_size / (self.m * self.s))) ** 2 / min_data_size + 1 / self.s) * (
1 / (1 - self.lam)) * (
1 + numpy.sqrt(2 * numpy.log(1 / self.delta))) * self.gradient_bound
object_val = first_term + second_term
# print('objective second term: ' + str(second_term))
return object_val
def gibbs_sampling_based_device_selection(self, mode, beta=0.8, rho=0.8, max_iter=100):
device_entry_vector = numpy.ones(self.m)
cache_a = None
cache_device_entry_vectors = list()
cache_beamforming_list = list()
cache_objective_list = list()
for i in range(max_iter):
index_list = range(self.m)
probability_list = list()
beamforming_list = list()
total_objective_val = 0
scaling_factor = 0
for j in range(self.m):
current_device_entry_vector = numpy.copy(device_entry_vector)
current_device_entry_vector[j] = 1 - current_device_entry_vector[j]
# current_device_set = [x for x in current_device_entry_vector if x == 1]
# print('Gibbs Sampling: iter ' + str(i) + ' neighbor ' + str(j))
# print('current device entry vector: ' + str(current_device_entry_vector))
current_device_set = numpy.where(numpy.array(current_device_entry_vector) == 1)[0].tolist()
# print(current_device_set)
# print(current_device_set)
# current_a = numpy.zeros((self.k, 1))
current_a = None
cache_index = 0
while cache_index < len(cache_device_entry_vectors):
if numpy.array_equal(cache_device_entry_vectors[cache_index], current_device_entry_vector):
break
cache_index += 1
if cache_index != len(cache_device_entry_vectors):
current_a = cache_beamforming_list[cache_index]
current_objective_val = cache_objective_list[cache_index]
else:
if mode == GS_DCA:
current_a = dca_solver(current_device_set, self.h_mat, cache_a=cache_a)
elif mode == GS_SDR:
current_a = sdr_solver(current_device_set, self.h_mat, cache_a=cache_a)
                    if numpy.linalg.norm(current_a)
import numpy as np
import matplotlib.pyplot as plt
import os
from astropy.io import fits
import shutil
import catkit.datalogging
def test_data_log_interface():
logger = catkit.datalogging.get_logger(__name__)
# Make sure this doesn't crash, even though nothing should be written out.
logger.log_scalar('tag', 5)
log_dir = './data_log_test'
if os.path.exists(log_dir):
shutil.rmtree(log_dir)
os.makedirs(log_dir)
writer = catkit.datalogging.DataLogWriter(log_dir)
logger.log_scalar('tag2', 10)
# Cleanup
writer.close()
shutil.rmtree(log_dir)
def test_data_log_retrieval():
logger = catkit.datalogging.get_logger(__name__)
log_dir = './data_log_test'
if os.path.exists(log_dir):
shutil.rmtree(log_dir)
os.makedirs(log_dir)
writer = catkit.datalogging.DataLogWriter(log_dir)
catkit.datalogging.DataLogger.add_writer(writer)
scalar = float(np.random.randn(1))
tensor = np.random.randn(100, 250)
curve_x = np.random.randn(30)
curve_y = np.random.randn(30)
plt.plot(curve_x, curve_y)
hdu = fits.PrimaryHDU(tensor)
fits_fname = os.path.join(log_dir, 'tensor.fits')
hdu.writeto(fits_fname)
logger.log_scalar('a', scalar)
logger.log_scalar('a', scalar * 2)
logger.log_scalar('a', scalar * -0.5)
logger.log_tensor('b', tensor)
logger.log_curve('c', curve_x, curve_y)
logger.log_figure('d')
logger.log_fits_file('e', fits_fname)
# Unregister writer
catkit.datalogging.DataLogger.remove_writer(writer)
writer.close()
reader = catkit.datalogging.DataLogReader(log_dir)
wall_time, scalars = reader.get('a')
assert np.allclose(scalars[0], scalar)
assert len(scalars) == 3
wall_time, scalars = reader.get('a', slice(1,None))
assert len(scalars) == 2
assert len(wall_time) == 2
wall_time, tensors = reader.get('b')
assert np.allclose(tensors[0], tensor)
wall_time, curve = reader.get('c')
assert np.allclose(curve[0]['x'], curve_x)
assert np.allclose(curve[0]['y'], curve_y)
wall_time, figs = reader.get('d')
assert figs[0].ndim == 3
wall_time, fits_files = reader.get('e')
    assert np.allclose(fits_files[0][0].data, tensor)
from config import Config
import cv2
import numpy as np
import keyboard
import time
from utils.custom_mouse import mouse
from template_finder import TemplateFinder
from ui_manager import detect_screen_object, ScreenObjects, center_mouse
from utils.misc import wait, trim_black, color_filter, cut_roi
from inventory import consumables, personal
from ui import view
from screen import grab
from dataclasses import dataclass
from logger import Logger
from ocr import Ocr
@dataclass
class BoxInfo:
img: np.ndarray = None
pos: tuple = None
column: int = None
row: int = None
need_id: bool = False
sell: bool = False
keep: bool = False
def __getitem__(self, key):
return super().__getattribute__(key)
def __setitem__(self, key, value):
setattr(self, key, value)
def get_slot_pos_and_img(img: np.ndarray, column: int, row: int) -> tuple[tuple[int, int], np.ndarray]:
"""
Get the pos and img of a specific slot position in Inventory. Inventory must be open in the image.
:param img: Image from screen.grab() not cut
:param column: Column in the Inventory
:param row: Row in the Inventory
:return: Returns position and image of the cut area as such: [[x, y], img]
"""
top_left_slot = (Config().ui_pos["inventory_top_left_slot_x"], Config().ui_pos["inventory_top_left_slot_y"])
slot_width = Config().ui_pos["slot_width"]
slot_height= Config().ui_pos["slot_height"]
slot = (top_left_slot[0] + slot_width * column, top_left_slot[1] + slot_height * row)
# decrease size to make sure not to have any borders of the slot in the image
offset_w = int(slot_width * 0.12)
offset_h = int(slot_height * 0.12)
min_x = slot[0] + offset_w
max_x = slot[0] + slot_width - offset_w
min_y = slot[1] + offset_h
max_y = slot[1] + slot_height - offset_h
slot_img = img[min_y:max_y, min_x:max_x]
center_pos = (int(slot[0] + (slot_width // 2)), int(slot[1] + (slot_height // 2)))
return center_pos, slot_img
def slot_has_item(slot_img: np.ndarray) -> bool:
"""
Check if a specific slot in the inventory has an item or not based on color
:param slot_img: Image of the slot
:return: Bool if there is an item or not
"""
slot_img = cv2.cvtColor(slot_img, cv2.COLOR_BGR2HSV)
avg_brightness = np.average(slot_img[:, :, 2])
return avg_brightness > 16.0
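# Illustrative helper (not part of the original source): the two functions above can be
# combined to scan the inventory grid for occupied slots. The 10x4 grid size is only a
# placeholder here; the real column/row counts live in the UI config.
def _example_occupied_slots(img: np.ndarray, columns: int = 10, rows: int = 4) -> list[tuple[int, int]]:
    occupied = []
    for column in range(columns):
        for row in range(rows):
            _, slot_img = get_slot_pos_and_img(img, column, row)
            if slot_has_item(slot_img):
                occupied.append((column, row))
    return occupied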
def close(img: np.ndarray = None) -> np.ndarray:
img = grab() if img is None else img
if detect_screen_object(ScreenObjects.RightPanel, img).valid or detect_screen_object(ScreenObjects.LeftPanel, img).valid:
keyboard.send("esc")
wait(0.1, 0.2)
if detect_screen_object(ScreenObjects.RightPanel, img).valid or detect_screen_object(ScreenObjects.LeftPanel, img).valid:
success = view.return_to_play()
if not success:
return None
return img
def calc_item_roi(img_pre, img_post):
try:
diff = cv2.absdiff(img_pre, img_post)
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
diff_thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)[1]
blue_mask, _ = color_filter(img_pre, Config().colors["blue_slot"])
red_mask, _ = color_filter(img_pre, Config().colors["red_slot"])
green_mask, _ = color_filter(img_post, Config().colors["green_slot"])
blue_red_mask = np.bitwise_or(blue_mask, red_mask)
final = np.bitwise_and.reduce([blue_red_mask, green_mask, diff_thresh])
_, roi = trim_black(final)
return roi
except BaseException as err:
Logger.error(f"_calc_item_roi: Unexpected {err=}, {type(err)=}")
return None
def tome_state(img: np.ndarray = None, tome_type: str = "tp", roi: list = None):
img = img if img is not None else grab()
if (tome_found := TemplateFinder().search([f"{tome_type.upper()}_TOME", f"{tome_type.upper()}_TOME_RED"], img, roi = roi, threshold = 0.8, best_match = True, normalize_monitor = True)).valid:
if tome_found.name == f"{tome_type.upper()}_TOME":
state = "ok"
else:
state = "empty"
position = tome_found.center
else:
state = position = None
return state, position
def id_item_with_tome(item_location: list, id_tome_location: list):
mouse.move(id_tome_location[0], id_tome_location[1], randomize=4, delay_factor=[0.4, 0.8])
wait(0.2, 0.4)
mouse.click(button="right")
wait(0.1)
mouse.move(item_location[0], item_location[1], randomize=4, delay_factor=[0.4, 0.8])
wait(0.1)
mouse.click(button="left")
consumables.increment_need("id", 1)
wait(0.2, 0.4)
def transfer_items(items: list, action: str = "drop") -> list:
#requires open inventory / stash / vendor
img = grab()
filtered = []
left_panel_open = detect_screen_object(ScreenObjects.LeftPanel, img).valid
if action == "drop":
filtered = [ item for item in items if item.keep == False and item.sell == False ]
elif action == "sell":
filtered = [ item for item in items if item.keep == False and item.sell == True ]
if not left_panel_open:
Logger.error(f"transfer_items: Can't perform, vendor is not open")
elif action == "stash":
if detect_screen_object(ScreenObjects.GoldBtnStash, img).valid:
filtered = [ item for item in items if item.keep == True ]
else:
Logger.error(f"transfer_items: Can't perform, stash is not open")
else:
Logger.error(f"transfer_items: incorrect action param={action}")
if filtered:
# if dropping, control+click to drop unless left panel is open, then drag to middle
# if stashing, control+click to stash
# if selling, control+click to sell
# TODO: if purchasing, right-click to buy
# TODO: if purchasing stack, shift+right-click to buy
if (action == "drop" and not left_panel_open) or action in ["sell", "stash"]:
keyboard.send('ctrl', do_release=False)
wait(0.2, 0.4)
for item in filtered:
attempts = 0
prev_gold_img = cut_roi(grab(), roi=Config().ui_roi["inventory_gold_digits"])
while attempts < 2:
# move to item position and left click
mouse.move(*item.pos, randomize=4, delay_factor=[0.2, 0.4])
wait(0.2, 0.4)
mouse.press(button="left")
wait(0.2, 0.4)
mouse.release(button="left")
wait(0.2, 0.4)
# if dropping, drag item to middle if vendor/stash is open
if action == "drop" and left_panel_open:
center_mouse()
wait(0.2, 0.3)
mouse.press(button="left")
wait(0.2, 0.3)
mouse.release(button="left")
wait(0.8, 1)
# check if item is still there
img=grab()
slot_img = get_slot_pos_and_img(img, item.column, item.row)[1]
if not slot_has_item(slot_img):
# item successfully transferred, delete from list
for cnt, o_item in enumerate(items):
if o_item.pos == item.pos:
items.pop(cnt)
break
# check and see if inventory gold count changed
new_gold_img = cut_roi(img, roi=Config().ui_roi["inventory_gold_digits"])
if prev_gold_img.shape == new_gold_img.shape and not(np.bitwise_xor(prev_gold_img, new_gold_img).any()):
Logger.info("Inventory gold is full, force stash")
personal.set_inventory_gold_full(True)
else:
personal.set_inventory_gold_full(False)
break
else:
# item is still there, try again
attempts += 1
if attempts > 1:
Logger.error(f"transfer_items: could not stash in position {item.pos}")
if (action == "drop" and not left_panel_open) or action in ["sell", "stash"]:
keyboard.send('ctrl', do_press=False)
wait(0.1)
return items
# use with caution--unreliable
def read_gold(img: np.ndarray = None, type: str = "inventory"):
if type not in ["vendor", "inventory", "stash"]:
Logger.error(f"read_gold: type {type} not supported")
return False
img = img if img is not None else grab()
img = cut_roi(img, Config().ui_roi[f"{type}_gold_digits"])
# _, img = color_filter(img, Config().colors["gold_numbers"])
    img = np.pad(img, pad_width=[(8, 8),(8, 8),(0, 0)], mode='constant')
"""Utility functions for the qp package"""
import numpy as np
from scipy import stats as sps
from scipy.interpolate import interp1d
import sys
epsilon = sys.float_info.epsilon
infty = sys.float_info.max * epsilon
lims = (epsilon, 1.)
CASE_PRODUCT = 0
CASE_FACTOR = 1
CASE_2D = 2
CASE_FLAT = 3
def safelog(arr, threshold=epsilon):
"""
Takes the natural logarithm of an array of potentially non-positive numbers
Parameters
----------
arr: numpy.ndarray, float
values to be logged
threshold: float
small, positive value to replace zeros and negative numbers
Returns
-------
logged: numpy.ndarray
logarithms, with approximation in place of zeros and negative numbers
"""
return np.log(np.array(arr).clip(threshold, np.inf))
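# Illustrative example (not part of the original module): non-positive entries are clipped
# to `threshold` before the log, so the result is always finite.
# >>> safelog(np.array([np.e, 0.0, -3.0]))
# array([  1.        , -36.04365339, -36.04365339])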
_ = """
def normalize_quantiles(in_data, threshold=epsilon, vb=False):
Evaluates PDF from quantiles including endpoints from linear extrapolation
Parameters
----------
in_data: tuple, numpy.ndarray, float
tuple of CDF values iy corresponding to quantiles and the points x at
which those CDF values are achieved
threshold: float, optional
optional minimum threshold for PDF
vb: boolean, optional
be careful and print progress to stdout?
Returns
-------
out_data: tuple, ndarray, float
tuple of values x at which CDF is achieved, including extrema, and
normalized PDF values y at x
(iy, x) = in_data
(xs, ys) = evaluate_quantiles((iy, x), vb=vb)
# xs = xs[1:-1]
# ys = ys[1:-1]
x_min = xs[0] - 2 * iy[0] / ys[0]
x_max = xs[-1] + 2 * (1. - iy[-1]) / ys[-1]
xs = sandwich(xs, (x_min, x_max))
ys = sandwich(ys, (threshold, threshold))
out_data = (xs, ys)
return out_data
"""
def edge_to_center(edges):
"""Return the centers of a set of bins given the edges"""
return 0.5*(edges[1:] + edges[:-1])
def bin_widths(edges):
"""Return the widths of a set of bins given the edges"""
return edges[1:] - edges[:-1]
def get_bin_indices(bins, x):
"""Return the bin indexes for a set of values
    If the bins are equal width this will use arithmetic;
    if the bins are not equal width this will use a binary search.
"""
widths = bin_widths(bins)
n_bins = np.size(bins) - 1
if np.allclose(widths, widths[0]):
idx = np.atleast_1d(np.floor((x-bins[0])/widths[0]).astype(int))
else:
idx = np.atleast_1d(np.searchsorted(bins, x, side='left')-1)
mask = (idx >= 0) * (idx < bins.size-1)
np.putmask(idx, 1-mask, 0)
xshape = np.shape(x)
return idx.reshape(xshape).clip(0, n_bins-1), mask.reshape(xshape)
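# Illustrative example (not part of the original module): with unequal bin widths the
# binary-search branch is used; the out-of-range value 10.0 gets a False mask entry and
# its index is zeroed/clipped rather than raising.
# >>> get_bin_indices(np.array([0., 1., 3., 7.]), np.array([0.5, 2.0, 10.0]))
# (array([0, 1, 0]), array([ True,  True, False]))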
def normalize_interp1d(xvals, yvals):
"""
Normalize a set of 1D interpolators
Parameters
----------
xvals : array-like
X-values used for the interpolation
yvals : array-like
Y-values used for the interpolation
Returns
-------
ynorm: array-like
Normalized y-vals
"""
#def row_integral(irow):
# return quad(interp1d(xvals[irow], yvals[irow], **kwargs), limits[0], limits[1])[0]
#vv = np.vectorize(row_integral)
#integrals = vv(np.arange(xvals.shape[0]))
integrals = np.sum(xvals[:,1:]*yvals[:,1:] - xvals[:,:-1]*yvals[:,1:], axis=1)
return (yvals.T / integrals).T
def build_kdes(samples, **kwargs):
"""
    Build a set of Gaussian Kernel Density Estimates
Parameters
----------
samples : array-like
        Rows of samples used to build the KDEs (one KDE per row)
Keywords
--------
Passed to the `scipy.stats.gaussian_kde` constructor
Returns
-------
kdes : list of `scipy.stats.gaussian_kde` objects
"""
return [ sps.gaussian_kde(row, **kwargs) for row in samples ]
def evaluate_kdes(xvals, kdes):
"""
    Evaluate a set of kdes
Parameters
----------
xvals : array_like
        X-values at which to evaluate the KDEs
kdes : list of `sps.gaussian_kde`
The kernel density estimates
Returns
-------
yvals : array_like
        The kdes evaluated at the xvals
"""
return np.vstack([kde(xvals) for kde in kdes])
def get_eval_case(x, row):
""" Figure out which of the various input formats scipy.stats has passed us
Parameters
----------
x : array_like
Pdf x-vals
row : array_like
Pdf row indices
Returns
-------
case : `int`
The case code
xx : array_like
The x-values properly shaped
rr : array_like
The y-values, properly shaped
Notes
-----
The cases are:
CASE_FLAT : x, row have shapes (n) , (n) and do not factor
    CASE_FACTOR : x, row can be factored to shapes (1, nx) and (npdf, 1)
CASE_PRODUCT : x, row have shapes (1, nx) and (npdf, 1)
CASE_2D : x, row have shapes (npdf, nx) and (npdf, nx)
"""
nd_x = np.ndim(x)
nd_row = np.ndim(row)
#if nd_x > 2 or nd_row > 2: #pragma: no cover
# raise ValueError("Too many dimensions: x(%s), row(%s)" % (np.shape(x), np.shape(row)))
if nd_x >= 2 and nd_row != 1:
return CASE_2D, x, row
if nd_x >= 2 and nd_row == 1: #pragma: no cover
raise ValueError("Dimension mismatch: x(%s), row(%s)" % (np.shape(x), np.shape(row)))
if nd_row >= 2:
return CASE_PRODUCT, x, row
if np.size(x) == 1 or np.size(row) == 1:
return CASE_FLAT, x, row
xx = np.unique(x)
rr = np.unique(row)
if np.size(xx) == np.size(x):
xx = x
if np.size(rr) == np.size(row):
rr = row
if np.size(xx) * np.size(rr) != np.size(x):
return CASE_FLAT, x, row
return CASE_FACTOR, xx, np.expand_dims(rr, -1)
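# Illustrative examples (not part of the original module) of how the case detection resolves:
#   get_eval_case(np.linspace(0., 1., 50), np.arange(10).reshape(10, 1))  -> CASE_PRODUCT
#   get_eval_case(np.ones((10, 50)), np.arange(10).reshape(10, 1))        -> CASE_2D
#   get_eval_case(np.arange(5.), np.array([0, 1, 0, 2, 1]))               -> CASE_FLAT
#   get_eval_case(np.tile([0., 1.], 3), np.repeat(np.arange(3), 2))       -> CASE_FACTOR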
def evaluate_hist_x_multi_y_flat(x, row, bins, vals, derivs=None): #pragma: no cover
"""
Evaluate a set of values from histograms
Parameters
----------
x : array_like (n)
X values to interpolate at
row : array_like (n)
Which rows to interpolate at
bins : array_like (N+1)
'x' bin edges
vals : array_like (npdf, N)
'y' bin contents
Returns
-------
out : array_like (n)
The histogram values
"""
assert np.ndim(x) < 2 and np.ndim(row) < 2
idx, mask = get_bin_indices(bins, x)
if derivs is None:
deltas = np.zeros(idx.shape)
else:
deltas = x - bins[idx]
def evaluate_row(idxv, maskv, rv, delta):
if derivs is None:
return np.where(maskv, vals[rv, idxv], 0)
return np.where(maskv, vals[rv, idxv] + delta*derivs[rv, idxv], 0)
vv = np.vectorize(evaluate_row)
return vv(idx, mask, row, deltas)
def evaluate_hist_x_multi_y_product(x, row, bins, vals, derivs=None): #pragma: no cover
"""
Evaluate a set of values from histograms
Parameters
----------
x : array_like (npts)
X values to interpolate at
row : array_like (npdf, 1)
Which rows to interpolate at
bins : array_like (N+1)
'x' bin edges
vals : array_like (npdf, N)
'y' bin contents
Returns
-------
out : array_like (npdf, npts)
The histogram values
"""
#assert np.ndim(x) < 2 and np.ndim(row) == 2
idx, mask0 = get_bin_indices(bins, x)
mask = np.ones(row.shape) * mask0
if derivs is None:
return np.where(mask, vals[:,idx][np.squeeze(row)], 0)
deltas = x - bins[idx]
return np.where(mask, vals[:,idx][np.squeeze(row)] + deltas*derivs[:,idx][np.squeeze(row)] , 0)
def evaluate_hist_x_multi_y_2d(x, row, bins, vals, derivs=None): #pragma: no cover
"""
Evaluate a set of values from histograms
Parameters
----------
x : array_like (npdf, npts)
X values to interpolate at
row : array_like (npdf, 1)
Which rows to interpolate at
bins : array_like (N+1)
'x' bin edges
vals : array_like (npdf, N)
'y' bin contents
Returns
-------
out : array_like (npdf, npts)
The histogram values
"""
assert np.ndim(x) >= 2 and np.ndim(row) >= 2
idx, mask = get_bin_indices(bins, x)
if derivs is None:
deltas = np.zeros(idx.shape)
else:
deltas = x - bins[idx]
def evaluate_row(idxv, maskv, rv, delta):
if derivs is None:
return np.where(maskv, vals[rv, idxv], 0)
return np.where(maskv, vals[rv, idxv] + delta*derivs[rv, idxv], 0)
vv = np.vectorize(evaluate_row)
return vv(idx, mask, row, deltas)
def evaluate_hist_x_multi_y(x, row, bins, vals, derivs=None):
"""
Evaluate a set of values from histograms
Parameters
----------
x : array_like
X values to interpolate at
row : array_like
Which rows to interpolate at
bins : array_like (N+1)
'x' bin edges
vals : array_like (npdf, N)
'y' bin contents
Returns
-------
out : array_like
The histogram values
Notes
-----
Depending on the shape of 'x' and 'row' this will
use one of the three specific implementations.
"""
case_idx, xx, rr = get_eval_case(x, row)
if case_idx in [CASE_PRODUCT, CASE_FACTOR]:
return evaluate_hist_x_multi_y_product(xx, rr, bins, vals, derivs)
if case_idx == CASE_2D:
return evaluate_hist_x_multi_y_2d(xx, rr, bins, vals, derivs)
return evaluate_hist_x_multi_y_flat(xx, rr, bins, vals, derivs)
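# Minimal usage sketch (illustrative, not part of the original module): two histogram pdfs
# sharing one set of bin edges, evaluated on a common grid; this resolves to the product
# case and returns one row of values per pdf.
def _example_evaluate_hist():
    bins = np.linspace(0., 1., 5)                      # (N+1,) shared bin edges
    vals = np.array([[1., 1., 1., 1.],
                     [0., 2., 2., 0.]])                # (npdf, N) bin contents
    x = np.array([0.1, 0.4, 0.9])                      # (npts,) evaluation points
    rows = np.arange(2).reshape(-1, 1)                 # (npdf, 1) row selector
    return evaluate_hist_x_multi_y(x, rows, bins, vals)   # shape (npdf, npts) == (2, 3)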
def evaluate_hist_multi_x_multi_y_flat(x, row, bins, vals, derivs=None): #pragma: no cover
"""
Evaluate a set of values from histograms
Parameters
----------
x : array_like (n)
X values to interpolate at
row : array_like (n)
Which rows to interpolate at
bins : array_like (npdf, N+1)
'x' bin edges
vals : array_like (npdf, N)
'y' bin contents
Returns
-------
out : array_like (n)
The histogram values
"""
def evaluate_row(xv, rv):
bins_row = bins[rv]
idx, mask = get_bin_indices(bins_row, xv)
delta = xv - bins_row[idx]
if derivs is None:
return np.where(mask, vals[rv, idx], 0)
return np.where(mask, vals[rv, idx] + delta*derivs[rv, idx], 0)
vv = np.vectorize(evaluate_row)
return vv(x, row)
def evaluate_hist_multi_x_multi_y_product(x, row, bins, vals, derivs=None): #pragma: no cover
"""
Evaluate a set of values from histograms
Parameters
----------
x : array_like (npts)
X values to interpolate at
row : array_like (npdf, 1)
Which rows to interpolate at
bins : array_like (npdf, N+1)
'x' bin edges
vals : array_like (npdf, N)
'y' bin contents
Returns
-------
out : array_like (npdf, npts)
The histogram values
"""
def evaluate_row(rv):
bins_flat = bins[rv].flatten()
idx, mask = get_bin_indices(bins_flat, x)
delta = x - bins_flat[idx]
if derivs is None:
return np.where(mask, np.squeeze(vals[rv])[idx], 0).flatten()
return np.where(mask, np.squeeze(vals[rv])[idx] + delta* np.squeeze(derivs[rv])[idx], 0)
vv = np.vectorize(evaluate_row, signature="(1)->(%i)" % (x.size))
return vv(row)
def evaluate_hist_multi_x_multi_y_2d(x, row, bins, vals, derivs=None): #pragma: no cover
"""
Evaluate a set of values from histograms
Parameters
----------
x : array_like (npdf, npts)
X values to interpolate at
row : array_like (npdf, 1)
Which rows to interpolate at
bins : array_like (npdf, N+1)
'x' bin edges
vals : array_like (npdf, N)
'y' bin contents
Returns
-------
out : array_like (npdf, npts)
The histogram values
"""
nx = np.shape(x)[-1]
def evaluate_row(rv, xv):
flat_bins = bins[rv].flatten()
idx, mask = get_bin_indices(flat_bins, xv)
delta = xv - flat_bins[idx]
if derivs is None:
return np.where(mask, np.squeeze(vals[rv])[idx], 0).flatten()
return np.where(mask, np.squeeze(vals[rv])[idx] + delta*np.squeeze(derivs[rv])[idx], 0).flatten()
vv = np.vectorize(evaluate_row, signature="(1),(%i)->(%i)" % (nx, nx))
return vv(row, x)
def evaluate_hist_multi_x_multi_y(x, row, bins, vals, derivs=None):
"""
Evaluate a set of values from histograms
Parameters
----------
x : array_like
X values to interpolate at
row : array_like
Which rows to interpolate at
bins : array_like (npdf, N+1)
'x' bin edges
vals : array_like (npdf, N)
'y' bin contents
Returns
-------
out : array_like
The histogram values
"""
case_idx, xx, rr = get_eval_case(x, row)
if case_idx in [CASE_PRODUCT, CASE_FACTOR]:
return evaluate_hist_multi_x_multi_y_product(xx, rr, bins, vals, derivs)
if case_idx == CASE_2D:
return evaluate_hist_multi_x_multi_y_2d(xx, rr, bins, vals, derivs)
return evaluate_hist_multi_x_multi_y_flat(xx, rr, bins, vals, derivs)
def interpolate_x_multi_y_flat(x, row, xvals, yvals, **kwargs):
"""
Interpolate a set of values
Parameters
----------
x : array_like (n)
X values to interpolate at
row : array_like (n)
Which rows to interpolate at
xvals : array_like (npts)
X-values used for the interpolation
yvals : array_like (npdf, npts)
        Y-values used for the interpolation
Returns
-------
vals : array_like (npdf, n)
        The interpolated values
"""
def single_row(xv, rv):
return interp1d(xvals, yvals[rv], **kwargs)(xv)
vv = np.vectorize(single_row)
return vv(x, row)
def interpolate_x_multi_y_product(x, row, xvals, yvals, **kwargs):
"""
Interpolate a set of values
Parameters
----------
x : array_like (n)
X values to interpolate at
row : array_like (npdf, 1)
Which rows to interpolate at
xvals : array_like (npts)
X-values used for the interpolation
yvals : array_like (npdf, npts)
        Y-values used for the interpolation
Returns
-------
vals : array_like (npdf, n)
        The interpolated values
"""
rr = np.squeeze(row)
return interp1d(xvals, yvals[rr], **kwargs)(x)
def interpolate_x_multi_y_2d(x, row, xvals, yvals, **kwargs):
"""
Interpolate a set of values
Parameters
----------
x : array_like (npdf, n)
X values to interpolate at
row : array_like (npdf, 1)
Which rows to interpolate at
xvals : array_like (npts)
X-values used for the interpolation
yvals : array_like (npdf, npts)
        Y-values used for the interpolation
Returns
-------
vals : array_like (npdf, n)
        The interpolated values
"""
nx = np.shape(x)[-1]
def evaluate_row(rv, xv):
return interp1d(xvals, yvals[rv], **kwargs)(xv)
vv = np.vectorize(evaluate_row, signature="(1),(%i)->(%i)" % (nx, nx))
return vv(row, x)
def interpolate_x_multi_y(x, row, xvals, yvals, **kwargs):
"""
Interpolate a set of values
Parameters
----------
x : array_like (npdf, n)
X values to interpolate at
row : array_like (npdf, 1)
Which rows to interpolate at
xvals : array_like (npts)
X-values used for the interpolation
yvals : array_like (npdf, npts)
        Y-values used for the interpolation
Returns
-------
vals : array_like
        The interpolated values
"""
case_idx, xx, rr = get_eval_case(x, row)
if case_idx in [CASE_PRODUCT, CASE_FACTOR]:
return interpolate_x_multi_y_product(xx, rr, xvals, yvals, **kwargs)
if case_idx == CASE_2D:
return interpolate_x_multi_y_2d(xx, rr, xvals, yvals, **kwargs)
return interpolate_x_multi_y_flat(xx, rr, xvals, yvals, **kwargs)
def interpolate_multi_x_multi_y_flat(x, row, xvals, yvals, **kwargs):
"""
Interpolate a set of values
Parameters
----------
x : array_like (n)
X values to interpolate at
row : array_like (n)
Which rows to interpolate at
xvals : array_like (npdf, npts)
X-values used for the interpolation
yvals : array_like (npdf, npts)
        Y-values used for the interpolation
Returns
-------
vals : array_like (npdf, n)
        The interpolated values
"""
def single_row(xv, rv):
return interp1d(xvals[rv], yvals[rv], **kwargs)(xv)
vv = np.vectorize(single_row)
return vv(x, row)
def interpolate_multi_x_multi_y_product(x, row, xvals, yvals, **kwargs):
"""
Interpolate a set of values
Parameters
----------
x : array_like (n)
X values to interpolate at
row : array_like (npdf, 1)
Which rows to interpolate at
xvals : array_like (npdf, npts)
X-values used for the interpolation
yvals : array_like (npdf, npts)
        Y-values used for the interpolation
Returns
-------
vals : array_like (npdf, n)
        The interpolated values
"""
rr = np.squeeze(row)
nx = np.shape(x)[-1]
def single_row(rv):
return interp1d(xvals[rv], yvals[rv], **kwargs)(x)
vv = np.vectorize(single_row, signature="()->(%i)" % (nx))
return vv(rr)
def interpolate_multi_x_multi_y_2d(x, row, xvals, yvals, **kwargs):
"""
Interpolate a set of values
Parameters
----------
x : array_like (npdf, n)
X values to interpolate at
row : array_like (npdf, 1)
Which rows to interpolate at
xvals : array_like (npdf, npts)
X-values used for the interpolation
yvals : array_like (npdf, npts)
        Y-values used for the interpolation
Returns
-------
vals : array_like (npdf, n)
        The interpolated values
"""
nx = np.shape(x)[-1]
def evaluate_row(rv, xv):
return interp1d(xvals[rv], yvals[rv], **kwargs)(xv)
vv = np.vectorize(evaluate_row, signature="(),(%i)->(%i)" % (nx, nx))
return vv(np.squeeze(row), x)
def interpolate_multi_x_multi_y(x, row, xvals, yvals, **kwargs):
"""
Interpolate a set of values
Parameters
----------
x : array_like (npdf, n)
X values to interpolate at
row : array_like (npdf, 1)
Which rows to interpolate at
xvals : array_like (npdf, npts)
X-values used for the interpolation
yvals : array_like (npdf, npts)
        Y-values used for the interpolation
Returns
-------
vals : array_like
        The interpolated values
"""
case_idx, xx, rr = get_eval_case(x, row)
if case_idx in [CASE_PRODUCT, CASE_FACTOR]:
return interpolate_multi_x_multi_y_product(xx, rr, xvals, yvals, **kwargs)
if case_idx == CASE_2D:
return interpolate_multi_x_multi_y_2d(xx, rr, xvals, yvals, **kwargs)
return interpolate_multi_x_multi_y_flat(xx, rr, xvals, yvals, **kwargs)
def interpolate_multi_x_y_flat(x, row, xvals, yvals, **kwargs):
"""
Interpolate a set of values
Parameters
----------
x : array_like (n)
X values to interpolate at
row : array_like (n)
Which rows to interpolate at
xvals : array_like (npdf, npts)
X-values used for the interpolation
yvals : array_like (npdf)
        Y-values used for the interpolation
Returns
-------
vals : array_like (npdf, n)
        The interpolated values
"""
def single_row(xv, rv):
return interp1d(xvals[rv], yvals, **kwargs)(xv)
vv = np.vectorize(single_row)
return vv(x, row)
def interpolate_multi_x_y_product(x, row, xvals, yvals, **kwargs):
"""
Interpolate a set of values
Parameters
----------
x : array_like (n)
X values to interpolate at
row : array_like (npdf, 1)
Which rows to interpolate at
xvals : array_like (npdf, npts)
X-values used for the interpolation
yvals : array_like (npdf)
        Y-values used for the interpolation
Returns
-------
vals : array_like (npdf, n)
        The interpolated values
"""
rr = np.squeeze(row)
nx = np.shape(x)[-1]
def single_row(rv):
return interp1d(xvals[rv], yvals, **kwargs)(x)
vv = np.vectorize(single_row, signature="()->(%i)" % (nx))
return vv(rr)
def interpolate_multi_x_y_2d(x, row, xvals, yvals, **kwargs):
"""
Interpolate a set of values
Parameters
----------
x : array_like (npdf, n)
X values to interpolate at
row : array_like (npdf, 1)
Which rows to interpolate at
xvals : array_like (npdf, npts)
X-values used for the interpolation
yvals : array_like (npdf)
        Y-values used for the interpolation
Returns
-------
vals : array_like (npdf, n)
        The interpolated values
"""
nx = np.shape(x)[-1]
def evaluate_row(rv, xv):
return interp1d(xvals[rv], yvals, **kwargs)(xv)
vv = np.vectorize(evaluate_row, signature="(),(%i)->(%i)" % (nx, nx))
return vv(np.squeeze(row), x)
def interpolate_multi_x_y(x, row, xvals, yvals, **kwargs):
"""
Interpolate a set of values
Parameters
----------
x : array_like (npdf, n)
X values to interpolate at
row : array_like (npdf, 1)
Which rows to interpolate at
xvals : array_like (npdf, npts)
X-values used for the interpolation
yvals : array_like (npdf)
        Y-values used for the interpolation
Returns
-------
vals : array_like
        The interpolated values
"""
case_idx, xx, rr = get_eval_case(x, row)
if case_idx in [CASE_PRODUCT, CASE_FACTOR]:
return interpolate_multi_x_y_product(xx, rr, xvals, yvals, **kwargs)
if case_idx == CASE_2D:
return interpolate_multi_x_y_2d(xx, rr, xvals, yvals, **kwargs)
return interpolate_multi_x_y_flat(xx, rr, xvals, yvals, **kwargs)
def profile(x_data, y_data, x_bins, std=True):
"""Make a 'profile' plot
    Parameters
    ----------
x_data : array_like (n)
The x-values
y_data : array_like (n)
The y-values
x_bins : array_like (nbins+1)
The values of the bin edges
std : bool
If true, return the standard deviations, if false return the errors on the means
Returns
-------
vals : array_like (nbins)
The means
errs : array_like (nbins)
The standard deviations or errors on the means
"""
idx, mask = get_bin_indices(x_bins, x_data)
count = np.zeros(x_bins.size-1)
    vals = np.zeros(x_bins.size-1)
from pathlib import Path
import numpy as np
import pytest
from hnswlib_searcher import HnswlibSearcher
from jina import Document, DocumentArray, Executor
_DIM = 10
@pytest.fixture
def two_elem_index():
index = HnswlibSearcher(dim=_DIM, metric='l2')
da = DocumentArray(
[
Document(id='a', embedding=np.ones(_DIM) * 1.0),
Document(id='b', embedding=np.ones(_DIM) * 2.0),
]
)
index.index(da, {})
return index, da
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.metric == 'cosine'
def test_empty_search():
index = HnswlibSearcher(dim=_DIM)
da = DocumentArray([Document(embedding=np.random.normal(size=(_DIM,)))])
index.search(da, {})
assert len(da[0].matches) == 0
def test_index_no_docs():
index = HnswlibSearcher(dim=_DIM)
index.index(None, {})
def test_index_empty_docs():
index = HnswlibSearcher(dim=_DIM)
da = DocumentArray()
index.index(da, {})
def test_update_no_docs():
index = HnswlibSearcher(dim=_DIM)
index.update(None, {})
def test_update_empty_docs():
index = HnswlibSearcher(dim=_DIM)
da = DocumentArray()
index.update(da, {})
def test_search_no_docs():
index = HnswlibSearcher(dim=_DIM)
index.search(None, {})
def test_search_empty_docs():
index = HnswlibSearcher(dim=_DIM)
da = DocumentArray()
index.search(da, {})
def test_index():
NUM_DOCS = 1000
index = HnswlibSearcher(dim=_DIM)
embeddings = np.random.normal(size=(NUM_DOCS, _DIM))
da1 = DocumentArray([Document(embedding=emb) for emb in embeddings])
da2 = DocumentArray([Document(embedding=emb) for emb in embeddings])
index.index(da1, {})
assert len(index._ids_to_inds) == NUM_DOCS
assert index._index.element_count == NUM_DOCS
assert set(index._ids_to_inds.keys()) == set(da1.get_attributes('id'))
index.index(da2, {})
assert len(index._ids_to_inds) == 2 * NUM_DOCS
assert index._index.element_count == 2 * NUM_DOCS
assert set(index._ids_to_inds.keys()) == set(da1.get_attributes('id')).union(
da2.get_attributes('id')
)
def test_index_with_update(two_elem_index):
index, da = two_elem_index
da_search = DocumentArray(
[
Document(embedding=np.ones(_DIM) * 1.1),
Document(embedding=np.ones(_DIM) * 2.1),
]
)
# switch embeddings of a and b
da[0].embedding = np.ones(_DIM) * 2.0
da[1].embedding = np.ones(_DIM) * 1.0
index.index(da, {})
assert index._ids_to_inds == {'a': 0, 'b': 1}
assert index._index.element_count == 2
index.search(da_search, {})
assert [m.id for m in da_search[0].matches] == ['b', 'a']
assert [m.id for m in da_search[1].matches] == ['a', 'b']
def test_index_wrong_dim():
index = HnswlibSearcher(dim=10)
embeddings = np.random.normal(size=(2, 11))
da1 = DocumentArray([Document(embedding=emb) for emb in embeddings])
with pytest.raises(ValueError, match='Attempted to index'):
index.index(da1, {})
@pytest.mark.parametrize('limit', [5, 10])
@pytest.mark.parametrize(
['metric', 'is_distance'],
[
('cosine', True),
('euclidean', True),
('inner_product', True),
('cosine', False),
('euclidean', False),
('inner_product', False),
],
)
def test_search_basic(limit: int, metric: str, is_distance: bool):
index = HnswlibSearcher(
dim=_DIM, metric=metric, limit=limit, is_distance=is_distance
)
embeddings_ind = np.random.normal(size=(1000, _DIM))
    embeddings_search = np.random.normal(size=(10, _DIM))
#solarnmf_plotting.py
#<NAME>
#3 April 2015
import logging
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from scipy.ndimage.interpolation import rotate
class Plotter(object):
def __init__(self,toption,input_type,u,v,A,T,div=None,q=None,angle=0.0,fig_size=(8,8),print_format='eps',print_dpi=1000,**kwargs):
self.toption = toption
self.input_type = input_type
self.u = u
self.v = v
self.A = A
self.T = T
#Configure logger
self.logger = logging.getLogger(type(self).__name__)
if div is not None:
self.div = div
if not q:
self.q = self.u.shape[1]
else:
self.q = q
if self.input_type == 'matrix':
self.ny,self.nx = np.shape(T)
else:
self.ny,self.nx = np.shape(T)[0],np.shape(T)[0]
#set optional member variables
self.angle = angle
self.print_format = print_format
self.print_dpi = print_dpi
self.fs = 18
self.cm = 'Blues'
self.zero_tol = 1.e-5
self.fig_size = fig_size
self.yaxis_format = FormatStrFormatter('%3.1f')
#Preprocessing
self.A = self.rotate_back(A)
self.get_components()
#Check data type
if self.toption == 'simulation':
try:
self.target = kwargs['target']
except:
raise ValueError("Please specify list of target sources when plotting simulation results.")
if self.input_type == 'timeseries':
try:
self.ny = kwargs['Tmat'].shape[0]
except:
raise ValueError("Please specify matrix representation of time series when using 1D representation.")
def rotate_back(self,mat):
"""Rotate back and reshape"""
#rotate matrix back
if self.angle != 0.0:
mat_rot = rotate(mat,-self.angle)
else:
mat_rot = mat
#get indices of rotated matrix
ny_r,nx_r = mat_rot.shape
#calculate differences
delta_y = int(np.round((ny_r - self.ny)/2.0))
delta_x = int(np.round((nx_r - self.nx)/2.0))
#Return cut and rotated matrix
res = mat_rot[delta_y:((ny_r - delta_y)),delta_x:((nx_r - delta_x))]
if not np.shape(res) == (self.ny,self.nx):
self.logger.warning("Rotated dimensions do not match original dimensions; (%d,%d) != (ny=%d,nx=%d)"%(np.shape(res)[0],np.shape(res)[1],self.ny,self.nx))
self.logger.warning("Adjusting dimensions for compatibility.")
diff_row = self.ny - np.shape(res)[0]
diff_col = self.nx - np.shape(res)[1]
#adjust row dim
if diff_row < 0:
#remove last row
res = res[:self.ny,:]
elif diff_row > 0:
#add last row below zero tol
res = np.vstack([res,0.9*self.zero_tol*np.ones(np.shape(res)[1])])
else:
self.logger.warning("No adjustment on rows, %d==%d"%(np.shape(res)[0],self.ny))
#adjust col dim
if diff_col < 0:
#remove last col
res = res[:,:self.nx]
elif diff_col > 0:
res = np.hstack([res,0.9*self.zero_tol*np.ones([np.shape(res)[0],1])])
else:
self.logger.warning("No adjustment on columns, %d==%d"%(np.shape(res)[1],self.nx))
return res
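    # Shape note (illustrative, not from the original source): for a non-zero angle
    # scipy's rotate() enlarges the array, so the method above crops a central (ny, nx)
    # window back out; because delta_y/delta_x are rounded, the crop can come up one row
    # or column short, which the vstack/hstack branches pad with values just below zero_tol.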
def get_components(self):
"""Separate A matrix into components"""
self.components = []
for i in range(self.q):
self.components.append(self.rotate_back(np.outer(self.u[:,i],self.v[i,:])))
def plot_obs_pred_total(self,**kwargs):
"""Plot original observation and recovered result"""
if self.input_type == 'matrix':
fig,ax = plt.subplots(1,2,figsize=self.fig_size)
plt.tight_layout()
            imT = ax[0].imshow(np.ma.masked_where(self.T < self.zero_tol*np.max(self.T), self.T))
import numpy as np
import pickle
np.random.seed(0)
#!/usr/bin/env python3
# ------------------------------------------------------------------------
# Copyright (c) 2021 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
import os, sys
import math, pdb
import numpy as np
from typing import List, Tuple
class BoxView:
"""
Utilities for box calculations.
Create a view on the original data rather than make a copy
Dimensions:
(left, top, right, bottom)
[left, right), [top, bottom)
"""
_box = None
def __init__(self, box):
self._box = self._as_array(box)
def __getitem__(self, index:int):
return self._box[index]
def __setitem__(self, index:int, v):
self._box[index] = v
def __len__(self):
return 4
def _as_array(self, box) -> np.ndarray:
assert not (box is None)
if isinstance(box, BoxView):
return box.array
else:
assert isinstance(box, np.ndarray)
assert box.size == 4
return box.reshape([-1])
def _as_readonly_array(self, box) -> np.ndarray:
if isinstance(box, BoxView):
return box.array
elif isinstance(box, np.ndarray):
assert box.size == 4
return box.reshape([-1])
elif box is None:
return np.zeros([4])
else:
r = np.array(box)
assert r.size == 4
return r.reshape([-1])
@property
def array(self) -> np.ndarray:
return self._box
@array.setter
def array(self, v):
self._box = self._as_array(v)
@property
def width(self):
return max(self._box[2] - self._box[0], 0)
@property
def height(self):
return max(self._box[3] - self._box[1], 0)
@property
def valid(self):
return self._box[2] > self._box[0] and self._box[3] > self._box[1]
@property
def area(self):
return self.width * self.height
@property
def center(self):
return np.float64(self._box[:2] + self._box[2:] - 1) / 2
@property
def vertices(self) -> np.ndarray:
"""
return [left-top, left-bottom, right-bottom, right-top]
"""
return np.array([
(self._box[0], self._box[1]),
(self._box[0], self._box[3] - 1),
(self._box[2] - 1, self._box[3] - 1),
(self._box[2] - 1, self._box[1])
])
def intersect(self, other) -> "Box":
other = self._as_readonly_array(other)
left_top = np.max([self._box[:2], other[:2]], axis=0)
right_bottom = np.min([self._box[2:], other[2:]], axis=0)
r = Box(np.concatenate([left_top, right_bottom]))
return r if r.valid else Box()
def IoU(self, other) -> float:
inter_area = self.intersect(other).area
area_a = self.area
area_b = Box(other).area
if area_a == 0 and area_b == 0:
return 0.
else:
return inter_area / (area_a + area_b - inter_area)
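    # Worked example (illustrative, not from the original source):
    #   a = BoxView(np.array([0, 0, 4, 4]))    # area 16
    #   b = np.array([2, 2, 6, 6])             # area 16, intersection is the 2x2 box [2, 2, 4, 4]
    #   a.IoU(b) == 4 / (16 + 16 - 4) == 1 / 7 (about 0.143)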
def move_by(self, offset:np.ndarray):
assert len(offset) == 2
self._box[0] += offset[0]
self._box[1] += offset[1]
self._box[2] += offset[0]
self._box[3] += offset[1]
return self
def move_by_points(self,offset:np.array):
assert len(offset) == 4
self._box[0] += offset[0]
self._box[1] += offset[1]
self._box[2] += offset[2]
self._box[3] += offset[3]
return self
def is_legal(self):
return self._box[0] > -1 and self._box[1] > -1 and self._box[2] > -1 \
and self._box[3] > -1
def clip_boundary(self,height,width):
self._box[0] = np.max([0,self._box[0]])
self._box[1] = np.max([0,self._box[1]])
self._box[2] = np.min([width,self._box[2]])
self._box[3] = np.min([height,self._box[3]])
def expend_by(self, width:int):
self._box[:2] -= width
self._box[2:] += width
return self
def get_rotated_vertices(self, degree):
"""
Get counter-clockwise rotated vertices of the box.
The rotation center is the center of the box
"""
vertices = np.float64(self.vertices)
# pdb.set_trace()
center = self.center
center = center.reshape([1, -1])
vertices -= center
theta = degree / 180.0 * math.pi
cos_theta = math.cos(theta)
sin_theta = math.sin(theta)
        rot_mat = np.array([[cos_theta, -sin_theta], [sin_theta, cos_theta]])
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
import time
import os
import sys
import numpy as np
sys.path.append(os.path.abspath('../'))
from loaders.aec_loader import aec_loader
from utils.mat_helpers import *
from algorithms.audio_processing import *
from algorithms.ssaec_fast import *
class feature_generator(object):
# --------------------------------------------------------------------------
def __init__(self,):
self.aec_loader = aec_loader(name='aec')
self.ssaec = ssaec_fast(wlen=512, tail_length=0.250)
self.fs = self.aec_loader.fs
self.samples = int(self.fs*15)
self.silence = int(self.fs*5)
self.dataset_dir = self.aec_loader.dataset_dir
self.scenarios = ['nearend', 'farend', 'doubletalk']
self.modes = ['real','simu','hard']
self.train_set_length = 5000
self.test_set_length = len(self.aec_loader.d_test)
self.blind_test_set_length = len(self.aec_loader.d_test_blind)
self.nband = 25
self.Q_long = create_mel_filterbank(nbin=513, fs=16e3, nband=self.nband)
self.Q_short = create_mel_filterbank(nbin=129, fs=16e3, nband=self.nband)
#-------------------------------------------------------------------------
def compensate_delay(self, x, d):
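        # GCC-PHAT style alignment (explanatory note, not from the original source): the
        # cross-spectrum Fd * conj(Fx) is normalised to unit magnitude (phase transform),
        # its inverse FFT peaks at the lag between far-end reference x and microphone
        # signal d, and x is circularly shifted by that lag before echo cancellation.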
Fx = rfft(x)
Fd = rfft(d)
Phi = Fd*np.conj(Fx)
Phi /= np.abs(Phi) + 1e-3
Phi[0] = 0
tmp = irfft(Phi)
tau = np.argmax(np.abs(tmp))
x = np.roll(x, tau)
#print(tau/self.fs)
return x
#-------------------------------------------------------------------------
#scenarios = ['nearend', 'farend', 'doubletalk']
#modes = ['real','simu','hard']
def load_train(self, nbatch=1, mode=None, scenario=None, idx=None, p_modes=[0.3, 0.3, 0.4], p_scenarios=[0.1, 0.1, 0.8]):
mode0 = mode
scenario0 = scenario
idx0 = idx
x = np.zeros((nbatch, self.samples-self.silence), dtype=np.float32)
y = np.zeros((nbatch, self.samples-self.silence), dtype=np.float32)
d = np.zeros((nbatch, self.samples-self.silence), dtype=np.float32)
e = np.zeros((nbatch, self.samples-self.silence), dtype=np.float32)
s = np.zeros((nbatch, self.samples-self.silence), dtype=np.float32)
for b in range(nbatch):
if mode0==None:
mode = np.random.choice(self.modes, p=p_modes)
else:
mode = mode0
if scenario0==None:
scenario = np.random.choice(self.scenarios, p=p_scenarios)
else:
scenario = scenario0
if idx0==None:
                idx = np.random.choice(self.train_set_length)
import unittest
from scipy.stats import norm
import warnings
import pickle
import tensorflow as tf
import sys
import os
import numpy as np
import scipy.stats as stats
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from cde.density_estimator import NormalizingFlowEstimator
from cde.density_estimator.normalizing_flows import InvertedPlanarFlow, AffineFlow, IdentityFlow, InvertedRadialFlow
class TestFlows(unittest.TestCase):
    def test_planar_invertible(self):
with tf.Session() as sess:
u = tf.constant([[-2.], [1.], [10.], [2.]])
w = tf.constant([[80.], [-1.], [1.], [1.]])
# Compute w * û
inv = sess.run(w * InvertedPlanarFlow._u_circ(u, w))
for i in inv:
self.assertGreater(i, -1.)
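            # Background (not from the original source): an inverted planar flow is
            # invertible only if w^T u_hat > -1 (Rezende & Mohamed, 2015); _u_circ is
            # expected to reparametrise u so this holds, which is what the element-wise
            # check above verifies.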
def test_affine_shift_and_scale(self):
with tf.Session() as sess:
base_dist = tf.distributions.Normal(loc=0., scale=1.)
# shift the distribution three to the right
transf_dist = tf.distributions.Normal(loc=3., scale=1.)
flow = AffineFlow(tf.constant([[0., 3.]]), 1)
flow_dist = tf.contrib.distributions.TransformedDistribution(distribution=base_dist, bijector=flow)
# eval the samples so they stay constant
samples = sess.run(base_dist.sample([1000]))
# the output is of shape (?, 1) so it needs to be squeezed
pdf_estimate = tf.squeeze(flow_dist.prob(samples))
pdf_actual = transf_dist.prob(samples)
pdf_estimate, pdf_actual = sess.run([pdf_estimate, pdf_actual])
self.assertLessEqual(np.mean(np.abs(pdf_actual - pdf_estimate)), 0.1)
def _test_flow_correct_dims_NN(self, flow_name):
"""
General structure:
flow_params = MLP(x)
pdf(y|x) = flow(y, flow_params)
The tensor being transformed (=y) are of shape (batch_size, event_dims)
- batch_size = len(x) == len(y)
- event_dims = rank(y)
For each element of x, the MLP outputs one parametrization for the flows
for each of these parameters, the flow transforms one element of y
therefore len(x) == len(y)
the event dimension describes the rank of the base probability distribution that's being transformed
Tensorflow's MultivariateNormal doesn't implement a CDF. Therefore we switch to a Normal for 1-D Problems
Caveat:
MultivariateNormal PDF output shape: (batch_size, )
UnivariateNormal PDF output shape: (batch_size, 1)
Therefore we adapt the output shape of the ildj to be (batch_size, 1) for 1-D, (batch_size, ) for N-D
The flows are transforming tensors (batch_size, event_size)
Forward: (batch_size, event_size) -> (batch_size, event_size)
Inverse: (batch_size, event_size) -> (batch_size, event_size)
ILDJ: (batch_size, event_size) -> (batch_size, 1) [1-D] or (batch_size, ) [N-D]
This forms a transformed distribution:
Sample: -> (batch_size, event_size)
PDF: (batch_size, event_size) -> (batch_size, 1) [1-D] or (batch_size, ) [N-D]
CDF: (batch_size, event_size) -> (batch_size, 1) [EXISTS ONLY FOR 1-D!]
"""
tests = [
{
'x': [[1.], [0.], [2.], [4.], [1.]],
'y': [[1.], [0.], [2.], [3.], [1.]],
'ndim_x': 1,
'ndim_y': 1
},
{
'x': [[1., 1.], [0., 0.], [2., 2.], [4., 4.], [1., 1.]],
'y': [[1., 1.], [0., 0.], [2., 2.], [3., 3.], [1., 1.]],
'ndim_x': 2,
'ndim_y': 2
}
]
with tf.Session() as sess:
for test in tests:
model = NormalizingFlowEstimator('nf_dimtest_' + flow_name + str(tests.index(test)),
test['ndim_x'], test['ndim_y'],
random_seed=22, n_training_epochs=2,
flows_type=(flow_name,))
x, y = np.array(test['x']), np.array(test['y'])
model.fit(x, y)
p = model.pdf(x, y)
self.assertEqual(p.shape, (len(y),))
                # every test has equal first and last elements, these are basic sanity tests
self.assertAlmostEqual(p[0], p[-1], places=5)
self.assertNotAlmostEqual(p[0], p[1], places=5)
def _test_flow_correct_dims(self, flow_class):
tests = [
([[1.], [2.], [1.]], 1),
([[1., 1.], [2., 2.], [1., 1.]], 2),
]
with tf.Session() as sess:
for test in tests:
y, event_dims = test
batch_size = len(y)
y = np.array(y, dtype=np.float32)
if event_dims == 1:
base_dist = tf.distributions.Normal(loc=0., scale=1.)
else:
base_dist = tf.contrib.distributions.MultivariateNormalDiag(loc=[0.] * event_dims,
scale_diag=[1.] * event_dims)
params = tf.ones(shape=(batch_size, flow_class.get_param_size(event_dims)))
flow = flow_class(params, event_dims)
flow_dist = tf.contrib.distributions.TransformedDistribution(distribution=base_dist, bijector=flow)
# reverse should transform (batch_size, event_dims) -> (batch_size, event_dims)
self.assertEqual(y.shape, (batch_size, event_dims))
inverse_y = flow.inverse(y).eval()
self.assertEqual(inverse_y.shape, (batch_size, event_dims))
# ildj is a reduction over event_dims
# therefore transforms: (batch_size, event_dims) -> (batch_size, 1)
self.assertEqual(y.shape, (batch_size, event_dims))
ildj_y = flow.inverse_log_det_jacobian(y).eval()
if event_dims == 1:
self.assertEqual(ildj_y.shape, (batch_size, 1))
else:
self.assertEqual(ildj_y.shape, (batch_size,))
# probability: (batch_size, event_dims) -> (batch_size, 1)
self.assertEqual(y.shape, (batch_size, event_dims))
p = flow_dist.prob(y).eval()
if event_dims == 1:
self.assertEqual(p.shape, (batch_size, 1))
else:
self.assertEqual(p.shape, (batch_size,))
                # the first and last element of every test are equal, this is a basic sanity check
self.assertEqual(p[0], p[2])
self.assertNotEqual(p[0], p[1])
def test_affine_flow_correct_dimension(self):
self._test_flow_correct_dims(AffineFlow)
self._test_flow_correct_dims_NN('affine')
def test_identity_flow_correct_dimension(self):
self._test_flow_correct_dims(IdentityFlow)
# we don't test NN dimensions for the Identity flow as it contains no trainable variables
def test_planar_flow_correct_dimension(self):
self._test_flow_correct_dims(InvertedPlanarFlow)
self._test_flow_correct_dims_NN('planar')
def test_radial_flow_correct_dimension(self):
self._test_flow_correct_dims(InvertedRadialFlow)
self._test_flow_correct_dims_NN('radial')
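# Added illustrative sketch (not part of the original test suite): a numpy-only
# restatement of the shape contract documented in _test_flow_correct_dims_NN.
# For 1-D problems the per-sample density keeps a trailing singleton dimension,
# (batch_size, 1); for N-D problems it is reduced over event_dims to (batch_size,).
def _shape_contract_sketch(batch_size=5, event_dims=2):
    y = np.ones((batch_size, event_dims))
    # N-D case: reduce over the event dimension -> shape (batch_size,)
    log_prob_nd = np.sum(-0.5 * y ** 2, axis=-1)
    # 1-D case: keep the trailing singleton dimension -> shape (batch_size, 1)
    log_prob_1d = -0.5 * y[:, :1] ** 2
    return log_prob_nd.shape, log_prob_1d.shape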
class Test_NF_2d_gaussian(unittest.TestCase):
def get_samples(self, mu=2, std=1.0):
np.random.seed(22)
data = np.random.normal([mu, mu], std, size=(2000, 2))
X = data[:, 0]
Y = data[:, 1]
return X, Y
def test_NF_radial_with_2d_gaussian(self):
mu = 200
std = 23
X, Y = self.get_samples(mu=mu, std=std)
model = NormalizingFlowEstimator("nf_estimator_2d_radial", 1, 1, flows_type=('radial',),
n_training_epochs=500, random_seed=22)
model.fit(X, Y)
y = np.arange(mu - 3 * std, mu + 3 * std, 6 * std / 20)
x = np.asarray([mu for i in range(y.shape[0])])
p_est = model.pdf(x, y)
p_true = norm.pdf(y, loc=mu, scale=std)
self.assertLessEqual(np.mean(np.abs(p_true - p_est)), 0.1)
def test_NF_affine_with_2d_gaussian(self):
mu = 3
std = 2
X, Y = self.get_samples(mu=mu, std=std)
model = NormalizingFlowEstimator("nf_estimator_2d_affine", 1, 1, flows_type=('affine',),
n_training_epochs=500, random_seed=22)
model.fit(X, Y)
y = np.arange(mu - 3 * std, mu + 3 * std, 6 * std / 20)
x = np.asarray([mu for i in range(y.shape[0])])
p_est = model.pdf(x, y)
p_true = norm.pdf(y, loc=mu, scale=std)
self.assertLessEqual(np.mean(np.abs(p_true - p_est)), 0.1)
def test_NF_planar_with_2d_gaussian(self):
mu = 200
std = 23
X, Y = self.get_samples(mu=mu, std=std)
model = NormalizingFlowEstimator("nf_estimator_2d_planar", 1, 1, flows_type=('planar',),
n_training_epochs=500, random_seed=22)
model.fit(X, Y)
y = | np.arange(mu - 3 * std, mu + 3 * std, 6 * std / 20) | numpy.arange |
"""
Test cases for the regi0.geographic.outliers._is_std_outlier function.
"""
import numpy as np
import pytest
from regi0.geographic.outliers import _is_std_outlier
@pytest.fixture()
def values():
return np.array([52, 56, 53, 57, 51, 59, 1, 99])
def test_std(values):
result = _is_std_outlier(values)
expected = | np.array([False, False, False, False, False, False, True, False]) | numpy.array |
########################################
# MIT License
#
# Copyright (c) 2020 <NAME>
########################################
'''
Definition of different minimization functions.
'''
from ..base import parameters
from ..pdfs import dataset
import functools
import numpy as np
import warnings
__all__ = ['binned_chisquare', 'binned_maximum_likelihood',
'binned_extended_chisquare', 'binned_extended_maximum_likelihood',
'unbinned_maximum_likelihood', 'unbinned_extended_maximum_likelihood']
# Names of different FCNs
BINNED_CHISQUARE = 'chi2'
BINNED_EXTENDED_CHISQUARE = 'echi2'
BINNED_MAXIMUM_LIKELIHOOD = 'bml'
BINNED_EXTENDED_MAXIMUM_LIKELIHOOD = 'beml'
UNBINNED_MAXIMUM_LIKELIHOOD = 'uml'
UNBINNED_EXTENDED_MAXIMUM_LIKELIHOOD = 'ueml'
def data_type_for_fcn(fcn):
'''
Get the associated data type for a given FCN.
:param fcn: FCN to consider.
:type fcn: str
:returns: data type associated to the FCN.
:rtype: str
:raises ValueError: if the FCN is unknown.
'''
if fcn in (BINNED_CHISQUARE, BINNED_EXTENDED_CHISQUARE, BINNED_MAXIMUM_LIKELIHOOD, BINNED_EXTENDED_MAXIMUM_LIKELIHOOD):
return dataset.BINNED
elif fcn in (UNBINNED_MAXIMUM_LIKELIHOOD,
UNBINNED_EXTENDED_MAXIMUM_LIKELIHOOD):
return dataset.UNBINNED
else:
raise ValueError(f'Unknown FCN type "{fcn}"')
def evaluate_constraints(constraints=None):
'''
Calculate the values of the constraints, if any.
:param constraints: functions defining constraints to different parameters.
    :type constraints: list(PDF) or None
    :returns: -2 times the natural log of the product of the constraint values (0 if there are none).
:rtype: float
'''
if constraints is None:
return 0.
res = 1.
for c in constraints:
res *= c.function(normalized=True)
return -2. * | np.log(res) | numpy.log |
"""Script that tests the compiled TF-KDTree
"""
import sys
sys.path.append("../") #TODO: Hack
import os
import unittest
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
assert(not tf.executing_eagerly())
from tf_nearest_neighbor import nn_distance, buildKDTree, searchKDTree
import sys
from IPython.utils import io
import timeit
from scipy.spatial import cKDTree
np.random.seed(0)
class TestKNNImplementation(unittest.TestCase):
def __init__(self, test_name = None):
super(TestKNNImplementation, self).__init__(test_name)
self.sess = tf.compat.v1.InteractiveSession()
def referenceSolution(self, points_ref, points_query, k):
kdtree = cKDTree(points_ref)
dists, inds = kdtree.query(points_query, k)
return dists, inds
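    def _scipy_query_shape_note(self):
        """Added illustrative helper (not part of the original tests): scipy's cKDTree.query
        returns 1-D (n_query,) arrays when k == 1 and 2-D (n_query, k) arrays when k > 1,
        which is why checkSuccessful re-adds a trailing axis in the k == 1 case."""
        kdtree = cKDTree(np.random.uniform(size=(10, 3)))
        d1, i1 = kdtree.query(np.zeros((4, 3)), k=1)  # shapes (4,) and (4,)
        d2, i2 = kdtree.query(np.zeros((4, 3)), k=2)  # shapes (4, 2) and (4, 2)
        return d1.shape, i1.shape, d2.shape, i2.shape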
def executeTest(self, nr_refs, nr_query, k, d=3):
points_ref = np.random.uniform(size=(nr_refs, d)).astype(np.float32) * 1e3
points_query = np.random.uniform(size=(nr_query, d)).astype(np.float32) * 1e3
points_ref_tf = tf.compat.v1.placeholder(dtype=tf.float32, shape=points_ref.shape)
points_query_tf = tf.compat.v1.placeholder(dtype=tf.float32, shape=points_query.shape)
dists_ref, inds_ref = self.referenceSolution(points_ref, points_query, k=k)
nn_distance_result = nn_distance(points_ref, points_query, nr_nns_searches=k)
dists_knn, inds_knn = self.sess.run(nn_distance_result, feed_dict={points_ref_tf: points_ref, points_query_tf: points_query})
#Shape checks
self.assertTrue(inds_knn.shape[-1] == k)
self.assertTrue(inds_knn.shape[0] == points_query.shape[0])
self.assertTrue(np.all(inds_knn.shape == dists_knn.shape))
self.assertTrue((dists_ref.ndim == 1 and dists_knn.ndim == 2 and dists_knn.shape[-1] == 1)
or np.all(dists_ref.shape == dists_knn.shape))
self.checkSuccessful(points_ref, points_query, k, dists_ref, inds_ref, dists_knn, inds_knn)
def checkSuccessful(self, points_ref, points_query, k, dists_ref, inds_ref, dists_knn, inds_knn):
if dists_ref.ndim == 1:
#dists_knn = dists_knn[..., 0]
#inds_knn = inds_knn[..., 0]
dists_ref = dists_ref[..., np.newaxis]
inds_ref = inds_ref[..., np.newaxis]
self.assertTrue(
np.allclose(dists_ref ** 2, np.sum((points_query[:, np.newaxis] - points_ref[inds_ref]) ** 2, axis=-1),
atol=1e-5))
self.assertTrue(
np.allclose(dists_knn, | np.sum((points_query[:, np.newaxis] - points_ref[inds_knn]) ** 2, axis=-1) | numpy.sum |
import numpy as np
import pickle as pk
import os, sys, torch
sys.path.insert(1, os.path.join(sys.path[0], '../..'))
import bayesiancoresets as bc
#make it so we can import models/etc from parent folder
sys.path.insert(1, os.path.join(sys.path[0], '../common'))
from model_neurlinr import *
from neural import *
def linearize():
args_dict = dict()
c = -1
for beta in [0.2]:
for tr in range(10): # trial number
for nm in ["BCORES", "SVI"]: # coreset method
for i0 in [.1]:
for f_rate in [30, 0]:
for dnm in ["boston"]: #["year"]: #, "prices2018"]:
c += 1
args_dict[c] = (tr, nm, dnm, f_rate, beta, i0)
return args_dict
mapping = linearize()
#tr, algnm, dnm, f_rate, beta, i0 = mapping[int(sys.argv[1])]
tr, algnm, dnm, f_rate, beta, i0 = mapping[0]
# randomize datapoints order
def unison_shuffled_copies(a, b):
assert a.shape[0] == b.shape[0]
p = np.random.permutation(a.shape[0])
return a[p], b[p]
# Parse input arguments
np.random.seed(int(tr))
#Specify results folder
results_fldr = 'results'
if not os.path.exists(results_fldr):
os.mkdir(results_fldr)
## Prepare and read dataset
if dnm=='synthetic':
N = 3000 # number of data points
X, Y = build_synthetic_dataset(N)
else:
X, Y = load_data(dnm, data_dir='../data')
#X, Y = load_data(dnm, data_dir='/home/dm754/rds/hpc-work/zellner_neural/data')
N = Y.shape[0] # number of data points
if dnm=='boston':
init_size = 10
batch_size = 20
out_features = 20 # dimension of the ouput of the neural encoder used for lin reg
weight_decay = 1.
initial_lr = 1e-2
n_subsample_select = 3
elif dnm=='year':
init_size = 200
batch_size = 100
out_features = 100
weight_decay = 3.
initial_lr = 1e-2
n_subsample_select = 100
elif dnm=='prices2018':
init_size = 200
batch_size = 100
out_features = 5
weight_decay = 1.
initial_lr = 1e-2
n_subsample_select = 100
test_size = int(0.1*N)
tss = min(500, test_size) # test set sample size
# Split datasets
X, Y = unison_shuffled_copies(X.astype(np.float32), Y.astype(np.float32))
X_init, Y_init, X, Y, X_test, Y_test=(
X[:init_size,:], Y[:init_size], X[init_size:-test_size,:],
Y[init_size:-test_size], X[-test_size:,:], Y[-test_size:])
X, Y, X_init, Y_init, X_test, Y_test, input_mean, input_std, output_mean, output_std = preprocessing(X, Y, X_init, Y_init, X_test, Y_test)
print(X.shape)
exit()
#Specify priors
#get empirical mean/std
datastd = Y.std()
datamn = Y.mean()
groups = list(np.split(np.arange(X.shape[0]), range(batch_size, X.shape[0], batch_size)))
X, Y = perturb(X, Y, f_rate=0.01*f_rate, groups=groups) # corrupt datapoints
Z_init = np.hstack((X_init, Y_init)).astype(np.float32)
Z = np.hstack((X, Y)).astype(np.float32)
Z_test = np.hstack((X_test, Y_test)).astype(np.float32)[:1000,:]
# Specify encoder and coreset hyperparameters
nl = NeuralLinear(Z_init, out_features=out_features, input_mean=input_mean, input_std=input_std, output_mean=output_mean, output_std=output_std, seed=tr)
train_nn_freq = 1 # frequency of nn training wrt coreset iterations
VI_opt_itrs = 500
n_subsample_opt = 1000
proj_dim = 100
SVI_step_sched = lambda i : i0/(1.+i)
#BPSVI_step_sched = lambda m: lambda i : i0/(1.+i)
BCORES_step_sched = lambda i : i0/(1.+i)
M = 20 # max num of coreset iterations
mu0 = datamn*np.ones(out_features)
ey = np.eye(out_features)
Sig0 = (datastd**2+datamn**2)*ey
Sig0inv = np.linalg.inv(Sig0)
#create function to output log_likelihood given param samples
print('Creating log-likelihood function')
deep_encoder = lambda nl, pts: (np.hstack((nl.encode(torch.from_numpy(pts[:, :-1].astype(np.float32))).detach().numpy(),
pts[:,-1][:,np.newaxis].astype(np.float32))))
log_likelihood = lambda pts, th, nl: neurlinr_loglikelihood(deep_encoder(nl, pts), th, datastd**2)
grad_log_likelihood = lambda pts, th, nl: NotImplementedError
beta_likelihood = lambda pts, th, beta, nl: neurlinr_beta_likelihood(deep_encoder(nl, pts), th, beta, datastd**2)
grad_beta = lambda pts, th, beta, nl : NotImplementedError
print('Creating black box projector for sampling from coreset posterior')
'''
def sampler_w(n, wts, pts):
if pts.shape[0] == 0:
wts = np.zeros(1)
pts = np.zeros((1, Z.shape[1]))
muw, LSigw, LSigwInv = weighted_post(mu0, Sig0inv, datastd**2, deep_encoder(nl, pts), wts)
return muw + np.random.randn(n, muw.shape[0]).dot(LSigw.T)
'''
def sampler_w(n, wts, pts):
if pts.shape[0] == 0:
wts = np.zeros(1)
pts = np.zeros((1, Z.shape[1]))
sigsq = datastd**2
z = deep_encoder(nl, pts)
X = z[:, :-1]
Y = z[:, -1]
Sigp = np.linalg.inv(Sig0inv + (wts[:, np.newaxis]*X).T.dot(X)/sigsq)
mup = np.dot(Sigp, np.dot(Sig0inv,np.ones(out_features)) + (wts[:, np.newaxis]*Y[:,np.newaxis]*X).sum(axis=0)/datastd**2)
return np.random.multivariate_normal(mup, Sigp, n)
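# Added sketch (not in the original script): sampler_w above draws from the conjugate
# Gaussian posterior of weighted Bayesian linear regression on the encoded points.
# This hedged reference version writes the same update with an explicit prior mean mu0
# (the code above uses a ones vector in the prior-mean term instead).
def _weighted_posterior_sketch(X_enc, Y_enc, wts, sigsq, Sig0inv, mu0):
    Sigp = np.linalg.inv(Sig0inv + (wts[:, np.newaxis] * X_enc).T.dot(X_enc) / sigsq)
    mup = Sigp.dot(Sig0inv.dot(mu0)
                   + (wts[:, np.newaxis] * Y_enc[:, np.newaxis] * X_enc).sum(axis=0) / sigsq)
    return mup, Sigp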
prj_w = bc.BlackBoxProjector(sampler_w, proj_dim, log_likelihood, grad_log_likelihood, nl=nl)
prj_bw = bc.BetaBlackBoxProjector(sampler_w, proj_dim, beta_likelihood, log_likelihood, grad_beta, nl=nl)
#create coreset construction objects
print('Creating coreset construction objects')
in_batches = True
if in_batches:
sparsevi = bc.SparseVICoreset(Z, prj_w, opt_itrs=VI_opt_itrs, n_subsample_opt=n_subsample_opt, n_subsample_select=n_subsample_select,
step_sched=SVI_step_sched, wts=np.ones(init_size), idcs=1e7+np.arange(init_size), pts=Z_init, groups=groups, initialized=True, enforce_new=False)
bcoresvi = bc.BetaCoreset(Z, prj_bw, opt_itrs=VI_opt_itrs, n_subsample_opt=n_subsample_opt, n_subsample_select=n_subsample_select,
step_sched=BCORES_step_sched, beta=beta, learn_beta=False, wts=np.ones(init_size), idcs=1e7+np.arange(init_size), pts=Z_init, groups=groups, initialized=True)
unif = bc.UniformSamplingCoreset(Z, wts=np.ones(init_size), idcs=1e7+np.arange(init_size), pts=Z_init, groups=groups)
else:
raise NotImplementedError("Supported only batch data acquisition")
algs = {'BCORES': bcoresvi,
#'BPSVI': bpsvi,
'SVI': sparsevi,
'RAND': unif,
'PRIOR': None}
alg = algs[algnm]
# Diagnostics
nlls = | np.zeros(M+1) | numpy.zeros |
import numpy
from scipy.integrate import odeint
import matplotlib.pyplot as plt
# This program calculates the state-space matrices of a 7-DOF full car model
# A Matrix gives the system matrix
# B1 Matrix represents the disturbance dynamics
# B2 Matrix represents the actuator dynamics
# m1: chassis mass
# J: chassis mass inertia
# m2: left front shaft mass
# m3: left rear shaft mass
# m4: right front shaft mass
# m5: right rear shaft mass
#Left
# k1: left front suspension stiffness
# k2: left front tire stiffness
# c1: left front suspension damping
# c2: left front tire damping
# k3: left rear suspension stiffness
# k4: left rear tire stiffness
# c3: left rear suspension damping
# c4: left rear tire damping
# Right
# k5: right front suspension stiffness
# k6: right front tire stiffness
# c5: right front suspension damping
# c6: right front tire damping
# k7: right rear suspension stiffness
# k8: right rear tire stiffness
# c7: right rear suspension damping
# c8: right rear tire damping
# Inertial parameters
m1 = 1000
m2 = 30
m3 = 30
m4 = 30
m5 = 30
J1 = 1000
J2 = 2000
# Geometric Parameters
L1 = 1.2
L2 = 1.5
L3 = 0.5
L4 = 0.5
# Left Front Suspension Parameters
k1 = 15000
k2 = 220000
c1 = 100
c2 = 1000
# Left Rear Suspension Parameters
k3 = 15000
k4 = 220000
c3 = 100
c4 = 1000
# Right Front Suspension Parameters
k5 = 15000
k6 = 220000
c5 = 100
c6 = 1000
# Right Rear Suspension Parameters
k7 = 15000
k8 = 220000
c7 = 100
c8 = 1000
# inertia matrix
M_row1 = [m1, 0, 0, 0, 0, 0, 0]
M_row2 = [0, m2, 0, 0, 0, 0, 0]
M_row3 = [0, 0, m3, 0, 0, 0, 0]
M_row4 = [0, 0, 0, m4, 0, 0, 0]
M_row5 = [0, 0, 0, 0, m5, 0, 0]
M_row6 = [0, 0, 0, 0, 0, J1, 0]
M_row7 = [0, 0, 0, 0, 0, 0, J2]
M = numpy.array((M_row1, M_row2, M_row3, M_row4, M_row5, M_row6, M_row7))
print(M)
M_inv = numpy.linalg.inv(M)
# Rigidity Matrix
K_row1 = [k1+k3+k5+k7, -k1, -k3, -k5, -k7, L1*(k1+k5)-L2*(k3+k7), L3*(k1+k3)-L4*(k5+k7)]
K_row2 = [-k1, k1+k2, 0, 0, 0, -k1*L1, -k1*L3]
K_row3 = [-k3, 0, k3+k4, 0, 0, k3*L2, -k3*L3]
K_row4 = [-k5, 0, 0, k5+k6, 0, -k5*L1, k5*L4]
K_row5 = [-k7, 0, 0, 0, k7+k8, k7*L2, k7*L4]
K_row6 = [L1*(k1+k5)-L2*(k3+k7), -k1*L1, k3*L2, -k5*L1, k7*L2, L1**2*(k1+k5)+L2**2*(k3+k7), L1*(k1*L3-k5*L4)-L2*(k3*L3-k7*L4)]
K_row7 = [L3*(k1+k3)-L4*(k5+k7), -k1*L3, -k3*L3, k5*L4, k7*L4, L3*(k1*L1-k3*L2)-L4*(k5*L1-k7*L2), L3**2*(k1+k3)+L4**2*(k5+k7)]
K = numpy.array((K_row1, K_row2, K_row3, K_row4, K_row5, K_row6, K_row7))
print(K)
# Damping Matrix
C_row1 = [c1+c3+c5+c7, -c1, -c3, -c5, -c7, L1*(c1+c5)-L2*(c3+c7), L3*(c1+c3)-L4*(c5+c7)]
C_row2 = [-c1, c1+c2, 0, 0, 0, -c1*L1, -c1*L3]
C_row3 = [-c3, 0, c3+c4, 0, 0, c3*L2, -c3*L3]
C_row4 = [-c5, 0, 0, c5+c6, 0, -c5*L1, c5*L4]
C_row5 = [-c7, 0, 0, 0, c7+c8, c7*L2, c7*L4]
C_row6 = [L1*(c1+c5)-L2*(c3+c7), -c1*L1, c3*L2, -c5*L1, c7*L2, L1**2*(c1+c5)+L2**2*(c3+c7), L1*(c1*L3-c5*L4)-L2*(c3*L3-c7*L4)]
C_row7 = [L3*(c1+c3)-L4*(c5+c7), -c1*L3, -c3*L3, c5*L4, c7*L4, L3*(c1*L1-c3*L2)-L4*(c5*L1-c7*L2), L3**2*(c1+c3)+L4**2*(c5+c7)]
C = numpy.array((C_row1, C_row2, C_row3, C_row4, C_row5, C_row6, C_row7))
print(C)
print('Check K')
print(K-numpy.transpose(K))
print('Check C')
print(C-numpy.transpose(C))
s = len(M)
print(s)
# A Matrix
A1 = numpy.concatenate((numpy.zeros((s, s)), numpy.identity(s)), axis=1)
A2 = numpy.concatenate((-numpy.dot(M_inv, K), -numpy.dot(M_inv, C)), axis=1)
A = numpy.concatenate((A1, A2), axis=0)
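# Added sanity check (illustrative assumption, not in the original script): the state
# vector stacks the 7 displacements and 7 velocities, so A must be of size 2*s x 2*s.
assert A.shape == (2 * s, 2 * s)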
# B matrices
B12_row1 = [0, 0, 0, 0, 0, 0, 0, 0]
B12_row2 = [k2 / m2, c2 / m2, 0, 0, 0, 0, 0, 0]
B12_row3 = [0, 0, k4 / m3, c4 / m3, 0, 0, 0, 0]
B12_row4 = [0, 0, 0, 0, k6 / m4, c6 / m4, 0, 0]
B12_row5 = [0, 0, 0, 0, 0, 0, k8 / m5, c8 / m5]
B12_row6 = [0, 0, 0, 0, 0, 0, 0, 0]
B12_row7 = [0, 0, 0, 0, 0, 0, 0, 0]
sb12 = len(B12_row7)
B11 = numpy.zeros((s, sb12))
B12 = numpy.array((B12_row1, B12_row2, B12_row3, B12_row4, B12_row5, B12_row6, B12_row7))
B1 = numpy.concatenate((B11, B12), axis=0)
B22_row1 = numpy.array([1 / m1, 1 / m1, 0, 0])
B22_row2 = numpy.array([-1 / m2, 0, 0, 0])
B22_row3 = numpy.array([0, -1 / m3, 0, 0])
B22_row4 = numpy.array([0, 0, -1 / m2, 0])
B22_row5 = numpy.array([0, 0, 0, -1 / m3])
B22_row6 = numpy.array([-L1 / J1, L2 / J1, -L1 / J1, L2 / J1])
B22_row7 = numpy.array([-L3 / J2, -L3 / J2, L4 / J2, L4 / J2])
sb22 = len(B22_row7)
B21 = numpy.zeros((s, sb22))
B22 = numpy.array((B22_row1, B22_row2, B22_row3, B22_row4, B22_row5, B22_row6, B22_row7))
B2 = numpy.concatenate((B21, B22), axis=0)
# C matrix
Cc = numpy.identity(2 * s)
# D matrix
sc = len(Cc)
sb1 = len(B1[0])
sb2 = len(B2[0])
Dd1 = numpy.zeros((sc, sb1))
Dd2 = numpy.zeros((sc, sb2))
# System is xdot=Ax + B1w + B2u
# Output is y=Ccx + Dd1w + Dd2u
print("Autonomous system matrix")
print(A)
print("disturbance matrix")
print(B1)
print("Actuator matrix")
print(B2)
D, V = numpy.linalg.eig(A)
print(D)
# state space function
def full_car(x, t):
w = numpy.transpose( | numpy.array([1, 0, 1, 0, 1, 0, 1, 0]) | numpy.array |
"""Error operation in PyQuest-cffi"""
# Copyright 2019 HQS Quantum Simulations GmbH
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyquest_cffi.questlib import quest, _PYQUEST, tqureg, ffi_quest
import numpy as np
from typing import Tuple, Sequence, List
import warnings
from pyquest_cffi import cheat
class mixDensityMatrix(_PYQUEST):
r"""Modifies qureg to become (1-probability)*qureg + probability*qureg_other
Both registers must be equal-dimension density matrices, and prob must be in [0, 1].
Args:
qureg: quantum register to be modified
probability: the probability of qureg_other in the modified qureg
qureg_other: the quantum register to be mixed into qureg
"""
def call_interactive(self, qureg: tqureg,
probability: float,
qureg_other: tqureg) -> None:
r"""Interactive call of PyQuest-cffi
Args:
qureg: quantum register to be modified
probability: the probability of qureg_other in the modified qureg
qureg_other: the quantum register to be mixed into qureg
Raises:
RuntimeError: Both quregs must be density matrix,
but at least one of them is a wavefunction
RuntimeError: Qureg and Qureg_other must be defined for the same number of qubits
"""
if not qureg.isDensityMatrix or not qureg_other.isDensityMatrix:
raise RuntimeError("Both quregs must be density matrix, "
+ "but at least one of them is a wavefunction")
elif not (cheat.getNumQubits()(qureg=qureg) == cheat.getNumQubits()(qureg=qureg_other)):
raise RuntimeError("Qureg and Qureg_other must be defined "
+ "for the same number of qubits")
else:
quest.mixDensityMatrix(qureg, probability, qureg_other)
def Kraus_matrices(self, probability: float, **kwargs) -> Tuple[np.ndarray, np.ndarray]:
r"""The definition of the Kraus Operator as a matrix
Args:
probability: The probability/ relative amplitude with which the dephasing occurs,
probability needs to be smaller than 1/2
**kwargs: Additional keyword arguments
Raises:
NotImplementedError: not implemented
"""
raise NotImplementedError()
def superoperator_matrix(self, probability: float, **kwargs) -> np.ndarray:
r"""The definition of the superoperator acting on the density matrix written as a vector
.. math::
\rho = A \rho B \\
\vec{\rho} = \mathcal{L} \vec{\rho}
where A and B are arbitrary matrices
Args:
probability: The probability/ relative amplitude with which the dephasing occurs,
probability needs to be smaller than 1/2
**kwargs: Additional keyword arguments
Raises:
NotImplementedError: not implemented
"""
raise NotImplementedError()
class mixDephasing(_PYQUEST):
r"""One qubit dephasing error
Apply the dephasing :math:`\sigma^z` operator to a qubit q with probability p
Can also be expressed as a Kraus operator :math:`\mathcal{K}`
.. math::
\rho &= (1-p) \rho + p \sigma^z_q \rho \sigma^z_q \\
\rho &= \mathcal{K} \rho \mathcal{K} \\
\vec{\rho} &= \mathcal{L} \vec{\rho} \\
\mathcal{L} &= \begin{pmatrix}
1 & 0 & 0 & 0\\
0 & 1-2p & 0 & 0\\
0 & 0 & 1-2p & 0 \\
0 & 0 & 0 & 1
\end{pmatrix}
"""
def call_interactive(self, qureg: tqureg,
qubit: int,
probability: float) -> None:
r"""Interactive call of PyQuest-cffi
Args:
qureg: a qureg containing a density matrix
qubit: The qubit dephasing
probability: The probability/ relative amplitude with which the dephasing occurs,
probability needs to be smaller than 1/2
Raises:
RuntimeError: Probability of mixDephasing needs to be smaller that 1/2
RuntimeError: Qureg has to be a density matrix qureg but wavefunction qureg was used
"""
if probability > 1 / 2:
raise RuntimeError(
"Probability of mixDephasing needs to be smaller that 1/2")
if qureg.isDensityMatrix:
quest.mixDephasing(qureg, qubit, probability)
else:
raise RuntimeError("Qureg has to be a density matrix qureg but "
+ "wavefunction qureg was used")
def Kraus_matrices(self, probability: float, **kwargs) -> Tuple[np.ndarray, np.ndarray]:
r"""The definition of the Kraus Operator as a matrix
Args:
probability: The probability/ relative amplitude with which the dephasing occurs,
probability needs to be smaller than 1/2
**kwargs: Additional keyword arguments
Returns:
Tuple[np.ndarray]
"""
sqp = np.sqrt(probability)
sqmp = np.sqrt(1 - probability)
dephasing = np.array([[sqp, 0], [0, -sqp]], dtype=np.complex)
residual = np.array([[sqmp, 0], [0, sqmp]], dtype=np.complex)
return (residual, dephasing)
def superoperator_matrix(self, probability: float, **kwargs) -> np.ndarray:
r"""The definition of the superoperator acting on the density matrix written as a vector
.. math::
\rho = A \rho B \\
\vec{\rho} = \mathcal{L} \vec{\rho}
where A and B are arbitrary matrices
Args:
probability: The probability/ relative amplitude with which the dephasing occurs,
probability needs to be smaller than 1/2
**kwargs: Additional keyword arguments
Returns:
np.ndarray
"""
matrix = np.array([[1, 0, 0, 0],
[0, 1 - 2 * probability, 0, 0],
[0, 0, 1 - 2 * probability, 0],
[0, 0, 0, 1]], dtype=np.complex)
return matrix
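# Added illustrative sketch (not part of the original module): re-derives the two
# mixDephasing Kraus matrices and checks the completeness relation
# sum_k K_k^dagger K_k = I, which every valid Kraus decomposition must satisfy.
def _dephasing_kraus_completeness_sketch(probability: float = 0.1) -> bool:
    sqp = np.sqrt(probability)
    sqmp = np.sqrt(1 - probability)
    dephasing = np.array([[sqp, 0], [0, -sqp]], dtype=complex)
    residual = np.array([[sqmp, 0], [0, sqmp]], dtype=complex)
    total = residual.conjugate().T @ residual + dephasing.conjugate().T @ dephasing
    return np.allclose(total, np.eye(2))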
class mixDepolarising(_PYQUEST):
r"""One qubit depolarisation error
Apply the depolarisation operators :math:`\sigma^x`, :math:`\sigma^y` and :math:`\sigma^z`
    to a qubit q with an evenly distributed probability p
.. math::
\rho = (1-p) \rho + \frac{p}{3} \left( \sigma^x_q \rho \sigma^x_q
+ \sigma^y_q \rho \sigma^y_q + \sigma^z_q \rho \sigma^z_q \right)
Args:
qureg: a qureg containing a density matrix
qubit: The qubit depolarising
        probability: The probability/ relative amplitude with which the depolarisation occurs,
probability needs to be smaller than 3/4
"""
def call_interactive(self, qureg: tqureg,
qubit: int,
probability: float) -> None:
r"""Interactive call of PyQuest-cffi
Args:
qureg: a qureg containing a density matrix
qubit: The qubit depolarising
            probability: The probability/ relative amplitude with which the depolarisation occurs,
                probability needs to be smaller than 3/4
Raises:
RuntimeError: Probability of mixDepolarising needs to be smaller that 3/4
RuntimeError: Qureg has to be a density matrix qureg but wavefunction qureg was used
"""
if probability > 3 / 4:
raise RuntimeError(
"Probability of mixDepolarising needs to be smaller that 3/4")
if qureg.isDensityMatrix:
quest.mixDepolarising(qureg, qubit, probability)
else:
raise RuntimeError("Qureg has to be a density matrix qureg but "
+ "wavefunction qureg was used")
def Kraus_matrices(self,
probability: float,
**kwargs
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
r"""The definition of the Kraus Operator as a matrix
Args:
            probability: The probability/ relative amplitude with which the depolarisation occurs,
                probability needs to be smaller than 3/4
**kwargs: Additional keyword arguments
Returns:
Tuple[np.ndarray]
"""
sqp = np.sqrt(probability / 3)
sqmp = np.sqrt(1 - probability)
residual = np.array([[sqmp, 0],
[0, sqmp]], dtype=np.complex)
depol1 = np.array([[0, sqp],
[sqp, 0]], dtype=np.complex)
depol2 = np.array([[0, -1j * sqp],
[1j * sqp, 0]], dtype=np.complex)
depol3 = np.array([[sqp, 0],
[0, -sqp]], dtype=np.complex)
return (residual, depol1, depol2, depol3)
def superoperator_matrix(self, probability: float, **kwargs) -> np.ndarray:
r"""The definition of the Superoperator acting on the density matrix written as a vector
.. math::
\rho = A \rho B \\
\vec{\rho} = \mathcal{L} \vec{\rho}
where A and B are arbitrary matrices
Args:
            probability: The probability/ relative amplitude with which the depolarisation occurs,
                probability needs to be smaller than 3/4
**kwargs: Additional keyword arguments
Returns:
np.ndarray
"""
one_plus = 1 - 2 / 3 * probability
one_minus = 1 - 4 / 3 * probability
two_three = 2 / 3 * probability
matrix = np.array([[one_plus, 0, 0, two_three],
[0, one_minus, 0, 0],
[0, 0, one_minus, 0],
[two_three, 0, 0, one_plus]], dtype=np.complex)
return matrix
class mixDamping(_PYQUEST):
r"""One qubit damping error
Apply a pure damping error corresponding to zero temperature environments
.. math::
\rho &= \mathcal{K} \rho \mathcal{K}\\
Args:
qureg: a qureg containing a density matrix
qubit: The damped qubit
        probability: The probability/ relative amplitude with which the damping occurs
"""
def call_interactive(self, qureg: tqureg,
qubit: int,
probability: float) -> None:
r"""Interactive call of PyQuest-cffi
Args:
qureg: a qureg containing a density matrix
qubit: The damped qubit
            probability: The probability/relative amplitude with which the damping occurs
Raises:
RuntimeError: Qureg has to be a density matrix qureg but wavefunction qureg was used
"""
if qureg.isDensityMatrix:
quest.mixDamping(qureg, qubit, probability)
else:
raise RuntimeError("Qureg has to be a density matrix qureg but "
+ "wavefunction qureg was used")
def Kraus_matrices(self, probability: float, **kwargs) -> Tuple[np.ndarray, np.ndarray]:
r"""The definition of the Kraus Operator as a matrix
Args:
            probability: The probability/ relative amplitude with which the damping occurs
**kwargs: Additional keyword arguments
Returns:
Tuple[np.ndarray]
"""
sqp = np.sqrt(probability)
sqmp = np.sqrt(1 - probability)
damping = np.array([[0, sqp], [0, 0]], dtype=np.complex)
residual = np.array([[1, 0], [0, sqmp]], dtype=np.complex)
return (residual, damping)
def superoperator_matrix(self, probability: float, **kwargs) -> np.ndarray:
r"""The definition of the Superoperator acting on the density matrix written as a vector
.. math::
\rho = A \rho B \\
\vec{\rho} = \mathcal{L} \vec{\rho}
where A and B are arbitrary matrices
Args:
            probability: The probability/ relative amplitude with which the damping occurs
**kwargs: Additional keyword arguments
Returns:
np.ndarray
"""
sqmp = np.sqrt(1 - probability)
matrix = np.array([[1, 0, 0, probability],
[0, sqmp, 0, 0],
[0, 0, sqmp, 0],
[0, 0, 0, 1 - probability]], dtype=np.complex)
return matrix
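# Added consistency sketch (not part of the original module): applies the amplitude
# damping channel once through its Kraus operators and once through the 4x4
# superoperator acting on the row-major vectorised density matrix, assuming the
# row-major vec convention used in this module, and compares the two results.
def _damping_channel_consistency_sketch(probability: float = 0.25) -> bool:
    sqp = np.sqrt(probability)
    sqmp = np.sqrt(1 - probability)
    k0 = np.array([[1, 0], [0, sqmp]], dtype=complex)
    k1 = np.array([[0, sqp], [0, 0]], dtype=complex)
    superop = np.array([[1, 0, 0, probability],
                        [0, sqmp, 0, 0],
                        [0, 0, sqmp, 0],
                        [0, 0, 0, 1 - probability]], dtype=complex)
    rho = np.array([[0.25, 0.1 + 0.05j], [0.1 - 0.05j, 0.75]], dtype=complex)
    via_kraus = k0 @ rho @ k0.conjugate().T + k1 @ rho @ k1.conjugate().T
    via_superop = (superop @ rho.reshape(4)).reshape(2, 2)
    return np.allclose(via_kraus, via_superop)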
class mixTwoQubitDepolarising(_PYQUEST):
r"""Two qubit depolarisation error
Apply any tensor product of two operators :math:`U` :math:`\sigma^x`, :math:`\sigma^y`
and :math:`\sigma^z` to two qubits q1 and q2 with an evenly distributed probability p`
.. math::
\rho &= (1-p) \rho + \frac{p}{15} \sum_{A, B \in \{ I, \sigma^x, \sigma^y, \sigma^z\}}
A_{q1}B_{q2} \rho B_{q2}A_{q1} \\
\rho &= \mathcal{K} \rho \mathcal{K}
Args:
qureg: a qureg containing a density matrix
qubit1: The first qubit dephasing
qubit2: The second qubit dephasing
probability: The probability/ relative amplitude with which the depolarisation occurs.
Needs to be smaller than :math:`\frac{15}{16}`
"""
def call_interactive(self, qureg: tqureg,
qubit1: int,
qubit2: int,
probability: float) -> None:
r"""Interactive call of PyQuest-cffi
Args:
qureg: a qureg containing a density matrix
qubit1: The first qubit dephasing
qubit2: The second qubit dephasing
probability: The probability/ relative amplitude with which the depolarisation occurs.
Needs to be smaller than :math:`\frac{15}{16}`
Raises:
RuntimeError: Probability of twoQubitDepolariseErrors needs to be smaller that 15/16
RuntimeError: Qureg has to be a density matrix qureg but wavefunction qureg was used
"""
if probability > 15 / 16:
raise RuntimeError(
"Probability of twoQubitDepolariseErrors needs to be smaller that 15/16")
if qureg.isDensityMatrix:
quest.mixTwoQubitDepolarising(qureg, qubit1, qubit2, probability)
else:
raise RuntimeError("Qureg has to be a density matrix qureg but "
+ "wavefunction qureg was used")
def superoperator_matrix(self, probability: float, **kwargs) -> None:
r"""The definition of the superoperator acting on the density matrix written as a vector
.. math::
\rho = A \rho B \\
\vec{\rho} = \mathcal{L} \vec{\rho}
where A and B are arbitrary matrices
Args:
            probability: The probability/ relative amplitude with which the depolarisation occurs,
                probability needs to be smaller than 15/16
**kwargs: Additional keyword arguments
Raises:
NotImplementedError: not implemented
"""
raise NotImplementedError()
class mixTwoQubitDephasing(_PYQUEST):
r"""Two qubit dephasing error
Apply the dephasing :math:`\sigma^z` operator to two qubits q1 and q2 with probability p
Can also be expressed as a Kraus operator :math:`\mathcal{K}`
.. math::
\rho &= (1-p) \rho + \frac{p}{3} \left( \sigma^z_{q1} \rho \sigma^z_{q1}
+ \sigma^z_{q2} \rho \sigma^z_{q2}
+ \sigma^z_{q1}\sigma^z_{q2} \rho \sigma^z_{q2} \sigma^z_{q1} \right)\\
\rho &= \mathcal{K} \rho \mathcal{K}
Args:
qureg: a qureg containing a density matrix
qubit1: The first qubit dephasing
qubit2: The second qubit dephasing
probability: The probability/ relative amplitude with which the dephasing occurs,
probability needs to be smaller than 3/4
"""
def call_interactive(self, qureg: tqureg,
qubit1: int,
qubit2: int,
probability: float) -> None:
r"""Interactive call of PyQuest-cffi
Args:
qureg: a qureg containing a density matrix
qubit1: The first qubit dephasing
qubit2: The second qubit dephasing
probability: The probability/ relative amplitude with which the dephasing occurs,
probability needs to be smaller than 3/4
Raises:
RuntimeError: Probability of twoQubitDepolariseErrors needs to be smaller that 3/4
RuntimeError: Qureg has to be a density matrix qureg but wavefunction qureg was used
"""
if probability > 3 / 4:
raise RuntimeError(
"Probability of twoQubitDepolariseErrors needs to be smaller that 3/4")
if qureg.isDensityMatrix:
quest.mixTwoQubitDephasing(qureg, qubit1, qubit2, probability)
else:
raise RuntimeError("Qureg has to be a density matrix qureg but "
+ "wavefunction qureg was used")
def Kraus_matrices(self,
probability: float,
**kwargs
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
r"""The definition of the Kraus Operator as a matrix
Args:
probability: The probability/ relative amplitude with which the dephasing occurs,
                probability needs to be smaller than 3/4
**kwargs: Additional keyword arguments
Returns:
Tuple[np.ndarray]
"""
sqp = np.sqrt(probability / 3)
sqmp = np.sqrt(1 - probability)
residual = np.array([[sqmp, 0, 0, 0],
[0, sqmp, 0, 0],
[0, 0, sqmp, 0],
[0, 0, 0, sqmp]], dtype=np.complex)
        # sigma_z on the first qubit: diag(1, 1, -1, -1)
        dephasing1 = np.array([[sqp, 0, 0, 0],
                               [0, sqp, 0, 0],
                               [0, 0, -sqp, 0],
                               [0, 0, 0, -sqp]], dtype=np.complex)
        # sigma_z on the second qubit: diag(1, -1, 1, -1)
        dephasing2 = np.array([[sqp, 0, 0, 0],
                               [0, -sqp, 0, 0],
                               [0, 0, sqp, 0],
                               [0, 0, 0, -sqp]], dtype=np.complex)
        # sigma_z on both qubits: diag(1, -1, -1, 1)
        dephasing3 = np.array([[sqp, 0, 0, 0],
                               [0, -sqp, 0, 0],
                               [0, 0, -sqp, 0],
                               [0, 0, 0, sqp]], dtype=np.complex)
return (residual, dephasing1, dephasing2, dephasing3)
def superoperator_matrix(self, probability: float, **kwargs) -> np.ndarray:
r"""The definition of the superoperator acting on the density matrix written as a vector
.. math::
\rho = A \rho B \\
\vec{\rho} = \mathcal{L} \vec{\rho}
where A and B are arbitrary matrices
Args:
probability: The probability/ relative amplitude with which the dephasing occurs,
                probability needs to be smaller than 3/4
**kwargs: Additional keyword arguments
Returns:
np.ndarray
Raises:
NotImplementedError: not implemented
"""
raise NotImplementedError()
matrix = np.zeros((16, 16), dtype=np.complex)
for ci in range(0, 16):
matrix[ci, ci] = 1 if (ci % 4) == 1 else 1 - 2 * (probability)
return matrix
class applyOneQubitDephaseError(mixDephasing):
r"""One Qubit Dephasing - deprecated"""
def __init__(self, *args, **kwargs) -> None:
r"""Initialisation
Args:
*args: Arguments
**kwargs: Additional keyword arguments
"""
warnings.warn(
"applyOneQubitDephaseError will be removed in future versions, use mixDephasing",
DeprecationWarning)
super().__init__(*args, **kwargs)
class applyOneQubitDepolariseError(mixDepolarising):
r"""One Qubit Depolarisation - deprecated"""
def __init__(self, *args, **kwargs) -> None:
r"""Initialisation
Args:
*args: Arguments
**kwargs: Additional keyword arguments
"""
warnings.warn(
"applyOneQubitDepolariseError will be removed in future versions, use mixDepolarising",
DeprecationWarning)
super().__init__(*args, **kwargs)
class applyOneQubitDampingError(mixDamping):
r"""One Qubit Damping - deprecated"""
def __init__(self, *args, **kwargs) -> None:
r"""Initialisation
Args:
*args: Arguments
**kwargs: Additional keyword arguments
"""
warnings.warn(
"applyOneQubitDampingError will be removed in future versions, use mixDamping",
DeprecationWarning)
super().__init__(*args, **kwargs)
class applyTwoQubitDephaseError(mixTwoQubitDephasing):
r"""Two Qubit Dephasing - deprecated"""
def __init__(self, *args, **kwargs) -> None:
r"""Initialisation
Args:
*args: Arguments
**kwargs: Additional keyword arguments
"""
warnings.warn(
"applyTwoQubitDephaseError will be removed in future versions,"
+ " use mixTwoQubitDephasing",
DeprecationWarning)
super().__init__(*args, **kwargs)
class applyTwoQubitDepolariseError(mixTwoQubitDepolarising):
r"""Two Qubit Depolarisation - deprecated"""
def __init__(self, *args, **kwargs) -> None:
r"""Initialisation
Args:
*args: Arguments
**kwargs: Additional keyword arguments
"""
warnings.warn(
"applyTwoQubitDepolariseError will be removed in future versions,"
+ " use mixTwoQubitDepolarising",
DeprecationWarning)
super().__init__(*args, **kwargs)
class mixMultiQubitKrausMap(_PYQUEST):
r"""Error affecting multiple qubits
An error acting on multiple qubtis simultaneously is defined by a set of Kraus operators
Args:
qureg: a qureg containing a density matrix
qubits: The qubits the Kraus operators are acting on
operators: The Kraus operators
"""
def call_interactive(self, qureg: tqureg,
qubits: Sequence[int],
operators: Sequence[np.ndarray],
) -> None:
r"""Interactive call of PyQuest-cffi
Args:
qureg: a qureg containing a density matrix
qubits: The qubits the Kraus operators are acting on
operators: The Kraus operators
Raises:
RuntimeError: Number of target qubits and dimension of Kraus operators mismatch
RuntimeError: Not a valid Kraus map
"""
for op in operators:
if 2**len(qubits) != op.shape[0] or 2**len(qubits) != op.shape[1]:
raise RuntimeError("Number of target qubits"
+ " and dimension of Kraus operators mismatch")
operator_sum = np.sum([op.conjugate().T @ op for op in operators], axis=0)
if not np.array_equal(operator_sum, | np.eye(operators[0].shape[0]) | numpy.eye |
import os
import re
import logging
import numpy as np
import struct
import binascii
import math
from MiscLibs.common_functions import nans
class RtbRowe(object):
"""
Class to read data from RTB files
Single file to read in RTB data, then create an list of objects. The lists will contain all the
data from the file.
This file was specifically created to work in QRevPy to decode RTI data and follow QRevPy's data format.
"""
# Prevent Magic Numbers
HEADER_SIZE = 32 # Header size in bytes
CHECKSUM_SIZE = 4 # Checksum size in bytes
MAX_DATASETS = 20 # Maximum number of datasets in an ensemble
BYTES_INT32 = 4 # Bytes in Int32
BYTES_FLOAT = 4 # Bytes in Float
NUM_DATASET_HEADER_ELEMENTS = 6 # Number of elements in dataset header
BAD_VEL = 88.888 # RTB Bad Velocity
def __init__(self, file_path: str):
"""
Constructor initializing instance variables.
:param file_path: Full Path of RTB file to be read
"""
# File path
self.file_name = file_path
# Count the number of ensembles in the file to initialize the np.array
self.num_ens, self.num_beams, self.num_bins = self.get_file_info(file_path=file_path)
# List of all the ensemble data decoded
# Instrument Specific data
self.Inst = Inst(num_ens=self.num_ens)
# ADCP Configuration values
self.Cfg = Cfg(num_ens=self.num_ens)
# ADCP Sensors like temp and compass
self.Sensor = Sensor(num_ens=self.num_ens)
# Water velocity data and quality
self.Wt = Wt(num_ens=self.num_ens,
num_beams=self.num_beams,
num_bins=self.num_bins)
# Range Tracking Data
self.Rt = RT(num_ens=self.num_ens,
num_beams=self.num_beams)
# Bottom Track Data
self.Bt = BT(num_ens=self.num_ens,
num_beams=self.num_beams)
# NMEA data
self.Nmea = Nmea(num_ens=self.num_ens)
# Water Gage Data
self.Gage = Gage(num_ens=self.num_ens)
self.Gps = Gps(num_ens=self.num_ens)
self.Gps2 = Gps2(num_ens=self.num_ens)
# Surface velocity data
self.Surface = Surface(num_ens=self.num_ens,
num_beams=self.num_beams,
max_surface_bins=0) # TODO: NOT USED RIGHT NOW
self.AutoMode = []
# River Bottom Track data
self.River_BT = RiverBT(num_ens=self.num_ens,
num_subsystems=10) # TODO: NOT SET CORRECTLY, NEED TO READ IN IN CHECK
# Keep track of ensemble index
# This is used only for 3 or 4 beam ensembles
# Vertical beams are merged with 3 or 4 beam ensemble
self.ens_index = 0
# Read in the given file path
self.rtb_read(file_path=file_path)
@staticmethod
def count_ensembles(file_path: str):
"""
        Count the number of ensembles in the file by searching for the ensemble delimiter.
"""
# RTB ensemble delimiter
delimiter = b'\x80' * 16
# Block size to read in data
# BLOCK_SIZE = 4096
block_size = 4096
# Keep count of the number of ensembles found
ens_count = 0
# Search for the number of ensembles by looking for the delimiter
# Check to ensure file exists
if os.path.exists(file_path):
with open(file_path, "rb") as f:
data = f.read(block_size) # Read in data
# Verify data was found
while data:
# Check for the delimiter
if delimiter in data:
ens_count += 1
# Read the next batch of data
data = f.read(block_size)
return ens_count
def get_file_info(self, file_path: str):
"""
Get the file information like the number of ensembles,
number of beams and number of bins.
This only counts 3 or 4 beam ensembles. Vertical beams
will be merged with 4 beam ensembles.
        :param file_path: File path to inspect.
        :return: number of ensembles, number of beams, number of bins
"""
# RTB ensemble delimiter
delimiter = b'\x80' * 16
# Block size to read in data
block_size = 4096
# Create a buffer
buff = bytes()
# Keep count of the number of ensembles found
ens_count = 0
num_beams = 0
num_bins = 0
# Search for the number of ensembles by looking for the delimiter
# Check to ensure file exists
if os.path.exists(file_path):
with open(file_path, "rb") as f:
data = f.read(block_size) # Read in data
# Verify data was found
while data:
# Accumulate the buffer
buff += data
# Check for the delimiter
if delimiter in buff:
# If delimiter found, split at the delimiter to get the remaining buffer data
chunks = buff.split(delimiter)
# Put the remaining data back in the buffer
buff = chunks.pop()
# Take out the ens data
for chunk in chunks:
# Process the binary ensemble data
# Verify the ENS data is good
# This will check that all the data is there and the checksum is good
if self.verify_ens_data(delimiter + chunk):
# Get the ensemble info
bin_count, beams_count = self.get_ens_info(delimiter + chunk)
# Verify we have 3 or 4 beam data ensemble
# Vertical beam is not counted and is merged with 4 beam ensembles
if beams_count > 2:
ens_count += 1
# Set the largest beam and bin number
num_beams = max(beams_count, num_beams)
num_bins = max(bin_count, num_bins)
# Read the next batch of data
data = f.read(block_size)
# Process whatever is remaining in the buffer
# Verify the ENS data is good
# This will check that all the data is there and the checksum is good
if self.verify_ens_data(delimiter + buff):
# Get the ensemble info
bin_count, beams_count = self.get_ens_info(delimiter + buff)
# Verify we have 3 or 4 beam data ensemble
# Vertical beam is not counted and is merged with 4 beam ensembles
if beams_count > 2:
ens_count += 1
# Set the largest beam and bin number
num_beams = max(beams_count, num_beams)
num_bins = max(bin_count, num_bins)
return ens_count, num_beams, num_bins
def get_ens_info(self, ens_bytes: list):
"""
Decode the datasets to an ensemble to get the general information about the ensemble.
This includes the number of beams and bins.
Use verify_ens_data if you are using this
as a static method to verify the data is correct.
:param ens_bytes: Ensemble binary data. Decode the dataset.
:return: Return number of beam and number of bins.
"""
packet_pointer = self.HEADER_SIZE
ens_len = len(ens_bytes)
num_elements = 0
element_multiplier = 0
# Decode the ensemble datasets
# Limit the number of attempts to look for new datasets
for x in range(self.MAX_DATASETS):
# Check if we are at the end of the payload
if packet_pointer >= ens_len - RtbRowe.CHECKSUM_SIZE - RtbRowe.HEADER_SIZE:
break
# Get the dataset info
# ds_type = RtbRowe.get_int32(packet_pointer + (RtbRowe.BYTES_INT32 * 0), RtbRowe.BYTES_INT32, ens_bytes)
num_elements = RtbRowe.get_int32(packet_pointer + (RtbRowe.BYTES_INT32 * 1), RtbRowe.BYTES_INT32, ens_bytes)
element_multiplier = RtbRowe.get_int32(packet_pointer + (RtbRowe.BYTES_INT32 * 2), RtbRowe.BYTES_INT32,
ens_bytes)
# image = RtbRowe.get_int32(packet_pointer + (RtbRowe.BYTES_INT32 * 3), RtbRowe.BYTES_INT32, ens_bytes)
# name_len = RtbRowe.get_int32(packet_pointer + (RtbRowe.BYTES_INT32 * 4), RtbRowe.BYTES_INT32, ens_bytes)
name = str(ens_bytes[packet_pointer +
(RtbRowe.BYTES_INT32 * 5):packet_pointer + (RtbRowe.BYTES_INT32 * 5) + 8], 'UTF-8')
# Beam velocity will contain all the information we need
if "E000001" in name:
# Ensure this is not vertical beam data
if element_multiplier > 1:
# Return the number bins and number of beams
return num_elements, element_multiplier
return num_elements, element_multiplier
def rtb_read(self, file_path: str):
"""
Reads the binary RTB file and assigns values to object instance variables.
:param file_path: Full file path
"""
# RTB ensemble delimiter
# DELIMITER = b'\x80' * 16
delimiter = b'\x80' * 16
# Block size to read in data
block_size = 4096
# Get the total file size to keep track of total bytes read and show progress
# file_size = os.path.getsize(file_path)
bytes_read = 0
# Create a buffer
buff = bytes()
# Assign default values
# n_velocities = 4
# max_surface_bins = 5
# Check to ensure file exists
if os.path.exists(file_path):
# file_info = os.path.getsize(file_path)
with open(file_path, "rb") as f:
data = f.read(block_size) # Read in data
# Keep track of bytes read
# bytes_read += block_size
# self.file_progress(bytes_read, file_size, fullname)
# Verify data was found
while data:
# Accumulate the buffer
buff += data
# Check for the delimiter
if delimiter in buff:
# If delimiter found, split at the delimiter to get the remaining buffer data
chunks = buff.split(delimiter)
# Put the remaining data back in the buffer
buff = chunks.pop()
# Take out the ens data
for chunk in chunks:
# Process the binary ensemble data
self.decode_ens(delimiter + chunk)
# Read the next batch of data
data = f.read(block_size)
# Keep track of bytes read
bytes_read += block_size
# self.file_progress(bytes_read, file_size, ens_file_path)
# self.file_progress(block_size, file_size, fullname)
# Process whatever is remaining in the buffer
self.decode_ens(delimiter + buff)
# self.Gps2.corr_qual = np.array(self.Gps2.corr_qual)
# self.Gps2.lat_deg = np.array(self.Gps2.lat_deg)
def decode_ens(self, ens_bytes: list):
"""
Attempt to decode the ensemble. This will verify the checksum passes.
If the checksum is good, then decode the data.
When the data is decoded, automatically add it to list of ensembles.
:param ens_bytes: Ensemble byte array to decode.
:return
"""
# Verify the ENS data is good
# This will check that all the data is there and the checksum is good
if self.verify_ens_data(ens_bytes):
# Decode the ens binary data
logging.debug("Decoding binary data to ensemble: " + str(len(ens_bytes)))
# Decode the data
self.decode_data_sets(ens_bytes)
def verify_ens_data(self, ens_bytes: list, ens_start: int = 0):
"""
Get the ensemble number and the ensemble size. Verify
        we have all the ensemble bytes in the buffer by comparing against
the ensemble size. Then check the checksum and verify it is correct.
:param ens_bytes: Ensemble binary data.
:param ens_start: Start location in the ens_data
:return True if the ensemble is good and checksum passes.
"""
try:
# Ensemble Length
ens_len = len(ens_bytes)
# Verify at least the minimum number of bytes are available to verify the ensemble
if ens_len <= self.HEADER_SIZE + self.CHECKSUM_SIZE:
return False
# Check Ensemble number
ens_num = struct.unpack("I", ens_bytes[ens_start + 16:ens_start + 20])
# Check ensemble size
payload_size = struct.unpack("I", ens_bytes[ens_start + 24:ens_start + 28])
# Ensure the entire ensemble is in the buffer
if ens_len >= ens_start + self.HEADER_SIZE + payload_size[0] + self.CHECKSUM_SIZE:
# Check checksum
checksum_loc = ens_start + self.HEADER_SIZE + payload_size[0]
checksum = struct.unpack("I", ens_bytes[checksum_loc:checksum_loc + self.CHECKSUM_SIZE])
# Calculate Checksum
# Use only the payload for the checksum
ens = ens_bytes[ens_start + self.HEADER_SIZE:ens_start + self.HEADER_SIZE + payload_size[0]]
calc_checksum = binascii.crc_hqx(ens, 0)
# Verify checksum
if checksum[0] == calc_checksum:
logging.debug(ens_num[0])
return True
else:
logging.warning("Ensemble fails checksum. {:#04x} {:#04x}".format(checksum[0], calc_checksum))
return False
else:
logging.warning("Incomplete ensemble.")
return False
except Exception as e:
logging.error("Error verifying Ensemble. " + str(e))
return False
# return False
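    @staticmethod
    def _synthetic_ensemble_sketch(payload: bytes = b'\x00' * 64) -> bytes:
        """Added illustrative helper (hypothetical, not part of the original reader):
        assembles a byte sequence with the layout verify_ens_data expects -- the 16 byte
        0x80 delimiter, the ensemble number at offset 16, the payload size at offset 24,
        the payload itself, and finally the CRC-16/XMODEM of the payload packed into
        4 bytes."""
        header = b'\x80' * 16
        header += struct.pack("I", 1)             # ensemble number (bytes 16:20)
        header += struct.pack("I", 0)             # unused here (bytes 20:24)
        header += struct.pack("I", len(payload))  # payload size (bytes 24:28)
        header += struct.pack("I", 0)             # unused here (bytes 28:32)
        checksum = struct.pack("I", binascii.crc_hqx(payload, 0))
        return header + payload + checksum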
def decode_data_sets(self, ens_bytes: list):
"""
Decode the datasets to an ensemble.
Use verify_ens_data if you are using this
as a static method to verify the data is correct.
:param ens_bytes: Ensemble binary data. Decode the dataset.
:return: Return the decoded ensemble.
"""
packet_pointer = self.HEADER_SIZE
ens_len = len(ens_bytes)
# Flag if BT data found
# bt_data_found = False
# bt_adcp3_data_found = False
# ancillary_adcp3_found = False
# Flag if this ensemble is vertical beam ensemble
is_vert_ens = False
# Decode the ensemble datasets
# Limit the number of attempts to look for new datasets
for x in range(self.MAX_DATASETS):
# Check if we are at the end of the payload
if packet_pointer >= ens_len - RtbRowe.CHECKSUM_SIZE - RtbRowe.HEADER_SIZE:
break
# Get the dataset info
ds_type = RtbRowe.get_int32(packet_pointer + (RtbRowe.BYTES_INT32 * 0), RtbRowe.BYTES_INT32, ens_bytes)
num_elements = RtbRowe.get_int32(packet_pointer + (RtbRowe.BYTES_INT32 * 1), RtbRowe.BYTES_INT32, ens_bytes)
element_multiplier = RtbRowe.get_int32(packet_pointer + (RtbRowe.BYTES_INT32 * 2), RtbRowe.BYTES_INT32,
ens_bytes)
# image = RtbRowe.get_int32(packet_pointer + (RtbRowe.BYTES_INT32 * 3), RtbRowe.BYTES_INT32, ens_bytes)
name_len = RtbRowe.get_int32(packet_pointer + (RtbRowe.BYTES_INT32 * 4), RtbRowe.BYTES_INT32, ens_bytes)
name = str(ens_bytes[packet_pointer
+ (RtbRowe.BYTES_INT32 * 5):packet_pointer + (RtbRowe.BYTES_INT32 * 5) + 8], 'UTF-8')
# Calculate the dataset size
data_set_size = RtbRowe.get_data_set_size(ds_type, name_len, num_elements, element_multiplier)
# Beam Velocity
if "E000001" in name:
logging.debug(name)
# Test if this ensemble is a vertical beam
if element_multiplier == 1:
is_vert_ens = True
# Do nothing else for vertical beam
else:
self.Wt.decode_vel(ens_bytes=ens_bytes[packet_pointer:packet_pointer + data_set_size],
ens_index=self.ens_index,
num_elements=num_elements,
element_multiplier=element_multiplier,
name_len=name_len)
# Instrument Velocity
if "E000002" in name:
logging.debug(name)
# Test if this ensemble is a vertical beam
if element_multiplier == 1:
is_vert_ens = True
# Do nothing else for vertical beam
else:
self.Wt.decode_instr_vel(ens_bytes=ens_bytes[packet_pointer:packet_pointer + data_set_size],
ens_index=self.ens_index,
num_elements=num_elements,
element_multiplier=element_multiplier,
name_len=name_len)
# Earth Velocity
if "E000003" in name:
logging.debug(name)
# Test if this ensemble is a vertical beam
if element_multiplier == 1:
is_vert_ens = True
# Do nothing else for vertical beam
else:
self.Wt.decode_earth_vel(ens_bytes=ens_bytes[packet_pointer:packet_pointer + data_set_size],
ens_index=self.ens_index,
num_elements=num_elements,
element_multiplier=element_multiplier,
name_len=name_len)
# Amplitude
if "E000004" in name:
logging.debug(name)
# Check for vertical beam data
if element_multiplier == 1:
is_vert_ens = True
# Do nothing else for vertical beam
else:
self.Wt.decode_rssi(ens_bytes=ens_bytes[packet_pointer:packet_pointer+data_set_size],
ens_index=self.ens_index,
num_elements=num_elements,
element_multiplier=element_multiplier,
name_len=name_len)
# Correlation
if "E000005" in name:
logging.debug(name)
# num_repeats = None
# if (len(self.Cfg.wp_repeat_n) > self.ens_index-1 > 0 and
# not np.isnan(self.Cfg.wp_repeat_n[self.ens_index-1])):
# num_repeats = self.Cfg.wp_repeat_n[self.ens_index-1]
# Check for vertical beam data
if element_multiplier < 2:
is_vert_ens = True
# Do nothing else for vertical beam
else:
self.Wt.decode_corr(ens_bytes=ens_bytes[packet_pointer:packet_pointer+data_set_size],
ens_index=self.ens_index,
num_elements=num_elements,
element_multiplier=element_multiplier,
# num_repeats=num_repeats,
name_len=name_len)
# Good Beam
if "E000006" in name:
logging.debug(name)
# Get the number of pings used in the ensemble
# pings_per_ens = 1
# if len(self.Cfg.wp) > self.ens_index-1 > 0 and not np.isnan(self.Cfg.wp[self.ens_index-1]):
# pings_per_ens = self.Cfg.wp[self.ens_index-1]
# Check if vertical beam data
if element_multiplier < 2:
is_vert_ens = True
# Do nothing else for vertical beam
else:
self.Wt.decode_pgb(ens_bytes=ens_bytes[packet_pointer:packet_pointer+data_set_size],
ens_index=self.ens_index,
num_elements=num_elements,
element_multiplier=element_multiplier,
# pings_per_ens=pings_per_ens,
name_len=name_len)
# Good Earth
if "E000007" in name:
logging.debug(name)
# Get the number of pings used in the ensemble
# pings_per_ens = 1
# if len(self.Cfg.wp) > self.ens_index-1 > 0 and not np.isnan(self.Cfg.wp[self.ens_index-1]):
# pings_per_ens = self.Cfg.wp[self.ens_index-1]
# Test if this ensemble is a vertical beam
if element_multiplier < 2:
is_vert_ens = True
# Do nothing else for vertical beam
else:
self.Wt.decode_pg_earth(ens_bytes=ens_bytes[packet_pointer:packet_pointer+data_set_size],
ens_index=self.ens_index,
num_elements=num_elements,
element_multiplier=element_multiplier,
# pings_per_ens=pings_per_ens,
name_len=name_len)
# Ensemble Data
if "E000008" in name:
logging.debug(name)
# This should have already been flagged if the data is vertical
# from the velocity, amplitude or correlation data
# If it is vertical beam data, do not add the data
if not is_vert_ens:
self.Cfg.decode_ensemble_data(ens_bytes=ens_bytes[packet_pointer:packet_pointer+data_set_size],
ens_index=self.ens_index,
name_len=name_len)
self.Inst.decode_ensemble_data(ens_bytes=ens_bytes[packet_pointer:packet_pointer+data_set_size],
ens_index=self.ens_index,
name_len=name_len)
self.Sensor.decode_ensemble_data(ens_bytes=ens_bytes[packet_pointer:packet_pointer+data_set_size],
ens_index=self.ens_index,
name_len=name_len)
# Ancillary Data
if "E000009" in name:
logging.debug(name)
# This should have already been flagged if the data is vertical
# from the velocity, amplitude or correlation data
# If it is vertical beam data, do not add the data
if not is_vert_ens:
# Configuration data
self.Cfg.decode_ancillary_data(ens_bytes=ens_bytes[packet_pointer:packet_pointer+data_set_size],
ens_index=self.ens_index,
name_len=name_len)
# Sensor data
self.Sensor.decode_ancillary_data(ens_bytes=ens_bytes[packet_pointer:packet_pointer+data_set_size],
ens_index=self.ens_index,
name_len=name_len)
# New ADCP3 Hardware includes this data
# There are additional values at the end
if num_elements > 19:
self.Sensor.decode_ancillary_adcp3_data(
ens_bytes=ens_bytes[packet_pointer:packet_pointer+data_set_size],
ens_index=self.ens_index,
name_len=name_len)
self.Cfg.decode_ancillary_adcp3_data(
ens_bytes=ens_bytes[packet_pointer:packet_pointer+data_set_size],
ens_index=self.ens_index,
name_len=name_len)
# Bottom Track
if "E000010" in name:
logging.debug(name)
# This should have already been flagged if the data is vertical
# from the velocity, amplitude or correlation data
# If it is vertical beam data, do not add the data
if not is_vert_ens:
# Populate Bottom Track data
self.Bt.decode(ens_bytes=ens_bytes[packet_pointer:packet_pointer+data_set_size],
ens_index=self.ens_index,
name_len=name_len)
# Populate Config data
self.Cfg.decode_bottom_track_data(ens_bytes=ens_bytes[packet_pointer:packet_pointer+data_set_size],
ens_index=self.ens_index,
name_len=name_len)
# Populate Sensor data
self.Sensor.decode_bottom_track_data(
ens_bytes=ens_bytes[packet_pointer:packet_pointer+data_set_size],
ens_index=self.ens_index,
name_len=name_len)
# Check if ADCP 3 data is available
# Number of elements. 74 for 4 Beam system, 59 for 3 beam, 29 for 1 beam
if num_elements > 74:
self.Sensor.decode_bottom_track_adcp3_data(
ens_bytes=ens_bytes[packet_pointer:packet_pointer+data_set_size],
ens_index=self.ens_index,
name_len=name_len)
# NMEA data
if "E000011" in name:
logging.debug(name)
# This should have already been flagged if the data is vertical
# from the velocity, amplitude or correlation data
# If it is vertical beam data, do not add the data
if not is_vert_ens:
self.Nmea.decode(ens_bytes=ens_bytes[packet_pointer:packet_pointer+data_set_size],
ens_index=self.ens_index,
name_len=name_len)
self.Gps2.decode(ens_bytes=ens_bytes[packet_pointer:packet_pointer + data_set_size],
ens_index=self.ens_index,
# num_ens=self.num_ens,
name_len=name_len)
self.Gps.decode(ens_bytes=ens_bytes[packet_pointer:packet_pointer + data_set_size],
ens_index=self.ens_index,
# num_ens=self.num_ens,
name_len=name_len)
# System Setup
if "E000014" in name:
logging.debug(name)
# This should have already been flagged if the data is vertical
# from the velocity, amplitude or correlation data
# If it is vertical beam data, do not add the data
if not is_vert_ens:
# Configuration data
# Check if the Cfg is already created from other dataset
self.Cfg.decode_systemsetup_data(ens_bytes=ens_bytes[packet_pointer:packet_pointer + data_set_size],
ens_index=self.ens_index,
name_len=name_len)
self.Sensor.decode_systemsetup_data(
ens_bytes=ens_bytes[packet_pointer:packet_pointer + data_set_size],
ens_index=self.ens_index,
name_len=name_len)
self.Inst.decode_systemsetup_data(
ens_bytes=ens_bytes[packet_pointer:packet_pointer + data_set_size],
ens_index=self.ens_index,
name_len=name_len)
# Range Tracking
if "E000015" in name:
logging.debug(name)
# This should have already been flagged if the data is vertical
# from the velocity, amplitude or correlation data
# If it is vertical beam data, do not add the data
if is_vert_ens:
# Vertical beam data
# Add the vertical beam data to the previous ensemble
if self.ens_index-1 >= 0:
self.Sensor.decode_vert_rt(ens_bytes=ens_bytes[packet_pointer:packet_pointer + data_set_size],
ens_index=self.ens_index-1,
name_len=name_len)
else:
self.Rt.decode(ens_bytes=ens_bytes[packet_pointer:packet_pointer + data_set_size],
ens_index=self.ens_index,
name_len=name_len)
if "E000016" in name:
logging.debug(name)
# This should have already been flagged if the data is vertical
# from the velocity, amplitude or correlation data
# If it is vertical beam data, do not add the data
if not is_vert_ens:
self.Gage.decode_data(ens_bytes=ens_bytes[packet_pointer:packet_pointer + data_set_size],
ens_index=self.ens_index,
name_len=name_len)
if "R000001" in name:
logging.debug(name)
# This should have already been flagged if the data is vertical
# from the velocity, amplitude or correlation data
# If it is vertical beam data, do not add the data
if not is_vert_ens:
self.River_BT.decode_data(ens_bytes=ens_bytes[packet_pointer:packet_pointer + data_set_size],
ens_index=self.ens_index,
name_len=name_len)
if "R000002" in name:
logging.debug(name)
# RiverTimeStamp
if "R000003" in name:
logging.debug(name)
# RiverNmea
if "R000004" in name:
logging.debug(name)
# RiverBThump
if "R000005" in name:
logging.debug(name)
# RiverStationID
if "R000006" in name:
logging.debug(name)
# RiverTransectID
# Move to the next dataset
packet_pointer += data_set_size
# Increment the ensemble index if not a vertical beam
if not is_vert_ens:
self.ens_index += 1
@staticmethod
def get_data_set_size(ds_type: int, name_len: int, num_elements: int, element_multiplier: int):
"""
Get the dataset size.
:param ds_type: Dataset type. (Int, float, ...)
:param name_len: Length of the name.
:param num_elements: Number of elements.
        :param element_multiplier: Element multiplier (typically the number of beams).
:return: Size of the dataset in bytes.
"""
# Number of bytes in the data type
datatype_size = 4
if ds_type == 50: # Byte Datatype
datatype_size = 1
elif ds_type == 20: # Int Datatype
datatype_size = 4
elif ds_type == 10: # Float Datatype
datatype_size = 4
return ((num_elements * element_multiplier) * datatype_size) + RtbRowe.get_base_data_size(name_len)
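    # Example (illustrative sketch, values hypothetical): for a float dataset
    # (ds_type == 10) with 30 bins and 4 beams, the payload is (30 * 4) * 4 bytes,
    # and the value returned is that payload plus the header size given by
    # RtbRowe.get_base_data_size(name_len).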
@staticmethod
def get_base_data_size(name_len: int):
"""
Get the size of the header for a dataset.
:param name_len: Length of the name.
:return: Dataset header size in bytes.
"""
return name_len + (RtbRowe.BYTES_INT32 * (RtbRowe.NUM_DATASET_HEADER_ELEMENTS - 1))
@staticmethod
def is_float_close(a, b, rel_tol=1e-06, abs_tol=0.0):
"""
Check if the float values are the same.
:param a: First float value
:param b: Second float value
        :param rel_tol: Relative tolerance.
        :param abs_tol: Absolute tolerance.
        :return: True if the two values are equal within the given tolerances.
"""
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
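    # Example (sketch): RtbRowe.is_float_close(88.888, 88.888) returns True.
    # The comparison mirrors math.isclose(), using
    # max(rel_tol * max(|a|, |b|), abs_tol) as the allowed difference.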
@staticmethod
def is_bad_velocity(vel):
"""
Check if the velocity given is good or bad.
:param vel: Velocity value to check.
:return: True if Bad Velocity.
"""
        # Check for None first so the numeric comparisons below cannot fail
        if vel is None:
            return True
        if vel >= RtbRowe.BAD_VEL:
            return True
        if RtbRowe.is_float_close(vel, RtbRowe.BAD_VEL):
            return True
        return False
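    # Example (sketch): RtbRowe.is_bad_velocity(88.888) and RtbRowe.is_bad_velocity(None)
    # both return True; a valid velocity such as 0.25 m/s returns False.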
@staticmethod
def get_int32(start: int, num_bytes: int, ens_bytes: list):
"""
Convert the bytes given into an Int32.
This will look in the ens given.
:param start: Start location in the ens_bytes.
:param num_bytes: Number of bytes in the int32.
:param ens_bytes: Buffer containing the bytearray data.
:return: Int32 of the data in the buffer.
"""
try:
return struct.unpack("i", ens_bytes[start:start + num_bytes])[0]
except Exception as e:
logging.error("Error creating a Int32 from bytes. " + str(e))
return 0
@staticmethod
def get_float(start: int, num_bytes: int, ens: list):
"""
        Convert the bytes given into a float.
This will look in the ens given.
:param start: Start location.
:param num_bytes: Number of bytes in the float.
:param ens: Buffer containing the bytearray data.
:return: Float of the data in the buffer.
"""
try:
return struct.unpack("f", ens[start:start + num_bytes])[0]
except Exception as e:
logging.debug("Error creating a float from bytes. " + str(e))
return 0.0
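    # Example (sketch, offsets hypothetical): reading the first float of a dataset payload:
    #     ptr = RtbRowe.get_base_data_size(name_len)
    #     value = RtbRowe.get_float(ptr, RtbRowe.BYTES_FLOAT, ens_bytes)
    # The struct formats "f" / "i" use the platform's native byte order; a failed
    # unpack is logged and 0.0 / 0 is returned rather than raising.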
@staticmethod
# def nans(num_ens: int, dtype=float):
def nans(shape, dtype=float):
"""
Create a numpy array filled with NaN
@param shape: tuple
Shape of array to be filled with nans
@param dtype: Data Type
Type of array
"""
empty_arr = np.empty(shape, dtype=dtype)
empty_arr.fill(np.nan)
return empty_arr
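    # Example (sketch): RtbRowe.nans([4, 30, 100]) returns a 4 x 30 x 100 float array
    # filled with np.nan, used here to pre-allocate [beam, bin, ensemble] storage.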
class Wt:
"""
Water Profile data.
Beam Velocity.
Velocity data in the Beam Coordinate Transform. (Raw Velocity Data)
Correlation, Amplitude and Good Beam data for data quality.
"""
def __init__(self, num_beams: int, num_bins: int, num_ens: int):
"""
        :param num_beams: Number of beams on the system, not including the vertical beam.
        :param num_bins: Number of bins/cells.
        :param num_ens: Number of ensembles in the file.
"""
self.corr = nans([num_beams, num_bins, num_ens]) # Correlation in fraction
self.pergd = nans([num_beams, num_bins, num_ens]) # Percent Good in percentage
self.rssi = nans([num_beams, num_bins, num_ens]) # RSSI/Amplitude in dB
self.vel_beam_mps = nans([num_beams, num_bins, num_ens]) # Beam Velocity in m/s.
self.vel_mps = nans([num_beams, num_bins, num_ens]) # Velocity in m/s for Qrev use
self.vel_earth_mps = nans([num_beams, num_bins, num_ens]) # Earth Velocity in m/s
self.pergd_earth = nans([num_beams, num_bins, num_ens]) # Percent Good in Earth data
self.vel_instr_mps = nans([num_beams, num_bins, num_ens]) # Instrument Velocity in m/s
def decode_vel(self, ens_bytes: list, ens_index: int, num_elements:
int, element_multiplier: int, name_len: int = 8):
"""
Decode the ensemble data for the Beam velocity.
Initialize the list of velocity data. [beam][bin]
RTB is m/s
RTB Bad Value is 88.888
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble Index.
:param element_multiplier: Number of beams.
        :param num_elements: Number of bins.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
# Initialize the array
vel = np.empty(shape=[element_multiplier, num_elements])
# Create a 2D list of velocities
# [beam][bin]
for beam in range(element_multiplier):
for bin_num in range(num_elements):
vel[beam][bin_num] = RtbRowe.get_float(packet_pointer, RtbRowe.BYTES_FLOAT, ens_bytes)
# Move the pointer
packet_pointer += RtbRowe.BYTES_FLOAT
self.vel_beam_mps[:element_multiplier, :num_elements, ens_index] = vel
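        # Example (sketch, indices hypothetical): after decoding, beam 0's velocity
        # profile for ensemble 10 is self.vel_beam_mps[0, :, 10]; the array is
        # indexed [beam, bin, ensemble] and unfilled entries remain NaN.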
def decode_rssi(self, ens_bytes: list, ens_index: int, num_elements: int, element_multiplier:
int, name_len: int = 8):
"""
Decode the ensemble data for the Amplitude data.
Amplitude data which reports signal strength.
        Amplitude values range from 0 dB to 140 dB.
        Values below 25 dB are considered noise. On systems with
        high EMI the noise floor can be as high as 40 dB.
Initialize the list of amplitude data. [beam][bin]
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index to store the data.
:param element_multiplier: Number of beams.
        :param num_elements: Number of bins.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
# Initialize the array
        amp = np.empty(shape=[element_multiplier, num_elements], dtype=float)
# Create a 2D list of velocities
# [beam][bin]
for beam in range(element_multiplier):
for bin_num in range(num_elements):
amp[beam][bin_num] = RtbRowe.get_float(packet_pointer, RtbRowe.BYTES_FLOAT, ens_bytes)
# Move the pointer
packet_pointer += RtbRowe.BYTES_FLOAT
self.rssi[:element_multiplier, :num_elements, ens_index] = amp
def decode_corr(self, ens_bytes: list, ens_index: int, num_elements: int, element_multiplier: int,
name_len: int = 8):
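        # Decode the ensemble data for the Correlation data.
        # Correlation is stored per [beam][bin] as a fraction (see self.corr).
        # :param ens_bytes: Byte array containing the ensemble data.
        # :param ens_index: Ensemble index to store the data.
        # :param element_multiplier: Number of beams.
        # :param num_elements: Number of bins.
        # :param name_len: Length of the name of the dataset.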
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
# Initialize the array
        corr = np.empty(shape=[element_multiplier, num_elements], dtype=float)
# Create a 2D list of velocities
# [beam][bin]
for beam in range(element_multiplier):
for bin_num in range(num_elements):
corr[beam][bin_num] = RtbRowe.get_float(packet_pointer, RtbRowe.BYTES_FLOAT, ens_bytes)
# Move the pointer
packet_pointer += RtbRowe.BYTES_FLOAT
self.corr[:element_multiplier, :num_elements, ens_index] = corr
def decode_pgb(self, ens_bytes: list, ens_index: int, num_elements: int, element_multiplier: int,
name_len: int = 8): # , pings_per_ens: int = 1):
"""
Decode the ensemble data for the Good Beam Ping data.
        Good Beam Pings. This gives the number of pings that were used
        when averaging pings together. Compare this count against the
        configured ping count to know how many pings were actually attempted.
Initialize the list of Good Beam data. [beam][bin]
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble Index to store the data.
:param element_multiplier: Number of beams.
        :param num_elements: Number of bins.
:param name_len: Length of the name of the dataset.
# :param pings_per_ens: Number of pings in the ensemble
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
# Initialize the array
        pings = np.empty(shape=[element_multiplier, num_elements], dtype=int)
# Create a 2D list
# [beam][bin]
for beam in range(element_multiplier):
for bin_num in range(num_elements):
pings[beam][bin_num] = RtbRowe.get_int32(packet_pointer, RtbRowe.BYTES_INT32, ens_bytes)
# Move the pointer
packet_pointer += RtbRowe.BYTES_INT32
self.pergd[:element_multiplier, :num_elements, ens_index] = pings
def decode_earth_vel(self, ens_bytes: list, ens_index: int, num_elements: int, element_multiplier: int,
name_len: int = 8):
"""
Decode the ensemble data for the Earth velocity.
Initialize the list of velocity data. [beam][bin]
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index in the file.
:param element_multiplier: Number of beams.
        :param num_elements: Number of bins.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
# Initialize the array
vel = np.empty(shape=[element_multiplier, num_elements])
# Create a 2D list of velocities
# [beam][bin]
for beam in range(element_multiplier):
for bin_num in range(num_elements):
vel[beam][bin_num] = RtbRowe.get_float(packet_pointer, RtbRowe.BYTES_FLOAT, ens_bytes)
# Move the pointer
packet_pointer += RtbRowe.BYTES_FLOAT
self.vel_earth_mps[:element_multiplier, :num_elements, ens_index] = vel
self.vel_mps[:element_multiplier, :num_elements, ens_index] = vel
def decode_pg_earth(self, ens_bytes: list, ens_index: int, num_elements: int, element_multiplier: int,
name_len: int = 8): # , pings_per_ens: int = 1):
"""
Decode the ensemble data for the Good Earth Ping data.
Initialize the list of Good Beam data. [beam][bin]
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index in the file.
:param element_multiplier: Number of beams.
        :param num_elements: Number of bins.
:param name_len: Length of the name of the dataset.
# :param pings_per_ens: Number of pings in the ensemble
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
# Initialize the array
        pings = np.empty(shape=[element_multiplier, num_elements], dtype=int)
# Create a 2D list
# [beam][bin]
for beam in range(element_multiplier):
for bin_num in range(num_elements):
pings[beam][bin_num] = RtbRowe.get_int32(packet_pointer, RtbRowe.BYTES_INT32, ens_bytes)
# Move the pointer
packet_pointer += RtbRowe.BYTES_INT32
# Reshape the data from [beam, bin] to [bin, beam]
# pings = np.reshape(pings, [num_elements, element_multiplier])
# Add the data the numpy array [:num_beams, :num_bins, ens_index]
# self.pergd_earth[:element_multiplier, :num_elements, ens_index] = pings.T
self.pergd_earth[:element_multiplier, :num_elements, ens_index] = pings
def decode_instr_vel(self, ens_bytes: list, ens_index: int, num_elements: int, element_multiplier: int,
name_len: int = 8):
"""
Decode the ensemble data for the Instrument velocity.
Initialize the list of velocity data. [beam][bin]
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index in the file.
:param element_multiplier: Number of beams.
        :param num_elements: Number of bins.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
# Initialize the array
vel = np.empty(shape=[element_multiplier, num_elements])
# Create a 2D list of velocities
# [beam][bin]
for beam in range(element_multiplier):
for bin_num in range(num_elements):
vel[beam][bin_num] = RtbRowe.get_float(packet_pointer, RtbRowe.BYTES_FLOAT, ens_bytes)
# Move the pointer
packet_pointer += RtbRowe.BYTES_FLOAT
# Reshape the data from [beam, bin] to [bin, beam]
# vel = np.reshape(vel, [num_elements, element_multiplier])
# Add the data the numpy array [:num_beams, :num_bins, ens_index]
# self.vel_instr_mps[:element_multiplier, :num_elements, ens_index] = vel.T
self.vel_instr_mps[:element_multiplier, :num_elements, ens_index] = vel
class Inst:
"""
Instrument specific values.
"""
def __init__(self, num_ens: int):
"""
Initialize the values.
"""
self.firm_major = RtbRowe.nans(num_ens) # Firmware Major Number
self.firm_minor = RtbRowe.nans(num_ens) # Firmware Minor Number
self.firm_rev = RtbRowe.nans(num_ens) # Firmware Revision
self.data_type = [''] * num_ens # Data type "Real" or "Simu"
self.firm_ver = RtbRowe.nans(num_ens) # Firmware version as a string
self.beam_ang = RtbRowe.nans(num_ens) # Beam Angle in degrees
self.beams = RtbRowe.nans(num_ens) # Number of beams used in velocity measurement
self.freq = RtbRowe.nans(num_ens) # System frequency in Khz
self.pat = [''] * num_ens # Beam Pattern: Concave, Convex or n/a
self.res_RDI = 0 # Reserved
self.sensor_CFG = RtbRowe.nans(num_ens) # Sensor Configuration
self.xducer = [''] * num_ens # Indicates if transducer is attached: 'Not Attached',
# Attached, n/a
self.t_matrix = np.tile([np.nan], [4, 4]) # Transformation matrix
self.demod = RtbRowe.nans(num_ens) # Demodulation code
def decode_ensemble_data(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the ensemble data for the Instrument data.
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
self.beams[ens_index] = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 2, RtbRowe.BYTES_INT32,
ens_bytes)
firm_rev = struct.unpack("B", ens_bytes[packet_pointer + RtbRowe.BYTES_INT32 * 21 + 0:
packet_pointer + RtbRowe.BYTES_INT32 * 21 + 1])[0]
firm_minor = struct.unpack("B", ens_bytes[packet_pointer + RtbRowe.BYTES_INT32 * 21 + 1:
packet_pointer + RtbRowe.BYTES_INT32 * 21 + 2])[0]
firm_major = struct.unpack("B", ens_bytes[packet_pointer + RtbRowe.BYTES_INT32 * 21 + 2:
packet_pointer + RtbRowe.BYTES_INT32 * 21 + 3])[0]
self.firm_rev[ens_index] = firm_rev
self.firm_minor[ens_index] = firm_minor
self.firm_major[ens_index] = firm_major
self.firm_ver[ens_index] = round(firm_minor + (firm_rev/100.0), 2)
# Determine the beam angle based on the subsystem type
ss_code = str(ens_bytes[packet_pointer + RtbRowe.BYTES_INT32 * 21 + 3:
packet_pointer + RtbRowe.BYTES_INT32 * 21 + 4], "UTF-8")
if ss_code == "A" or ss_code == "B" or ss_code == "C":
self.beam_ang[ens_index] = 0
else:
self.beam_ang[ens_index] = 20
def decode_systemsetup_data(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the system setup data for the Configuration data.
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble Index.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
# Get the frequency and convert from Hz to kHz
freq = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 6, RtbRowe.BYTES_FLOAT, ens_bytes)
freq_khz = freq / 1000
if 60 < freq_khz < 80:
freq_khz = 75
elif 130 < freq_khz < 160:
freq_khz = 150
elif 200 < freq_khz < 400:
freq_khz = 300
elif 400 < freq_khz < 800:
freq_khz = 600
elif 1000 < freq_khz < 1300:
freq_khz = 1200
elif 1800 < freq_khz < 2500:
freq_khz = 2400
# else:
# freq_khz = np.nan
# Set the value
self.freq[ens_index] = freq_khz
# Options are Concave, Convex or n/a
self.pat[ens_index] = 'Concave'
# Options are 'Not Attached', Attached or n/a
self.xducer[ens_index] = 'Attached'
# Options are 'Simu' or 'Real'
self.data_type = 'Real'
# Transformation matrix
self.t_matrix[0] = [-1.4619, 1.4619, 0, 0]
self.t_matrix[1] = [0, 0, -1.4619, 1.4619]
self.t_matrix[2] = [-0.2660, -0.2660, -0.2660, -0.2660]
self.t_matrix[3] = [0.25, 0.25, -0.25, -0.25]
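        # Note (assumption): this fixed matrix appears to be the nominal
        # beam-to-instrument transformation for a 20-degree, 4-beam Janus head;
        # it would typically be applied per bin as instrument_vel = t_matrix @ beam_vel.
        # The values are hard-coded here rather than read from the instrument.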
class Cfg:
"""
System configuration data is all the data that describes the ADCP configuration and date and time.
This includes the bin size, blank and number of bins, the ensemble number, date and time.
"""
def __init__(self, num_ens: int):
"""
Initialize all the values
:param num_ens: Number of ensembles.
"""
self.ens_num = RtbRowe.nans(num_ens) # Ensemble number
# self.num_bins = RtbRowe.nans(num_ens) # Replaced with wn Number of bins
self.desired_ping_count = RtbRowe.nans(num_ens) # Avg Ping Count configured in seconds
# self.actual_ping_count = RtbRowe.nans(num_ens) # Replaced with wp. Avg Ping Count actually
# output in seconds
# self.serial_num_index = RtbRowe.nans(num_ens) # np.empty(num_ens, dtype=str) # Serial Number Index
# self.serial_num_index = RtbRowe.nans(num_ens, dtype=str)
# self.subsystem_code = RtbRowe.nans(num_ens, dtype=str)# SubsystemCode(Identifier of frequency and orientation)
# self.subsystem_config = RtbRowe.nans(num_ens) # Subsystem Config. System allows multiple configures
# of the same frequency. This identifies each configuration
self.status = RtbRowe.nans(num_ens) # Status code
# self.year = RtbRowe.nans(num_ens) # Year
# self.month = RtbRowe.nans(num_ens) # Month
# self.day = RtbRowe.nans(num_ens) # Day
# self.hour = RtbRowe.nans(num_ens) # Hour
# self.minute = RtbRowe.nans(num_ens) # Minute
# self.second = RtbRowe.nans(num_ens) # Second
# self.hsec = RtbRowe.nans(num_ens) # Hundredth Second
# ADCP 3 values
self.current_system = RtbRowe.nans(num_ens)
self.status_2 = RtbRowe.nans(num_ens)
self.burst_index = RtbRowe.nans(num_ens)
self.first_ping_time = RtbRowe.nans(num_ens) # First Ping Time in seconds.
self.last_ping_time = RtbRowe.nans(num_ens) # Last Ping Time in seconds. (If averaging pings,
# this will be the last ping)
self.salinity = RtbRowe.nans(num_ens) # Water Salinity set by the user in PPT
self.speed_of_sound = RtbRowe.nans(num_ens) # Speed of Sound in m/s.
self.bt_first_ping_time = RtbRowe.nans(num_ens)
self.bt_last_ping_time = RtbRowe.nans(num_ens)
self.bt_speed_of_sound = RtbRowe.nans(num_ens)
self.bt_status = RtbRowe.nans(num_ens)
self.bt_num_beams = RtbRowe.nans(num_ens)
self.bt_actual_ping_count = RtbRowe.nans(num_ens)
self.bt_samples_per_second = RtbRowe.nans(num_ens) # Bottom Track Samples Per Second
self.bt_system_freq_hz = RtbRowe.nans(num_ens) # Bottom Track System Frequency (Hz)
self.bt_cpce = RtbRowe.nans(num_ens) # Bottom Track Carrier cycles per Code Elements
self.bt_nce = RtbRowe.nans(num_ens) # Bottom Track Number of Code Elements contained in a lag
self.bt_repeat_n = RtbRowe.nans(num_ens) # Bottom Track Number of times the NCE is repeated in the
# transmit signal
self.wp_samples_per_second = RtbRowe.nans(num_ens) # Water Profile Samples per Second
# self.wp_system_freq_hz = [] # Water Profile System Frequency (Hz)
self.wp_cpce = RtbRowe.nans(num_ens) # Water Profile Carrier cycles per Code Elements
self.wp_nce = RtbRowe.nans(num_ens) # Water Profile Number of Code Elements contained in a lag
self.wp_repeat_n = RtbRowe.nans(num_ens) # Water Profile Number of times the NCE is repeated in the
# transmit signal
self.wp_lag_samples = RtbRowe.nans(num_ens) # Water Profile Lag Samples
# self.bt_broadband = RtbRowe.nans(num_ens) # Bottom Track Broadband
self.bt_lag_length = RtbRowe.nans(num_ens) # Bottom Track Pulse to Pulse Lag (m)
self.bt_narrowband = RtbRowe.nans(num_ens) # Bottom Track Long Range Switch Depth (m)
self.bt_beam_mux = RtbRowe.nans(num_ens) # Bottom Track Beam Multiplex
# self.wp_broadband = RtbRowe.nans(num_ens) # Water Profile Mode
self.lag_cm = RtbRowe.nans(num_ens) # Water Profile Lag Length
self.lag_near_bottom = RtbRowe.nans(num_ens) # Water Profile Lag Near Bottom
self.wp_transmit_bandwidth = RtbRowe.nans(num_ens) # Water Profile Transmit Bandwidth
self.wp_receive_bandwidth = RtbRowe.nans(num_ens) # Water Profile Receive Bandwidth
self.wp_beam_mux = RtbRowe.nans(num_ens) # WP Beam Mux
self.ba = RtbRowe.nans(num_ens) # Bottom Track Amplitude Threshold
self.bc = RtbRowe.nans(num_ens) # Bottom Track Correlation Threshold
self.be_mmps = RtbRowe.nans(num_ens) # Bottom Track Error Velocity Threshold
self.bg = RtbRowe.nans(num_ens) # Bottom Track Percent Good Threshold
self.bm = RtbRowe.nans(num_ens) # * Bottom Track Mode
self.bp = RtbRowe.nans(num_ens) # Bottom Track Number of Pings
self.bx_dm = RtbRowe.nans(num_ens) # Maximum Tracking depth in decimeters
self.code_reps = RtbRowe.nans(num_ens) # Number of code repetitions
self.coord_sys = ['Earth'] * num_ens # Coordinate System
self.cpu_ser_no = RtbRowe.nans([num_ens, 8]) # CPU Serial Number
self.cq = RtbRowe.nans(num_ens) # Transmit Power
self.cx = RtbRowe.nans(num_ens) # Low Latency Trigger
self.dist_bin1_cm = RtbRowe.nans(num_ens) # * Distance to center of bin 1 from transducer
self.ea_deg = RtbRowe.nans(num_ens) # Heading alignment
self.eb_deg = RtbRowe.nans(num_ens) # Heading bias
self.sensor_avail = [''] * num_ens # Sensor availability code
self.ex = [''] * num_ens # Coordinate transformation codes
self.ez = [''] * num_ens # Sensor codes
self.head_src = [''] * num_ens # Heading sources
self.lag_cm = RtbRowe.nans(num_ens) # * Lag Length in centimeter
self.map_bins = [''] * num_ens # * Bin Mapping
self.n_beams = RtbRowe.nans(num_ens) # * Number of velocity beams
self.pitch_src = [''] * num_ens # Source of pitch data
self.ref_lay_end_cell = RtbRowe.nans(num_ens) # Reference Layer end cell
self.ref_lay_str_cell = RtbRowe.nans(num_ens) # Reference Layer start cell
self.roll_src = [''] * num_ens # Source of roll data
self.sal_src = [''] * num_ens # Salinity Source
self.wm = RtbRowe.nans(num_ens) # * Water Mode
self.sos_src = [''] * num_ens # Speed of Sound source
self.temp_src = [''] * num_ens # Temperature Source
self.tp_sec = RtbRowe.nans(num_ens) # * Time between Pings
self.use_3beam = [''] * num_ens # Setting to use 3-Beam solution or not
self.use_pr = [''] * num_ens # Setting to use pitch and roll or not
self.wa = RtbRowe.nans(num_ens) # Water Track amplitude threshold
self.wb = RtbRowe.nans(num_ens) # Water Track bandwidth threshold
self.wc = RtbRowe.nans(num_ens) # Water Track correlation threshold
self.we_mmps = RtbRowe.nans(num_ens) # Water Track error velocity threshold
self.wf_cm = RtbRowe.nans(num_ens) # * Blank after Transmit in cm
self.wg_per = RtbRowe.nans(num_ens) # Water Track percent good threshold
self.wj = RtbRowe.nans(num_ens) # Receiver Gain setting
self.wn = RtbRowe.nans(num_ens) # * Number of depth cells (bins)
self.wp = RtbRowe.nans(num_ens) # * Number of water pings
self.ws_cm = RtbRowe.nans(num_ens) # * Bin size in cm
self.xdcr_dep_srs = [''] * num_ens # Salinity Source
self.xmit_pulse_cm = RtbRowe.nans(num_ens) # Transmit Pulse length
self.lag_near_bottom = RtbRowe.nans(num_ens) # Lag near bottom setting
def decode_ensemble_data(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the ensemble data for the configuration data.
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index to store the data.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
self.ens_num[ens_index] = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 0, RtbRowe.BYTES_INT32,
ens_bytes)
self.wn[ens_index] = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 1, RtbRowe.BYTES_INT32, ens_bytes)
self.n_beams[ens_index] = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 2, RtbRowe.BYTES_INT32,
ens_bytes)
self.desired_ping_count[ens_index] = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 3,
RtbRowe.BYTES_INT32, ens_bytes)
self.wp[ens_index] = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 4, RtbRowe.BYTES_INT32, ens_bytes)
self.status[ens_index] = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 5, RtbRowe.BYTES_INT32,
ens_bytes)
# self.year.append(RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 6, RtbRowe.BYTES_INT32, ens_bytes))
# self.month[ens_index] = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 7, RtbRowe.BYTES_INT32,
# ens_bytes)
# self.day[ens_index] = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 8, RtbRowe.BYTES_INT32,
# ens_bytes)
# self.hour[ens_index] = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 9, RtbRowe.BYTES_INT32,
# ens_bytes)
# self.minute[ens_index] = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 10, RtbRowe.BYTES_INT32,
# ens_bytes)
# self.second[ens_index] = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 11, RtbRowe.BYTES_INT32,
# ens_bytes)
# self.hsec[ens_index] = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 12, RtbRowe.BYTES_INT32,
# ens_bytes)
# self.serial_num_index[ens_index] = str(
# ens_bytes[packet_pointer + RtbRowe.BYTES_INT32 * 13:packet_pointer + RtbRowe.BYTES_INT32 * 21], "UTF-8")
# self.subsystem_code[ens_index] = str(ens_bytes[packet_pointer +
# RtbRowe.BYTES_INT32 * 21 + 3:
# packet_pointer + RtbRowe.BYTES_INT32 * 21 + 4], "UTF-8")
# self.subsystem_config[ens_index] = struct.unpack("B", ens_bytes[packet_pointer +
# RtbRowe.BYTES_INT32 * 22 + 3:packet_pointer +
# RtbRowe.BYTES_INT32 * 22 + 4])[0]
# With our data format, Beam, Instrument and Earth
# Are always available. The Data stored in vel_mps
# will always be Earth data.
self.coord_sys[ens_index] = 'Earth'
def decode_ancillary_data(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the ancillary data for the Configuration data.
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index to store the data.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
self.wf_cm[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 0, RtbRowe.BYTES_FLOAT,
ens_bytes) * 100.0
self.ws_cm[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 1, RtbRowe.BYTES_FLOAT,
ens_bytes) * 100.0
first_ping_time = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 2, RtbRowe.BYTES_FLOAT, ens_bytes)
last_ping_time = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 3, RtbRowe.BYTES_FLOAT, ens_bytes)
self.first_ping_time[ens_index] = first_ping_time
self.last_ping_time[ens_index] = last_ping_time
self.tp_sec[ens_index] = last_ping_time - first_ping_time
# If there is any value other than 0
# Then pressure sensor is available
pressure = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 10, RtbRowe.BYTES_FLOAT, ens_bytes)
if pressure > 0:
self.use_pr = 'Yes'
else:
self.use_pr = 'No'
# Our system will always perform bin mapping
self.map_bins = 'Yes'
self.dist_bin1_cm[ens_index] = self.wf_cm[ens_index]
self.use_3beam = 'Yes'
# Based on the data, unable to determine
# which source was used for the Speed of Sound source
# Could be calculated or a fixed value.
# Options: 'Manual EC', 'Calculated', 'SVSS Sensor', N/a'
self.sos_src = 'Calculated'
# Based on the data, unable to determine
# which source was used for the heading source
# Could be compass, fixed value or GPS
# Options: "Manual EH", 'Int. Sensor', 'N/a'
self.head_src = 'Int. Sensor'
# Pitch source currently only internal sensor
self.pitch_src = "Int. Sensor"
self.roll_src = "Int. Sensor"
# Based on the data, unable to determine
# which source was used for depth source.
# Could be RT or pressure
# Options: "Manual ES", 'Int. Sensor', 'N/a'
self.xdcr_dep_srs = "Manual ES"
# Based on the data, unable to determine
# which source was used for temperature
# Could be internal sensor or fixed value
# Options: 'Manual ET', 'Int. Sensor', 'N/a'
self.temp_src = 'Int. Sensor'
self.salinity[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 9, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.speed_of_sound[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 12,
RtbRowe.BYTES_FLOAT, ens_bytes)
def decode_ancillary_adcp3_data(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the ancillary data for the Configuration data.
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index to store the data.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
self.current_system[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 0,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.status_2[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 1,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.burst_index[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 2,
RtbRowe.BYTES_FLOAT, ens_bytes)
def decode_systemsetup_data(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the system setup data for the Configuration data.
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index to store the data.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
self.bt_samples_per_second[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 0,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_system_freq_hz[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 1,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_cpce[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 2,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_nce[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 3,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_repeat_n[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 4,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.wp_samples_per_second[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 5,
RtbRowe.BYTES_FLOAT, ens_bytes)
# self.wp_system_freq_hz.append(RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 6, RtbRowe.BYTES_FLOAT,
# ens_bytes))
self.wp_cpce[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 7,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.wp_nce[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 8,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.wp_repeat_n[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 9,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.wp_lag_samples[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 10,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bm[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 13,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_lag_length[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 14,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_narrowband[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 15,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_beam_mux[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 16,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.wm[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 17,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.lag_cm[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 18,
RtbRowe.BYTES_FLOAT, ens_bytes) * 100.0
self.wp_transmit_bandwidth[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 19,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.wp_receive_bandwidth[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 20,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.wp_beam_mux[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 22,
RtbRowe.BYTES_FLOAT, ens_bytes)
# Use the same as lag
self.lag_near_bottom[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 18,
RtbRowe.BYTES_FLOAT, ens_bytes) * 100.0
# Assume always a 20 degree beam angle for now
beam_angle = 20
# Get the speed of sound from this ensemble or previous
speed_of_sound = 1500
if not np.isnan(self.speed_of_sound[ens_index]):
speed_of_sound = self.speed_of_sound[ens_index]
elif ens_index-1 >= 0 and not np.isnan(self.speed_of_sound[ens_index-1]):
speed_of_sound = self.speed_of_sound[ens_index-1]
# Calculate lag length
# and Xmit Pulse Length
# sample_rate = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 5, RtbRowe.BYTES_FLOAT, ens_bytes)
# lag_samples = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 10, RtbRowe.BYTES_FLOAT, ens_bytes)
cpce = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 7, RtbRowe.BYTES_FLOAT, ens_bytes)
nce = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 8, RtbRowe.BYTES_FLOAT, ens_bytes)
repeats_n = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 9, RtbRowe.BYTES_FLOAT, ens_bytes)
sys_freq_hz = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 6, RtbRowe.BYTES_FLOAT, ens_bytes)
# meters_per_sample = math.cos(math.pi * (beam_angle/180.0)) * speed_of_sound / 2 / sample_rate
# lag_m = lag_samples * meters_per_sample
meters_per_cycle = math.cos(math.pi * (beam_angle/180.0)) * speed_of_sound / 2 / sys_freq_hz
xmt_m = cpce * nce * repeats_n * meters_per_cycle
self.xmit_pulse_cm[ens_index] = xmt_m * 100.0
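        # Worked example (hypothetical values): with speed_of_sound = 1500 m/s,
        # beam_angle = 20 deg and sys_freq_hz = 600000, meters_per_cycle is about
        # cos(20 deg) * 1500 / 2 / 600000 = 1.17e-3 m; with cpce = 12, nce = 2 and
        # repeats_n = 1 the transmit pulse is roughly 0.028 m, stored as 2.8 cm.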
def decode_bottom_track_data(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the system Bottom Track data for the Configuration data.
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index to store the data.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
self.bt_first_ping_time[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 0,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_last_ping_time[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 1,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_speed_of_sound[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 10,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_status[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 11,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_num_beams[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 12,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_actual_ping_count[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 13,
RtbRowe.BYTES_FLOAT, ens_bytes)
class Sensor:
"""
    System sensor data is all the data from the sensors within the ADCP. This includes the compass and
    temperature sensors.
"""
def __init__(self, num_ens: int):
"""
Initialize all the values
Set the flag if using PD0 format data. This will change the scale of the pressure sensor and orientation
of the roll value.
:param num_ens: Number of ensembles in the file.
"""
self.voltage = RtbRowe.nans(num_ens) # Voltage input to ADCP
self.transmit_boost_neg_volt = RtbRowe.nans(num_ens) # Transmitter Boost Negative Voltage
self.raw_mag_field_strength = RtbRowe.nans(num_ens) # Raw magnetic field strength (uT) (micro Tesla)
self.raw_mag_field_strength2 = RtbRowe.nans(num_ens) # Raw magnetic field strength (uT) (micro Tesla)
self.raw_mag_field_strength3 = RtbRowe.nans(num_ens) # Raw magnetic field strength (uT) (micro Tesla)
self.pitch_gravity_vec = RtbRowe.nans(num_ens) # Pitch Gravity Vector
self.roll_gravity_vec = RtbRowe.nans(num_ens) # Roll Gravity Vector
self.vertical_gravity_vec = RtbRowe.nans(num_ens) # Vertical Gravity Vector
self.bt_heading = RtbRowe.nans(num_ens)
self.bt_pitch = RtbRowe.nans(num_ens)
self.bt_roll = RtbRowe.nans(num_ens)
self.bt_water_temp = RtbRowe.nans(num_ens)
self.bt_system_temp = RtbRowe.nans(num_ens)
self.bt_salinity = RtbRowe.nans(num_ens)
self.bt_pressure = RtbRowe.nans(num_ens)
self.bt_transducer_depth = RtbRowe.nans(num_ens)
# ADCP 3 Values
self.hs1_temp = RtbRowe.nans(num_ens)
self.hs2_temp = RtbRowe.nans(num_ens)
self.rcv1_temp = RtbRowe.nans(num_ens)
self.rcv2_temp = RtbRowe.nans(num_ens)
self.vinf = RtbRowe.nans(num_ens)
self.vg = RtbRowe.nans(num_ens)
self.vt = RtbRowe.nans(num_ens)
self.vtl = RtbRowe.nans(num_ens)
self.d3v3 = RtbRowe.nans(num_ens)
self.bt_hs1_temp = RtbRowe.nans(num_ens)
self.bt_hs2_temp = RtbRowe.nans(num_ens)
self.bt_rcv1_temp = RtbRowe.nans(num_ens)
self.bt_rcv2_temp = RtbRowe.nans(num_ens)
self.bt_vinf = RtbRowe.nans(num_ens)
self.bt_vg = RtbRowe.nans(num_ens)
self.bt_vt = RtbRowe.nans(num_ens)
self.bt_vtl = RtbRowe.nans(num_ens)
self.bt_d3v3 = RtbRowe.nans(num_ens)
self.bt_sounder_range = RtbRowe.nans(num_ens)
self.bt_sounder_snr = RtbRowe.nans(num_ens)
self.bt_sounder_amp = RtbRowe.nans(num_ens)
self.echo_sounder_depth = RtbRowe.nans(num_ens)
self.ambient_temp = RtbRowe.nans(num_ens) # ADC ambient temperature
self.attitude_temp = RtbRowe.nans(num_ens) # ADC attitude temperature
self.attitude = RtbRowe.nans(num_ens) # ADC attitude
self.bit_test = RtbRowe.nans(num_ens) # Bit Test Result
self.contam_sensor = RtbRowe.nans(num_ens) # ADC contamination sensor
self.date = RtbRowe.nans([num_ens, 3]) # date as int
self.date_y2k = RtbRowe.nans([num_ens, 4]) # Date Y2K compatible
self.date_not_y2k = RtbRowe.nans([num_ens, 3]) # Date not Y2K compatible
self.error_status_word = [''] * num_ens # Error Status codes
self.heading_deg = RtbRowe.nans(num_ens) # Heading to magnetic north in degrees
self.heading_std_dev_deg = RtbRowe.nans(num_ens) # Standard deviation of headings for an ensemble
self.mpt_msc = RtbRowe.nans([num_ens, 3]) # Minimum time prior to ping
self.num = RtbRowe.nans(num_ens) # Ensemble number
self.num_fact = RtbRowe.nans(num_ens) # Number fraction
self.num_tot = RtbRowe.nans(num_ens) # Number total
self.orient = [''] * num_ens # Orientation of ADCP
self.pitch_std_dev_deg = RtbRowe.nans(num_ens) # Standard deviation of pitch for an ensemble
self.pitch_deg = RtbRowe.nans(num_ens) # Pitch in degrees
self.pressure_neg = RtbRowe.nans(num_ens) # ADC pressure negative
self.pressure_pos = RtbRowe.nans(num_ens) # ADC pressure positive
self.pressure_pascal = RtbRowe.nans(num_ens) # Pressure at transducer face in deca-pascals
self.pressure_var_pascal = RtbRowe.nans(num_ens) # Pressure variance in deca-pascals
self.roll_std_dev_deg = RtbRowe.nans(num_ens) # Standard deviation of roll for an ensemble
self.roll_deg = RtbRowe.nans(num_ens) # Roll in degrees
self.salinity_ppt = RtbRowe.nans(num_ens) # Salinity in parts per thousand PPT
self.sos_mps = RtbRowe.nans(num_ens) # Speed of Sound in m/s
self.temperature_deg_c = RtbRowe.nans(num_ens) # Water Temperature in degrees C
self.time = RtbRowe.nans([num_ens, 4]) # Time
self.time_y2k = RtbRowe.nans([num_ens, 4]) # Y2K compatible time
self.xdcr_depth_dm = RtbRowe.nans(num_ens) # Transducer depth in decimeters
self.xmit_current = RtbRowe.nans(num_ens) # Transmit current
self.xmit_voltage = RtbRowe.nans(num_ens) # Transmit voltage
self.vert_beam_eval_amp = RtbRowe.nans(num_ens) # Vertical beam amplitude
self.vert_beam_RSSI_amp = RtbRowe.nans(num_ens) # Vertical beam return signal strength indicator
self.vert_beam_range_m = RtbRowe.nans(num_ens) # Vertical beam range in m
self.vert_beam_gain = [''] * num_ens # Vertical beam gain setting
self.vert_beam_status = np.zeros(num_ens) # Vertical beam status
def decode_ensemble_data(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the system setup data for the Sensor data.
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index in the file.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
year = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 6, RtbRowe.BYTES_INT32, ens_bytes)
month = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 7, RtbRowe.BYTES_INT32, ens_bytes)
day = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 8, RtbRowe.BYTES_INT32, ens_bytes)
hour = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 9, RtbRowe.BYTES_INT32, ens_bytes)
minute = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 10, RtbRowe.BYTES_INT32, ens_bytes)
second = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 11, RtbRowe.BYTES_INT32, ens_bytes)
hsec = RtbRowe.get_int32(packet_pointer + RtbRowe.BYTES_INT32 * 12, RtbRowe.BYTES_INT32, ens_bytes)
ens_date = [year, month, day]
ens_date_y2k = [2000, year-2000, month, day]
ens_date_not_y2k = [year - 2000, month, day]
ens_time = [hour, minute, second, hsec]
self.date[ens_index] = ens_date
self.date_not_y2k[ens_index] = ens_date_not_y2k
self.date_y2k[ens_index] = ens_date_y2k
self.time_y2k[ens_index] = ens_time
self.time[ens_index] = ens_time
def decode_systemsetup_data(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the system setup data for the Sensor data.
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index in the file.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
self.voltage[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 11,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.xmit_voltage[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 12,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.transmit_boost_neg_volt[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 21,
RtbRowe.BYTES_FLOAT, ens_bytes)
def decode_ancillary_data(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the ancillary data for the Sensor data.
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index to store the data.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
self.heading_deg[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 4,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.pitch_deg[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 5,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.temperature_deg_c[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 7,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.ambient_temp[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 8,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.salinity_ppt[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 9,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.sos_mps[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 12,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.raw_mag_field_strength[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 13,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.raw_mag_field_strength2[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 14,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.raw_mag_field_strength3[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 15,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.pitch_gravity_vec[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 16,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.roll_gravity_vec[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 17,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.vertical_gravity_vec[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 18,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.roll_deg[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 6,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.pressure_pascal[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 10,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.xdcr_depth_dm[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 11,
RtbRowe.BYTES_FLOAT, ens_bytes)
def decode_ancillary_adcp3_data(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the ancillary ADCP3 data for the Sensor data.
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index in the file.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
self.hs1_temp[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 13,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.hs2_temp[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 14,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.rcv1_temp[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 15,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.rcv2_temp[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 16,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.vinf[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 17,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.vg[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 18,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.vt[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 16,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.vtl[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 17,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.d3v3[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 18,
RtbRowe.BYTES_FLOAT, ens_bytes)
def decode_bottom_track_data(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the bottom track data for the Sensor data.
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index in the file.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
self.bt_heading[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 2,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_pitch[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 3,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_water_temp[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 5,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_system_temp[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 6,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_salinity[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 7,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_roll[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 4,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_pressure[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 8,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_transducer_depth[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 9,
RtbRowe.BYTES_FLOAT, ens_bytes)
def decode_bottom_track_adcp3_data(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the bottom track ADCP3 data for the Sensor data.
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index in the file.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
# Get the number of beams
num_beams = int(RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 12, RtbRowe.BYTES_FLOAT, ens_bytes))
# 14 raw values plus 15 values for each beam
data_index = 14 + (15 * num_beams)
self.bt_hs1_temp[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * (data_index + 1),
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_hs2_temp[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * (data_index + 2),
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_rcv1_temp[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * (data_index + 3),
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_rcv2_temp[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * (data_index + 4),
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_vinf[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * (data_index + 5),
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_vg[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * (data_index + 6),
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_vt[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * (data_index + 7),
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_vtl[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * (data_index + 8),
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_d3v3[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * (data_index + 9),
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_sounder_range[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * (data_index + 11),
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_sounder_snr[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * (data_index + 12),
RtbRowe.BYTES_FLOAT, ens_bytes)
self.bt_sounder_amp[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * (data_index + 13),
RtbRowe.BYTES_FLOAT, ens_bytes)
def decode_vert_rt(self, ens_bytes: list, ens_index: int, name_len: int = 8):
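        # Decode the vertical beam Range Tracking data and store it in the
        # single-beam sensor fields (vertical beam RSSI amplitude, range in m,
        # evaluation amplitude, gain and status).
        # :param ens_bytes: Byte array containing the ensemble data.
        # :param ens_index: Ensemble index to store the data.
        # :param name_len: Length of the name of the dataset.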
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
# Get the number of beams
num_beams = int(RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 0, RtbRowe.BYTES_FLOAT, ens_bytes))
if num_beams == 1:
self.vert_beam_RSSI_amp[ens_index] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 1,
RtbRowe.BYTES_FLOAT, ens_bytes))
self.vert_beam_range_m[ens_index] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 2,
RtbRowe.BYTES_FLOAT, ens_bytes))
self.vert_beam_eval_amp[ens_index] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 4,
RtbRowe.BYTES_FLOAT, ens_bytes))
self.vert_beam_gain[ens_index] = 'L'
self.vert_beam_status[ens_index] = 1
class BT:
"""
Bottom Tracking used to measure the depth and vessel speed (Speed over Ground).
"""
def __init__(self, num_ens: int, num_beams: int):
"""
Set the flag if using PD0 format data.
If using PD0 format, then the beams will be rearranged to match PD0 beam order
and the scale will change from percentage to counts.
:param num_ens: Number of ensembles.
:param num_beams: Number of velocity beams.
"""
self.num_beams = 0
self.corr = RtbRowe.nans([num_beams, num_ens])
self.depth_m = RtbRowe.nans([num_beams, num_ens])
self.eval_amp = RtbRowe.nans([num_beams, num_ens])
self.ext_depth_cm = RtbRowe.nans(num_ens)
self.pergd = RtbRowe.nans([num_beams, num_ens])
self.rssi = RtbRowe.nans([num_beams, num_ens])
self.snr = RtbRowe.nans([num_beams, num_ens])
self.vel_mps = RtbRowe.nans([num_beams, num_ens])
self.instr_vel = RtbRowe.nans([num_beams, num_ens])
self.instr_good = RtbRowe.nans([num_beams, num_ens])
self.earth_vel = RtbRowe.nans([num_beams, num_ens])
self.earth_good = RtbRowe.nans([num_beams, num_ens])
self.pulse_coh_snr = RtbRowe.nans([num_beams, num_ens])
self.pulse_coh_amp = RtbRowe.nans([num_beams, num_ens])
self.pulse_coh_vel = RtbRowe.nans([num_beams, num_ens])
self.pulse_coh_noise = RtbRowe.nans([num_beams, num_ens])
self.pulse_coh_corr = RtbRowe.nans([num_beams, num_ens])
def decode(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
        Decode the ensemble data for the Bottom Track data.
Initialize the list of Bottom Track data. [beam]
:param ens_bytes: Byte array containing the Bottom Track data.
:param ens_index: Ensemble index to store the data.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
# Get the number of beams
self.num_beams = int(RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 12,
RtbRowe.BYTES_FLOAT, ens_bytes))
# Get the ping count
# Value stored in Cfg but needed for conversion to PD0
# bt_actual_ping_count = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 13,
# RtbRowe.BYTES_FLOAT, ens_bytes)
# Initialize the array
snr = np.empty(shape=[self.num_beams])
depth = np.empty(shape=[self.num_beams])
amp = np.empty(shape=[self.num_beams])
corr = np.empty(shape=[self.num_beams])
beam_vel = np.empty(shape=[self.num_beams])
beam_good = np.empty(shape=[self.num_beams])
instr_vel = np.empty(shape=[self.num_beams])
instr_good = np.empty(shape=[self.num_beams])
earth_vel = np.empty(shape=[self.num_beams])
earth_good = np.empty(shape=[self.num_beams])
pulse_coh_snr = np.empty(shape=[self.num_beams])
pulse_coh_amp = np.empty(shape=[self.num_beams])
pulse_coh_vel = np.empty(shape=[self.num_beams])
pulse_coh_noise = np.empty(shape=[self.num_beams])
pulse_coh_corr = np.empty(shape=[self.num_beams])
# Index to start at for the following data
index = 14
# Range Values
for beam in range(self.num_beams):
# Get the value
value = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
# Store RTB data
depth[beam] = value
# Increment for the next beam
index += 1
# Add the data the numpy array [:num_beams, ens_index]
self.depth_m[:self.num_beams, ens_index] = depth.T
# SNR values
for beam in range(self.num_beams):
# Get the value
value = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
snr[beam] = value
# Increment for the next beam
index += 1
# Add the data the numpy array [:num_beams, ens_index]
self.snr[:self.num_beams, ens_index] = snr.T
# Amplitude values
for beam in range(self.num_beams):
# Get the value
value = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
amp[beam] = value
# Increment for the next beam
index += 1
# Add the data the numpy array [:num_beams, ens_index]
self.rssi[:self.num_beams, ens_index] = amp.T
# Correlation values
for beam in range(self.num_beams):
# Get the value
value = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
corr[beam] = value
# Increment for the next beam
index += 1
# Add the data the numpy array [:num_beams, ens_index]
self.corr[:self.num_beams, ens_index] = corr.T
# Beam Velocity values
for beam in range(self.num_beams):
# Get the value
value = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
# Check for bad velocity and convert
if RtbRowe.is_bad_velocity(value):
value = np.nan
beam_vel[beam] = value
# Increment for the next beam
index += 1
# Add the data the numpy array [:num_beams, ens_index]
self.vel_mps[:self.num_beams, ens_index] = beam_vel.T
# Beam Good Pings values
for beam in range(self.num_beams):
# Get the value
value = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
beam_good[beam] = int(value)
# Increment for the next beam
index += 1
# Add the data the numpy array [:num_beams, ens_index]
self.pergd[:self.num_beams, ens_index] = beam_good.T
# Instrument Velocity values
for beam in range(self.num_beams):
# Get the value
value = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
# Check for bad velocity and convert
if RtbRowe.is_bad_velocity(value):
value = np.nan
instr_vel[beam] = value
# Increment for the next beam
index += 1
# Add the data to the numpy array [:num_beams, ens_index]
self.instr_vel[:self.num_beams, ens_index] = instr_vel.T
# Instrument Good Pings values
for beam in range(self.num_beams):
# Get the value
value = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
instr_good[beam] = int(value)
# Increment for the next beam
index += 1
# Add the data to the numpy array [:num_beams, ens_index]
self.instr_good[:self.num_beams, ens_index] = instr_good.T
# Earth Velocity values
for beam in range(self.num_beams):
# Get the value
value = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
# Check for bad velocity and convert
if RtbRowe.is_bad_velocity(value):
value = np.nan
earth_vel[beam] = -value
# Increment for the next beam
index += 1
# Add the data to the numpy array [:num_beams, ens_index]
self.earth_vel[:self.num_beams, ens_index] = earth_vel.T
self.vel_mps[:self.num_beams, ens_index] = earth_vel.T
# Earth Good Pings values
for beam in range(self.num_beams):
# Get the value
value = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
earth_good[beam] = int(value)
# Increment for the next beam
index += 1
# Add the data to the numpy array [:num_beams, ens_index]
self.earth_good[:self.num_beams, ens_index] = earth_good.T
# Pulse Coherent SNR values
for beam in range(self.num_beams):
pulse_coh_snr[beam] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT,
ens_bytes)
index += 1
# Add the data to the numpy array [:num_beams, ens_index]
self.pulse_coh_snr[:self.num_beams, ens_index] = pulse_coh_snr.T
# Pulse Coherent Amplitude values
for beam in range(self.num_beams):
pulse_coh_amp[beam] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT,
ens_bytes)
index += 1
# Add the data to the numpy array [:num_beams, ens_index]
self.pulse_coh_amp[:self.num_beams, ens_index] = pulse_coh_amp.T
# Pulse Coherent Velocity values
for beam in range(self.num_beams):
pulse_coh_vel[beam] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT,
ens_bytes)
index += 1
# Add the data to the numpy array [:num_beams, ens_index]
self.pulse_coh_vel[:self.num_beams, ens_index] = pulse_coh_vel.T
# Pulse Coherent Noise values
for beam in range(self.num_beams):
pulse_coh_noise[beam] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT,
ens_bytes)
index += 1
# Add the data to the numpy array [:num_beams, ens_index]
self.pulse_coh_noise[:self.num_beams, ens_index] = pulse_coh_noise.T
# Pulse Coherent Correlation values
for beam in range(self.num_beams):
pulse_coh_corr[beam] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT,
ens_bytes)
index += 1
# Add the data to the numpy array [:num_beams, ens_index]
self.pulse_coh_corr[:self.num_beams, ens_index] = pulse_coh_corr.T
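# Layout note (read off the decode above): the decoder skips the first 12 floats,
# reads the beam count (float index 12) and the ping count (index 13), and from
# index 14 on the payload stores one float per beam for each field in this order:
# range to bottom, SNR, amplitude, correlation, beam velocity, beam good pings,
# instrument velocity, instrument good pings, earth velocity, earth good pings,
# then the pulse coherent SNR, amplitude, velocity, noise and correlation blocks.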
class RT:
"""
Range Tracking values to measure the surface when upward looking.
When downward looking, values are used as an echo sounder using
the profile ping.
"""
def __init__(self, num_ens: int, num_beams: int):
"""
Initialize the Range Tracking arrays with NaN for every beam and ensemble.
The arrays are filled in decode() as each ensemble is read.
:param num_ens: Number of ensembles.
:param num_beams: Number of velocity beams.
"""
self.num_beams = 0
self.snr = RtbRowe.nans([num_beams, num_ens])
self.depth = RtbRowe.nans([num_beams, num_ens])
self.pings = RtbRowe.nans([num_beams, num_ens])
self.amp = RtbRowe.nans([num_beams, num_ens])
self.corr = RtbRowe.nans([num_beams, num_ens])
self.beam_vel = RtbRowe.nans([num_beams, num_ens])
self.instr_vel = RtbRowe.nans([num_beams, num_ens])
self.earth_vel = RtbRowe.nans([num_beams, num_ens])
def decode(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the ensemble data for the Range Tracking data.
Initialize the list of Range Tracking data. [beam]
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble Index.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
# Get the number of beams
self.num_beams = int(RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 0, RtbRowe.BYTES_FLOAT,
ens_bytes))
# Initialize the array
snr = np.empty(shape=[self.num_beams], dtype=np.float)
depth = np.empty(shape=[self.num_beams], dtype=np.float)
pings = np.empty(shape=[self.num_beams], dtype=np.float)
amp = np.empty(shape=[self.num_beams], dtype=np.float)
corr = np.empty(shape=[self.num_beams], dtype=np.float)
beam_vel = np.empty(shape=[self.num_beams], dtype=np.float)
instr_vel = np.empty(shape=[self.num_beams], dtype=np.float)
earth_vel = np.empty(shape=[self.num_beams], dtype=np.float)
if self.num_beams == 4:
snr[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 1, RtbRowe.BYTES_FLOAT, ens_bytes))
snr[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 2, RtbRowe.BYTES_FLOAT, ens_bytes))
snr[2] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 3, RtbRowe.BYTES_FLOAT, ens_bytes))
snr[3] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 4, RtbRowe.BYTES_FLOAT, ens_bytes))
depth[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 5, RtbRowe.BYTES_FLOAT, ens_bytes))
depth[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 6, RtbRowe.BYTES_FLOAT, ens_bytes))
depth[2] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 7, RtbRowe.BYTES_FLOAT, ens_bytes))
depth[3] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 8, RtbRowe.BYTES_FLOAT, ens_bytes))
pings[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 9, RtbRowe.BYTES_FLOAT, ens_bytes))
pings[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 10, RtbRowe.BYTES_FLOAT, ens_bytes))
pings[2] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 11, RtbRowe.BYTES_FLOAT, ens_bytes))
pings[3] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 12, RtbRowe.BYTES_FLOAT, ens_bytes))
amp[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 13, RtbRowe.BYTES_FLOAT, ens_bytes))
amp[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 14, RtbRowe.BYTES_FLOAT, ens_bytes))
amp[2] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 15, RtbRowe.BYTES_FLOAT, ens_bytes))
amp[3] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 16, RtbRowe.BYTES_FLOAT, ens_bytes))
corr[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 17, RtbRowe.BYTES_FLOAT, ens_bytes))
corr[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 18, RtbRowe.BYTES_FLOAT, ens_bytes))
corr[2] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 19, RtbRowe.BYTES_FLOAT, ens_bytes))
corr[3] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 20, RtbRowe.BYTES_FLOAT, ens_bytes))
beam_vel[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 21, RtbRowe.BYTES_FLOAT, ens_bytes))
beam_vel[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 22, RtbRowe.BYTES_FLOAT, ens_bytes))
beam_vel[2] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 23, RtbRowe.BYTES_FLOAT, ens_bytes))
beam_vel[3] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 24, RtbRowe.BYTES_FLOAT, ens_bytes))
instr_vel[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 25, RtbRowe.BYTES_FLOAT,
ens_bytes))
instr_vel[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 26, RtbRowe.BYTES_FLOAT,
ens_bytes))
instr_vel[2] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 27, RtbRowe.BYTES_FLOAT,
ens_bytes))
instr_vel[3] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 28, RtbRowe.BYTES_FLOAT,
ens_bytes))
earth_vel[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 29, RtbRowe.BYTES_FLOAT,
ens_bytes))
earth_vel[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 30, RtbRowe.BYTES_FLOAT,
ens_bytes))
earth_vel[2] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 31, RtbRowe.BYTES_FLOAT,
ens_bytes))
earth_vel[3] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 32, RtbRowe.BYTES_FLOAT,
ens_bytes))
elif self.num_beams == 3:
snr[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 1, RtbRowe.BYTES_FLOAT, ens_bytes))
snr[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 2, RtbRowe.BYTES_FLOAT, ens_bytes))
snr[2] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 3, RtbRowe.BYTES_FLOAT, ens_bytes))
depth[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 4, RtbRowe.BYTES_FLOAT, ens_bytes))
depth[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 5, RtbRowe.BYTES_FLOAT, ens_bytes))
depth[2] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 6, RtbRowe.BYTES_FLOAT, ens_bytes))
pings[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 7, RtbRowe.BYTES_FLOAT, ens_bytes))
pings[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 8, RtbRowe.BYTES_FLOAT, ens_bytes))
pings[2] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 9, RtbRowe.BYTES_FLOAT, ens_bytes))
amp[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 10, RtbRowe.BYTES_FLOAT, ens_bytes))
amp[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 11, RtbRowe.BYTES_FLOAT, ens_bytes))
amp[2] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 12, RtbRowe.BYTES_FLOAT, ens_bytes))
corr[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 13, RtbRowe.BYTES_FLOAT, ens_bytes))
corr[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 14, RtbRowe.BYTES_FLOAT, ens_bytes))
corr[2] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 15, RtbRowe.BYTES_FLOAT, ens_bytes))
beam_vel[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 16, RtbRowe.BYTES_FLOAT, ens_bytes))
beam_vel[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 17, RtbRowe.BYTES_FLOAT, ens_bytes))
beam_vel[2] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 18, RtbRowe.BYTES_FLOAT, ens_bytes))
instr_vel[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 19, RtbRowe.BYTES_FLOAT,
ens_bytes))
instr_vel[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 20, RtbRowe.BYTES_FLOAT,
ens_bytes))
instr_vel[2] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 21, RtbRowe.BYTES_FLOAT,
ens_bytes))
earth_vel[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 22, RtbRowe.BYTES_FLOAT,
ens_bytes))
earth_vel[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 23, RtbRowe.BYTES_FLOAT,
ens_bytes))
earth_vel[2] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 24, RtbRowe.BYTES_FLOAT,
ens_bytes))
elif self.num_beams == 2:
snr[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 1, RtbRowe.BYTES_FLOAT, ens_bytes))
snr[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 2, RtbRowe.BYTES_FLOAT, ens_bytes))
depth[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 3, RtbRowe.BYTES_FLOAT, ens_bytes))
depth[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 4, RtbRowe.BYTES_FLOAT, ens_bytes))
pings[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 5, RtbRowe.BYTES_FLOAT, ens_bytes))
pings[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 6, RtbRowe.BYTES_FLOAT, ens_bytes))
amp[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 7, RtbRowe.BYTES_FLOAT, ens_bytes))
amp[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 8, RtbRowe.BYTES_FLOAT, ens_bytes))
corr[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 9, RtbRowe.BYTES_FLOAT, ens_bytes))
corr[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 10, RtbRowe.BYTES_FLOAT, ens_bytes))
beam_vel[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 11, RtbRowe.BYTES_FLOAT, ens_bytes))
beam_vel[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 12, RtbRowe.BYTES_FLOAT, ens_bytes))
instr_vel[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 13, RtbRowe.BYTES_FLOAT,
ens_bytes))
instr_vel[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 14, RtbRowe.BYTES_FLOAT,
ens_bytes))
earth_vel[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 15, RtbRowe.BYTES_FLOAT,
ens_bytes))
earth_vel[1] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 16, RtbRowe.BYTES_FLOAT,
ens_bytes))
elif self.num_beams == 1:
snr[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 1, RtbRowe.BYTES_FLOAT, ens_bytes))
depth[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 2, RtbRowe.BYTES_FLOAT, ens_bytes))
pings[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 3, RtbRowe.BYTES_FLOAT, ens_bytes))
amp[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 4, RtbRowe.BYTES_FLOAT, ens_bytes))
corr[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 5, RtbRowe.BYTES_FLOAT, ens_bytes))
beam_vel[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 6, RtbRowe.BYTES_FLOAT, ens_bytes))
instr_vel[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 7, RtbRowe.BYTES_FLOAT, ens_bytes))
earth_vel[0] = (RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 8, RtbRowe.BYTES_FLOAT, ens_bytes))
# Add the data to the numpy array [:num_beams, ens_index]
self.snr[:self.num_beams, ens_index] = snr.T
self.depth[:self.num_beams, ens_index] = depth.T
self.pings[:self.num_beams, ens_index] = pings.T
self.amp[:self.num_beams, ens_index] = amp.T
self.corr[:self.num_beams, ens_index] = corr.T
self.beam_vel[:self.num_beams, ens_index] = beam_vel.T
self.instr_vel[:self.num_beams, ens_index] = instr_vel.T
self.earth_vel[:self.num_beams, ens_index] = earth_vel.T
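# Illustrative use (variable names hypothetical): decode Range Tracking for one
# ensemble and read the per-beam range.
#   rt = RT(num_ens=n_ens, num_beams=4)
#   rt.decode(ens_bytes=ens_payload, ens_index=i)
#   per_beam_range = rt.depth[:rt.num_beams, i]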
class Gage:
"""
Gage Height data from the ensemble dataset.
"""
def __init__(self, num_ens: int):
"""
Initialize all the values.
Arrays are preallocated with NaN and filled as each ensemble is decoded.
:param num_ens: Number of ensembles in the file.
"""
self.status = RtbRowe.nans(num_ens)
self.avg_range = RtbRowe.nans(num_ens)
self.sd = RtbRowe.nans(num_ens)
self.avg_sn = RtbRowe.nans(num_ens)
self.n = RtbRowe.nans(num_ens)
self.salinity = RtbRowe.nans(num_ens)
self.pressure = RtbRowe.nans(num_ens)
self.depth = RtbRowe.nans(num_ens)
self.water_temp = RtbRowe.nans(num_ens)
self.backplane_temp = RtbRowe.nans(num_ens)
self.speed_of_sound = RtbRowe.nans(num_ens)
self.heading = RtbRowe.nans(num_ens)
self.pitch = RtbRowe.nans(num_ens)
self.roll = RtbRowe.nans(num_ens)
self.avg_s = RtbRowe.nans(num_ens)
self.avg_n1 = RtbRowe.nans(num_ens)
self.avg_n2 = RtbRowe.nans(num_ens)
self.gain_frac = RtbRowe.nans(num_ens)
self.pings = RtbRowe.nans(num_ens)
self.snr_thresh = RtbRowe.nans(num_ens)
self.gain_thresh = RtbRowe.nans(num_ens)
self.stat_thresh = RtbRowe.nans(num_ens)
self.xmt_cycles = RtbRowe.nans(num_ens)
self.depth_offset = RtbRowe.nans(num_ens)
def decode_data(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the Gage Height data for this ensemble.
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Index into array.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
self.status[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 0, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.avg_range[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 1, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.sd[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 2, RtbRowe.BYTES_FLOAT, ens_bytes)
self.avg_sn[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 3, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.n[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 4, RtbRowe.BYTES_FLOAT, ens_bytes)
self.salinity[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 5, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.pressure[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 6, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.depth[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 7, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.water_temp[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 8, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.backplane_temp[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 9,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.speed_of_sound[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 10,
RtbRowe.BYTES_FLOAT, ens_bytes)
self.heading[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 11, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.pitch[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 12, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.roll[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 13, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.avg_s[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 14, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.avg_n1[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 15, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.avg_n2[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 16, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.gain_frac[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 17, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.pings[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 18, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.snr_thresh[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 19, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.gain_thresh[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 20, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.stat_thresh[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 21, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.xmt_cycles[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 22, RtbRowe.BYTES_FLOAT,
ens_bytes)
self.depth_offset[ens_index] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 23, RtbRowe.BYTES_FLOAT,
ens_bytes)
class RiverBT:
"""
River Bottom Track data from the ensemble dataset.
"""
def __init__(self, num_ens: int, num_subsystems: int):
"""
Initialize all the values.
Arrays are preallocated with NaN and filled as each ensemble is decoded.
:param num_ens: Number of ensembles in the file.
:param num_subsystems: Maximum number of subsystems per ensemble.
"""
self.num_subsystems = RtbRowe.nans(num_ens) # Number of subsystems to decode
self.ping_count = RtbRowe.nans([num_ens, num_subsystems]) # Pings averaged
self.status = RtbRowe.nans([num_ens, num_subsystems]) # Data status
self.beams = RtbRowe.nans([num_ens, num_subsystems]) # Number of beams
self.nce = RtbRowe.nans([num_ens, num_subsystems]) # Number of code elements
self.repeats_n = RtbRowe.nans([num_ens, num_subsystems]) # Number of code repeats
self.cpce = RtbRowe.nans([num_ens, num_subsystems]) # Codes per code elements
self.bb = RtbRowe.nans([num_ens, num_subsystems]) # Broadband
self.ll = RtbRowe.nans([num_ens, num_subsystems])
self.beam_mux = RtbRowe.nans([num_ens, num_subsystems]) # Beam Mux setup
self.nb = RtbRowe.nans([num_ens, num_subsystems]) # Narrowband
self.ping_sec = RtbRowe.nans([num_ens, num_subsystems]) # Ping time in seconds
self.heading = RtbRowe.nans([num_ens, num_subsystems]) # Heading 0 to 360
self.pitch = RtbRowe.nans([num_ens, num_subsystems]) # Pitch -90 to 90
self.roll = RtbRowe.nans([num_ens, num_subsystems]) # Roll -180 to 180
self.water_temp = RtbRowe.nans([num_ens, num_subsystems]) # Water Temperature in C
self.backplane_temp = RtbRowe.nans([num_ens, num_subsystems]) # Internal System temperature in C
self.salinity = RtbRowe.nans([num_ens, num_subsystems]) # Salinity in PPT
self.pressure = RtbRowe.nans([num_ens, num_subsystems]) # Pressure in Pascal
self.depth = RtbRowe.nans([num_ens, num_subsystems]) # Pressure converted to m
self.speed_of_sound = RtbRowe.nans([num_ens, num_subsystems]) # Speed of Sound in m/s
self.mx = RtbRowe.nans([num_ens, num_subsystems])
self.my = RtbRowe.nans([num_ens, num_subsystems])
self.mz = RtbRowe.nans([num_ens, num_subsystems])
self.gp = RtbRowe.nans([num_ens, num_subsystems])
self.gr = RtbRowe.nans([num_ens, num_subsystems])
self.gz = RtbRowe.nans([num_ens, num_subsystems])
self.samples_per_sec = RtbRowe.nans([num_ens, num_subsystems]) # Samples per second
self.system_freq_hz = RtbRowe.nans([num_ens, num_subsystems]) # System frequency in Hz
self.bt_range = RtbRowe.nans([num_ens, num_subsystems]) # Bottom Track Range in m
self.bt_snr = RtbRowe.nans([num_ens, num_subsystems]) # Bottom Track SNR in dB
self.bt_amp = RtbRowe.nans([num_ens, num_subsystems]) # Bottom Track Amplitude in dB
self.bt_noise_amp_bp = RtbRowe.nans([num_ens, num_subsystems]) # Noise in Amplitude Back Porch
self.bt_noise_amp_fp = RtbRowe.nans([num_ens, num_subsystems]) # Noise in Amplitude Front Porch
self.bt_corr = RtbRowe.nans([num_ens, num_subsystems]) # Bottom Track Correlation in fraction
self.vel = RtbRowe.nans([num_ens, num_subsystems]) # Bottom Track Beam Velocity in m/s
self.beam_n = RtbRowe.nans([num_ens, num_subsystems])
def decode_data(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the River Bottom Track data.
:param ens_bytes: Byte array containing the ensemble data.
:param ens_index: Ensemble index in the file.
:param name_len: Length of the name of the dataset.
"""
# Determine where to start in the ensemble data
packet_pointer = RtbRowe.get_base_data_size(name_len)
num_subsystems = int(RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * 0, RtbRowe.BYTES_FLOAT,
ens_bytes))
self.num_subsystems[ens_index] = num_subsystems
# Start of the data
index = 1
# Create a temp list to hold all the values for each subsystem
# Accumulate the list then add it to the data type
# Index will keep track of where we are located in the data
ping_count = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
ping_count[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT,
ens_bytes)
index += 1
self.ping_count[ens_index, :num_subsystems] = ping_count.T
status = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
status[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.status[ens_index, :num_subsystems] = status.T
beams = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
beams[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.beams[ens_index, :num_subsystems] = beams.T
nce = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
nce[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.nce[ens_index, :num_subsystems] = nce.T
repeats_n = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
repeats_n[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT,
ens_bytes)
index += 1
self.repeats_n[ens_index, :num_subsystems] = repeats_n.T
cpce = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
cpce[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.cpce[ens_index, :num_subsystems] = cpce.T
bb = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
bb[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.bb[ens_index, :num_subsystems] = bb.T
ll = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
ll[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.ll[ens_index, :num_subsystems] = ll.T
beam_mux = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
beam_mux[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT,
ens_bytes)
index += 1
self.beam_mux[ens_index, :num_subsystems] = beam_mux.T
nb = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
nb[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.nb[ens_index, :num_subsystems] = nb.T
ps = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
ps[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.ping_sec[ens_index, :num_subsystems] = ps.T
hdg = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
hdg[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.heading[ens_index, :num_subsystems] = hdg.T
ptch = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
ptch[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.pitch[ens_index, :num_subsystems] = ptch.T
roll = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
roll[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.roll[ens_index, :num_subsystems] = roll.T
wt = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
wt[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.water_temp[ens_index, :num_subsystems] = wt.T
sys_temp = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
sys_temp[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT,
ens_bytes)
index += 1
self.backplane_temp[ens_index, :num_subsystems] = sys_temp.T
sal = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
sal[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.salinity[ens_index, :num_subsystems] = sal.T
pres = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
pres[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.pressure[ens_index, :num_subsystems] = pres.T
depth = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
depth[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.depth[ens_index, :num_subsystems] = depth.T
sos = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
sos[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.speed_of_sound[ens_index, :num_subsystems] = sos.T
mx = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
mx[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.mx[ens_index, :num_subsystems] = mx.T
my = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
my[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.my[ens_index, :num_subsystems] = my.T
mz = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
mz[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.mz[ens_index, :num_subsystems] = mz.T
gp = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
gp[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.gp[ens_index, :num_subsystems] = gp.T
gr = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
gr[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.gr[ens_index, :num_subsystems] = gr.T
gz = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
gz[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.gz[ens_index, :num_subsystems] = gz.T
sps = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
sps[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.samples_per_sec[ens_index, :num_subsystems] = sps.T
freq = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
freq[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.system_freq_hz[ens_index, :num_subsystems] = freq.T
bt_range = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
bt_range[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT,
ens_bytes)
index += 1
self.bt_range[ens_index, :num_subsystems] = bt_range.T
snr = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
snr[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.bt_snr[ens_index, :num_subsystems] = snr.T
amp = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
amp[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.bt_amp[ens_index, :num_subsystems] = amp.T
noise_bp = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
noise_bp[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT,
ens_bytes)
index += 1
self.bt_noise_amp_bp[ens_index, :num_subsystems] = noise_bp.T
noise_fp = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
noise_fp[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT,
ens_bytes)
index += 1
self.bt_noise_amp_fp[ens_index, :num_subsystems] = noise_fp.T
corr = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
corr[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.bt_corr[ens_index, :num_subsystems] = corr.T
vel = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
vel[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.vel[ens_index, :num_subsystems] = vel.T
beam_n = np.empty(shape=[num_subsystems], dtype=np.float)
for sb in range(num_subsystems):
beam_n[sb] = RtbRowe.get_float(packet_pointer + RtbRowe.BYTES_FLOAT * index, RtbRowe.BYTES_FLOAT, ens_bytes)
index += 1
self.beam_n[ens_index, :num_subsystems] = beam_n.T
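# Note on the decode pattern above: every field is stored as num_subsystems
# consecutive floats, and `index` walks forward through the payload in exactly
# the order the attributes are assigned (ping_count, status, beams, ..., vel,
# beam_n), so the read order here must stay in sync with the order the
# instrument writes the fields.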
class Surface:
"""
Surface cell data are included to accommodate RiverRay and RiverPro. pd0_read
sets these values to NaN when reading Rio Grande or StreamPro data.
"""
def __init__(self, num_ens: int, num_beams: int, max_surface_bins: int):
"""
Initialize all the values.
:param num_ens: Number of ensembles in the file.
:param num_beams: Number of beams on the system.
:param max_surface_bins: Number of surface bins.
"""
self.no_cells = RtbRowe.nans(num_ens) # Number of surface cells in the
# ensemble
self.cell_size_cm = RtbRowe.nans(num_ens) # Cell size in cm
self.dist_bin1_cm = RtbRowe.nans(num_ens) # Distance to center of cell 1 in cm
self.vel_mps = np.tile([np.nan], [num_beams, max_surface_bins, num_ens]) # 3D array of velocity data in each
# cell and ensemble
self.corr = RtbRowe.nans([num_beams, max_surface_bins, num_ens]) # 3D array of correlation data for
# each beam, cell, and ensemble
self.pergd = RtbRowe.nans([num_beams, max_surface_bins, num_ens]) # 3D array of percent good data for
# each beam, cell, and ensemble
self.rssi = RtbRowe.nans([num_beams, max_surface_bins, num_ens]) # 3D array of signal strength data
# for each beam, cell, and ensemble
class Nmea:
"""
NMEA data from the ensemble dataset.
Store the raw NMEA strings.
Also store the last NMEA values in the message.
"""
def __init__(self, num_ens: int):
"""
Initialize all the values.
Raw NMEA strings are stored as per-ensemble lists; decoded numeric fields
are preallocated as NaN arrays and filled as each ensemble is decoded.
:param num_ens: Number of ensembles in the file.
"""
self.gga = [''] * num_ens
self.gsa = [''] * num_ens
self.vtg = [''] * num_ens
self.dbt = [''] * num_ens
self.hdt = [''] * num_ens
# GGA
self.gga_delta_time = RtbRowe.nans(num_ens) # float
self.gga_header = RtbRowe.nans(num_ens) # str
self.utc = RtbRowe.nans(num_ens) # float
self.lat_deg = RtbRowe.nans(num_ens) # float
self.lat_ref = RtbRowe.nans(num_ens) # str
self.lon_deg = RtbRowe.nans(num_ens) # float
self.lon_ref = RtbRowe.nans(num_ens) # str
self.corr_qual = RtbRowe.nans(num_ens) # float
self.num_sats = RtbRowe.nans(num_ens) # int
self.hdop = RtbRowe.nans(num_ens) # float
self.alt = RtbRowe.nans(num_ens) # float
self.alt_unit = RtbRowe.nans(num_ens) # str
self.geoid = RtbRowe.nans(num_ens) # str
self.geoid_unit = RtbRowe.nans(num_ens) # str
self.d_gps_age = RtbRowe.nans(num_ens) # float
self.ref_stat_id = RtbRowe.nans(num_ens) # float
# VTG
self.vtg_delta_time = RtbRowe.nans(num_ens) # float
self.vtg_header = RtbRowe.nans(num_ens) # str
self.course_true = RtbRowe.nans(num_ens) # float
self.true_indicator = RtbRowe.nans(num_ens) # str
self.course_mag = RtbRowe.nans(num_ens) # float
self.mag_indicator = RtbRowe.nans(num_ens) # str
self.speed_knots = RtbRowe.nans(num_ens) # float
self.knots_indicator = RtbRowe.nans(num_ens) # str
self.speed_kph = RtbRowe.nans(num_ens) # float
self.kph_indicator = RtbRowe.nans(num_ens) # str
self.mode_indicator = RtbRowe.nans(num_ens) # str
# HDT
self.hdt_header = RtbRowe.nans(num_ens)
self.heading = RtbRowe.nans(num_ens)
self.rel_true_north = RtbRowe.nans(num_ens)
# Used to keep track of the last nmea message to process
self.last_gga = None
self.last_vtg = None
self.last_hdt = None
def decode(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the NMEA dataset. This will be the raw NMEA messages
from the ADCP containing GPS data.
:param ens_bytes: Bytes for dataset.
:param ens_index: Ensemble index in the file.
:param name_len: Name length to get the start location.
"""
# Clear the previous NMEA messages
self.last_gga = None
self.last_vtg = None
self.last_hdt = None
packet_pointer = RtbRowe.get_base_data_size(name_len)
# Convert all the messages to a string
nmea_str = str(ens_bytes[packet_pointer:], "UTF-8")
# Accumulate NMEA messages
for msg in nmea_str.split():
self.decode_nmea(msg, ens_index)
# Decode the last messages
if self.last_gga:
self.decode_gga(self.last_gga, ens_index)
if self.last_vtg:
self.decode_vtg(self.last_vtg, ens_index)
if self.last_hdt:
self.decode_hdt(self.last_hdt, ens_index)
def decode_nmea(self, nmea_str: str, ens_index: int):
"""
Verify the NMEA message is by checking the checksum.
Then add the message to the list and decode each message.
:param nmea_str: NMEA string to decode.
:param ens_index: Ensemble index in the file.
"""
# Verify the NMEA string is good
if Nmea.check_nmea_checksum(nmea_str):
# Add each message to the list
# Decode the data
if 'gga' in nmea_str or 'GGA' in nmea_str:
self.gga[ens_index] += nmea_str
self.last_gga = nmea_str
if 'gsa' in nmea_str or 'GSA' in nmea_str:
self.gsa[ens_index] += nmea_str
if 'vtg' in nmea_str or 'VTG' in nmea_str:
self.vtg[ens_index] += nmea_str
self.last_vtg = nmea_str
if 'dbt' in nmea_str or 'DBT' in nmea_str:
self.dbt[ens_index] += nmea_str
if 'hdt' in nmea_str or 'HDT' in nmea_str:
self.hdt[ens_index] += nmea_str
self.last_hdt = nmea_str
def decode_gga(self, nmea_str: str, ens_index: int):
"""
Decode GGA message. Update the variables.
Store the last result for the ensemble.
:param nmea_str: NMEA string.
:param ens_index: Ensemble index.
"""
try:
if nmea_str:
temp_array = np.array(nmea_str.split(','))
temp_array[temp_array == '999.9'] = ''
# self.gga_delta_time = delta_time
self.gga_header[ens_index] = temp_array[0]
self.utc[ens_index] = float(temp_array[1])
lat_str = temp_array[2]
lat_deg = float(lat_str[0:2])
lat_deg = lat_deg + float(lat_str[2:]) / 60
self.lat_deg[ens_index] = lat_deg
self.lat_ref[ens_index] = temp_array[3]
lon_str = temp_array[4]
lon_num = float(lon_str)
lon_deg = np.floor(lon_num / 100)
lon_deg = lon_deg + (((lon_num / 100.) - lon_deg) * 100.) / 60.
self.lon_deg[ens_index] = lon_deg
self.lon_ref[ens_index] = temp_array[5]
self.corr_qual[ens_index] = float(temp_array[6])
self.num_sats[ens_index] = float(temp_array[7])
self.hdop[ens_index] = float(temp_array[8])
self.alt[ens_index] = float(temp_array[9])
self.alt_unit[ens_index] = temp_array[10]
self.geoid[ens_index] = temp_array[11]
self.geoid_unit[ens_index] = temp_array[12]
self.d_gps_age[ens_index] = float(temp_array[13])
idx_star = temp_array[14].find('*')
self.ref_stat_id[ens_index] = float(temp_array[15][:idx_star])
except (ValueError, EOFError, IndexError):
pass
def decode_vtg(self, nmea_str: str, ens_index: int):
"""
Decode the VTG message and set all the variables.
Decode the last message.
:param nmea_str: NMEA string.
:param ens_index: Ensemble index.
"""
try:
if nmea_str:
temp_array = np.array(nmea_str.split(','))
temp_array[temp_array == '999.9'] = ''
# self.vtg_delta_time = delta_time
self.vtg_header[ens_index] = temp_array[0]
self.course_true[ens_index] = Nmea.valid_number(temp_array[1])
self.true_indicator[ens_index] = temp_array[2]
self.course_mag[ens_index] = Nmea.valid_number(temp_array[3])
self.mag_indicator[ens_index] = temp_array[4]
self.speed_knots[ens_index] = Nmea.valid_number(temp_array[5])
self.knots_indicator[ens_index] = temp_array[6]
self.speed_kph[ens_index] = Nmea.valid_number(temp_array[7])
self.kph_indicator[ens_index] = temp_array[8]
idx_star = temp_array[9].find('*')
self.mode_indicator[ens_index] = temp_array[9][:idx_star]
except (ValueError, EOFError, IndexError):
pass
def decode_hdt(self, nmea_str: str, ens_index: int):
"""
Decode the HDT message and set all the variables.
Decode the last message.
:param nmea_str: NMEA string.
:param ens_index: Ensemble Index.
"""
try:
if nmea_str:
temp_array = np.array(nmea_str.split(','))
temp_array[temp_array == '999.9'] = ''
# self.vtg_delta_time = delta_time
self.hdt_header[ens_index] = temp_array[0]
self.heading[ens_index] = Nmea.valid_number(temp_array[1])
idx_star = temp_array[2].find('*')
self.rel_true_north[ens_index] = temp_array[2][:idx_star]
except (ValueError, EOFError, IndexError):
pass
@staticmethod
def valid_number(data_in):
"""
Check to see if data_in can be converted to float.
:param data_in: String to be converted to float.
:return: data_in as a float, or NaN if the conversion is not possible.
"""
try:
data_out = float(data_in)
except ValueError:
data_out = np.nan
return data_out
@staticmethod
def check_nmea_checksum(nmea_str: str):
"""
Calculate the NMEA checksum. Verify the
checksum value matches the given value.
:param nmea_str: NMEA string.
:return: True if the computed checksum matches the sentence's checksum.
"""
try:
# Remove newline and spaces at the end
nmea_str = nmea_str.rstrip('\n')
# Get the checksum value
checksum = nmea_str[len(nmea_str) - 2:]
checksum = int(checksum, 16)
# Get the data from the string
nmea_data = re.sub("(\n|\r\n)", "", nmea_str[nmea_str.find("$") + 1:nmea_str.find("*")])
# Calculate the checksum
calc_checksum = 0
for c in nmea_data:
calc_checksum ^= ord(c)
calc_checksum = calc_checksum & 0xFF
# Verify the checksum matches
if calc_checksum == checksum:
return True
return False
except Exception as ex:
logging.error(ex)
return False
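# Illustrative check (sentence chosen so the arithmetic works out): the NMEA
# checksum is the XOR of every character between '$' and '*', compared with the
# two hex digits after '*'. For "GPHDT,274.07,T" the running XOR is 0x03, so:
#   Nmea.check_nmea_checksum("$GPHDT,274.07,T*03")   # -> True
#   Nmea.check_nmea_checksum("$GPHDT,274.07,T*04")   # -> False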
class Gps(object):
"""
Class to hold GPS data from WinRiver.
RTB format does not include all this information.
Specifically the velocities. I typically use a
geodetic calculator like pygeodesy to do these
calculations.
Attributes
----------
alt_m: np.array(float)
Altitude in meters
gga_diff: np.array(int)
Differential correction indicator
gga_hdop: np.array(float)
Horizontal dilution of precision
gga_n_stats: np.array(int)
Number of satellites
gga_vel_e_mps: np.array(float)
Velocity in east direction from GGA data
gga_vel_n_mps: np.array(float)
Velocity in north direction from GGA data
gsa_p_dop: np.array(int)
Position dilution of precision
gsa_sat: np.array(int)
Satellites
gsa_v_dop: np.array(float)
Vertical dilution of precision
lat_deg: np.array(float)
Latitude in degrees
long_deg: np.array(float)
Longitude in degrees
vtg_vel_e_mps: np.array(float)
Velocity in east direction from VTG data
vtg_vel_n_mps: np.array(float)
Velocity in north direction from VTG data
"""
def __init__(self, num_ens: int):
"""
Initialize instance variables.
:param num_ens: Number of ensembles
"""
self.alt_m = RtbRowe.nans(num_ens)
self.gga_diff = RtbRowe.nans(num_ens)
self.gga_hdop = RtbRowe.nans(num_ens)
self.gga_n_stats = RtbRowe.nans(num_ens)
self.gga_vel_e_mps = RtbRowe.nans(num_ens)
self.gga_vel_n_mps = RtbRowe.nans(num_ens)
self.gsa_p_dop = RtbRowe.nans(num_ens)
self.gsa_sat = RtbRowe.nans([num_ens, 6])
self.gsa_v_dop = RtbRowe.nans(num_ens)
self.lat_deg = RtbRowe.nans(num_ens)
self.long_deg = RtbRowe.nans(num_ens)
self.vtg_vel_e_mps = RtbRowe.nans(num_ens)
self.vtg_vel_n_mps = RtbRowe.nans(num_ens)
# def decode(self, ens_bytes: list, ens_index: int, num_ens: int, name_len: int = 8):
def decode(self, ens_bytes: list, ens_index: int, name_len: int = 8):
"""
Decode the NMEA dataset. This will be the raw NMEA messages
from the ADCP containing GPS data.
:param ens_bytes: Bytes for dataset.
:param ens_index: Ensemble index in the file.
# :param num_ens: Number of ensembles in the file.
:param name_len: Name length to get the start location.
"""
packet_pointer = RtbRowe.get_base_data_size(name_len)
# Convert all the messages to a string
nmea_str = str(ens_bytes[packet_pointer:], "UTF-8")
# Decode each NMEA message
for msg in nmea_str.split():
# Verify the NMEA string is good
if Nmea.check_nmea_checksum(msg):
# Add each message to the list
# Decode the data
if 'gga' in msg or 'GGA' in msg:
self.decode_gga(nmea_str, ens_index)
def decode_gga(self, nmea_str: str, ens_index: int):
"""
Decode GGA message. Update the variables.
:param nmea_str: NMEA string.
:param ens_index: Ensemble index.
"""
try:
if nmea_str:
temp_array = np.array(nmea_str.split(','))
temp_array[temp_array == '999.9'] = ''
# self.gga_delta_time = delta_time
# self.gga_header.append(temp_array[0])
# self.utc.append(float(temp_array[1]))
lat_str = temp_array[2]
lat_deg = float(lat_str[0:2])
lat_deg = lat_deg + float(lat_str[2:]) / 60
self.lat_deg[ens_index] = lat_deg
# self.lat_ref.append(temp_array[3])
lon_str = temp_array[4]
lon_num = float(lon_str)
lon_deg = np.floor(lon_num / 100)
lon_deg = lon_deg + (((lon_num / 100.) - lon_deg) * 100.) / 60.
self.long_deg[ens_index] = lon_deg
# self.lon_ref.append(temp_array[5])
self.gga_diff[ens_index] = float(temp_array[6])
self.gga_n_stats[ens_index] = float(temp_array[7])
self.gga_hdop[ens_index] = float(temp_array[8])
self.alt_m[ens_index] = float(temp_array[9])
# self.alt_unit.append(temp_array[10])
# self.geoid.append(temp_array[11])
# self.geoid_unit.append(temp_array[12])
# self.d_gps_age.append(float(temp_array[13]))
# idx_star = temp_array[14].find('*')
# self.ref_stat_id.append(float(temp_array[15][:idx_star]))
except (ValueError, EOFError, IndexError):
pass
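# Worked example of the GGA position conversion above (values illustrative):
# NMEA encodes latitude as ddmm.mmmm and longitude as dddmm.mmmm, so
#   "4916.45"  -> 49 + 16.45/60  = 49.274167 degrees
#   "12311.12" -> 123 + 11.12/60 = 123.185333 degrees
# which matches float(lat_str[0:2]) + float(lat_str[2:]) / 60 for latitude and
# np.floor(lon/100) plus the remaining minutes divided by 60 for longitude.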
class Gps2(object):
"""Class to hold GPS data for WinRiver II.
Attributes
----------
gga_delta_time: np.array(float)
Time between ping and gga data
gga_header: list
GGA header
gga_sentence: list
GGA sentence
utc: np.array(float)
UTC time
lat_deg: np.array(float)
Latitude in degrees
lat_ref: list
Latitude reference
lon_deg: np.array(float)
Longitude in degrees
lon_ref: list
Longitude reference
corr_qual: np.array(float)
Differential quality indicator
num_sats: np.array(int)
Number of satellites
hdop: np.array(float)
Horizontal dilution of precision
alt: np.array(float)
Altitude
alt_unit: list
Units for altitude
geoid: np.array(float)
Geoid height
geoid_unit: list
Units for geoid height
d_gps_age: np.array(float)
Age of differential correction
ref_stat_id: np.array(float)
Reference station ID
vtg_delta_time: np.array(float)
Time between ping and VTG data
vtg_header: list
VTG header
vtg_sentence: list
VTG sentence
course_true: np.array(float)
Course relative to true north
true_indicator: list
True north indicator
course_mag: np.array(float)
Course relative to magnetic north
mag_indicator: list
Magnetic north indicator
speed_knots: np.array(float)
Speed in knots
knots_indicator: list
Knots indicator
speed_kph: np.array(float)
Speed in kilometers per hour
kph_indicator: list
Kilometers per hour indicator
mode_indicator: list
Mode indicator
dbt_delta_time: np.array(float)
Time between ping and echo sounder data
dbt_header: list
Echo sounder header
depth_ft: np.array(float)
Depth in ft from echo sounder
ft_indicator: list
Feet indicator
depth_m: np.array(float)
Depth in meters from echo sounder
m_indicator: list
Meters indicator
depth_fath: np.array(float)
Depth in fathoms from echo sounder
fath_indicator: list
Fathoms indicator
hdt_delta_time: np.array(float)
Time between ping and external heading data
hdt_header: list
External heading header
heading_deg: np.array(float)
Heading in degrees from external heading
h_true_indicator: list
Heading indicator to true north
gga_velE_mps: np.array(float)
Velocity in east direction in m/s from GGA for WR
gga_velN_mps: np.array(float)
Velocity in north direction in m/s from GGA for WR
vtg_velE_mps: np.array(float)
Velocity in east direction in m/s from VTG for WR
vtg_velN_mps: np.array(float)
Velocity in north direction in m/s from VTG for WR
"""
def __init__(self, num_ens: int):
"""
Initialize instance variables.
:param num_ens: Number of ensembles
"""
self.gga_delta_time = np.full([num_ens, 20], np.nan)
self.gga_header = [x[:] for x in [[''] * 20] * num_ens]
self.gga_sentence = [x[:] for x in [[''] * 20] * num_ens]
self.utc = np.full([num_ens, 20], np.nan)
self.lat_deg = np.zeros([num_ens, 20])
# self.lat_deg = [[np.nan]] * num_ens
self.lat_ref = [x[:] for x in [[''] * 20] * num_ens]
self.lon_deg = np.zeros([num_ens, 20])
self.lon_ref = [x[:] for x in [[''] * 20] * num_ens]
self.corr_qual = np.full([num_ens, 20], np.nan)
# self.corr_qual = [[np.nan]] * num_ens
self.num_sats = np.full([num_ens, 20], np.nan)
# --------------
# Importing the neccessary packages
import numpy as np
import cv2
from matplotlib import pyplot as plt
# Load the image at 'path' using the OpenCV library cv2
image = cv2.imread(path)
# Convert the image to grayscale
image_gray=cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Split the image into 5000 cells, each of size 20x20
cells=[np.hsplit(row,100) for row in np.vsplit(image_gray, 50)]
cells_array=np.array(cells)
# Split the cells into flattened train and test data
train_data=cells_array[:,:50].reshape(-1, 400).astype(np.float32)
test_data=cells_array[:,50:100].reshape(-1,400).astype(np.float32)
# Create the labels, instantiate the KNN model, and train it on the training data
labels=np.arange(10)
train_labels=np.repeat(labels,250)[:, np.newaxis]
test_labels=train_labels.copy()
knn=cv2.ml.KNearest_create()
knn.train(train_data,cv2.ml.ROW_SAMPLE, train_labels)
# Test the model on test data
_, result,_,_=knn.findNearest(test_data, k=5)
no_of_matches=result==test_labels
correct_matches=np.count_nonzero(no_of_matches)
accuracy=(correct_matches/result.size)*100
print(accuracy)
# Save the knn model (Modify the variables according to your code)
save_path = user_data_dir + "knn_data.npz"
np.savez(save_path, train= train_data, train_labels= train_labels)
# Load the saved model
with np.load(save_path) as data:
train=data['train']
train_labels=data['train_labels']
knn_saved=cv2.ml.KNearest_create()
knn_saved.train(train,cv2.ml.ROW_SAMPLE, train_labels)
# Load the test image saved in 'path1'
test_img=cv2.imread(path1)
test_img_gray=cv2.cvtColor(test_img,cv2.COLOR_BGR2GRAY)
blur_sg=cv2.GaussianBlur(test_img_gray, (5,5),0)
(T,th)=cv2.threshold(blur_sg, 155, 255, cv2.THRESH_BINARY_INV)
th=th[400:880, 200:750]
test_img_resize=cv2.resize(th,(20,20))
x=np.array(test_img_resize)
test_img_reshape=x.reshape(-1,400).astype(np.float32)
_,result,_,_=knn_saved.findNearest(test_img_reshape,k=1)
fig, ax=plt.subplots(nrows=1, ncols=2, figsize=(15,7))
ax[0].imshow(test_img)
ax[0].title.set_text('Original Image')
ax[1].imshow(test_img_resize, 'gray')
ax[1].title.set_text('Preprocessed Image')
fig.show();
print(int(result))
# Create deskew function
def deskew(input_image):
SZ=20
m=cv2.moments(input_image)
if abs(m['mu02']) < 1e-2:
return input_image.copy()
skew=m['mu11']/m['mu02']
M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
# Warp with the inverse skew transform and return the straightened image
# (assumed completion of the truncated body, following the standard OpenCV deskew recipe)
input_image = cv2.warpAffine(input_image, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
return input_image
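# Usage sketch (illustrative, not part of the original steps): deskewing each
# 20x20 digit before flattening it typically improves the KNN accuracy, e.g.
#   x = deskew(test_img_resize)
#   test_img_reshape = x.reshape(-1, 400).astype(np.float32)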
import os
import json
from collections import OrderedDict
import warnings
import numpy as np
from .camera_model import CameraModel
from .camera_model import is_rotation_matrix
from pymvg.util import pretty_json_dump, normalize_M, \
parse_radfile, my_rq, center
from pymvg.align import estsimt
class MultiCameraSystem:
def __init__(self,cameras):
self._cameras=OrderedDict()
for camera in cameras:
self.append(camera)
def append(self,camera):
assert isinstance(camera, CameraModel)
name = camera.name
if name in self._cameras:
raise ValueError('Cannot create MultiCameraSystem with '
'multiple identically-named cameras.')
self._cameras[name]=camera
@classmethod
def from_dict(cls, d):
cam_dict_list = d['camera_system']
cams = [CameraModel.from_dict(cd) for cd in cam_dict_list]
return MultiCameraSystem( cameras=cams )
def get_pymvg_str( self ):
d = self.to_dict()
d['__pymvg_file_version__']='1.0'
buf = pretty_json_dump(d)
return buf
def save_to_pymvg_file( self, fname ):
buf = self.get_pymvg_str()
with open(fname,mode='w') as fd:
fd.write(buf)
@classmethod
def from_pymvg_str(cls, buf):
d = json.loads(buf)
assert d['__pymvg_file_version__']=='1.0'
cam_dict_list = d['camera_system']
cams = [CameraModel.from_dict(cd) for cd in cam_dict_list]
return MultiCameraSystem( cameras=cams )
@classmethod
def from_pymvg_file(cls, fname):
with open(fname,mode='r') as fd:
buf = fd.read()
return MultiCameraSystem.from_pymvg_str(buf)
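# Illustrative round trip (file name hypothetical):
#   system.save_to_pymvg_file('cams.json')
#   assert MultiCameraSystem.from_pymvg_file('cams.json') == system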
@classmethod
def from_mcsc(cls, dirname ):
'''create MultiCameraSystem from output directory of MultiCamSelfCal'''
# FIXME: This is a bit convoluted because it's been converted
# from multiple layers of internal code. It should really be
# simplified and cleaned up.
do_normalize_pmat=True
all_Pmat = {}
all_Res = {}
all_K = {}
all_distortion = {}
opj = os.path.join
with open(opj(dirname,'camera_order.txt'),mode='r') as fd:
cam_ids = fd.read().strip().split('\n')
with open(os.path.join(dirname,'Res.dat'),'r') as res_fd:
for i, cam_id in enumerate(cam_ids):
fname = 'camera%d.Pmat.cal'%(i+1)
pmat = np.loadtxt(opj(dirname,fname)) # 3 rows x 4 columns
if do_normalize_pmat:
pmat_orig = pmat
pmat = normalize_M(pmat)
all_Pmat[cam_id] = pmat
all_Res[cam_id] = map(int,res_fd.readline().split())
# load non linear parameters
rad_files = [ f for f in os.listdir(dirname) if f.endswith('.rad') ]
for cam_id_enum, cam_id in enumerate(cam_ids):
filename = os.path.join(dirname,
'basename%d.rad'%(cam_id_enum+1,))
if os.path.exists(filename):
K, distortion = parse_radfile(filename)
all_K[cam_id] = K
all_distortion[cam_id] = distortion
else:
if len(rad_files):
raise RuntimeError(
'.rad files present but none named "%s"'%filename)
warnings.warn('no non-linear data (e.g. radial distortion) '
'in calibration for %s'%cam_id)
all_K[cam_id] = None
all_distortion[cam_id] = None
cameras = []
for cam_id in cam_ids:
w,h = all_Res[cam_id]
Pmat = all_Pmat[cam_id]
M = Pmat[:,:3]
K,R = my_rq(M)
if not is_rotation_matrix(R):
# RQ may return left-handed rotation matrix. Make right-handed.
R2 = -R
K2 = -K
assert np.allclose(np.dot(K2,R2), np.dot(K,R))
K,R = K2,R2
P = np.zeros((3,4))
P[:3,:3] = K
KK = all_K[cam_id] # from rad file or None
distortion = all_distortion[cam_id]
# (ab)use PyMVG's rectification to do coordinate transform
# for MCSC's undistortion.
# The intrinsic parameters used for 3D -> 2D.
ex = P[0,0]
bx = P[0,2]
Sx = P[0,3]
ey = P[1,1]
by = P[1,2]
Sy = P[1,3]
if KK is None:
rect = np.eye(3)
KK = P[:,:3]
else:
# Parameters used to define undistortion coordinates.
fx = KK[0,0]
fy = KK[1,1]
cx = KK[0,2]
cy = KK[1,2]
rect = np.array([[ ex/fx, 0, (bx+Sx-cx)/fx ],
[ 0, ey/fy, (by+Sy-cy)/fy ],
[ 0, 0, 1 ]]).T
if distortion is None:
distortion = np.zeros((5,))
C = center(Pmat)
rot = R
t = -np.dot(rot, C)[:,0]
d = {'width':w,
'height':h,
'P':P,
'K':KK,
'R':rect,
'translation':t,
'Q':rot,
'D':distortion,
'name':cam_id,
}
cam = CameraModel.from_dict(d)
cameras.append( cam )
return MultiCameraSystem( cameras=cameras )
def __eq__(self, other):
assert isinstance( self, MultiCameraSystem )
if not isinstance( other, MultiCameraSystem ):
return False
if len(self.get_names()) != len(other.get_names()):
return False
for name in self.get_names():
if self._cameras[name] != other._cameras[name]:
return False
return True
def __ne__(self,other):
return not (self==other)
def get_names(self):
result = list(self._cameras.keys())
return result
def get_camera_dict(self):
return self._cameras
def get_camera(self,name):
return self._cameras[name]
def to_dict(self):
return {'camera_system':
[self._cameras[name].to_dict() for name in self._cameras]}
def find3d(self,pts,undistort=True):
"""Find 3D coordinate using all data given
Implements a linear triangulation method to find a 3D
point. For example, see Hartley & Zisserman section 12.2
(p.312).
By default, this function will undistort 2D points before
finding a 3D point.
"""
# for info on SVD, see Hartley & Zisserman (2003) p. 593 (see
# also p. 587)
# Construct matrices
A=[]
for name,xy in pts:
cam = self._cameras[name]
if undistort:
xy = cam.undistort( [xy] )
Pmat = cam.get_M() # Pmat is 3 rows x 4 columns
row2 = Pmat[2,:]
x,y = xy[0,:]
A.append( x*row2 - Pmat[0,:] )
A.append( y*row2 - Pmat[1,:] )
# Calculate best point
A=np.array(A)
u,d,vt=np.linalg.svd(A)
X = vt[-1,0:3]/vt[-1,3] # normalize
return X
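# Sketch of the triangulation above: each observation (name, (x, y)) contributes
# the rows x*P[2,:] - P[0,:] and y*P[2,:] - P[1,:] to A, and the homogeneous 3D
# point is the right singular vector of A for the smallest singular value (the
# last row of vt), dehomogenized by its fourth component.
# Illustrative call (camera names and pixel coordinates hypothetical):
#   X = system.find3d([('cam01', (120.0, 56.0)), ('cam02', (200.5, 41.2))])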
def find2d(self,camera_name,xyz,distorted=True):
cam = self._cameras[camera_name]
xyz = np.array(xyz)
rank1 = xyz.ndim==1
xyz = np.atleast_2d(xyz)
pix = cam.project_3d_to_pixel( xyz, distorted=distorted ).T
if rank1:
# convert back to rank1
pix = pix[:,0]
return pix
def get_aligned_copy(self, other):
"""return copy of self that is scaled, translated, and rotated to best match other"""
assert isinstance( other, MultiCameraSystem)
orig_names = self.get_names()
new_names = other.get_names()
names = set(orig_names).intersection( new_names )
if len(names) < 3:
raise ValueError('need 3 or more cameras in common to align.')
orig_points = np.array([ self._cameras[name].get_camcenter() for name in names ]).T
new_points = np.array([ other._cameras[name].get_camcenter() for name in names ]).T
s,R,t = estsimt(orig_points,new_points)
assert is_rotation_matrix(R)
new_cams = []
for name in self.get_names():
orig_cam = self._cameras[name]
new_cam = orig_cam.get_aligned_camera(s,R,t)
new_cams.append( new_cam )
result = MultiCameraSystem(new_cams)
return result
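# Illustrative use (names hypothetical): bring a freshly calibrated system into
# the coordinate frame of a reference calibration that shares at least three
# camera names:
#   aligned = new_system.get_aligned_copy(reference_system)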
def build_example_system(n=6,z=5.0):
base = CameraModel.load_camera_default()
x = np.linspace(0, 2*n, n)
theta = np.linspace(0, 2*np.pi, n)
cams = []
for i in range(n):
# cameras are spaced parallel to the x axis
center = np.array( (x[i], 0.0, z) )
# cameras are looking at +y
lookat = center + np.array( (0,1,0))
# camera up direction rotates around the y axis
up = -np.sin(theta[i]), 0, np.cos(theta[i])
import os, time, sys
import numpy as np
from pypianoroll import Multitrack, BinaryTrack, StandardTrack
import cfp
import torch
import torch.nn as nn
import torch.nn.functional as F
import argparse
def smoothing(roll):
# step1 Turn consecutively pitch labels into notes.
new_map = np.zeros(roll.shape)
min_note_frames = 3
last_midinote = 0
count = 0
for i in range(len(roll)):
midinote = np.argmax(roll[i,:])
if midinote > 0 and midinote == last_midinote:
count+= 1
else:
if count >= min_note_frames:
new_map[i-count-1:i,last_midinote] = 1
last_midinote = midinote
count = 0
note_map = new_map
else_map = roll - note_map
# Step2 Connect breakpoints adjacent to existing notes.
new_map = np.zeros(roll.shape)
for i in range(len(else_map)):
midinote = np.argmax(else_map[i,:])
if midinote > 0:
if note_map[i-1,midinote-1] > 0:
new_map[i,midinote-1] = 1
else_map[i,midinote] = 0
elif note_map[i-1,midinote+1] > 0:
new_map[i,midinote+1] = 1
else_map[i,midinote] = 0
elif (i+1)<len(else_map) and note_map[i+1,midinote-1] > 0:
new_map[i,midinote-1] = 1
else_map[i,midinote] = 0
elif (i+1)<len(else_map) and note_map[i+1,midinote+1] > 0:
new_map[i,midinote+1] = 1
else_map[i,midinote] = 0
note_map = note_map + new_map
# step3 Turn vibrato pitch labels into notes.
new_map = np.zeros(roll.shape)
min_note_frames = 3
last_midinote = 0
note_list = []
count = 0
for i in range(len(else_map)):
midinote = np.argmax(else_map[i,:])
if midinote > 0 and np.abs(midinote - last_midinote) <= 1:
last_midinote = midinote
note_list.append(midinote)
count+= 1
else:
if count >= min_note_frames:
median_note = note_list[int((len(note_list)/2))]
new_map[i-count-1:i,median_note] = 1
else_map[i-count-1:i,:] = 0
last_midinote = midinote
note_list = []
count = 0
note_map = note_map + new_map
# step4 Connect nearby notes with the same pitch label.
last_midinote = 0
for i in range(len(note_map)):
midinote = np.argmax(note_map[i,:])
if last_midinote !=0 and midinote == 0:
if (i+1)<len(note_map) and np.argmax(note_map[i+1,:]) == last_midinote:
note_map[i,last_midinote] = 1
elif (i+2)<len(note_map) and np.argmax(note_map[i+2,:]) == last_midinote:
note_map[i:i+2,last_midinote] = 1
elif (i+3)<len(note_map) and np.argmax(note_map[i+3,:]) == last_midinote:
note_map[i:i+3,last_midinote] = 1
elif (i+4)<len(note_map) and np.argmax(note_map[i+4,:]) == last_midinote:
note_map[i:i+4,last_midinote] = 1
elif (i+5)<len(note_map) and np.argmax(note_map[i+5,:]) == last_midinote:
note_map[i:i+5,last_midinote] = 1
last_midinote = midinote
else:
last_midinote = midinote
return note_map
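# --- Minimal sanity check for smoothing() (illustrative; the 88-column roll width
# is just an example). A 2-frame blip should be discarded while a 10-frame pitch
# run survives as a note.
def _smoothing_demo():
    roll = np.zeros((40, 88))
    roll[5:7, 60] = 1          # too short: below min_note_frames
    roll[15:25, 64] = 1        # long enough to be kept as a note
    note_map = smoothing(roll)
    print('short blip kept:', note_map[:, 60].sum() > 0)    # expected: False
    print('long run kept  :', note_map[:, 64].sum() > 0)    # expected: True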
class MSnet(nn.Module):
def __init__(self):
super(MSnet, self).__init__()
self.conv1 = nn.Sequential(
nn.BatchNorm2d(3),
nn.Conv2d(3, 32, 5, padding=2),
nn.SELU()
)
self.pool1 = nn.MaxPool2d((3,1), return_indices=True)
self.conv2 = nn.Sequential(
nn.BatchNorm2d(32),
nn.Conv2d(32, 64, 5, padding=2),
nn.SELU()
)
self.pool2 = nn.MaxPool2d((4,1), return_indices=True)
self.conv3 = nn.Sequential(
nn.BatchNorm2d(64),
nn.Conv2d(64, 128, 5, padding=2),
nn.SELU()
)
self.pool3 = nn.MaxPool2d((4,1), return_indices=True)
self.bottom = nn.Sequential(
nn.BatchNorm2d(128),
nn.Conv2d(128, 1, (6,5), padding=(0,2)),
nn.SELU()
)
self.up_pool3 = nn.MaxUnpool2d((4,1))
self.up_conv3 = nn.Sequential(
nn.BatchNorm2d(128),
nn.Conv2d(128, 64, 5, padding=2),
nn.SELU()
)
self.up_pool2 = nn.MaxUnpool2d((4,1))
self.up_conv2 = nn.Sequential(
nn.BatchNorm2d(64),
nn.Conv2d(64, 32, 5, padding=2),
nn.SELU()
)
self.up_pool1 = nn.MaxUnpool2d((3,1))
self.up_conv1 = nn.Sequential(
nn.BatchNorm2d(32),
nn.Conv2d(32, 1, 5, padding=2),
nn.SELU()
)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
c1, ind1 = self.pool1(self.conv1(x))
c2, ind2 = self.pool2(self.conv2(c1))
c3, ind3 = self.pool3(self.conv3(c2))
bm = self.bottom(c3)
u3 = self.up_conv3(self.up_pool3(c3, ind3))
u2 = self.up_conv2(self.up_pool2(u3, ind2))
u1 = self.up_conv1(self.up_pool1(u2, ind1))
out = torch.cat((bm, u1), dim=2)
out = torch.squeeze(out,1)
output = self.softmax(out)
return output
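# --- Illustrative shape check (assumption: the CFP input has 3 channels and 288
# frequency bins, i.e. 6 octaves x 48 bins per octave, which matches the 3*4*4 = 48
# total pooling factor above and the get_CenFreq() call in est() below). The extra
# first output row comes from the bottom branch and acts as the non-melody class.
def _msnet_shape_demo():
    model = MSnet().eval()
    with torch.no_grad():
        x = torch.randn(1, 3, 288, 64)          # (batch, CFP channels, freq bins, frames)
        y = model(x)
    print(y.shape)                              # expected: torch.Size([1, 289, 64])
    print(float(y[:, :, 0].sum()))              # softmax over dim 1 -> ~1.0 per frame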
def est(output):
CenFreq = cfp.get_CenFreq(StartFreq=32.7, StopFreq=2093.0, NumPerOct=48)
song_len = output.shape[-1]
est_time = np.arange(song_len)*0.02322
output = output[0,:,:]
amax_freq = np.argmax(output, axis=0)
est_freq = np.zeros(song_len)
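# --- Illustrative stand-in (assumption, not the author's est()): a typical decode
# step for the network output. It assumes row 0 (the bottom-branch row concatenated
# first in MSnet.forward) is the non-melody class and that get_CenFreq() returns one
# centre frequency per melody class, so class k > 0 maps to CenFreq[k-1].
def _est_sketch(output):
    CenFreq = cfp.get_CenFreq(StartFreq=32.7, StopFreq=2093.0, NumPerOct=48)
    prob = output[0, :, :]                      # (num_classes, num_frames)
    song_len = prob.shape[-1]
    est_time = np.arange(song_len) * 0.02322    # same hop time as above
    amax = np.argmax(prob, axis=0)
    est_freq = np.zeros(song_len)
    voiced = amax > 0
    est_freq[voiced] = np.asarray(CenFreq)[amax[voiced] - 1]
    return np.stack([est_time, est_freq], axis=1)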
import numpy as np
import matplotlib.image as mimg
import matplotlib.pyplot as plt
from digitizing import digitizing_point
import os
from projective_coefficients import projective_coefficients
from projective_transformation import transform_points
from quadratic_a_matrix import a_mat_quadratic
if __name__ == "__main__":
# ************************** Data Preparation ***************************
gcps = np.array([[5, 82, 320], [1000, 118, 400], [1000, 931, 420],
[18, 1000, 390], [194, 249, 303.5], [-23, 721, 422.3],
[247, 1119, 445.0], [257, 855, 447.8], [596, 531, 423.9],
[770, 362, 405.1]])
u = 6
img = mimg.imread('img.jpg')
# *************** Get 10 Raw points *******************
if os.path.isfile('raw_data.txt'):
raw_data = np.loadtxt('raw_data.txt')
else:
print('\nUse right click to add point,\n '
'Backspace to delete wrong point and\n '
'Enter to finish digitizing\n')
height = np.empty((0, gcps.shape[0]))
raw_data = digitizing_point(img_mat=img, height_value=height)
np.savetxt('raw_data.txt', raw_data)
raw_data = np.loadtxt('raw_data_20.txt')
# *************** Get 5 Personal Raw points *******************
if os.path.isfile('personal_raw_data_20.txt'):
personal_raw_data = np.loadtxt('personal_raw_data_20.txt')
else:
print('\nUse right click to add point,\n '
'Backspace to delete wrong point and\n '
'Enter to finish digitizing\n')
height = np.array([[320, 400, 420, 370, 430]])
personal_raw_data = digitizing_point(img_mat=img, height_value=height)
np.savetxt('personal_raw_data.txt', personal_raw_data)
personal_raw_data = np.loadtxt('personal_raw_data.txt')
# *************** Find the Coefficients of Projective Transformation *******************
x_cap_proj = projective_coefficients(gcp=gcps, raw=raw_data)
# *************** Create dataset with 15 points *******************
personal_raw_data_projected = transform_points(personal_raw_data, x_cap_proj)
full_dataset = np.concatenate((gcps, personal_raw_data_projected), axis=0)
# *************** RMSE Estimation with 10 and 15 Points *******************
x_cap_quad = []
for i in [10, 15]:
a_mat = a_mat_quadratic(full_dataset[:i, :])
x_cap = np.linalg.inv(np.transpose(a_mat).dot(a_mat)).dot(np.transpose(a_mat)).dot(full_dataset[:i, 2])
z_cap = a_mat.dot(x_cap)
print(f'x_cap of {i} point is equal to:\n {x_cap} \n')
z = full_dataset[:i, 2]
v = [z_cap[i] - z[i] for i in range(z_cap.shape[0])]
rmse = np.sqrt((np.transpose(np.array(v)).dot(np.array(v))) / (i - u))
print(f'RMSE of {i} point is equal to: {rmse} ')
print('\n', '*' * 25, '\n')
x_cap_quad.append(x_cap)
# *************** Z0 Estimation with 10 and 15 Points *******************
loi = np.array([[260, 520]])
z0_10point = a_mat_quadratic(loi).dot(x_cap_quad[0])
z0_15point = a_mat_quadratic(loi).dot(x_cap_quad[1])
print(f'z0_10point: {z0_10point[0]},\t z0_15point: {z0_15point[0]}')
print('\n', '*' * 25, '\n')
# *************** Plot 3D: Personal Points and GCPs *******************
plt.figure(1)
ax = plt.axes(projection='3d')
ax.scatter3D(full_dataset[:10, 0], full_dataset[:10, 1], full_dataset[:10, 2], marker='^', cmap='Reds', s=300,
label='GCPs')
ax.scatter3D(full_dataset[10:15, 0], full_dataset[10:15, 1], full_dataset[10:15, 2], marker='o', cmap='Reds', s=300,
label='Personal Points')
ax.set_xlabel('$x$', fontsize=30)
ax.set_ylabel('$y$', fontsize=30)
ax.set_zlabel('$z$', fontsize=30)
plt.title("Plot 3D: Personal Points and GCPs", fontsize=30)
plt.legend(loc='upper left', numpoints=1, ncol=3, fontsize=30, bbox_to_anchor=(0, 0))
# *************** Plot 3D Surface *******************
plt.figure(2)
xgrid = np.linspace(-50, 1100, 50)
ygrid = np.linspace(0, 1200, 50)
x, y = np.meshgrid(xgrid, ygrid)
z = np.array([a_mat_quadratic([[x[j][i], y[j][i]]]).dot(x_cap_quad[1]) for j in np.arange(ygrid.shape[0])
for i in np.arange(xgrid.shape[0])])
ax = plt.axes(projection='3d')
ax.plot_surface(x.reshape(50, 50), y.reshape(50, 50), z.reshape(50, 50), rstride=1, cstride=1,
cmap='viridis', edgecolor='none')
ax.scatter3D(full_dataset[:10, 0], full_dataset[:10, 1], full_dataset[:10, 2], marker='^', cmap='Reds', s=300,
label='GCPs')
ax.scatter3D(full_dataset[10:15, 0], full_dataset[10:15, 1], full_dataset[10:15, 2], marker='o', cmap='Reds', s=300,
label='Personal Points')
ax.set_xlabel('$x$', fontsize=30)
ax.set_ylabel('$y$', fontsize=30)
ax.set_zlabel('$z$', fontsize=30)
ax.set_title('Plot 3D Surface, Quadratic Polynomial', fontsize=30)
plt.legend(loc='upper left', numpoints=1, ncol=3, fontsize=30, bbox_to_anchor=(0, 0))
# *************** Height Profile *******************
plt.figure(3)
x_height = np.linspace(5, 770, 120)
y_height = np.linspace(82, 362, 120)
z_height = []
dist = []
d = np.sqrt((x_height[0] - x_height[1]) ** 2 + (y_height[0] - y_height[1]) ** 2)
z_height = np.empty((0, 1))
dist = np.empty((0, 1))
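# --- Illustrative sketch (assumption): with u = 6 unknowns, a_mat_quadratic() is
# consistent with a full quadratic surface z = a0 + a1*x + a2*y + a3*x^2 + a4*x*y + a5*y^2.
# The stand-in below builds that design matrix on synthetic points and repeats the
# normal-equation solve and RMSE computation used above; it is not the original
# a_mat_quadratic implementation.
def _quadratic_fit_sketch():
    rng = np.random.default_rng(0)
    xy = rng.uniform(0, 1000, size=(15, 2))
    a_true = np.array([300.0, 0.05, 0.02, 1e-5, -2e-5, 1e-5])
    A = np.column_stack([np.ones(len(xy)), xy[:, 0], xy[:, 1],
                         xy[:, 0] ** 2, xy[:, 0] * xy[:, 1], xy[:, 1] ** 2])
    z = A @ a_true + rng.normal(0, 0.5, size=len(xy))
    x_cap = np.linalg.inv(A.T @ A) @ A.T @ z    # same normal-equation form as above
    v = A @ x_cap - z
    rmse = np.sqrt(v @ v / (len(xy) - 6))       # redundancy = n - u
    return x_cap, rmse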
# coding: utf-8
import os
import chainer
import numpy as np
from PIL import Image
from glob import glob
import cv2
from functions import onehot2label
from generator import ResNetDeepLab
from options import get_options
def resize_and_crop(img):
w, h = img.size
out = None
if h < w:
out = img.resize((int(256 * w / h), 256))
left = int(out.width / 2 - 128)
out = out.crop((left, 0, left + 256, 256))
else:
out = img.resize((256, int(256 * h / w)))
top = int(out.height / 2 - 128)
out = out.crop((0, top, 256, top + 256))
return out
# Not applied by default (its call in main() is commented out): median-filters the
# label image to suppress speckle, then restores any pixels the blur turned pure black.
def remove_noise(img, ksize=5):
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    img_mask = cv2.medianBlur(img, ksize)
    # Pixels that became pure black after blurring fall back to the original values.
    indexes = np.where((img_mask == [0, 0, 0]).all(axis=2))
    img_mask[indexes] = img[indexes]
    img_mask = cv2.cvtColor(img_mask, cv2.COLOR_BGR2RGB)
    return img_mask
def img_save(x, path):
img_array = np.transpose(x, (1, 2, 0))
img_array = np.uint8(img_array * 255)
img = Image.fromarray(img_array)
img.save(path)
def is_exist_color(img, rgb_list, threshold_num=1):
    # True if at least `threshold_num` pixels in `img` (H, W, 3) exactly match `rgb_list`.
    # `rgb_list` broadcasts against every pixel; a pixel matches when all 3 channels agree.
    mask = np.sum(img == rgb_list, axis=2) == 3
    out = np.sum(mask) >= threshold_num
    return out
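# --- Illustrative stand-in (assumption): onehot2label() comes from functions.py,
# which is not shown here. Judging from main() below, it turns a (C, H, W) class map
# into an RGB label image in [0, 1] with red = eyes, green = face, blue = hair and
# class 4 as background. The hypothetical palette below mirrors those checks; the
# exact class-to-colour mapping and class count are guesses, not the original code.
def _onehot2label_sketch(onehot):
    palette = np.array([
        [255, 0, 0],        # eyes (checked via is_exist_color in main)
        [0, 255, 0],        # face
        [0, 0, 255],        # hair
        [255, 255, 0],      # other foreground (guessed)
        [0, 0, 0],          # background (class 4 in main)
    ], dtype='float32') / 255.0
    labels = np.argmax(onehot, axis=0)          # (H, W) class indices
    rgb = palette[labels]                       # (H, W, 3) colour lookup
    return rgb.transpose(2, 0, 1)               # (3, H, W), like onehot2label's output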
def main():
out_dir = 'predict_to'
in_dir = 'predict_from'
gen_npz = 'pretrained/gen.npz'
opt = get_options()
gen = ResNetDeepLab(opt)
gen.to_gpu(0)
chainer.serializers.load_npz(gen_npz, gen)
gen.to_cpu()
num = 0
os.makedirs(out_dir, exist_ok=True)
files = glob(in_dir + '/*.*')
for filename in files:
print(filename)
img = resize_and_crop(Image.open(filename))
img_array = np.array(img, dtype='float32')
img_array = img_array.transpose((2, 0, 1)) / 255
x = chainer.Variable(img_array[np.newaxis, :3, :, :])
out = gen(x)
onehot = out.array[0]
x = x.array[0]
out = onehot2label(onehot)
# Heuristic background check: class 4 is treated as background; keep the image
# only if at least 60% of its predicted-background pixels are bright (all three
# RGB channels above 0.5), i.e. reject photos with dark backgrounds.
bg_onehot = np.argmax(onehot, axis=0)
bg_onehot = bg_onehot == 4
bg_threshold = 0.5
bg_num = x * bg_onehot
bg_num = bg_num > bg_threshold
bg_num = np.sum(bg_num, axis=0)
bg_num = np.sum(bg_num == 3)
bg_ratio = bg_num / np.sum(bg_onehot)
if bg_ratio < 0.6:
    print('Black Background')
    continue
out = np.transpose(out * 255, (1, 2, 0)).astype('uint8')
# out = remove_noise(out, ksize=ksize)
# exist eye ?
if not is_exist_color(out, [255, 0, 0], threshold_num=32):
print('No Eye')
continue
# exist face ?
if not is_exist_color(out, [0, 255, 0], threshold_num=100):
print('No Face')
continue
# exist hair ?
if not is_exist_color(out, [0, 0, 255], threshold_num=100):
print('No Hair')
continue
x = np.transpose(x * 255, (1, 2, 0))